├── .binder ├── postBuild └── requirements.txt ├── .github ├── dependabot.yml ├── pympdata_logo.png ├── pympdata_logo.svg └── workflows │ ├── joss.yml │ ├── pdoc.yml │ ├── pypi.yml │ ├── readme_julia.yml │ ├── readme_matlab.yml │ ├── readme_python.yml │ ├── readme_rust.yml │ ├── stale_issues.yml │ └── tests.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── .zenodo.json ├── LICENSE ├── PyMPDATA ├── __init__.py ├── boundary_conditions │ ├── __init__.py │ ├── constant.py │ ├── extrapolated.py │ ├── periodic.py │ └── polar.py ├── impl │ ├── __init__.py │ ├── clock.py │ ├── domain_decomposition.py │ ├── enumerations.py │ ├── field.py │ ├── formulae_antidiff.py │ ├── formulae_axpy.py │ ├── formulae_flux.py │ ├── formulae_laplacian.py │ ├── formulae_nonoscillatory.py │ ├── formulae_upwind.py │ ├── grid.py │ ├── indexers.py │ ├── meta.py │ ├── traversals.py │ ├── traversals_common.py │ ├── traversals_halos_scalar.py │ ├── traversals_halos_vector.py │ ├── traversals_scalar.py │ └── traversals_vector.py ├── options.py ├── scalar_field.py ├── solver.py ├── stepper.py └── vector_field.py ├── README.md ├── docs ├── bibliography.json ├── generate_html.py ├── markdown │ └── pympdata_landing.md └── templates │ ├── custom.css │ ├── index.html.jinja2 │ ├── syntax-highlighting.css │ └── theme.css ├── examples ├── MANIFEST.in ├── PyMPDATA_examples │ ├── Arabas_and_Farhat_2020 │ │ ├── Bjerksund_and_Stensland_1993.py │ │ ├── Black_Scholes_1973.py │ │ ├── __init__.py │ │ ├── analysis_figures_2_and_3.py │ │ ├── analysis_table_1.py │ │ ├── colors.py │ │ ├── fig_1.ipynb │ │ ├── fig_2.ipynb │ │ ├── fig_3.ipynb │ │ ├── options.py │ │ ├── setup1_european_corridor.py │ │ ├── setup2_american_put.py │ │ ├── simulation.py │ │ └── tab_1.ipynb │ ├── Bartman_et_al_2022 │ │ ├── __init__.py │ │ └── fig_X.ipynb │ ├── DPDC │ │ ├── __init__.py │ │ └── demo.ipynb │ ├── Jarecka_et_al_2015 │ │ ├── __init__.py │ │ ├── fig_6.ipynb │ │ ├── formulae.py │ │ ├── plot_output.py │ │ ├── settings.py │ │ └── simulation.py │ ├── Jaruga_et_al_2015 │ │ ├── __init__.py │ │ └── fig19.ipynb │ ├── Magnuszewski_et_al_2025 │ │ ├── __init__.py │ │ ├── asian_option.py │ │ ├── barraquand_data.py │ │ ├── common.py │ │ ├── figs.ipynb │ │ ├── monte_carlo.py │ │ └── table_1.ipynb │ ├── Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12 │ │ ├── __init__.py │ │ ├── analysis.py │ │ ├── demo.ipynb │ │ ├── settings.py │ │ └── simulation.py │ ├── Olesik_et_al_2022 │ │ ├── East_and_Marshall_1954.py │ │ ├── README.md │ │ ├── __init__.py │ │ ├── analysis.py │ │ ├── coordinates.py │ │ ├── demo_analytical_solution.ipynb │ │ ├── demo_make_convergences.ipynb │ │ ├── demo_make_dispersion_ratio.ipynb │ │ ├── demo_make_plots.ipynb │ │ ├── demo_wall_times.ipynb │ │ ├── equilibrium_drop_growth.py │ │ ├── plotter.py │ │ ├── settings.py │ │ ├── simulation.py │ │ ├── wall_time.py │ │ ├── wall_time_refdata.txt │ │ └── wall_time_textable.txt │ ├── Shipway_and_Hill_2012 │ │ ├── __init__.py │ │ ├── arakawa_c.py │ │ ├── droplet_activation.py │ │ ├── fig_1.ipynb │ │ ├── formulae.py │ │ ├── mpdata.py │ │ ├── plot.py │ │ └── settings.py │ ├── Smolarkiewicz_1984 │ │ ├── __init__.py │ │ ├── figs_13-14.ipynb │ │ ├── settings.py │ │ └── simulation.py │ ├── Smolarkiewicz_2006_Figs_3_4_10_11_12 │ │ ├── __init__.py │ │ ├── demo.ipynb │ │ ├── settings.py │ │ └── simulation.py │ ├── Williamson_and_Rasch_1989_as_in_Jaruga_et_al_2015_Fig_14 │ │ ├── __init__.py │ │ └── demo_over_the_pole.ipynb │ ├── __init__.py │ ├── advection_diffusion_1d │ │ ├── __init__.py │ │ └── demo.ipynb │ ├── 
advection_diffusion_2d │ │ ├── __init__.py │ │ └── advection-diffusion-2d.ipynb │ ├── trixi_comparison │ │ ├── __init__.py │ │ └── advection_comparison.ipynb │ ├── utils │ │ ├── __init__.py │ │ ├── discretisation.py │ │ ├── error_norms.py │ │ ├── financial_formulae │ │ │ ├── Bjerksund_and_Stensland_1993.py │ │ │ ├── Black_Scholes_1973.py │ │ │ ├── __init__.py │ │ │ └── asian_option.py │ │ └── nondivergent_vector_field_2d.py │ └── wikipedia_example │ │ ├── __init__.py │ │ ├── demo.ipynb │ │ └── settings.py ├── README.md ├── docs │ └── pympdata_examples_landing.md ├── pyproject.toml └── setup.py ├── paper ├── fig-crop.pdf ├── fig-perf.pdf ├── paper.bib └── paper.md ├── pyproject.toml ├── setup.py └── tests ├── __init__.py ├── smoke_tests ├── __init__.py ├── arabas_and_farhat_2020 │ └── test_black_scholes.py ├── jarecka_et_al_2015 │ └── test_just_do_it.py ├── jaruga_et_al_2015 │ ├── __init__.py │ ├── test_boussinesq.py │ └── test_libmpdata_refdata.py ├── kinematic_2d │ ├── __init__.py │ └── test_single_timestep.py ├── magnuszewski_et_al_2025 │ ├── __init__.py │ └── test_figs_1_2.py ├── olesik_et_al_2022 │ ├── __init__.py │ ├── convergence_refdata.txt │ ├── test_discretisation.py │ ├── test_moment_of_r_integral.py │ ├── test_simulation.py │ └── test_wall_time.py ├── smolarkiewicz_1983 │ └── test_against_libmpdata_refdata.py ├── smolarkiewicz_2006 │ └── test_run_all.py ├── timing │ ├── __init__.py │ ├── conftest.py │ ├── test_timing_1d.py │ ├── test_timing_2d.py │ └── test_timing_3d.py └── utils │ └── test_financial_formulae.py └── unit_tests ├── __init__.py ├── conftest.py ├── quick_look.py ├── test_boundary_condition_extrapolated_1d.py ├── test_boundary_condition_extrapolated_2d.py ├── test_boundary_condition_polar_2d.py ├── test_boundary_conditions_periodic.py ├── test_clock.py ├── test_diffusion_only_2d.py ├── test_domain_decomposition.py ├── test_dpdc.py ├── test_explicit_fill_halos.py ├── test_formulae_upwind.py ├── test_grid.py ├── test_mpdata_2d.py ├── test_scalar_field.py ├── test_shared_advector.py ├── test_solver.py ├── test_stepper.py ├── test_traversals.py ├── test_traversals_with_bc_periodic.py ├── test_upwind_1d.py └── test_upwind_2d.py /.binder/postBuild: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # mimicking what happens on Colab: packages are fetched from PyPI, only notebooks from the repo 4 | 5 | set -e 6 | shopt -s extglob 7 | rm -rfv !("examples") 8 | -------------------------------------------------------------------------------- /.binder/requirements.txt: -------------------------------------------------------------------------------- 1 | PyMPDATA-examples 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: pip 8 | directory: "/examples/" 9 | schedule: 10 | interval: daily 11 | -------------------------------------------------------------------------------- /.github/pympdata_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/.github/pympdata_logo.png -------------------------------------------------------------------------------- /.github/workflows/joss.yml: 
-------------------------------------------------------------------------------- 1 | name: Build JOSS paper draft 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | paper: 13 | runs-on: ubuntu-latest 14 | name: Paper Draft 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v2 18 | - name: TeX and PDF 19 | uses: docker://openjournals/paperdraft:latest 20 | with: 21 | args: '-k paper/paper.md' 22 | env: 23 | GIT_SHA: $GITHUB_SHA 24 | JOURNAL: joss 25 | - name: Upload 26 | uses: actions/upload-artifact@v4 27 | with: 28 | name: paper 29 | path: paper/ 30 | -------------------------------------------------------------------------------- /.github/workflows/pdoc.yml: -------------------------------------------------------------------------------- 1 | name: pdoc 2 | 3 | defaults: 4 | run: 5 | shell: bash 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | schedule: 13 | - cron: '0 13 * * 4' 14 | 15 | jobs: 16 | pdoc: 17 | strategy: 18 | matrix: 19 | platform: [ubuntu-latest, macos-latest, windows-latest] 20 | runs-on: ${{ matrix.platform }} 21 | steps: 22 | - uses: actions/checkout@v2 23 | with: 24 | submodules: recursive 25 | persist-credentials: false 26 | - uses: actions/setup-python@v5.2.0 27 | with: 28 | python-version: 3.9 29 | - env: 30 | JUPYTER_PLATFORM_DIRS: 1 31 | run: | 32 | pip install pdoc nbformat gitpython 33 | pip install -e . -e ./examples 34 | python -We docs/generate_html.py . . 35 | 36 | python - <&1 | tee build.log 41 | exit `fgrep -v "warning: no previously-included files matching" buid.log | fgrep -i warning | wc -l` 42 | twine check --strict dist/* 43 | exit `tar tzf dist/*.tar.gz | fgrep ".ipynb" | wc -l` 44 | cd .. 45 | 46 | - if: github.event_name == 'push' && github.ref == 'refs/heads/main' 47 | uses: pypa/gh-action-pypi-publish@release/v1.12 48 | with: 49 | attestations: false 50 | repository_url: https://test.pypi.org/legacy/ 51 | packages-dir: ${{ matrix.package-dir }}/dist 52 | 53 | - if: startsWith(github.ref, 'refs/tags') 54 | uses: pypa/gh-action-pypi-publish@release/v1.12 55 | with: 56 | attestations: false 57 | packages-dir: ${{ matrix.package-dir }}/dist 58 | -------------------------------------------------------------------------------- /.github/workflows/readme_julia.yml: -------------------------------------------------------------------------------- 1 | name: readme_julia 2 | 3 | defaults: 4 | run: 5 | shell: bash 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | schedule: 13 | - cron: '0 13 * * 4' 14 | 15 | jobs: 16 | julia: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v2 20 | - uses: actions/setup-python@v1 21 | with: 22 | python-version: 3.9 23 | - run: pip install -e . 
24 | - run: pip install pytest-codeblocks pytest 25 | - run: python -c "import pytest_codeblocks; code=pytest_codeblocks.extract_from_file('docs/markdown/pympdata_landing.md'); f=open('readme.jl', 'w'); f.writelines(block.code for block in code if block.syntax=='Julia'); f.close()" 26 | - run: cat readme.jl 27 | - uses: julia-actions/setup-julia@v1 28 | - run: julia readme.jl 29 | -------------------------------------------------------------------------------- /.github/workflows/readme_matlab.yml: -------------------------------------------------------------------------------- 1 | name: readme_matlab 2 | 3 | defaults: 4 | run: 5 | shell: bash 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | schedule: 13 | - cron: '0 13 * * 4' 14 | 15 | jobs: 16 | matlab: 17 | strategy: 18 | matrix: 19 | # https://www.mathworks.com/support/requirements/python-compatibility.html 20 | include: 21 | - matlab-version: "R2021b" 22 | python-version: "3.9" 23 | - matlab-version: "R2024b" 24 | python-version: "3.12" 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4.2.2 28 | - uses: actions/setup-python@v5.3.0 29 | with: 30 | python-version: ${{ matrix.python-version }} 31 | - run: pip install -e . 32 | - run: pip install pytest-codeblocks pytest 33 | - run: python -c "import pytest_codeblocks; code=pytest_codeblocks.extract_from_file('docs/markdown/pympdata_landing.md'); f=open('readme.m', 'w'); f.writelines(block.code for block in code if block.syntax=='Matlab'); f.close()" 34 | - run: cat readme.m 35 | - uses: matlab-actions/setup-matlab@v2.3.0 36 | with: 37 | release: ${{ matrix.matlab-version }} 38 | cache: true 39 | - uses: matlab-actions/run-command@v2.1.1 40 | with: 41 | command: pe=pyenv; assert(pe.Version == "${{ matrix.python-version }}"); 42 | - run: echo NUMBA_NUM_THREADS=1 >> $GITHUB_ENV 43 | - uses: matlab-actions/run-command@v2.1.1 44 | with: 45 | command: readme 46 | -------------------------------------------------------------------------------- /.github/workflows/readme_python.yml: -------------------------------------------------------------------------------- 1 | name: readme_python 2 | 3 | defaults: 4 | run: 5 | shell: bash 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | schedule: 13 | - cron: '0 13 * * 4' 14 | 15 | jobs: 16 | python: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v2 20 | - uses: actions/setup-python@v1 21 | with: 22 | python-version: 3.9 23 | - run: pip install -e . 
24 | - run: pip install pytest-codeblocks pytest matplotlib 25 | - run: python -c "import pytest_codeblocks; code=pytest_codeblocks.extract_from_file('docs/markdown/pympdata_landing.md'); f=open('readme.py', 'w'); f.writelines(block.code for block in code if block.syntax=='Python'); f.close()" 26 | - run: python -We readme.py 27 | - run: cat readme.py 28 | - run: ls readme_grid.png 29 | - run: ls readme_gauss_0.png 30 | - run: ls readme_gauss.png 31 | 32 | - name: artefacts 33 | if: github.ref == 'refs/heads/main' 34 | uses: eine/tip@master 35 | with: 36 | token: ${{ secrets.GITHUB_TOKEN }} 37 | files: | 38 | readme_grid.png 39 | readme_gauss_0.png 40 | readme_gauss.png 41 | -------------------------------------------------------------------------------- /.github/workflows/readme_rust.yml: -------------------------------------------------------------------------------- 1 | name: readme_rust 2 | 3 | defaults: 4 | run: 5 | shell: bash 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | schedule: 13 | - cron: '0 13 * * 4' 14 | 15 | jobs: 16 | rust: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v2 20 | - uses: dtolnay/rust-toolchain@stable 21 | with: 22 | components: rust-src 23 | - uses: actions/setup-python@v5 24 | with: 25 | python-version: 3.9 26 | - run: pip install -e . 27 | - run: pip install pytest-codeblocks pytest 28 | 29 | - run: | 30 | cat >Cargo.toml < eps else 0 62 | return max( 63 | ats(*focus_psi, 1) - (ats(*focus_psi, 2) - ats(*focus_psi, 1)) * cnst, 0 64 | ) 65 | edg = span + halo - 1 - focus_psi[ARG_FOCUS][dim] 66 | den = ats(*focus_psi, edg - 1) - ats(*focus_psi, edg - 2) 67 | nom = ats(*focus_psi, edg) - ats(*focus_psi, edg - 1) 68 | cnst = nom / den if abs(den) > eps else 0 69 | return max( 70 | ats(*focus_psi, -1) + (ats(*focus_psi, -1) - ats(*focus_psi, -2)) * cnst, 0 71 | ) 72 | 73 | if dtype == complex: 74 | 75 | @numba.njit(**jit_flags) 76 | def fill_halos_scalar(psi, span, sign): 77 | return complex( 78 | impl( 79 | (psi[META_AND_DATA_META], psi[META_AND_DATA_DATA].real), span, sign 80 | ), 81 | impl( 82 | (psi[META_AND_DATA_META], psi[META_AND_DATA_DATA].imag), span, sign 83 | ), 84 | ) 85 | 86 | else: 87 | 88 | @numba.njit(**jit_flags) 89 | def fill_halos_scalar(psi, span, sign): 90 | return impl(psi, span, sign) 91 | 92 | return make_fill_halos_loop(jit_flags, set_value, fill_halos_scalar) 93 | 94 | 95 | @lru_cache() 96 | def _make_vector_extrapolated(atv, set_value, jit_flags, dimension_index): 97 | @numba.njit(**jit_flags) 98 | def fill_halos_parallel(focus_psi, _, sign): 99 | return atv(*focus_psi, sign + 0.5) 100 | 101 | @numba.njit(**jit_flags) 102 | def fill_halos_normal(focus_psi, _, sign, __): 103 | return atv(*focus_psi, sign, 0.5) 104 | 105 | return make_fill_halos_loop_vector( 106 | jit_flags, set_value, fill_halos_parallel, fill_halos_normal, dimension_index 107 | ) 108 | -------------------------------------------------------------------------------- /PyMPDATA/boundary_conditions/periodic.py: -------------------------------------------------------------------------------- 1 | """periodic/cyclic boundary condition logic""" 2 | 3 | from functools import lru_cache 4 | 5 | import numba 6 | 7 | from PyMPDATA.impl.enumerations import SIGN_LEFT, SIGN_RIGHT 8 | from PyMPDATA.impl.traversals_common import ( 9 | make_fill_halos_loop, 10 | make_fill_halos_loop_vector, 11 | ) 12 | 13 | 14 | class Periodic: 15 | """class which instances are to be passed in boundary_conditions tuple to the 16 | 
`PyMPDATA.scalar_field.ScalarField` and 17 | `PyMPDATA.vector_field.VectorField` __init__ methods""" 18 | 19 | def __init__(self): 20 | assert SIGN_RIGHT == -1 21 | assert SIGN_LEFT == +1 22 | 23 | @staticmethod 24 | def make_scalar(indexers, __, ___, jit_flags, dimension_index): 25 | """returns (lru-cached) Numba-compiled scalar halo-filling callable""" 26 | return _make_scalar_periodic( 27 | indexers.ats[dimension_index], indexers.set, jit_flags 28 | ) 29 | 30 | @staticmethod 31 | def make_vector(indexers, __, ___, jit_flags, dimension_index): 32 | """returns (lru-cached) Numba-compiled vector halo-filling callable""" 33 | return _make_vector_periodic( 34 | indexers.atv, indexers.set, jit_flags, dimension_index, indexers.n_dims 35 | ) 36 | 37 | 38 | @lru_cache() 39 | def _make_scalar_periodic(ats, set_value, jit_flags): 40 | @numba.njit(**jit_flags) 41 | def fill_halos_scalar(focus_psi, span, sign): 42 | return ats(*focus_psi, sign * span) 43 | 44 | return make_fill_halos_loop(jit_flags, set_value, fill_halos_scalar) 45 | 46 | 47 | @lru_cache() 48 | def _make_vector_periodic(atv, set_value, jit_flags, dimension_index, n_dims): 49 | @numba.njit(**jit_flags) 50 | def fill_halos_parallel(focus_psi, span, sign): 51 | offset = 0.5 if sign == SIGN_LEFT else 1.5 52 | return atv[dimension_index](*focus_psi, sign * (span - offset)) 53 | 54 | @numba.njit(**jit_flags) 55 | def fill_halos_normal(focus_psi, span, sign, dim): 56 | if n_dims == 3 and dimension_index == dim + 1: 57 | return atv[dim](*focus_psi, 0.5, sign * span) 58 | return atv[dimension_index](*focus_psi, sign * span, 0.5) 59 | 60 | return make_fill_halos_loop_vector( 61 | jit_flags, set_value, fill_halos_parallel, fill_halos_normal, dimension_index 62 | ) 63 | -------------------------------------------------------------------------------- /PyMPDATA/boundary_conditions/polar.py: -------------------------------------------------------------------------------- 1 | """polar boundary condition for use in with spherical coordinates""" 2 | 3 | from functools import lru_cache 4 | 5 | import numba 6 | 7 | from PyMPDATA.impl.enumerations import ARG_FOCUS, SIGN_LEFT, SIGN_RIGHT 8 | from PyMPDATA.impl.traversals_common import ( 9 | make_fill_halos_loop, 10 | make_fill_halos_loop_vector, 11 | ) 12 | 13 | 14 | class Polar: 15 | """class which instances are to be passed in boundary_conditions tuple to the 16 | `ScalarField` and `VectorField` __init__ methods""" 17 | 18 | def __init__(self, grid, longitude_idx, latitude_idx): 19 | assert SIGN_RIGHT == -1 20 | assert SIGN_LEFT == +1 21 | 22 | self.nlon = grid[longitude_idx] 23 | self.nlat = grid[latitude_idx] 24 | assert self.nlon % 2 == 0 25 | 26 | self.nlon_half = self.nlon // 2 27 | self.lon_idx = longitude_idx 28 | self.lat_idx = latitude_idx 29 | 30 | def make_scalar(self, indexers, halo, _, jit_flags, dimension_index): 31 | """returns (lru-cached) Numba-compiled scalar halo-filling callable""" 32 | nlon_half = self.nlon_half 33 | nlat = self.nlat 34 | lon_idx = self.lon_idx 35 | lat_idx = self.lat_idx 36 | left_edge_idx = halo - 1 37 | right_edge_idx = nlat + halo 38 | ats = indexers.ats[dimension_index] 39 | set_value = indexers.set 40 | 41 | @numba.njit(**jit_flags) 42 | def fill_halos(psi, _, sign): 43 | lon = psi[ARG_FOCUS][lon_idx] 44 | lat = psi[ARG_FOCUS][lat_idx] 45 | if lat <= left_edge_idx: 46 | step = (left_edge_idx - lat) * 2 + 1 47 | else: 48 | step = (lat - right_edge_idx) * 2 + 1 49 | 50 | val = nlon_half * (-1 if lon > nlon_half else 1) 51 | return ats(*psi, sign * step, val) 52 
| 53 | return make_fill_halos_loop(jit_flags, set_value, fill_halos) 54 | 55 | @staticmethod 56 | def make_vector(indexers, _, __, jit_flags, dimension_index): 57 | """returns (lru-cached) Numba-compiled vector halo-filling callable""" 58 | return _make_vector_polar( 59 | indexers.atv, indexers.set, jit_flags, dimension_index 60 | ) 61 | 62 | 63 | @lru_cache() 64 | def _make_vector_polar(_atv, set_value, jit_flags, dimension_index): 65 | @numba.njit(**jit_flags) 66 | def fill_halos_parallel(_1, _2, _3): 67 | return 0 # TODO #120 68 | 69 | @numba.njit(**jit_flags) 70 | def fill_halos_normal(_1, _2, _3, _4): 71 | return 0 # TODO #120 72 | 73 | return make_fill_halos_loop_vector( 74 | jit_flags, set_value, fill_halos_parallel, fill_halos_normal, dimension_index 75 | ) 76 | -------------------------------------------------------------------------------- /PyMPDATA/impl/__init__.py: -------------------------------------------------------------------------------- 1 | """package internals, if anything from within is needed to be referenced 2 | from user code, please report implementation leak into public API as an issue""" 3 | -------------------------------------------------------------------------------- /PyMPDATA/impl/clock.py: -------------------------------------------------------------------------------- 1 | """CPU-time returning clock() function which works from within njit-ted code, 2 | no time unit guaranteed, returned value only for relative time measurements""" 3 | 4 | import ctypes 5 | import sys 6 | 7 | import numba 8 | import numpy as np 9 | 10 | if sys.version_info < (3, 13): 11 | clock = ctypes.pythonapi._PyTime_GetSystemClock # pylint:disable=protected-access 12 | clock.argtypes = [] 13 | clock.restype = ctypes.c_int64 14 | else: 15 | clock_impl = ctypes.pythonapi.PyTime_Time 16 | clock_impl.argtypes = [ctypes.c_void_p] 17 | clock_impl.restype = ctypes.c_int64 18 | 19 | assert ctypes.c_time_t == ctypes.c_int64 # pylint: disable=no-member 20 | 21 | @numba.jit("int64()", nopython=True) 22 | def clock(): 23 | """Numba-JITable version of clock function for Python > 3.12""" 24 | result = np.empty(shape=(1,), dtype=np.int64) 25 | clock_impl(result.ctypes) 26 | return result[0] 27 | -------------------------------------------------------------------------------- /PyMPDATA/impl/domain_decomposition.py: -------------------------------------------------------------------------------- 1 | """logic defining domain decomposition scheme for multi-threading""" 2 | 3 | import math 4 | 5 | import numba 6 | 7 | 8 | def make_subdomain(jit_flags): 9 | """returns an njit-ted function returning start-stop index tuple 10 | for a given domain span, thread rank and thread-pool size""" 11 | 12 | @numba.njit(**jit_flags) 13 | def subdomain(span, rank, size): 14 | if rank >= size: 15 | raise ValueError("rank >= size") 16 | 17 | n_max = math.ceil(span / size) 18 | start = n_max * rank 19 | stop = start + (n_max if start + n_max <= span else span - start) 20 | return start, stop 21 | 22 | return subdomain 23 | -------------------------------------------------------------------------------- /PyMPDATA/impl/enumerations.py: -------------------------------------------------------------------------------- 1 | """common constants named with the intention of improving code readability 2 | (mostly integer indices used for indexing tuples)""" 3 | 4 | import numpy as np 5 | 6 | ARG_FOCUS, ARG_DATA, ARG_DATA_OUTER, ARG_DATA_MID3D, ARG_DATA_INNER = 0, 1, 1, 2, 3 7 | """ indices within tuple passed in fill_halos
boundary-condition calls """ 8 | 9 | MAX_DIM_NUM = 3 10 | """ maximal number of dimensions supported by the package """ 11 | 12 | OUTER, MID3D, INNER = 0, 1, -1 13 | """ labels for identifying 1st, 2nd, and 3rd dimensions """ 14 | 15 | IMPL_META_AND_DATA, IMPL_BC = 0, 1 16 | """ indices of "meta and data" and "bc" elements of the impl tuple in Field instances """ 17 | 18 | META_AND_DATA_META, META_AND_DATA_DATA = 0, 1 19 | """ indices of "meta" and "data" elements of the "meta and data" impl Field property """ 20 | 21 | SIGN_LEFT, SIGN_RIGHT = +1, -1 22 | """ left-hand and right-hand domain sides as used in boundary conditions logic """ 23 | 24 | RNG_START, RNG_STOP, RNG_STEP = 0, 1, 2 25 | """ indices of elements in range-expressing tuples """ 26 | 27 | INVALID_INDEX = -44 28 | """ value with which never-to-be-used unused-dimension tuples are filled with in traversals """ 29 | 30 | INVALID_INIT_VALUE, BUFFER_DEFAULT_VALUE = np.nan, np.nan 31 | """ values with which arrays are filled at initialisation """ 32 | 33 | INVALID_HALO_VALUE = 666 34 | """ value used when constructing never-to-be-used instances of Constant boundary condition """ 35 | 36 | INVALID_NULL_VALUE = 0.0 37 | """ value with which the never-to-be-used "null" fields are populated """ 38 | 39 | ONE_FOR_STAGGERED_GRID = 1 40 | """ used for explaining the purpose of +1 index addition if related to Arakawa-C grid shift """ 41 | -------------------------------------------------------------------------------- /PyMPDATA/impl/formulae_axpy.py: -------------------------------------------------------------------------------- 1 | """basic a*x+y operation logic for use in Fickian term handling""" 2 | 3 | import numba 4 | 5 | from .enumerations import INNER, MID3D, OUTER 6 | from .meta import META_HALO_VALID 7 | 8 | 9 | def make_axpy(options, traversals): 10 | """returns njit-ted function for use with given traversals""" 11 | 12 | n_dims = traversals.n_dims 13 | 14 | @numba.njit(**options.jit_flags) 15 | # pylint: disable=too-many-arguments 16 | def axpy( 17 | out_meta, 18 | out_outer, 19 | out_mid3d, 20 | out_inner, 21 | a_coeffs, 22 | _, 23 | x_outer, 24 | x_mid3d, 25 | x_inner, 26 | __, 27 | y_outer, 28 | y_mid3d, 29 | y_inner, 30 | ): 31 | if n_dims > 1: 32 | out_outer[:] = a_coeffs[OUTER] * x_outer[:] + y_outer[:] 33 | if n_dims > 2: 34 | out_mid3d[:] = a_coeffs[MID3D] * x_mid3d[:] + y_mid3d[:] 35 | out_inner[:] = a_coeffs[INNER] * x_inner[:] + y_inner[:] 36 | out_meta[META_HALO_VALID] = False 37 | 38 | return axpy 39 | -------------------------------------------------------------------------------- /PyMPDATA/impl/formulae_flux.py: -------------------------------------------------------------------------------- 1 | """staggered-grid flux logic including infinite-gauge logic handling""" 2 | 3 | import numba 4 | import numpy as np 5 | 6 | from PyMPDATA.impl.enumerations import MAX_DIM_NUM 7 | 8 | 9 | def make_flux_first_pass(options, traversals): 10 | """returns njit-ted function for use with given traversals""" 11 | idx = traversals.indexers[traversals.n_dims] 12 | apply_vector = traversals.apply_vector() 13 | 14 | formulae_flux_first_pass = tuple( 15 | ( 16 | __make_flux( 17 | options.jit_flags, 18 | idx.atv[i], 19 | idx.ats[i], 20 | first_pass=True, 21 | infinite_gauge=False, 22 | ) 23 | if idx.ats[i] is not None 24 | else None 25 | ) 26 | for i in range(MAX_DIM_NUM) 27 | ) 28 | 29 | @numba.njit(**options.jit_flags) 30 | def apply(traversals_data, vectmp_a, advector, advectee): 31 | null_scalarfield, null_bc = 
traversals_data.null_scalar_field 32 | return apply_vector( 33 | *formulae_flux_first_pass, 34 | *vectmp_a.field, 35 | *advectee.field, 36 | advectee.bc, 37 | *advector.field, 38 | advector.bc, 39 | *null_scalarfield, 40 | null_bc, 41 | traversals_data.buffer 42 | ) 43 | 44 | return apply 45 | 46 | 47 | def make_flux_subsequent(options, traversals): 48 | """returns njit-ted function for use with given traversals""" 49 | idx = traversals.indexers[traversals.n_dims] 50 | apply_vector = traversals.apply_vector() 51 | 52 | formulae_flux_subsequent = tuple( 53 | ( 54 | __make_flux( 55 | options.jit_flags, 56 | idx.atv[i], 57 | idx.ats[i], 58 | first_pass=False, 59 | infinite_gauge=options.infinite_gauge, 60 | ) 61 | if idx.ats[i] is not None 62 | else None 63 | ) 64 | for i in range(MAX_DIM_NUM) 65 | ) 66 | 67 | @numba.njit(**options.jit_flags) 68 | def apply(traversals_data, flux, psi, g_c_corr): 69 | null_scalarfield, null_bc = traversals_data.null_scalar_field 70 | return apply_vector( 71 | *formulae_flux_subsequent, 72 | *flux.field, 73 | *psi.field, 74 | psi.bc, 75 | *g_c_corr.field, 76 | g_c_corr.bc, 77 | *null_scalarfield, 78 | null_bc, 79 | traversals_data.buffer 80 | ) 81 | 82 | return apply 83 | 84 | 85 | def __make_flux(jit_flags, atv, ats, first_pass, infinite_gauge): 86 | @numba.njit(**jit_flags) 87 | def minimum_0(arg): 88 | return (arg - np.abs(arg)) / 2 89 | 90 | @numba.njit(**jit_flags) 91 | def maximum_0(arg): 92 | return (arg + np.abs(arg)) / 2 93 | 94 | if not first_pass and infinite_gauge: 95 | 96 | @numba.njit(**jit_flags) 97 | def flux(_, advector, __): 98 | return atv(*advector, +0.5) 99 | 100 | else: 101 | 102 | @numba.njit(**jit_flags) 103 | def flux(advectee, advector, __): 104 | return maximum_0(atv(*advector, +0.5)) * ats(*advectee, 0) + minimum_0( 105 | atv(*advector, +0.5) 106 | ) * ats(*advectee, 1) 107 | 108 | return flux 109 | -------------------------------------------------------------------------------- /PyMPDATA/impl/formulae_laplacian.py: -------------------------------------------------------------------------------- 1 | """logic for handling the Fickian term by modifying physical velocity""" 2 | 3 | import numba 4 | 5 | from ..impl.enumerations import MAX_DIM_NUM 6 | from ..impl.traversals import Traversals 7 | from ..options import Options 8 | 9 | 10 | def make_laplacian(non_unit_g_factor: bool, options: Options, traversals: Traversals): 11 | """returns njit-ted function for use with given traversals""" 12 | if not options.non_zero_mu_coeff: 13 | 14 | @numba.njit(**options.jit_flags) 15 | def apply(_1, _2, _3): 16 | return 17 | 18 | else: 19 | idx = traversals.indexers[traversals.n_dims] 20 | apply_vector = traversals.apply_vector() 21 | 22 | formulae_laplacian = tuple( 23 | ( 24 | __make_laplacian( 25 | options.jit_flags, idx.ats[i], options.epsilon, non_unit_g_factor 26 | ) 27 | if idx.ats[i] is not None 28 | else None 29 | ) 30 | for i in range(MAX_DIM_NUM) 31 | ) 32 | 33 | @numba.njit(**options.jit_flags) 34 | def apply(traversals_data, advector, advectee): 35 | null_vecfield, null_vecfield_bc = traversals_data.null_vector_field 36 | null_scalarfield, null_scalarfield_bc = traversals_data.null_scalar_field 37 | return apply_vector( 38 | *formulae_laplacian, 39 | *advector.field, 40 | *advectee.field, 41 | advectee.bc, 42 | *null_vecfield, 43 | null_vecfield_bc, 44 | *null_scalarfield, 45 | null_scalarfield_bc, 46 | traversals_data.buffer 47 | ) 48 | 49 | return apply 50 | 51 | 52 | def __make_laplacian(jit_flags, ats, epsilon, non_unit_g_factor): 53 
| if non_unit_g_factor: 54 | raise NotImplementedError() 55 | 56 | @numba.njit(**jit_flags) 57 | def fun(advectee, _, __): 58 | return ( 59 | -2 60 | * (ats(*advectee, 1) - ats(*advectee, 0)) 61 | / (ats(*advectee, 1) + ats(*advectee, 0) + epsilon) 62 | ) 63 | 64 | return fun 65 | -------------------------------------------------------------------------------- /PyMPDATA/impl/formulae_upwind.py: -------------------------------------------------------------------------------- 1 | """upwind/donor-cell formula logic including G-factor handling""" 2 | 3 | import numba 4 | 5 | from PyMPDATA.impl.enumerations import MAX_DIM_NUM 6 | 7 | 8 | def make_upwind(options, non_unit_g_factor, traversals): 9 | """returns an njit-ted function for use with given traversals""" 10 | apply_scalar = traversals.apply_scalar(loop=True) 11 | idx = traversals.indexers[traversals.n_dims] 12 | 13 | formulae_upwind = tuple( 14 | ( 15 | __make_upwind(options.jit_flags, idx.atv[i], idx.ats[i], non_unit_g_factor) 16 | if idx.ats[i] is not None 17 | else None 18 | ) 19 | for i in range(MAX_DIM_NUM) 20 | ) 21 | 22 | @numba.njit(**options.jit_flags) 23 | def apply(traversals_data, psi, flux, g_factor): 24 | null_scalarfield, null_scalarfield_bc = traversals_data.null_scalar_field 25 | return apply_scalar( 26 | *formulae_upwind, 27 | *psi.field, 28 | *flux.field, 29 | flux.bc, 30 | *g_factor.field, 31 | g_factor.bc, 32 | *null_scalarfield, 33 | null_scalarfield_bc, 34 | *null_scalarfield, 35 | null_scalarfield_bc, 36 | *null_scalarfield, 37 | null_scalarfield_bc, 38 | traversals_data.buffer 39 | ) 40 | 41 | return apply 42 | 43 | 44 | def __make_upwind(jit_flags, atv, ats, nug): 45 | @numba.njit(**jit_flags) 46 | def upwind(init, flux, g_factor, _, __, ___): 47 | result = +atv(*flux, -0.5) - atv(*flux, 0.5) 48 | if nug: 49 | result /= ats(*g_factor, 0) 50 | return init + result 51 | 52 | return upwind 53 | -------------------------------------------------------------------------------- /PyMPDATA/impl/grid.py: -------------------------------------------------------------------------------- 1 | """ 2 | static (extents known to JIT) and dynamic (run-time extents) grid handling logic 3 | """ 4 | 5 | import numba 6 | 7 | from PyMPDATA.impl.domain_decomposition import make_subdomain 8 | from PyMPDATA.impl.meta import META_N_INNER, META_N_MID3D, META_N_OUTER 9 | 10 | 11 | def make_chunk(span, n_threads, jit_flags): 12 | """returns an njit-ted function which returns the subdomain extent for a 13 | given thread, for static grid size no calculations are done at run-time""" 14 | static = span > 0 15 | 16 | subdomain = make_subdomain(jit_flags) 17 | 18 | if static: 19 | rngs = tuple(subdomain(span, th, n_threads) for th in range(n_threads)) 20 | 21 | @numba.njit(**jit_flags) 22 | def _impl(_, thread_id): 23 | return rngs[thread_id] 24 | 25 | else: 26 | 27 | @numba.njit(**jit_flags) 28 | def _impl(meta, thread_id): 29 | return subdomain(meta[META_N_OUTER], thread_id, n_threads) 30 | 31 | return _impl 32 | 33 | 34 | def make_domain(grid, jit_flags): 35 | """returns an njit-ted function which, for static grids, returns a compile-time-constant 36 | grid size, and otherwise returns the grid size encoded in the meta tuple""" 37 | static = grid[0] > 0 38 | 39 | if static: 40 | 41 | @numba.njit(**jit_flags) 42 | def _impl(_): 43 | return grid 44 | 45 | else: 46 | 47 | @numba.njit(**jit_flags) 48 | def _impl(meta): 49 | return meta[META_N_OUTER], meta[META_N_MID3D], meta[META_N_INNER] 50 | 51 | return _impl 52 | 
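A brief illustrative aside on the two helpers above (a sketch for this document, not a file from the repository): `make_subdomain` in domain_decomposition.py yields the start/stop indices of the outer-dimension chunk handled by each thread, and `make_chunk` in grid.py precomputes those ranges once when the grid extent is static. A minimal pure-Python rendition of the same arithmetic, with the example span and thread count chosen arbitrarily:

import math

def subdomain(span, rank, size):
    # start/stop of the chunk of `span` cells assigned to thread `rank` out of `size` threads
    if rank >= size:
        raise ValueError("rank >= size")
    n_max = math.ceil(span / size)  # nominal chunk length; the last chunk may be shorter
    start = n_max * rank
    stop = start + (n_max if start + n_max <= span else span - start)
    return start, stop

# a 10-cell outer dimension split across 3 threads -> [(0, 4), (4, 8), (8, 10)]
print([subdomain(10, rank, 3) for rank in range(3)])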
-------------------------------------------------------------------------------- /PyMPDATA/impl/meta.py: -------------------------------------------------------------------------------- 1 | """constants for indexing and a factory for creating the "meta" tuples""" 2 | 3 | from collections import namedtuple 4 | from pathlib import Path 5 | 6 | import numpy as np 7 | 8 | from .enumerations import INNER, MID3D, OUTER 9 | 10 | META_HALO_VALID = 0 11 | META_N_OUTER = 1 12 | META_N_MID3D = 2 13 | META_N_INNER = 3 14 | META_IS_NULL = 4 15 | META_SIZE = 5 16 | 17 | _Impl = namedtuple(Path(__file__).stem + "_Impl", ("field", "bc")) 18 | 19 | 20 | def make_meta(halo_valid: bool, grid): 21 | """returns a "meta" tuple for a given grid size and halo status""" 22 | meta = np.empty(META_SIZE, dtype=int) 23 | meta[META_HALO_VALID] = halo_valid 24 | meta[META_N_OUTER] = grid[OUTER] if len(grid) > 1 else 0 25 | meta[META_N_MID3D] = grid[MID3D] if len(grid) > 2 else 0 26 | meta[META_N_INNER] = grid[INNER] 27 | meta[META_IS_NULL] = False 28 | return meta 29 | -------------------------------------------------------------------------------- /PyMPDATA/impl/traversals.py: -------------------------------------------------------------------------------- 1 | """staggered-grid traversals orchestration""" 2 | 3 | from collections import namedtuple 4 | from pathlib import Path 5 | 6 | import numpy as np 7 | 8 | from ..scalar_field import ScalarField 9 | from ..vector_field import VectorField 10 | from .enumerations import BUFFER_DEFAULT_VALUE, INNER, MID3D, OUTER 11 | from .grid import make_chunk, make_domain 12 | from .indexers import make_indexers 13 | from .traversals_halos_scalar import _make_fill_halos_scalar 14 | from .traversals_halos_vector import _make_fill_halos_vector 15 | from .traversals_scalar import _make_apply_scalar 16 | from .traversals_vector import _make_apply_vector 17 | 18 | 19 | class Traversals: 20 | """groups njit-ted traversals for a given grid, halo, jit_flags and threading settings""" 21 | 22 | def __init__( 23 | self, *, grid, halo, jit_flags, n_threads, left_first: tuple, buffer_size 24 | ): 25 | assert not (n_threads > 1 and len(grid) == 1) 26 | tmp = ( 27 | grid[OUTER] if len(grid) > 1 else 0, 28 | grid[MID3D] if len(grid) > 2 else 0, 29 | grid[INNER], 30 | ) 31 | domain = make_domain( 32 | tmp, 33 | jit_flags, 34 | ) 35 | chunk = make_chunk(grid[OUTER], n_threads, jit_flags) 36 | 37 | self.n_dims = len(grid) 38 | self.jit_flags = jit_flags 39 | self.indexers = make_indexers(jit_flags) 40 | 41 | self.data = namedtuple( 42 | Path(__file__).stem + "TraversalsData", 43 | ("null_scalar_field", "null_vector_field", "buffer"), # NullFields 44 | )( 45 | null_scalar_field=ScalarField.make_null(self.n_dims, self).impl, 46 | null_vector_field=VectorField.make_null(self.n_dims, self).impl, 47 | buffer=np.full((buffer_size,), BUFFER_DEFAULT_VALUE), 48 | ) 49 | 50 | common_kwargs = { 51 | "jit_flags": jit_flags, 52 | "halo": halo, 53 | "n_dims": self.n_dims, 54 | "chunker": chunk, 55 | "spanner": domain, 56 | } 57 | self._code = { 58 | "fill_halos_scalar": _make_fill_halos_scalar( 59 | left_first=left_first, 60 | **common_kwargs, 61 | ), 62 | "fill_halos_vector": _make_fill_halos_vector( 63 | left_first=left_first, 64 | **common_kwargs, 65 | ), 66 | } 67 | common_kwargs = { 68 | **common_kwargs, 69 | "indexers": self.indexers, 70 | **{ 71 | "boundary_cond_vector": self._code["fill_halos_vector"], 72 | "boundary_cond_scalar": self._code["fill_halos_scalar"], 73 | "n_threads": n_threads, 74 | }, 75 | } 
76 | self._code = { 77 | **self._code, 78 | **{ 79 | "apply_scalar": _make_apply_scalar( 80 | loop=False, 81 | **common_kwargs, 82 | ), 83 | "apply_scalar_loop": _make_apply_scalar( 84 | loop=True, 85 | **common_kwargs, 86 | ), 87 | "apply_vector": _make_apply_vector( 88 | **common_kwargs, 89 | ), 90 | }, 91 | } 92 | 93 | def apply_scalar(self, *, loop): 94 | """returns scalar field traversal function in two flavours: 95 | - loop=True sums contributions over dimensions (used in summing upwind fluxes only) 96 | - loop=False does no summing 97 | """ 98 | if loop: 99 | return self._code["apply_scalar_loop"] 100 | return self._code["apply_scalar"] 101 | 102 | def apply_vector(self): 103 | """returns vector field traversal function""" 104 | return self._code["apply_vector"] 105 | -------------------------------------------------------------------------------- /PyMPDATA/impl/traversals_common.py: -------------------------------------------------------------------------------- 1 | """commons for scalar and vector field traversals""" 2 | 3 | # pylint: disable=too-many-arguments,line-too-long,unused-argument 4 | import numba 5 | 6 | from .enumerations import OUTER, RNG_STOP 7 | 8 | 9 | def make_common(jit_flags, spanner, chunker): 10 | """returns Numba-compiled callable producing common parameters""" 11 | 12 | @numba.njit(**jit_flags) 13 | def common(meta, thread_id): 14 | span = spanner(meta) 15 | rng_outer = chunker(meta, thread_id) 16 | last_thread = rng_outer[RNG_STOP] == span[OUTER] 17 | first_thread = thread_id == 0 18 | return span, rng_outer, last_thread, first_thread 19 | 20 | return common 21 | 22 | 23 | def make_fill_halos_loop(jit_flags, set_value, fill_halos): 24 | """returns Numba-compiled halo-filling callable""" 25 | 26 | @numba.njit(**jit_flags) 27 | def fill_halos_loop(buffer, i_rng, j_rng, k_rng, psi, span, sign): 28 | for i in i_rng: 29 | for j in j_rng: 30 | for k in k_rng: 31 | focus = (i, j, k) 32 | set_value(psi, *focus, fill_halos((focus, psi), span, sign)) 33 | 34 | return fill_halos_loop 35 | 36 | 37 | def make_fill_halos_loop_vector( 38 | jit_flags, set_value, fill_halos_parallel, fill_halos_normal, dimension_index 39 | ): 40 | """returns Numba-compiled halo-filling callable""" 41 | 42 | @numba.njit(**jit_flags) 43 | def fill_halos_loop_vector( 44 | buffer, i_rng, j_rng, k_rng, components, dim, span, sign 45 | ): 46 | parallel = dim % len(components) == dimension_index 47 | for i in i_rng: 48 | for j in j_rng: 49 | for k in k_rng: 50 | focus = (i, j, k) 51 | if parallel: 52 | set_value( 53 | components[dim], 54 | *focus, 55 | fill_halos_parallel((focus, components), span, sign) 56 | ) 57 | else: 58 | set_value( 59 | components[dim], 60 | *focus, 61 | fill_halos_normal((focus, components), span, sign, dim) 62 | ) 63 | 64 | return fill_halos_loop_vector 65 | -------------------------------------------------------------------------------- /PyMPDATA/scalar_field.py: -------------------------------------------------------------------------------- 1 | """ 2 | scalar field abstractions for the staggered grid 3 | """ 4 | 5 | import inspect 6 | 7 | import numpy as np 8 | 9 | from PyMPDATA.boundary_conditions import Constant 10 | from PyMPDATA.impl.enumerations import INVALID_INIT_VALUE 11 | from PyMPDATA.impl.field import Field 12 | 13 | 14 | class ScalarField(Field): 15 | """n-dimensional scalar field including halo data, used to represent advectee, g_factor, etc.""" 16 | 17 | def __init__(self, data: np.ndarray, halo: int, boundary_conditions: tuple): 18 | super().__init__( 19 
| grid=data.shape, 20 | boundary_conditions=boundary_conditions, 21 | halo=halo, 22 | dtype=data.dtype, 23 | fill_halos_name="fill_halos_scalar", 24 | ) 25 | 26 | for dim_length in data.shape: 27 | assert halo <= dim_length 28 | for boundary_condition in boundary_conditions: 29 | assert not inspect.isclass(boundary_condition) 30 | 31 | shape_with_halo = tuple(data.shape[i] + 2 * halo for i in range(self.n_dims)) 32 | self.data = np.full(shape_with_halo, INVALID_INIT_VALUE, dtype=data.dtype) 33 | self._impl_data = (self.data,) 34 | self.domain = tuple( 35 | slice(halo, self.data.shape[i] - halo) for i in range(self.n_dims) 36 | ) 37 | self.get()[:] = data[:] 38 | 39 | @staticmethod 40 | def clone(field, dtype=None): 41 | """returns an instance with the same dimensionality and same halo size as the argument 42 | optionally with a different data type""" 43 | dtype = dtype or field.dtype 44 | # note: copy=False is OK as the ctor anyhow copies the data to an array with halo 45 | return ScalarField( 46 | field.get().astype(dtype, copy=False), field.halo, field.boundary_conditions 47 | ) 48 | 49 | def get(self) -> np.ndarray: # note: actually a view is returned 50 | """returns a view (not a copy) of the field data excluding halo""" 51 | results = self.data[self.domain] 52 | return results 53 | 54 | @staticmethod 55 | def make_null(n_dims, traversals): 56 | """returns a scalar field instance with no allocated data storage, 57 | see `Field._make_null` other properties of the returned field""" 58 | return Field._make_null( 59 | ScalarField( 60 | np.empty(shape=[0] * n_dims), 61 | halo=0, 62 | boundary_conditions=tuple([Constant(np.nan)] * n_dims), 63 | ), 64 | traversals, 65 | ) 66 | -------------------------------------------------------------------------------- /docs/templates/custom.css: -------------------------------------------------------------------------------- 1 | img { 2 | max-width: 100%; 3 | } 4 | 5 | mark { 6 | background-color: #859bed; 7 | color: black; 8 | } 9 | 10 | ul { 11 | margin-left: 2em; 12 | } 13 | -------------------------------------------------------------------------------- /docs/templates/theme.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --pdoc-background: #212529; 3 | } 4 | 5 | .pdoc { 6 | --text: #f7f7f7; 7 | --muted: #9d9d9d; 8 | --link: #58a6ff; 9 | --link-hover: #3989ff; 10 | --code: #333; 11 | --active: #555; 12 | 13 | --accent: #343434; 14 | --accent2: #555; 15 | 16 | --nav-hover: rgba(0, 0, 0, 0.1); 17 | --name: #77C1FF; 18 | --def: #0cdd0c; 19 | --annotation: #00c037; 20 | } 21 | -------------------------------------------------------------------------------- /examples/MANIFEST.in: -------------------------------------------------------------------------------- 1 | global-exclude *.ipynb 2 | include docs/*.md 3 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/Bjerksund_and_Stensland_1993.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PyMPDATA_examples.Arabas_and_Farhat_2020.Black_Scholes_1973 as BS 3 | 4 | 5 | def _phi( 6 | S: [np.ndarray, float], 7 | gamma: float, 8 | H: float, 9 | I: float, 10 | r: float, 11 | b: float, 12 | var: float, 13 | T: float, 14 | ): 15 | lmbd = (-r + gamma * b + 0.5 * gamma * (gamma - 1) * var) * T 16 | d = -(np.log(S / H) + (b + (gamma - 0.5) * var) * T) / np.sqrt(var * T) 17 | kappa = 2 * b / var + (2 * gamma - 1) 18 | 
return ( 19 | np.exp(lmbd) 20 | * np.power(S, gamma) 21 | * ( 22 | BS.N(d) 23 | - pow((I / S), kappa) * BS.N(d - 2 * np.log(I / S) / np.sqrt(var * T)) 24 | ) 25 | ) 26 | 27 | 28 | def c_amer( 29 | S: [np.ndarray, float], 30 | K: [float, np.ndarray], 31 | T: float, 32 | r: float, 33 | b: float, 34 | sgma: float, 35 | ): 36 | if b >= r: 37 | return BS.c_euro(S, K=K, T=T, r=r, b=b, sgma=sgma) 38 | 39 | var = sgma * sgma 40 | beta = (0.5 - b / var) + np.sqrt(pow((b / var - 0.5), 2) + 2 * r / var) 41 | BInf = beta / (beta - 1) * K 42 | B0 = np.maximum(K, r / (r - b) * K) 43 | ht = -(b * T + 2 * sgma * np.sqrt(T)) * B0 / (BInf - B0) 44 | I = B0 + (BInf - B0) * (1 - np.exp(ht)) 45 | alpha = (I - K) * pow(I, -beta) 46 | 47 | return np.where( 48 | S >= I, 49 | S - K, 50 | alpha * np.power(S, beta) 51 | + ( 52 | -alpha * _phi(S, gamma=beta, H=I, I=I, r=r, b=b, var=var, T=T) 53 | + _phi(S, gamma=1, H=I, I=I, r=r, b=b, var=var, T=T) 54 | - _phi(S, gamma=1, H=K, I=I, r=r, b=b, var=var, T=T) 55 | - K * _phi(S, gamma=0, H=I, I=I, r=r, b=b, var=var, T=T) 56 | + K * _phi(S, gamma=0, H=K, I=I, r=r, b=b, var=var, T=T) 57 | ), 58 | ) 59 | 60 | 61 | def p_amer(S: [np.ndarray, float], K: float, T: float, r: float, b: float, sgma: float): 62 | return c_amer(K, K=S, T=T, r=r - b, b=-b, sgma=sgma) 63 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/Black_Scholes_1973.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.special import erf # pylint: disable=no-name-in-module 3 | 4 | 5 | def N(x: float): 6 | return (1 + erf(x / np.sqrt(2))) / 2 7 | 8 | 9 | def c_euro(S: np.ndarray, K: float, T: float, r: float, b: float, sgma: float): 10 | d1 = (np.log(S / K) + (b + sgma * sgma / 2) * T) / sgma / np.sqrt(T) 11 | d2 = d1 - sgma * np.sqrt(T) 12 | return S * np.exp(b - r) * N(d1) - K * np.exp(-r * T) * N(d2) 13 | 14 | 15 | def p_euro(S: np.ndarray, K: float, T: float, r: float, b: float, sgma: float): 16 | d1 = (np.log(S / K) + (b + sgma * sgma / 2) * T) / sgma / np.sqrt(T) 17 | d2 = d1 - sgma * np.sqrt(T) 18 | return K * np.exp(-r * T) * N(-d2) - S * np.exp((b - r) * T) * N(-d1) 19 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example implements simulations presented in the 3 | [Arabas and Farhat 2020](https://doi.org/10.1016/j.cam.2019.05.023) 4 | study on pricing of European and American options using MPDATA. 5 | 6 | Each notebook in this directory corresponds to a figure or a table in the paper. 7 | 8 | fig_1.ipynb: 9 | .. include:: ./fig_1.ipynb.badges.md 10 | 11 | fig_2.ipynb: 12 | .. include:: ./fig_2.ipynb.badges.md 13 | 14 | fig_3.ipynb: 15 | .. include:: ./fig_3.ipynb.badges.md 16 | 17 | tab_1.ipynb: 18 | .. 
include:: ./tab_1.ipynb.badges.md 19 | """ 20 | 21 | from .simulation import Simulation 22 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/analysis_figures_2_and_3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from joblib import Parallel, delayed, parallel_backend 3 | from PyMPDATA_examples.Arabas_and_Farhat_2020.setup1_european_corridor import Settings 4 | from PyMPDATA_examples.Arabas_and_Farhat_2020.simulation import Simulation 5 | from PyMPDATA_examples.utils.error_norms import L2 6 | 7 | 8 | def compute(simulation): 9 | output = [] 10 | for n_iters in (1, 2): 11 | simulation.run(n_iters) 12 | output.append( 13 | { 14 | "n_iters": n_iters, 15 | "log2_C": np.log2(simulation.C), 16 | "log2_C_opt": np.log2(simulation.settings.C_opt), 17 | "log2_l2": np.log2(simulation.l2), 18 | "log2_l2_opt": np.log2(simulation.settings.l2_opt), 19 | "err2": error_L2_norm( 20 | simulation.solvers, 21 | simulation.settings, 22 | simulation.S, 23 | simulation.nt, 24 | n_iters, 25 | ), 26 | } 27 | ) 28 | return output 29 | 30 | 31 | def convergence_in_space(num=8): 32 | with parallel_backend("threading", n_jobs=-2): 33 | data = Parallel(verbose=10)( 34 | delayed(compute)( 35 | Simulation(Settings(l2_opt=2**log2_l2_opt, C_opt=2**log2_C_opt)) 36 | ) 37 | for log2_C_opt in np.linspace(-9.5, -6, num=num) 38 | for log2_l2_opt in range(1, 4) 39 | ) 40 | result = {} 41 | for pair in data: 42 | for datum in pair: 43 | label = f" $\\lambda^2\\approx2^{{{datum['log2_l2_opt']}}}$" 44 | key = ("upwind" + label, "MPDATA" + label)[datum["n_iters"] - 1] 45 | if key not in result: 46 | result[key] = ([], []) 47 | result[key][0].append(datum["log2_C"]) 48 | result[key][1].append(datum["err2"]) 49 | return result 50 | 51 | 52 | def convergence_in_time(num=13): 53 | with parallel_backend("threading", n_jobs=-2): 54 | data = Parallel(verbose=10)( 55 | delayed(compute)( 56 | Simulation(Settings(l2_opt=2**log2_l2_opt, C_opt=2**log2_C_opt)) 57 | ) 58 | for log2_C_opt in np.log2((0.01, 0.005, 0.0025)) 59 | for log2_l2_opt in np.linspace(1.1, 3.5, num=num) 60 | ) 61 | result = {} 62 | for pair in data: 63 | for datum in pair: 64 | label = f" $C\\approx{2**(datum['log2_C_opt']):.4f}$" 65 | key = ("upwind" + label, "MPDATA" + label)[datum["n_iters"] - 1] 66 | if key not in result: 67 | result[key] = ([], []) 68 | result[key][0].append(datum["log2_l2"]) 69 | result[key][1].append(datum["err2"]) 70 | return result 71 | 72 | 73 | def error_L2_norm(solvers, settings, S, nt, n_iters: int): 74 | numerical = solvers[n_iters].advectee.get() 75 | analytical = settings.analytical_solution(S) 76 | return L2(numerical, analytical, nt) 77 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/analysis_table_1.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from joblib import Parallel, delayed, parallel_backend 3 | from PyMPDATA_examples.Arabas_and_Farhat_2020.analysis_figures_2_and_3 import ( 4 | error_L2_norm, 5 | ) 6 | from PyMPDATA_examples.Arabas_and_Farhat_2020.setup2_american_put import Settings 7 | from PyMPDATA_examples.Arabas_and_Farhat_2020.simulation import Simulation 8 | 9 | 10 | def compute_row(simulations): 11 | S0 = simulations[0].settings.S0 12 | T = simulations[0].settings.T 13 | for i in range(1, len(simulations)): 14 | assert 
simulations[i].settings.T == T 15 | assert simulations[i].settings.S0 == S0 16 | row = [T, S0] 17 | f = None 18 | for simulation in simulations: 19 | f = simulation.run(n_iters=2) 20 | row.append( 21 | error_L2_norm( 22 | simulation.solvers, 23 | simulation.settings, 24 | simulation.S, 25 | simulation.nt, 26 | n_iters=2, 27 | ) 28 | ) 29 | np.testing.assert_almost_equal(simulation.S[simulation.ix_match], S0) 30 | row.append(f[simulations[-1].ix_match]) 31 | row.append(simulations[0].settings.analytical_solution(S0)) 32 | row.append(simulations[0].settings.analytical_solution(S0, amer=False)) 33 | return row 34 | 35 | 36 | def table_1_data(): 37 | with parallel_backend("threading", n_jobs=-2): 38 | result = Parallel(verbose=10)( 39 | delayed(compute_row)( 40 | tuple( 41 | Simulation(Settings(T=T, C_opt=C_opt, S0=S0)) 42 | for C_opt in (0.02, 0.01, 0.005) 43 | ) 44 | ) 45 | for T in (0.25, 0.5, 3) 46 | for S0 in (80, 90, 100, 110, 120) 47 | ) 48 | return result 49 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/colors.py: -------------------------------------------------------------------------------- 1 | colors = ("purple", "teal", "turquoise") 2 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/options.py: -------------------------------------------------------------------------------- 1 | OPTIONS = { 2 | "n_iters": 2, 3 | "infinite_gauge": True, 4 | "nonoscillatory": True, 5 | "divergent_flow": True, 6 | "third_order_terms": True, 7 | "non_zero_mu_coeff": True, 8 | } 9 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/setup1_european_corridor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PyMPDATA_examples.utils.financial_formulae.Black_Scholes_1973 as BS73 3 | from pystrict import strict 4 | 5 | 6 | @strict 7 | class Settings: 8 | S0 = 55 9 | T = 0.5 10 | amer = False 11 | S_min = 10 12 | S_max = 2000 13 | sigma = 0.6 14 | r = 0.008 15 | K1 = 75 16 | K2 = 175 17 | S_match = 175 18 | 19 | def __init__(self, *, n_iters: int = 2, l2_opt: int = 2, C_opt: float = 0.034): 20 | self.n_iters = n_iters 21 | self.l2_opt = l2_opt 22 | self.C_opt = C_opt 23 | 24 | def payoff(self, S: np.ndarray): 25 | return np.exp(-self.r * self.T) * ( 26 | np.maximum(0, self.K2 - S) - np.maximum(0, self.K1 - S) 27 | ) 28 | 29 | def analytical_solution(self, S: np.ndarray): 30 | return BS73.p_euro( 31 | S, K=self.K2, T=self.T, r=self.r, b=self.r, sgma=self.sigma 32 | ) - BS73.p_euro(S, K=self.K1, T=self.T, r=self.r, b=self.r, sgma=self.sigma) 33 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/setup2_american_put.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PyMPDATA_examples.utils.financial_formulae.Bjerksund_and_Stensland_1993 as BS93 3 | import PyMPDATA_examples.utils.financial_formulae.Black_Scholes_1973 as BS73 4 | from pystrict import strict 5 | 6 | 7 | @strict 8 | class Settings: 9 | amer = True 10 | l2_opt = 2.05 11 | S_min = 0.05 12 | S_max = 500 13 | K = 100 14 | r = 0.08 15 | sigma = 0.2 16 | n_iters = 2 17 | 18 | def __init__(self, *, T: float, C_opt: float, S0: float): 19 | self.T = T 20 | self.C_opt = C_opt 21 | self.S0 = S0 
22 | self.S_match = S0 23 | 24 | def payoff(self, S: np.ndarray): 25 | return np.exp(-self.r * self.T) * (np.maximum(0, self.K - S)) 26 | 27 | def analytical_solution(self, S: [np.ndarray, float], amer=True): 28 | if not amer: 29 | return BS73.p_euro( 30 | S, K=self.K, T=self.T, r=self.r, b=self.r, sgma=self.sigma 31 | ) 32 | return BS93.p_amer(S, K=self.K, T=self.T, r=self.r, b=self.r, sgma=self.sigma) 33 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Arabas_and_Farhat_2020/simulation.py: -------------------------------------------------------------------------------- 1 | import numba 2 | import numpy as np 3 | from PyMPDATA_examples.Arabas_and_Farhat_2020.options import OPTIONS 4 | 5 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 6 | from PyMPDATA.boundary_conditions import Extrapolated 7 | 8 | 9 | class Simulation: 10 | @staticmethod 11 | def _factory( 12 | *, options: Options, advectee: np.ndarray, advector: float, boundary_conditions 13 | ): 14 | stepper = Stepper( 15 | options=options, n_dims=len(advectee.shape), non_unit_g_factor=False 16 | ) 17 | return Solver( 18 | stepper=stepper, 19 | advectee=ScalarField( 20 | advectee.astype(dtype=options.dtype), 21 | halo=options.n_halo, 22 | boundary_conditions=boundary_conditions, 23 | ), 24 | advector=VectorField( 25 | (np.full(advectee.shape[0] + 1, advector, dtype=options.dtype),), 26 | halo=options.n_halo, 27 | boundary_conditions=boundary_conditions, 28 | ), 29 | ) 30 | 31 | def __init__(self, settings): 32 | self.settings = settings 33 | 34 | sigma2 = pow(settings.sigma, 2) 35 | dx_opt = abs( 36 | settings.C_opt / (0.5 * sigma2 - settings.r) * settings.l2_opt * sigma2 37 | ) 38 | dt_opt = pow(dx_opt, 2) / sigma2 / settings.l2_opt 39 | 40 | # adjusting dt so that nt is integer 41 | self.dt = settings.T 42 | self.nt = 0 43 | while self.dt > dt_opt: 44 | self.nt += 1 45 | self.dt = settings.T / self.nt 46 | 47 | # adjusting dx to match requested l^2 48 | dx = np.sqrt(settings.l2_opt * self.dt) * settings.sigma 49 | 50 | # calculating actual u number and lambda 51 | self.C = -(0.5 * sigma2 - settings.r) * (-self.dt) / dx 52 | self.l2 = dx * dx / sigma2 / self.dt 53 | 54 | # adjusting nx and setting S_beg, S_end 55 | S_beg = settings.S_match 56 | self.nx = 1 57 | while S_beg > settings.S_min: 58 | self.nx += 1 59 | S_beg = np.exp(np.log(settings.S_match) - self.nx * dx) 60 | 61 | self.ix_match = self.nx 62 | 63 | S_end = settings.S_match 64 | while S_end < settings.S_max: 65 | self.nx += 1 66 | S_end = np.exp(np.log(S_beg) + (self.nx - 1) * dx) 67 | 68 | # asset price 69 | self.S = np.exp(np.log(S_beg) + np.arange(self.nx) * dx) 70 | 71 | self.mu_coeff = (0.5 / self.l2,) 72 | self.solvers = {} 73 | self.solvers[1] = self._factory( 74 | advectee=settings.payoff(self.S), 75 | advector=self.C, 76 | options=Options(n_iters=1, non_zero_mu_coeff=True), 77 | boundary_conditions=(Extrapolated(),), 78 | ) 79 | self.solvers[2] = self._factory( 80 | advectee=settings.payoff(self.S), 81 | advector=self.C, 82 | options=Options(**OPTIONS), 83 | boundary_conditions=(Extrapolated(),), 84 | ) 85 | 86 | def run(self, n_iters: int): 87 | if self.settings.amer: 88 | psi = self.solvers[n_iters].advectee.data 89 | f_T = np.empty_like(psi) 90 | f_T[:] = psi[:] / np.exp(-self.settings.r * self.settings.T) 91 | T = self.settings.T 92 | r = self.settings.r 93 | dt = self.dt 94 | 95 | @numba.experimental.jitclass([]) 96 | class PostStep: 97 | # pylint: 
disable=too-few-public-methods 98 | def __init__(self): 99 | pass 100 | 101 | def call(self, psi, step): 102 | t = T - (step + 1) * dt 103 | psi += np.maximum(psi, f_T / np.exp(r * t)) - psi 104 | 105 | self.solvers[n_iters].advance(self.nt, self.mu_coeff, PostStep()) 106 | else: 107 | self.solvers[n_iters].advance(self.nt, self.mu_coeff) 108 | 109 | return self.solvers[n_iters].advectee.get() 110 | 111 | def terminal_value(self): 112 | return self.solvers[1].advectee.get() 113 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Bartman_et_al_2022/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example is based on the paper: 3 | [Bartman et al. 2022](https://doi.org/10.21105/joss.03896). 4 | 5 | fig_X.ipynb: 6 | .. include:: ./fig_X.ipynb.badges.md 7 | """ 8 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/DPDC/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example demonstrates the use of the Double-Pass Donor-Cell option in `PyMPDATA.options`. 3 | 4 | demo.ipynb: 5 | .. include:: ./demo.ipynb.badges.md 6 | """ 7 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jarecka_et_al_2015/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module showcases the PyMPDATA implementation of an MPDATA-based shallow-water equations 3 | solver discussed and benchmarked against analytical solutions in 4 | [Jarecka_et_al_2015](https://doi.org/10.1016/j.jcp.2015.02.003). 5 | 6 | fig_6.ipynb: 7 | .. include:: ./fig_6.ipynb.badges.md 8 | """ 9 | 10 | from .plot_output import plot_output 11 | from .settings import Settings 12 | from .simulation import Simulation 13 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jarecka_et_al_2015/fig_6.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "[![preview notebook](https://img.shields.io/static/v1?label=render%20on&logo=github&color=87ce3e&message=GitHub)](https://github.com/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Jarecka_et_al_2015/fig_6.ipynb)\n", 8 | "[![launch on mybinder.org](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?urlpath=lab/tree/examples/PyMPDATA_examples/Jarecka_et_al_2015/fig_6.ipynb)\n", 9 | "[![launch on Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Jarecka_et_al_2015/fig_6.ipynb)" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "based on Fig. 6 from Jarecka, Jaruga & Smolarkiewicz 2015 (J. Comp. Phys. 
289) \"A spreading drop of shallow water\" \n", 17 | "https://doi.org/10.1016/j.jcp.2015.02.003\n", 18 | "\n", 19 | "notes (what is not yet done as in the paper):\n", 20 | "- divergent-flow option is not used yet\n", 21 | "- no extrapolation in time for the velocity field yet" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "import sys\n", 31 | "if 'google.colab' in sys.modules:\n", 32 | " !pip --quiet install open-atmos-jupyter-utils\n", 33 | " from open_atmos_jupyter_utils import pip_install_on_colab\n", 34 | " pip_install_on_colab('PyMPDATA-examples')" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": { 41 | "pycharm": { 42 | "name": "#%%\n" 43 | } 44 | }, 45 | "outputs": [], 46 | "source": [ 47 | "from open_atmos_jupyter_utils import show_plot\n", 48 | "from PyMPDATA_examples.Jarecka_et_al_2015 import Settings, Simulation, plot_output" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "settings = Settings()" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "simulation = Simulation(settings)\n", 67 | "output = simulation.run()" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "times = (1, 3, 7)" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": { 83 | "pycharm": { 84 | "name": "#%%" 85 | } 86 | }, 87 | "outputs": [], 88 | "source": [ 89 | "plot_output(times, output, settings)\n", 90 | "show_plot(\"fig_6.pdf\")" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [] 106 | } 107 | ], 108 | "metadata": { 109 | "kernelspec": { 110 | "display_name": "Python 3 (ipykernel)", 111 | "language": "python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.9.2" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 4 129 | } 130 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jarecka_et_al_2015/formulae.py: -------------------------------------------------------------------------------- 1 | # adapted from: 2 | # https://github.com/igfuw/shallow-water-elliptic-drop/blob/master/analytical/analytic_equations.py 3 | # Code used in the paper of Jarecka, Jaruga, Smolarkiewicz - 4 | # "A Spreading Drop of Shallow Water" (JCP 289, doi:10.1016/j.jcp.2015.02.003). 5 | 6 | import numba 7 | import numpy as np 8 | from scipy.integrate import odeint 9 | 10 | 11 | def amplitude(x, y, lx, ly): 12 | A = 1 / lx / ly 13 | h = A * (1 - (x / lx) ** 2 - (y / ly) ** 2) 14 | return np.where(h > 0, h, 0) 15 | 16 | 17 | @numba.njit() 18 | def deriv(y, _): 19 | """ 20 | return derivatives of [lambda_x, dlambda_x/dt, lambda_y, dlambda_y/dt] 21 | four first-order ODEs based on eq. 
7 (Jarecka, Jaruga, Smolarkiewicz) 22 | """ 23 | return np.array((y[1], 2.0 / y[0] ** 2 / y[2], y[3], 2.0 / y[0] / y[2] ** 2)) 24 | 25 | 26 | def d2_el_lamb_lamb_t_evol(times, lamb_x0, lamb_y0): 27 | """ 28 | solving coupled nonlinear second-order ODEs - eq. 7 (Jarecka, Jaruga, Smolarkiewicz) 29 | returning array with first dim denoting time, second dim: 30 | [lambda_x, dot{lambda_x}, lambda_y, dot{lambda_y} 31 | """ 32 | assert times[0] == 0 33 | yinit = np.array([lamb_x0, 0.0, lamb_y0, 0.0]) # initial values (dot_lamb = 0.) 34 | result, info = odeint(deriv, yinit, times, full_output=True) 35 | assert info["message"] == "Integration successful." 36 | return result 37 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jarecka_et_al_2015/settings.py: -------------------------------------------------------------------------------- 1 | from pystrict import strict 2 | 3 | from PyMPDATA import Options 4 | 5 | 6 | @strict 7 | class Settings: 8 | def __init__(self): 9 | self.dt = 0.01 10 | self.dx = 0.05 11 | self.dy = 0.05 12 | self.nx = 401 13 | self.ny = 401 14 | self.eps = 1e-7 15 | self.lx0 = 2 16 | self.ly0 = 1 17 | self.options = Options(nonoscillatory=True, infinite_gauge=True) 18 | 19 | @property 20 | def nt(self): 21 | return int(7 / self.dt) 22 | 23 | @property 24 | def outfreq(self): 25 | return int(1 / self.dt) 26 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jarecka_et_al_2015/simulation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PyMPDATA_examples.Jarecka_et_al_2015 import formulae 3 | 4 | from PyMPDATA import ScalarField, Solver, Stepper, VectorField 5 | from PyMPDATA.boundary_conditions import Constant 6 | 7 | 8 | class Simulation: 9 | def __init__(self, settings): 10 | self.settings = settings 11 | s = settings 12 | 13 | halo = settings.options.n_halo 14 | grid = (s.nx, s.ny) 15 | bcs = [Constant(value=0)] * len(grid) 16 | 17 | self.advector = VectorField( 18 | (np.zeros((s.nx + 1, s.ny)), np.zeros((s.nx, s.ny + 1))), halo, bcs 19 | ) 20 | 21 | xi, yi = np.indices(grid, dtype=float) 22 | xi -= (s.nx - 1) / 2 23 | yi -= (s.ny - 1) / 2 24 | x = xi * s.dx 25 | y = yi * s.dy 26 | h0 = formulae.amplitude(x, y, s.lx0, s.ly0) 27 | 28 | advectees = { 29 | "h": ScalarField(h0, halo, bcs), 30 | "uh": ScalarField(np.zeros(grid), halo, bcs), 31 | "vh": ScalarField(np.zeros(grid), halo, bcs), 32 | } 33 | 34 | stepper = Stepper(options=s.options, grid=grid) 35 | self.solvers = { 36 | k: Solver(stepper, v, self.advector) for k, v in advectees.items() 37 | } 38 | 39 | @staticmethod 40 | def interpolate(psi, axis): 41 | idx = ( 42 | (slice(None, -1), slice(None, None)), 43 | (slice(None, None), slice(None, -1)), 44 | ) 45 | return np.diff(psi, axis=axis) / 2 + psi[idx[axis]] 46 | 47 | def run(self): 48 | s = self.settings 49 | grid_step = (s.dx, s.dy) 50 | idx = ((slice(1, -1), slice(None, None)), (slice(None, None), slice(1, -1))) 51 | output = [] 52 | for it in range(s.nt + 1): 53 | if it != 0: 54 | h = self.solvers["h"].advectee.get() 55 | for xy, k in enumerate(("uh", "vh")): 56 | mask = h > s.eps 57 | vel = np.where(mask, np.nan, 0) 58 | np.divide(self.solvers[k].advectee.get(), h, where=mask, out=vel) 59 | self.advector.get_component(xy)[idx[xy]] = ( 60 | self.interpolate(vel, axis=xy) * s.dt / grid_step[xy] 61 | ) 62 | self.solvers["h"].advance(1) 63 | assert h.ctypes.data == 
self.solvers["h"].advectee.get().ctypes.data 64 | for xy, k in enumerate(("uh", "vh")): 65 | psi = self.solvers[k].advectee.get() 66 | psi[:] -= s.dt / 2 * h * np.gradient(h, grid_step[xy], axis=xy) 67 | self.solvers[k].advance(1) 68 | psi[:] -= s.dt / 2 * h * np.gradient(h, grid_step[xy], axis=xy) 69 | if it % s.outfreq == 0: 70 | output.append( 71 | { 72 | k: self.solvers[k].advectee.get().copy() 73 | for k in self.solvers.keys() 74 | } 75 | ) 76 | return output 77 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Jaruga_et_al_2015/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module showcases a PyMPDATA implementation of an MPDATA-based Boussinesq system solver 3 | with Poisson equation for the pressure-term solved using a bespoke (libmpdata++-based) 4 | implementation of the generalised conjugate-residual scheme. Simulation setup based on 5 | Fig 19 in [Jaruga_et_al_2015](https://doi.org/10.5194/gmd-8-1005-2015) (based on [Smolarkiewicz 6 | & Pudykiewicz 1992](https://doi.org/10.1175/1520-0469(1992)049%3C2082:ACOSLA%3E2.0.CO;2)). 7 | 8 | fig19.ipynb: 9 | .. include:: ./fig19.ipynb.badges.md 10 | """ 11 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Magnuszewski_et_al_2025/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Path-dependent option pricing with two-dimensional PDE using MPDATA 3 | ([Magnuszewski and Arabas 2025](https://doi.org/10.48550/arXiv.2505.24435)) 4 | 5 | figs.ipynb: 6 | .. include:: ./figs.ipynb.badges.md 7 | 8 | table_1.ipynb: 9 | .. include:: ./table_1.ipynb.badges.md 10 | """ 11 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Magnuszewski_et_al_2025/barraquand_data.py: -------------------------------------------------------------------------------- 1 | # table 6 from [Barraquand and Pudet 1994](https://doi.org/10.1111/j.1467-9965.1996.tb00111.x) 2 | headers = ["sigma", "T", "K", "call_price", "put_price"] 3 | 4 | # 0.1,0.25,95,6.132,0.013 5 | # 0.1,0.25,100,1.869,0.626 6 | # 0.1,0.25,105,0.151,3.785 7 | # 0.1,0.5,95,7.248,0.046 8 | # 0.1,0.5,100,3.1,0.655 9 | # 0.1,0.5,105,0.727,3.039 10 | # 0.1,1,95,9.313,0.084 11 | # 0.1,1,100,5.279,0.577 12 | # 0.1,1,105,2.313,2.137 13 | # 0.2,0.25,95,6.5,0.379 14 | # 0.2,0.25,100,2.96,1.716 15 | # 0.2,0.25,105,0.966,4.598 16 | # 0.4,0.25,95,8.151,2.025 17 | # 0.4,0.25,100,5.218,3.970 18 | # 0.4,0.25,105,3.106,6.735 19 | # 0.2,0.5,95,7.793,0.731 20 | # 0.2,1,95,10.336,1.099 21 | # 0.4,1,95,13.825,4.550 22 | # 0.4,0.5,95,10.425,3.215 23 | 24 | table = """ 25 | 0.2,0.5,100,4.548,2.102 26 | 0.2,0.5,105,2.241,4.552 27 | 0.2,1,100,7.079,2.369 28 | 0.2,1,105,4.539,4.356 29 | 0.4,0.5,100,7.650,5.197 30 | 0.4,0.5,105,5.444,7.748 31 | 0.4,1,100,11.213,6.465 32 | 0.4,1,105,8.989,8.767 33 | """ 34 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Magnuszewski_et_al_2025/common.py: -------------------------------------------------------------------------------- 1 | from PyMPDATA import Options 2 | 3 | OPTIONS = { 4 | "UPWIND": Options( 5 | n_iters=1, 6 | non_zero_mu_coeff=True, 7 | ), 8 | "MPDATA (2 it.)": Options( 9 | n_iters=2, 10 | nonoscillatory=True, 11 | non_zero_mu_coeff=True, 12 | ), 13 | "MPDATA (4 it.)": Options( 14 | n_iters=4, 15 | nonoscillatory=True, 16 | 
non_zero_mu_coeff=True, 17 | ), 18 | } 19 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Magnuszewski_et_al_2025/monte_carlo.py: -------------------------------------------------------------------------------- 1 | """ 2 | This code is a Python numba-fied implementation of the Monte Carlo method 3 | for pricing Asian options taken from 4 | [Numerical Methods in Finance with C++](https://doi.org/10.1017/CBO9781139017404) 5 | """ 6 | 7 | from functools import cached_property, lru_cache, partial 8 | from typing import Callable 9 | 10 | import numba 11 | import numpy as np 12 | 13 | jit = partial(numba.jit, fastmath=True, error_model="numpy", cache=True, nopython=True) 14 | 15 | # pylint: disable=too-few-public-methods 16 | 17 | 18 | class BSModel: 19 | def __init__(self, S0, r, sigma, T, M, seed): 20 | self.S0 = S0 21 | self.r = r 22 | self.sigma = sigma 23 | self.sigma2 = sigma * sigma 24 | self.b = r - 0.5 * self.sigma2 25 | self.T = T 26 | self.M = M 27 | self.t = np.linspace(0, T, M) 28 | self.bt = self.b * self.t 29 | self.sqrt_tm = np.sqrt(T / M) 30 | self.seed = seed 31 | 32 | @cached_property 33 | def generate_path(self): 34 | M = self.M 35 | S0 = self.S0 36 | bt = self.bt 37 | sigma = self.sigma 38 | sqrt_tm = self.sqrt_tm 39 | seed = self.seed 40 | 41 | @jit 42 | def numba_seed(): 43 | np.random.seed(seed) 44 | 45 | if seed is not None: 46 | numba_seed() 47 | 48 | @jit 49 | def body(path): 50 | path[:] = S0 * np.exp( 51 | bt + sigma * np.cumsum(np.random.standard_normal(M)) * sqrt_tm 52 | ) 53 | 54 | return body 55 | 56 | 57 | class PathDependentOption: 58 | def __init__(self, T, model, N): 59 | self.T = T 60 | self.model = model 61 | self.N = N 62 | self.payoff: Callable[[np.ndarray], float] = lambda path: 0.0 63 | 64 | @cached_property 65 | def price_by_mc(self): 66 | T = self.T 67 | model_generate_path = self.model.generate_path 68 | model_r = self.model.r 69 | payoff = self.payoff 70 | M = self.model.M 71 | N = self.N 72 | 73 | @jit 74 | def body(): 75 | sum_ct = 0.0 76 | path = np.empty(M) 77 | for _ in range(N): 78 | model_generate_path(path) 79 | sum_ct += payoff(path) 80 | return np.exp(-model_r * T) * (sum_ct / N) 81 | 82 | return body 83 | 84 | 85 | @lru_cache 86 | def make_payoff(K: float, option_type: str, average_type: str = "arithmetic"): 87 | assert average_type in ["arithmetic", "geometric"] 88 | if average_type != "arithmetic": 89 | raise NotImplementedError("Only arithmetic average is supported") 90 | if option_type == "call": 91 | 92 | @jit 93 | def payoff(path): 94 | return max(np.mean(path) - K, 0) 95 | 96 | elif option_type == "put": 97 | 98 | @jit 99 | def payoff(path): 100 | return max(K - np.mean(path), 0) 101 | 102 | else: 103 | raise ValueError("Invalid option") 104 | return payoff 105 | 106 | 107 | class FixedStrikeArithmeticAsianOption(PathDependentOption): 108 | def __init__(self, T, K, variant, model, N): 109 | super().__init__(T, model, N) 110 | self.K = K 111 | self.payoff = make_payoff(K, variant) 112 | 113 | 114 | class FixedStrikeGeometricAsianOption(PathDependentOption): 115 | def __init__(self, T, K, variant, model, N): 116 | super().__init__(T, model, N) 117 | self.K = K 118 | 119 | if variant == "call": 120 | self.payoff = lambda path: max(np.exp(np.mean(np.log(path))) - K, 0) 121 | elif variant == "put": 122 | self.payoff = lambda path: max(K - np.exp(np.mean(np.log(path))), 0) 123 | else: 124 | raise ValueError("Invalid option type") 125 | 
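# [illustrative usage sketch added in this review, not part of the original file]
# A minimal example of how the classes defined above might be combined to price a
# fixed-strike arithmetic-average Asian call by Monte Carlo; all numerical values
# below are arbitrary assumptions chosen only for illustration.
#
#     from PyMPDATA_examples.Magnuszewski_et_al_2025.monte_carlo import (
#         BSModel,
#         FixedStrikeArithmeticAsianOption,
#     )
#
#     model = BSModel(S0=100.0, r=0.08, sigma=0.4, T=1.0, M=1000, seed=44)
#     option = FixedStrikeArithmeticAsianOption(
#         T=1.0, K=100.0, variant="call", model=model, N=100_000
#     )
#     # price_by_mc is a cached_property returning a jitted function; calling it
#     # runs the Monte Carlo loop and returns the discounted mean payoff
#     print(option.price_by_mc())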
-------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example based on the solid-body rotation test from 3 | [Molenkamp 1968](https://doi.org/10.1175/1520-0450%281968%29007%3C0160:AOFDMA%3E2.0.CO;2) 4 | (as in Fig. 12 in [Jaruga et al. 2015](https://doi.org/10.5194/gmd-8-1005-2015)). 5 | 6 | demo.ipynb: 7 | .. include:: ./demo.ipynb.badges.md 8 | """ 9 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/analysis.py: -------------------------------------------------------------------------------- 1 | from joblib import Parallel, delayed 2 | from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.settings import ( 3 | Settings, 4 | ) 5 | from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.simulation import ( 6 | Simulation, 7 | ) 8 | 9 | from PyMPDATA import Options 10 | 11 | options = { 12 | "upwind": Options(n_iters=1), 13 | "2+fct": Options(n_iters=2, nonoscillatory=True), 14 | "3+fct+tot": Options(n_iters=3, nonoscillatory=True, third_order_terms=True), 15 | "2+fct+iga": Options(n_iters=2, nonoscillatory=True, infinite_gauge=True), 16 | } 17 | 18 | 19 | def compute_panel(panel): 20 | settings = Settings(n_rotations=6) 21 | simulation = Simulation(settings, options[panel]) 22 | if panel == "upwind": 23 | return simulation.state 24 | simulation.run() 25 | return simulation.state 26 | 27 | 28 | def fig_12_data(): 29 | data = Parallel(n_jobs=-2)( 30 | delayed(compute_panel)(panel) 31 | for panel in ["upwind", "2+fct", "3+fct+tot", "2+fct+iga"] 32 | ) 33 | return data 34 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "[![preview notebook](https://img.shields.io/static/v1?label=render%20on&logo=github&color=87ce3e&message=GitHub)](https://github.com/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/demo.ipynb)\n", 8 | "[![launch on mybinder.org](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?urlpath=lab/tree/examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/demo.ipynb)\n", 9 | "[![launch on Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/demo.ipynb)" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "references:\n", 17 | "- Molenkamp 1968: https://doi.org/10.1175/1520-0450%281968%29007%3C0160:AOFDMA%3E2.0.CO;2\n", 18 | "- Anderson & Fattahi 1974: https://doi.org/10.1175/1520-0469%281974%29031%3C1500:ACONSO%3E2.0.CO;2\n", 19 | "- Smolarkiewicz & Margolin 1998: https://doi.org/10.1006/jcph.1998.5901\n", 20 | "- Jaruga et al. 
2015 https://doi.org/10.5194/gmd-8-1005-2015" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import sys\n", 30 | "if 'google.colab' in sys.modules:\n", 31 | " !pip --quiet install open-atmos-jupyter-utils\n", 32 | " from open_atmos_jupyter_utils import pip_install_on_colab\n", 33 | " pip_install_on_colab('PyMPDATA-examples')" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "import numpy as np\n", 43 | "import matplotlib.pyplot as plt\n", 44 | "from open_atmos_jupyter_utils import show_plot\n", 45 | "from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.analysis import fig_12_data\n", 46 | "from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.settings import h0, h" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "data = fig_12_data()" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "fig, axs = plt.subplots(1,4,figsize=(15,4))\n", 65 | "\n", 66 | "def plot(data, ax):\n", 67 | " a1 = ax.imshow(data.T, origin='lower', cmap=plt.cm.Reds)\n", 68 | " cset = ax.contour(data.T, cmap=plt.cm.Set2)\n", 69 | " ax.clabel(cset, inline=True, fmt='%1.1f', fontsize=10)\n", 70 | " fig.colorbar(a1, ax=ax)\n", 71 | " a1.set_clim(vmin=h0-1, vmax=h0+h+1)\n", 72 | " ax.set_xlim(25,75)\n", 73 | " ax.set_ylim(50,100)\n", 74 | " ax.set_title(f\"min:{np.amin(data):.4g} max:{np.amax(data):.4g}\")\n", 75 | "\n", 76 | "for i in range(4):\n", 77 | " plot(data[i], axs[i])\n", 78 | "\n", 79 | "show_plot()" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [] 88 | } 89 | ], 90 | "metadata": { 91 | "kernelspec": { 92 | "display_name": "Python 3 (ipykernel)", 93 | "language": "python", 94 | "name": "python3" 95 | }, 96 | "language_info": { 97 | "codemirror_mode": { 98 | "name": "ipython", 99 | "version": 3 100 | }, 101 | "file_extension": ".py", 102 | "mimetype": "text/x-python", 103 | "name": "python", 104 | "nbconvert_exporter": "python", 105 | "pygments_lexer": "ipython3", 106 | "version": "3.9.2" 107 | } 108 | }, 109 | "nbformat": 4, 110 | "nbformat_minor": 4 111 | } 112 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/settings.py: -------------------------------------------------------------------------------- 1 | import numba 2 | import numpy as np 3 | from pystrict import strict 4 | 5 | grid = (100, 100) 6 | 7 | dt = 0.1 8 | dx = 1 9 | dy = 1 10 | omega = 0.1 11 | h = 4.0 12 | h0 = 1 13 | 14 | r = 15.0 * dx 15 | x0 = 50 * dx 16 | y0 = 75 * dy 17 | xc = 0.5 * grid[0] * dx 18 | yc = 0.5 * grid[1] * dy 19 | 20 | 21 | @strict 22 | class Settings: 23 | def __init__(self, n_rotations: int = 6): 24 | self.n_rotations = n_rotations 25 | 26 | @property 27 | def dt(self): 28 | return dt 29 | 30 | @property 31 | def nt(self): 32 | return int(628 * self.n_rotations) 33 | 34 | @property 35 | def size(self): 36 | return self.xrange[1], self.yrange[1] 37 | 38 | @property 39 | def xrange(self): 40 | return 0, grid[0] * dx 41 | 42 | @property 43 | def yrange(self): 44 | return 0, grid[1] * dy 45 | 46 | @property 47 | def grid(self): 48 | return grid 
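# [annotation added in this review, not part of the original file]
# pdf() below defines the initial condition of the Molenkamp solid-body-rotation
# test: a cone of height h above the background value h0, centred at (x0, y0)
# with radius r; stream_function() encodes rigid rotation with angular velocity
# omega about the domain centre (xc, yc), from which the advector field is derived.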
49 | 50 | @staticmethod 51 | @numba.njit() 52 | def pdf(x, y): 53 | tmp = (x - x0) ** 2 + (y - y0) ** 2 54 | return h0 + np.where( 55 | # if 56 | tmp - r**2 <= 0, 57 | # then 58 | h - np.sqrt(tmp / (r / h) ** 2), 59 | # else 60 | 0.0, 61 | ) 62 | 63 | @staticmethod 64 | def stream_function(xX, yY): 65 | x = xX * grid[0] * dx 66 | y = yY * grid[1] * dy 67 | return 1 / 2 * omega * ((x - xc) ** 2 + (y - yc) ** 2) 68 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12/simulation.py: -------------------------------------------------------------------------------- 1 | from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.settings import ( 2 | Settings, 3 | ) 4 | from PyMPDATA_examples.utils import nondivergent_vector_field_2d 5 | from PyMPDATA_examples.utils.discretisation import from_pdf_2d 6 | 7 | from PyMPDATA import Options, ScalarField, Solver, Stepper 8 | from PyMPDATA.boundary_conditions import Periodic 9 | 10 | 11 | class Simulation: 12 | def __init__(self, settings: Settings, options: Options): 13 | _, __, z = from_pdf_2d( 14 | settings.pdf, 15 | xrange=settings.xrange, 16 | yrange=settings.yrange, 17 | gridsize=settings.grid, 18 | ) 19 | stepper = Stepper(options=options, grid=settings.grid, non_unit_g_factor=False) 20 | advector = nondivergent_vector_field_2d( 21 | settings.grid, 22 | settings.size, 23 | settings.dt, 24 | settings.stream_function, 25 | options.n_halo, 26 | ) 27 | advectee = ScalarField( 28 | z.astype(dtype=options.dtype), 29 | halo=options.n_halo, 30 | boundary_conditions=(Periodic(), Periodic()), 31 | ) 32 | self.mpdata = Solver(stepper=stepper, advectee=advectee, advector=advector) 33 | self.nt = settings.nt 34 | 35 | @property 36 | def state(self): 37 | return self.mpdata.advectee.get().copy() 38 | 39 | def run(self): 40 | self.mpdata.advance(self.nt) 41 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/East_and_Marshall_1954.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | # pylint: disable=too-few-public-methods 5 | class SizeDistribution: 6 | def __init__(self, si): 7 | self.si = si 8 | self.n0 = ( 9 | 465 / si.centimetre**3 10 | ) # adjusted wrt original 700 to match mixing ratio of 1g/kg 11 | self.kappa = 22 12 | 13 | def pdf(self, r): 14 | return ( 15 | (self.n0 * self.si.micrometre) 16 | / r 17 | * np.exp(-self.kappa * (np.log10(r / (7 * self.si.micrometre)) ** 2)) 18 | * (1 / self.si.micrometre) 19 | ) 20 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/README.md: -------------------------------------------------------------------------------- 1 | # Olesik et al 2022 2 | The following examples are used in the article "On numerical broadening of particle size spectra: 3 | a condensational growth study using PyMPDATA" (https://doi.org/10.5194/gmd-15-3879-2022) 4 | 5 | ## Examples/Demos: 6 | - plots (Figures 1-9): 7 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?filepath=examples/PyMPDATA_examples%2FOlesik_et_al_2022/demo_make_plots.ipynb) 8 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA/Olesik_et_al_2022/demo_make_plots.ipynb) 9 | 10 | - 
relative dispersion (Table 1): 11 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?filepath=examples/PyMPDATA%2FOlesik_et_al_2022/demo_make_dispersion_ratio.ipynb) 12 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Olesik_et_al_2022/demo_make_dispersion_ratio.ipynb) 13 | 14 | - wall times (Table 2): 15 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?filepath=examples/PyMPDATA_examples%2FOlesik_et_al_2022/demo_wall_times.ipynb) 16 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/PyMPDATA_examples/Olesik_et_al_2022/demo_wall_times.ipynb) 17 | 18 | - appendix (Figures A1-A9): 19 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?filepath=examples/PyMPDATA_examples%2FOlesik_et_al_2022/demo_make_convergences.ipynb) 20 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Olesik_et_al_2022/demo_make_convergences.ipynb) 21 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a PyMPDATA implementation of a 1D particle population condensational growth 3 | problem with coordinate transformations, as presented in 4 | [Olesik et al. 2022](https://doi.org/10.5194/gmd-15-3879-2022). 5 | 6 | demo_analytical_solution.ipynb: 7 | .. include:: ./demo_analytical_solution.ipynb.badges.md 8 | 9 | demo_make_convergences.ipynb: 10 | .. include:: ./demo_make_convergences.ipynb.badges.md 11 | 12 | demo_make_dispersion_ratio.ipynb: 13 | .. include:: ./demo_make_dispersion_ratio.ipynb.badges.md 14 | 15 | demo_make_plots.ipynb: 16 | .. include:: ./demo_make_plots.ipynb.badges.md 17 | 18 | demo_wall_times.ipynb: 19 | .. 
include:: ./demo_wall_times.ipynb.badges.md 20 | """ 21 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/coordinates.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class x_id: 5 | @staticmethod 6 | def x(r): 7 | return r 8 | 9 | @staticmethod 10 | def r(x): 11 | return x 12 | 13 | @staticmethod 14 | def dx_dr(r): 15 | return r**0 16 | 17 | @staticmethod 18 | def moment_of_r_integral(x, k): 19 | return 1 / (k + 1) * x ** (k + 1) 20 | 21 | 22 | class x_p2: 23 | @staticmethod 24 | def x(r): 25 | return r**2 26 | 27 | @staticmethod 28 | def r(x): 29 | return np.sqrt(np.where(x < 0, 1e10, x)) 30 | 31 | @staticmethod 32 | def dx_dr(r): 33 | return 2 * r 34 | 35 | @staticmethod 36 | def moment_of_r_integral(x, k): 37 | return 2 / (k + 2) * x ** ((k + 2) / 2) 38 | 39 | 40 | class x_p3: 41 | @staticmethod 42 | def x(r): 43 | return r**3 44 | 45 | @staticmethod 46 | def r(x): 47 | return np.power(x, 1 / 3) 48 | 49 | @staticmethod 50 | def dx_dr(r): 51 | return 3 * r**2 52 | 53 | @staticmethod 54 | def moment_of_r_integral(x, k): 55 | return 3 / (k + 3) * x ** ((k + 3) / 3) 56 | 57 | 58 | class x_log_of_pn: 59 | def __init__(self, r0=1, base=np.e, n=3): 60 | self.r0 = r0 61 | self.base = base 62 | self.n = n 63 | 64 | def x(self, r): 65 | return np.log(r**self.n / self.r0**self.n) / np.log(self.base) 66 | 67 | def r(self, x): 68 | return self.r0 * self.base ** (x / self.n) 69 | 70 | def dx_dr(self, r): 71 | return self.n / r / np.log(self.base) 72 | 73 | def moment_of_r_integral(self, x, k): 74 | return ( 75 | self.r0**k 76 | * self.n 77 | / (k * np.log(self.base)) 78 | * self.base ** (k / self.n * x) 79 | ) 80 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/demo_wall_times.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "[![preview notebook](https://img.shields.io/static/v1?label=render%20on&logo=github&color=87ce3e&message=GitHub)](https://github.com/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Olesik_et_al_2022/demo_wall_times.ipynb)\n", 8 | "[![launch on mybinder.org](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/open-atmos/PyMPDATA.git/main?urlpath=lab/tree/examples/PyMPDATA_examples/Olesik_et_al_2022/demo_wall_times.ipynb)\n", 9 | "[![launch on Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/open-atmos/PyMPDATA/blob/main/examples/PyMPDATA_examples/Olesik_et_al_2022/demo_wall_times.ipynb)" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "see [Olesik et al. 
2022](https://doi.org/10.5194/gmd-15-3879-2022)" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "import sys\n", 26 | "if 'google.colab' in sys.modules:\n", 27 | " !pip --quiet install open-atmos-jupyter-utils\n", 28 | " from open_atmos_jupyter_utils import pip_install_on_colab\n", 29 | " pip_install_on_colab('PyMPDATA-examples')" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "from PyMPDATA_examples.Olesik_et_al_2022.wall_time import test_wall_time" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "test_wall_time(generate=False, print_tab=True)" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [] 56 | } 57 | ], 58 | "metadata": { 59 | "kernelspec": { 60 | "display_name": "Python 3 (ipykernel)", 61 | "language": "python", 62 | "name": "python3" 63 | }, 64 | "language_info": { 65 | "codemirror_mode": { 66 | "name": "ipython", 67 | "version": 3 68 | }, 69 | "file_extension": ".py", 70 | "mimetype": "text/x-python", 71 | "name": "python", 72 | "nbconvert_exporter": "python", 73 | "pygments_lexer": "ipython3", 74 | "version": "3.9.2" 75 | } 76 | }, 77 | "nbformat": 4, 78 | "nbformat_minor": 4 79 | } 80 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/equilibrium_drop_growth.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class DrDt: 5 | """eq. 7.20 in Rogers and Yau 1989""" 6 | 7 | def __init__(self, ksi_1, S): 8 | self.ksi = (S - 1) * ksi_1 9 | 10 | def __call__(self, r): 11 | return self.ksi / r 12 | 13 | def mean(self, r1, r2): 14 | return self.ksi * np.log(r2 / r1) / (r2 - r1) 15 | 16 | 17 | # pylint: disable=too-few-public-methods 18 | class PdfEvolver: 19 | """eq. 
7.32 in Rogers and Yau 1989""" 20 | 21 | def __init__(self, pdf, drdt: DrDt, t): 22 | self.t = t 23 | self.pdf = pdf 24 | self.drdt = drdt 25 | 26 | def __call__(self, r): 27 | with np.errstate(invalid="ignore"): 28 | arg = np.sqrt(r**2 - 2 * self.drdt.ksi * self.t) 29 | result = r / arg * self.pdf(arg) 30 | 31 | if isinstance(result.magnitude, np.ndarray): 32 | result = ( 33 | np.where(np.isfinite(result.magnitude), result.magnitude, 0) 34 | * result.units 35 | ) 36 | else: 37 | if not np.isfinite(result): 38 | result = 0 * result.units 39 | 40 | return result 41 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/wall_time.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | import numpy as np 4 | from PyMPDATA_examples.Olesik_et_al_2022.coordinates import x_id, x_log_of_pn 5 | from PyMPDATA_examples.Olesik_et_al_2022.settings import ( 6 | Settings, 7 | default_GC_max, 8 | default_nr, 9 | default_opt_set, 10 | ) 11 | from PyMPDATA_examples.Olesik_et_al_2022.simulation import Simulation 12 | 13 | from PyMPDATA import Options 14 | 15 | grid_layout_set = (x_log_of_pn(r0=1, base=2),) 16 | opt_set = default_opt_set.values() 17 | 18 | RTOL = 1.5 19 | 20 | 21 | def test_wall_time(n_runs=3, mrats=(20,), generate=False, print_tab=True, rtol=RTOL): 22 | settings = Settings(nr=default_nr * 10, mixing_ratios_g_kg=np.array(mrats)) 23 | table_data = {"opts": [], "values": []} 24 | for grid in grid_layout_set: 25 | for opts in opt_set: 26 | i = 0 27 | minimum_values = [] 28 | while i < n_runs: 29 | result = make_data(settings, grid, opts) 30 | wall_times = np.asarray(result["wall_time"]) 31 | minimal = np.nanmin(wall_times[wall_times > 0]) 32 | minimum_values.append(minimal) 33 | i += 1 34 | selected_value = np.min(minimum_values) 35 | if opts == {"n_iters": 1}: 36 | norm = selected_value 37 | table_data["opts"].append(str(opts) + "(" + grid.__class__.__name__ + ")") 38 | table_data["values"].append( 39 | np.nan if norm == 0 else round(selected_value / norm, 1) 40 | ) 41 | make_textable(data=table_data, generate=generate, print_tab=print_tab) 42 | compare_refdata(data=table_data, rtol=rtol, generate=generate) 43 | 44 | 45 | def make_data(settings, grid, opts): 46 | options = Options(**opts) 47 | simulation = Simulation( 48 | settings=settings, 49 | grid_layout=grid, 50 | psi_coord=x_id(), 51 | opts=options, 52 | GC_max=default_GC_max, 53 | ) 54 | result = {"wall_time": []} 55 | last_step = 0 56 | for n_steps in simulation.out_steps: 57 | steps = n_steps - last_step 58 | wall_time_per_timestep = simulation.step(steps) 59 | last_step += steps 60 | result["wall_time"].append(wall_time_per_timestep) 61 | return result 62 | 63 | 64 | def make_textable(data, generate=False, print_tab=False): 65 | latex_data = ( 66 | r"\hline" + " Variant & Elapsed Real Time (wrt upwind) " + r"\\ \hline" + "\n" 67 | ) 68 | for opt, value in zip(data["opts"], data["values"]): 69 | latex_data += r"\hline" + f" {opt} & {value} " + r"\\ \hline" + "\n" 70 | latex_start = r"\begin{table}[]" + "\n" + r"\begin{tabular}{| l | l |}" + "\n" 71 | latex_end = r"\end{tabular}" + "\n" + r"\end{table}" 72 | latex_table = latex_start + latex_data + latex_end 73 | if print_tab: 74 | print(latex_table) 75 | with open( 76 | pathlib.Path(__file__).parent.joinpath("wall_time_textable.txt"), 77 | "w+" if generate else "r", 78 | encoding="utf-8", 79 | ) as f: 80 | if generate: 81 | f.write(latex_table) 82 | 83 | 84 
| def compare_refdata(data, rtol, generate=False): 85 | delimiter = ";" 86 | path = pathlib.Path(__file__).parent.joinpath("wall_time_refdata.txt") 87 | if generate: 88 | table = ( 89 | np.char.array(np.concatenate([data["opts"], data["values"]])) 90 | .reshape(2, len(data["values"])) 91 | .T 92 | ) 93 | np.savetxt(path, table, delimiter=delimiter, fmt="%s") 94 | else: 95 | table = np.loadtxt(path, delimiter=delimiter, dtype=str) 96 | np.testing.assert_allclose( 97 | actual=data["values"], 98 | desired=np.array(table[:, 1].astype(float)), 99 | rtol=rtol, 100 | ) 101 | np.testing.assert_array_equal(data["opts"], table[:, 0]) 102 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/wall_time_refdata.txt: -------------------------------------------------------------------------------- 1 | {'n_iters': 1}(x_log_of_pn);1.0 2 | {'n_iters': 2}(x_log_of_pn);2.5 3 | {'n_iters': 2, 'infinite_gauge': True}(x_log_of_pn);2.2 4 | {'n_iters': 2, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn);5.9 5 | {'n_iters': 2, 'DPDC': True, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn);6.2 6 | {'n_iters': 3, 'third_order_terms': True}(x_log_of_pn);5.7 7 | {'n_iters': 3}(x_log_of_pn);4.1 8 | {'n_iters': 3, 'third_order_terms': True, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn);10.9 9 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Olesik_et_al_2022/wall_time_textable.txt: -------------------------------------------------------------------------------- 1 | \begin{table}[] 2 | \begin{tabular}{| l | l |} 3 | \hline Variant & Elapsed Real Time (wrt upwind) \\ \hline 4 | \hline {'n_iters': 1}(x_log_of_pn) & 1.0 \\ \hline 5 | \hline {'n_iters': 2}(x_log_of_pn) & 2.5 \\ \hline 6 | \hline {'n_iters': 2, 'infinite_gauge': True}(x_log_of_pn) & 2.2 \\ \hline 7 | \hline {'n_iters': 2, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn) & 5.9 \\ \hline 8 | \hline {'n_iters': 2, 'DPDC': True, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn) & 6.2 \\ \hline 9 | \hline {'n_iters': 3, 'third_order_terms': True}(x_log_of_pn) & 5.7 \\ \hline 10 | \hline {'n_iters': 3}(x_log_of_pn) & 4.1 \\ \hline 11 | \hline {'n_iters': 3, 'third_order_terms': True, 'infinite_gauge': True, 'nonoscillatory': True}(x_log_of_pn) & 10.9 \\ \hline 12 | \end{tabular} 13 | \end{table} -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example of 2D droplet size-spectral/spatial problem of 3 | condensational growth in a column of air, 4 | as described in [Shipway and Hill 2012](https://doi.org/10.1002/qj.1913). 5 | 6 | fig_1.ipynb 7 | .. 
include:: ./fig_1.ipynb.badges.md 8 | """ 9 | 10 | from .arakawa_c import arakawa_c 11 | from .droplet_activation import DropletActivation 12 | from .formulae import convert_to 13 | from .mpdata import MPDATA 14 | from .plot import plot 15 | from .settings import Settings, const, si 16 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/arakawa_c.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class arakawa_c: 5 | @staticmethod 6 | def z_scalar_coord(grid): 7 | zZ = np.linspace(1 / 2, grid[0] - 1 / 2, grid[0]) 8 | return zZ 9 | 10 | @staticmethod 11 | def z_vector_coord(grid): 12 | zZ = np.linspace(0, grid[0], grid[0] + 1) 13 | return zZ 14 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/droplet_activation.py: -------------------------------------------------------------------------------- 1 | from functools import lru_cache 2 | 3 | import numba 4 | import numpy as np 5 | 6 | from PyMPDATA.impl.enumerations import ( 7 | ARG_DATA, 8 | ARG_FOCUS, 9 | META_AND_DATA_DATA, 10 | META_AND_DATA_META, 11 | OUTER, 12 | SIGN_RIGHT, 13 | ) 14 | from PyMPDATA.impl.traversals_common import make_fill_halos_loop 15 | 16 | 17 | @lru_cache() 18 | def _make_scalar(value, set_value, halo, dtype, jit_flags, _): 19 | @numba.njit(**jit_flags) 20 | def impl(psi, __, sign): 21 | if sign == SIGN_RIGHT: 22 | return 0 23 | z = psi[ARG_FOCUS][OUTER] 24 | activated = np.sum(psi[ARG_DATA][z : z + 1, halo:-halo]) 25 | # assert activated < value 26 | result = max(0, value - activated) 27 | return result 28 | 29 | if dtype == complex: 30 | 31 | @numba.njit(**jit_flags) 32 | def fill_halos_scalar(psi, n, sign): 33 | return complex( 34 | impl((psi[META_AND_DATA_META], psi[META_AND_DATA_DATA].real), n, sign), 35 | impl((psi[META_AND_DATA_META], psi[META_AND_DATA_DATA].imag), n, sign), 36 | ) 37 | 38 | else: 39 | 40 | @numba.njit(**jit_flags) 41 | def fill_halos_scalar(psi, n, sign): 42 | return impl(psi, n, sign) 43 | 44 | return make_fill_halos_loop(jit_flags, set_value, fill_halos_scalar) 45 | 46 | 47 | # pylint: disable=too-few-public-methods 48 | class DropletActivation: 49 | def __init__(self, value, dr, dz): 50 | self._value = value / dz / dr 51 | 52 | def make_scalar(self, indexers, halo, dtype, jit_flags, dimension_index): 53 | return _make_scalar( 54 | self._value, indexers.set, halo, dtype, jit_flags, dimension_index 55 | ) 56 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/formulae.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | import numpy as np 4 | from scipy import constants 5 | 6 | 7 | def convert_to(value, unit): 8 | value /= unit 9 | 10 | 11 | si = namedtuple( 12 | "si", 13 | ( 14 | "kg", 15 | "m", 16 | "s", 17 | "metres", 18 | "second", 19 | "um", 20 | "hPa", 21 | "micrometre", 22 | "minutes", 23 | "km", 24 | "dimensionless", 25 | "kelvin", 26 | "mg", 27 | ), 28 | )( 29 | kg=1.0, 30 | m=1.0, 31 | s=1.0, 32 | metres=1.0, 33 | second=1.0, 34 | um=1e-6, 35 | hPa=100.0, 36 | micrometre=1e-6, 37 | minutes=60.0, 38 | km=1000.0, 39 | dimensionless=1.0, 40 | kelvin=1.0, 41 | mg=1e-6, 42 | ) 43 | 44 | _Mv = 0.018015 45 | _Md = 0.028970 46 | 47 | const = namedtuple( 48 | "const", 49 | ( 50 | "eps", 51 | "g", 52 | 
"p1000", 53 | "Rd", 54 | "Rv", 55 | "c_pd", 56 | "c_pv", 57 | "lv", 58 | "rho_l", 59 | "T0", 60 | "ARM_C1", 61 | "ARM_C2", 62 | "ARM_C3", 63 | ), 64 | )( 65 | eps=_Mv / _Md, 66 | g=constants.g, 67 | p1000=1000 * si.hPa, 68 | Rd=287.0027, 69 | Rv=constants.R / _Mv, 70 | c_pd=1005, 71 | c_pv=1850, 72 | lv=2.5e6, 73 | rho_l=1e3 * si.kg / si.m**3, 74 | T0=constants.zero_Celsius, 75 | ARM_C1=6.1094 * si.hPa, 76 | ARM_C2=17.625 * si.dimensionless, 77 | ARM_C3=243.04 * si.kelvin, 78 | ) 79 | 80 | 81 | def rho_d(p, qv, theta_std): 82 | return ( 83 | p 84 | * (1 - 1 / (1 + const.eps / qv)) 85 | / (np.power(p / const.p1000, const.Rd / const.c_pd) * const.Rd * theta_std) 86 | ) 87 | 88 | 89 | def drho_dz(g, p, T, qv, lv, dql_dz=0): 90 | Rq = const.Rv / (1 / qv + 1) + const.Rd / (1 + qv) 91 | cp = const.c_pv / (1 / qv + 1) + const.c_pd / (1 + qv) 92 | rho = p / Rq / T 93 | return (g / T * rho * (Rq / cp - 1) - p * lv / cp / T**2 * dql_dz) / Rq 94 | 95 | 96 | # A14 in libcloudph++ 1.0 paper 97 | def temperature(rhod, thd): 98 | return thd * np.power( 99 | rhod * thd / const.p1000 * const.Rd, 100 | const.Rd / const.c_pd / (1 - const.Rd / const.c_pd), 101 | ) 102 | 103 | 104 | # A15 in libcloudph++ 1.0 paper 105 | def pressure(rhod, T, qv): 106 | return rhod * (1 + qv) * (const.Rv / (1 / qv + 1) + const.Rd / (1 + qv)) * T 107 | 108 | 109 | def th_dry(th_std, qv): 110 | return th_std * np.power(1 + qv / const.eps, const.Rd / const.c_pd) 111 | 112 | 113 | def pvs_Celsius(T): 114 | return const.ARM_C1 * np.exp((const.ARM_C2 * T) / (T + const.ARM_C3)) 115 | 116 | 117 | def pv(p, qv): 118 | return p * qv / (qv + const.eps) 119 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/mpdata.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PyMPDATA import ScalarField, Solver, Stepper, VectorField 4 | from PyMPDATA.boundary_conditions import Constant, Extrapolated 5 | from PyMPDATA.impl.enumerations import INNER, OUTER 6 | 7 | from .arakawa_c import arakawa_c 8 | 9 | 10 | class MPDATA: 11 | # pylint: disable=too-few-public-methods 12 | def __init__( 13 | self, nz, dt, qv_of_zZ_at_t0, g_factor_of_zZ, nr, options, activation_bc 14 | ): 15 | self.t = 0 16 | self.dt = dt 17 | self.fields = ("qv", "ql") 18 | 19 | self.options = options 20 | 21 | self._solvers = {} 22 | for k in self.fields: 23 | grid = (nz, nr) if nr > 1 and k == "ql" else (nz,) 24 | 25 | bcs_extrapol = tuple( 26 | Extrapolated(dim=d) 27 | for d in ((OUTER, INNER) if k == "ql" and nr > 1 else (INNER,)) 28 | ) 29 | bcs_zero = tuple( 30 | Extrapolated(dim=d) 31 | for d in ((OUTER, INNER) if k == "ql" and nr > 1 else (INNER,)) 32 | ) 33 | 34 | stepper = Stepper( 35 | options=self.options, n_dims=len(grid), non_unit_g_factor=True 36 | ) 37 | 38 | data = g_factor_of_zZ(arakawa_c.z_scalar_coord(grid)) 39 | if nr > 1 and k == "ql": 40 | data = np.repeat(data.reshape(-1, 1), nr, axis=1).squeeze() 41 | g_factor = ScalarField( 42 | data=data, halo=self.options.n_halo, boundary_conditions=bcs_extrapol 43 | ) 44 | 45 | if nr == 1 or k == "qv": 46 | data = (np.zeros(nz + 1),) 47 | else: 48 | data = (np.zeros((nz + 1, nr)), np.zeros((nz, nr + 1))) 49 | advector = VectorField( 50 | data=data, halo=self.options.n_halo, boundary_conditions=bcs_zero 51 | ) 52 | if k == "qv": 53 | data = qv_of_zZ_at_t0(arakawa_c.z_scalar_coord(grid)) 54 | bcs = (Constant(value=data[0]),) 55 | else: 56 | data = np.zeros(grid) 57 | if nr == 
1: 58 | bcs = (Constant(value=0),) 59 | else: 60 | bcs = (Constant(value=0), activation_bc) 61 | advectee = ScalarField( 62 | data=data, halo=self.options.n_halo, boundary_conditions=bcs 63 | ) 64 | self._solvers[k] = Solver( 65 | stepper=stepper, advectee=advectee, advector=advector, g_factor=g_factor 66 | ) 67 | 68 | def __getitem__(self, k): 69 | return self._solvers[k] 70 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Shipway_and_Hill_2012/settings.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | from numdifftools import Derivative 5 | from PyMPDATA_examples.Olesik_et_al_2022.settings import ksi_1 as default_ksi_1 6 | from pystrict import strict 7 | from scipy.integrate import solve_ivp 8 | from scipy.interpolate import interp1d 9 | 10 | from . import formulae 11 | from .arakawa_c import arakawa_c 12 | from .formulae import const, si 13 | 14 | 15 | @strict 16 | class Settings: 17 | def __init__( 18 | self, 19 | dt: float, 20 | dz: float, 21 | rhod_w_const: float, 22 | t_max: float = 15 * si.minutes, 23 | nr: int = 1, 24 | r_min: float = np.nan, 25 | r_max: float = np.nan, 26 | p0: Optional[float] = None, 27 | ksi_1: float = default_ksi_1.to_base_units().magnitude, 28 | z_max: float = 3000 * si.metres, 29 | apprx_drhod_dz: bool = True, 30 | ): 31 | self.dt = dt 32 | self.dz = dz 33 | 34 | self.nr = nr 35 | self.ksi_1 = ksi_1 36 | 37 | self.z_max = z_max 38 | self.t_max = t_max 39 | 40 | self.qv = interp1d((0, 740, 3260), (0.015, 0.0138, 0.0024)) 41 | self._th = interp1d((0, 740, 3260), (297.9, 297.9, 312.66)) 42 | 43 | # note: not in the paper, 44 | # https://github.com/BShipway/KiD/tree/master/src/physconst.f90#L43 45 | p0 = p0 or 1000 * si.hPa 46 | 47 | self.rhod0 = formulae.rho_d(p0, self.qv(0), self._th(0)) 48 | self.thd = lambda z: formulae.th_dry(self._th(z), self.qv(z)) 49 | 50 | def drhod_dz(z, rhod): 51 | T = formulae.temperature(rhod[0], self.thd(z)) 52 | p = formulae.pressure(rhod[0], T, self.qv(z)) 53 | drhod_dz = formulae.drho_dz(const.g, p, T, self.qv(z), const.lv) 54 | if not apprx_drhod_dz: # to resolve issue #335 55 | qv = self.qv(z) 56 | dqv_dz = Derivative(self.qv)(z) 57 | drhod_dz = drhod_dz / (1 + qv) - rhod * dqv_dz / (1 + qv) 58 | return drhod_dz 59 | 60 | z_points = np.arange(0, self.z_max + self.dz / 2, self.dz / 2) 61 | rhod_solution = solve_ivp( 62 | fun=drhod_dz, 63 | t_span=(0, self.z_max), 64 | y0=np.asarray((self.rhod0,)), 65 | t_eval=z_points, 66 | ) 67 | assert rhod_solution.success 68 | 69 | self.rhod = interp1d(z_points, rhod_solution.y[0]) 70 | 71 | self.t_1 = 600 * si.s 72 | self.rhod_w = lambda t: ( 73 | rhod_w_const * np.sin(np.pi * t / self.t_1) if t < self.t_1 else 0 74 | ) 75 | 76 | self.r_min = r_min 77 | self.r_max = r_max 78 | self.bin_boundaries, self.dr = np.linspace( 79 | self.r_min, self.r_max, self.nr + 1, retstep=True 80 | ) 81 | 82 | self.dr_power = {} 83 | for k in (1, 2, 3, 4): 84 | self.dr_power[k] = ( 85 | self.bin_boundaries[1:] ** k - self.bin_boundaries[:-1] ** k 86 | ) 87 | self.dr_power[k] = self.dr_power[k].reshape(1, -1).T 88 | 89 | self.z_vec = self.dz * arakawa_c.z_vector_coord((self.nz,)) 90 | 91 | @property 92 | def nz(self): 93 | nz = self.z_max / self.dz 94 | assert nz == int(nz) 95 | return int(nz) 96 | 97 | @property 98 | def nt(self): 99 | nt = self.t_max / self.dt 100 | assert nt == int(nt) 101 | return int(nt) 102 | 
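# [illustrative usage sketch added in this review, not part of the original file]
# A minimal, hypothetical instantiation of the Settings class defined above; the
# numerical values are assumptions for illustration only (the values used in the
# accompanying fig_1.ipynb notebook may differ). rhod_w_const is the amplitude of
# the prescribed rhod_w(t) forcing (dry-air density times vertical velocity).
#
#     from PyMPDATA_examples.Shipway_and_Hill_2012 import Settings, si
#
#     settings = Settings(dt=1 * si.s, dz=25 * si.m, rhod_w_const=1.0)
#     print(settings.nz, settings.nt)  # 120 grid cells, 900 time steps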
-------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_1984/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example of 3D advection, a spherical signal revolving in a box, based on 3 | [Smolarkiewicz, 1984](https://doi.org/10.1016/0021-9991(84)90121-9). 4 | 5 | figs_13-14.ipynb: 6 | .. include:: ./figs_13-14.ipynb.badges.md 7 | """ 8 | 9 | from .settings import Settings 10 | from .simulation import Simulation 11 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_1984/settings.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pystrict import strict 3 | 4 | 5 | @strict 6 | class Settings: 7 | def __init__(self, n: int, dt: float): 8 | self.grid = (n, n, n) 9 | self.dt = dt 10 | self.L = 100 11 | self.dx = self.L / n 12 | self.dy = self.dx 13 | self.dz = self.dx 14 | self.h = 4 15 | self.r = 15 16 | d = 25 / np.sqrt(3) 17 | self.x0 = 50 - d 18 | self.y0 = 50 + d 19 | self.z0 = 50 + d 20 | 21 | self.omega = 0.1 22 | self.xc = 50 23 | self.yc = 50 24 | self.zc = 50 25 | 26 | @property 27 | def advector(self): 28 | """constant angular velocity rotational field""" 29 | 30 | data = [None, None, None] 31 | for index, letter in enumerate(("x", "y", "z")): 32 | i, j, k = np.indices((g + (gi == index) for gi, g in enumerate(self.grid))) 33 | if letter == "x": 34 | data[index] = ( 35 | -((j + 0.5) * self.dy - self.yc) + ((k + 0.5) * self.dz - self.zc) 36 | ) / self.dx 37 | elif letter == "y": 38 | data[index] = ( 39 | +((i + 0.5) * self.dx - self.xc) - ((k + 0.5) * self.dz - self.zc) 40 | ) / self.dy 41 | elif letter == "z": 42 | data[index] = ( 43 | -((i + 0.5) * self.dx - self.xc) + ((j + 0.5) * self.dy - self.yc) 44 | ) / self.dz 45 | data[index] *= self.omega / np.sqrt(3) * self.dt 46 | return data 47 | 48 | @property 49 | def advectee(self): 50 | i, j, k = np.indices(self.grid) 51 | dist = ( 52 | ((i + 0.5) * self.dx - self.x0) ** 2 53 | + ((j + 0.5) * self.dy - self.y0) ** 2 54 | + ((k + 0.5) * self.dz - self.z0) ** 2 55 | ) 56 | return np.where(dist - pow(self.r, 2) <= 0, self.h, 0) 57 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_1984/simulation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PyMPDATA import ScalarField, Solver, Stepper, VectorField 4 | from PyMPDATA.boundary_conditions import Constant 5 | 6 | 7 | class Simulation: 8 | def __init__(self, settings, options, static=True, n_threads=None): 9 | bcs = tuple(Constant(0) for _ in settings.grid) 10 | 11 | advector = VectorField( 12 | data=tuple(comp.astype(options.dtype) for comp in settings.advector), 13 | halo=options.n_halo, 14 | boundary_conditions=bcs, 15 | ) 16 | 17 | advectee = ScalarField( 18 | data=np.asarray(settings.advectee, dtype=options.dtype), 19 | halo=options.n_halo, 20 | boundary_conditions=bcs, 21 | ) 22 | 23 | args = {"grid": settings.grid} if static else {"n_dims": len(settings.grid)} 24 | if n_threads is not None: 25 | args["n_threads"] = n_threads 26 | stepper = Stepper(options=options, **args) 27 | self.solver = Solver(stepper=stepper, advectee=advectee, advector=advector) 28 | 29 | def run(self, nt): 30 | return self.solver.advance(nt) 31 | 32 | @property 33 | def advectee(self): 34 | 
return self.solver.advectee 35 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_2006_Figs_3_4_10_11_12/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example of 1D homogeneous advection test from 3 | [Smolarkiewicz 2006](https://doi.org/10.1002/fld.1071), depicting infinite-gauge and 4 | flux-corrected transport case. 5 | 6 | demo.ipynb: 7 | .. include:: ./demo.ipynb.badges.md 8 | 9 | ![plot](https://github.com/open-atmos/PyMPDATA/releases/download/tip/fig_4.svg) 10 | """ 11 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_2006_Figs_3_4_10_11_12/settings.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pystrict import strict 3 | 4 | 5 | @strict 6 | class Settings: 7 | nt = 1600 8 | dt = 1 9 | nx = 500 10 | C = 0.5 11 | x_min = -250 12 | x_max = 250 13 | 14 | def __init__(self, shape: str): 15 | if shape == "cosine": 16 | self.cdf = Settings.cdf_cosine 17 | elif shape == "rect": 18 | self.cdf = Settings.cdf_rect 19 | else: 20 | raise ValueError() 21 | 22 | @staticmethod 23 | def cdf_cosine(x): 24 | x_mid = -150 25 | f = 2 / 12 26 | amplitude = 2 27 | 28 | pdf = np.where(np.abs(x - x_mid) < 10, amplitude * np.cos(f * (x - x_mid)), 0) 29 | return np.cumsum(pdf) 30 | 31 | @staticmethod 32 | def cdf_rect(x): 33 | x_mid = -150 34 | amplitude = 2 35 | offset = 2 36 | 37 | pdf = offset + np.where(np.abs(x - x_mid) <= 12, amplitude, 0) 38 | return np.cumsum(pdf) 39 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Smolarkiewicz_2006_Figs_3_4_10_11_12/simulation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PyMPDATA_examples.Smolarkiewicz_2006_Figs_3_4_10_11_12.settings import Settings 3 | from PyMPDATA_examples.utils.discretisation import from_cdf_1d 4 | 5 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 6 | from PyMPDATA.boundary_conditions import Periodic 7 | 8 | 9 | class Simulation: 10 | def __init__(self, settings: Settings, options: Options): 11 | _, state = from_cdf_1d( 12 | settings.cdf, settings.x_min, settings.x_max, settings.nx 13 | ) 14 | 15 | self.stepper = Solver( 16 | stepper=Stepper( 17 | options=options, n_dims=len(state.shape), non_unit_g_factor=False 18 | ), 19 | advectee=ScalarField( 20 | state.astype(options.dtype), 21 | halo=options.n_halo, 22 | boundary_conditions=(Periodic(),), 23 | ), 24 | advector=VectorField( 25 | (np.full(state.shape[0] + 1, settings.C, dtype=options.dtype),), 26 | halo=options.n_halo, 27 | boundary_conditions=(Periodic(),), 28 | ), 29 | ) 30 | self.nt = settings.nt 31 | 32 | @property 33 | def state(self): 34 | return self.stepper.advectee.get().copy() 35 | 36 | def run(self): 37 | self.stepper.advance(self.nt) 38 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/Williamson_and_Rasch_1989_as_in_Jaruga_et_al_2015_Fig_14/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example is based on 3 | [Williamson and Rasch 1989](https://doi.org/10.1175/1520-0493(1989)117%3C0102:TDSLTW%3E2.0.CO;2). 
4 | It demonstrates the use of `PyMPDATA.solver.g_factor` 5 | to transform the advection problem onto a sphere. 6 | 7 | demo_over_the_pole.ipynb: 8 | .. include:: ./demo_over_the_pole.ipynb.badges.md 9 | """ 10 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. include:: ../docs/pympdata_examples_landing.md 3 | """ 4 | 5 | from importlib.metadata import PackageNotFoundError, version 6 | 7 | try: 8 | __version__ = version(__name__) 9 | except PackageNotFoundError: 10 | # package is not installed 11 | pass 12 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/advection_diffusion_1d/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | PyMPDATA 1D advection-diffusion example with error analysis for different initial parameters. 3 | 4 | demo.ipynb: 5 | .. include:: ./demo.ipynb.badges.md 6 | """ 7 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/advection_diffusion_2d/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | PyMPDATA 2D advection-diffusion example with gif creation. 3 | 4 | advection-diffusion-2d.ipynb: 5 | .. include:: ./advection-diffusion-2d.ipynb.badges.md 6 | """ 7 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/trixi_comparison/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example uses a basic 2D advection test case to compare PyMPDATA 3 | solution against Trixi.jl (Julia DG code) 4 | 5 | advection_comparison.ipynb: 6 | .. include:: ./advection_comparison.ipynb.badges.md 7 | """ 8 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Various utility functions for PyMPDATA examples. 
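Re-exported here are `show_plot` (from open_atmos_jupyter_utils) and the
`nondivergent_vector_field_2d` helper; the `discretisation`, `error_norms` and
`financial_formulae` submodules provide PDF/CDF-based grid discretisation,
error measures and option-pricing closed forms, respectively.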
3 | """ 4 | 5 | from open_atmos_jupyter_utils import show_plot 6 | 7 | from .nondivergent_vector_field_2d import nondivergent_vector_field_2d 8 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/discretisation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import integrate 3 | 4 | 5 | def from_pdf_2d(pdf: callable, xrange: list, yrange: list, gridsize: list): 6 | z = np.empty(gridsize) 7 | dx, dy = (xrange[1] - xrange[0]) / gridsize[0], (yrange[1] - yrange[0]) / gridsize[ 8 | 1 9 | ] 10 | for i in range(gridsize[0]): 11 | for j in range(gridsize[1]): 12 | z[i, j] = ( 13 | integrate.nquad( 14 | pdf, 15 | ranges=( 16 | (xrange[0] + dx * i, xrange[0] + dx * (i + 1)), 17 | (yrange[0] + dy * j, yrange[0] + dy * (j + 1)), 18 | ), 19 | )[0] 20 | / dx 21 | / dy 22 | ) 23 | x = np.linspace(xrange[0] + dx / 2, xrange[1] - dx / 2, gridsize[0]) 24 | y = np.linspace(yrange[0] + dy / 2, yrange[1] - dy / 2, gridsize[1]) 25 | return x, y, z 26 | 27 | 28 | def from_cdf_1d(cdf: callable, x_min: float, x_max: float, nx: int): 29 | dx = (x_max - x_min) / nx 30 | x = np.linspace(x_min + dx / 2, x_max - dx / 2, nx) 31 | xh = np.linspace(x_min, x_max, nx + 1) 32 | y = np.diff(cdf(xh)) / dx 33 | return x, y 34 | 35 | 36 | def discretised_analytical_solution(rh, pdf_t, midpoint_value=False, r=None): 37 | if midpoint_value: 38 | assert r is not None 39 | else: 40 | assert r is None 41 | output = np.empty(rh.shape[0] - 1) 42 | for i in range(output.shape[0]): 43 | if midpoint_value: 44 | output[i] = pdf_t(r[i]) 45 | else: 46 | dcdf, _ = integrate.quad(pdf_t, rh[i], rh[i + 1]) 47 | output[i] = dcdf / (rh[i + 1] - rh[i]) 48 | return output 49 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/error_norms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def L2(numerical, analytical, nt): 5 | assert numerical.shape == analytical.shape 6 | N = analytical.size 7 | err2 = np.log(np.sqrt(sum(pow(numerical - analytical, 2)) / nt / N)) / np.log(2) 8 | return err2 9 | 10 | 11 | def Smolarkiewicz_Grabowski_1990_eq21(numerical, analytical, T): 12 | assert numerical.shape == analytical.shape 13 | NX = analytical.size 14 | err = np.sqrt(sum(pow(numerical - analytical, 2)) / NX) / T 15 | return err 16 | 17 | 18 | def modified_Smolarkiewicz_Rasch_r0(numerical, analytical, T, g_factor): 19 | NX = analytical.size 20 | err = ( 21 | np.sqrt(sum(pow(numerical - analytical, 2) * g_factor) / NX) 22 | / np.amax(analytical) 23 | / T 24 | ) 25 | return err 26 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/financial_formulae/Bjerksund_and_Stensland_1993.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PyMPDATA_examples.utils.financial_formulae.Black_Scholes_1973 as BS 3 | 4 | 5 | def _phi( 6 | S: [np.ndarray, float], 7 | gamma: float, 8 | H: float, 9 | I: float, 10 | r: float, 11 | b: float, 12 | var: float, 13 | T: float, 14 | ): 15 | lmbd = (-r + gamma * b + 0.5 * gamma * (gamma - 1) * var) * T 16 | d = -(np.log(S / H) + (b + (gamma - 0.5) * var) * T) / np.sqrt(var * T) 17 | kappa = 2 * b / var + (2 * gamma - 1) 18 | return ( 19 | np.exp(lmbd) 20 | * np.power(S, gamma) 21 | * ( 22 | BS.N(d) 23 | - pow((I / S), kappa) * BS.N(d - 2 
* np.log(I / S) / np.sqrt(var * T)) 24 | ) 25 | ) 26 | 27 | 28 | def c_amer( 29 | S: [np.ndarray, float], 30 | K: [float, np.ndarray], 31 | T: float, 32 | r: float, 33 | b: float, 34 | sgma: float, 35 | ): 36 | if b >= r: 37 | return BS.c_euro(S, K=K, T=T, r=r, b=b, sgma=sgma) 38 | 39 | var = sgma * sgma 40 | beta = (0.5 - b / var) + np.sqrt(pow((b / var - 0.5), 2) + 2 * r / var) 41 | BInf = beta / (beta - 1) * K 42 | B0 = np.maximum(K, r / (r - b) * K) 43 | ht = -(b * T + 2 * sgma * np.sqrt(T)) * B0 / (BInf - B0) 44 | I = B0 + (BInf - B0) * (1 - np.exp(ht)) 45 | alpha = (I - K) * pow(I, -beta) 46 | 47 | return np.where( 48 | S >= I, 49 | S - K, 50 | alpha * np.power(S, beta) 51 | + ( 52 | -alpha * _phi(S, gamma=beta, H=I, I=I, r=r, b=b, var=var, T=T) 53 | + _phi(S, gamma=1, H=I, I=I, r=r, b=b, var=var, T=T) 54 | - _phi(S, gamma=1, H=K, I=I, r=r, b=b, var=var, T=T) 55 | - K * _phi(S, gamma=0, H=I, I=I, r=r, b=b, var=var, T=T) 56 | + K * _phi(S, gamma=0, H=K, I=I, r=r, b=b, var=var, T=T) 57 | ), 58 | ) 59 | 60 | 61 | def p_amer(S: [np.ndarray, float], K: float, T: float, r: float, b: float, sgma: float): 62 | return c_amer(K, K=S, T=T, r=r - b, b=-b, sgma=sgma) 63 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/financial_formulae/Black_Scholes_1973.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.special import erf # pylint: disable=no-name-in-module 3 | 4 | 5 | def N(x: float): 6 | return (1 + erf(x / np.sqrt(2))) / 2 7 | 8 | 9 | def c_euro(S: np.ndarray, K: float, T: float, r: float, b: float, sgma: float): 10 | d1 = (np.log(S / K) + (b + sgma * sgma / 2) * T) / sgma / np.sqrt(T) 11 | d2 = d1 - sgma * np.sqrt(T) 12 | return S * np.exp(b - r) * N(d1) - K * np.exp(-r * T) * N(d2) 13 | 14 | 15 | def p_euro(S: np.ndarray, K: float, T: float, r: float, b: float, sgma: float): 16 | d1 = (np.log(S / K) + (b + sgma * sgma / 2) * T) / sgma / np.sqrt(T) 17 | d2 = d1 - sgma * np.sqrt(T) 18 | return K * np.exp(-r * T) * N(-d2) - S * np.exp((b - r) * T) * N(-d1) 19 | 20 | 21 | def c_euro_with_dividend( 22 | S: np.ndarray, K: float, T: float, r: float, sgma: float, dividend_yield: float 23 | ): 24 | b = r - dividend_yield 25 | return c_euro(S, K, T, r, b, sgma) 26 | 27 | 28 | def p_euro_with_dividend( 29 | S: np.ndarray, K: float, T: float, r: float, sgma: float, dividend_yield: float 30 | ): 31 | b = r - dividend_yield 32 | return p_euro(S, K, T, r, b, sgma) 33 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/financial_formulae/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/examples/PyMPDATA_examples/utils/financial_formulae/__init__.py -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/financial_formulae/asian_option.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=line-too-long 2 | """ 3 | Closed-forms for geometric Asian options are taken from: 4 | [Derivatives Markets Appendix 19A](https://media.pearsoncmg.com/ph/bp/bridgepages/teamsite/mcdonald/McDonald-web-19-A.pdf) 5 | """ 6 | 7 | import numpy as np 8 | 9 | from .Black_Scholes_1973 import c_euro_with_dividend, p_euro_with_dividend 10 | 11 | # for fun in (c_euro_with_dividend, 
p_euro_with_dividend): 12 | # locals()['geometric_asian_average_price_'+fun.__name__[0]] = lambda **args: fun( 13 | # **{key:value for key, value in args.items() if key not in ('sgma', 'dividend_yield')}, 14 | # sgma=args['sgma']/np.sqrt(3), 15 | # dividend_yield=0.5*(args['r'] + args['dividend_yield'] + args['sgma']**2/6), 16 | # ) 17 | 18 | 19 | def geometric_asian_average_price_c(S, K, T, r, sgma, dividend_yield): 20 | return c_euro_with_dividend( 21 | S=S, 22 | K=K, 23 | T=T, 24 | r=r, 25 | sgma=sgma / np.sqrt(3), 26 | dividend_yield=0.5 * (r + dividend_yield + sgma**2 / 6), 27 | ) 28 | 29 | 30 | def geometric_asian_average_price_p(S, K, T, r, sgma, dividend_yield): 31 | return p_euro_with_dividend( 32 | S=S, 33 | K=K, 34 | T=T, 35 | r=r, 36 | sgma=sgma / np.sqrt(3), 37 | dividend_yield=0.5 * (r + dividend_yield + sgma**2 / 6), 38 | ) 39 | 40 | 41 | def geometric_asian_average_strike_c(S, K, T, r, sgma, dividend_yield): 42 | return c_euro_with_dividend( 43 | S=S, 44 | K=K, 45 | T=T, 46 | dividend_yield=dividend_yield, 47 | sgma=sgma * np.sqrt(T / 3), 48 | r=0.5 * (r + dividend_yield + sgma**2 / 6), 49 | ) 50 | 51 | 52 | def geometric_asian_average_strike_p(S, K, T, r, sgma, dividend_yield): 53 | return p_euro_with_dividend( 54 | S=S, 55 | K=K, 56 | T=T, 57 | dividend_yield=dividend_yield, 58 | sgma=sgma * np.sqrt(T / 3), 59 | r=0.5 * (r + dividend_yield + sgma**2 / 6), 60 | ) 61 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/utils/nondivergent_vector_field_2d.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from PyMPDATA import VectorField 4 | from PyMPDATA.boundary_conditions import Periodic 5 | 6 | 7 | def nondivergent_vector_field_2d(grid, size, dt, stream_function: callable, halo): 8 | dx = size[0] / grid[0] 9 | dz = size[1] / grid[1] 10 | dxX = 1 / grid[0] 11 | dzZ = 1 / grid[1] 12 | 13 | xX, zZ = x_vec_coord(grid) 14 | rho_velocity_x = ( 15 | -(stream_function(xX, zZ + dzZ / 2) - stream_function(xX, zZ - dzZ / 2)) / dz 16 | ) 17 | 18 | xX, zZ = z_vec_coord(grid) 19 | rho_velocity_z = ( 20 | stream_function(xX + dxX / 2, zZ) - stream_function(xX - dxX / 2, zZ) 21 | ) / dx 22 | 23 | GC = [rho_velocity_x * dt / dx, rho_velocity_z * dt / dz] 24 | 25 | # CFL condition 26 | for val in GC: 27 | np.testing.assert_array_less(np.abs(val), 1) 28 | 29 | result = VectorField(GC, halo=halo, boundary_conditions=(Periodic(), Periodic())) 30 | 31 | # nondivergence (of velocity field, hence dt) 32 | assert np.amax(abs(result.div((dt, dt)).get())) < 5e-9 33 | 34 | return result 35 | 36 | 37 | def x_vec_coord(grid): 38 | nx = grid[0] + 1 39 | nz = grid[1] 40 | xX = np.repeat(np.linspace(0, grid[0], nx).reshape((nx, 1)), nz, axis=1) / grid[0] 41 | assert np.amin(xX) == 0 42 | assert np.amax(xX) == 1 43 | assert xX.shape == (nx, nz) 44 | zZ = ( 45 | np.repeat(np.linspace(1 / 2, grid[1] - 1 / 2, nz).reshape((1, nz)), nx, axis=0) 46 | / grid[1] 47 | ) 48 | assert np.amin(zZ) >= 0 49 | assert np.amax(zZ) <= 1 50 | assert zZ.shape == (nx, nz) 51 | return xX, zZ 52 | 53 | 54 | def z_vec_coord(grid): 55 | nx = grid[0] 56 | nz = grid[1] + 1 57 | xX = ( 58 | np.repeat(np.linspace(1 / 2, grid[0] - 1 / 2, nx).reshape((nx, 1)), nz, axis=1) 59 | / grid[0] 60 | ) 61 | assert np.amin(xX) >= 0 62 | assert np.amax(xX) <= 1 63 | assert xX.shape == (nx, nz) 64 | zZ = np.repeat(np.linspace(0, grid[1], nz).reshape((1, nz)), nx, axis=0) / grid[1] 65 | assert np.amin(zZ) == 0 66 | assert 
np.amax(zZ) == 1 67 | assert zZ.shape == (nx, nz) 68 | return xX, zZ 69 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/wikipedia_example/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is an example based on the solid-body rotation test made for PyMPDATA Wikipedia page. 3 | 4 | demo.ipynb: 5 | .. include:: ./demo.ipynb.badges.md 6 | """ 7 | -------------------------------------------------------------------------------- /examples/PyMPDATA_examples/wikipedia_example/settings.py: -------------------------------------------------------------------------------- 1 | import numba 2 | import numpy as np 3 | from pystrict import strict 4 | 5 | grid = (100, 100) 6 | 7 | dt = 0.1 8 | dx = 1 9 | dy = 1 10 | omega = 0.1 11 | h = 4.0 12 | h0 = 1 13 | 14 | r = 15.0 * dx 15 | x0 = 50 * dx 16 | y0 = 75 * dy 17 | xc = 0.5 * grid[0] * dx 18 | yc = 0.5 * grid[1] * dy 19 | 20 | 21 | @strict 22 | class Settings: 23 | def __init__(self, n_rotations: int = 6): 24 | self.n_rotations = n_rotations 25 | 26 | @property 27 | def dt(self): 28 | return dt 29 | 30 | @property 31 | def nt(self) -> int: 32 | return int(628 * self.n_rotations) 33 | 34 | @property 35 | def size(self): 36 | return self.xrange[1], self.yrange[1] 37 | 38 | @property 39 | def xrange(self): 40 | return 0, grid[0] * dx 41 | 42 | @property 43 | def yrange(self): 44 | return 0, grid[1] * dy 45 | 46 | @property 47 | def grid(self): 48 | return grid 49 | 50 | @staticmethod 51 | @numba.njit() 52 | def pdf(x, y): 53 | tmp = (x - x0) ** 2 + (y - y0) ** 2 54 | return h0 + np.where( 55 | # if 56 | tmp - r**2 <= 0, 57 | # then 58 | h - np.sqrt(tmp / (r / h) ** 2), 59 | # else 60 | 0.0, 61 | ) 62 | 63 | @staticmethod 64 | def stream_function(xX, yY): 65 | x = xX * grid[0] * dx 66 | y = yY * grid[1] * dy 67 | return 1 / 2 * omega * ((x - xc) ** 2 + (y - yc) ** 2) 68 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0.html) 2 | [![DOI](https://zenodo.org/badge/366746474.svg)](https://zenodo.org/badge/latestdoi/366746474) 3 | 4 | [![PyPI version](https://badge.fury.io/py/PyMPDATA-examples.svg)](https://pypi.org/project/PyMPDATA-examples) 5 | [![API docs](https://img.shields.io/badge/API_docs-pdoc3-blue.svg)](https://open-atmos.github.io/PyMPDATA-examples/) 6 | 7 | For a list of examples, see [PyMPDATA-examples documentation](https://open-atmos.github.io/PyMPDATA/PyMPDATA_examples.html). 8 | 9 | For information on package development, see [PyMPDATA README](https://github.com/open-atmos/PyMPDATA/blob/main/README.md). 10 | -------------------------------------------------------------------------------- /examples/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.setuptools_scm] 2 | root = ".." 
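# with root pointing one directory up, setuptools-scm derives the version of the
# examples package from the parent PyMPDATA repository's git metadata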
3 | local_scheme = "no-local-version" 4 | version_scheme = "post-release" 5 | 6 | [build-system] 7 | requires = ['setuptools==80.3.1', 'setuptools-scm==8.3.1'] 8 | -------------------------------------------------------------------------------- /examples/setup.py: -------------------------------------------------------------------------------- 1 | """the magick behind ``pip install ...``""" 2 | 3 | import os 4 | import re 5 | 6 | from setuptools import find_packages, setup 7 | 8 | 9 | def get_long_description(): 10 | """returns contents of the pdoc landing site with pdoc links converted into URLs""" 11 | with open("docs/pympdata_examples_landing.md", "r", encoding="utf8") as file: 12 | pdoc_links = re.compile( 13 | r"(`)([\w\d_-]*).([\w\d_-]*)(`)", re.MULTILINE | re.UNICODE 14 | ) 15 | return pdoc_links.sub( 16 | r'\3', 17 | file.read(), 18 | ) 19 | 20 | 21 | CI = "CI" in os.environ 22 | 23 | setup( 24 | name="pympdata-examples", 25 | description="PyMPDATA usage examples reproducing results from literature" 26 | " and depicting how to use PyMPDATA in Python from Jupyter notebooks", 27 | install_requires=[ 28 | "PyMPDATA", 29 | "open-atmos-jupyter-utils", 30 | "pystrict", 31 | "matplotlib", 32 | "ipywidgets" + "==8.1.7" if CI else "", 33 | "scipy", 34 | "pint", 35 | "joblib", 36 | "sympy", 37 | "imageio", 38 | "meshio", 39 | "numdifftools", 40 | "pandas", 41 | ], 42 | author="https://github.com/open-atmos/PyMPDATA/graphs/contributors", 43 | license="GPL-3.0", 44 | long_description=get_long_description(), 45 | long_description_content_type="text/markdown", 46 | packages=find_packages(include=["PyMPDATA_examples", "PyMPDATA_examples.*"]), 47 | package_data={"": ["*/*/*.txt"]}, 48 | include_package_data=True, 49 | project_urls={ 50 | "Tracker": "https://github.com/open-atmos/PyMPDATA/issues", 51 | "Documentation": "https://open-atmos.github.io/PyMPDATA", 52 | "Source": "https://github.com/open-atmos/PyMPDATA", 53 | }, 54 | ) 55 | -------------------------------------------------------------------------------- /paper/fig-crop.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/paper/fig-crop.pdf -------------------------------------------------------------------------------- /paper/fig-perf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/paper/fig-perf.pdf -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.isort] 2 | profile = "black" 3 | 4 | [tool.setuptools_scm] 5 | local_scheme = "no-local-version" 6 | version_scheme = "post-release" 7 | 8 | [build-system] 9 | requires = ['setuptools==80.3.1', 'setuptools-scm==8.3.1'] 10 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | the magick behind ``pip install ...`` 3 | """ 4 | 5 | import os 6 | import platform 7 | import sys 8 | 9 | from setuptools import find_packages, setup 10 | 11 | 12 | def get_long_description(): 13 | """returns contents of README.md file""" 14 | with open("README.md", "r", encoding="utf8") as file: 15 | long_description = file.read() 16 | long_description = long_description.replace( 17 | 
"pympdata_logo.svg", "pympdata_logo.png" 18 | ) 19 | return long_description 20 | 21 | 22 | CI = "CI" in os.environ 23 | _32bit = platform.architecture()[0] == "32bit" 24 | 25 | setup( 26 | name="pympdata", 27 | description="Numba-accelerated Pythonic implementation of MPDATA " 28 | "with examples in Python, Julia, Rust and Matlab", 29 | install_requires=[ 30 | "numba" 31 | + ( 32 | { 33 | 8: "==0.58.1", 34 | 9: "==0.58.1", 35 | 10: "==0.58.1", 36 | 11: "==0.58.1", 37 | 12: "==0.59.1", 38 | 13: "==0.61.2", 39 | }[sys.version_info.minor] 40 | if CI and not _32bit 41 | else "" 42 | ), 43 | "numpy" 44 | + ( 45 | { 46 | 8: "==1.24.4", 47 | 9: "==1.24.4", 48 | 10: "==1.24.4", 49 | 11: "==1.24.4", 50 | 12: "==1.26.4", 51 | 13: "==2.2.5", 52 | }[sys.version_info.minor] 53 | if CI 54 | else "" 55 | ), 56 | "pystrict", 57 | ], 58 | extras_require={ 59 | "tests": [ 60 | "PyMPDATA-examples", 61 | "matplotlib" + (">=3.2.2" if CI else ""), 62 | "scipy" 63 | + ( 64 | { 65 | 8: "==1.10.1", 66 | 9: "==1.10.1", 67 | 10: "==1.10.1", 68 | 11: "==1.10.1", 69 | 12: "==1.13.0", 70 | 13: "==1.15.3", 71 | }[sys.version_info.minor] 72 | if CI and not _32bit 73 | else "" 74 | ), 75 | "jupyter-core" + ("<5.0.0" if CI else ""), 76 | "jupyter_client" + ("==8.6.3" if CI else ""), 77 | "ipywidgets" + ("==8.1.7" if CI else ""), 78 | "ipykernel" + ("==7.0.0a1" if CI else ""), 79 | "ghapi", 80 | "pytest", 81 | "pytest-benchmark", 82 | "joblib" + ("==1.4.0" if CI else ""), 83 | "imageio", 84 | "nbformat", 85 | ] 86 | }, 87 | author="https://github.com/open-atmos/PyMPDATA/graphs/contributors", 88 | author_email="sylwester.arabas@agh.edu.pl", 89 | license="GPL-3.0", 90 | packages=find_packages(include=["PyMPDATA", "PyMPDATA.*"]), 91 | long_description=get_long_description(), 92 | long_description_content_type="text/markdown", 93 | classifiers=[ 94 | "Development Status :: 4 - Beta", 95 | "Intended Audience :: Science/Research", 96 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 97 | "Operating System :: OS Independent", 98 | "Programming Language :: Python :: 3", 99 | "Topic :: Scientific/Engineering", 100 | "Topic :: Scientific/Engineering :: Atmospheric Science", 101 | "Topic :: Scientific/Engineering :: Mathematics", 102 | "Topic :: Scientific/Engineering :: Physics", 103 | "Topic :: Software Development :: Libraries", 104 | ], 105 | keywords="atmospheric-modelling, numba, numerical-integration, " 106 | "advection, pde-solver, advection-diffusion", 107 | project_urls={ 108 | "Tracker": "https://github.com/open-atmos/PyMPDATA/issues", 109 | "Documentation": "https://open-atmos.github.io/PyMPDATA", 110 | "Source": "https://github.com/open-atmos/PyMPDATA", 111 | }, 112 | ) 113 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/arabas_and_farhat_2020/test_black_scholes.py: -------------------------------------------------------------------------------- 
1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | from PyMPDATA_examples.Arabas_and_Farhat_2020 import Simulation 3 | from PyMPDATA_examples.Arabas_and_Farhat_2020.setup2_american_put import Settings 4 | 5 | 6 | def test_black_scholes(): 7 | # arrange 8 | settings = Settings(T=0.25, C_opt=0.02, S0=80) 9 | simulation = Simulation(settings) 10 | 11 | # act 12 | simulation.run(n_iters=2) 13 | 14 | # assert 15 | -------------------------------------------------------------------------------- /tests/smoke_tests/jarecka_et_al_2015/test_just_do_it.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import pytest 3 | from matplotlib import pylab 4 | from PyMPDATA_examples.Jarecka_et_al_2015 import Settings, Simulation, plot_output 5 | from PyMPDATA_examples.utils.error_norms import L2 6 | 7 | 8 | @pytest.mark.parametrize("n_x", (101, 100)) 9 | @pytest.mark.parametrize("n_y", (101, 100)) 10 | def test_just_do_it(n_x, n_y, plot=False): 11 | # arrange 12 | settings = Settings() 13 | settings.dx *= settings.nx / n_x 14 | settings.nx = n_x 15 | settings.dy *= settings.ny / n_y 16 | settings.ny = n_y 17 | simulation = Simulation(settings) 18 | times = (1, 3, 7) 19 | 20 | # act 21 | output = simulation.run() 22 | 23 | # plot 24 | plot_data = plot_output(times, output, settings, return_data=True) 25 | if plot: 26 | pylab.show() 27 | 28 | # assert 29 | for item in plot_data.values(): 30 | assert 2 ** L2(item["h_numeric"], item["h_analytic"], nt=settings.nt) < 5e-3 31 | assert 2 ** L2(item["q_h_numeric"], item["q_h_analytic"], nt=settings.nt) < 5e-2 32 | -------------------------------------------------------------------------------- /tests/smoke_tests/jaruga_et_al_2015/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/jaruga_et_al_2015/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/jaruga_et_al_2015/test_boussinesq.py: -------------------------------------------------------------------------------- 1 | """tests for buoyant-bubble test case from Fig. 3 in [Smolarkiewicz & Pudykiewicz 2 | 1992](https://doi.org/10.1175/1520-0469(1992)049%3C2082:ACOSLA%3E2.0.CO;2), 3 | as in libmpdata++ paper ([Jaruga et al. 2015](https://doi.org/10.5194/gmd-8-1005-2015), Fig. 
19) 4 | """ 5 | 6 | # pylint: disable=missing-class-docstring,missing-function-docstring 7 | 8 | from pathlib import Path 9 | 10 | import numpy as np 11 | import pytest 12 | from open_atmos_jupyter_utils import notebook_vars 13 | from PyMPDATA_examples import Jaruga_et_al_2015 14 | 15 | PLOT = False 16 | 17 | 18 | @pytest.fixture(scope="session", name="variables") 19 | def variables_fixture(): 20 | return notebook_vars( 21 | file=Path(Jaruga_et_al_2015.__file__).parent / "fig19.ipynb", 22 | plot=PLOT, 23 | ) 24 | 25 | 26 | class TestFig19: 27 | @staticmethod 28 | def test_maximal_theta(variables): 29 | max_at_t0 = variables["SETUP"].Tht_ref + variables["SETUP"].Tht_dlt 30 | acceptable_overshoot = 1e-4 31 | assert ( 32 | max_at_t0 < np.amax(variables["output"]) < max_at_t0 + acceptable_overshoot 33 | ) 34 | 35 | @staticmethod 36 | def test_minimal_theta(variables): 37 | min_at_t0 = variables["SETUP"].Tht_ref 38 | acceptable_undershoot = 5e-3 39 | assert ( 40 | min_at_t0 - acceptable_undershoot < np.amin(variables["output"]) < min_at_t0 41 | ) 42 | 43 | @staticmethod 44 | @pytest.mark.parametrize( 45 | "area", 46 | ( 47 | (slice(0, 20), slice(None)), 48 | (slice(80, None), slice(None)), 49 | (slice(None), slice(0, 30)), 50 | (slice(None), slice(70, None)), 51 | ), 52 | ) 53 | def test_theta_at_domain_edges_equal_to_reference_value(area, variables): 54 | psi_at_last_step = variables["output"][-1, :, :] 55 | np.testing.assert_allclose( 56 | actual=psi_at_last_step[area], 57 | desired=variables["SETUP"].Tht_ref, 58 | rtol=1.5e-5, 59 | ) 60 | -------------------------------------------------------------------------------- /tests/smoke_tests/jaruga_et_al_2015/test_libmpdata_refdata.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | from PyMPDATA_examples.Molenkamp_test_as_in_Jaruga_et_al_2015_Fig_12.analysis import ( 5 | fig_12_data, 6 | ) 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def data(): 11 | return fig_12_data() 12 | 13 | 14 | # pylint: disable-next=redefined-outer-name 15 | def test_upwind(data): 16 | sut = data[0] 17 | assert np.amin(sut) == 1 18 | np.testing.assert_approx_equal(np.amax(sut), 4.796, significant=4) 19 | 20 | 21 | # pylint: disable-next=redefined-outer-name 22 | def test_2_nonosc(data): 23 | sut = data[1] 24 | np.testing.assert_approx_equal(np.amin(sut), 1) 25 | np.testing.assert_approx_equal(np.amax(sut), 3.52544410, significant=2) 26 | 27 | 28 | # pylint: disable-next=redefined-outer-name 29 | def test_3_nonosc_tot(data): 30 | sut = data[2] 31 | np.testing.assert_approx_equal(np.amin(sut), 1) 32 | np.testing.assert_approx_equal(np.amax(sut), 4.26672894, significant=2) 33 | 34 | 35 | # pylint: disable-next=redefined-outer-name 36 | def test_2_nonosc_iga(data): 37 | sut = data[3] 38 | np.testing.assert_approx_equal(np.amin(sut), 1) 39 | np.testing.assert_approx_equal(np.amax(sut), 4.25518091, significant=2) 40 | -------------------------------------------------------------------------------- /tests/smoke_tests/kinematic_2d/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/kinematic_2d/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/kinematic_2d/test_single_timestep.py: 
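# overview of the test below: a 75x75 kinematic-2D setup in which the advector is
# derived from a stream function via nondivergent_vector_field_2d, a dry-air density
# profile enters as a non-unit g_factor, and the "th" and "qv" advectees are advanced
# a single timestep under a range of Options, asserting that the results stay finite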
-------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | from PyMPDATA_examples.utils import nondivergent_vector_field_2d 5 | 6 | from PyMPDATA import Options, ScalarField, Solver, Stepper 7 | from PyMPDATA.boundary_conditions import Periodic 8 | 9 | GRID = (75, 75) 10 | SIZE = (1500, 1500) 11 | TIMESTEP = 1 12 | RHOD_W_MAX = 0.6 13 | 14 | 15 | def stream_function(x_01, z_01): 16 | x_span = SIZE[0] 17 | return ( 18 | -RHOD_W_MAX * x_span / np.pi * np.sin(np.pi * z_01) * np.cos(2 * np.pi * x_01) 19 | ) 20 | 21 | 22 | def rhod_of_z(arg): 23 | return 1 - arg * 1e-4 24 | 25 | 26 | RHOD = np.repeat( 27 | rhod_of_z((np.arange(GRID[1]) + 1 / 2) / GRID[1]).reshape((1, GRID[1])), 28 | GRID[0], 29 | axis=0, 30 | ) 31 | 32 | VALUES = {"th": np.full(GRID, 300), "qv": np.full(GRID, 0.001)} 33 | 34 | 35 | @pytest.mark.parametrize( 36 | "options", 37 | ( 38 | Options(n_iters=1), 39 | Options(n_iters=2), 40 | Options(n_iters=2, nonoscillatory=True), 41 | Options(n_iters=3, nonoscillatory=True), 42 | Options(n_iters=2, nonoscillatory=True, infinite_gauge=True), 43 | Options(nonoscillatory=True, infinite_gauge=True, third_order_terms=True), 44 | Options(nonoscillatory=False, infinite_gauge=True), 45 | Options(nonoscillatory=False, third_order_terms=True), 46 | Options(nonoscillatory=False, infinite_gauge=True, third_order_terms=True), 47 | ), 48 | ) 49 | def test_single_timestep(options): 50 | # Arrange 51 | stepper = Stepper(options=options, grid=GRID, non_unit_g_factor=True) 52 | advector = nondivergent_vector_field_2d( 53 | GRID, SIZE, TIMESTEP, stream_function, options.n_halo 54 | ) 55 | g_factor = ScalarField( 56 | RHOD.astype(dtype=options.dtype), 57 | halo=options.n_halo, 58 | boundary_conditions=(Periodic(), Periodic()), 59 | ) 60 | mpdatas = {} 61 | for key, value in VALUES.items(): 62 | advectee = ScalarField( 63 | np.full(GRID, value, dtype=options.dtype), 64 | halo=options.n_halo, 65 | boundary_conditions=(Periodic(), Periodic()), 66 | ) 67 | mpdatas[key] = Solver( 68 | stepper=stepper, advectee=advectee, advector=advector, g_factor=g_factor 69 | ) 70 | 71 | # Act 72 | for mpdata in mpdatas.values(): 73 | mpdata.advance(n_steps=1) 74 | 75 | # Assert 76 | for value in mpdatas.values(): 77 | assert np.isfinite(value.advectee.get()).all() 78 | -------------------------------------------------------------------------------- /tests/smoke_tests/magnuszewski_et_al_2025/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/magnuszewski_et_al_2025/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/magnuszewski_et_al_2025/test_figs_1_2.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for Asian (path-dependent) option pricing example using 2D advection-diffusion PDE 3 | """ 4 | 5 | from pathlib import Path 6 | 7 | import numpy as np 8 | import pytest 9 | from open_atmos_jupyter_utils import notebook_vars 10 | from PyMPDATA_examples import Magnuszewski_et_al_2025 11 | 12 | PLOT = False 13 | 14 | 15 | @pytest.fixture(scope="session", name="variables") 16 | def _variables_fixture(): 17 | return notebook_vars( 18 | file=Path(Magnuszewski_et_al_2025.__file__).parent / "figs.ipynb", 19 
| plot=PLOT, 20 | ) 21 | 22 | 23 | def _datasets(variables): 24 | return { 25 | "mc": variables["arithmetic_by_mc"], 26 | "upwind": variables["output"]["UPWIND"][-1][:, 0], 27 | "mpdata_2it": variables["output"]["MPDATA (2 it.)"][-1][:, 0], 28 | "mpdata_4it": variables["output"]["MPDATA (4 it.)"][-1][:, 0], 29 | "kemna-vorst": variables["geometric_price"], 30 | "black-scholes": variables["euro_price"], 31 | } 32 | 33 | 34 | class TestFigs: 35 | """basic assertions for Fig 1 and Fig 2 data and axes""" 36 | 37 | @staticmethod 38 | @pytest.mark.parametrize( 39 | "x_or_y, l_or_r, fmt", 40 | ( 41 | ("x", 0, "-0.5"), 42 | ("y", 0, "-0.5"), 43 | ("x", 1, "{grid_minus_half[0]}"), 44 | ("y", 1, "{grid_minus_half[1]}"), 45 | ), 46 | ) 47 | def test_fig_1_axis_ranges(variables, x_or_y, l_or_r, fmt): 48 | """ 49 | checks if both X and Y axes start at -dx/2, -dy/2, respectively""" 50 | for axs in variables["fig1_axs"]: 51 | assert str(getattr(axs, f"get_{x_or_y}lim")()[l_or_r]) == fmt.format( 52 | grid_minus_half=(variables["grid"][0] - 0.5, variables["grid"][1] - 0.5) 53 | ) 54 | 55 | @staticmethod 56 | @pytest.mark.parametrize( 57 | "lower, higher", 58 | ( 59 | ("mpdata_4it", "upwind"), 60 | ("mpdata_2it", "upwind"), 61 | ("mc", "black-scholes"), # European analytic above UPWIND 62 | ("kemna-vorst", "mc"), 63 | ("mc", "upwind"), 64 | ), 65 | ) 66 | def test_fig_2_order_of_lines(variables, lower, higher): 67 | """checks if a given set of points is above/below another one""" 68 | data = _datasets(variables) 69 | assert (data[lower] <= data[higher]).all() 70 | 71 | @staticmethod 72 | @pytest.mark.parametrize( 73 | "key", 74 | ("mc", "upwind", "mpdata_2it", "mpdata_4it", "kemna-vorst", "black-scholes"), 75 | ) 76 | def test_fig_2_all_datasets_monotonic(variables, key): 77 | """checks if all points within a dataset constitute a monotonically increasing set""" 78 | data = _datasets(variables) 79 | assert (np.diff(data[key]) >= 0).all() 80 | -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/olesik_et_al_2022/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/convergence_refdata.txt: -------------------------------------------------------------------------------- 1 | 64.0 64.0 64.0 160.0 160.0 160.0 256.0 256.0 256.0 2 | 0.15 0.5 0.85 0.15 0.5 0.85 0.15 0.5 0.85 -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/test_discretisation.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-function-docstring,invalid-name 2 | import numpy as np 3 | import pint 4 | import pytest 5 | from matplotlib import pyplot 6 | from PyMPDATA_examples.Olesik_et_al_2022.coordinates import x_id, x_log_of_pn, x_p2 7 | from PyMPDATA_examples.Olesik_et_al_2022.East_and_Marshall_1954 import SizeDistribution 8 | from PyMPDATA_examples.utils.discretisation import discretised_analytical_solution 9 | from scipy import integrate 10 | 11 | 12 | def diff(x): 13 | return np.diff(x.magnitude) * x.units 14 | 15 | 16 | @pytest.mark.parametrize("grid", [x_id(), x_log_of_pn(r0=1), x_p2()]) 17 | @pytest.mark.parametrize("coord", [x_id(), 
x_log_of_pn(r0=1), x_p2()]) 18 | def test_size_distribution(grid, coord, plot=False): 19 | # Arrange 20 | si = pint.UnitRegistry() 21 | sd = SizeDistribution(si) 22 | n_unit = si.centimetres**-3 / si.micrometre 23 | r_unit = si.micrometre 24 | 25 | # Act 26 | x = grid.x(np.linspace(1, 18, 100)) * r_unit 27 | dx_dr = coord.dx_dr 28 | numpdfx = x[1:] - diff(x) / 2 29 | 30 | def pdf_t(r): 31 | return sd.pdf(r * r_unit).to(n_unit).magnitude / dx_dr(r * r_unit).magnitude 32 | 33 | numpdfy = discretised_analytical_solution(rh=x.magnitude, pdf_t=pdf_t) * n_unit 34 | 35 | # Plot 36 | if plot: 37 | # Fig 3 from East & Marshall 1954 38 | si.setup_matplotlib() 39 | pyplot.plot(numpdfx, numpdfy, label="cdf") 40 | pyplot.plot( 41 | numpdfx, pdf_t(numpdfx.magnitude) * n_unit, label="pdf", linestyle="--" 42 | ) 43 | pyplot.legend() 44 | pyplot.gca().yaxis.set_units(1 / si.centimetre**3 / si.micrometre) 45 | pyplot.show() 46 | 47 | # Assert 48 | totalpdf = np.sum(numpdfy * (diff(x))) 49 | integratedpdf, _ = integrate.quad(pdf_t, x[0].magnitude, x[-1].magnitude) 50 | print(totalpdf, integratedpdf) 51 | np.testing.assert_array_almost_equal( 52 | totalpdf.magnitude, integratedpdf # pylint: disable=no-member 53 | ) 54 | 55 | 56 | def test_quad_vs_midpoint(): 57 | # Arrange 58 | x = np.linspace(0, np.pi, 3) 59 | a = discretised_analytical_solution( 60 | x, np.sin, midpoint_value=True, r=x[:-1] + np.diff(x) / 2 61 | ) 62 | b = discretised_analytical_solution(x, np.sin, midpoint_value=False) 63 | test_val = np.abs((a - b) / a) 64 | # Assert 65 | assert (test_val > 0.05).all() 66 | assert (test_val < 0.1).all() 67 | -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/test_moment_of_r_integral.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pint 4 | import pytest 5 | from PyMPDATA_examples.Olesik_et_al_2022.coordinates import x_id, x_log_of_pn, x_p2 6 | 7 | si = pint.UnitRegistry() 8 | 9 | 10 | @pytest.mark.parametrize("k", [0, 1, 2, 3]) 11 | @pytest.mark.parametrize("coord", [x_id(), x_log_of_pn(r0=1 * si.um, n=1), x_p2()]) 12 | def test_moment_of_r_integral(k, coord): 13 | # Arrange 14 | r_0 = 2 * si.um 15 | r_1 = 4 * si.um 16 | 17 | # Act 18 | with np.errstate(divide="ignore", invalid="ignore"): 19 | integral = coord.moment_of_r_integral( 20 | coord.x(r_1), k 21 | ) - coord.moment_of_r_integral(coord.x(r_0), k) 22 | 23 | # Assert 24 | if coord.__class__ == x_id: 25 | assert integral.check(f"[length]**{k+1}") 26 | elif coord.__class__ == x_p2: 27 | assert integral.check(f"[length]**{k+2}") 28 | elif coord.__class__ == x_log_of_pn: 29 | assert integral.check(f"[length]**{k}") 30 | -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/test_simulation.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import warnings 3 | 4 | import numpy as np 5 | import pytest 6 | from numba.core.errors import NumbaExperimentalFeatureWarning 7 | from PyMPDATA_examples.Olesik_et_al_2022.analysis import compute_figure_data 8 | from PyMPDATA_examples.Olesik_et_al_2022.coordinates import x_id, x_log_of_pn, x_p2 9 | from PyMPDATA_examples.Olesik_et_al_2022.settings import ( 10 | Settings, 11 | default_GC_max, 
12 | default_nr, 13 | ) 14 | from PyMPDATA_examples.Olesik_et_al_2022.simulation import Simulation 15 | 16 | from PyMPDATA.options import Options 17 | 18 | settings = Settings() 19 | grid_layout_set = (x_id(), x_p2(), x_log_of_pn(r0=1, n=1)) 20 | opt_set = ( 21 | {"n_iters": 1}, 22 | {"n_iters": 2, "nonoscillatory": True}, 23 | { 24 | "n_iters": 3, 25 | "third_order_terms": True, 26 | "infinite_gauge": True, 27 | "nonoscillatory": True, 28 | }, 29 | ) 30 | 31 | 32 | @pytest.fixture(scope="module") 33 | def data(): 34 | with warnings.catch_warnings(): 35 | warnings.simplefilter("ignore", category=NumbaExperimentalFeatureWarning) 36 | result, _ = compute_figure_data( 37 | nr=default_nr, 38 | GC_max=default_GC_max, 39 | psi_coord=x_id(), 40 | grid_layouts=grid_layout_set, 41 | opt_set=opt_set, 42 | ) 43 | return result 44 | 45 | 46 | @pytest.mark.parametrize( 47 | "psi_coord", [x_id(), x_p2(), x_log_of_pn(r0=1 * settings.si.um, n=1)] 48 | ) 49 | @pytest.mark.parametrize("grid_layout", [x_id(), x_p2(), x_log_of_pn(r0=1, n=3)]) 50 | @pytest.mark.parametrize("nonoscillatory", [False, True]) 51 | def test_init(grid_layout, psi_coord, nonoscillatory): 52 | # Arrange 53 | opts = Options(nonoscillatory=nonoscillatory) 54 | 55 | # Act 56 | simulation = Simulation( 57 | settings, 58 | grid_layout=grid_layout, 59 | GC_max=default_GC_max, 60 | psi_coord=psi_coord, 61 | opts=opts, 62 | ) 63 | simulation.step(1) 64 | 65 | # Asserts for array shapes 66 | assert simulation.n_of_r.shape[0] == settings.nr 67 | 68 | # Asserts for Jacobian 69 | g_factor_with_halo = simulation.solver.g_factor.data 70 | assert np.isfinite(g_factor_with_halo).all() 71 | if isinstance(psi_coord, type(grid_layout)): 72 | np.testing.assert_array_almost_equal(np.diff(g_factor_with_halo), 0) 73 | else: 74 | assert (np.diff(g_factor_with_halo) >= 0).all() or ( 75 | np.diff(g_factor_with_halo) <= 0 76 | ).all() 77 | 78 | 79 | @pytest.mark.parametrize("grid_layout", grid_layout_set) 80 | @pytest.mark.parametrize("opts", opt_set) 81 | # pylint: disable-next=redefined-outer-name 82 | def test_n_finite(grid_layout, opts, data): 83 | # Arrange 84 | grid_layout_str = grid_layout.__class__.__name__ 85 | psi = data[grid_layout_str]["numerical"][str(opts)][-1].magnitude 86 | 87 | # Assert 88 | assert np.isfinite(psi).all() 89 | assert 69 < np.amax(psi) < 225 90 | 91 | 92 | @pytest.mark.parametrize("grid_layout", grid_layout_set) 93 | @pytest.mark.parametrize("opts", opt_set) 94 | # pylint: disable-next=redefined-outer-name 95 | def test_error_norm_finite(grid_layout, opts, data): 96 | # Arrange 97 | grid_layout_str = grid_layout.__class__.__name__ 98 | sut = data[grid_layout_str]["error_L2"][str(opts)] 99 | 100 | # Assert 101 | assert np.isfinite(sut).all() 102 | -------------------------------------------------------------------------------- /tests/smoke_tests/olesik_et_al_2022/test_wall_time.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | from PyMPDATA_examples.Olesik_et_al_2022 import wall_time 3 | 4 | 5 | def test_wall_time(): 6 | wall_time.test_wall_time() 7 | -------------------------------------------------------------------------------- /tests/smoke_tests/smolarkiewicz_1983/test_against_libmpdata_refdata.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy 
as np 3 | import pytest 4 | from PyMPDATA_examples.Smolarkiewicz_1984 import Settings, Simulation 5 | 6 | from PyMPDATA import Options 7 | 8 | # https://github.com/igfuw/libmpdataxx/blob/master/tests/paper_2015_GMD/4_revolving_sphere_3d/... 9 | STATS = { 10 | # ...refdata/stats_upwind.txt.gz 11 | Options(n_iters=1): { 12 | 0: { 13 | "min(solution)": 0.00000000, 14 | "max(solution)": 4.00000000, 15 | }, 16 | 566: { 17 | "max(solution)": 1.72131033, 18 | "min(solution)": 0.00000000, 19 | "Linf": 3.38441916, 20 | "L2": 0.00567238, 21 | "L1": 0.00128141, 22 | }, 23 | }, 24 | # ...refdata/stats_basic.txt.gz 25 | Options(n_iters=2): { 26 | 0: { 27 | "min(solution)": 0.00000000, 28 | "max(solution)": 4.00000000, 29 | }, 30 | 556: { 31 | "max(solution)": 4.94170863, 32 | "min(solution)": 0.00000000, 33 | "Linf": 2.92022414, 34 | "L2": 0.00367407, 35 | "L1": 0.00065310, 36 | }, 37 | }, 38 | # ...refdata/stats_fct.txt.gz 39 | Options(n_iters=2, nonoscillatory=True): { 40 | 0: { 41 | "min(solution)": 0.00000000, 42 | "max(solution)": 4.00000000, 43 | }, 44 | 556: { 45 | "max(solution)": 3.99999989, 46 | "min(solution)": 0.00000000, 47 | "Linf": 2.90357355, 48 | "L2": 0.00365567, 49 | "L1": 0.00064920, 50 | }, 51 | }, 52 | # ...refdata/stats_iga.txt.gz 53 | Options(n_iters=2, infinite_gauge=True): { 54 | 0: { 55 | "min(solution)": 0.00000000, 56 | "max(solution)": 4.00000000, 57 | }, 58 | 556: { 59 | "max(solution)": 6.16075462, 60 | "min(solution)": -1.01495101, 61 | "Linf": 2.94529169, 62 | "L2": 0.00328204, 63 | "L1": 0.00064378, 64 | }, 65 | }, 66 | # ...refdata/stats_iga_fct.txt.gz 67 | Options(n_iters=2, infinite_gauge=True, nonoscillatory=True): { 68 | 0: {"min(solution)": 0.00000000, "max(solution)": 4.00000000}, 69 | 556: { 70 | "max(solution)": 3.99999978, 71 | "min(solution)": 0.00000000, 72 | "Linf": 2.74523808, 73 | "L2": 0.00281070, 74 | "L1": 0.00038398, 75 | }, 76 | }, 77 | } 78 | 79 | SETTINGS = Settings(n=59 + 1, dt=0.018 * 2 * np.pi) 80 | 81 | 82 | @pytest.mark.parametrize( 83 | "options", tuple(pytest.param(opt, id=str(opt)) for opt in STATS) 84 | ) 85 | def test_against_libmpdata_refdata(options): 86 | # arrange 87 | simulation = Simulation(SETTINGS, options) 88 | actual = {} 89 | 90 | # act 91 | steps_done = 0 92 | for timesteps in STATS[options]: 93 | simulation.run(nt=timesteps - steps_done) 94 | steps_done += timesteps 95 | psi = simulation.solver.advectee.get() 96 | absdiff = np.abs(psi - SETTINGS.advectee) 97 | volume = np.prod(SETTINGS.grid) 98 | time = steps_done * SETTINGS.dt 99 | actual[steps_done] = { 100 | "min(solution)": np.amin(psi), 101 | "max(solution)": np.amax(psi), 102 | } 103 | if steps_done > 0: 104 | actual[steps_done]["Linf"] = np.amax(absdiff) 105 | actual[steps_done]["L1"] = 1 / time * (1 / volume * np.sum((absdiff) ** 1)) 106 | actual[steps_done]["L2"] = ( 107 | 1 / time * (1 / volume * np.sum((absdiff) ** 2)) ** 0.5 108 | ) 109 | 110 | # assert 111 | for step in STATS[options].keys(): 112 | for stat in STATS[options][step].keys(): 113 | np.testing.assert_approx_equal( 114 | desired=STATS[options][step][stat], 115 | actual=actual[step][stat], 116 | significant=1, # TODO #96 117 | err_msg=stat, 118 | ) 119 | -------------------------------------------------------------------------------- /tests/smoke_tests/smolarkiewicz_2006/test_run_all.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import 
pytest 4 | from PyMPDATA_examples.Smolarkiewicz_2006_Figs_3_4_10_11_12.settings import Settings 5 | from PyMPDATA_examples.Smolarkiewicz_2006_Figs_3_4_10_11_12.simulation import Simulation 6 | 7 | from PyMPDATA.options import Options 8 | 9 | 10 | class TestSmolarkiewicz2006: 11 | dtypes = (np.float32, np.float64) 12 | 13 | @staticmethod 14 | @pytest.mark.parametrize("dtype", dtypes) 15 | def test_fig3(dtype: np.floating): 16 | # Arrange 17 | simulation = Simulation(Settings("cosine"), Options(n_iters=1, dtype=dtype)) 18 | psi_0 = simulation.state 19 | 20 | # Act 21 | simulation.run() 22 | psi_t = simulation.state 23 | 24 | # Assert 25 | epsilon = 1e-20 26 | assert psi_t.dtype == dtype 27 | assert np.amin(psi_0) == 0 28 | assert np.amax(psi_0) == 2 29 | assert 0 < np.amin(psi_t) < epsilon 30 | assert 0.45 < np.amax(psi_t) < 0.5 31 | 32 | @staticmethod 33 | @pytest.mark.parametrize("dtype", dtypes) 34 | def test_fig4(dtype: np.floating): 35 | # Arrange 36 | simulation = Simulation(Settings("cosine"), Options(n_iters=2, dtype=dtype)) 37 | psi_0 = simulation.state 38 | 39 | # Act 40 | simulation.run() 41 | psi_t = simulation.state 42 | 43 | # Assert 44 | epsilon = 1e-20 45 | assert psi_t.dtype == dtype 46 | assert np.amin(psi_0) == 0 47 | assert np.amax(psi_0) == 2 48 | assert 0 < np.amin(psi_t) < epsilon 49 | assert 1.3 < np.amax(psi_t) < 1.4 50 | 51 | @staticmethod 52 | @pytest.mark.parametrize("dtype", dtypes) 53 | def test_fig10(dtype: np.floating): 54 | # Arrange 55 | simulation = Simulation( 56 | Settings("cosine"), Options(infinite_gauge=True, n_iters=2, dtype=dtype) 57 | ) 58 | 59 | # Act 60 | simulation.run() 61 | psi_t = simulation.state 62 | 63 | # Assert 64 | assert psi_t.dtype == dtype 65 | assert -0.1 < np.amin(psi_t) < 0 66 | assert 1.75 < np.amax(psi_t) < 1.9 67 | 68 | @staticmethod 69 | @pytest.mark.parametrize("dtype", dtypes) 70 | def test_fig11(dtype: np.floating): 71 | # Arrange 72 | simulation = Simulation( 73 | Settings("rect"), Options(infinite_gauge=True, n_iters=2, dtype=dtype) 74 | ) 75 | 76 | # Act 77 | simulation.run() 78 | psi_t = simulation.state 79 | 80 | # Assert 81 | assert psi_t.dtype == dtype 82 | assert -1.9 < np.amin(psi_t) < 2 83 | assert 4 < np.amax(psi_t) < 4.2 84 | 85 | @staticmethod 86 | @pytest.mark.parametrize("dtype", dtypes) 87 | def test_fig12(dtype: np.floating): 88 | # Arrange 89 | simulation = Simulation( 90 | Settings("rect"), 91 | Options(n_iters=2, infinite_gauge=True, nonoscillatory=True, dtype=dtype), 92 | ) 93 | 94 | # Act 95 | simulation.run() 96 | psi_t = simulation.state 97 | 98 | # Assert 99 | assert psi_t.dtype == dtype 100 | assert np.amin(psi_t) >= 2 101 | assert np.amax(psi_t) <= 4 102 | assert np.amax(psi_t) > 3 103 | -------------------------------------------------------------------------------- /tests/smoke_tests/timing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/smoke_tests/timing/__init__.py -------------------------------------------------------------------------------- /tests/smoke_tests/timing/conftest.py: -------------------------------------------------------------------------------- 1 | """pytest fixtures for timing tests""" 2 | 3 | import numba 4 | import pytest 5 | 6 | __num_threads = [pytest.param(1, id="serial")] 7 | 8 | try: 9 | numba.parfors.parfor.ensure_parallel_support() 10 | n = numba.config.NUMBA_NUM_THREADS # pylint: disable=no-member 11 | assert n != 1 12 | 
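# parallel support confirmed and more than one thread configured, so register an
# additional fixture parametrisation covering the multi-threaded benchmark case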
__num_threads.append(pytest.param(n, id=f"threads ({n})")) 13 | except numba.core.errors.UnsupportedParforsError: 14 | pass 15 | 16 | 17 | @pytest.fixture(params=__num_threads, name="num_threads") 18 | def num_threads_fixture(request): 19 | """pytest fixture providing thread-pool size for tests: single-thread case 20 | for setups in which Numba reports no parallel support, and single- 21 | as well as multi-threaded test runs otherwise. For the multi-threaded 22 | case, the number of threads is set to NUMBA_NUM_THREADS.""" 23 | return request.param 24 | -------------------------------------------------------------------------------- /tests/smoke_tests/timing/test_timing_1d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | from PyMPDATA_examples.Smolarkiewicz_2006_Figs_3_4_10_11_12.settings import Settings 5 | from PyMPDATA_examples.Smolarkiewicz_2006_Figs_3_4_10_11_12.simulation import Simulation 6 | 7 | from PyMPDATA.options import Options 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "options", 12 | [ 13 | Options(n_iters=1), 14 | Options(n_iters=2), 15 | Options(n_iters=3), 16 | Options(n_iters=4), 17 | Options(n_iters=2, infinite_gauge=True), 18 | Options(n_iters=3, infinite_gauge=True), 19 | Options(n_iters=2, nonoscillatory=True), 20 | Options(n_iters=3, nonoscillatory=True), 21 | Options(n_iters=2, divergent_flow=True), 22 | Options(n_iters=3, divergent_flow=True), 23 | Options(n_iters=2, third_order_terms=True), 24 | Options(n_iters=3, third_order_terms=True), 25 | ], 26 | ) 27 | # pylint: disable-next=redefined-outer-name 28 | def test_timing_1d(benchmark, options): 29 | simulation = Simulation(Settings("cosine"), options) 30 | psi0 = simulation.stepper.advectee.get().copy() 31 | 32 | def set_psi(): 33 | simulation.stepper.advectee.get()[:] = psi0 34 | 35 | benchmark.pedantic(simulation.run, {}, setup=set_psi, warmup_rounds=1, rounds=3) 36 | 37 | print(np.amin(simulation.state), np.amax(simulation.state)) 38 | if not options.infinite_gauge: 39 | assert np.amin(simulation.state) >= 0 40 | -------------------------------------------------------------------------------- /tests/smoke_tests/timing/test_timing_3d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numba 3 | import numpy as np 4 | import pytest 5 | from PyMPDATA_examples.Smolarkiewicz_1984 import Settings, Simulation 6 | 7 | from PyMPDATA import Options 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "options", 12 | [ 13 | {"n_iters": 1}, 14 | # TODO #96 15 | # {'n_iters': 2}, 16 | # {'n_iters': 3, 'infinite_gauge': True}, 17 | # {'n_iters': 2, 'infinite_gauge': True, 'nonoscillatory': True}, 18 | # {'n_iters': 3, 'infinite_gauge': False, 'third_order_terms': True}, 19 | # {'n_iters': 3, 'infinite_gauge': True, 'third_order_terms': True, 'nonoscillatory': True}, 20 | ], 21 | ) 22 | @pytest.mark.parametrize("dtype", (np.float64,)) 23 | @pytest.mark.parametrize("static", (True, False)) 24 | # pylint: disable-next=redefined-outer-name 25 | def test_timing_3d(benchmark, options, dtype, static, num_threads): 26 | numba.set_num_threads(num_threads) 27 | 28 | settings = Settings(n=20, dt=1) 29 | simulation = Simulation(settings, Options(**options, dtype=dtype), static=static) 30 | 31 | def reset(): 32 | 
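# benchmark setup callback: restore the initial spherical signal so that each
# measured round starts from the same advectee state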
simulation.solver.advectee.get()[:] = settings.advectee 33 | 34 | n_steps = 10 35 | benchmark.pedantic( 36 | simulation.run, (n_steps,), setup=reset, warmup_rounds=1, rounds=1 37 | ) 38 | -------------------------------------------------------------------------------- /tests/smoke_tests/utils/test_financial_formulae.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | # pylint: disable=too-many-arguments,invalid-name 3 | import numpy as np 4 | import pytest 5 | from PyMPDATA_examples.utils.financial_formulae import Black_Scholes_1973 as BS73 6 | from PyMPDATA_examples.utils.financial_formulae import asian_option as AO 7 | 8 | 9 | class TestFinancialFormulae: 10 | @staticmethod 11 | @pytest.mark.parametrize( 12 | "funs", 13 | ( 14 | {"normal": BS73.c_euro, "with_dividend": BS73.c_euro_with_dividend}, 15 | {"normal": BS73.p_euro, "with_dividend": BS73.p_euro_with_dividend}, 16 | ), 17 | ) 18 | @pytest.mark.parametrize("price", (np.array([95, 100, 105]),)) 19 | @pytest.mark.parametrize("strike", (100, 10)) 20 | @pytest.mark.parametrize("time_to_maturity", (1, 0.5)) 21 | @pytest.mark.parametrize("risk_free_rate", (0.05, 0.001)) 22 | @pytest.mark.parametrize("volatility", (0.2, 0.5)) 23 | @pytest.mark.parametrize("dividend_yield", (0.02, 0)) 24 | def test_black_scholes_with_dividend( 25 | funs: dict, 26 | price, 27 | strike, 28 | time_to_maturity, 29 | risk_free_rate, 30 | volatility, 31 | dividend_yield, 32 | ): 33 | common_args = { 34 | "S": price, 35 | "K": strike, 36 | "T": time_to_maturity, 37 | "sgma": volatility, 38 | "r": risk_free_rate, 39 | } 40 | price_dividend = funs["with_dividend"]( 41 | dividend_yield=dividend_yield, **common_args 42 | ) 43 | price_normal = funs["normal"](b=risk_free_rate - dividend_yield, **common_args) 44 | assert np.allclose(price_dividend, price_normal) 45 | 46 | @staticmethod 47 | @pytest.mark.parametrize( 48 | "fun, expected_value", 49 | ( 50 | (AO.geometric_asian_average_price_c, 3.246), 51 | (AO.geometric_asian_average_price_p, 2.026), 52 | (AO.geometric_asian_average_strike_c, 3.725), 53 | (AO.geometric_asian_average_strike_p, 1.869), 54 | ), 55 | ) 56 | def test_asian_geometric_average(fun: callable, expected_value): 57 | """ 58 | Analytic results are taken from [Derivatives Markets]( 59 | https://faculty.ksu.edu.sa/sites/default/files/derivatives_markets_3e_0.pdf) page 413 60 | """ 61 | price = fun(S=40, K=40, T=1, r=0.08, sgma=0.3, dividend_yield=0) 62 | assert np.allclose(price, expected_value, rtol=1e-3) 63 | -------------------------------------------------------------------------------- /tests/unit_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-atmos/PyMPDATA/4b57c09e22d2162963d05249e1b2124c0e5b2747/tests/unit_tests/__init__.py -------------------------------------------------------------------------------- /tests/unit_tests/conftest.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-function-docstring 2 | import numba 3 | import pytest 4 | 5 | __n_threads = (1, 2, 3) 6 | try: 7 | numba.parfors.parfor.ensure_parallel_support() 8 | except numba.core.errors.UnsupportedParforsError: 9 | __n_threads = (1,) 10 | 11 | 12 | @pytest.fixture(params=__n_threads, name="n_threads") 13 | def n_threads_fixture(request): 14 | return request.param 15 | 
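# a minimal sketch (not a file from the repository) of how the `n_threads` fixture
# defined above is consumed: pytest injects it via the argument name, and Stepper
# accepts the thread-pool size through its `n_threads` keyword (as in the
# Smolarkiewicz_1984 example's Simulation class):

def test_stepper_accepts_thread_pool_size(n_threads):  # hypothetical test name
    from PyMPDATA import Options, Stepper

    stepper = Stepper(options=Options(n_iters=1), grid=(8, 8), n_threads=n_threads)
    assert stepper is not None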
-------------------------------------------------------------------------------- /tests/unit_tests/quick_look.py: -------------------------------------------------------------------------------- 1 | """utility routines for plotting 2D fields""" 2 | 3 | import numpy as np 4 | from matplotlib import colors, pyplot 5 | 6 | from PyMPDATA import ScalarField, VectorField 7 | from PyMPDATA.impl.field import Field 8 | 9 | 10 | def quick_look(field: Field, plot: bool = True): 11 | """plots either scalar or vector field together with halo region 12 | rendering arrows in a staggered-grid-aware manner""" 13 | halo = field.halo 14 | grid = field.grid 15 | pyplot.title(f"{grid=} {halo=} class={field.__class__.__name__}") 16 | if isinstance(field, ScalarField): 17 | norm = colors.Normalize(vmin=np.amin(field.get()), vmax=np.amax(field.get())) 18 | pyplot.imshow( 19 | X=field.data.T, 20 | origin="lower", 21 | extent=(-halo, grid[0] + halo, -halo, grid[1] + halo), 22 | cmap="gray", 23 | norm=norm, 24 | ) 25 | pyplot.colorbar() 26 | elif isinstance(field, VectorField): 27 | arrow_colors = ("green", "blue") 28 | quiver_common_kwargs = {"pivot": "mid", "width": 0.005} 29 | max_in_domain = [np.amax(field.get_component(i)) for i in (0, 1)] 30 | arrows_xy = ( 31 | np.mgrid[ 32 | -(halo - 1) : grid[0] + 1 + (halo - 1) : 1, 33 | 1 / 2 - halo : grid[1] + halo : 1, 34 | ], 35 | np.mgrid[ 36 | 1 / 2 - halo : grid[0] + halo : 1, 37 | -(halo - 1) : grid[1] + 1 + (halo - 1) : 1, 38 | ], 39 | ) 40 | pyplot.xlim(-halo, grid[0] + halo) 41 | pyplot.ylim(-halo, grid[1] + halo) 42 | for dim in (0, 1): 43 | pyplot.quiver( 44 | *arrows_xy[dim], 45 | field.data[dim].flatten() / max_in_domain[dim] if dim == 0 else 0, 46 | field.data[dim].flatten() / max_in_domain[dim] if dim == 1 else 0, 47 | color=arrow_colors[dim], 48 | **quiver_common_kwargs, 49 | ) 50 | for i, val in enumerate(field.data[dim].flatten()): 51 | if np.isfinite(val): 52 | continue 53 | pyplot.annotate( 54 | text="NaN", 55 | xy=( 56 | arrows_xy[dim][0].flatten()[i], 57 | arrows_xy[dim][1].flatten()[i], 58 | ), 59 | ha="center", 60 | va="center", 61 | color=arrow_colors[dim], 62 | ) 63 | else: 64 | assert False 65 | pyplot.hlines( 66 | y=range(-halo, grid[1] + 1 + halo), 67 | xmin=-halo, 68 | xmax=grid[0] + halo, 69 | color="r", 70 | linewidth=0.5, 71 | ) 72 | pyplot.vlines( 73 | x=range(-halo, grid[0] + 1 + halo), 74 | ymin=-halo, 75 | ymax=grid[1] + halo, 76 | color="r", 77 | linewidth=0.5, 78 | ) 79 | pyplot.hlines(y=range(grid[1] + 1), xmin=0, xmax=grid[0], color="r", linewidth=3) 80 | pyplot.vlines(x=range(grid[0] + 1), ymin=0, ymax=grid[1], color="r", linewidth=3) 81 | for i, x_y in enumerate(("x", "y")): 82 | getattr(pyplot, f"{x_y}ticks")( 83 | np.linspace(-halo + 0.5, grid[i] + halo - 0.5, grid[i] + 2 * halo) 84 | ) 85 | pyplot.xlabel("x/dx (outer dim)") 86 | pyplot.ylabel("y/dy (inner dim)") 87 | pyplot.grid(linestyle=":") 88 | if plot: 89 | pyplot.show() 90 | else: 91 | pyplot.clf() 92 | -------------------------------------------------------------------------------- /tests/unit_tests/test_boundary_condition_extrapolated_1d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import warnings 3 | 4 | import numpy as np 5 | import pytest 6 | from numba.core.errors import NumbaExperimentalFeatureWarning 7 | from scipy import interpolate 8 | 9 | from PyMPDATA import Options, ScalarField, VectorField 10 | from 
PyMPDATA.boundary_conditions import Extrapolated 11 | from PyMPDATA.impl.enumerations import MAX_DIM_NUM 12 | from PyMPDATA.impl.traversals import Traversals 13 | 14 | JIT_FLAGS = Options().jit_flags 15 | 16 | 17 | class TestBoundaryConditionExtrapolated: 18 | @staticmethod 19 | @pytest.mark.parametrize("halo", (1, 2, 3, 4)) 20 | @pytest.mark.parametrize( 21 | "data", 22 | ( 23 | np.array([11, 12, 13, 14], dtype=float), 24 | np.array([11, 12, 13, 14], dtype=complex), 25 | np.array([1, 2, 3, 4], dtype=float), 26 | np.array([1, 2, 3, 4], dtype=complex), 27 | ), 28 | ) 29 | def test_1d_scalar(data, halo, n_threads=1): 30 | # arrange 31 | boundary_conditions = (Extrapolated(),) 32 | field = ScalarField(data, halo, boundary_conditions) 33 | # pylint:disable=duplicate-code 34 | traversals = Traversals( 35 | grid=field.grid, 36 | halo=halo, 37 | jit_flags=JIT_FLAGS, 38 | n_threads=n_threads, 39 | left_first=tuple([True] * MAX_DIM_NUM), 40 | buffer_size=0, 41 | ) 42 | field.assemble(traversals) 43 | meta_and_data, fill_halos = field.impl 44 | sut = traversals._code["fill_halos_scalar"] # pylint:disable=protected-access 45 | 46 | # act 47 | thread_id = 0 48 | with warnings.catch_warnings(): 49 | warnings.simplefilter("ignore", category=NumbaExperimentalFeatureWarning) 50 | sut(thread_id, *meta_and_data, fill_halos, traversals.data.buffer) 51 | 52 | # assert 53 | extrapolator = interpolate.interp1d( 54 | np.linspace(halo, len(data) - 1 + halo, len(data)), 55 | data, 56 | fill_value="extrapolate", 57 | ) 58 | np.testing.assert_array_equal( 59 | field.data[0:halo], np.maximum(extrapolator(np.arange(halo)), 0) 60 | ) 61 | np.testing.assert_array_equal( 62 | field.data[-halo:], 63 | np.maximum( 64 | extrapolator( 65 | np.linspace(len(data) + halo, len(data) + 2 * halo - 1, halo) 66 | ), 67 | 0, 68 | ), 69 | ) 70 | 71 | @staticmethod 72 | @pytest.mark.parametrize("data", (np.array([0, 2, 3, 0], dtype=float),)) 73 | @pytest.mark.parametrize("halo", (2, 3, 4)) 74 | def test_1d_vector(data, halo, n_threads=1): 75 | # arrange 76 | boundary_condition = (Extrapolated(),) 77 | field = VectorField((data,), halo, boundary_condition) 78 | # pylint:disable=duplicate-code 79 | traversals = Traversals( 80 | grid=field.grid, 81 | halo=halo, 82 | jit_flags=JIT_FLAGS, 83 | n_threads=n_threads, 84 | left_first=tuple([True] * MAX_DIM_NUM), 85 | buffer_size=0, 86 | ) 87 | field.assemble(traversals) 88 | meta_and_data, fill_halos = field.impl 89 | meta_and_data = ( 90 | meta_and_data[0], 91 | (meta_and_data[1], meta_and_data[2], meta_and_data[3]), 92 | ) 93 | sut = traversals._code["fill_halos_vector"] # pylint:disable=protected-access 94 | 95 | # act 96 | thread_id = 0 97 | with warnings.catch_warnings(): 98 | warnings.simplefilter("ignore", category=NumbaExperimentalFeatureWarning) 99 | sut(thread_id, *meta_and_data, fill_halos, traversals.data.buffer) 100 | 101 | # assert 102 | assert (field.data[0][0 : halo - 1] == data[0]).all() 103 | assert (field.data[0][-(halo - 1) :] == data[-1]).all() 104 | -------------------------------------------------------------------------------- /tests/unit_tests/test_boundary_condition_extrapolated_2d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from PyMPDATA import Options, ScalarField, VectorField 7 | from PyMPDATA.boundary_conditions import Constant, Extrapolated 8 | from PyMPDATA.impl.enumerations 
import MAX_DIM_NUM 9 | from PyMPDATA.impl.traversals import Traversals 10 | from tests.unit_tests.quick_look import quick_look 11 | 12 | JIT_FLAGS = Options().jit_flags 13 | 14 | 15 | class TestBoundaryConditionExtrapolated2D: 16 | @staticmethod 17 | @pytest.mark.parametrize("n_threads", (1, 2)) 18 | @pytest.mark.parametrize("n_halo", (1, 2)) 19 | @pytest.mark.parametrize( 20 | "boundary_conditions", 21 | ( 22 | (Extrapolated(0), Extrapolated(-1)), 23 | (Constant(0), Extrapolated(-1)), 24 | (Extrapolated(0), Constant(0)), 25 | ), 26 | ) 27 | def test_scalar_field( 28 | n_threads: int, n_halo: int, boundary_conditions: tuple, plot=False 29 | ): 30 | # arrange 31 | advectee = ScalarField( 32 | data=np.asarray([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]), 33 | boundary_conditions=boundary_conditions, 34 | halo=n_halo, 35 | ) 36 | traversals = Traversals( 37 | grid=advectee.grid, 38 | halo=n_halo, 39 | jit_flags=JIT_FLAGS, 40 | n_threads=n_threads, 41 | left_first=tuple([True] * MAX_DIM_NUM), 42 | buffer_size=0, 43 | ) 44 | advectee.assemble(traversals) 45 | 46 | # act / plot 47 | quick_look(advectee, plot) 48 | advectee._debug_fill_halos( # pylint:disable=protected-access 49 | traversals, range(n_threads) 50 | ) 51 | quick_look(advectee, plot) 52 | 53 | # assert 54 | assert np.isfinite(advectee.data).all() 55 | 56 | @staticmethod 57 | @pytest.mark.parametrize("n_threads", (1, 2)) 58 | @pytest.mark.parametrize("n_halo", (1, 2)) 59 | @pytest.mark.parametrize( 60 | "boundary_conditions", 61 | ( 62 | (Extrapolated(0), Extrapolated(-1)), 63 | (Constant(0), Extrapolated(-1)), 64 | (Extrapolated(0), Constant(0)), 65 | ), 66 | ) 67 | def test_vector_field( 68 | n_threads: int, n_halo: int, boundary_conditions: tuple, plot=False 69 | ): 70 | # arrange 71 | advector = VectorField( 72 | data=( 73 | np.asarray([[-1, 1.0], [2.0, 3.0], [4.0, 5.0]]), 74 | np.asarray([[-1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]), 75 | ), 76 | boundary_conditions=boundary_conditions, 77 | halo=n_halo, 78 | ) 79 | 80 | traversals = Traversals( 81 | grid=advector.grid, 82 | halo=n_halo, 83 | jit_flags=JIT_FLAGS, 84 | n_threads=n_threads, 85 | left_first=tuple([True] * MAX_DIM_NUM), 86 | buffer_size=0, 87 | ) 88 | advector.assemble(traversals) 89 | 90 | # act / plot 91 | quick_look(advector, plot) 92 | advector._debug_fill_halos( # pylint:disable=protected-access 93 | traversals, range(n_threads) 94 | ) 95 | quick_look(advector, plot) 96 | 97 | # assert 98 | for component in advector.data: 99 | assert np.isfinite(component).all() 100 | -------------------------------------------------------------------------------- /tests/unit_tests/test_clock.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import time 3 | 4 | import numba 5 | 6 | from PyMPDATA.impl.clock import clock 7 | from PyMPDATA.options import Options 8 | 9 | jit_flags = Options().jit_flags 10 | 11 | 12 | class TestClock: 13 | @staticmethod 14 | def test_clock_python(): 15 | clock() 16 | 17 | @staticmethod 18 | def test_clock_numba_jit(): 19 | @numba.jit(**{**jit_flags, "forceobj": True}) 20 | def test(): 21 | clock() 22 | 23 | test() 24 | 25 | @staticmethod 26 | def test_clock_numba_njit(): 27 | @numba.njit(**jit_flags) 28 | def test(): 29 | clock() 30 | 31 | test() 32 | 33 | @staticmethod 34 | def test_clock_value(): 35 | # Arrange 36 | factor = 4 37 | base = 0.5 38 | 39 | sec_base = None 40 | warmup = 1 41 | for _ in range(warmup + 1): 42 | start = 
clock() 43 | time.sleep(base) 44 | sec_base = clock() - start 45 | 46 | # Act 47 | start = clock() 48 | time.sleep(base * factor) 49 | sec_factor = clock() - start 50 | 51 | # Assert 52 | assert abs(sec_factor / sec_base / factor - 1) < 0.5 53 | -------------------------------------------------------------------------------- /tests/unit_tests/test_diffusion_only_2d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | 4 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 5 | from PyMPDATA.boundary_conditions import Periodic 6 | 7 | 8 | def test_diffusion_only_2d( 9 | data0=np.array([[0, 0, 0], [0, 1.0, 0], [0, 0, 0]]), mu_coeff=(0.1, 0.1), n_steps=1 10 | ): 11 | # Arrange 12 | options = Options(non_zero_mu_coeff=True) 13 | boundary_conditions = tuple([Periodic()] * 2) 14 | advectee = ScalarField(data0, options.n_halo, boundary_conditions) 15 | advector = VectorField( 16 | data=( 17 | np.zeros((data0.shape[0] + 1, data0.shape[1])), 18 | np.zeros((data0.shape[0], data0.shape[1] + 1)), 19 | ), 20 | halo=options.n_halo, 21 | boundary_conditions=boundary_conditions, 22 | ) 23 | solver = Solver( 24 | stepper=Stepper(options=options, grid=data0.shape), 25 | advector=advector, 26 | advectee=advectee, 27 | ) 28 | 29 | # Act 30 | solver.advance(n_steps=n_steps, mu_coeff=mu_coeff) 31 | 32 | # Assert 33 | data1 = solver.advectee.get() 34 | np.testing.assert_almost_equal(actual=np.sum(data1), desired=np.sum(data0)) 35 | assert np.amax(data0) > np.amax(data1) 36 | assert np.amin(data1) >= 0 37 | assert np.count_nonzero(data1) == 5 38 | -------------------------------------------------------------------------------- /tests/unit_tests/test_domain_decomposition.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import pytest 3 | 4 | from PyMPDATA import Options 5 | from PyMPDATA.impl.domain_decomposition import make_subdomain 6 | 7 | JIT_FLAGS = Options().jit_flags 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "span, rank, size, result", 12 | [ 13 | (10, 0, 1, (0, 10)), 14 | pytest.param(1, 1, 1, (0, 1), marks=pytest.mark.xfail(raises=ValueError)), 15 | (10, 0, 3, (0, 4)), 16 | (10, 1, 3, (4, 8)), 17 | (10, 2, 3, (8, 10)), 18 | (10, 0, 11, (0, 1)), 19 | (10, 9, 11, (9, 10)), 20 | ], 21 | ) 22 | def test_subdomain(span, rank, size, result): 23 | subdomain = make_subdomain(JIT_FLAGS) 24 | assert subdomain(span, rank, size) == result 25 | -------------------------------------------------------------------------------- /tests/unit_tests/test_dpdc.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | 5 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 6 | from PyMPDATA.boundary_conditions import Periodic 7 | 8 | 9 | @pytest.mark.parametrize("n_iters", [2, 3, 4]) 10 | def test_double_pass_donor_cell(n_iters): 11 | courant = 0.5 12 | 13 | options = Options(n_iters=n_iters, DPDC=True, nonoscillatory=True) 14 | state = np.array([0, 1, 0], dtype=options.dtype) 15 | boundary_conditions = (Periodic(),) 16 | 17 | mpdata = Solver( 18 | stepper=Stepper(options=options, n_dims=state.ndim, non_unit_g_factor=False), 19 | 
advectee=ScalarField( 20 | state, halo=options.n_halo, boundary_conditions=boundary_conditions 21 | ), 22 | advector=VectorField( 23 | (np.full(state.shape[0] + 1, courant, dtype=options.dtype),), 24 | halo=options.n_halo, 25 | boundary_conditions=boundary_conditions, 26 | ), 27 | ) 28 | steps = 1 29 | 30 | conserved = np.sum(mpdata.advectee.get()) 31 | mpdata.advance(steps) 32 | 33 | assert np.sum(mpdata.advectee.get()) == conserved 34 | -------------------------------------------------------------------------------- /tests/unit_tests/test_formulae_upwind.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import warnings 3 | 4 | import numpy as np 5 | from numba.core.errors import NumbaExperimentalFeatureWarning 6 | 7 | from PyMPDATA import Options, ScalarField, VectorField 8 | from PyMPDATA.boundary_conditions import Periodic 9 | from PyMPDATA.impl.enumerations import IMPL_BC, IMPL_META_AND_DATA, MAX_DIM_NUM 10 | from PyMPDATA.impl.formulae_upwind import make_upwind 11 | from PyMPDATA.impl.meta import _Impl 12 | from PyMPDATA.impl.traversals import Traversals 13 | 14 | 15 | def test_formulae_upwind(): 16 | # Arrange 17 | psi_data = np.array((0.0, 1, 0)) 18 | flux_data = np.array((0.0, 0, 1, 0)) 19 | 20 | options = Options() 21 | halo = options.n_halo 22 | traversals = Traversals( 23 | grid=psi_data.shape, 24 | halo=halo, 25 | jit_flags=options.jit_flags, 26 | n_threads=1, 27 | left_first=tuple([True] * MAX_DIM_NUM), 28 | buffer_size=0, 29 | ) 30 | upwind = make_upwind( 31 | options=options, non_unit_g_factor=False, traversals=traversals 32 | ) 33 | 34 | boundary_conditions = (Periodic(),) 35 | 36 | psi = ScalarField(psi_data, halo, boundary_conditions) 37 | psi.assemble(traversals) 38 | psi_impl = psi.impl 39 | 40 | flux = VectorField((flux_data,), halo, boundary_conditions) 41 | flux.assemble(traversals) 42 | flux_impl = flux.impl 43 | 44 | # Act 45 | with warnings.catch_warnings(): 46 | warnings.simplefilter("ignore", category=NumbaExperimentalFeatureWarning) 47 | upwind( 48 | traversals.data, 49 | _Impl(field=psi_impl[IMPL_META_AND_DATA], bc=psi_impl[IMPL_BC]), 50 | _Impl(field=flux_impl[IMPL_META_AND_DATA], bc=flux_impl[IMPL_BC]), 51 | _Impl( 52 | field=traversals.data.null_scalar_field[IMPL_META_AND_DATA], 53 | bc=traversals.data.null_scalar_field[IMPL_BC], 54 | ), 55 | ) 56 | 57 | # Assert 58 | np.testing.assert_array_equal(psi.get(), np.roll(psi_data, 1)) 59 | -------------------------------------------------------------------------------- /tests/unit_tests/test_grid.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import pytest 3 | 4 | from PyMPDATA import Options 5 | from PyMPDATA.impl.domain_decomposition import make_subdomain 6 | from PyMPDATA.impl.grid import make_chunk, make_domain 7 | from PyMPDATA.impl.meta import META_N_INNER, META_N_MID3D, META_N_OUTER, META_SIZE 8 | 9 | meta = [None] * META_SIZE 10 | meta[META_N_OUTER] = 200 11 | meta[META_N_MID3D] = 0 12 | meta[META_N_INNER] = 2000 13 | meta = tuple(meta) 14 | 15 | JIT_FLAGS = Options().jit_flags 16 | 17 | 18 | class TestStaticGrid: 19 | @staticmethod 20 | def test_make_grid_static(): 21 | # arrange 22 | grid = (100, 1000) 23 | assert grid[0] != meta[META_N_OUTER] 24 | assert grid[0] != meta[META_N_INNER] 25 | 26 | # act 27 | grid_fun = 
make_domain(grid, jit_flags=JIT_FLAGS) 28 | 29 | # assert 30 | assert grid == grid_fun(meta) 31 | 32 | @staticmethod 33 | def test_make_grid_dynamic(): 34 | # arrange 35 | grid = (0,) 36 | 37 | # act 38 | grid_fun = make_domain(grid, jit_flags=JIT_FLAGS) 39 | 40 | # assert 41 | assert (meta[META_N_OUTER], meta[META_N_MID3D], meta[META_N_INNER]) == grid_fun( 42 | meta 43 | ) 44 | 45 | @staticmethod 46 | @pytest.mark.parametrize("span", (3, 30, 300)) 47 | @pytest.mark.parametrize("n_threads", (1, 2, 3)) 48 | def test_make_irng_static(span, n_threads): 49 | # arrange 50 | assert span != meta[META_N_OUTER] 51 | subdomain = make_subdomain(JIT_FLAGS) 52 | 53 | # act 54 | irng_fun = make_chunk(span=span, n_threads=n_threads, jit_flags=JIT_FLAGS) 55 | 56 | # assert 57 | for thread_id in range(n_threads): 58 | assert subdomain(span, thread_id, n_threads) == irng_fun(meta, thread_id) 59 | 60 | @staticmethod 61 | @pytest.mark.parametrize("n_threads", (1, 2, 3)) 62 | def test_make_irng_dynamic(n_threads): 63 | # arrange 64 | span = 0 65 | subdomain = make_subdomain(JIT_FLAGS) 66 | 67 | # act 68 | irng_fun = make_chunk(span=span, n_threads=n_threads, jit_flags=JIT_FLAGS) 69 | 70 | # assert 71 | for thread_id in range(n_threads): 72 | assert subdomain(meta[META_N_OUTER], thread_id, n_threads) == irng_fun( 73 | meta, thread_id 74 | ) 75 | -------------------------------------------------------------------------------- /tests/unit_tests/test_scalar_field.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | 4 | from PyMPDATA import ScalarField 5 | from PyMPDATA.boundary_conditions import Periodic 6 | 7 | 8 | class TestScalarField: 9 | @staticmethod 10 | def test_1d_contiguous(): 11 | grid = (44,) 12 | data = np.empty(grid) 13 | boundary_conditions = (Periodic(),) 14 | sut = ScalarField(data, halo=1, boundary_conditions=boundary_conditions) 15 | assert sut.get().data.contiguous 16 | 17 | @staticmethod 18 | def test_2d_first_dim_not_contiguous(): 19 | grid = (44, 44) 20 | data = np.empty(grid) 21 | boundary_conditions = (Periodic(), Periodic()) 22 | sut = ScalarField(data, halo=1, boundary_conditions=boundary_conditions) 23 | assert not sut.get()[:, 0].data.contiguous 24 | 25 | @staticmethod 26 | def test_2d_second_dim_contiguous(): 27 | grid = (44, 44) 28 | data = np.empty(grid) 29 | boundary_conditions = (Periodic(), Periodic()) 30 | sut = ScalarField(data, halo=1, boundary_conditions=boundary_conditions) 31 | assert sut.get()[0, :].data.contiguous 32 | -------------------------------------------------------------------------------- /tests/unit_tests/test_shared_advector.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | 4 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 5 | from PyMPDATA.boundary_conditions import Periodic 6 | 7 | 8 | def test_shared_advector(): 9 | n_x = 100 10 | arr = np.zeros(n_x) 11 | opt1 = Options(n_iters=2, DPDC=True) 12 | opt2 = Options(n_iters=2) 13 | b_c = (Periodic(),) 14 | 15 | halo = opt1.n_halo 16 | assert opt2.n_halo == halo 17 | 18 | advector = VectorField( 19 | data=(np.zeros(n_x + 1),), halo=halo, boundary_conditions=b_c 20 | ) 21 | _ = Solver( 22 | stepper=Stepper(options=opt1, grid=(n_x,)), 23 | advectee=ScalarField(data=arr, 
halo=halo, boundary_conditions=b_c), 24 | advector=advector, 25 | ) 26 | solver = Solver( 27 | stepper=Stepper(options=opt2, grid=(n_x,)), 28 | advectee=ScalarField(data=arr, halo=halo, boundary_conditions=b_c), 29 | advector=advector, 30 | ) 31 | solver.advance(1) 32 | -------------------------------------------------------------------------------- /tests/unit_tests/test_solver.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | 5 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 6 | from PyMPDATA.boundary_conditions import Periodic 7 | 8 | BCS = (Periodic(),) 9 | 10 | 11 | @pytest.mark.parametrize( 12 | "case", 13 | ( 14 | {"g_factor": None, "non_zero_mu_coeff": True, "mu": None}, 15 | {"g_factor": None, "non_zero_mu_coeff": True, "mu": (0,)}, 16 | pytest.param( 17 | {"g_factor": None, "non_zero_mu_coeff": False, "mu": (0,)}, 18 | marks=pytest.mark.xfail(strict=True), 19 | ), 20 | pytest.param( 21 | { 22 | "g_factor": ScalarField(np.asarray([1.0, 1]), Options().n_halo, BCS), 23 | "non_zero_mu_coeff": True, 24 | "mu": None, 25 | }, 26 | marks=pytest.mark.xfail(strict=True), 27 | ), 28 | ), 29 | ) 30 | def test_mu_arg_handling(case): 31 | opt = Options(non_zero_mu_coeff=case["non_zero_mu_coeff"]) 32 | advector = VectorField((np.asarray([1.0, 2, 3]),), opt.n_halo, BCS) 33 | advectee = ScalarField(np.asarray([4.0, 5]), opt.n_halo, BCS) 34 | stepper = Stepper(options=opt, n_dims=1) 35 | sut = Solver(stepper, advectee, advector, case["g_factor"]) 36 | 37 | sut.advance(1, mu_coeff=case["mu"]) 38 | -------------------------------------------------------------------------------- /tests/unit_tests/test_stepper.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,unused-argument,too-many-arguments,protected-access,invalid-name 2 | from functools import lru_cache 3 | 4 | import numba 5 | import numpy as np 6 | import pytest 7 | 8 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 9 | from PyMPDATA.boundary_conditions import Periodic 10 | 11 | 12 | def instantiate_solver(*, b_c, buf_size=0): 13 | n_x = 10 14 | opt = Options(n_iters=1) 15 | advector = VectorField( 16 | data=(np.zeros(n_x + 1),), halo=opt.n_halo, boundary_conditions=b_c 17 | ) 18 | solver = Solver( 19 | stepper=Stepper(options=opt, grid=(n_x,), buffer_size=buf_size), 20 | advectee=ScalarField( 21 | data=np.zeros(n_x), halo=opt.n_halo, boundary_conditions=b_c 22 | ), 23 | advector=advector, 24 | ) 25 | return solver 26 | 27 | 28 | class TestStepper: 29 | @staticmethod 30 | def test_zero_steps(): 31 | # arrange 32 | solver = instantiate_solver(b_c=(Periodic(),)) 33 | 34 | # act 35 | time_per_step = solver.advance(0) 36 | 37 | # assert 38 | assert not np.isfinite(time_per_step) 39 | 40 | @staticmethod 41 | @pytest.mark.parametrize( 42 | "buffer_size", 43 | ( 44 | 0, 45 | 1, 46 | 2, 47 | ), 48 | ) 49 | def test_buffer(buffer_size): 50 | # arrange 51 | VALUE = 44 52 | 53 | class Custom: 54 | @lru_cache() 55 | def make_scalar(self, *args): 56 | @numba.njit 57 | def fill_halos(buffer, i_rng, j_rng, k_rng, psi, span, sign): 58 | buffer[:] = VALUE 59 | 60 | return fill_halos 61 | 62 | @lru_cache() 63 | def make_vector(self, *args): 64 | @numba.njit 65 | def fill_halos(buffer, i_rng, j_rng, k_rng, 
comp, psi, span, sign): 66 | buffer[:] = VALUE 67 | 68 | return fill_halos 69 | 70 | solver = instantiate_solver(b_c=(Custom(),), buf_size=buffer_size) 71 | 72 | # act 73 | solver.advance(1) 74 | 75 | # assert 76 | buf = solver._Solver__stepper.traversals.data.buffer 77 | assert (buf == VALUE).all() 78 | assert buf.size == buffer_size 79 | -------------------------------------------------------------------------------- /tests/unit_tests/test_upwind_1d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | 4 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 5 | from PyMPDATA.boundary_conditions import Periodic 6 | 7 | 8 | def test_upwind_1d(): 9 | state = np.array([0, 1, 0]) 10 | courant = 1 11 | 12 | options = Options(n_iters=1) 13 | mpdata = Solver( 14 | stepper=Stepper( 15 | options=options, n_dims=len(state.shape), non_unit_g_factor=False 16 | ), 17 | advectee=ScalarField( 18 | state.astype(options.dtype), 19 | halo=options.n_halo, 20 | boundary_conditions=(Periodic(),), 21 | ), 22 | advector=VectorField( 23 | (np.full(state.shape[0] + 1, courant, dtype=options.dtype),), 24 | halo=options.n_halo, 25 | boundary_conditions=(Periodic(),), 26 | ), 27 | ) 28 | n_steps = 5 29 | 30 | conserved = np.sum(mpdata.advectee.get()) 31 | mpdata.advance(n_steps) 32 | 33 | assert np.sum(mpdata.advectee.get()) == conserved 34 | -------------------------------------------------------------------------------- /tests/unit_tests/test_upwind_2d.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring 2 | import numpy as np 3 | import pytest 4 | 5 | from PyMPDATA import Options, ScalarField, Solver, Stepper, VectorField 6 | from PyMPDATA.boundary_conditions import Periodic 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "shape, ij0, out, courant_number", 11 | [ 12 | pytest.param((3, 1), (1, 0), np.array([[0.0], [0.0], [44.0]]), (1.0, 0.0)), 13 | pytest.param((1, 3), (0, 1), np.array([[0.0, 0.0, 44.0]]), (0.0, 1.0)), 14 | pytest.param((1, 3), (0, 1), np.array([[44.0, 0.0, 0.0]]), (0.0, -1.0)), 15 | pytest.param( 16 | (3, 3), 17 | (1, 1), 18 | np.array([[0, 0, 0], [0, 0, 22], [0.0, 22.0, 0.0]]), 19 | (0.5, 0.5), 20 | ), 21 | pytest.param( 22 | (3, 3), 23 | (1, 1), 24 | np.array([[0, 0, 0], [0, 0, 22], [0.0, 22.0, 0.0]]), 25 | (0.5, 0.5), 26 | ), 27 | ], 28 | ) 29 | def test_upwind(shape, ij0, out, courant_number): 30 | value = 44 31 | scalar_field_init = np.zeros(shape) 32 | scalar_field_init[ij0] = value 33 | 34 | vector_field_init = ( 35 | np.full((shape[0] + 1, shape[1]), courant_number[0]), 36 | np.full((shape[0], shape[1] + 1), courant_number[1]), 37 | ) 38 | options = Options(n_iters=1) 39 | 40 | bcs = (Periodic(), Periodic()) 41 | advectee = ScalarField( 42 | scalar_field_init, halo=options.n_halo, boundary_conditions=bcs 43 | ) 44 | advector = VectorField( 45 | vector_field_init, halo=options.n_halo, boundary_conditions=bcs 46 | ) 47 | 48 | mpdata = Solver( 49 | stepper=Stepper(options=options, grid=shape, n_threads=1), 50 | advector=advector, 51 | advectee=advectee, 52 | ) 53 | mpdata.advance(n_steps=1) 54 | 55 | np.testing.assert_array_equal(mpdata.advectee.get(), out) 56 | --------------------------------------------------------------------------------
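For reference, the expected arrays hard-coded in test_upwind_2d.py follow from a single donor-cell (upwind) step: with an impulse of 44 in one cell and Courant numbers (0.5, 0.5), half of the impulse leaves through each downwind face, so the source cell ends at 44 - 22 - 22 = 0 while each downwind neighbour gains 22, and the total stays 44. A standalone arithmetic check of that bookkeeping, written in plain NumPy and not part of the repository:

# hand-computed donor-cell balance for the (0.5, 0.5) case of test_upwind_2d.py (illustrative, not repository code)
import numpy as np

psi = np.zeros((3, 3))
psi[1, 1] = 44.0
courant = (0.5, 0.5)  # courant[0] advects along the outer (row) dimension, courant[1] along the inner one

out = psi.copy()
out[1, 1] -= (courant[0] + courant[1]) * psi[1, 1]  # outflow through both downwind faces of the impulse cell
out[2, 1] += courant[0] * psi[1, 1]                 # inflow into the downwind cell along the outer dimension
out[1, 2] += courant[1] * psi[1, 1]                 # inflow into the downwind cell along the inner dimension

expected = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 22.0], [0.0, 22.0, 0.0]])
assert np.array_equal(out, expected)
assert out.sum() == psi.sum()  # mass is conserved, as the 1D tests also assert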