├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── continuous-integration.yml
│       └── release.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE.rst
├── README.md
├── docs
│   ├── .nojekyll
│   ├── index.html
│   └── source
│       ├── Makefile
│       ├── conf.py
│       └── index.rst
├── joss
│   ├── paper.bib
│   └── paper.md
├── pyproject.toml
├── pyspod
│   ├── __init__.py
│   ├── emulation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── neural_nets.py
│   ├── pod
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── standard.py
│   │   └── utils.py
│   ├── spod
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── standard.py
│   │   ├── streaming.py
│   │   └── utils.py
│   └── utils
│       ├── __init__.py
│       ├── errors.py
│       ├── io.py
│       ├── parallel.py
│       ├── plotting_support
│       │   ├── coast.mat
│       │   └── coast_centred.mat
│       ├── postproc.py
│       ├── reader.py
│       └── weights.py
├── readme
│   ├── MEI.png
│   └── PySPOD_logo2.png
├── setup.cfg
├── setup.py
├── tests
│   ├── data
│   │   ├── earthquakes_data.nc
│   │   ├── era_interim_data.nc
│   │   ├── fluidmechanics_data.mat
│   │   ├── input.yaml
│   │   ├── input_optional.yaml
│   │   ├── input_postproc_2d.yaml
│   │   ├── input_postproc_3d.yaml
│   │   ├── input_spod.yaml
│   │   ├── input_tutorial1.yaml
│   │   └── input_tutorial2.yaml
│   ├── test_emulation.py
│   ├── test_pod_parallel.py
│   ├── test_pod_serial.py
│   ├── test_spod_parallel.py
│   ├── test_spod_serial.py
│   ├── test_tutorials.py
│   ├── test_utils_parallel.py
│   └── test_utils_serial.py
├── tox.ini
└── tutorials
    ├── README.md
    ├── climate
    │   ├── ERA20C_MEI_2D
    │   │   ├── E20C_MONTHLYMEAN00_1900_2010_MEI.py
    │   │   ├── ERA20C_MEI_2D.ipynb
    │   │   └── ERA20C_MEI_2D.py
    │   └── ERA20C_QBO_3D
    │       ├── E20C_MONTHLYMEAN00_1900_2010_U131128_3D.py
    │       ├── ERA20C_QBO_3D.ipynb
    │       └── ERA20C_QBO_3D.py
    ├── tutorial1
    │   ├── tutorial1.ipynb
    │   └── tutorial1.py
    └── tutorial2
        ├── tutorial2.ipynb
        └── tutorial2.py
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: github-actions
4 | directory: /
5 | schedule:
6 | interval: weekly
7 |
--------------------------------------------------------------------------------
/.github/workflows/continuous-integration.yml:
--------------------------------------------------------------------------------
1 | name: continuous-integration
2 |
3 | on:
4 | schedule:
5 | - cron: '0 3 * * 0'
6 | push:
7 | branches:
8 | - main
9 | - io
10 | pull_request:
11 | branches:
12 | - main
13 | - io
14 | workflow_dispatch:
15 |
16 | jobs:
17 |
18 | test:
19 | runs-on: ${{ matrix.os }}
20 | timeout-minutes: 240
21 | strategy:
22 | fail-fast: false
23 | matrix:
24 | os:
25 | - ubuntu-latest
26 | - macos-latest
27 | mpi:
28 | - mpich
29 | - openmpi
30 | py:
31 | - "3.8"
32 | - "3.9"
33 | - "3.10"
34 | - "3.11"
35 | - "3.12"
36 | exclude:
37 | - os: macos-latest
38 | py: "3.8"
39 |
40 | steps:
41 | - name: Configure hostname
42 | if: runner.os == 'Linux' || runner.os == 'macOS'
43 | run: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null
44 |
45 | - name: Checkout
46 | uses: actions/checkout@v4
47 |
48 | - name: Setup MPI (${{ matrix.mpi }})
49 | uses: mpi4py/setup-mpi@v1
50 | with:
51 | mpi: ${{ matrix.mpi }}
52 |
53 | - name: Use Python ${{ matrix.py }}
54 | uses: actions/setup-python@v5
55 | with:
56 | python-version: ${{ matrix.py }}
57 | architecture: ${{ startsWith(matrix.os, 'macos-') && 'arm64' || 'x64' }}
58 |
59 | - name: Install
60 | run: python -m pip install .[mpi,test,ai] pytest-cov
61 |
62 | - name: Test mpiexec_pod
63 | run: mpiexec -n 2 python -m coverage run tests/test_pod_parallel.py
64 |
65 | - name: Test mpiexec_spod
66 | run: mpiexec -n 2 python -m coverage run tests/test_spod_parallel.py
67 |
68 | - name: Test mpiexec_utils
69 | run: mpiexec -n 2 python -m coverage run tests/test_utils_parallel.py
70 |
71 | - name: Coverage combine
72 | run: coverage combine
73 |
74 | - name: Coverage report
75 | run: coverage report
76 |
77 | - name: Pytest --with-mpi
78 | run: python -m pytest --cov=pyspod --cov-report=xml tests/ --with-mpi -v
79 |
80 | - name: Upload coverage to Codecov
81 | uses: codecov/codecov-action@v5
82 | with:
83 | token: ${{ secrets.CODECOV_TOKEN }}
84 | fail_ci_if_error: true
85 | files: ./coverage.xml
86 | verbose: true
87 |
88 | test-no-mpi:
89 | runs-on: ${{ matrix.os }}
90 | timeout-minutes: 120
91 | strategy:
92 | fail-fast: false
93 | matrix:
94 | os:
95 | - ubuntu-latest
96 | - macos-latest
97 | py:
98 | - "3.11"
99 | - "3.12"
100 |
101 | steps:
102 |
103 | - name: Checkout
104 | uses: actions/checkout@v4
105 |
106 | - name: Use Python ${{ matrix.py }}
107 | uses: actions/setup-python@v5
108 | with:
109 | python-version: ${{ matrix.py }}
110 | architecture: ${{ startsWith(matrix.os, 'macos-') && 'arm64' || 'x64' }}
111 |
112 | - name: Install
113 | run: python -m pip install .[test,ai]
114 |
115 | - name: Test package
116 | run: python -m pytest -v tests/
117 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 |
3 | permissions:
4 | contents: read
5 |
6 | on:
7 | release:
8 | types:
9 | - published
10 | workflow_dispatch:
11 |
12 | jobs:
13 |
14 | distribution:
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 |
19 | - name: Checkout
20 | uses: actions/checkout@v4
21 |
22 | - name: Setup Python
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: 3
26 |
27 | - name: Upgrade pip
28 | run: python -m pip install -U pip
29 |
30 | - name: Install build and twine
31 | run: python -m pip install -U build twine
32 |
33 | - name: Build distribution
34 | run: python -m build
35 |
36 | - name: Check source distribution
37 | run: python -m twine check dist/pyspod-*.tar.gz
38 |
39 | - name: Check binary distribution
40 | run: python -m twine check dist/pyspod-*.whl
41 |
42 | - name: Upload distribution assets
43 | uses: actions/upload-artifact@v4
44 | with:
45 | name: release
46 | path: |
47 | dist/*.tar.gz
48 | dist/*.whl
49 |
50 |
51 | pypi-publish:
52 |
53 | if: ${{ github.event_name == 'release' }}
54 | name: Upload release to PyPI
55 | runs-on: ubuntu-latest
56 | needs: distribution
57 | environment:
58 | name: pypi
59 | url: https://pypi.org/p/pyspod
60 | permissions:
61 | id-token: write
62 |
63 | steps:
64 |
65 | - name: Download distribution assets
66 | uses: actions/download-artifact@v4
67 | with:
68 | name: release
69 | path: dist
70 |
71 | - name: Publish package distributions to PyPI
72 | uses: pypa/gh-action-pypi-publish@release/v1
73 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Byte-compiled / optimized / DLL files
3 | __pycache__/
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | .DS_Store
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | results/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *results/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # netCDF data files
40 | *.nc
41 |
42 | # mat data files
43 | *.mat
44 |
45 | # numpy data files
46 | *.npy
47 | *.npz
48 |
49 | # Installer logs
50 | pip-log.txt
51 | pip-delete-this-directory.txt
52 |
53 | # Unit test / coverage reports
54 | htmlcov/
55 | .tox/
56 | .nox/
57 | .coverage
58 | .coverage.*
59 | .cache
60 | nosetests.xml
61 | coverage.xml
62 | *.cover
63 | *.py,cover
64 | .hypothesis/
65 | .pytest_cache/
66 | cover/
67 |
68 | # Translations
69 | *.mo
70 | *.pot
71 |
72 | # Django stuff:
73 | *.log
74 | local_settings.py
75 | db.sqlite3
76 | db.sqlite3-journal
77 |
78 | # Flask stuff:
79 | instance/
80 | .webassets-cache
81 |
82 | # Scrapy stuff:
83 | .scrapy
84 |
85 | # Sphinx documentation
86 | docs/_build/
87 |
88 | # PyBuilder
89 | .pybuilder/
90 | target/
91 |
92 | # Jupyter Notebook
93 | .ipynb_checkpoints
94 |
95 | # IPython
96 | profile_default/
97 | ipython_config.py
98 |
99 | # pyenv
100 | # For a library or package, you might want to ignore these files since the code is
101 | # intended to run in multiple environments; otherwise, check them in:
102 | # .python-version
103 |
104 | # pipenv
105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
108 | # install all needed dependencies.
109 | #Pipfile.lock
110 |
111 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
112 | __pypackages__/
113 |
114 | # Celery stuff
115 | celerybeat-schedule
116 | celerybeat.pid
117 |
118 | # SageMath parsed files
119 | *.sage.py
120 |
121 | # Environments
122 | .env
123 | .venv
124 | env/
125 | venv/
126 | ENV/
127 | env.bak/
128 | venv.bak/
129 |
130 | # Spyder project settings
131 | .spyderproject
132 | .spyproject
133 |
134 | # Rope project settings
135 | .ropeproject
136 |
137 | # mkdocs documentation
138 | /site
139 |
140 | # mypy
141 | .mypy_cache/
142 | .dmypy.json
143 | dmypy.json
144 |
145 | # Pyre type checker
146 | .pyre/
147 |
148 | # pytype static type analyzer
149 | .pytype/
150 |
151 | # Cython debug symbols
152 | cython_debug/
153 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # https://readthedocs.org/projects/pyspod/builds/
2 |
3 | version: 2
4 |
5 | formats: all
6 |
7 | build:
8 | os: ubuntu-20.04
9 | tools:
10 | python: "3.10"
11 |
12 | python:
13 | install:
14 | - path: .[docs]
15 |
16 | sphinx:
17 | configuration: docs/source/conf.py
18 | fail_on_warning: true
19 |
--------------------------------------------------------------------------------
/LICENSE.rst:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022-2025 Gianmarco Mengaldo
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | # *PySPOD*: A parallel (distributed) Python SPOD package
34 |
35 | ## What do we implement?
36 |
37 | In this package we implement two versions of SPOD, both available as **parallel and distributed** (i.e. they can run on multiple cores/nodes on large-scale HPC machines) via [mpi4py](https://github.com/mpi4py/mpi4py):
38 |
39 | - **spod_standard**: this is the **batch** algorithm as described in [(Towne et al., 2018)](https://doi.org/10.1017/jfm.2018.283).
40 | - **spod_streaming**: this is the **streaming** algorithm presented in [(Schmidt and Towne, 2019)](https://doi.org/10.1016/j.cpc.2018.11.009).
41 |
42 | We additionally implement the calculation of the time coefficients and the reconstruction of the data, given a set of modes $\phi$ and coefficients *a*, as explained in [(Chu and Schmidt, 2021)](https://doi.org/10.1007/s00162-021-00588-6) and [(Nekkanti and Schmidt, 2021)](https://doi.org/10.1017/jfm.2021.681). The library also comes with a package for emulating the reduced space, that is, for forecasting the time coefficients using neural networks, as described in [(Lario et al., 2022)](https://doi.org/10.1016/j.jcp.2022.111475).
43 |
44 | To see how to use the **PySPOD** package, you can look at the [**Tutorials**](tutorials/README.md).
45 |
46 | For additional information, you can also consult the PySPOD website: [**http://www.mathexlab.com/PySPOD/**](http://www.mathexlab.com/PySPOD/).
47 |
48 | ## How to cite this work
49 | Current references to the PySPOD library are:
50 |
51 | ```bibtex
52 | @article{rogowski2024unlocking,
53 | title={Unlocking massively parallel spectral proper orthogonal decompositions in the PySPOD package},
54 | author={Rogowski, Marcin and Yeung, Brandon CY and Schmidt, Oliver T and Maulik, Romit and Dalcin, Lisandro and Parsani, Matteo and Mengaldo, Gianmarco},
55 | journal={Computer Physics Communications},
56 | pages={109246},
57 | year={2024},
58 | publisher={Elsevier}
59 | }
60 | ```
61 |
62 | ```bibtex
63 | @article{lario2022neural,
64 | title={Neural-network learning of SPOD latent dynamics},
65 | author={Lario, Andrea and Maulik, Romit and Schmidt, Oliver T and Rozza, Gianluigi and Mengaldo, Gianmarco},
66 | journal={Journal of Computational Physics},
67 | volume={468},
68 | pages={111475},
69 | year={2022},
70 | publisher={Elsevier}
71 | }
72 | ```
73 |
74 | ```bibtex
75 | @article{mengaldo2021pyspod,
76 | title={Pyspod: A python package for spectral proper orthogonal decomposition (spod)},
77 | author={Mengaldo, Gianmarco and Maulik, Romit},
78 | journal={Journal of Open Source Software},
79 | volume={6},
80 | number={60},
81 | pages={2862},
82 | year={2021}
83 | }
84 | ```
85 |
86 | ## What data can we apply SPOD to?
87 |
88 | SPOD can be applied to **wide-sense stationary data**. Examples arise in different fields, including **fluid mechanics** and **weather and climate**, among others.
89 |
90 | ## How do I install the library?
91 |
92 | If you want to download and install the latest version from `main`:
93 | - download the library
94 | - from the top directory of PySPOD, type
95 |
96 | ```bash
97 | python3 setup.py install
98 | ```
99 |
100 | > To enable parallel capabilities, you need an MPI distribution installed on your machine. The MPI distributions currently tested are [Open MPI](https://www.open-mpi.org) and [MPICH](https://www.mpich.org). Note that the library will still work in **serial** (no parallel capabilities) if you **do not have MPI**.
101 |
102 |
103 |
104 | ## Recent works with **PySPOD**
105 |
106 | Please [contact me](mailto:gianmarco.mengaldo@gmail.com) if you used PySPOD in a publication and would like it advertised here.
107 |
108 | - A. Lario, R. Maulik, G. Rozza, G. Mengaldo, [Neural-network learning of SPOD latent dynamics](https://doi.org/10.1016/j.jcp.2022.111475)
109 |
110 | ## Authors and contributors
111 |
112 | **PySPOD** is currently developed and maintained by
113 |
114 | * [G. Mengaldo](mailto:mpegim@nus.edu.sg), National University of Singapore (Singapore).
115 |
116 | Current active contributors include:
117 |
118 | * [M. Rogowski](https://mrogowski.github.io), King Abdullah University of Science and Technology (Saudi Arabia).
119 | * [L. Dalcin](https://cemse.kaust.edu.sa/ecrc/people/person/lisandro-dalcin), King Abdullah University of Science and Technology (Saudi Arabia).
120 | * [R. Maulik](https://romit-maulik.github.io), Argonne National Laboratory (US).
121 | * [A. Lario](https://www.math.sissa.it/users/andrea-lario), SISSA (Italy)
122 |
123 | ## How to contribute
124 |
125 | Contributions improving the code and documentation, as well as suggestions about new features, are more than welcome!
126 |
127 | The guidelines to contribute are as follows:
128 | 1. open a new issue describing the bug you intend to fix or the feature you want to add.
129 | 2. fork the project and open your own branch related to the issue you just opened, and call the branch `fix/name-of-the-issue` if it is a bug fix, or `feature/name-of-the-issue` if you are adding a feature.
130 | 3. ensure to use 4 spaces for formatting the code.
131 | 4. if you add a feature, it should be accompanied by relevant tests to ensure it functions correctly as the code continues to be developed.
132 | 5. commit your changes with a self-explanatory commit message.
133 | 6. push your commits and submit a pull request. Please, remember to rebase properly in order to maintain a clean, linear git history.
134 |
135 | [Contact us](mailto:mpegim@nus.edu.sg) by email for further information or questions about **PySPOD** or ways to contribute.
136 |
137 |
138 | ## License
139 |
140 | See the [LICENSE](LICENSE.rst) file for license rights and limitations (MIT).
141 |
--------------------------------------------------------------------------------
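A minimal usage sketch complementing the README above. It is illustrative only: the parameter keys mirror those used in the repository tutorials (`tutorials/tutorial1`) and should be treated as assumptions rather than an exhaustive or authoritative list; adapt the values to your own data.

```python
import numpy as np
from pyspod.spod.standard import Standard as spod_standard

# synthetic wide-sense stationary data: nt snapshots of a 2D field (nx x ny)
nt, nx, ny = 1000, 64, 32
data = np.random.rand(nt, nx, ny)

params = {
    'time_step'   : 1.0,         # time step between snapshots
    'n_space_dims': 2,           # number of spatial dimensions
    'n_variables' : 1,           # number of variables
    'n_dft'       : 128,         # length of each DFT block
    'overlap'     : 50,          # block overlap [%]
    'mean_type'   : 'longtime',  # long-time mean subtraction
    'n_modes_save': 3,           # number of modes to store
    'savedir'     : 'results',   # output directory
}

spod = spod_standard(params=params)  # use the Streaming class for the streaming variant
spod.fit(data_list=[data])           # fit accepts a list of data arrays
```

When run under MPI (e.g. `mpiexec -n 2 python script.py`), the same workflow is exercised in distributed mode by the parallel tests in `tests/test_spod_parallel.py`.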
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/docs/source/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 | # -- Path setup --------------------------------------------------------------
7 |
8 | # If extensions (or modules to document with autodoc) are in another directory,
9 | # add these directories to sys.path here. If the directory is relative to the
10 | # documentation root, use os.path.abspath to make it absolute, like shown here.
11 |
12 | import os
13 | import sys
14 | import typing
15 | import datetime
16 | import importlib
17 | sys.path.insert(0, os.path.abspath('.'))
18 | sys.path.insert(0, os.path.abspath('../../'))
19 | _today = datetime.datetime.now()
20 |
21 |
22 |
23 | # -- Project information -----------------------------------------------------
24 | # General information about the project.
25 |
26 | def pkg_version():
27 | import re
28 | here = os.path.dirname(__file__)
29 | pardir = [os.path.pardir] * 2
30 | topdir = os.path.join(here, *pardir)
31 | with open(os.path.join(topdir, 'pyspod', '__init__.py')) as f:
32 | m = re.search(r"__version__\s*=\s*'(.*)'", f.read())
33 | return m.groups()[0]
34 |
35 | project = 'PySPOD: Python SPOD'
36 | package = 'pyspod'
37 | author = ', '.join([
38 | 'Gianmarco Mengaldo',
39 | 'Marcin Rogowski',
40 | 'Lisandro Dalcin',
41 | 'Romit Maulik',
42 | 'Andrea Lario',
43 | ])
44 | copyright = f'{_today.year}, {author}'
45 |
46 | release = pkg_version()
47 | version = release.rsplit('.', 1)[0]
48 |
49 |
50 | # -- General configuration ---------------------------------------------------
51 |
52 | # Add any Sphinx extension module names here, as strings. They can be
53 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
54 | # ones.
55 |
56 | extensions = [
57 | 'sphinx.ext.autodoc',
58 | 'sphinx.ext.autosummary',
59 | 'sphinx.ext.coverage',
60 | 'sphinx.ext.graphviz',
61 | 'sphinx.ext.doctest',
62 | 'sphinx.ext.intersphinx',
63 | 'sphinx.ext.todo',
64 | 'sphinx.ext.coverage',
65 | 'sphinx.ext.viewcode',
66 | 'sphinx.ext.imgmath',
67 | 'sphinx.ext.ifconfig',
68 | 'sphinx.ext.autosectionlabel',
69 | 'sphinx.ext.napoleon',
70 | ]
71 |
72 | intersphinx_mapping = {
73 | 'python': ('https://docs.python.org/3', None),
74 | 'numpy': ('https://numpy.org/doc/stable/', None),
75 | 'scipy': ('https://docs.scipy.org/doc/scipy/', None),
76 | 'matplotlib': ('https://matplotlib.org/stable', None),
77 | }
78 |
79 | templates_path = ['_templates']
80 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
81 | source_suffix = '.rst'
82 | needs_sphinx = '5.0.0'
83 | default_role = 'any'
84 |
85 | try:
86 | import sphinx_rtd_theme
87 | if 'sphinx_rtd_theme' not in extensions:
88 | extensions.append('sphinx_rtd_theme')
89 | except ImportError:
90 | sphinx_rtd_theme = None
91 |
92 |
93 | def _setup_numpy_typing():
94 | try:
95 | import numpy as np
96 | except ImportError:
97 | np = type(sys)('numpy')
98 | sys.modules[np.__name__] = np
99 | np.dtype = type('dtype', (), {})
100 | np.dtype.__module__ = np.__name__
101 |
102 | try:
103 | import numpy.typing as npt
104 | except ImportError:
105 | npt = type(sys)('numpy.typing')
106 | np.typing = npt
107 | sys.modules[npt.__name__] = npt
108 | npt.__all__ = []
109 | for attr in ['ArrayLike', 'DTypeLike']:
110 | setattr(npt, attr, typing.Any)
111 | npt.__all__.append(attr)
112 |
113 | # The master toctree document.
114 | master_doc = 'index'
115 |
116 | # autoclass
117 | autoclass_content = 'both'
118 |
119 |
120 | # This is also used if you do content translation via gettext catalogs.
121 | # Usually you set "language" from the command line for these cases.
122 | # language = 'en'
123 |
124 | # There are two options for replacing |today|: either, you set today to some
125 | # non-false value, then it is used:
126 | #today = ''
127 | # Else, today_fmt is used as the format for a strftime call.
128 | #today_fmt = '%B %d, %Y'
129 |
130 | # List of patterns, relative to source directory, that match files and
131 | # directories to ignore when looking for source files.
132 | # (exclude_patterns is already defined in the General configuration section above)
133 |
134 | # The reST default role (used for this markup: `text`) to use for all
135 | # documents.
136 | #default_role = None
137 |
138 | # If true, '()' will be appended to :func: etc. cross-reference text.
139 | add_function_parentheses = True
140 |
141 | # If true, the current module name will be prepended to all description
142 | # unit titles (such as .. function::).
143 | add_module_names = False
144 |
145 | # If true, sectionauthor and moduleauthor directives will be shown in the
146 | # output. They are ignored by default.
147 | #show_authors = False
148 |
149 | # The name of the Pygments (syntax highlighting) style to use.
150 | pygments_style = 'sphinx'
151 |
152 | # A list of ignored prefixes for module index sorting.
153 | #modindex_common_prefix = []
154 |
155 | # If true, keep warnings as "system message" paragraphs in the built documents.
156 | keep_warnings = False
157 |
158 | # If true, `todo` and `todoList` produce output, else they produce nothing.
159 | todo_include_todos = True
160 |
161 | # -- Options for viewcode extension ---------------------------------------
162 |
163 | # Follow alias objects that are imported from another module such as functions,
164 | # classes and attributes. As side effects, this option ... ???
165 | # If false, ... ???.
166 | # The default is True.
167 | viewcode_import = True
168 |
169 | # -- Options for HTML output ----------------------------------------------
170 |
171 | # The theme to use for HTML and HTML Help pages. See the documentation for
172 | # a list of builtin themes.
173 | html_theme = (
174 | 'sphinx_rtd_theme' if 'sphinx_rtd_theme' in extensions else 'default'
175 | )
176 |
177 | # Theme options are theme-specific and customize the look and feel of a theme
178 | # further. For a list of options available for each theme, see the
179 | # documentation.
180 | #html_theme_options = {}
181 |
182 | # Add any paths that contain custom themes here, relative to this directory.
183 | #html_theme_path = []
184 |
185 | # The name for this set of Sphinx documents. If None, it defaults to
186 | # " v documentation".
187 | #html_title = None
188 |
189 | # A shorter title for the navigation bar. Default is the same as html_title.
190 | #html_short_title = None
191 |
192 | # The name of an image file (relative to this directory) to place at the top
193 | # of the sidebar.
194 | #html_logo = None
195 |
196 | # The name of an image file (within the static path) to use as favicon of the
197 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
198 | # pixels large.
199 | #html_favicon = None
200 |
201 | # Add any paths that contain custom static files (such as style sheets) here,
202 | # relative to this directory. They are copied after the builtin static files,
203 | # so a file named "default.css" will overwrite the builtin "default.css".
204 | # html_static_path = ['_static']
205 |
206 | # Add any extra paths that contain custom files (such as robots.txt or
207 | # .htaccess) here, relative to this directory. These files are copied
208 | # directly to the root of the documentation.
209 | #html_extra_path = ['_tutorials']
210 |
211 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
212 | # using the given strftime format.
213 | html_last_updated_fmt = '%b %d, %Y'
214 |
215 | # If true, SmartyPants will be used to convert quotes and dashes to
216 | # typographically correct entities.
217 | #html_use_smartypants = True
218 |
219 | # Custom sidebar templates, maps document names to template names.
220 | #html_sidebars = {}
221 |
222 | # Additional templates that should be rendered to pages, maps page names to
223 | # template names.
224 | #html_additional_pages = {}
225 |
226 | # If false, no module index is generated.
227 | #html_domain_indices = True
228 |
229 | # If false, no index is generated.
230 | html_use_index = True
231 |
232 | # If true, the index is split into individual pages for each letter.
233 | #html_split_index = False
234 |
235 | # If true, links to the reST sources are added to the pages.
236 | html_show_sourcelink = True
237 |
238 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
239 | #html_show_sphinx = True
240 |
241 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
242 | html_show_copyright = True
243 |
244 | # If true, an OpenSearch description file will be output, and all pages will
245 | # contain a tag referring to it. The value of this option must be the
246 | # base URL from which the finished HTML is served.
247 | #html_use_opensearch = ''
248 |
249 | # This is the file name suffix for HTML files (e.g. ".xhtml").
250 | #html_file_suffix = None
251 |
252 | # Language to be used for generating the HTML full-text search index.
253 | # Sphinx supports the following languages:
254 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
255 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
256 | #html_search_language = 'en'
257 |
258 | # A dictionary with options for the search language support, empty by default.
259 | # Now only 'ja' uses this config value
260 | #html_search_options = {'type': 'default'}
261 |
262 | # The name of a javascript file (relative to the configuration directory) that
263 | # implements a search results scorer. If empty, the default will be used.
264 | #html_search_scorer = 'scorer.js'
265 |
266 | # Output file base name for HTML help builder.
267 | htmlhelp_basename = f'{package}doc'
268 |
269 | # -- Options for LaTeX output ---------------------------------------------
270 |
271 | latex_elements = {
272 | # The paper size ('letterpaper' or 'a4paper').
273 | #'papersize': 'letterpaper',
274 |
275 | # The font size ('10pt', '11pt' or '12pt').
276 | #'pointsize': '10pt',
277 |
278 | # Additional stuff for the LaTeX preamble.
279 | #'preamble': '',
280 |
281 | # Latex figure (float) alignment
282 | #'figure_align': 'htbp',
283 | }
284 |
285 | # Grouping the document tree into LaTeX files. List of tuples
286 | # (source start file, target name, title,
287 | # author, documentclass [howto, manual, or own class]).
288 | latex_documents = [
289 | (master_doc, f'{package}.tex', f'{project} Documentation', author, 'manual'),
290 | ]
291 |
292 | # The name of an image file (relative to this directory) to place at the top of
293 | # the title page.
294 | #latex_logo = None
295 |
296 | # For "manual" documents, if this is true, then toplevel headings are parts,
297 | # not chapters.
298 | #latex_use_parts = False
299 |
300 | # If true, show page references after internal links.
301 | #latex_show_pagerefs = False
302 |
303 | # If true, show URL addresses after external links.
304 | #latex_show_urls = False
305 |
306 | # Documents to append as an appendix to all manuals.
307 | #latex_appendices = []
308 |
309 | # If false, no module index is generated.
310 | #latex_domain_indices = True
311 |
312 |
313 | # -- Options for manual page output ---------------------------------------
314 |
315 | # One entry per manual page. List of tuples
316 | # (source start file, name, description, authors, manual section).
317 | man_pages = [
318 | (master_doc, f'{package}', f'{project} Documentation', [author], 1)
319 | ]
320 |
321 | # If true, show URL addresses after external links.
322 | #man_show_urls = False
323 |
324 |
325 | # -- Options for Texinfo output -------------------------------------------
326 |
327 | # Grouping the document tree into Texinfo files. List of tuples
328 | # (source start file, target name, title, author,
329 | # dir menu entry, description, category)
330 | texinfo_documents = [
331 | (master_doc, f'{package}', f'{project} Documentation',
332 | author, f'{package}.', 'One line description of project.',
333 | 'Miscellaneous'),
334 | ]
335 |
336 | # Documents to append as an appendix to all manuals.
337 | #texinfo_appendices = []
338 |
339 | # If false, no module index is generated.
340 | #texinfo_domain_indices = True
341 |
342 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
343 | #texinfo_show_urls = 'footnote'
344 |
345 | # If true, do not generate a @detailmenu in the "Top" node's menu.
346 | #texinfo_no_detailmenu = False
347 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. pyspod documentation master file, created by
2 | sphinx-quickstart on Fri Oct 30 22:20:02 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to PySPOD's documentation!
7 | ==================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 |
14 |
15 | * The **GitHub repository** of this package can be found at `PySPOD `_, along with installation instructions and a guide on how to get started.
16 |
17 | * **Tutorials** can be found at `PySPOD-Tutorials `_
18 |
19 | * The package uses `GitHub Actions `_ for **continuous integration**.
20 |
21 |
22 |
23 | Indices and tables
24 | ------------------
25 |
26 | * :ref:`genindex`
27 | * :ref:`modindex`
28 |
29 |
30 |
31 | SPOD module
32 | ===========
33 |
34 | SPOD base
35 | ---------
36 |
37 | .. automodule:: pyspod.spod.base
38 | :members: Base
39 |
40 |
41 | SPOD standard
42 | -------------
43 |
44 | .. automodule:: pyspod.spod.standard
45 | :members: Standard
46 |
47 |
48 | SPOD streaming
49 | --------------
50 |
51 | .. automodule:: pyspod.spod.streaming
52 | :members: Streaming
53 |
54 |
55 | SPOD utils
56 | ----------
57 |
58 | .. automodule:: pyspod.spod.utils
59 | :members:
60 | check_orthogonality,
61 | compute_coeffs_op,
62 | compute_coeffs_conv,
63 | compute_reconstruction
64 |
65 |
66 | Utils module
67 | ============
68 |
69 |
70 | Postprocessing
71 | --------------
72 |
73 | .. automodule:: pyspod.utils.postproc
74 | :members:
75 | find_nearest_freq,
76 | find_nearest_coords,
77 | get_modes_at_freq,
78 | get_data_from_file,
79 | plot_eigs,
80 | plot_eigs_vs_frequency,
81 | plot_eigs_vs_period,
82 | plot_2d_modes_at_frequency,
83 | plot_3d_modes_slice_at_frequency,
84 | plot_mode_tracers,
85 | plot_2d_data,
86 | plot_data_tracers,
87 | generate_2d_data_video
88 |
89 |
90 | Weights
91 | --------------
92 |
93 | .. automodule:: pyspod.utils.weights
94 | :members:
95 | geo_trapz_2D,
96 | geo_trapz_3D,
97 | custom,
98 | apply_normalization
99 |
--------------------------------------------------------------------------------
/joss/paper.bib:
--------------------------------------------------------------------------------
1 | @book{lumley1970,
2 | title={Stochastic tools in turbulence},
3 | author={Lumley, John L},
4 | year={2007},
5 | publisher={Courier Corporation}
6 | }
7 |
8 | @article{towne2017,
9 | title={Spectral proper orthogonal decomposition and its relationship to dynamic mode decomposition and resolvent analysis},
10 | author={Towne, Aaron and Schmidt, Oliver T and Colonius, Tim},
11 | journal={Journal of Fluid Mechanics},
12 | volume={847},
13 | pages={821--867},
14 | year={2018},
15 | doi={10.1017/jfm.2018.283},
16 | publisher={Cambridge University Press}
17 | }
18 |
19 | @article{schmidt2019a,
20 | title={Spectral empirical orthogonal function analysis of weather and climate data},
21 | author={Schmidt, Oliver T and Mengaldo, Gianmarco and Balsamo, Gianpaolo and Wedi, Nils P},
22 | journal={Monthly Weather Review},
23 | volume={147},
24 | number={8},
25 | pages={2979--2995},
26 | doi={10.1175/mwr-d-18-0337.1},
27 | year={2019}
28 | }
29 |
30 | @article{schmidt2019b,
31 | title={An efficient streaming algorithm for spectral proper orthogonal decomposition},
32 | author={Schmidt, Oliver T and Towne, Aaron},
33 | journal={Computer Physics Communications},
34 | volume={237},
35 | pages={98--109},
36 | year={2019},
37 | doi={10.1016/j.cpc.2018.11.009},
38 | publisher={Elsevier}
39 | }
40 |
41 | @article{schmidt2020,
42 | title={Guide to spectral proper orthogonal decomposition},
43 | author={Schmidt, Oliver T and Colonius, Tim},
44 | journal={AIAA Journal},
45 | volume={58},
46 | number={3},
47 | pages={1023--1033},
48 | year={2020},
49 | doi={10.2514/1.J058809},
50 | publisher={American Institute of Aeronautics and Astronautics}
51 | }
52 |
53 | @article{paolo2018,
54 | title={Response of Pacific-sector Antarctic ice shelves to the El Ni{\~n}o/Southern oscillation},
55 | author={Paolo, FS and Padman, L and Fricker, HA and Adusumilli, S and Howard, S and Siegfried, MR},
56 | journal={Nature geoscience},
57 | volume={11},
58 | number={2},
59 | pages={121--126},
60 | year={2018},
61 | doi={10.1038/s41561-017-0033-0},
62 | publisher={Nature Publishing Group}
63 | }
64 |
65 | @misc{schmidt-code,
66 | title = {Matlab {SPOD} code},
67 | author={Schmidt, Oliver T},
68 | howpublished = {\url{https://github.com/SpectralPOD/spod_matlab}},
69 | note = {Accessed: 1 March 2021},
70 | year = 2020
71 | }
72 |
73 | @misc{spod-code-jburrows,
74 | title = {Python {SPOD} code},
75 | author={Burrows, Travis J},
76 | howpublished = {\url{https://github.com/tjburrows/spod_python}},
77 | note = {Accessed: 1 March 2021},
78 | year = 2020
79 | }
80 |
81 | @misc{spod-code-loiseau,
82 | title = {Python {SPOD} code},
83 | author={Loiseau, Jean-Christophe},
84 | howpublished = {\url{https://gist.github.com/loiseaujc/dd3739fa779d7bdd678639ae7396a73b}},
85 | note = {Accessed: 1 March 2021},
86 | year = 2019
87 | }
88 |
--------------------------------------------------------------------------------
/joss/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'PySPOD: A Python package for Spectral Proper Orthogonal Decomposition (SPOD)'
3 | tags:
4 | - Python
5 | - dynamical systems
6 | - nonlinear dynamics
7 | - data-driven dynamics
8 | - data mining
9 | authors:
10 | - name: Gianmarco Mengaldo
11 | orcid: 0000-0002-0157-5477
12 | affiliation: "1" # (Multiple affiliations must be quoted)
13 | - name: Romit Maulik
14 | affiliation: "2" # (Multiple affiliations must be quoted)
15 | affiliations:
16 | - name: Department of Mechanical Engineering, National University of Singapore (SG)
17 | index: 1
18 | - name: Argonne Leadership Computing Facility, Argonne National Laboratory (USA)
19 | index: 2
20 | date: 6 November 2020
21 | bibliography: paper.bib
22 |
23 | # Optional fields if submitting to a AAS journal too, see this blog post:
24 | # https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing
25 |
27 | ---
28 |
29 | # Summary
30 |
31 | Large unstructured datasets may contain complex coherent patterns that
32 | evolve in time and space, and that the human eye cannot grasp. These
33 | patterns are frequently essential to unlock our understanding of complex
34 | systems that can arise in nature, such as the evolution of the atmosphere
35 | in the short (weather prediction) and long term (climate prediction),
36 | the behavior of turbulent flows, and the dynamics of plate tectonics,
37 | among several others. Identifying these coherent structures can
38 | prove crucial to facilitate the construction of modeling tools that can
39 | help anticipate scenarios that would not otherwise be predictable.
40 |
41 | Within this context, dynamical systems theory, complemented with recent
42 | developments in machine learning and data mining tools, is achieving tremendous
43 | advances in our ability to acquire actionable information from complex
44 | data. Singular-value decomposition based techniques, in particular, are
45 | a promising area that is gaining popularity, due to their links to reduced
46 | order modeling and dynamical systems. Also, these techniques can be
47 | used in the context of machine learning as additional inputs to the learning
48 | architecture, thereby augmenting the dataset and possibly helping in
49 | the interpretability of the results.
50 |
51 | Several variants of singular-value decomposition (SVD) based techniques
52 | have been proposed in the literature; this library provides efficient
53 | implementations of the spectral proper orthogonal decomposition
54 | (SPOD) [@lumley1970; @towne2017]. SPOD is also referred to as spectral
55 | empirical orthogonal function (SEOF) in the weather and climate community
56 | [@schmidt2019a]. SPOD differs from other SVD-based techniques as it is
57 | derived from a standard (space-time) POD problem for stationary data and
58 | leads to modes that (i) are time harmonic and oscillate at a single frequency,
59 | (ii) are coherent in both time and space, (iii) optimally represent the space-time
60 | statistical variability of the underlying stationary random processes, and
61 | (iv) are both spatially and space-time orthogonal [@schmidt2020].
62 | We note that `PySPOD` implements the Python counterpart of the Matlab
63 | code [@schmidt-code], with the addition of the streaming algorithm outlined
64 | by @schmidt2019b. We also acknowledge that there exist two other Python
65 | packages implementing SPOD. The first, by @spod-code-jburrows, is also a
66 | Python counterpart of the Matlab code of @schmidt-code. However, our
67 | implementation provides extensive post-processing capabilities, testing,
68 | and tutorials. It also adds the streaming version [@schmidt2019b], which
69 | is not present in @spod-code-jburrows. Similar differences exist between
70 | `PySPOD` and the Python package presented in @spod-code-loiseau.
71 |
72 | # Capabilities
73 |
74 | `PySPOD` is a modular Python package that implements three different
75 | variants of SPOD, (i) a low storage [@towne2017; @schmidt2019a],
76 | (ii) a low RAM [@towne2017; @schmidt2019a], and (iii) a streaming version
77 | [@schmidt2019b]. The three versions differ in terms of I/O and RAM requirements.
78 | The low storage version allows faster computations, and it is intended for small
79 | datasets, or high RAM machines. The low RAM version can handle
80 | large datasets, but it is typically slower than the low storage counterpart.
81 | The streaming version is a streaming implementation of SPOD.
82 | The API to the library offers a flexible and user-friendly experience, and
83 | the library can be complemented with additional SPOD algorithms in an easy-to-implement
84 | way. The structure of the library and the use of Python enable efficient
85 | interfacing with low level and highly optimized libraries (written in C
86 | or Fortran) for the calculation of e.g. the fast Fourier transform, eigenvalue
87 | decomposition, and other linear algebra operations. Users can also take advantage
88 | of the ready-to-use postproc tools offered, and they can easily extend
89 | the postproc functionalities to suit their own needs.
90 |
91 | `PySPOD` is designed to be used in different fields of engineering and applied
92 | science, including weather and climate, fluid mechanics, seismology, among others.
93 | It can be used as a production code, for the analysis of large datasets, as well
94 | as for experimenting on smaller problems. Users can be students and experts alike.
95 | For an overview of the guidelines one should follow when using SPOD, the reader
96 | can refer to @schmidt2020.
97 |
98 | In \autoref{fig:MEI}, we show the application of this package to identify
99 | the Multivariate ENSO Index from ECMWF reanalysis datasets (E20C in particular),
100 | where we used monthly averages of (i) mean sea level pressure (MSL), (ii) Zonal
101 | component of the surface wind (U10), (iii) Meridional component of the surface
102 | wind (V10), (iv) Sea surface temperature (SST), (v) 2-meter temperature (T2M),
103 | and (vi) Total cloud cover (TCC). \autoref{fig:MEI} shows the leading
104 | modes of the meridional component of the surface wind (left), and of the mean
105 | sea-level pressure (right). One can appreciate a possible coupling
106 | between ENSO and the vortices over West Antarctica (that in turn could affect
107 | the height of the ice shelves [@paolo2018]). For more detail regarding this
108 | simulation, the interested reader can refer to @schmidt2019a.
109 |
110 | 
111 |
112 |
113 |
114 | # Acknowledgements
115 | G. Mengaldo wants to thank Oliver T. Schmidt for fruitful discussions.
116 | We also thank the reviewers who helped substantially improve the software package.
117 |
118 |
119 |
120 | # References
121 |
--------------------------------------------------------------------------------
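For readers connecting the capabilities described in the paper above to the cited references, the following is the standard frequency-domain eigenvalue problem that SPOD solves. The notation is the conventional one from the SPOD literature (e.g. @towne2017, @schmidt2020) and is added here as context; it is not taken verbatim from the paper text.

```latex
% At each frequency f, the SPOD modes \psi_k and energies \lambda_k >= 0 are the
% eigenpairs of the weighted cross-spectral density (CSD) operator:
%   S(x, x', f): CSD tensor estimated from the data (e.g. via Welch blocks)
%   W(x')      : spatial (quadrature) weight
\int_{\Omega} S(\mathbf{x}, \mathbf{x}', f)\, W(\mathbf{x}')\,
  \psi_k(\mathbf{x}', f)\, \mathrm{d}\mathbf{x}'
  = \lambda_k(f)\, \psi_k(\mathbf{x}, f)
```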
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools >= 42"]
3 | build-backend = "setuptools.build_meta"
4 |
--------------------------------------------------------------------------------
/pyspod/__init__.py:
--------------------------------------------------------------------------------
1 | '''PySPOD'''
2 | from .pod.base import Base as pod_base
3 | from .pod.standard import Standard as pod_standard
4 | from .spod.base import Base as spod_base
5 | from .spod.standard import Standard as spod_standard
6 | from .spod.streaming import Streaming as spod_streaming
7 |
8 | __version__ = '2.0.0'
9 |
--------------------------------------------------------------------------------
/pyspod/emulation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/emulation/__init__.py
--------------------------------------------------------------------------------
/pyspod/emulation/base.py:
--------------------------------------------------------------------------------
1 | '''Base module for the emulation of latent-space dynamics.'''
2 |
3 | # Import standard Python packages
4 | import os
5 | import sys
6 | import time
7 | import numpy as np
8 | from tqdm import tqdm
9 | CWD = os.getcwd()
10 |
11 |
12 |
13 | ## Emulation Base class
14 | ## ----------------------------------------------------------------------------
15 |
16 | class Base():
17 | '''
18 | Class that implements a non-intrusive emulation of
19 | the latent-space dynamics via neural networks.
20 |
21 |     This base class provides the scaling and descaling utilities
22 |     shared by the concrete emulators (see `Neural_Nets`), applied
23 |     to the latent coefficients passed to its methods.
24 | '''
25 | def __init__(self, params):
26 | self._data_type = params['data_type']
27 | self._normalization = params.get('normalization', 'localmax')
28 | self._savedir = params.get('savedir', os.path.join(CWD,'results'))
29 |
30 |
31 | def scaler(self, data):
32 | '''
33 | Evaluate normalization vector
34 | '''
35 | if self._data_type.lower() == 'real':
36 | vec = np.zeros(data.shape[0], dtype=float)
37 | if self._normalization.lower() == 'localmax':
38 | max_re = np.amax(data[:,:],axis=1) * 10
39 | min_re = np.amin(data[:,:],axis=1) * 10
40 | for i in range (data.shape[0]):
41 | vec[i] = max(abs(max_re[i]), abs(min_re[i]))
42 | elif self._normalization.lower() == 'globalmax':
43 | max_re = max([max(l) for l in data[:,:].real])
44 | min_re = min([min(l) for l in data[:,:].real])
45 | for i in range(data.shape[0]):
46 | vec[i] = max(abs(max_re), abs(min_re))
47 | else:
48 | for i in range (data.shape[0]):
49 | vec[i] = 1.0
50 | elif self._data_type == 'complex':
51 | vec = np.zeros(data.shape[0], dtype=complex)
52 | if self._normalization.lower() == 'localmax':
53 | max_re = np.amax(data[:,:].real,axis=1) * 10
54 | min_re = np.amin(data[:,:].real,axis=1) * 10
55 | max_im = np.amax(data[:,:].imag,axis=1) * 10
56 | min_im = np.amin(data[:,:].imag,axis=1) * 10
57 | for i in range (data.shape[0]):
58 | vec[i] = \
59 | max(abs(max_re[i]), abs(min_re[i])) + \
60 | (max(abs(max_im[i]), abs(min_im[i]))) * 1j
61 | elif self._normalization.lower() == 'globalmax':
62 | max_re = max([max(l) for l in data[:,:].real])
63 | min_re = min([min(l) for l in data[:,:].real])
64 | max_im = max([max(l) for l in data[:,:].imag])
65 | min_im = min([min(l) for l in data[:,:].imag])
66 | for i in range(data.shape[0]):
67 | vec[i] = \
68 | max(abs(max_re), abs(min_re)) + \
69 | (max(abs(max_im), abs(min_im))) * 1j
70 | else:
71 | for i in range (data.shape[0]):
72 | vec[i] = 1.0 + 1j
73 | else:
74 | raise TypeError('You need to specify data_type; real or complex.')
75 | return vec
76 |
77 |
78 | def scale_data(self, data, vec=None):
79 | '''
80 | Normalize data given a normalization vector and a matrix of data
81 | '''
82 |         if vec is None or vec.shape[0] == 0:
83 | print('No normalization performed')
84 | return
85 | data_out = np.zeros_like(data)
86 | if self._data_type.lower() == 'real':
87 | for j in range(data.shape[1]):
88 | data_out[:,j]= data[:,j] / vec.real
89 | elif self._data_type.lower() == 'complex':
90 | for j in range(data.shape[1]):
91 | data_out.real[:,j] = data[:,j].real / vec.real
92 | data_out.imag[:,j] = data[:,j].imag / vec.imag
93 | else:
94 | raise TypeError('You need to specify data_type; real or complex.')
95 | return data_out
96 |
97 |
98 | def descale_data(self, data, vec=None):
99 |         if vec is None or vec.shape[0] == 0:
100 | print('No denormalization is performed')
101 | return
102 | data_out = np.zeros_like(data)
103 | if self._data_type.lower() == 'real':
104 | for j in range(data.shape[1]):
105 | data_out[:,j]= data[:,j].real * vec.real
106 | elif self._data_type.lower() == 'complex':
107 | for j in range(data.shape[1]):
108 | data_out.real[:,j] = data[:,j].real * vec.real
109 | data_out.imag[:,j] = data[:,j].imag * vec.imag
110 | else:
111 | raise TypeError('You need to specify data_type; real or complex.')
112 | return data_out
113 |
114 | ## ----------------------------------------------------------------------------
115 |
--------------------------------------------------------------------------------
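A short, self-contained round trip through the scaling helpers defined in the class above, with synthetic data. The parameter values are illustrative; only `data_type` is strictly required by the constructor.

```python
import numpy as np
from pyspod.emulation.base import Base

# synthetic latent coefficients: 4 modes x 100 time steps
coeffs = np.random.rand(4, 100)

emu = Base(params={'data_type': 'real', 'normalization': 'localmax'})
vec = emu.scaler(coeffs)                      # per-mode normalization vector
scaled = emu.scale_data(coeffs, vec=vec)      # divide each snapshot by vec
restored = emu.descale_data(scaled, vec=vec)  # multiply back to the original scale

assert np.allclose(coeffs, restored)
```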
/pyspod/emulation/neural_nets.py:
--------------------------------------------------------------------------------
1 | '''Derived module from emulation/base.py, implementing neural-network emulation.'''
2 |
3 | # Import standard Python packages
4 | import os
5 | import sys
6 | import time
7 | import shutil
8 | import numpy as np
9 | from tqdm import tqdm
10 | import tensorflow as tf
11 | from tensorflow.keras import models
12 | from tensorflow.keras import optimizers
13 | from tensorflow.keras import regularizers
14 | from tensorflow.keras import backend as K
15 | from tensorflow.keras.layers import Dense
16 | from tensorflow.keras.layers import LSTM
17 | from tensorflow.keras.layers import Dropout
18 | from tensorflow.keras.models import load_model
19 | from tensorflow.keras.models import Model
20 | from tensorflow.keras.models import Sequential
21 | from pyspod.emulation.base import Base
22 |
23 | # set seeds
24 | from numpy.random import seed; seed(1)
25 | tf.random.set_seed(2)
26 | tf.config.threading.set_intra_op_parallelism_threads(1)
27 | tf.config.threading.set_inter_op_parallelism_threads(1)
28 |
29 |
30 |
31 |
32 |
33 | ## Emulation class
34 | ## ----------------------------------------------------------------------------
35 |
36 | class Neural_Nets(Base):
37 | '''
38 | Class that implements a non-intrusive emulation of
39 | the latent-space dynamics via neural networks.
40 |
41 |     The emulation is performed on the latent coefficients
42 |     (e.g. SPOD or POD time coefficients) passed to the training
43 |     and inference methods of this class.
44 | '''
45 | def __init__(self, params):
46 | super().__init__(params)
47 | self._network = params.get('network' , 'lstm')
48 | self._n_neurons = params.get('n_neurons' , 20)
49 | self._epochs = params.get('epochs' , 20)
50 | self._batch_size = params.get('batch_size', 32)
51 | self._n_seq_in = params.get('n_seq_in' , 1)
52 | self._n_seq_out = params.get('n_seq_out' , 1)
53 | self._dropout = params.get('dropout' , 0)
54 |
55 |
56 | def build_lstm(self):
57 | '''
58 | Build a Long-Short Term Memory network
59 | '''
60 |         def coeff_determination(y_true, y_pred):
61 | SS_res = K.sum(K.square(y_true-y_pred), axis=0)
62 | SS_tot = K.sum(K.square(y_true - K.mean(y_true,axis=0)), axis=0)
63 | return K.mean(1 - SS_res/(SS_tot + K.epsilon()) )
64 | self.model = Sequential()
65 | self.model.add(LSTM(self._n_neurons,
66 | input_shape=(self._n_seq_in, self._n_features)))
67 | self.model.add(Dropout(self._dropout))
68 | self.model.add(Dense(
69 | self._n_seq_out * self._n_features, activation='linear'))
70 | opt = optimizers.Adam(
71 | learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=0,
72 | amsgrad=False)
73 | self.model.compile(
74 | optimizer=opt, loss='mse', metrics=[coeff_determination])
75 | self.model.summary()
76 |
77 |
78 | def build_cnn(self):
79 | '''
80 |         Build a Convolutional Neural Network
81 | '''
82 |         def coeff_determination(y_true, y_pred):
83 | SS_res = K.sum(K.square(y_true-y_pred), axis=0)
84 | SS_tot = K.sum(K.square(y_true - K.mean(y_true,axis=0)), axis=0)
85 | return K.mean(1 - SS_res/(SS_tot + K.epsilon()) )
86 | self.model = Sequential()
87 | ## to be added ...
88 | pass
89 |
90 |
91 | def extract_sequences(self, data, fh=1):
92 | '''
93 | Create training and validation sets of data for a LSTM network
94 | '''
95 | if fh < 1: raise ValueError('`fh` must be >= 1.')
96 | # self._n_features = data.shape[0]
97 | nt = data.shape[1]
98 | states = np.copy(np.transpose(data))
99 | total_size = nt - self._n_seq_in - self._n_seq_out - fh + 1
100 |
101 | x = np.zeros(shape=(total_size, self._n_seq_in, self._n_features))
102 | y = np.zeros(shape=(total_size, self._n_seq_out * self._n_features))
103 | idx_x = np.empty([total_size, self._n_seq_in ], int)
104 | idx_y = np.empty([total_size, self._n_seq_out], int)
105 | cnt = 0
106 | for t in tqdm(range(total_size), desc='extract sequences'):
107 | idx_x[cnt,...] = np.arange(t, t+self._n_seq_in)
108 | idx_y[cnt,...] = np.arange( t+self._n_seq_in-1+fh,
109 | t+self._n_seq_in-1+self._n_seq_out+fh)
110 | x[cnt,:,:] = states[None,idx_x[cnt],:]
111 | y[cnt,:] = np.reshape(states[idx_y[cnt],:],
112 | [self._n_seq_out*self._n_features])
113 | cnt = cnt + 1
114 | print('**********************************')
115 | print('* DATA LAYOUT *')
116 | print('**********************************')
117 | print('data_size = ', data.shape)
118 | print('x.shape = ', x.shape)
119 | print('y.shape = ', y.shape)
120 | print('**********************************')
121 | return x, y
122 |
123 |
124 | def model_initialize(self, data):
125 | '''
126 | Initialization of a network
127 | '''
128 | self._n_features = data.shape[0]
129 | # construct the neural network model
130 | if self._network.lower() == 'lstm':
131 | self.build_lstm()
132 | elif self._network.lower() == 'cnn':
133 | self.build_cnn()
134 | else:
135 | raise ValueError(self._network.lower(), ' not found.')
136 |
137 |
138 | def model_train(self, data_train, data_valid, idx=0):
139 | '''
140 | Train a network previously initialized
141 | '''
142 | self._train(data_train, data_valid, name='real'+str(idx))
143 | if not np.isreal(data_train).all():
144 | self._train(data_train.imag, data_valid.imag, name='imag'+str(idx))
145 |
146 |
147 | def model_inference(self, data_in, idx=0):
148 | '''
149 |         Predict the coefficients of a time series, given an input sequence
150 | '''
151 | n_seq_in = self._n_seq_in
152 | n_seq_out = self._n_seq_out
153 | n_features = self._n_features
154 | # number of time snapshots
155 | nt = data_in.shape[1]
156 | # check the size of the input array
157 | if nt < n_seq_in:
158 |             raise ValueError(self._network.lower(), 'data input error.')
159 |
160 | # initialization of variables and vectors
161 | input_batch = np.zeros([1, n_seq_in, n_features])
162 | prediction = np.zeros([n_seq_out, n_features])
163 | coeffs_tmp = np.zeros([n_seq_out, nt, n_features], dtype=complex)
164 | states = np.zeros([ n_seq_in, n_features] , dtype=complex)
165 | coeffs = np.zeros([n_features, nt] , dtype=complex)
166 | idx_x = np.empty([nt-n_seq_in, n_seq_in] , int)
167 |
168 | ## compute real part
169 | cnt = 0
170 | name_tmp = 'real'+str(idx)
171 | name_real = os.path.join(self._savedir, name_tmp+'.weights.h5')
172 | self.model.load_weights(name_real)
173 | for t in tqdm(range(n_seq_in,nt,n_seq_out), desc='inference_real'):
174 | idx_x[cnt,...] = np.arange(t-n_seq_in, t)
175 | states[:,:] = np.copy(np.transpose(data_in[:,idx_x[cnt]]))
176 | input_batch[0,:,:] = states[None,:,:].real
177 | output_state = self.model.predict(input_batch, verbose=0)
178 | coeffs_tmp[:,cnt,:] = np.reshape(
179 | output_state[:], [n_seq_out, n_features])
180 | cnt = cnt + 1
181 |
182 | ## compute imaginary part if present
183 | if not np.isreal(data_in).all():
184 | cnt = 0
185 | name_tmp = 'imag'+str(idx)
186 | name_imag = os.path.join(self._savedir, name_tmp+'.weights.h5')
187 | self.model.load_weights(name_imag)
188 | for t in tqdm(range(n_seq_in,nt,n_seq_out), desc='inference_imag'):
189 | idx_x[cnt,...] = np.arange(t-n_seq_in, t)
190 | states[:,:] = np.copy(np.transpose(data_in[:,idx_x[cnt]]))
191 | input_batch[0,:,:] = states[None,:,:].imag
192 | output_state = self.model.predict(input_batch, verbose=0)
193 | prediction[:,:] = np.reshape(
194 | output_state[:], [n_seq_out,n_features])
195 | coeffs_tmp[:,cnt,:] = coeffs_tmp[:,cnt,:] + prediction * 1j
196 | cnt = cnt + 1
197 | coeffs[:,:n_seq_in] = data_in[:,:n_seq_in]
198 | for i in range(cnt):
199 | lb = (n_seq_out * i) + n_seq_in
200 | ub = n_seq_in + (n_seq_out * (i + 1))
201 | coeffs[:,lb:ub] = np.transpose(coeffs_tmp[:,i,:])
202 | return coeffs
203 |
204 |
205 | def _train(self, data_train, data_valid, name):
206 | ## extract sequences
207 | train_data_ip, train_data_op = self.extract_sequences(data=data_train)
208 | valid_data_ip, valid_data_op = self.extract_sequences(data=data_valid)
209 |
210 | # training
211 | name_filepath = os.path.join(self._savedir, name+'.weights.h5')
212 | cb_chk = tf.keras.callbacks.ModelCheckpoint(
213 | name_filepath,
214 | monitor='loss',
215 | mode='min',
216 | save_best_only=True,
217 | save_weights_only=True,
218 | verbose=0)
219 | cb_early = tf.keras.callbacks.EarlyStopping(
220 | monitor='val_loss',
221 | min_delta = 0.000001,
222 | patience=10,
223 | verbose=1)
224 | cb_lr = tf.keras.callbacks.ReduceLROnPlateau(
225 | monitor="val_loss",
226 | min_delta=0.00001,
227 | patience=10,
228 | factor=0.2,
229 | verbose=0)
230 | self.callbacks_list = [cb_chk]
231 | self.train_history = self.model.fit(
232 | x=train_data_ip, y=train_data_op,
233 | validation_data=(valid_data_ip, valid_data_op),
234 | epochs= self._epochs,
235 | batch_size=self._batch_size,
236 | callbacks=self.callbacks_list,
237 | verbose=2)
238 |
239 | ## ----------------------------------------------------------------------------
240 |
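A minimal usage sketch of the emulation workflow above, assuming `emu` is an already-constructed instance of the emulation class (its constructor and the `build_lstm`/`build_cnn` methods are defined earlier in this file) and that the hypothetical file `coeffs.npy` holds complex modal coefficients of shape (n_features, n_time):

    import numpy as np

    # hypothetical coefficient matrix: n_features x n_time, possibly complex
    coeffs = np.load('coeffs.npy')
    split = coeffs.shape[1] // 2
    data_train, data_valid = coeffs[:, :split], coeffs[:, split:]

    emu.model_initialize(data=data_train)            # builds the LSTM or CNN
    emu.model_train(data_train, data_valid, idx=0)   # trains real (and imag) networks
    coeffs_pred = emu.model_inference(data_in=data_valid, idx=0)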
--------------------------------------------------------------------------------
/pyspod/pod/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/pod/__init__.py
--------------------------------------------------------------------------------
/pyspod/pod/standard.py:
--------------------------------------------------------------------------------
1 | '''
2 | Derived module from pod/base.py for standard POD:
3 | - the `fit` method implements the standard Proper Orthogonal Decomposition
4 | '''
5 | from __future__ import division
6 |
7 | # Import standard Python packages
8 | import os
9 | import sys
10 | import time
11 | import pickle
12 | import warnings
13 | import scipy
14 | import numpy as np
15 |
16 | # Import custom Python packages
17 | from pyspod.pod.base import Base
18 | import pyspod.utils.parallel as utils_par
19 | BYTE_TO_GB = 9.3132257461548e-10
20 |
21 |
22 |
23 | ## Standard POD class
24 | ## ----------------------------------------------------------------------------
25 |
26 | class Standard(Base):
27 | '''
28 | Class that implements the standard Proper Orthogonal Decomposition.
29 | '''
30 | def fit(self, data_list):
31 | '''
32 | Class-specific method to fit the data matrix `data` using standard POD.
33 | '''
34 | start = time.time()
35 |
36 | ## if user forgets to pass list for single data list,
37 | ## make it to be a list
38 | if not isinstance(data_list, list): data_list = [data_list]
39 |
40 | self._pr0(f' ')
41 | self._pr0(f'Initialize data ...')
42 | self._initialize(data_list)
43 |
44 | ## reshape data and remove mean
45 | d = self._data.reshape(self._nt, self._data[0,...].size)
46 | d = d - self._t_mean
47 | d = d.T
48 |
49 | ## eigendecomposition
50 | Q = d.conj().T @ (d * self._weights)
51 | Q = utils_par.allreduce(Q, comm=self._comm)
52 | w, v = scipy.linalg.eig(Q)
53 |
54 | # bases
55 | self._pr0(f' ')
56 | self._pr0(f'Calculating standard POD ...')
57 | st = time.time()
58 | phi = np.real(d @ v) / np.sqrt(w[:])
59 |
60 | # truncation and save
61 | phi_r = phi[:,0:self._n_modes_save]
62 | self._file_modes = os.path.join(self._savedir_sim, 'modes.npy')
63 | shape = [*self._xshape,self._nv,self._n_modes_save]
64 | if self._comm: shape[self._max_axis] = -1
65 | phi_r.shape = shape
66 | utils_par.npy_save(
67 | self._comm, self._file_modes, phi_r, axis=self._max_axis)
68 | self._pr0(f'done. Elapsed time: {time.time() - st} s.')
69 | self._pr0(f'Modes saved in {self._file_modes}')
70 | self._eigs = w
71 | self._store_and_save()
72 | return self
73 |
74 | ## ----------------------------------------------------------------------------
75 |
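A minimal usage sketch for the standard POD class above, assuming the `Base` constructor (pyspod/pod/base.py, not shown here) accepts a `params` dictionary with keys such as those in tests/data/input.yaml plus `n_modes_save` and `savedir`:

    import numpy as np
    import pyspod.pod.standard as pod_standard

    # assumed parameter names; the full list lives in pyspod/pod/base.py
    params = {
        'time_step'   : 1,
        'n_space_dims': 2,
        'n_variables' : 1,
        'n_modes_save': 4,
        'savedir'     : 'results_pod',
    }
    data = np.random.rand(100, 20, 10)        # toy snapshots: (time, x1, x2)
    pod = pod_standard.Standard(params=params)
    pod.fit(data_list=[data])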
--------------------------------------------------------------------------------
/pyspod/pod/utils.py:
--------------------------------------------------------------------------------
1 | """Utils for SPOD method."""
2 | # Import standard Python packages
3 | import os
4 | import sys
5 | import time
6 | import yaml
7 | import psutil
8 | import warnings
9 | import numpy as np
10 |
11 | # Import custom Python packages
12 | import pyspod.utils.parallel as utils_par
13 | import pyspod.utils.postproc as post
14 | CWD = os.getcwd()
15 |
16 |
17 | def compute_coeffs_op(data, results_dir, modes_idx=None,
18 | savedir=None, dtype='double', comm=None):
19 | '''
20 | Compute coefficients through projection.
21 | '''
22 | s0 = time.time()
23 | utils_par.pr0(f'\nComputing coefficients ...', comm)
24 | if comm:
25 | rank = comm.rank
26 | size = comm.size
27 | else:
28 | rank = 0
29 | size = 1
30 |
31 | ## get dtypes
32 | dt_float, dt_complex = _get_dtype(dtype)
33 |
34 | ## load required files
35 | nt = data.shape[0]
36 | file_weights = os.path.join(results_dir, 'weights.npy')
37 | file_modes = os.path.join(results_dir, 'modes.npy')
38 | file_eigs = os.path.join(results_dir, 'eigs.npz')
39 | file_params = os.path.join(results_dir, 'params_modes.yaml')
40 | weights = np.lib.format.open_memmap(file_weights)
41 | phir = np.lib.format.open_memmap(file_modes)
42 | eigs = np.load(file_eigs)
43 | with open(file_params) as f:
44 | params = yaml.load(f, Loader=yaml.FullLoader)
45 |
46 | ## get required parameters
47 | nv = params['n_variables']
48 | xdim = params['n_space_dims']
49 | n_modes_save = phir.shape[-1]
50 |
51 | ## set datatypes
52 | data = _set_dtype(data, dtype)
53 | phir = _set_dtype(phir, dtype)
54 | weights = _set_dtype(weights, dtype)
55 |
56 | ## distribute data and weights if parallel
57 | data, max_axis, _ = utils_par.distribute_data(data=data, comm=comm)
58 | weights = utils_par.distribute_dimension(
59 | data=weights, max_axis=max_axis, comm=comm)
60 |
61 | # distribute modes if parallel
62 | phir = utils_par.distribute_dimension(\
63 | data=phir, max_axis=max_axis, comm=comm)
64 | phir = np.reshape(phir, [data[0,...].size,n_modes_save])
65 |
66 | ## add axis for single variable
67 | if not isinstance(data,np.ndarray): data = data.values
68 | if (nv == 1) and (data.ndim != xdim + 2):
69 | data = data[...,np.newaxis]
70 | xshape_nv = data[0,...].shape
71 |
72 | ## flatten spatial x variable dimensions
73 | data = np.reshape(data, [nt, data[0,...].size])
74 | weights = np.reshape(weights, [data[0,...].size, 1])
75 | utils_par.pr0(f'- I/: {time.time() - s0} s.', comm)
76 | st = time.time()
77 |
78 | ## compute time mean and subtract from data (reuse the one from fit?)
79 | lt_mean = np.mean(data, axis=0); data = data - lt_mean
80 | utils_par.pr0(f'- data and time mean: {time.time() - st} s.', comm)
81 | st = time.time()
82 |
83 | # compute coefficients
84 | coeffs = np.transpose(phir) @ np.transpose(data)
85 | coeffs = utils_par.allreduce(data=coeffs, comm=comm)
86 | utils_par.pr0(f'- phir x data: {time.time() - s0} s.', comm)
87 | st = time.time()
88 | del data
89 |
90 | ## create coeffs folder
91 | coeffs_dir = os.path.join(results_dir, f'coeffs')
92 | if savedir is not None:
93 | coeffs_dir = os.path.join(coeffs_dir, savedir)
94 | if rank == 0:
95 | if not os.path.exists(coeffs_dir): os.makedirs(coeffs_dir)
96 | utils_par.barrier(comm)
97 |
98 | # save coefficients
99 | file_coeffs = os.path.join(coeffs_dir, 'coeffs.npy')
100 | if rank == 0: np.save(file_coeffs, coeffs)
101 |
102 | ## save auxiliary files
103 | file_phir = os.path.join(coeffs_dir, 'modes_r.npy')
104 | file_lt_mean = os.path.join(coeffs_dir, 'ltm.npy')
105 | shape_tmp = (*xshape_nv,n_modes_save)
106 | shape_phir = [*shape_tmp]
107 | shape_lt_mean = [*xshape_nv]
108 | if comm:
109 | shape_phir[max_axis] = -1
110 | shape_lt_mean[max_axis] = -1
111 | phir.shape = shape_tmp
112 | lt_mean.shape = xshape_nv
113 | utils_par.npy_save(comm, file_phir, phir, axis=max_axis)
114 | utils_par.npy_save(comm, file_lt_mean, lt_mean, axis=max_axis)
115 | utils_par.pr0(f'- /O: {time.time() - s0} s.', comm)
116 | st = time.time()
117 |
118 | ## dump file with coeffs params
119 | params['coeffs_dir' ] = str(coeffs_dir)
120 | params['modes_idx' ] = modes_idx
121 | params['max_axis' ] = int(max_axis)
122 | path_params_coeffs = os.path.join(coeffs_dir, 'params_coeffs.yaml')
123 | with open(path_params_coeffs, 'w') as f: yaml.dump(params, f)
124 | utils_par.pr0(f'- saving completed: {time.time() - st} s.', comm)
125 | utils_par.pr0(f'---------------------------------------' , comm)
126 | utils_par.pr0(f'Coefficients saved in: {file_coeffs}' , comm)
127 | utils_par.pr0(f'Elapsed time: {time.time() - s0} s.' , comm)
128 | utils_par.barrier(comm)
129 | return file_coeffs, coeffs_dir
130 |
131 |
132 | def compute_reconstruction(
133 | coeffs_dir, time_idx, coeffs=None,
134 | savedir=None, filename=None, dtype='double', comm=None):
135 | '''
136 | Reconstruct original data through oblique projection.
137 | '''
138 | s0 = time.time()
139 | utils_par.pr0('\nReconstructing data from coefficients ...', comm)
140 | if comm:
141 | rank = comm.rank
142 | size = comm.size
143 | else:
144 | rank = 0
145 | size = 1
146 |
147 | ## get dtypes
148 | dt_float, dt_complex = _get_dtype(dtype)
149 |
150 | ## load required files
151 | coeffs_dir = os.path.join(CWD, coeffs_dir)
152 | file_lt_mean = os.path.join(coeffs_dir, 'ltm.npy')
153 | file_phir = os.path.join(coeffs_dir, 'modes_r.npy')
154 | file_params = os.path.join(coeffs_dir, 'params_coeffs.yaml')
155 | lt_mean = np.lib.format.open_memmap(file_lt_mean)
156 | phir = np.lib.format.open_memmap(file_phir)
157 | with open(file_params) as f:
158 | params = yaml.load(f, Loader=yaml.FullLoader)
159 | xshape_nv = lt_mean.shape
160 | ## try to load coefficients from file if not provided
161 | if coeffs is None:
162 | try:
163 | file_coeffs = os.path.join(coeffs_dir, 'coeffs.npy')
164 | coeffs = np.lib.format.open_memmap(file_coeffs)
165 | except:
166 | raise Exception('`coeffs` file not found.')
167 |
168 | ## set datatypes
169 | coeffs = _set_dtype(coeffs, dtype)
170 | phir = _set_dtype(phir, dtype)
171 |
172 | # get time snapshots to be reconstructed
173 | nt = coeffs.shape[1]
174 | if time_idx is None:
175 | time_idx = [0, nt//2, nt-1]  # default: first, middle and last snapshots
176 | elif isinstance(time_idx, str):
177 | if time_idx.lower() == 'all': time_idx = np.arange(0, nt)
178 | elif time_idx.lower() == 'half': time_idx = np.arange(0, nt, 2)
179 | elif time_idx.lower() == 'quarter': time_idx = np.arange(0, nt, 4)
180 | elif time_idx.lower() == 'tenth': time_idx = np.arange(0, nt, 10)
181 | elif time_idx.lower() == 'hundredth': time_idx = np.arange(0, nt, 100)
182 | elif isinstance(time_idx, list):
183 | time_idx = time_idx
184 | else:
185 | raise TypeError('`time_idx` parameter type not recognized.')
186 |
187 | ## distribute modes_r and longtime mean
188 | max_axis = params['max_axis']
189 | phir = utils_par.distribute_dimension(
190 | data=phir, max_axis=max_axis, comm=comm)
191 | lt_mean = utils_par.distribute_dimension(
192 | data=lt_mean, max_axis=max_axis, comm=comm)
193 |
194 | ## phi x coeffs
195 | Q_reconstructed = phir @ coeffs[:,time_idx]
196 | utils_par.pr0(f'- phi x coeffs completed: {time.time() - s0} s.', comm)
197 | st = time.time()
198 | del phir, coeffs
199 |
200 | ## add time mean
201 | Q_reconstructed = Q_reconstructed + lt_mean[...,None]
202 | utils_par.pr0(f'- added time mean: {time.time() - st} s.', comm)
203 | st = time.time()
204 | del lt_mean
205 |
206 | ## reshape and save reconstructed solution
207 | if filename is None: filename = 'reconstructed'
208 | if savedir is not None:
209 | coeffs_dir = os.path.join(coeffs_dir, savedir)
210 | if rank == 0:
211 | if not os.path.exists(coeffs_dir): os.makedirs(coeffs_dir)
212 | utils_par.barrier(comm)
213 | file_dynamics = os.path.join(coeffs_dir, filename+'.npy')
214 | shape = [*xshape_nv,len(time_idx)]
215 | if comm:
216 | shape[max_axis] = -1
217 | Q_reconstructed.shape = shape
218 | Q_reconstructed = np.moveaxis(Q_reconstructed, -1, 0)
219 | utils_par.npy_save(comm, file_dynamics, Q_reconstructed, axis=max_axis+1)
220 | utils_par.pr0(f'- data saved: {time.time() - st} s.' , comm)
221 | utils_par.pr0(f'--------------------------------------------', comm)
222 | utils_par.pr0(f'Reconstructed data saved in: {file_dynamics}', comm)
223 | utils_par.pr0(f'Elapsed time: {time.time() - s0} s.' , comm)
224 | utils_par.barrier(comm)
225 | return file_dynamics, coeffs_dir
226 |
227 |
228 | def _get_dtype(dtype):
229 | if dtype == 'double':
230 | d_float = np.float64
231 | d_complex = np.complex128
232 | else:
233 | d_float = np.float32
234 | d_complex = np.complex64
235 | return d_float, d_complex
236 |
237 |
238 | def _set_dtype(d, dtype):
239 | ## set data type
240 | dt_float, dt_complex = _get_dtype(dtype)
241 | if d.dtype == float : d = d.astype(dt_float )
242 | elif d.dtype == complex: d = d.astype(dt_complex)
243 | return d
244 |
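A minimal sketch of how the two functions above chain together, assuming `results_dir` points to the output folder of a previous POD fit (it must contain the weights.npy, modes.npy, eigs.npz and params_modes.yaml files loaded by `compute_coeffs_op`):

    import numpy as np
    import pyspod.pod.utils as pod_utils

    data = np.random.rand(100, 20, 10)        # same layout as the data used for the fit
    results_dir = 'results_pod/run_00'        # hypothetical path

    # project the (mean-subtracted) data onto the saved modes
    file_coeffs, coeffs_dir = pod_utils.compute_coeffs_op(
        data=data, results_dir=results_dir)

    # reconstruct all time snapshots from the coefficients
    file_recon, _ = pod_utils.compute_reconstruction(
        coeffs_dir=coeffs_dir, time_idx='all')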
--------------------------------------------------------------------------------
/pyspod/spod/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/spod/__init__.py
--------------------------------------------------------------------------------
/pyspod/spod/standard.py:
--------------------------------------------------------------------------------
1 | '''Derived module from spod_base.py for standard SPOD.'''
2 |
3 | # Import standard Python packages
4 | import os
5 | import sys
6 | import time
7 | import math
8 |
9 | import numpy as np
10 | from numpy import linalg as la
11 | import scipy.io.matlab as siom
12 |
13 | # Import custom Python packages
14 | from pyspod.spod.base import Base
15 | import pyspod.utils.parallel as utils_par
16 | try:
17 | from mpi4py import MPI
18 | except:
19 | pass
20 |
21 | class Standard(Base):
22 | '''
23 | Class that implements a distributed batch version of the
24 | Spectral Proper Orthogonal Decomposition algorithm applied to the input data.
25 |
26 | The computation is performed on the *data* passed
27 | to the `fit` method of the `Standard` class, derived
28 | from the `Base` class.
29 | '''
30 | def fit(self, data_list, variables = None):
31 | '''
32 | Class-specific method to fit the data matrix using the SPOD
33 | batch algorithm.
34 |
35 | :param list data_list: list containing data matrices for which
36 | to compute the SPOD.
37 | '''
38 | start0 = time.time()
39 | start = time.time()
40 |
41 | ## initialize data and variables
42 | self._pr0(f' ')
43 | self._pr0(f'Initialize data ...')
44 | self._initialize(data_list, variables)
45 | self._pr0(f'Time to initialize: {time.time() - start} s.')
46 |
47 | start = time.time()
48 |
49 | self._pr0(f' ')
50 | self._pr0(f'Calculating temporal DFT (parallel)')
51 | self._pr0(f'------------------------------------')
52 |
53 | # check if blocks are already saved in memory
54 | blocks_present = False
55 | if self._reuse_blocks:
56 | blocks_present = self._are_blocks_present(
57 | self._n_blocks, self._n_freq, self._blocks_folder, self._comm)
58 |
59 | # loop over number of blocks and generate Fourier realizations,
60 | # if blocks are not saved in storage
61 |
62 | ## check if blocks already computed or not
63 | if blocks_present:
64 | # load blocks if present
65 | Q_hats = {}
66 | size_qhat = [*self._xshape, self._n_blocks]
67 | for f in range(self._n_freq):
68 | Q_hats[f] = np.empty(size_qhat, dtype=self._complex)
69 |
70 | for i_blk in range(0, self._n_blocks):
71 | print(f'Loading block {i_blk}/{self._n_blocks}')
72 | for i_freq in range(0, self._n_freq):
73 | file = f'fft_block{i_blk:08d}_freq{i_freq:08d}.npy'
74 | path = os.path.join(self._blocks_folder, file)
75 | Q_hats[i_freq][...,i_blk] = np.load(path)
76 | for f in range(self._n_freq):
77 | Q_hats[f] = utils_par.distribute_dimension(
78 | data=Q_hats[f], max_axis=self._max_axis, comm=self._comm)
79 | qhat = Q_hats[f]
80 | shape = [qhat[...,0].size, qhat.shape[-1]]
81 | Q_hats[f] = np.reshape(Q_hats[f], shape)
82 | del self.data
83 | else:
84 | # loop over number of blocks and generate Fourier realizations
85 | if isinstance(self.data, dict):
86 | last_key = list(self.data)[-1]
87 | last_val = self.data[last_key]["v"]
88 | xvsize = last_val[0,...].size
89 | else:
90 | xvsize = self.data[0,...].size
91 |
92 | Q_hats = {}
93 | for i_blk in range(0,self._n_blocks):
94 | st = time.time()
95 |
96 | # compute block
97 | qhat = np.empty([self._n_freq, xvsize], dtype=self._complex)
98 | qhat[:], offset = self._compute_blocks(i_blk)
99 | Q_hats[i_blk] = {}
100 | for f in range(self._n_freq):
101 | Q_hats[i_blk][f] = qhat[f,:].copy()
102 |
103 | # save FFT blocks in storage memory
104 | if self._savefft == True:
105 | Q_blk_hat = qhat
106 | for i_freq in range(0, self._n_freq):
107 | Q_blk_hat_fr = Q_blk_hat[i_freq,:]
108 | file = f'fft_block{i_blk:08d}_freq{i_freq:08d}.npy'
109 | path = os.path.join(self._blocks_folder, file)
110 | shape = [*self._xshape]
111 | if self._comm: shape[self._max_axis] = -1
112 | Q_blk_hat_fr.shape = shape
113 | utils_par.npy_save(
114 | self._comm, path, Q_blk_hat_fr,
115 | axis=self._max_axis)
116 |
117 | # print info file
118 | self._pr0(f'block {(i_blk+1)}/{(self._n_blocks)}'
119 | f' ({(offset)}:{(self._n_dft+offset)}); '
120 | f'Elapsed time: {time.time() - st} s.')
121 |
122 | del self.data
123 |
124 | st = time.time()
125 | # move from Q_hats[i_blk][f] to Q_hats[f]
126 | Q_hats = self._flip_qhat(Q_hats)
127 | self._pr0(f'- Time spent transposing Q_hats dictionaries: {time.time() - st} s.')
128 |
129 | self._pr0(f'------------------------------------')
130 | self._pr0(f'Time to compute DFT: {time.time() - start} s.')
131 | if self._comm: self._comm.Barrier()
132 | start = time.time()
133 |
134 | # Loop over all frequencies and calculate SPOD
135 | self._pr0(f' ')
136 | self._pr0(f'Calculating SPOD (parallel)')
137 | self._pr0(f'------------------------------------')
138 | self._eigs = np.zeros([self._n_freq,self._n_blocks],
139 | dtype=self._complex)
140 |
141 | ## compute standard spod
142 | self._compute_standard_spod(Q_hats)
143 |
144 | # store and save results
145 | self._store_and_save()
146 | self._pr0(f'------------------------------------')
147 | self._pr0(f' ')
148 | self._pr0(f'Results saved in folder {self._savedir_sim}')
149 | self._pr0(f'Time to compute SPOD: {time.time() - start} s.')
150 | self._pr0(f'------------------------------------')
151 | self._pr0(f' ')
152 | self._pr0(f'Total time: {time.time() - start0} s.')
153 | if self._comm: self._comm.Barrier()
154 | return self
155 |
156 |
157 | def _compute_blocks(self, i_blk):
158 | '''Compute FFT blocks.'''
159 | # get time index for present block
160 | offset = min(i_blk * (self._n_dft - self._n_overlap) \
161 | + self._n_dft, self._nt) - self._n_dft
162 |
163 | Q_blk = self._get_block(offset, offset+self._n_dft)
164 |
165 | # Subtract longtime or provided mean
166 | Q_blk -= self._t_mean
167 |
168 | # if block mean is to be subtracted,
169 | # do it now that all data is collected
170 | if self._mean_type.lower() == 'blockwise':
171 | Q_blk -= np.mean(Q_blk, axis=0)
172 |
173 | # normalize by pointwise variance
174 | if self._normalize_data:
175 | den = self._n_dft - 1
176 | Q_var = np.sum((Q_blk - np.mean(Q_blk, axis=0))**2, axis=0) / den
177 | # address division-by-0 problem with NaNs
178 | Q_var[Q_var < 4 * np.finfo(float).eps] = 1
179 | Q_blk /= Q_var
180 |
181 | Q_blk *= self._window
182 | Q_blk = self._set_dtype(Q_blk)
183 |
184 | if self._isrealx and not self._fullspectrum:
185 | Q_blk_hat = (self._win_weight / self._n_dft) * np.fft.rfft(Q_blk, axis=0)
186 | else:
187 | Q_blk_hat = (self._win_weight / self._n_dft) * np.fft.fft(Q_blk, axis=0)[0:self._n_freq,:]
188 | return Q_blk_hat, offset
189 |
190 | def _compute_standard_spod(self, Q_hats):
191 | '''Compute standard SPOD.'''
192 |
193 | comm = self._comm
194 | # compute inner product in frequency space, for given frequency
195 | st = time.time()
196 | M = [None]*self._n_freq
197 | for f in range(0,self._n_freq):
198 | Q_hat_f = np.squeeze(Q_hats[f])
199 | M[f] = Q_hat_f.conj().T @ (Q_hat_f * self._weights) / self._n_blocks
200 | del Q_hat_f
201 | M = np.stack(M)
202 | M = utils_par.allreduce(data=M, comm=self._comm)
203 | self._pr0(f'- M computation: {time.time() - st} s.')
204 | st = time.time()
205 |
206 | ## compute eigenvalues and eigenvectors
207 | L, V = la.eig(M)
208 | L = np.real_if_close(L, tol=1000000)
209 | del M
210 |
211 |
212 | # reorder eigenvalues and eigenvectors
213 | ## double non-zero freq and non-Nyquist
214 | for f, Lf in enumerate(L):
215 | idx = np.argsort(Lf)[::-1]
216 | L[f,:] = L[f,idx]
217 | vf = V[f,...]
218 | vf = vf[:,idx]
219 | V[f] = vf
220 | self._pr0(f'- Eig computation: {time.time() - st} s.')
221 | st = time.time()
222 |
223 | # compute spatial modes for given frequency
224 | L_diag = np.sqrt(self._n_blocks) * np.sqrt(L)
225 | L_diag_inv = 1. / L_diag
226 |
227 | if not self._savefreq_disk2:
228 | for f in range(0,self._n_freq):
229 | s0 = time.time()
230 | ## compute
231 | phi = np.matmul(Q_hats[f], V[f,...] * L_diag_inv[f,None,:])
232 | phi = phi[...,0:self._n_modes_save]
233 | del Q_hats[f]
234 |
235 | sstime = time.time()
236 | ## save modes
237 | if self._savefreq_disk:
238 | filename = f'freq_idx_{f:08d}.npy'
239 | p_modes = os.path.join(self._modes_dir, filename)
240 |
241 | shape = [*self._xshape,self._nv,self._n_modes_save]
242 |
243 | if comm:
244 | shape[self._max_axis] = -1
245 |
246 | phi.shape = shape
247 | utils_par.npy_save(self._comm, p_modes, phi, axis=self._max_axis)
248 |
249 | self._pr0(
250 | f'freq: {f+1}/{self._n_freq}; (f = {self._freq[f]:.5f}); '
251 | f'Elapsed time: {(time.time() - s0):.5f} s.')
252 |
253 |
254 | ####################################
255 | ####################################
256 | ####################################
257 | else: # savefreq_disk2
258 | ####################################
259 | ####################################
260 | ####################################
261 | assert self._reader._flattened, "savefreq_disk2 currently only works with flattened data"
262 | rank = comm.rank
263 | ftype = MPI.C_FLOAT_COMPLEX if self._complex==np.complex64 else MPI.C_DOUBLE_COMPLEX
264 |
265 | cum_cctime = 0
266 | cum_sstime = 0
267 |
268 | phi_dict = {}
269 | for f in range(0,self._n_freq):
270 | s0 = time.time()
271 | phi_dict[f] = {}
272 | phi = np.matmul(Q_hats[f], V[f,...] * L_diag_inv[f,None,:])[:,:self._n_modes_save]
273 | Q_hats[f] = None
274 | cum_cctime += time.time() - s0
275 |
276 | s1 = time.time()
277 | for m in range(0,self._n_modes_save):
278 | phi_dict[f][m] = phi[:,m].copy() # make sure modes beyond n_modes_save can be deallocated
279 | del phi
280 | cum_sstime += time.time() - s1
281 |
282 | self._pr0(
283 | f'freq: {f+1}/{self._n_freq}; (f = {self._freq[f]:.5f}); '
284 | f'Elapsed time: {(time.time() - s0):.5f} s.')
285 |
286 | del V
287 | del Q_hats
288 |
289 | sstime = time.time()
290 |
291 | # get max phi shape
292 | phi0_max = comm.allreduce(phi_dict[0][0].shape[0], op=MPI.MAX)
293 | phi_dtype = phi_dict[0][0].dtype
294 | mpi_dtype = ftype.Create_contiguous(phi0_max).Commit()
295 | local_elements = np.array(phi_dict[0][0].shape[0])
296 | recvcounts = np.zeros(comm.size, dtype=np.int64)
297 | comm.Allgather(local_elements, recvcounts)
298 |
299 | total_files = self._n_freq * self._n_modes_save
300 |
301 | for ipass in range(0,math.ceil(total_files/comm.size)):
302 | write_s = ipass * comm.size
303 | write_e = min((ipass+1) * comm.size, total_files)
304 | write = None
305 |
306 | data = np.zeros(phi0_max*comm.size, dtype=phi_dtype)
307 |
308 | s_msgs = {}
309 | reqs_r = []
310 | reqs_s = []
311 |
312 | for i in range(write_s, write_e):
313 | f = i // self._n_modes_save
314 | m = i % self._n_modes_save
315 | writer = i % comm.size
316 |
317 | s_msgs[i] = [np.zeros(phi0_max, dtype=phi_dtype), mpi_dtype]
318 | s_msgs[i][0][0:phi_dict[f][m].shape[0]] = phi_dict[f][m][:] # phi0_max-shaped and 0-padded
319 | del phi_dict[f][m]
320 | reqs_s.append(comm.Isend(s_msgs[i], dest=writer))
321 |
322 | if rank == writer:
323 | write = (f,m)
324 | for irank in range(comm.size):
325 | reqs_r.append(comm.Irecv([data[phi0_max*irank:],mpi_dtype],source=irank))
326 |
327 | MPI.Request.Waitall(reqs_s)
328 | s_msgs = {}
329 |
330 | if write:
331 | f, m = write
332 | xtime = time.time()
333 | MPI.Request.Waitall(reqs_r)
334 | self._pr0(f' Waitall({len(reqs_r)}) {time.time()-xtime} seconds')
335 |
336 | for irank in range(comm.size):
337 | start = irank*phi0_max
338 | end = start+recvcounts[irank]
339 | start_nopad = np.sum(recvcounts[:irank])
340 | end_nopad = np.sum(recvcounts[:irank+1])
341 | data[start_nopad:end_nopad,...] = data[start:end,...]
342 |
343 | # write to disk
344 | data = data[:np.sum(recvcounts)].reshape(self._xshape+(self._nv,))
345 | filename = f'freq_idx_f{f:08d}_m{m:08d}.npy'
346 | print(f'rank {rank} saving {filename}')
347 | p_modes = os.path.join(self._modes_dir, filename)
348 | np.save(p_modes, data)
349 |
350 | mpi_dtype.Free()
351 |
352 | cum_sstime += time.time() - sstime
353 | self._pr0(f'- Modes computation {cum_cctime} s. Saving: {cum_sstime} s.')
354 |
355 | ## correct Fourier for one-sided spectrum
356 | if self._isrealx:
357 | L[1:-1,:] = 2 * L[1:-1,:]
358 |
359 | # get eigenvalues and confidence intervals
360 | self._eigs = np.abs(L)
361 |
362 | fac_lower = 2 * self._n_blocks / self._xi2_lower
363 | fac_upper = 2 * self._n_blocks / self._xi2_upper
364 | self._eigs_c[...,0] = self._eigs * fac_lower
365 | self._eigs_c[...,1] = self._eigs * fac_upper
366 |
367 | def _get_block(self, start, end):
368 | if isinstance(self.data, dict):
369 | last_key = list(self.data)[-1]
370 | last_val = self.data[last_key]["v"]
371 | Q_blk = np.empty((self._n_dft,)+last_val.shape[1:],dtype=last_val.dtype)
372 |
373 | cnt = 0
374 | bstart = start
375 | for k,v in self.data.items():
376 | v_s = v["s"]
377 | v_e = v["e"]
378 |
379 | read_here_s = max(v_s, bstart)
380 | read_here_e = min(v_e, end)
381 | read_here_cnt = read_here_e - read_here_s
382 |
383 | if read_here_cnt > 0:
384 | vals = v["v"]
385 | Q_blk[cnt:cnt+read_here_cnt,...] = vals[read_here_s-v_s:read_here_e-v_s,...]
386 | cnt += read_here_cnt
387 | bstart += read_here_cnt
388 |
389 | assert cnt == end-start, f'Not enough data read: cnt ({cnt}) != end-start ({end-start})'
390 |
391 | # delete blocks that are no longer needed
392 | keys_to_del = []
393 | for k,v in self.data.items():
394 | v_s = v["s"]
395 | v_e = v["e"]
396 | if start > v_e:
397 | keys_to_del.append(k)
398 |
399 | for k in keys_to_del:
400 | del self.data[k]
401 |
402 | Q_blk = Q_blk.reshape(self._n_dft, last_val[0,...].size)
403 | return Q_blk
404 | else:
405 | Q_blk = self.data[start:end,...].copy()
406 | Q_blk = Q_blk.reshape(self._n_dft, self.data[0,...].size)
407 | return Q_blk
408 |
409 | def _flip_qhat(self, Q_hats):
410 | last_blk = list(Q_hats)[-1]
411 | last_frq = Q_hats[last_blk]
412 | last_val = last_frq[list(last_frq)[-1]]
413 | xvsize = last_val.size
414 |
415 | Q_hat_f = {}
416 |
417 | for f in range(0,self._n_freq):
418 | Q_hat_f[f] = np.zeros((xvsize, self._n_blocks),dtype=last_val.dtype)
419 | for b,v in Q_hats.items():
420 | Q_hat_f[f][:,b] = v[f][:]
421 | for b,_ in Q_hats.items():
422 | del Q_hats[b][f]
423 | return Q_hat_f
424 |
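A minimal usage sketch for the batch SPOD class above, with hypothetical parameters; the complete set of accepted keys is validated in pyspod/spod/base.py (not shown here):

    import numpy as np
    import pyspod.spod.standard as spod_standard

    params = {
        'time_step'   : 1,
        'n_space_dims': 2,
        'n_variables' : 1,
        'n_dft'       : 64,          # snapshots per FFT block
        'n_modes_save': 3,
        'mean_type'   : 'longtime',
        'savedir'     : 'results_spod',
    }
    data = np.random.rand(1000, 20, 10)       # toy snapshots: (time, x1, x2)
    spod = spod_standard.Standard(params=params)
    spod.fit(data_list=[data])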
--------------------------------------------------------------------------------
/pyspod/spod/streaming.py:
--------------------------------------------------------------------------------
1 | '''Derived module from spod_base.py for streaming SPOD.'''
2 |
3 | # import standard python packages
4 | import os
5 | import time
6 | import numpy as np
7 | from numpy import linalg as la
8 | import pyspod.utils.parallel as utils_par
9 | from pyspod.spod.base import Base
10 |
11 |
12 |
13 | class Streaming(Base):
14 | '''
15 | Class that implements a distributed streaming version of the
16 | Spectral Proper Orthogonal Decomposition algorithm applied to the input data.
17 |
18 | The computation is performed on the data passed
19 | to the `fit` method of the `Streaming` class, derived
20 | from the `Base` class.
21 | '''
22 |
23 | def fit(self, data_list):
24 | '''
25 | Class-specific method to fit the data matrix using the SPOD
26 | streaming algorithm.
27 |
28 | :param list data_list: list containing data matrices for which
29 | to compute the SPOD.
30 | '''
31 | start = time.time()
32 |
33 | ## if user forgets to pass list for single data list,
34 | ## make it to be a list
35 | if not isinstance(data_list, list): data_list = [data_list]
36 |
37 | ## initialize data and variables
38 | self._initialize(data_list, streaming = True)
39 |
40 | ## sqrt of weights
41 | sqrt_w = np.sqrt(self._weights)
42 |
43 | ## separation between adjacent blocks
44 | dn = self._n_dft - self._n_overlap
45 |
46 | ## number of blocks being updated in parallel if segments overlap
47 | n_blocks_par = int(np.ceil(self._n_dft / dn))
48 |
49 | ## sliding, relative time index for each block
50 | t_idx = np.zeros([n_blocks_par,1], dtype=int)
51 | for block_i in range(0,n_blocks_par):
52 | t_idx[block_i] = t_idx[block_i] - (block_i) * dn
53 |
54 | self._pr0(f' ')
55 | self._pr0(f'Calculating temporal DFT (streaming)')
56 | self._pr0(f'------------------------------------')
57 |
58 | ## obtain first snapshot to determine data size
59 | data = self._reader.get_data(ts=0)
60 | assert data.shape[0] == 1, 'Data returned should have been a single snapshot'
61 | flat_dim = int(data[0,...].size)
62 | n_m_save = self._n_modes_save
63 | n_freq = self._n_freq
64 | x_new = data[0,...]
65 | x_new = np.reshape(x_new,(flat_dim,1))
66 |
67 | ## allocate data arrays
68 | mu = np.zeros([flat_dim,1], dtype=self._complex)
69 | x_hat = np.zeros([flat_dim,n_freq],dtype=self._complex)
70 | x_sum = np.zeros([flat_dim,n_freq,n_blocks_par],dtype=self._complex)
71 | phi = np.zeros([flat_dim,n_freq,n_m_save],dtype=self._complex)
72 | u_hat = np.zeros([flat_dim,n_freq,n_m_save],dtype=self._complex)
73 | self._eigs = np.zeros([n_m_save,n_freq],dtype=self._complex)
74 |
75 | ## dft matrix
76 | dft = np.fft.fft(np.identity(self._n_dft))
77 |
78 | ## check if real for frequency axis
79 | if self._isrealx:
80 | dft[:,1:n_freq-1] = 2 * dft[:,1:n_freq-1]
81 | if self._fullspectrum:
82 | freq_idx = np.arange(0, int(self._n_dft), 1)
83 | else:
84 | freq_idx = np.arange(0, int(self._n_dft/2+1))
85 | dft = dft[:,freq_idx]
86 |
87 | # ## convergence tests
88 | # mse_prev = np.empty([int(1e3),n_m_save,n_freq],dtype=complex) * np.nan
89 | # proj_prev = np.empty([n_freq,int(1e3),n_m_save],dtype=complex) * np.nan
90 | # S_hat_prev = np.zeros([n_m_save,n_freq],dtype=complex)
91 |
92 | ## initialize counters
93 | block_i = 0
94 | ti = -1
95 | z = np.zeros([1,n_m_save], dtype=self._float)
96 | while True:
97 | ti = ti + 1
98 |
99 | ## get new snapshot and abort if data stream runs dry
100 | if ti > 0:
101 | try:
102 | x_new = self._reader.get_data(ti)
103 | x_new = np.reshape(x_new,(flat_dim,1))
104 | except:
105 | self._pr0(f'--> Data stream ended.')
106 | break
107 |
108 | ## update sample mean
109 | mu_old = mu
110 | mu = (ti * mu_old + x_new) / (ti + 1)
111 |
112 | ## update incomplete dft sums, eqn (17)
113 | update = False
114 | window = self._window
115 | for block_j in range(0,n_blocks_par):
116 | if t_idx[block_j] > -1:
117 | x_sum[:,:,block_j] = \
118 | x_sum[:,:,block_j] + window[t_idx[block_j]] * \
119 | dft[t_idx[block_j],:] * x_new
120 |
121 | ## check if sum is completed, and if so, initiate update
122 | if t_idx[block_j] == self._n_dft - 1:
123 | update = True
124 | x_hat = x_sum[:,:,block_j].copy()
125 | x_sum[:,:,block_j] = 0
126 | t_idx[block_j] = min(t_idx) - dn
127 | else:
128 | t_idx[block_j] = t_idx[block_j] + 1
129 | del x_new
130 |
131 | ## update basis if a dft sum is completed
132 | if update:
133 | block_i = block_i + 1
134 |
135 | ## subtract mean contribution to dft sum
136 | for row_idx in range(0,self._n_dft):
137 | x_hat = x_hat - (window[row_idx] * dft[row_idx,:]) * mu
138 |
139 | ## correct for windowing function and apply
140 | ## 1/self._n_dft factor
141 | x_hat = self._win_weight / self._n_dft * x_hat
142 |
143 | if block_i == 0:
144 | ## initialize basis with first vector
145 | self._pr0(
146 | f'--> Initializing left singular vectors; '
147 | f' Time {str(ti)} / block {str(block_i)}')
148 | u_hat[:,:,0] = x_hat * sqrt_w
149 | self._eigs[0,:] = np.sum(abs(u_hat[:,:,0]**2))
150 | else:
151 | ## update basis
152 | self._pr0(
153 | f'--> Updating left singular vectors; '
154 | f' Time {str(ti)} / block {str(block_i)}')
155 |
156 | # S_hat_prev = self._eigs.copy()
157 | for i_freq in range(0,n_freq):
158 |
159 | ## new data (weighted)
160 | x = x_hat[:,[i_freq]] * sqrt_w[:]
161 |
162 | ## old basis
163 | U = np.squeeze(u_hat[:,i_freq,:])
164 |
165 | ## old singular values
166 | S = np.diag(np.squeeze(self._eigs[:,i_freq]))
167 |
168 | ## product U^H*x needed in eqns. (27,32)
169 | Ux = np.matmul(U.conj().T, x)
170 | Ux = utils_par.allreduce(Ux, comm=self._comm)
171 |
172 | ## orthogonal complement to U, eqn. (27)
173 | u_p = x - np.matmul(U, Ux)
174 |
175 | ## norm of orthogonal complement
176 | abs_up = np.matmul(u_p.conj().T, u_p)
177 | abs_up = utils_par.allreduce(abs_up, comm=self._comm)
178 | abs_up = np.sqrt(abs_up)
179 |
180 | ## normalized orthogonal complement
181 | u_new = u_p / abs_up
182 | del u_p
183 |
184 | ## build K matrix and compute its SVD, eqn. (32)
185 | K_1 = np.hstack((np.sqrt(block_i+2) * S, Ux))
186 | del Ux
187 |
188 | K_2 = np.hstack((z, abs_up))
189 | K = np.vstack((K_1, K_2))
190 | del K_1, K_2
191 | K = np.sqrt((block_i+1) / (block_i+2)**2) * K
192 |
193 | ## calculate partial svd
194 | Up, Sp, _ = la.svd(K, full_matrices=False)
195 | del K
196 |
197 | ## update U as in eqn. (33)
198 | ## for simplicity, we could not rotate here and instead
199 | ## update U<-[U p] and Up<-[Up 0;0 1]*Up and rotate
200 | ## later; see Brand (LAA ,2006, section 4.1)
201 | U_tmp = np.hstack((U, u_new))
202 | U = np.dot(U_tmp, Up)
203 | del U_tmp
204 |
205 | ## best rank-k approximation, eqn. (37)
206 | u_hat[:,i_freq,:] = U[:,0:self._n_modes_save]
207 | self._eigs[:,i_freq] = Sp[0:self._n_modes_save]
208 |
209 | ## reset dft sum
210 | x_hat[:,:] = 0
211 |
212 | # phi_prev = phi
213 | # phi = u_hat * (1 / sqrt_w[:,:,np.newaxis])
214 |
215 | # ## convergence
216 | # for i_freq in range(0,n_freq):
217 | # proj_i_freq = (np.squeeze(phi_prev[:,i_freq,:]) * \
218 | # self._weights).conj().T @ np.squeeze(phi[:,i_freq,:])
219 | # proj_prev[i_freq,block_i,:] = \
220 | # np.amax(np.abs(proj_i_freq), axis=0)
221 | # mse_prev[block_i,:,:] = (np.abs(S_hat_prev**2 - \
222 | # self._eigs**2)**2) / (S_hat_prev**2)
223 |
224 | ## rescale such that _E = U_i^H * W * U_j = delta_ij
225 | phi = u_hat[:,:,0:n_m_save] * (1 / sqrt_w[:,:,np.newaxis])
226 |
227 | # ## shuffle and reshape
228 | phi = np.einsum('ijk->jik', phi)
229 |
230 | ## save modes
231 | for f in range(0,n_freq):
232 | filename = f'freq_idx_{f:08d}.npy'
233 | path_modes = os.path.join(self._modes_dir, filename)
234 | shape = [*self._xshape, self._nv, self._n_modes_save]
235 | if self._comm:
236 | shape[self._max_axis] = -1
237 | phif = phi[f,...]
238 | phif.shape = shape
239 | utils_par.npy_save(
240 | self._comm, path_modes, phif, axis=self._max_axis)
241 |
242 | # ## save modes
243 | # self._modes_dir = 'modes.npy'
244 | # path_modes = os.path.join(self._savedir_sim, self._modes_dir)
245 | # shape = [self._n_freq, *self._xshape, self._nv, self._n_modes_save]
246 | # if self._comm:
247 | # shape[self._max_axis+1] = -1
248 | # phi.shape = shape
249 | # utils_par.npy_save(self._comm, path_modes, phi, axis=self._max_axis+1)
250 |
251 | ## transpose eigs
252 | self._eigs = self._eigs.T
253 |
254 | # store and save results
255 | self._store_and_save()
256 | self._pr0(f'------------------------------------')
257 | self._pr0(f' ')
258 | self._pr0(f'Results saved in folder {self._savedir_sim}')
259 | self._pr0(f'Time to compute SPOD: {time.time() - start} s.')
260 | if self._comm: self._comm.Barrier()
261 | return self
262 |
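The streaming variant is driven the same way; a sketch under the same hypothetical parameters, the only difference being that snapshots are consumed one at a time through the internal reader:

    import numpy as np
    import pyspod.spod.streaming as spod_streaming

    params = {
        'time_step': 1, 'n_space_dims': 2, 'n_variables': 1,
        'n_dft': 64, 'n_modes_save': 3, 'savedir': 'results_streaming',
    }
    data = np.random.rand(1000, 20, 10)
    spod = spod_streaming.Streaming(params=params)
    spod.fit(data_list=[data])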
--------------------------------------------------------------------------------
/pyspod/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/utils/__init__.py
--------------------------------------------------------------------------------
/pyspod/utils/errors.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | import numpy as np
3 | from numpy import linalg
4 |
5 |
6 | def compute_l_errors(data, data_ref, norm_type='l2'):
7 | '''
8 | Compute l-norm errors of data with respect to reference data.
9 |
10 | :param numpy.ndarray data: data.
11 | :param numpy.ndarray data_ref: reference data.
12 | :param str norm_type: type of norm to be used. Default is 'l2'.
13 |
14 | :return: the computed error.
15 | :rtype: numpy.ndarray
16 | '''
17 | n = data.size
18 | e = np.abs(data - data_ref)
19 | ef = e.flatten('C')
20 | e_rel = ef / data_ref.flatten('C')
21 | if norm_type == 'l1' : error_norm = np.linalg.norm(ef, 1) / n
22 | elif norm_type == 'l2' : error_norm = np.linalg.norm(ef) / n
23 | elif norm_type == 'linf': error_norm = np.amax(ef)
24 | elif norm_type == 'l1_rel' : error_norm = np.linalg.norm(e_rel, 1) / n
25 | elif norm_type == 'l2_rel' : error_norm = np.linalg.norm(e_rel) / n
26 | elif norm_type == 'linf_rel': error_norm = np.amax(e_rel)
27 | else:
28 | raise ValueError(norm_type, ' not implemented.')
29 | return error_norm
30 |
31 |
32 | def compute_h_errors(data, data_ref, dt, norm_type='h1'):
33 | '''
34 | Compute h-norm errors of data with respect to reference data.
35 |
36 | :param numpy.ndarray data: data.
37 | :param numpy.ndarray data_ref: reference data.
38 | :param float dt: data time step.
39 | :param str norm_type: type of norm to be used. Default is 'h1'.
40 |
41 | :return: the computed error.
42 | :rtype: numpy.ndarray
43 | '''
44 | # warnings.warn("warning: for h1 calculation, dim0 must be time.")
45 | if norm_type == 'h1':
46 | err_h1 = 0
47 | for i in range(data.shape[0]):
48 | if i == 0:
49 | uprime = 0
50 | utrueprime = 0
51 | else:
52 | uprime = (data[i,...] - data[i-1,...]) / dt
53 | utrueprime = (data_ref[i,...] - data_ref[i-1,...]) / dt
54 | err_h1 = err_h1 + (dt * (np.sum(uprime - utrueprime)**2))
55 | error_norm = np.sqrt(err_h1)
56 |
57 | else:
58 | raise ValueError(norm_type, ' not implemented.')
59 | return error_norm
60 |
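A short example of the two error helpers above on synthetic data:

    import numpy as np
    from pyspod.utils.errors import compute_l_errors, compute_h_errors

    data_ref = np.random.rand(50, 10)
    data     = data_ref + 1e-3 * np.random.rand(50, 10)

    err_l2  = compute_l_errors(data, data_ref, norm_type='l2')
    err_rel = compute_l_errors(data, data_ref, norm_type='linf_rel')
    err_h1  = compute_h_errors(data, data_ref, dt=0.1, norm_type='h1')
    print(err_l2, err_rel, err_h1)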
--------------------------------------------------------------------------------
/pyspod/utils/io.py:
--------------------------------------------------------------------------------
1 | '''Module implementing I/O utils used across the library.'''
2 | import os
3 | import sys
4 | import yaml
5 | import h5py
6 | import argparse
7 | import numpy as np
8 | import xarray as xr
9 | from os.path import splitext
10 |
11 |
12 |
13 | def read_data(data_file, format=None, comm=None):
14 | '''
15 | Read data file provided in some standard formats.
16 |
17 | :param str data_file: path to data file.
18 | :param str format: type of format to be read. Default is None.
19 | :param MPI.Comm comm: parallel communicator. Default is None.
20 |
21 | :return: the data from the data_file.
22 | :rtype: numpy.ndarray
23 | '''
24 | if not format:
25 | _, format = splitext(data_file)
26 | if comm:
27 | if comm.rank == 0: print(f'reading data with format: {format}')
28 | format = format.lower()
29 | if format == '.npy' or format == 'npy':
30 | d = np.load(data_file)
31 | elif format == '.nc' or format == 'nc':
32 | d = xr.open_dataset(data_file)
33 | elif format == '.mat' or format == 'mat':
34 | with h5py.File(data_file, 'r') as f:
35 | d = dict()
36 | for k, v in f.items():
37 | d[k] = np.array(v)
38 | else:
39 | raise ValueError(format, ' format not supported')
40 | return d
41 |
42 |
43 | def read_config(parsed_file=None):
44 | '''
45 | Parse command line for a config file.
46 |
47 | :param str parsed_file: file to be parsed. Default is None.
48 | Parsing happens on the command line.
49 |
50 | :return: the parameters read from the config file.
51 | :rtype: dict
52 | '''
53 | parser = argparse.ArgumentParser(description='Config file.')
54 | parser.add_argument('--config_file', help='Configuration file.')
55 | if parsed_file:
56 | args = parser.parse_args(['--config_file', parsed_file])
57 | else:
58 | args = parser.parse_args()
59 |
60 | ## read yaml file
61 | with open(args.config_file) as file:
62 | l = yaml.load(file, Loader=yaml.FullLoader)
63 |
64 | ## get required keys
65 | l_req = l['required']
66 | keys_r = ['time_step', 'n_space_dims', 'n_variables', 'n_dft']
67 | params_req = _parse_yaml(l_req)
68 | f, k = _check_keys(params_req, keys_r)
69 | f, _ = _check_keys(l, 'optional')
70 | if f:
71 | l_opt = l['optional']
72 | params_opt = _parse_yaml(l_opt)
73 | params = {**params_req, **params_opt}
74 | else:
75 | params = params_req
76 | return params
77 |
78 |
79 | def _parse_yaml(l):
80 | params = dict()
81 | for i,d in enumerate(l):
82 | k = list(d.keys())[0]
83 | v = d[k]
84 | params[k] = v
85 | return params
86 |
87 |
88 | def _check_keys(l, keys):
89 | if isinstance(keys, str):
90 | keys = [keys]
91 | flag = True
92 | keys_not_found = list()
93 | for k in keys:
94 | if k not in l.keys():
95 | flag = False
96 | keys_not_found.append(k)
97 | return flag, keys_not_found
98 |
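A short example of the two readers above; the file paths are taken from tests/data and the format is inferred from the extension:

    import pyspod.utils.io as utils_io

    # YAML config with a `required` block (and optionally an `optional` block)
    params = utils_io.read_config('tests/data/input.yaml')

    # netCDF data read through xarray; .npy and .mat are also supported
    data = utils_io.read_data('tests/data/era_interim_data.nc')
    print(params['n_dft'], type(data))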
--------------------------------------------------------------------------------
/pyspod/utils/parallel.py:
--------------------------------------------------------------------------------
1 | '''Module implementing utils to support distributed deployment.'''
2 | import io
3 | import sys
4 | import importlib
5 | import numpy as np
6 |
7 |
8 | def _get_module_MPI(comm):
9 | assert comm is not None
10 | mod_name = type(comm).__module__
11 | try:
12 | return sys.modules[mod_name]
13 | except KeyError:
14 | return importlib.import_module(mod_name)
15 |
16 |
17 | def _get_module_dtlib(comm):
18 | MPI = _get_module_MPI(comm)
19 | pkg_name = MPI.__spec__.parent
20 | mod_name = pkg_name + '.util.dtlib'
21 | try:
22 | return sys.modules[mod_name]
23 | except KeyError:
24 | return importlib.import_module(mod_name)
25 |
26 |
27 | def pvar(data, comm):
28 | """
29 | Parallel computation of mean and variance.
30 | """
31 | n = np.size(data)
32 | m = np.mean(data)
33 | d = data - m
34 | d *= d
35 | v = np.sum(d)/n
36 |
37 | def op_stat(a, b):
38 | na, ma, va = a
39 | nb, mb, vb = b
40 | n = na + nb
41 | m = (na*ma + nb*mb)/n
42 | v = (na*va + nb*vb + na*nb*(ma-mb)**2/n)/n
43 | return ((n, m, v))
44 |
45 | (n, m, v) = comm.allreduce((n, m, v), op=op_stat)
46 | return v, m, n
47 |
48 |
49 | def create_subcomm(comm):
50 | '''Create subcommunicator'''
51 | MPI = _get_module_MPI(comm)
52 | size = comm.Get_size()
53 | rank = comm.Get_rank()
54 | for n in [3,4,2,1]:
55 | if size % n == 0:
56 | den = n
57 | break
58 | a, b = MPI.Compute_dims(size//den, 2)
59 | b *= den
60 | dims = (a,b)
61 | cart = comm.Create_cart(dims)
62 | coords = cart.Get_coords(cart.Get_rank())
63 | subcomm = cart.Sub([False, True])
64 | cart.Free()
65 | return dims[0], coords[0], subcomm
66 |
67 | # def blockdist(N, size, rank):
68 | # q, r = divmod(N, size)
69 | # n = q + (1 if r > rank else 0)
70 | # s = rank * q + min(rank, r)
71 | # return (n, s)
72 | #
73 | # import sys
74 | # sys.stdout.flush()
75 | # N = 7
76 | # n, s = _blockdist(N, dims[0], coords[0])
77 | # for i in range(s, s+n):
78 | # val = subcomm.allreduce(rank)
79 | # subcomm.Free()
80 | # cart.Free()
81 |
82 |
83 | def distribute(data, comm):
84 | """
85 | Distribute largest spatial dimension of data.
86 | """
87 | ## distribute largest spatial dimension based on data
88 | global_shape = data.shape
89 | max_axis = np.argmax(global_shape)
90 | if comm is not None:
91 | size = comm.size
92 | rank = comm.rank
93 | shape = data.shape
94 | index = [np.s_[:]] * len(shape)
95 | N = shape[max_axis]
96 | n, s = _blockdist(N, size, rank)
97 | index[max_axis] = np.s_[s:s+n]
98 | index = tuple(index)
99 | data = data[index]
100 | comm.Barrier()
101 | else:
102 | data = data
103 | return data, max_axis, global_shape
104 |
105 |
106 | def distribute_data(data, comm):
107 | """
108 | Distribute largest spatial dimension of data, assuming:
109 | - time dimensions appear as first coordinate of the array,
110 | - spatial dimensions follow.
111 | This is typically the case for `data`.
112 | """
113 | ## distribute largest spatial dimension based on data
114 | global_shape = data[0,...].shape ## spatial dimension
115 | max_axis = np.argmax(global_shape)
116 | if comm is not None:
117 | size = comm.size
118 | rank = comm.rank
119 | shape = data.shape
120 | index = [np.s_[:]] * len(shape)
121 | N = shape[max_axis+1]
122 | n, s = _blockdist(N, size, rank)
123 | index[max_axis+1] = np.s_[s:s+n]
124 | index = tuple(index)
125 | data = data[index]
126 | comm.Barrier()
127 | else:
128 | data = data
129 | return data, max_axis, global_shape
130 |
131 |
132 | def distribute_dimension(data, max_axis, comm):
133 | """
134 | Distribute desired spatial dimension, splitting partitions
135 | by value // comm.size, with remainder = value % comm.size
136 | """
137 | ## distribute largest spatial dimension based on data
138 | if comm is not None:
139 | size = comm.size
140 | rank = comm.rank
141 | shape = data.shape
142 | index = [np.s_[:]] * len(shape)
143 | N = shape[max_axis]
144 | n, s = _blockdist(N, size, rank)
145 | index[max_axis] = np.s_[s:s+n]
146 | index = tuple(index)
147 | data = data[index]
148 | comm.Barrier()
149 | else:
150 | data = data
151 | return data
152 |
153 |
154 | def _blockdist(N, size, rank):
155 | q, r = divmod(N, size)
156 | n = q + (1 if r > rank else 0)
157 | s = rank * q + min(rank, r)
158 | return (n, s) if rank < size else (0, 0)
159 |
160 |
161 | def allreduce(data, comm):
162 | if comm is not None:
163 | MPI = _get_module_MPI(comm)
164 | data = data.view(data.dtype.newbyteorder('='))
165 | data_reduced = np.zeros_like(data)
166 | comm.Barrier()
167 | comm.Allreduce(data, data_reduced, op=MPI.SUM)
168 | else:
169 | data_reduced = data
170 | return data_reduced
171 |
172 |
173 | def barrier(comm):
174 | if comm is not None:
175 | comm.Barrier()
176 |
177 |
178 | def pr0(string, comm):
179 | if comm is not None:
180 | if comm.rank == 0:
181 | print(string)
182 | else:
183 | print(string)
184 |
185 |
186 | def npy_save(comm, filename, array, axis=0):
187 | if comm is not None:
188 | MPI = _get_module_MPI(comm)
189 | dtlib = _get_module_dtlib(comm)
190 | array = array.view(array.dtype.newbyteorder('='))
191 | array = np.asarray(array)
192 | dtype = array.dtype
193 | shape = array.shape
194 | lcount = np.array(shape[axis], dtype=np.int64)
195 | gcount = np.empty_like(lcount)
196 | comm.Allreduce(lcount, gcount, op=MPI.SUM)
197 | gdispl = np.empty_like(lcount)
198 | comm.Scan(lcount, gdispl, op=MPI.SUM)
199 | gdispl -= lcount
200 | sizes = list(shape)
201 | sizes[axis] = int(gcount)
202 | starts = [0] * len(sizes)
203 | starts[axis] = int(gdispl)
204 |
205 | array = np.ascontiguousarray(array)
206 | if array.flags.c_contiguous:
207 | mpi_order = MPI.ORDER_C
208 | elif array.flags.f_contiguous:
209 | mpi_order = MPI.ORDER_FORTRAN
210 |
211 | file = MPI.File.Open(comm, filename, MPI.MODE_CREATE | MPI.MODE_WRONLY)
212 | file.Set_size(0) # truncate if the file exists
213 |
214 | offset = 0
215 | if comm.Get_rank() == 0:
216 | try:
217 | write_array_header = np.lib.format._write_array_header
218 | except AttributeError:
219 | write_array_header = np.lib.format.write_array_header_1_0
220 | data = np.lib.format.header_data_from_array_1_0(array)
221 | data['shape'] = tuple(sizes)
222 | fp = io.BytesIO()
223 | write_array_header(fp, data)
224 | header = fp.getvalue()
225 | offset = len(header)
226 | file.Write(header)
227 | offset = np.array(offset, dtype=np.int64)
228 | comm.Bcast(offset, root=0)
229 | datatype = dtlib.from_numpy_dtype(dtype)
230 |
231 | if shape[axis] > 0:
232 | subarray = datatype.Create_subarray(
233 | sizes=sizes,
234 | subsizes=shape,
235 | starts=starts,
236 | order=mpi_order,
237 | )
238 | else:
239 | subarray = datatype.Create_contiguous(0)
240 |
241 | datatype.Commit()
242 | subarray.Commit()
243 | file.Set_view(disp=offset, etype=datatype, filetype=subarray)
244 | datatype.Free()
245 | subarray.Free()
246 | file.Write_all(array)
247 | file.Sync()
248 | file.Close()
249 | else:
250 | np.save(filename, array)
251 |
252 |
253 | def npy_load(comm, filename, axis=0, count=None):
254 | if comm is not None:
255 | MPI = _get_module_MPI(comm)
256 | dtlib = _get_module_dtlib(comm)
257 | class _File(MPI.File):
258 | def read(self, size):
259 | buf = bytearray(size)
260 | status = MPI.Status()
261 | self.Read(buf, status)
262 | count = status.Get_count(MPI.BYTE)
263 | return buf[:count]
264 | try:
265 | np.lib.format._check_version
266 | np.lib.format._read_array_header
267 | def read_array_header(fp, version):
268 | np.lib.format._check_version(version)
269 | return np.lib.format._read_array_header(fp, version)
270 | except AttributeError:
271 | def read_array_header(fp, version):
272 | assert version == (1, 0)
273 | return np.lib.format.read_array_header_1_0(fp)
274 | file = MPI.File.Open(comm, filename, MPI.MODE_RDONLY)
275 | data = None
276 | if comm.Get_rank() == 0:
277 | fp = _File(file)
278 | version = np.lib.format.read_magic(fp)
279 | shape, fortran_order, dtype = read_array_header(fp, version)
280 | offset = file.Get_position()
281 | data = (offset, shape, dtype, "F" if fortran_order else "C")
282 | offset, sizes, dtype, npy_order = comm.bcast(data, root=0)
283 |
284 | if count is None:
285 | count = sizes[axis]
286 | size = comm.Get_size()
287 | rank = comm.Get_rank()
288 | count = count // size + (count % size > rank)
289 | count = np.array(count, dtype=np.int64)
290 | displ = np.empty_like(count)
291 | comm.Scan(count, displ, op=MPI.SUM)
292 | displ -= count
293 |
294 | shape = list(sizes)
295 | shape[axis] = int(count)
296 | starts = [0] * len(sizes)
297 | starts[axis] = int(displ)
298 | if npy_order == "C":
299 | mpi_order = MPI.ORDER_C
300 | else:
301 | mpi_order = MPI.ORDER_FORTRAN
302 | datatype = dtlib.from_numpy_dtype(dtype)
303 |
304 | if shape[axis] > 0:
305 | subarray = datatype.Create_subarray(
306 | sizes=sizes,
307 | subsizes=shape,
308 | starts=starts,
309 | order=mpi_order,
310 | )
311 | else:
312 | subarray = datatype.Create_contiguous(0)
313 |
314 | datatype.Commit()
315 | subarray.Commit()
316 | file.Set_view(disp=offset, etype=datatype, filetype=subarray)
317 | datatype.Free()
318 | subarray.Free()
319 | array = np.empty(shape, dtype, npy_order)
320 | file.Read_all(array)
321 | file.Close()
322 | else:
323 | array = np.load(filename)
324 | return array
325 |
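A minimal MPI sketch of the distribution and collective I/O helpers above, meant to be launched with e.g. `mpiexec -n 2 python script.py`:

    import numpy as np
    from mpi4py import MPI
    import pyspod.utils.parallel as utils_par

    comm = MPI.COMM_WORLD
    np.random.seed(0)                          # same global array on every rank
    data = np.random.rand(100, 64, 32)         # (time, x1, x2)

    # keep only this rank's slice of the largest spatial dimension
    local, max_axis, global_shape = utils_par.distribute_data(data, comm)

    # collective write/read of a single, globally consistent .npy file
    utils_par.npy_save(comm, 'field.npy', local, axis=max_axis + 1)
    loaded = utils_par.npy_load(comm, 'field.npy', axis=max_axis + 1)
    utils_par.pr0(f'global shape: {global_shape}', comm)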
--------------------------------------------------------------------------------
/pyspod/utils/plotting_support/coast.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/utils/plotting_support/coast.mat
--------------------------------------------------------------------------------
/pyspod/utils/plotting_support/coast_centred.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/pyspod/utils/plotting_support/coast_centred.mat
--------------------------------------------------------------------------------
/pyspod/utils/weights.py:
--------------------------------------------------------------------------------
1 | '''Module implementing weights for standard cases.'''
2 |
3 | # import standard python packages
4 | import numpy as np
5 | import pyspod.utils.parallel as utils_par
6 |
7 |
8 | def geo_trapz_2D(x1_dim, x2_dim, n_vars, **kwargs):
9 | '''
10 | 2D integration weights for geospatial data via trapezoidal rule.
11 |
12 | :param numpy.ndarray x1_dim: first spatial coordinate.
13 | :param numpy.ndarray x2_dim: second spatial coordinate.
14 | :param int n_vars: number of variables.
15 |
16 | :return: the computed weights.
17 | :rtype: numpy.ndarray
18 | '''
19 | ## get optional parameter (radius of e.g. Earth). Default is 1
20 | R = kwargs.get('R', 1)
21 |
22 | ## define latitude and longitude coordinates
23 | lat = np.linspace(-90, 90, x1_dim)
24 | lon = np.linspace( 0,360, x2_dim+1)
25 | lon = lon[0:-1]
26 | lat_rad = lat / 360 * 2 * np.pi
27 | lon_rad = lon / 360 * 2 * np.pi
28 |
29 | diff_lat = np.diff(lat_rad)
30 | diff_lat = diff_lat[0:-1]
31 | d_lat = np.hstack([diff_lat[1]/2, diff_lat, diff_lat[-1]/2])
32 |
33 | tmp = np.diff(lon_rad, axis=0)
34 | d_lon = np.hstack([lon_rad[0]/2, tmp])
35 |
36 | d_lat = np.tile(d_lat, [x2_dim, 1])
37 | d_lon = np.tile(d_lon, [x1_dim, 1])
38 |
39 | ## cos(latitude) since lat \in [-90 90] deg
40 | dA = np.abs(R**2 * np.cos(lat_rad) * d_lon.T * d_lat).T
41 | dA = np.tile(dA, [n_vars, 1, 1])
42 | dA = np.einsum('ijk->jki', dA)
43 | w = { 'weights_name': 'geo_trapz_2D', 'weights': dA }
44 | return w
45 |
46 |
47 | def geo_trapz_3D(x1_dim, x2_dim, x3_dim, n_vars, **kwargs):
48 | '''
49 | 3D integration weights for geospatial data via trapezoidal rule.
50 |
51 | :param numpy.ndarray x1_dim: first spatial coordinate.
52 | :param numpy.ndarray x2_dim: second spatial coordinate.
53 | :param numpy.ndarray x3_dim: third spatial coordinate.
54 | :param int n_vars: number of variables.
55 |
56 | :return: the computed weights.
57 | :rtype: numpy.ndarray
58 | '''
59 | ## get optional parameter (radius of e.g. Earth). Default is 1
60 | R = kwargs.get('R', 1)
61 |
62 | ## define latitude and longitude coordinates
63 | lat = np.linspace(-90,90,x1_dim)
64 | lon = np.linspace(0,360,x2_dim+1)
65 | lon = lon[0:-1]
66 | lat_rad = lat / 360 * 2 * np.pi
67 | lon_rad = lon / 360 * 2 * np.pi
68 |
69 | diff_lat = np.diff(lat_rad)
70 | diff_lat = diff_lat[0:-1]
71 | d_lat = np.hstack([diff_lat[1]/2, diff_lat, diff_lat[-1]/2])
72 |
73 | tmp = np.diff(lon_rad, axis=0)
74 | d_lon = np.hstack([lon_rad[0]/2, tmp])
75 |
76 | d_lat = np.tile(d_lat, [x2_dim, 1])
77 | d_lon = np.tile(d_lon, [x1_dim, 1])
78 |
79 | ## cos(latitude) since lat \in [-90 90] deg
80 | dA = np.abs(R**2 * np.cos(lat_rad) * d_lon.T * d_lat).T
81 | dA = np.tile(dA, [x3_dim, 1, 1])
82 | dA = np.einsum('ijk->jki', dA)
83 | dA = np.tile(dA, [n_vars, 1, 1])
84 | w = { 'weights_name': 'geo_trapz_3D', 'weights': dA }
85 | return w
86 |
87 |
88 | def custom(**kwargs):
89 | '''
90 | Customized weights to be implemented by user if required.
91 | Note, weights must have the same dimension as the data
92 | flattened spatial dimension (i.e. if we have two spatial
93 | dimensions, with length 10, and 20, respectively, and
94 | we have two variables, this function must return a np.ndarray
95 | of dimension = 10 x 20 x 2 = 400).
96 | '''
97 | pass
98 |
99 |
100 | def apply_normalization(
101 | data, weights, n_vars, method='variance', comm=None):
102 | '''
103 | Normalize the weights by the data variance, if requested.
104 |
105 | :param numpy.ndarray data: data.
106 | :param numpy.ndarray weights: weights.
107 | :param int n_vars: number of variables.
108 | :param int method: normalization method. Default is 'variance'.
109 | :param MPI.Comm comm: MPI communicator.
110 |
111 | :return: the normalized weights.
112 | :rtype: numpy.ndarray
113 | '''
114 |
115 | ## variable-wise normalization by variance via weight matrix
116 | if comm is not None:
117 | if method.lower() == 'variance':
118 | if comm.rank == 0:
119 | print('')
120 | print('Normalization by variance - parallel')
121 | print('------------------------------------')
122 | axis = tuple(np.arange(0, data[...,0].ndim))
123 | for i in range(0, n_vars):
124 | var, _, _ = utils_par.pvar(data[...,i], comm=comm)
125 | weights[...,i] = weights[...,i] / var
126 | else:
127 | if comm.rank == 0:
128 | print('')
129 | print('No normalization performed')
130 | print('--------------------------')
131 | else:
132 | if method.lower() == 'variance':
133 | print('')
134 | print('Normalization by variance - serial')
135 | print('----------------------------------')
136 | axis = tuple(np.arange(0, data[...,0].ndim))
137 | for i in range(0, n_vars):
138 | var = np.nanvar(data[...,i], axis=axis)
139 | weights[...,i] = weights[...,i] / var
140 | else:
141 | print('')
142 | print('No normalization performed')
143 | print('--------------------------')
144 | return weights
145 |
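A short example of the 2D geospatial weights above, on a 90 x 180 latitude-longitude grid with a single variable (R defaults to 1; pass e.g. R=6371 for the Earth radius in km):

    import pyspod.utils.weights as utils_weights

    w = utils_weights.geo_trapz_2D(x1_dim=90, x2_dim=180, n_vars=1)
    weights = w['weights']                     # array of shape (90, 180, 1)
    print(w['weights_name'], weights.shape)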
--------------------------------------------------------------------------------
/readme/MEI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/readme/MEI.png
--------------------------------------------------------------------------------
/readme/PySPOD_logo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/readme/PySPOD_logo2.png
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = pyspod
3 | version = attr: pyspod.__version__
4 | description = Python Spectral Proper Orthogonal Decomposition
5 | long_description = file:README.md
6 | long_description_content_type = text/markdown
7 | author = Gianmarco Mengaldo, Marcin Rogowski, Lisandro Dalcin, Romit Maulik, Andrea Lario
8 | author_email = mpegim@nus.edu.sg, marcin.rogowski@gmail.com, dalcinl@gmail.com, rmaulik@anl.gov, alario@sissa.it
9 | url = https://github.com/MathEXLab/PySPOD
10 | license = MIT
11 | license_files = LICENSE.rst
12 | keywords =
13 | SPOD
14 | spectral proper orthogonal decomposition
15 | classifiers =
16 | Intended Audience :: Science/Research
17 | License :: OSI Approved :: MIT License
18 | Programming Language :: Python :: 3
19 | Programming Language :: Python :: 3.8
20 | Programming Language :: Python :: 3.9
21 | Programming Language :: Python :: 3.10
22 | Programming Language :: Python :: 3.11
23 | Programming Language :: Python :: 3.12
24 | Topic :: Scientific/Engineering :: Mathematics
25 |
26 | [options]
27 | packages = find_namespace:
28 | zip_safe = False
29 | include_package_data = True
30 | python_requires = >=3.7
31 | install_requires =
32 | importlib-metadata<5.0; python_version < '3.8'
33 | psutil
34 | tqdm
35 | numpy
36 | scipy
37 | h5py
38 | netcdf4
39 | xarray
40 | matplotlib
41 | pyyaml
42 |
43 | [options.package_data]
44 | pyspod.utils.plotting_support =
45 | coast.mat
46 | coast_centred.mat
47 |
48 | [options.extras_require]
49 | ai = tensorflow
50 | mpi = mpi4py >= 3.1
51 | test =
52 | pytest
53 | pytest-cov
54 | pytest-mpi
55 | coverage
56 | docs =
57 | Sphinx
58 | sphinx_rtd_theme
59 |
60 | [coverage:run]
61 | parallel = True
62 | branch = True
63 | source = pyspod
64 | [coverage:paths]
65 | source =
66 | ./pyspod
67 | */pyspod
68 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | setup()
3 |
--------------------------------------------------------------------------------
/tests/data/earthquakes_data.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/tests/data/earthquakes_data.nc
--------------------------------------------------------------------------------
/tests/data/era_interim_data.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/tests/data/era_interim_data.nc
--------------------------------------------------------------------------------
/tests/data/fluidmechanics_data.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MathEXLab/PySPOD/9d69ac0724bfffcedaf104b084d050a94100cf65/tests/data/fluidmechanics_data.mat
--------------------------------------------------------------------------------
/tests/data/input.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 30
6 |
--------------------------------------------------------------------------------
/tests/data/input_optional.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 30
6 |
7 | optional:
8 | - overlap : 0
9 | - mean_type : 'blockwise'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 3
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'double'
17 | - savedir : 'spod_results'
18 |
--------------------------------------------------------------------------------
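Note: the YAML parameter files in tests/data are read with
pyspod.utils.io.read_config, which returns a params dictionary whose required
and optional entries the tests access directly; individual values such as
time_step are then overridden at runtime, as in test_tutorials.py. A minimal
sketch, with the file path assumed:

    import pyspod.utils.io as utils_io
    params = utils_io.read_config('tests/data/input_optional.yaml')
    params['time_step'] = 1.0  # e.g. override with the actual dt of the data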
/tests/data/input_postproc_2d.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 32
6 |
7 | optional:
8 | - overlap : 50
9 | - mean_type : 'blockwise'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 3
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'double'
17 | - savedir : 'results'
18 |
--------------------------------------------------------------------------------
/tests/data/input_postproc_3d.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 3
4 | - n_variables : 1
5 | - n_dft : 100
6 |
7 | optional:
8 | - overlap : 0
9 | - mean_type : 'blockwise'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 3
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'double'
17 | - savedir : 'results'
18 |
--------------------------------------------------------------------------------
/tests/data/input_spod.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 64
6 |
7 | optional:
8 | - overlap : 50
9 | - mean_type : 'blockwise'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 3
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'double'
17 | - savedir : 'spod_results'
18 | - fullspectrum : False
19 |
--------------------------------------------------------------------------------
/tests/data/input_tutorial1.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 1
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 64
6 |
7 | optional:
8 | - overlap : 50
9 | - mean_type : 'longtime'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 40
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'double'
17 | - savedir : 'spod_results'
18 | - fullspectrum : False
19 |
--------------------------------------------------------------------------------
/tests/data/input_tutorial2.yaml:
--------------------------------------------------------------------------------
1 | required:
2 | - time_step : 12 #hours
3 | - n_space_dims: 2
4 | - n_variables : 1
5 | - n_dft : 730
6 |
7 | optional:
8 | - overlap : 0
9 | - mean_type : 'longtime'
10 | - normalize_weights: False
11 | - normalize_data : False
12 | - n_modes_save : 40
13 | - conf_level : 0.95
14 | - reuse_blocks : False
15 | - savefft : False
16 | - dtype : 'single'
17 | - savedir : 'spod_results'
18 | - fullspectrum : False
19 |
--------------------------------------------------------------------------------
/tests/test_emulation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import h5py
6 | import yaml
7 | import shutil
8 | import numpy as np
9 |
10 | # Current, parent and file paths
11 | CWD = os.getcwd()
12 | CF = os.path.realpath(__file__)
13 | CFD = os.path.dirname(CF)
14 | sys.path.append(os.path.join(CFD,'../'))
15 |
16 | # Import library specific modules
17 | from pyspod.pod.standard import Standard as pod_standard
18 | from pyspod.spod.standard import Standard as spod_standard
19 | from pyspod.emulation.neural_nets import Neural_Nets as emulation_nn
20 | import pyspod.pod.utils as utils_pod
21 | import pyspod.spod.utils as utils_spod
22 |
23 | import pyspod.utils.io as utils_io
24 | import pyspod.utils.postproc as post
25 |
26 |
27 |
28 | def test_lstm_pod():
29 | ## -------------------------------------------------------------------------
30 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
31 | data_dict = utils_io.read_data(data_file=data_file)
32 | data = data_dict['p'].T
33 | dt = data_dict['dt'][0,0]
34 | t = dt * np.arange(0,data.shape[0]).T
35 | nt = t.shape[0]
36 | train_ratio = 0.8
37 | test_ratio = (1 - train_ratio)
38 | params_pod = {
39 | # -- required parameters
40 | 'time_step' : dt,
41 | 'n_space_dims': 2,
42 | 'n_variables' : 1,
43 | # -- optional parameters
44 | 'overlap' : 50,
45 | 'normalize_weights': True,
46 | 'scale_data' : True,
47 | 'n_modes_save' : 8,
48 | 'dtype' : 'double',
49 | 'savedir' : os.path.join(CFD, 'results')
50 | }
51 | params_emulation = {
52 | 'network' : 'lstm',
53 | 'scaler' : 'localmax',
54 | 'data_type' : 'real',
55 | 'epochs' : 10,
56 | 'batch_size': 32,
57 | 'n_seq_in' : 60,
58 | 'n_seq_out' : 1,
59 | 'n_neurons' : 1,
60 | 'dropout' : 0.15,
61 | 'savedir' : os.path.join(CFD, 'results')
62 | }
63 | ## -------------------------------------------------------------------------
64 |
65 | ## training and testing database definition
66 | nt_train = int(train_ratio * nt)
67 | d_train = data[:nt_train,:,:]
68 | nt_test = nt - nt_train
69 | d_test = data[nt_train:,:,:]
70 |
71 | ## fit and transform pod
72 | pod_class = pod_standard(params=params_pod)
73 | pod = pod_class.fit(data_list=d_train)
74 | phi = np.load(os.path.join(pod._savedir_sim, 'modes.npy'))
75 | # coeffs_train, phi, tm, file_coeffs, max_axis = utils_pod.compute_coeffs_op(
76 | # data=d_train, results_dir=pod._savedir_sim)
77 | results_dir = pod._savedir_sim
78 | c_train_file, dir_train = utils_pod.compute_coeffs_op(
79 | d_train, results_dir, savedir='train')
80 | c_test_file, dir_test = utils_pod.compute_coeffs_op(
81 | d_test, results_dir, savedir='test')
82 |
83 | ## compute test coefficients
84 | # d_r_test = np.reshape(d_test[:,:,:], [nt_test,pod.nv*pod.nx])
85 | # for i in range(nt_test):
86 | # d_r_test[i,:] = np.squeeze(d_r_test[i,:]) - np.squeeze(tm)
87 | # coeffs_test = np.transpose(phi) @ d_r_test.T
88 |
89 | ## initialization of variables and structures
90 | n_modes = params_pod['n_modes_save']
91 | coeffs_train = np.load(c_train_file)
92 | coeffs_test = np.load(c_test_file)
93 | dim1_train = coeffs_train.shape[1]
94 | dim0_test = coeffs_test .shape[0]
95 | dim1_test = coeffs_test .shape[1]
96 | data_train = np.zeros([n_modes , dim1_train], dtype=float)
97 | data_test = np.zeros([n_modes , dim1_test] , dtype=float)
98 | coeffs = np.zeros([dim0_test, dim1_test] , dtype=float)
99 | coeffs_tmp = np.zeros([n_modes , dim1_test] , dtype=float)
100 |
101 | ## select lstm
102 | params_emulation['network'] = 'lstm'
103 |
104 | ## initialize the Emulation class
105 | emulation = emulation_nn(params_emulation)
106 | emulation.model_initialize(data=data_train)
107 |
108 | ## normalize data
109 | c_train = coeffs_train[:,:]
110 | c_test = coeffs_test[:,:]
111 | scaler1 = emulation.scaler(data=c_train)
112 | scaler2 = emulation.scaler(data=c_train)
113 | data_train[:,:] = emulation.scale_data(c_train, vec=scaler1)
114 | data_test [:,:] = emulation.scale_data(c_test , vec=scaler1)
115 |
116 | ## train model
117 | emulation.model_train(data_train=data_train, data_valid=data_test)
118 | coeffs_tmp = emulation.model_inference(data_in=data_test)
119 |
120 | ## denormalize data
121 | coeffs[:,:] = emulation.descale_data(coeffs_tmp, scaler1)
122 |
123 | ## plot training history
124 | train_loss = emulation.train_history.history['loss']
125 | valid_loss = emulation.train_history.history['val_loss']
126 | post.plot_training_histories(
127 | train_loss, valid_loss,
128 | path=params_pod['savedir'],
129 | filename='history.png')
130 |
131 | # reconstruct solutions
132 | f_p, _ = utils_pod.compute_reconstruction(
133 | coeffs=c_test, coeffs_dir=dir_train, time_idx='all',
134 | savedir=dir_test, filename='recons_projection')
135 | f_e, _ = utils_pod.compute_reconstruction(
136 | coeffs=coeffs, coeffs_dir=dir_train, time_idx='all',
137 | savedir=dir_test, filename='recons_emulation')
138 | p_rec = np.load(f_p)
139 | e_rec = np.load(f_e)
140 |
141 | ## assert test
142 | tol = 1e-6
143 | savedir = pod._savedir
144 | assert(pod.dim ==4)
145 | assert(pod.shape ==(1, 20, 88, 1))
146 | assert(pod.nt ==800)
147 | assert(pod.nx ==1760)
148 | assert(pod.nv ==1)
149 | assert(pod.xdim ==2)
150 | assert(pod.xshape ==(20, 88))
151 | assert(pod.dt ==0.2)
152 | assert(pod.n_modes_save==8)
153 | assert((np.real(pod.eigs[0]) <90699.72245430+tol) and \
154 | (np.real(pod.eigs[0]) >90699.72245430-tol))
155 | assert((pod.weights[0,0] <19934.84235881+tol) and \
156 | (pod.weights[0,0] >19934.84235881-tol))
157 | assert((np.abs(e_rec[0,1,0]) <4.467810376724+tol) and \
158 | (np.abs(e_rec[0,1,0]) >4.467810376724-tol))
159 | assert((np.abs(e_rec[100,1,0]) <4.467810376724+tol) and \
160 | (np.abs(e_rec[100,1,0]) >4.467810376724-tol))
161 | assert((np.abs(e_rec[150,1,0]) <4.467810376761+tol) and \
162 | (np.abs(e_rec[150,1,0]) >4.467810376761-tol))
163 | assert((np.abs(e_rec[100,10,5])<4.463844748293+tol) and \
164 | (np.abs(e_rec[100,10,5])>4.463844748293-tol))
165 | assert((np.abs(e_rec[50,7,20]) <4.459104904890+tol) and \
166 | (np.abs(e_rec[50,7,20]) >4.459104904890-tol))
167 | assert((np.abs(e_rec[60,8,9]) <4.463696917777+tol) and \
168 | (np.abs(e_rec[60,8,9]) >4.463696917777-tol))
169 | # clean up results
170 | try:
171 | shutil.rmtree(os.path.join(CFD,'results'))
172 | except OSError as e:
173 | pass
174 |
175 |
176 | def test_lstm_spod():
177 | ## -------------------------------------------------------------------------
178 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
179 | data_dict = utils_io.read_data(data_file=data_file)
180 | data = data_dict['p'].T
181 | dt = data_dict['dt'][0,0]
182 | t = dt * np.arange(0,data.shape[0]).T
183 | nt = t.shape[0]
184 | train_ratio = 0.95
185 | test_ratio = (1 - train_ratio)
186 | block_dimension = 64 * dt
187 | params_spod = {
188 | # -- required parameters
189 | 'time_step' : dt,
190 | 'n_space_dims': 2,
191 | 'n_variables' : 1,
192 | 'n_dft' : np.ceil(block_dimension / dt),
193 | # -- optional parameters
194 | 'overlap' : 50,
195 | 'mean_type' : 'blockwise',
196 | 'normalize_weights': False,
197 | 'normalize_data' : False,
198 | 'n_modes_save' : 3,
199 | 'conf_level' : 0.95,
200 | 'savedir' : os.path.join(CFD, 'results'),
201 | 'reuse_blocks' : False,
202 | 'fullspectrum' : True,
203 | 'dtype' : 'double'
204 | }
205 | params_emulation = {
206 | 'network' : 'lstm',
207 | 'scaler' : 'localmax',
208 | 'data_type' : 'complex',
209 | 'epochs' : 3,
210 | 'batch_size': 32,
211 | 'n_seq_in' : 30,
212 | 'n_seq_out' : 1,
213 | 'n_neurons' : 1,
214 | 'dropout' : 0.15,
215 | 'savedir' : os.path.join(CFD, 'results')
216 | }
217 | ## -------------------------------------------------------------------------
218 |
219 | ## training and testing database definition
220 | nt_train = int(train_ratio * nt)
221 | d_train = data[:nt_train,:,:]
222 | nt_test = nt - nt_train
223 | d_test = data[nt_train:,:,:]
224 |
225 | ## fit and transform spod
226 | spod_class = spod_standard(params=params_spod)
227 | spod = spod_class.fit(data_list=d_train)
228 | results_dir = spod.savedir_sim
229 | c_train_file, dir_train = utils_spod.compute_coeffs_op(
230 | d_train, results_dir, savedir='train')
231 | c_test_file, dir_test = utils_spod.compute_coeffs_op(
232 | d_test, results_dir, savedir='test')
233 |
234 | ## initialization of variables and structures
235 | coeffs_train = np.load(c_train_file)
236 | coeffs_test = np.load(c_test_file )
237 | f_train = os.path.join(dir_train, 'params_coeffs.yaml')
238 | f_test = os.path.join(dir_test , 'params_coeffs.yaml')
239 | with open(f_train) as f: params_train = yaml.load(f, Loader=yaml.FullLoader)
240 | with open(f_test ) as f: params_test = yaml.load(f, Loader=yaml.FullLoader)
241 | n_freq = params_train['n_freq_r']
242 | n_modes = params_spod['n_modes_save']
243 | n_feature = coeffs_train.shape[0]
244 | dim1_train = coeffs_train.shape[1]
245 | dim0_test = coeffs_test .shape[0]
246 | dim1_test = coeffs_test .shape[1]
247 | data_train = np.zeros([n_freq,dim1_train] , dtype=complex)
248 | data_test = np.zeros([n_freq,dim1_test ] , dtype=complex)
249 | coeffs_tmp = np.zeros([n_freq,dim1_test ] , dtype=complex)
250 | coeffs = np.zeros([dim0_test,dim1_test], dtype=complex)
251 |
252 | ## select lstm
253 | params_emulation['network'] = 'lstm'
254 |
255 | ## initialize the Emulation class
256 | emulation = emulation_nn(params_emulation)
257 | emulation.model_initialize(data=data_train)
258 |
259 | for idx in range(n_modes):
260 | idx_x = list(range(idx,n_feature,n_modes))
261 | ## normalize data
262 | c_train = coeffs_train[idx_x,:]
263 | c_test = coeffs_test [idx_x,:]
264 | scaler1 = emulation.scaler(data=c_train)
265 | scaler2 = emulation.scaler(data=c_train)
266 | data_train[:,:] = emulation.scale_data(c_train, vec=scaler1)
267 | data_test [:,:] = emulation.scale_data(c_test , vec=scaler1)
268 |
269 | ## train model
270 | emulation.model_train(
271 | data_train=data_train, data_valid=data_test, idx=idx)
272 | coeffs_tmp = emulation.model_inference(data_in=data_test, idx=idx)
273 |
274 | ## denormalize data
275 | coeffs[idx_x,:] = emulation.descale_data(coeffs_tmp, scaler1)
276 |
277 | ## plot training history
278 | train_loss = emulation.train_history.history['loss']
279 | valid_loss = emulation.train_history.history['val_loss']
280 | post.plot_training_histories(
281 | train_loss, valid_loss,
282 | path=params_spod['savedir'],
283 | filename='history.png')
284 |
285 | ## reconstruct solutions
286 | f_p, _ = utils_spod.compute_reconstruction(
287 | coeffs_dir=dir_train, coeffs=coeffs_test, time_idx='all',
288 | savedir=dir_test, filename='recons_projection')
289 | f_e, _ = utils_spod.compute_reconstruction(
290 | coeffs_dir=dir_train, coeffs=coeffs, time_idx='all',
291 | savedir=dir_test, filename='recons_emulation')
292 | d_test = d_test[...,None]
293 | p_rec = np.load(f_p)
294 | e_rec = np.load(f_e)
295 |
296 | ## test visualization
297 | post.generate_2d_subplot(
298 | var1=d_test[10,...,0], title1='data',
299 | var2=p_rec [10,...,0], title2='projection',
300 | var3=e_rec [10,...,0], title3='lstm emulation',
301 | N_round=6, path=params_spod['savedir'], filename='emulation.png')
302 | post.plot_compare_time_series(
303 | series1=coeffs_test[0,:], series2=coeffs[0,:],
304 | label1='test', label2='lstm', legendLocation='upper left',
305 | path=params_spod['savedir'], filename='timeseries_comparison.png')
306 | _ = post.compute_energy_spectrum(coeffs_test[0,:])
307 |
308 | ## assert test solutions
309 | tol = 1e-6
310 | # print(f'{np.abs(p_rec[0,0,0,0]) = :}')
311 | # print(f'{np.abs(p_rec[10,0,0,0]) = :}')
312 | # print(f'{np.abs(p_rec[15,5,12,0]) = :}')
313 | # print(f'{np.abs(e_rec[0,0,0,0]) = :}')
314 | # print(f'{np.abs(e_rec[10,0,0,0]) = :}')
315 | # print(f'{np.abs(e_rec[15,5,12,0]) = :}')
316 | assert((np.abs(p_rec[0,0,0,0]) <4.467528967599+tol) and \
317 | (np.abs(p_rec[0,0,0,0]) >4.467528967599-tol))
318 | assert((np.abs(p_rec[10,0,0,0]) <4.465600418067+tol) and \
319 | (np.abs(p_rec[10,0,0,0]) >4.465600418067-tol))
320 | assert((np.abs(p_rec[15,5,12,0])<4.457098452307+tol) and \
321 | (np.abs(p_rec[15,5,12,0])>4.457098452307-tol))
322 | assert((np.abs(e_rec[0,0,0,0]) <4.467528967599+tol) and \
323 | (np.abs(e_rec[0,0,0,0]) >4.467528967599-tol))
324 | assert((np.abs(e_rec[10,0,0,0]) <4.465600418067+tol) and \
325 | (np.abs(e_rec[10,0,0,0]) >4.465600418067-tol))
326 | assert((np.abs(e_rec[15,5,12,0])<4.457098452307+tol) and \
327 | (np.abs(e_rec[15,5,12,0])>4.457098452307-tol))
328 | # # clean up results
329 | # try:
330 | # shutil.rmtree(os.path.join(CFD,'results'))
331 | # except OSError as e:
332 | # pass
333 |
334 |
335 | def test_cnn_spod():
336 | ## -------------------------------------------------------------------------
337 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
338 | data_dict = utils_io.read_data(data_file=data_file)
339 | data = data_dict['p'].T
340 | dt = data_dict['dt'][0,0]
341 | t = dt * np.arange(0,data.shape[0]).T
342 | nt = t.shape[0]
343 | train_ratio = 0.95
344 | test_ratio = (1 - train_ratio)
345 | block_dimension = 64 * dt
346 | params_spod = {
347 | # -- required parameters
348 | 'time_step' : dt,
349 | 'n_space_dims': 2,
350 | 'n_variables' : 1,
351 | 'n_dft' : np.ceil(block_dimension / dt),
352 | # -- optional parameters
353 | 'overlap' : 50,
354 | 'mean_type' : 'blockwise',
355 | 'normalize_weights': False,
356 | 'normalize_data' : False,
357 | 'n_modes_save' : 3,
358 | 'conf_level' : 0.95,
359 | 'savedir' : os.path.join(CFD, 'results'),
360 | 'reuse_blocks' : False,
361 | 'fullspectrum' : True,
362 | 'dtype' : 'double'
363 | }
364 | params_emulation = {
365 | 'network' : 'lstm',
366 | 'scaler' : 'localmax',
367 | 'data_type' : 'complex',
368 | 'epochs' : 3,
369 | 'batch_size': 32,
370 | 'n_seq_in' : 30,
371 | 'n_seq_out' : 1,
372 | 'n_neurons' : 1,
373 | 'dropout' : 0.15,
374 | 'savedir' : os.path.join(CFD, 'results')
375 | }
376 | ## -------------------------------------------------------------------------
377 |
378 | ## training and testing database definition
379 | nt_train = int(train_ratio * nt)
380 | d_train = data[:nt_train,:,:]
381 | nt_test = nt - nt_train
382 | d_test = data[nt_train:,:,:]
383 |
384 | ## fit and transform spod
385 | spod_class = spod_standard(params=params_spod)
386 | spod = spod_class.fit(data_list=d_train)
387 | results_dir = spod.savedir_sim
388 | c_train_file, dir_train = utils_spod.compute_coeffs_op(
389 | d_train, results_dir, savedir='train')
390 | c_test_file, dir_test = utils_spod.compute_coeffs_op(
391 | d_test, results_dir, savedir='test')
392 |
393 | ## initialization of variables and structures
394 | coeffs_train = np.load(c_train_file)
395 | coeffs_test = np.load(c_test_file )
396 | f_train = os.path.join(dir_train, 'params_coeffs.yaml')
397 | f_test = os.path.join(dir_test , 'params_coeffs.yaml')
398 | with open(f_train) as f: params_train = yaml.load(f, Loader=yaml.FullLoader)
399 | with open(f_test ) as f: params_test = yaml.load(f, Loader=yaml.FullLoader)
400 | n_freq = params_train['n_freq_r']
401 | n_modes = params_spod['n_modes_save']
402 | dim0_test = coeffs_test .shape[0]
403 | dim1_train = coeffs_train.shape[1]
404 | dim1_test = coeffs_test .shape[1]
405 | n_feature = coeffs_train.shape[0]
406 | data_train = np.zeros([n_freq,dim1_train] , dtype=complex)
407 | data_test = np.zeros([n_freq,dim1_test ] , dtype=complex)
408 | coeffs_tmp = np.zeros([n_freq,dim1_test ] , dtype=complex)
409 | coeffs = np.zeros([dim0_test,dim1_test], dtype=complex)
410 |
411 | ## select cnn
412 | params_emulation['network'] = 'cnn'
413 |
414 | ## initialize the Emulation class
415 | emulation = emulation_nn(params_emulation)
416 | emulation.model_initialize(data=data_train)
417 |
418 | # clean up results
419 | try:
420 | shutil.rmtree(os.path.join(CFD,'results'))
421 | except OSError as e:
422 | pass
423 |
424 |
425 |
426 | if __name__ == "__main__":
427 | test_lstm_pod ()
428 | test_lstm_spod()
429 | test_cnn_spod ()
430 |
--------------------------------------------------------------------------------
/tests/test_pod_parallel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import h5py
6 | import shutil
7 | import pytest
8 | import numpy as np
9 |
10 | # Current, parent and file paths
11 | CWD = os.getcwd()
12 | CF = os.path.realpath(__file__)
13 | CFD = os.path.dirname(CF)
14 | sys.path.append(os.path.join(CFD,'../'))
15 |
16 | # Import library specific modules
17 | from pyspod.pod.standard import Standard as pod_standard
18 | import pyspod.pod.utils as utils_pod
19 | import pyspod.utils.io as utils_io
20 | import pyspod.utils.parallel as utils_par
21 | import pyspod.utils.errors as utils_errors
22 | import pyspod.utils.postproc as post
23 |
24 |
25 | @pytest.mark.mpi(minsize=2, maxsize=3)
26 | def test_standard_class_compute():
27 | ## -------------------------------------------------------------------------
28 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
29 | data_dict = utils_io.read_data(data_file=data_file)
30 | data = data_dict['p'].T
31 | dt = data_dict['dt'][0,0]
32 | t = dt * np.arange(0,data.shape[0]).T
33 | nt = t.shape[0]
34 | params = {
35 | # -- required parameters
36 | 'time_step' : dt,
37 | 'n_space_dims': 2,
38 | 'n_variables' : 1,
39 | # -- optional parameters
40 | 'normalize_weights': False,
41 | 'n_modes_save' : 8,
42 | 'dtype' : 'double',
43 | 'savedir' : os.path.join(CFD, 'results')
44 | }
45 | ## -------------------------------------------------------------------------
46 | try:
47 | from mpi4py import MPI
48 | comm = MPI.COMM_WORLD
49 | except:
50 | comm = None
51 | ## fit and transform pod
52 | pod_class = pod_standard(params=params, comm=comm)
53 | pod = pod_class.fit(data_list=data)
54 | results_dir = pod._savedir_sim
55 | file_coeffs, coeffs_dir = pod.compute_coeffs_op(
56 | data=data, results_dir=results_dir)
57 | file_dynamics, coeffs_dir = pod.compute_reconstruction(
58 | coeffs_dir=coeffs_dir, time_idx='all')
59 |
60 | ## assert test
61 | savedir = pod._savedir
62 | assert(pod.dim ==4)
63 | assert(pod.shape ==(1, 20, 88, 1))
64 | assert(pod.nt ==1000)
65 | assert(pod.nx ==1760)
66 | assert(pod.nv ==1)
67 | assert(pod.xdim ==2)
68 | assert(pod.xshape ==(20, 88))
69 | assert(pod.dt ==0.2)
70 | assert(pod.n_modes_save==8)
71 | modes = np.load(pod._file_modes)
72 | coeffs = np.load(file_coeffs)
73 | recons = np.load(file_dynamics)
74 | recons_cl = np.load(file_dynamics)
75 | # print(coeffs.shape)
76 | # print(recons.shape)
77 | tol1 = 1e-6; tol2 = 1e-10
78 | if comm.rank == 0:
79 | ## fit
80 | # print(np.real(pod.eigs[0]))
81 | # print(pod.weights[0])
82 | # print(np.abs(modes[0,1,0,0]))
83 | # print(np.abs(modes[10,3,0,2]))
84 | # print(np.abs(modes[14,15,0,1]))
85 | # print(np.min(np.abs(modes)))
86 | # print(np.max(np.abs(modes)))
87 | assert(modes.shape==(20, 88, 1, 8))
88 | assert((np.real(pod.eigs[0]) <5.507017010287017 +tol1) and \
89 | (np.real(pod.eigs[0]) >5.507017010287017 -tol1))
90 | assert((pod.weights[0,0] <1. +tol1) and \
91 | (pod.weights[0,0] >1. -tol1))
92 | assert((np.abs(modes[0,1,0,0]) <0.00083357978228883+tol2) and \
93 | (np.abs(modes[0,1,0,0]) >0.00083357978228883-tol2))
94 | assert((np.abs(modes[10,3,0,2]) <3.9895843115101e-05+tol2) and \
95 | (np.abs(modes[10,3,0,2]) >3.9895843115101e-05-tol2))
96 | assert((np.abs(modes[14,15,0,1])<5.6967220942460e-05+tol2) and \
97 | (np.abs(modes[14,15,0,1])>5.6967220942460e-05-tol2))
98 | assert((np.min(np.abs(modes)) <3.7644953502612e-08+tol2) and \
99 | (np.min(np.abs(modes)) >3.7644953502612e-08-tol2))
100 | assert((np.max(np.abs(modes)) <0.13122305680422694+tol2) and \
101 | (np.max(np.abs(modes)) >0.13122305680422694-tol2))
102 | ## transform
103 | # print(np.real(np.max(coeffs)))
104 | # print(np.real(np.max(recons)))
105 | assert(coeffs.shape==(8, 1000))
106 | assert(recons.shape==(1000, 20, 88, 1))
107 | assert((np.real(np.max(coeffs))<0.244272570390476+tol2) and \
108 | (np.real(np.max(coeffs))>0.244272570390476-tol2))
109 | assert((np.real(np.max(recons))<4.495997223290585+tol2) and \
110 | (np.real(np.max(recons))>4.495997223290585-tol2))
111 | assert((np.real(np.max(recons_cl))<4.495997223290585+tol2) and \
112 | (np.real(np.max(recons_cl))>4.495997223290585-tol2))
113 | x = data[...,None]
114 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
115 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
116 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
117 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
118 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
119 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
120 | ## errors
121 | # print(f'{l1 = :}')
122 | # print(f'{l2 = :}')
123 | # print(f'{li = :}')
124 | # print(f'{l1_r = :}')
125 | # print(f'{l2_r = :}')
126 | # print(f'{li_r = :}')
127 | assert((l1 <0.002285731618209+tol2) and (l1 >0.002285731618209-tol2))
128 | assert((l2 <2.85867239211e-06+tol2) and (l2 >2.85867239211e-06-tol2))
129 | assert((li <0.095300914161469+tol2) and (li >0.095300914161469-tol2))
130 | assert((l1_r<0.000512977176726+tol2) and (l1_r>0.000512977176726-tol2))
131 | assert((l2_r<6.41990505721e-07+tol2) and (l2_r>6.41990505721e-07-tol2))
132 | assert((li_r<0.021960611302988+tol2) and (li_r>0.021960611302988-tol2))
133 | ## clean up results
134 | try:
135 | shutil.rmtree(os.path.join(CFD,'results'))
136 | except OSError as e:
137 | pass
138 |
139 | @pytest.mark.mpi(minsize=2, maxsize=3)
140 | def test_standard_utils_compute():
141 | ## -------------------------------------------------------------------------
142 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
143 | data_dict = utils_io.read_data(data_file=data_file)
144 | data = data_dict['p'].T
145 | dt = data_dict['dt'][0,0]
146 | t = dt * np.arange(0,data.shape[0]).T
147 | nt = t.shape[0]
148 | params = {
149 | # -- required parameters
150 | 'time_step' : dt,
151 | 'n_space_dims': 2,
152 | 'n_variables' : 1,
153 | # -- optional parameters
154 | 'normalize_weights': False,
155 | 'n_modes_save' : 8,
156 | 'dtype' : 'double',
157 | 'savedir' : os.path.join(CFD, 'results')
158 | }
159 | ## -------------------------------------------------------------------------
160 | try:
161 | from mpi4py import MPI
162 | comm = MPI.COMM_WORLD
163 | except:
164 | comm = None
165 | ## fit and transform pod
166 |
167 | pod_class = pod_standard(params=params, comm=comm)
168 | pod = pod_class.fit(data_list=data)
169 | results_dir = pod._savedir_sim
170 | file_coeffs, coeffs_dir = utils_pod.compute_coeffs_op(
171 | data=data, results_dir=results_dir, comm=comm)
172 | file_dynamics, coeffs_dir = utils_pod.compute_reconstruction(
173 | coeffs_dir=coeffs_dir, time_idx='all', comm=comm)
174 |
175 | ## assert test
176 | savedir = pod._savedir
177 | assert(pod.dim ==4)
178 | assert(pod.shape ==(1, 20, 88, 1))
179 | assert(pod.nt ==1000)
180 | assert(pod.nx ==1760)
181 | assert(pod.nv ==1)
182 | assert(pod.xdim ==2)
183 | assert(pod.xshape ==(20, 88))
184 | assert(pod.dt ==0.2)
185 | assert(pod.n_modes_save==8)
186 | modes = np.load(pod._file_modes)
187 | coeffs = np.load(file_coeffs)
188 | recons = np.load(file_dynamics)
189 | # print(coeffs.shape)
190 | # print(recons.shape)
191 | tol1 = 1e-6; tol2 = 1e-10
192 | if comm.rank == 0:
193 | ## fit
194 | # print(np.real(pod.eigs[0]))
195 | # print(pod.weights[0])
196 | # print(np.abs(modes[0,1,0,0]))
197 | # print(np.abs(modes[10,3,0,2]))
198 | # print(np.abs(modes[14,15,0,1]))
199 | # print(np.min(np.abs(modes)))
200 | # print(np.max(np.abs(modes)))
201 | assert(modes.shape==(20, 88, 1, 8))
202 | assert((np.real(pod.eigs[0]) <5.507017010287017 +tol1) and \
203 | (np.real(pod.eigs[0]) >5.507017010287017 -tol1))
204 | assert((pod.weights[0,0] <1. +tol1) and \
205 | (pod.weights[0,0] >1. -tol1))
206 | assert((np.abs(modes[0,1,0,0]) <0.00083357978228883+tol2) and \
207 | (np.abs(modes[0,1,0,0]) >0.00083357978228883-tol2))
208 | assert((np.abs(modes[10,3,0,2]) <3.9895843115101e-05+tol2) and \
209 | (np.abs(modes[10,3,0,2]) >3.9895843115101e-05-tol2))
210 | assert((np.abs(modes[14,15,0,1])<5.6967220942460e-05+tol2) and \
211 | (np.abs(modes[14,15,0,1])>5.6967220942460e-05-tol2))
212 | assert((np.min(np.abs(modes)) <3.7644953502612e-08+tol2) and \
213 | (np.min(np.abs(modes)) >3.7644953502612e-08-tol2))
214 | assert((np.max(np.abs(modes)) <0.13122305680422694+tol2) and \
215 | (np.max(np.abs(modes)) >0.13122305680422694-tol2))
216 | ## transform
217 | # print(np.real(np.max(coeffs)))
218 | # print(np.real(np.max(recons)))
219 | assert(coeffs.shape==(8, 1000))
220 | assert(recons.shape==(1000, 20, 88, 1))
221 | assert((np.real(np.max(coeffs))<0.244272570390476+tol2) and \
222 | (np.real(np.max(coeffs))>0.244272570390476-tol2))
223 | assert((np.real(np.max(recons))<4.495997223290585+tol2) and \
224 | (np.real(np.max(recons))>4.495997223290585-tol2))
225 | x = data[...,None]
226 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
227 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
228 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
229 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
230 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
231 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
232 | ## errors
233 | # print(f'{l1 = :}')
234 | # print(f'{l2 = :}')
235 | # print(f'{li = :}')
236 | # print(f'{l1_r = :}')
237 | # print(f'{l2_r = :}')
238 | # print(f'{li_r = :}')
239 | assert((l1 <0.002285731618209+tol2) and (l1 >0.002285731618209-tol2))
240 | assert((l2 <2.85867239211e-06+tol2) and (l2 >2.85867239211e-06-tol2))
241 | assert((li <0.095300914161469+tol2) and (li >0.095300914161469-tol2))
242 | assert((l1_r<0.000512977176726+tol2) and (l1_r>0.000512977176726-tol2))
243 | assert((l2_r<6.41990505721e-07+tol2) and (l2_r>6.41990505721e-07-tol2))
244 | assert((li_r<0.021960611302988+tol2) and (li_r>0.021960611302988-tol2))
245 | ## clean up results
246 | try:
247 | shutil.rmtree(os.path.join(CFD,'results'))
248 | except OSError as e:
249 | pass
250 |
251 | @pytest.mark.mpi(minsize=2, maxsize=3)
252 | def test_standard_convergence():
253 | ## -------------------------------------------------------------------------
254 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
255 | data_dict = utils_io.read_data(data_file=data_file)
256 | data = data_dict['p'].T
257 | dt = data_dict['dt'][0,0]
258 | t = dt * np.arange(0,data.shape[0]).T
259 | nt = t.shape[0]
260 | params = {
261 | # -- required parameters
262 | 'time_step' : dt,
263 | 'n_space_dims': 2,
264 | 'n_variables' : 1,
265 | # -- optional parameters
266 | 'normalize_weights': False,
267 | 'n_modes_save' : 300,
268 | 'dtype' : 'double',
269 | 'savedir' : os.path.join(CFD, 'results')
270 | }
271 | ## -------------------------------------------------------------------------
272 | try:
273 | from mpi4py import MPI
274 | comm = MPI.COMM_WORLD
275 | except:
276 | comm = None
277 | ## fit and transform pod
278 | pod_class = pod_standard(params=params, comm=comm)
279 | pod = pod_class.fit(data_list=data)
280 | results_dir = pod._savedir_sim
281 | file_coeffs, coeffs_dir = utils_pod.compute_coeffs_op(
282 | data=data, results_dir=results_dir, comm=comm)
283 | file_dynamics, coeffs_dir = utils_pod.compute_reconstruction(
284 | coeffs_dir=coeffs_dir, time_idx='all', comm=comm)
285 |
286 | ## assert test
287 | savedir = pod._savedir
288 | modes = np.load(pod._file_modes)
289 | coeffs = np.load(file_coeffs)
290 | recons = np.load(file_dynamics)
291 | tol1 = 1e-6; tol2 = 1e-10
292 | if comm.rank == 0:
293 | x = pod.get_data(data)
294 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
295 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
296 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
297 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
298 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
299 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
300 | ## errors
301 | # print(f'{l1 = :}')
302 | # print(f'{l2 = :}')
303 | # print(f'{li = :}')
304 | # print(f'{l1_r = :}')
305 | # print(f'{l2_r = :}')
306 | # print(f'{li_r = :}')
307 | post.generate_2d_subplot(
308 | var1=x [10,...,0], title1='data1',
309 | var2=recons[10,...,0], title2='data2',
310 | N_round=6, path=params['savedir'],
311 | filename='rec.png')
312 | ## clean up results
313 | try:
314 | shutil.rmtree(os.path.join(CFD,'results'))
315 | except OSError as e:
316 | pass
317 |
318 |
319 |
320 | if __name__ == "__main__":
321 | test_standard_class_compute()
322 | test_standard_utils_compute()
323 | test_standard_convergence()
324 |
--------------------------------------------------------------------------------
/tests/test_pod_serial.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import h5py
6 | import shutil
7 | import numpy as np
8 |
9 | # Current, parent and file paths
10 | CWD = os.getcwd()
11 | CF = os.path.realpath(__file__)
12 | CFD = os.path.dirname(CF)
13 | sys.path.append(os.path.join(CFD,'../'))
14 |
15 | # Import library specific modules
16 | from pyspod.pod.standard import Standard as pod_standard
17 | import pyspod.pod.utils as utils_pod
18 | import pyspod.utils.io as utils_io
19 | import pyspod.utils.errors as utils_errors
20 | import pyspod.utils.postproc as post
21 |
22 |
23 | def test_standard_class_compute():
24 | ## -------------------------------------------------------------------------
25 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
26 | data_dict = utils_io.read_data(data_file=data_file)
27 | data = data_dict['p'].T
28 | dt = data_dict['dt'][0,0]
29 | t = dt * np.arange(0,data.shape[0]).T
30 | nt = t.shape[0]
31 | params = {
32 | # -- required parameters
33 | 'time_step' : dt,
34 | 'n_space_dims': 2,
35 | 'n_variables' : 1,
36 | # -- optional parameters
37 | 'normalize_weights': False,
38 | 'n_modes_save' : 8,
39 | 'dtype' : 'double',
40 | 'savedir' : os.path.join(CFD, 'results')
41 | }
42 | ## -------------------------------------------------------------------------
43 |
44 | ## fit and transform pod
45 | pod_class = pod_standard(params=params)
46 | pod = pod_class.fit(data_list=data)
47 | results_dir = pod._savedir_sim
48 | file_coeffs, coeffs_dir = pod.compute_coeffs_op(
49 | data=data, results_dir=results_dir)
50 | file_dynamics, coeffs_dir = pod.compute_reconstruction(
51 | coeffs_dir=coeffs_dir, time_idx='all')
52 |
53 | ## assert test
54 | savedir = pod._savedir
55 | assert(pod.dim ==4)
56 | assert(pod.shape ==(1, 20, 88, 1))
57 | assert(pod.nt ==1000)
58 | assert(pod.nx ==1760)
59 | assert(pod.nv ==1)
60 | assert(pod.xdim ==2)
61 | assert(pod.xshape ==(20, 88))
62 | assert(pod.dt ==0.2)
63 | assert(pod.n_modes_save==8)
64 | modes = np.load(pod._file_modes)
65 | coeffs = np.load(file_coeffs)
66 | recons = np.load(file_dynamics)
67 | recons_cl = np.load(file_dynamics)
68 | # print(coeffs.shape)
69 | # print(recons.shape)
70 | tol1 = 1e-6; tol2 = 1e-10
71 | ## fit
72 | # print(np.real(pod.eigs[0]))
73 | # print(pod.weights[0])
74 | # print(np.abs(modes[0,1,0,0]))
75 | # print(np.abs(modes[10,3,0,2]))
76 | # print(np.abs(modes[14,15,0,1]))
77 | # print(np.min(np.abs(modes)))
78 | # print(np.max(np.abs(modes)))
79 | assert(modes.shape==(20, 88, 1, 8))
80 | assert((np.real(pod.eigs[0]) <5.507017010287017 +tol1) and \
81 | (np.real(pod.eigs[0]) >5.507017010287017 -tol1))
82 | assert((pod.weights[0,0] <1. +tol1) and \
83 | (pod.weights[0,0] >1. -tol1))
84 | assert((np.abs(modes[0,1,0,0]) <0.00083357978228883+tol2) and \
85 | (np.abs(modes[0,1,0,0]) >0.00083357978228883-tol2))
86 | assert((np.abs(modes[10,3,0,2]) <3.9895843115101e-05+tol2) and \
87 | (np.abs(modes[10,3,0,2]) >3.9895843115101e-05-tol2))
88 | assert((np.abs(modes[14,15,0,1])<5.6967220942460e-05+tol2) and \
89 | (np.abs(modes[14,15,0,1])>5.6967220942460e-05-tol2))
90 | assert((np.min(np.abs(modes)) <3.7644953502612e-08+tol2) and \
91 | (np.min(np.abs(modes)) >3.7644953502612e-08-tol2))
92 | assert((np.max(np.abs(modes)) <0.13122305680422694+tol2) and \
93 | (np.max(np.abs(modes)) >0.13122305680422694-tol2))
94 | ## transform
95 | # print(np.real(np.max(coeffs)))
96 | # print(np.real(np.max(recons)))
97 | assert(coeffs.shape==(8, 1000))
98 | assert(recons.shape==(1000, 20, 88, 1))
99 | assert((np.real(np.max(coeffs))<0.244272570390476+tol2) and \
100 | (np.real(np.max(coeffs))>0.244272570390476-tol2))
101 | assert((np.real(np.max(recons))<4.495997223290585+tol2) and \
102 | (np.real(np.max(recons))>4.495997223290585-tol2))
103 | assert((np.real(np.max(recons_cl))<4.495997223290585+tol2) and \
104 | (np.real(np.max(recons_cl))>4.495997223290585-tol2))
105 | x = data[...,None]
106 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
107 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
108 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
109 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
110 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
111 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
112 | ## errors
113 | # print(f'{l1 = :}')
114 | # print(f'{l2 = :}')
115 | # print(f'{li = :}')
116 | # print(f'{l1_r = :}')
117 | # print(f'{l2_r = :}')
118 | # print(f'{li_r = :}')
119 | assert((l1 <0.002285731618209+tol2) and (l1 >0.002285731618209-tol2))
120 | assert((l2 <2.85867239211e-06+tol2) and (l2 >2.85867239211e-06-tol2))
121 | assert((li <0.095300914161469+tol2) and (li >0.095300914161469-tol2))
122 | assert((l1_r<0.000512977176726+tol2) and (l1_r>0.000512977176726-tol2))
123 | assert((l2_r<6.41990505721e-07+tol2) and (l2_r>6.41990505721e-07-tol2))
124 | assert((li_r<0.021960611302988+tol2) and (li_r>0.021960611302988-tol2))
125 | ## clean up results
126 | try:
127 | shutil.rmtree(os.path.join(CFD,'results'))
128 | except OSError as e:
129 | pass
130 |
131 | def test_standard_utils_compute():
132 | ## -------------------------------------------------------------------------
133 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
134 | data_dict = utils_io.read_data(data_file=data_file)
135 | data = data_dict['p'].T
136 | dt = data_dict['dt'][0,0]
137 | t = dt * np.arange(0,data.shape[0]).T
138 | nt = t.shape[0]
139 | params = {
140 | # -- required parameters
141 | 'time_step' : dt,
142 | 'n_space_dims': 2,
143 | 'n_variables' : 1,
144 | # -- optional parameters
145 | 'normalize_weights': False,
146 | 'n_modes_save' : 8,
147 | 'dtype' : 'double',
148 | 'savedir' : os.path.join(CFD, 'results')
149 | }
150 | ## -------------------------------------------------------------------------
151 |
152 | ## fit and transform pod
153 | pod_class = pod_standard(params=params)
154 | pod = pod_class.fit(data_list=data)
155 | results_dir = pod._savedir_sim
156 | file_coeffs, coeffs_dir = utils_pod.compute_coeffs_op(
157 | data=data, results_dir=results_dir)
158 | file_dynamics, coeffs_dir = utils_pod.compute_reconstruction(
159 | coeffs_dir=coeffs_dir, time_idx='all')
160 |
161 | ## assert test
162 | savedir = pod._savedir
163 | assert(pod.dim ==4)
164 | assert(pod.shape ==(1, 20, 88, 1))
165 | assert(pod.nt ==1000)
166 | assert(pod.nx ==1760)
167 | assert(pod.nv ==1)
168 | assert(pod.xdim ==2)
169 | assert(pod.xshape ==(20, 88))
170 | assert(pod.dt ==0.2)
171 | assert(pod.n_modes_save==8)
172 | modes = np.load(pod._file_modes)
173 | coeffs = np.load(file_coeffs)
174 | recons = np.load(file_dynamics)
175 | # print(coeffs.shape)
176 | # print(recons.shape)
177 | tol1 = 1e-6; tol2 = 1e-10
178 | ## fit
179 | # print(np.real(pod.eigs[0]))
180 | # print(pod.weights[0])
181 | # print(np.abs(modes[0,1,0,0]))
182 | # print(np.abs(modes[10,3,0,2]))
183 | # print(np.abs(modes[14,15,0,1]))
184 | # print(np.min(np.abs(modes)))
185 | # print(np.max(np.abs(modes)))
186 | assert(modes.shape==(20, 88, 1, 8))
187 | assert((np.real(pod.eigs[0]) <5.507017010287017 +tol1) and \
188 | (np.real(pod.eigs[0]) >5.507017010287017 -tol1))
189 | assert((pod.weights[0,0] <1. +tol1) and \
190 | (pod.weights[0,0] >1. -tol1))
191 | assert((np.abs(modes[0,1,0,0]) <0.00083357978228883+tol2) and \
192 | (np.abs(modes[0,1,0,0]) >0.00083357978228883-tol2))
193 | assert((np.abs(modes[10,3,0,2]) <3.9895843115101e-05+tol2) and \
194 | (np.abs(modes[10,3,0,2]) >3.9895843115101e-05-tol2))
195 | assert((np.abs(modes[14,15,0,1])<5.6967220942460e-05+tol2) and \
196 | (np.abs(modes[14,15,0,1])>5.6967220942460e-05-tol2))
197 | assert((np.min(np.abs(modes)) <3.7644953502612e-08+tol2) and \
198 | (np.min(np.abs(modes)) >3.7644953502612e-08-tol2))
199 | assert((np.max(np.abs(modes)) <0.13122305680422694+tol2) and \
200 | (np.max(np.abs(modes)) >0.13122305680422694-tol2))
201 | ## transform
202 | # print(np.real(np.max(coeffs)))
203 | # print(np.real(np.max(recons)))
204 | assert(coeffs.shape==(8, 1000))
205 | assert(recons.shape==(1000, 20, 88, 1))
206 | assert((np.real(np.max(coeffs))<0.244272570390476+tol2) and \
207 | (np.real(np.max(coeffs))>0.244272570390476-tol2))
208 | assert((np.real(np.max(recons))<4.495997223290585+tol2) and \
209 | (np.real(np.max(recons))>4.495997223290585-tol2))
210 | x = data[...,None]
211 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
212 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
213 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
214 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
215 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
216 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
217 | ## errors
218 | # print(f'{l1 = :}')
219 | # print(f'{l2 = :}')
220 | # print(f'{li = :}')
221 | # print(f'{l1_r = :}')
222 | # print(f'{l2_r = :}')
223 | # print(f'{li_r = :}')
224 | assert((l1 <0.002285731618209+tol2) and (l1 >0.002285731618209-tol2))
225 | assert((l2 <2.85867239211e-06+tol2) and (l2 >2.85867239211e-06-tol2))
226 | assert((li <0.095300914161469+tol2) and (li >0.095300914161469-tol2))
227 | assert((l1_r<0.000512977176726+tol2) and (l1_r>0.000512977176726-tol2))
228 | assert((l2_r<6.41990505721e-07+tol2) and (l2_r>6.41990505721e-07-tol2))
229 | assert((li_r<0.021960611302988+tol2) and (li_r>0.021960611302988-tol2))
230 | ## clean up results
231 | try:
232 | shutil.rmtree(os.path.join(CFD,'results'))
233 | except OSError as e:
234 | pass
235 |
236 | def test_standard_convergence():
237 | ## -----------------------------------------------------------------------
238 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
239 | data_dict = utils_io.read_data(data_file=data_file)
240 | data = data_dict['p'].T
241 | dt = data_dict['dt'][0,0]
242 | t = dt * np.arange(0,data.shape[0]).T
243 | nt = t.shape[0]
244 | params = {
245 | # -- required parameters
246 | 'time_step' : dt,
247 | 'n_space_dims': 2,
248 | 'n_variables' : 1,
249 | # -- optional parameters
250 | 'normalize_weights': False,
251 | 'n_modes_save' : 300,
252 | 'dtype' : 'double',
253 | 'savedir' : os.path.join(CFD, 'results')
254 | }
255 | ## -----------------------------------------------------------------------
256 |
257 | ## fit and transform pod
258 | pod_class = pod_standard(params=params)
259 | pod = pod_class.fit(data_list=data)
260 | results_dir = pod._savedir_sim
261 | file_coeffs, coeffs_dir = utils_pod.compute_coeffs_op(
262 | data=data, results_dir=results_dir)
263 | file_dynamics, coeffs_dir = utils_pod.compute_reconstruction(
264 | coeffs_dir=coeffs_dir, time_idx='all')
265 | ## assert test
266 | savedir = pod._savedir
267 | modes = np.load(pod._file_modes)
268 | coeffs = np.load(file_coeffs)
269 | recons = np.load(file_dynamics)
270 | tol1 = 1e-6; tol2 = 1e-10
271 | x = pod.get_data(data)
272 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
273 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
274 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
275 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
276 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
277 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
278 | ## errors
279 | # print(f'{l1 = :}')
280 | # print(f'{l2 = :}')
281 | # print(f'{li = :}')
282 | # print(f'{l1_r = :}')
283 | # print(f'{l2_r = :}')
284 | # print(f'{li_r = :}')
285 | post.generate_2d_subplot(
286 | var1=x [10,...,0], title1='data1',
287 | var2=recons[10,...,0], title2='data2',
288 | N_round=6, path=params['savedir'],
289 | filename='rec.png')
290 | ## clean up results
291 | try:
292 | shutil.rmtree(os.path.join(CWD, params['savedir']))
293 | except OSError as e:
294 | pass
295 |
296 |
297 |
298 | if __name__ == "__main__":
299 | test_standard_class_compute()
300 | test_standard_utils_compute()
301 | test_standard_convergence()
302 |
--------------------------------------------------------------------------------
/tests/test_tutorials.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import pytest
6 | import shutil
7 | import numpy as np
8 |
9 | # Current, parent and file paths
10 | CWD = os.getcwd()
11 | CF = os.path.realpath(__file__)
12 | CFD = os.path.dirname(CF)
13 | sys.path.append(os.path.join(CFD,'../'))
14 |
15 | # Import library specific modules
16 | from pyspod.spod.standard import Standard as spod_standard
17 | from pyspod.spod.streaming import Streaming as spod_streaming
18 | import pyspod.spod.utils as utils_spod
19 | import pyspod.utils.weights as utils_weights
20 | import pyspod.utils.errors as utils_errors
21 | import pyspod.utils.io as utils_io
22 | import pyspod.utils.postproc as post
23 |
24 |
25 |
26 |
27 | @pytest.mark.mpi(minsize=2, maxsize=3)
28 | def test_tutorial1():
29 | ## -------------------------------------------------------------------
30 | ## initialize MPI
31 | ## -------------------------------------------------------------------
32 | try:
33 | from mpi4py import MPI
34 | comm = MPI.COMM_WORLD
35 | rank = comm.rank
36 | except:
37 | comm = None
38 | rank = 0
39 | ## -------------------------------------------------------------------
40 |
41 |
42 |
43 | ## -------------------------------------------------------------------
44 | ## read data and params
45 | ## -------------------------------------------------------------------
46 | ## data
47 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
48 | data_dict = utils_io.read_data(data_file=data_file)
49 | data = data_dict['p'].T
50 | dt = data_dict['dt'][0,0]
51 | nt = data.shape[0]
52 | x1 = data_dict['r'].T; x1 = x1[:,0]
53 | x2 = data_dict['x'].T; x2 = x2[0,:]
54 | ## params
55 | config_file = os.path.join(CFD, 'data', 'input_tutorial1.yaml')
56 | params = utils_io.read_config(config_file)
57 | params['time_step'] = dt
58 | ## -------------------------------------------------------------------
59 |
60 |
61 |
62 | ## -------------------------------------------------------------------
63 | ## compute spod modes and check orthogonality
64 | ## -------------------------------------------------------------------
65 | standard = spod_standard (params=params, comm=comm)
66 | streaming = spod_streaming(params=params, comm=comm)
67 | spod = standard.fit(data_list=data)
68 | results_dir = spod.savedir_sim
69 | flag, ortho = utils_spod.check_orthogonality(
70 | results_dir=results_dir, mode_idx1=[1],
71 | mode_idx2=[0], freq_idx=[5], dtype='double',
72 | comm=comm)
73 | ## -------------------------------------------------------------------
74 |
75 |
76 |
77 | ## -------------------------------------------------------------------
78 | ## compute coefficients
79 | ## -------------------------------------------------------------------
80 | file_coeffs, coeffs_dir = utils_spod.compute_coeffs_op(
81 | data=data, results_dir=results_dir, comm=comm)
82 | ## -------------------------------------------------------------------
83 |
84 |
85 |
86 | ## -------------------------------------------------------------------
87 | ## compute reconstruction
88 | ## -------------------------------------------------------------------
89 | file_dynamics, coeffs_dir = utils_spod.compute_reconstruction(
90 | coeffs_dir=coeffs_dir, time_idx='all', comm=comm)
91 | ## -------------------------------------------------------------------
92 |
93 |
94 |
95 | ## only rank 0
96 | if rank == 0:
97 | ## ---------------------------------------------------------------
98 | ## postprocessing
99 | ## ---------------------------------------------------------------
100 | ## plot eigenvalues
101 | spod.plot_eigs(filename='eigs.jpg')
102 | spod.plot_eigs_vs_frequency(filename='eigs_freq.jpg')
103 | spod.plot_eigs_vs_period(filename='eigs_period.jpg')
104 |
105 | ## identify frequency of interest
106 | T1 = 0.9; T2 = 4
107 | f1, f1_idx = spod.find_nearest_freq(freq_req=1/T1, freq=spod.freq)
108 | f2, f2_idx = spod.find_nearest_freq(freq_req=1/T2, freq=spod.freq)
109 |
110 | ## plot 2d modes at frequency of interest
111 | spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq,
112 | modes_idx=[0,1,2], x1=x2, x2=x1, equal_axes=True, filename='modes_f1.jpg')
113 |
114 | ## plot 2d modes at frequency of interest
115 | spod.plot_2d_modes_at_frequency(freq_req=f2, freq=spod.freq,
116 | modes_idx=[0,1,2], x1=x2, x2=x1, equal_axes=True, filename='modes_f2.jpg')
117 |
118 | ## plot coefficients
119 | coeffs = np.load(file_coeffs)
120 | post.plot_coeffs(coeffs, coeffs_idx=[0,1], path=results_dir,
121 | filename='coeffs.jpg')
122 |
123 | ## plot reconstruction
124 | recons = np.load(file_dynamics)
125 | post.plot_2d_data(recons, time_idx=[0,10], filename='recons.jpg',
126 | path=results_dir, x1=x2, x2=x1, equal_axes=True)
127 |
128 | ## plot data
129 | data = spod.get_data(data)
130 | post.plot_2d_data(data, time_idx=[0,10], filename='data.jpg',
131 | path=results_dir, x1=x2, x2=x1, equal_axes=True)
132 | # post.plot_data_tracers(data, coords_list=[(5,0.5)],
133 | # time_limits=[0,nt], path=results_dir, filename='data_tracers.jpg')
134 | # post.generate_2d_data_video(
135 | # data, sampling=5, time_limits=[0,nt], x1=x2, x2=x1,
136 | # path=results_dir, filename='data_movie1.mp4')
137 | ## -------------------------------------------------------------
138 |
139 |
140 |
141 | ## -------------------------------------------------------------
142 | ## check results
143 | ## -------------------------------------------------------------
144 | tol = 1e-8; tol2 = 1e-3
145 | ## identify frequency of interest
146 | f_, f_idx = spod.find_nearest_freq(freq_req=1/12.5, freq=spod.freq)
147 | modes_at_freq = spod.get_modes_at_freq(freq_idx=f_idx)
148 | coeffs = np.load(file_coeffs)
149 | recons = np.load(file_dynamics)
150 | # print(f'{flag = :}')
151 | # print(f'{ortho = :}')
152 | # print(f'{np.min(np.abs(modes_at_freq)) = :}')
153 | # print(f'{np.max(np.abs(modes_at_freq)) = :}')
154 | ## fit
155 | assert(flag==True); assert(np.abs(ortho)<1e-15)
156 | assert((np.min(np.abs(modes_at_freq))<8.971537836e-07+tol) and \
157 | (np.min(np.abs(modes_at_freq))>8.971537836e-07-tol))
158 | assert((np.max(np.abs(modes_at_freq))<0.1874697574930+tol) and \
159 | (np.max(np.abs(modes_at_freq))>0.1874697574930-tol))
160 | ## transform
161 | # print(f'{np.real(np.max(coeffs)) = :}')
162 | # print(f'{np.real(np.max(recons)) = :}')
163 | # assert((np.real(np.max(coeffs))<29.749494933937+tol2) and \
164 | # (np.real(np.max(coeffs))>29.749494933937-tol2))
165 | assert((np.real(np.max(recons))< 4.498868461587+tol) and \
166 | (np.real(np.max(recons))> 4.498868461587-tol))
167 | x = data
168 | l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
169 | l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
170 | li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
171 | l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
172 | l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
173 | li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
174 | # print(f'{l1 = :}')
175 | # print(f'{l2 = :}')
176 | # print(f'{li = :}')
177 | # print(f'{l1_r = :}')
178 | # print(f'{l2_r = :}')
179 | # print(f'{li_r = :}')
180 | ## errors
181 | assert((l1 <0.0001259132509+tol) and (l1 >0.0001259132509-tol))
182 | assert((l2 <1.253008689e-07+tol) and (l2 >1.253008689e-07-tol))
183 | assert((li <0.0014188523793+tol) and (li >0.0014188523793-tol))
184 | assert((l1_r<2.823629398e-05+tol) and (l1_r>2.823629398e-05-tol))
185 | assert((l2_r<2.810256299e-08+tol) and (l2_r>2.810256299e-08-tol))
186 | assert((li_r<0.0003185130419+tol) and (li_r>0.0003185130419-tol))
187 | try:
188 | shutil.rmtree(os.path.join(CWD, params['savedir']))
189 | except OSError as e:
190 | pass
191 | ## -------------------------------------------------------------
192 |
193 |
194 |
195 | @pytest.mark.mpi(minsize=2, maxsize=3)
196 | def test_tutorial2(test_two_stage_reader=False):
197 |     ## -------------------------------------------------------------------
198 |     ## initialize MPI
199 |     ## -------------------------------------------------------------------
200 |     try:
201 |         from mpi4py import MPI
202 |         comm = MPI.COMM_WORLD
203 |         rank = comm.rank
204 |     except:
205 |         comm = None
206 |         rank = 0
207 |     ## -------------------------------------------------------------------
208 |
209 |
210 |
211 |     ## -------------------------------------------------------------------
212 |     ## read data and params
213 |     ## -------------------------------------------------------------------
214 |     ## data
215 |     data_file = os.path.join(CFD, './data/', 'era_interim_data.nc')
216 |     ds = utils_io.read_data(data_file=data_file)
217 |     print(ds)
218 |     ## we extract time, longitude and latitude
219 |     t = np.array(ds['time'])
220 |     x1 = np.array(ds['longitude']) - 180
221 |     x2 = np.array(ds['latitude'])
222 |     data = ds['tp']
223 |     nt = len(t)
224 |     print('shape of t (time): ', t.shape)
225 |     print('shape of x1 (longitude): ', x1.shape)
226 |     print('shape of x2 (latitude) : ', x2.shape)
227 |     ## params
228 |     config_file = os.path.join(CFD, 'data', 'input_tutorial2.yaml')
229 |     params = utils_io.read_config(config_file)
230 |
231 |     if test_two_stage_reader:
232 |         params['savefreq_disk'] = False
233 |         params['savefreq_disk2'] = True
234 |
235 |     ## set weights
236 |     weights = utils_weights.geo_trapz_2D(
237 |         x1_dim=x2.shape[0], x2_dim=x1.shape[0],
238 |         n_vars=params['n_variables'])
239 |     ## -------------------------------------------------------------------
240 |
241 |
242 |
243 |     ## -------------------------------------------------------------------
244 |     ## compute spod modes and check orthogonality
245 |     ## -------------------------------------------------------------------
246 |     standard = spod_standard (params=params, weights=weights, comm=comm)
247 |     streaming = spod_streaming(params=params, weights=weights, comm=comm)
248 |
249 |     if test_two_stage_reader:
250 |         spod = standard.fit(data_list=[data_file],variables=['tp'])
251 |     else:
252 |         spod = standard.fit(data_list=data)
253 |
254 |     results_dir = spod.savedir_sim
255 |     if not test_two_stage_reader:
256 |         flag, ortho = utils_spod.check_orthogonality(
257 |             results_dir=results_dir, mode_idx1=[1],
258 |             mode_idx2=[0], freq_idx=[5], dtype='single',
259 |             comm=comm)
260 |     ## -------------------------------------------------------------------
261 |
262 |     # ## -------------------------------------------------------------------
263 |     # ## compute coefficients
264 |     # ## -------------------------------------------------------------------
265 |     # file_coeffs, coeffs_dir = utils_spod.compute_coeffs_op(
266 |     #     data=data, results_dir=results_dir, comm=comm)
267 |     # ## -------------------------------------------------------------------
268 |     #
269 |     #
270 |     #
271 |     # ## -------------------------------------------------------------------
272 |     # ## compute reconstruction
273 |     # ## -------------------------------------------------------------------
274 |     # file_dynamics, coeffs_dir = utils_spod.compute_reconstruction(
275 |     #     coeffs_dir=coeffs_dir, time_idx=[0,1,2,3,4,5,6,7,8,9,10],
276 |     #     comm=comm)
277 |     # ## -------------------------------------------------------------------
278 |
279 |
280 |
281 |     ## only rank 0
282 |     if rank == 0:
283 |         ## ---------------------------------------------------------------
284 |         ## postprocessing
285 |         ## ---------------------------------------------------------------
286 |         ## plot eigenvalues
287 |         spod.plot_eigs(filename='eigs.jpg')
288 |         spod.plot_eigs_vs_frequency(filename='eigs_freq.jpg')
289 |         spod.plot_eigs_vs_period(filename='eigs_period.jpg',
290 |             xticks=[24*10,24*20,24*40,24*60,24*90])
291 |
292 |         ## identify frequency of interest
293 |         T1 = 960; T2 = 1008
294 |         f1, f1_idx = spod.find_nearest_freq(freq_req=1/T1, freq=spod.freq)
295 |         f2, f2_idx = spod.find_nearest_freq(freq_req=1/T2, freq=spod.freq)
296 |
297 |         ## plot 2d modes at frequency of interest
298 |         spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq,
299 |             modes_idx=[0,1,2], x1=x1, x2=x2, coastlines='centred',
300 |             equal_axes=True, filename='modes_f1.jpg')
301 |
302 |         ## plot 2d modes at frequency of interest
303 |         spod.plot_2d_modes_at_frequency(freq_req=f2, freq=spod.freq,
304 |             modes_idx=[0,1,2], x1=x1, x2=x2, coastlines='centred',
305 |             equal_axes=True, filename='modes_f2.jpg')
306 |
307 |         # ## plot coefficients
308 |         # coeffs = np.load(file_coeffs)
309 |         # post.plot_coeffs(coeffs, coeffs_idx=[0,1], path=results_dir,
310 |         #     filename='coeffs.jpg')
311 |
312 |         # # plot reconstruction
313 |         # recons = np.load(file_dynamics)
314 |         # post.plot_2d_data(recons, time_idx=[0,10], filename='recons.jpg',
315 |         #     path=results_dir, x1=x1, x2=x2, coastlines='centred',
316 |         #     equal_axes=True)
317 |
318 |         ## plot data
319 |         data = data.values[...,None]
320 |         post.plot_2d_data(data, time_idx=[0,10], filename='data.jpg',
321 |             path=results_dir, x1=x1, x2=x2, coastlines='centred',
322 |             equal_axes=True)
323 |         # post.plot_data_tracers(data, coords_list=[(5,0.5)],
324 |         #     time_limits=[0,nt], path=results_dir, filename='data_tracers.jpg')
325 |         # post.generate_2d_data_video(
326 |         #     data, sampling=5, time_limits=[0,nt],
327 |         #     x1=x1, x2=x2, coastlines='centred',
328 |         #     path=results_dir, filename='data_movie1.mp4')
329 |         ## -------------------------------------------------------------
330 |
331 |
332 |
333 |     ## -------------------------------------------------------------
334 |     ## check results
335 |     ## -------------------------------------------------------------
336 |     tol = 1e-3
337 |     ## identify frequency of interest
338 |     f_, f_idx = spod.find_nearest_freq(freq_req=1/12.5, freq=spod.freq)
339 |     modes_at_freq = spod.get_modes_at_freq(freq_idx=f_idx)
340 |     # coeffs = np.load(file_coeffs)
341 |     # recons = np.load(file_dynamics)
342 |     # print(f'{flag = :}')
343 |     # print(f'{ortho = :}')
344 |     # print(f'{np.min(np.abs(modes_at_freq)) = :}')
345 |     # print(f'{np.max(np.abs(modes_at_freq)) = :}')
346 |     ## fit
347 |     if not test_two_stage_reader:
348 |         assert(flag==True); assert(np.abs(ortho)<1e-7)
349 |         assert((np.min(np.abs(modes_at_freq))<1.6945059542e-06+tol) and \
350 |             (np.min(np.abs(modes_at_freq))>1.6945059542e-06-tol))
351 |         assert((np.max(np.abs(modes_at_freq))<4.50340747833251+tol) and \
352 |             (np.max(np.abs(modes_at_freq))>4.50340747833251-tol))
353 |     ## transform
354 |     # print(f'{np.real(np.max(coeffs)) = :}')
355 |     # print(f'{np.real(np.max(recons)) = :}')
356 |     # assert((np.real(np.max(coeffs))<29.7494889132212+tol) and \
357 |     #     (np.real(np.max(coeffs))>29.7494889132212-tol))
358 |     # assert((np.real(np.max(recons))< 4.4988684614862+tol) and \
359 |     #     (np.real(np.max(recons))> 4.4988684614862-tol))
360 |     # x = data
361 |     # l1 = utils_errors.compute_l_errors(recons, x, norm_type='l1')
362 |     # l2 = utils_errors.compute_l_errors(recons, x, norm_type='l2')
363 |     # li = utils_errors.compute_l_errors(recons, x, norm_type='linf')
364 |     # l1_r = utils_errors.compute_l_errors(recons, x, norm_type='l1_rel')
365 |     # l2_r = utils_errors.compute_l_errors(recons, x, norm_type='l2_rel')
366 |     # li_r = utils_errors.compute_l_errors(recons, x, norm_type='linf_rel')
367 |     # print(f'{l1 = :}')
368 |     # print(f'{l2 = :}')
369 |     # print(f'{li = :}')
370 |     # print(f'{l1_r = :}')
371 |     # print(f'{l2_r = :}')
372 |     # print(f'{li_r = :}')
373 |     ## errors
374 |     # assert((l1 <0.0001259132511+tol) and (l1 >0.0001259132511-tol))
375 |     # assert((l2 <1.253008691e-07+tol) and (l2 >1.253008691e-07-tol))
376 |     # assert((li <0.0014188522711+tol) and (li >0.0014188522711-tol))
377 |     # assert((l1_r<2.823629403e-05+tol) and (l1_r>2.823629403e-05-tol))
378 |     # assert((l2_r<2.810256306e-08+tol) and (l2_r>2.810256306e-08-tol))
379 |     # assert((li_r<0.0003185130176+tol) and (li_r>0.0003185130176-tol))
380 |     try:
381 |         shutil.rmtree(os.path.join(CWD, params['savedir']))
382 |     except OSError as e:
383 |         pass
384 |     ## -------------------------------------------------------------
385 |
386 | @pytest.mark.mpi(minsize=2, maxsize=2)
387 | def test_tutorial3():
388 | return test_tutorial2(test_two_stage_reader=True)
389 |
390 | if __name__ == "__main__":
391 | test_tutorial1()
392 | test_tutorial2()
393 | test_tutorial3()
--------------------------------------------------------------------------------
/tests/test_utils_parallel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import shutil
6 | import pytest
7 | import numpy as np
8 | import xarray as xr
9 | from pyspod.utils.reader import reader_2stage as utils_reader_2stage
10 |
11 | # Current, parent and file paths
12 | CWD = os.getcwd()
13 | CF = os.path.realpath(__file__)
14 | CFD = os.path.dirname(CF)
15 | sys.path.append(os.path.join(CFD,'../'))
16 |
17 | # Import library specific modules
18 | import pyspod.utils.io as utils_io
19 | import pyspod.utils.parallel as utils_par
20 |
21 |
22 | @pytest.mark.mpi(minsize=2, maxsize=2)
23 | def test_parallel_pvar():
24 | try:
25 | from mpi4py import MPI
26 | comm = MPI.COMM_WORLD
27 | except:
28 | comm = None
29 |     rank = comm.rank if comm is not None else 0
30 | ## ------------------------------------------------------------------------
31 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
32 | data_dict = utils_io.read_data(data_file=data_file)
33 | data = data_dict['p'].T
34 | dt = data_dict['dt'][0,0]
35 | ## ------------------------------------------------------------------------
36 | v, m, n = utils_par.pvar(data, comm=comm)
37 | tol = 1e-10
38 | if comm.rank == 0:
39 | assert((v<5.12904124410e-05+tol )and(v>5.12904124410e-05-tol ))
40 | assert((m<4.459984976871076+tol )and(m>4.459984976871076-tol ))
41 |
42 | @pytest.mark.mpi(minsize=2, maxsize=2)
43 | def test_parallel_distribute():
44 | try:
45 | from mpi4py import MPI
46 | comm = MPI.COMM_WORLD
47 | except:
48 | comm = None
49 |     rank = comm.rank if comm is not None else 0
50 | ## ------------------------------------------------------------------------
51 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
52 | data_dict = utils_io.read_data(data_file=data_file)
53 | data = data_dict['p'].T
54 | dt = data_dict['dt'][0,0]
55 | ## ------------------------------------------------------------------------
56 | dts, maxidx, gs = utils_par.distribute_data(data, comm=comm)
57 | space_data = data[0,...]
58 | dso = utils_par.distribute_dimension(space_data, maxidx, comm=comm)
59 | # print(f'{rank = :} {dso.shape = :}')
60 | # print(f'{rank = :} {dts.shape = :}')
61 | if rank == 0:
62 | assert(maxidx==1)
63 | assert(gs==(20,88))
64 | if comm.size == 1:
65 | if rank == 0:
66 | assert(dts.shape==(1000, 20, 88))
67 | assert(dso.shape==(20, 88))
68 | elif comm.size == 2:
69 | if rank == 1:
70 | assert(dts.shape==(1000, 20, 44))
71 | assert(dso.shape==(20, 44))
72 | elif comm.size == 3:
73 | if rank == 0:
74 | assert(dts.shape==(1000, 20, 30))
75 | assert(dso.shape==(20, 30))
76 | elif comm.size == 4:
77 | if rank == 3:
78 | assert(dts.shape==(1000, 20, 22))
79 | assert(dso.shape==(20, 22))
80 | elif comm.size == 5:
81 | if rank == 0:
82 | assert(dts.shape==(1000, 20, 18))
83 | assert(dso.shape==(20, 18))
84 | elif comm.size == 6:
85 | if rank == 0:
86 | assert(dts.shape==(1000, 20, 15))
87 | assert(dso.shape==(20, 15))
88 | elif comm.size == 7:
89 | if rank == 5:
90 | assert(dts.shape==(1000, 20, 12))
91 | assert(dso.shape==(20, 12))
92 | elif comm.size == 8:
93 | if rank == 0:
94 | assert(dts.shape==(1000, 20, 11))
95 | assert(dso.shape==(20, 11))
96 | else:
97 | if rank == 0:
98 | print('testing up to 8 MPI ranks; test_parallel_distribute skipped')
99 |
100 | @pytest.mark.mpi(minsize=2, maxsize=2)
101 | def test_parallel_allreduce():
102 | try:
103 | from mpi4py import MPI
104 | comm = MPI.COMM_WORLD
105 | except:
106 | comm = None
107 |     rank = comm.rank if comm is not None else 0
108 | ## ------------------------------------------------------------------------
109 | data_file = os.path.join(CFD,'./data', 'fluidmechanics_data.mat')
110 | data_dict = utils_io.read_data(data_file=data_file)
111 | data = data_dict['p'].T
112 | dt = data_dict['dt'][0,0]
113 | ## ------------------------------------------------------------------------
114 | dts, maxidx, gs = utils_par.distribute_data(data, comm=comm)
115 | dts = np.reshape(dts, [dts.shape[0], dts[0,...].size])
116 | k = dts @ dts.conj().T
117 | dts_r = utils_par.allreduce(k, comm=comm)
118 | # print(f'{rank = :} {np.sum(dts_r) = :}')
119 | tol = 1e-1
120 | if rank == 0:
121 | assert(maxidx==1)
122 | assert((np.sum(dts_r)<35009021572.78676+tol) and \
123 | (np.sum(dts_r)>35009021572.78676-tol))
124 |
125 | @pytest.mark.mpi(minsize=2, maxsize=2)
126 | def test_parallel_pr0():
127 | try:
128 | from mpi4py import MPI
129 | comm = MPI.COMM_WORLD
130 | except:
131 | comm = None
132 |     rank = comm.rank if comm is not None else 0
133 | utils_par.pr0(f'data rank: {rank}', comm=comm)
134 |
135 | @pytest.mark.mpi(minsize=2, maxsize=2)
136 | def test_parallel_npy(axis=0, dtype="d", order='C'):
137 | try:
138 | from mpi4py import MPI
139 | comm = MPI.COMM_WORLD
140 | except:
141 | comm = None
142 |     rank = comm.rank if comm is not None else 0
143 | path = os.path.join(CFD, 'tmp')
144 | filename = os.path.join(path, 'tmp.npy')
145 | if rank == 0:
146 | if not os.path.exists(path): os.makedirs(path)
147 | comm.Barrier()
148 | base_shape = [2, 3, 5]
149 | shape = list(base_shape)
150 | shape[axis] += rank
151 | value = rank**2 + rank + 1
152 | array = np.full(shape, value, dtype=dtype, order=order)
153 | utils_par.npy_save(comm, filename, array, axis)
154 | comm.Barrier()
155 | data = utils_par.npy_load(comm, filename, axis, count=shape[axis])
156 | assert data.shape == array.shape
157 | assert data.dtype == array.dtype
158 | assert np.allclose(data, array)
159 | if rank == 0:
160 | data = np.load(filename)
161 | assert data.dtype == array.dtype
162 | s = 0
163 | for i in range(comm.size):
164 | n = base_shape[axis] + i
165 | index = [slice(None)] * data.ndim
166 | index[axis] = slice(s, s + n)
167 | index = tuple(index)
168 | value = i**2 + i + 1
169 | assert np.allclose(data[index], value)
170 | s += n
171 | # clean up results
172 | try:
173 | shutil.rmtree(path)
174 | except OSError as e:
175 | pass
176 |
177 | @pytest.mark.mpi(minsize=2, maxsize=2)
178 | def test_parallel_distribute_2phase():
179 | try:
180 | from mpi4py import MPI
181 | comm = MPI.COMM_WORLD
182 | except:
183 | comm = None
184 | ## ------------------------------------------------------------------------
185 | data_file = os.path.join(CFD,'./data', 'earthquakes_data.nc')
186 | ds = xr.open_dataset(data_file)
187 | da = ds['slip_potency']
188 | ## ------------------------------------------------------------------------
189 |
190 | # reference 1 phase distribution
191 | dataRef, maxAxisRef, globShapeRef = utils_par.distribute_data(da, comm=comm)
192 |
193 | # 2 phase distribution
194 | xdim = 2
195 | nv = 1
196 | reader = utils_reader_2stage([data_file], xdim, np.float32, comm, nv, ['slip_potency'], nchunks = 2, nblocks = 3)
197 | data_dict = reader.get_data()
198 | maxAxis = reader.max_axis
199 | globShape = reader.xshape
200 | output_shape = (dataRef.shape[0],) + (np.prod(dataRef.shape[1:]),)
201 |
202 | data_np = np.zeros(output_shape)
203 | for _,d in data_dict.items():
204 | s = d["s"]
205 | e = d["e"]
206 | v = d["v"]
207 | data_np[s:e,...] = v[:,...,0]
208 |
209 | d1 = dataRef.to_numpy().flatten()
210 | d2 = data_np.flatten()
211 |
212 | all_d1_list = comm.gather(d1,root=0)
213 | all_d2_list = comm.gather(d2,root=0)
214 |
215 | if comm.rank == 0:
216 | all_d1 = np.concatenate(all_d1_list, axis=0)
217 | all_d2 = np.concatenate(all_d2_list, axis=0)
218 | assert np.allclose(sorted(all_d1),sorted(all_d2),atol=0.0001,rtol=0)
219 |
220 | @pytest.mark.mpi(minsize=2, maxsize=2)
221 | def test_parallel_distribute_2phase_chunks():
222 | try:
223 | from mpi4py import MPI
224 | comm = MPI.COMM_WORLD
225 | except:
226 | comm = None
227 | ## ------------------------------------------------------------------------
228 | data_file = os.path.join(CFD,'./data', 'earthquakes_data.nc')
229 | ds = xr.open_dataset(data_file)
230 | da = ds['slip_potency']
231 | ## ------------------------------------------------------------------------
232 |
233 | # reference 1 phase distribution
234 | dataRef, maxAxisRef, globShapeRef = utils_par.distribute_data(da, comm=comm)
235 |
236 | # 2 phase distribution
237 | xdim = 2
238 | nv = 1
239 | reader = utils_reader_2stage([data_file], xdim, np.float32, comm, nv, ['slip_potency'], nchunks = 2, nblocks = 3)
240 | data_dict = reader.get_data()
241 | maxAxis = reader.max_axis
242 | globShape = reader.xshape
243 |
244 | output_shape = (dataRef.shape[0],) + (np.prod(dataRef.shape[1:]),)
245 |
246 | data_np = np.zeros(output_shape)
247 | for _,d in data_dict.items():
248 | s = d["s"]
249 | e = d["e"]
250 | v = d["v"]
251 | data_np[s:e,...] = v[:,...,0]
252 |
253 | reader = utils_reader_2stage([data_file], xdim, np.float32, comm, nv, ['slip_potency'], nchunks = 6, nblocks = 3)
254 | data_dict = reader.get_data()
255 | maxAxis = reader.max_axis
256 | globShape = reader.xshape
257 |
258 | data_np2 = np.zeros(output_shape)
259 | for _,d in data_dict.items():
260 | s = d["s"]
261 | e = d["e"]
262 | v = d["v"]
263 | data_np2[s:e,...] = v[:,...,0]
264 |
265 | reader = utils_reader_2stage([data_file], xdim, np.float32, comm, nv, ['slip_potency'], nchunks = 3, nblocks = 6)
266 | data_dict = reader.get_data()
267 | maxAxis = reader.max_axis
268 | globShape = reader.xshape
269 |
270 | data_np3 = np.zeros(output_shape)
271 | for _,d in data_dict.items():
272 | s = d["s"]
273 | e = d["e"]
274 | v = d["v"]
275 | data_np3[s:e,...] = v[:,...,0]
276 |
277 | d1 = dataRef.to_numpy().flatten()
278 | d2 = data_np.flatten()
279 | d3 = data_np2.flatten()
280 | d4 = data_np3.flatten()
281 |
282 | all_d1_list = comm.gather(d1,root=0)
283 | all_d2_list = comm.gather(d2,root=0)
284 | all_d3_list = comm.gather(d3,root=0)
285 | all_d4_list = comm.gather(d4,root=0)
286 |
287 | if comm.rank == 0:
288 | all_d1 = np.concatenate(all_d1_list, axis=0)
289 | all_d2 = np.concatenate(all_d2_list, axis=0)
290 | all_d3 = np.concatenate(all_d3_list, axis=0)
291 | all_d4 = np.concatenate(all_d4_list, axis=0)
292 | assert np.allclose(sorted(all_d1),sorted(all_d2),atol=0.0001,rtol=0)
293 | assert np.allclose(sorted(all_d2),sorted(all_d3),atol=0.0001,rtol=0)
294 | assert np.allclose(sorted(all_d3),sorted(all_d4),atol=0.0001,rtol=0)
295 |
296 | if __name__ == "__main__":
297 | test_parallel_pvar()
298 | test_parallel_distribute()
299 | test_parallel_allreduce()
300 | test_parallel_pr0()
301 | test_parallel_distribute_2phase()
302 | test_parallel_distribute_2phase_chunks()
303 |
304 | for axis in range(3):
305 | for dtype in "iIqQfdFD":
306 | for order in "CF":
307 | test_parallel_npy(axis, dtype, order)
308 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | skip_missing_interpreters=True
3 | envlist =
4 | py37
5 | py38
6 | py39
7 | py310
8 |
9 | [testenv]
10 | commands =
11 | {envpython} -c "import pyspod"
12 |
--------------------------------------------------------------------------------
/tutorials/README.md:
--------------------------------------------------------------------------------
1 | ## Tutorials
2 |
3 | We provide a set of tutorials that cover the main features of the
4 | PySPOD library. These are organized as `jupyter` notebooks,
5 | each accompanied by a plain `python` implementation.
6 |
7 | The tutorials are divided so that they cover different
8 | functionalities of the library as well as practical
9 | application areas.
10 |
11 | ### Basic
12 |
13 | #### [Tutorial 1: 2D pressure fluctuations in a turbulent jet](tutorial1/tutorial1.ipynb)
14 |
15 | This tutorial shows a simple 2D application to a turbulent jet.
16 | The variable studied is pressure.
17 |
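In a nutshell, the workflow of this tutorial boils down to a handful of calls. The sketch below mirrors `tutorial1.py`, run serially for simplicity; paths are relative to the repository root and the period of interest is illustrative:

```python
import pyspod.utils.io as utils_io
from pyspod.spod.standard import Standard as spod_standard

## read the data shipped with the test suite and the tutorial configuration
data_dict = utils_io.read_data(data_file='tests/data/fluidmechanics_data.mat')
data = data_dict['p'].T                      # time is the leading dimension
x1 = data_dict['r'].T[:,0]
x2 = data_dict['x'].T[0,:]
params = utils_io.read_config('tests/data/input_tutorial1.yaml')
params['time_step'] = data_dict['dt'][0,0]

## fit SPOD and plot the leading modes at a period of interest
spod = spod_standard(params=params).fit(data_list=data)
f1, f1_idx = spod.find_nearest_freq(freq_req=1/0.9, freq=spod.freq)
spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq,
    modes_idx=[0,1,2], x1=x2, x2=x1, equal_axes=True, filename='modes_f1.jpg')
```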
18 |
19 | #### [Tutorial 2: 2D total precipitation from the ERA Interim dataset](tutorial2/tutorial2.ipynb)
20 |
21 | This tutorial shows a 2D application to climate reanalysis data from the
22 | ERA Interim dataset. The variable studied is total precipitation, and the
23 | aim is to capture the Madden-Julian Oscillation (MJO).
24 |
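With respect to Tutorial 1, the main additions are the trapezoidal geographic weights
for the longitude-latitude grid and the coastline overlay in the mode plots.
A minimal serial sketch of these two ingredients (paths relative to the repository
root; the period `T1` is illustrative):

```python
import numpy as np
import pyspod.utils.io as utils_io
import pyspod.utils.weights as utils_weights
from pyspod.spod.standard import Standard as spod_standard

ds = utils_io.read_data(data_file='tests/data/era_interim_data.nc')
x1 = np.array(ds['longitude']) - 180   # shift longitudes from [0, 360) to [-180, 180)
x2 = np.array(ds['latitude'])
params = utils_io.read_config('tests/data/input_tutorial2.yaml')

## geographic (trapezoidal) weights account for the latitude-dependent cell area
weights = utils_weights.geo_trapz_2D(
    x1_dim=x2.shape[0], x2_dim=x1.shape[0], n_vars=params['n_variables'])

spod = spod_standard(params=params, weights=weights).fit(data_list=ds['tp'])
T1 = 960                               # period of interest (same units as `time_step`)
f1, f1_idx = spod.find_nearest_freq(freq_req=1/T1, freq=spod.freq)
spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq, modes_idx=[0,1,2],
    x1=x1, x2=x2, coastlines='centred', equal_axes=True, filename='modes_f1.jpg')
```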
25 | ### Climate
26 |
27 | #### [Tutorial: 2D Multivariate ENSO Index](climate/ERA20C_MEI_2D/ERA20C_MEI_2D.ipynb)
28 |
29 | This tutorial shows how to download data from an ECMWF reanalysis dataset (ERA20C),
30 | and use **PySPOD** to identify spatio-temporal coherent structures in multivariate
31 | 2D data. In particular, we seek to identify the multivariate ENSO index (MEI).
32 | The data is composed of the following monthly-averaged variables: mean sea level
33 | pressure (MSL), zonal component of the surface wind (U10), meridional component
34 | of the surface wind (V10), sea surface temperature (SST), 2-meter temperature
35 | (T2M), and total cloud cover (TCC), on a 2D longitude-latitude grid.
36 |
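For the multivariate analysis, the variables are simply stacked along the last axis
of the data array before calling `fit`. A sketch of this preprocessing, assuming the
ERA-20C file has already been downloaded with the retrieval script in this folder
(only the required parameters are shown; values as in `ERA20C_MEI_2D.py`):

```python
import numpy as np
import xarray as xr
import pyspod.utils.weights as utils_weights
from pyspod.spod.standard import Standard as spod_standard

ds = xr.open_dataset('E20C_MONTHLYMEAN00_1900_2010_MEI.nc')
t  = np.array(ds['time'])
x1 = np.array(ds['longitude'])
x2 = np.array(ds['latitude'])

## stack the six monthly-mean variables along the last axis: (time, lon, lat, n_vars)
variables = ['sst', 'msl', 'tcc', 'u10', 'v10', 't2m']
X = np.empty([t.shape[0], x1.shape[0], x2.shape[0], len(variables)])
for i, var in enumerate(variables):
    X[..., i] = np.nan_to_num(np.einsum('ijk->ikj', np.array(ds[var])))

params = {
    'time_step'   : 720,           # data time-sampling, as in ERA20C_MEI_2D.py
    'n_space_dims': 2,
    'n_variables' : len(variables),
    'n_dft'       : 12 * 5,        # 5-year FFT blocks of monthly data
}
weights = utils_weights.geo_trapz_2D(
    x1_dim=x2.shape[0], x2_dim=x1.shape[0], n_vars=len(variables), R=1)
spod = spod_standard(params=params, weights=weights).fit(data_list=X)
```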
37 | #### [Tutorial: 3D Quasi-Biennial Oscillation](climate/ERA20C_QBO_3D/ERA20C_QBO_3D.ipynb)
38 |
39 | This tutorial shows how to download data from an ECMWF reanalysis dataset (ERA20C),
40 | and use **PySPOD** to identify spatio-temporal coherent structures in univariate
41 | 3D data. In particular, we seek to identify the Quasi-Biennial Oscillation (QBO).
42 | The data is composed of the monthly averages of the zonal-mean zonal winds
43 | on a 3D longitude, latitude, pressure-level grid.
44 |
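The 3D case follows the same pattern, with 3D geographic weights and slice-based
plotting of the modes. A short serial sketch, again assuming the ERA-20C file has
been downloaded with the script in this folder:

```python
import numpy as np
import xarray as xr
import pyspod.utils.weights as utils_weights
from pyspod.spod.standard import Standard as spod_standard

ds = xr.open_dataset('E20C_MONTHLYMEAN00_1900_2010_U131128_3D.nc')
x1 = np.array(ds['longitude']); x2 = np.array(ds['latitude']); x3 = np.array(ds['level'])

## single variable (zonal wind u) on a (time, lon, lat, level, n_vars) array
X = np.nan_to_num(np.einsum('tijk->tkji', np.array(ds['u'])))[..., None]

params = {'time_step': 744, 'n_space_dims': 3, 'n_variables': 1, 'n_dft': 12 * 12}
weights = utils_weights.geo_trapz_3D(
    x1_dim=x2.shape[0], x2_dim=x1.shape[0], x3_dim=x3.shape[0], n_vars=1, R=1)
spod = spod_standard(params=params, weights=weights).fit(data_list=X)

## visualize a horizontal slice of the leading modes at the frequency of interest
f, f_idx = spod.find_nearest_freq(freq_req=1/744, freq=spod.freq)
spod.plot_3d_modes_slice_at_frequency(freq_req=f, freq=spod.freq,
    x1=x1-180, x2=x2, x3=x3, slice_dim=2, slice_id=2,
    coastlines='centred', modes_idx=[0,1,2], vars_idx=[0])
```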
--------------------------------------------------------------------------------
/tutorials/climate/ERA20C_MEI_2D/E20C_MONTHLYMEAN00_1900_2010_MEI.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from ecmwfapi import ECMWFDataServer
3 | server = ECMWFDataServer()
4 |
5 | def retrieve_era20c_mnth():
6 | """
7 | A function to demonstrate how to iterate efficiently over all months,
8 |     for a list of years (here from 1900 to 2010) for an ERA-20C synoptic monthly-means request.
9 | You can extend the number of years to adapt the iteration to your needs.
10 | You can use the variable 'target' to organise the requested data in files as you wish.
11 | """
12 | yearStart = 1900
13 | yearEnd = 2010
14 | monthStart = 1
15 | monthEnd = 12
16 | requestMonthList = []
17 | for year in list(range(yearStart, yearEnd + 1)):
18 | for month in list(range(monthStart, monthEnd + 1)):
19 | requestMonthList.append('%04d-%02d-01' % (year, month))
20 | requestMonths = "/".join(requestMonthList)
21 | target_sfc = "E20C_MONTHLYMEAN00_1900_2010_MEI.nc"
22 | era20c_mnth_sfc_request(requestMonths, target_sfc)
23 |
24 | def era20c_mnth_sfc_request(requestMonths, target):
25 | """
26 |     An ERA-20C request for analysis, surface (sfc) data.
27 | You can change the keywords below to adapt it to your needs.
28 | (eg add or remove levels, parameters, times etc)
29 | """
30 | server.retrieve({
31 | "class": "e2",
32 | "stream": "mnth",
33 | "type": "an",
34 | "dataset": "era20c",
35 | "date": requestMonths,
36 | "expver": "1",
37 | "param": "34.128/151.128/164.128/165.128/166.128/167.128",
38 | "levtype": "sfc",
39 | "target": target,
40 | "format": "netcdf",
41 | "grid" : "1.5/1.5",
42 | "time": "00"
43 | })
44 | if __name__ == '__main__':
45 | retrieve_era20c_mnth()
46 |
--------------------------------------------------------------------------------
/tutorials/climate/ERA20C_MEI_2D/ERA20C_MEI_2D.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import xarray as xr
4 | import numpy as np
5 | from pathlib import Path
6 |
7 | # Current, parent and file paths
8 | CWD = os.getcwd()
9 | CF = os.path.realpath(__file__)
10 | CFD = os.path.dirname(CF)
11 |
12 | # Import library specific modules
13 | sys.path.insert(0, os.path.join(CFD, "../../../"))
14 | from pyspod.spod.standard import Standard as spod_standard
15 | from pyspod.spod.streaming import Streaming as spod_streaming
16 | import pyspod.utils.weights as utils_weights
17 |
18 | # Current path
19 | CWD = os.getcwd()
20 |
21 | # Inspect and load data
22 | file = os.path.join(CFD, 'E20C_MONTHLYMEAN00_1900_2010_MEI.nc')
23 | ds = xr.open_dataset(file)
24 | print(ds)
25 |
26 | # we extract time, longitude and latitude
27 | t = np.array(ds['time'])
28 | x1 = np.array(ds['longitude'])
29 | x2 = np.array(ds['latitude'])
30 | nt = t.shape[0]
31 | print('shape of t (time): ', t.shape)
32 | print('shape of x1 (longitude): ', x1.shape)
33 | print('shape of x2 (latitude) : ', x2.shape)
34 |
35 | # we set the variables we want to use for the analysis
36 | # (we select all the variables present) and load them in RAM
37 | variables = ['sst', 'msl', 'tcc', 'u10', 'v10', 't2m']
38 | X = np.empty([t.shape[0], x1.shape[0], x2.shape[0], len(variables)])
39 | for i,var in enumerate(variables):
40 | X[...,i] = np.einsum('ijk->ikj', np.array(ds[var]))
41 | X[...,i] = np.nan_to_num(X[...,i])
42 | print('shape of data matrix X: ', X.shape)
43 |
44 | # define required and optional parameters
45 | params = dict()
46 |
47 | # -- required parameters
48 | params['time_step' ] = 720 # data time-sampling
49 | params['n_space_dims'] = 2 # number of spatial dimensions (longitude and latitude)
50 | params['n_variables' ] = len(variables) # number of variables
51 | params['n_dft'       ] = np.ceil(12 * 5) # length of FFT blocks (60 time snapshots = 5 years of monthly data)
52 |
53 | # -- optional parameters
54 | params['overlap' ] = 0 # dimension block overlap region
55 | params['mean_type'        ] = 'blockwise' # type of mean to subtract from the data
56 | params['normalize_weights'] = True # normalization of weights by data variance
57 | params['normalize_data' ] = False # normalize data by data variance
58 | params['n_modes_save' ] = 3 # modes to be saved
59 | params['conf_level' ] = 0.95 # calculate confidence level
60 | params['reuse_blocks' ] = False # whether to reuse blocks if present
61 | params['savefft' ] = False # save FFT blocks to reuse them in the future (saves time)
62 | params['savedir' ] = os.path.join(CWD, 'results', Path(file).stem) # folder where to save results
63 |
64 | # Set weights
65 | weights = utils_weights.geo_trapz_2D(
66 | x1_dim=x2.shape[0], x2_dim=x1.shape[0],
67 | n_vars=len(variables), R=1)
68 |
69 | # Perform SPOD analysis using the standard module
70 | SPOD_analysis = spod_standard(
71 | params=params,
72 | weights=weights)
73 |
74 | # Fit SPOD
75 | spod = SPOD_analysis.fit(data_list=X)
76 |
77 | # Show results
78 | T_approx = 900 # approximate period (in days)
79 | freq_found, freq_idx = spod.find_nearest_freq(freq_req=1/T_approx, freq=spod.freq)
80 | modes_at_freq = spod.get_modes_at_freq(freq_idx=freq_idx)
81 |
82 | # spod.plot_eigs()
83 |
84 | freq = spod.freq*24
85 | spod.plot_eigs_vs_frequency(freq=freq)
86 | spod.plot_eigs_vs_period(freq=freq, xticks=[1, 7, 30, 365, 1825])
87 | spod.plot_2d_modes_at_frequency(
88 | freq_req=freq_found,
89 | freq=freq,
90 | x1=x1-180,
91 | x2=x2,
92 | coastlines='centred',
93 | modes_idx=[0,1],
94 | vars_idx=[1,4])
95 |
96 | data = spod.get_data(X)
97 |
98 | spod.plot_2d_data(
99 | data,
100 | x1=x1-180,
101 | x2=x2,
102 | coastlines='centred',
103 | vars_idx=[5],
104 | time_idx=[0,100,200])
105 |
106 | spod.generate_2d_data_video(
107 | data,
108 | x1=x1-180,
109 | x2=x2,
110 | # coastlines='centred',
111 | sampling=20,
112 | vars_idx=[5])
113 |
114 |
--------------------------------------------------------------------------------
/tutorials/climate/ERA20C_QBO_3D/E20C_MONTHLYMEAN00_1900_2010_U131128_3D.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from ecmwfapi import ECMWFDataServer
3 | server = ECMWFDataServer()
4 |
5 | def retrieve_era20c_mnth():
6 | """
7 | A function to demonstrate how to iterate efficiently over all months,
8 |     for a list of years (here from 1900 to 2010) for an ERA-20C synoptic monthly means request.
9 | You can extend the number of years to adapt the iteration to your needs.
10 | You can use the variable 'target' to organise the requested data in files as you wish.
11 | """
12 | yearStart = 1900
13 | yearEnd = 2010
14 | monthStart = 1
15 | monthEnd = 12
16 | requestMonthList = []
17 | for year in list(range(yearStart, yearEnd + 1)):
18 | for month in list(range(monthStart, monthEnd + 1)):
19 | requestMonthList.append('%04d-%02d-01' % (year, month))
20 | requestMonths = "/".join(requestMonthList)
21 | target_pl = "E20C_MONTHLYMEAN00_1900_2010_U131128_3D.nc"
22 | era20c_mnth_pl_request(requestMonths, target_pl)
23 |
24 | def era20c_mnth_pl_request(requestMonths, target):
25 | """
26 |     An ERA-20C request for analysis, pressure-level (pl) data.
27 | You can change the keywords below to adapt it to your needs.
28 | (eg add or remove levels, parameters, times etc)
29 | """
30 | server.retrieve({
31 | "class": "e2",
32 | "stream": "mnth",
33 | "type": "an",
34 | "dataset": "era20c",
35 | "date": requestMonths,
36 | "expver": "1",
37 | "levtype": "pl",
38 | "levelist": "1/5/10/50/100/150/200/250/350/450/550/650/750/800/850/900/950/1000",
39 | "param": "131.128",
40 | "target": target,
41 | "grid" : "1.5/1.5",
42 | "format": "netcdf",
43 | "time": "00"
44 | })
45 | if __name__ == '__main__':
46 | retrieve_era20c_mnth()
--------------------------------------------------------------------------------
/tutorials/climate/ERA20C_QBO_3D/ERA20C_QBO_3D.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import xarray as xr
4 | import numpy as np
5 | from pathlib import Path
6 |
7 | # Current, parent and file paths
8 | CWD = os.getcwd()
9 | CF = os.path.realpath(__file__)
10 | CFD = os.path.dirname(CF)
11 |
12 | # Import library specific modules
13 | sys.path.insert(0, os.path.join(CFD, "../../../"))
14 | from pyspod.spod.standard import Standard as spod_standard
15 | from pyspod.spod.streaming import Streaming as spod_streaming
16 | import pyspod.utils.weights as utils_weights
17 |
18 | # Current path
19 | CWD = os.getcwd()
20 |
21 | # Inspect and load data
22 | file = os.path.join(CFD, 'E20C_MONTHLYMEAN00_1900_2010_U131128_3D.nc')
23 | ds = xr.open_dataset(file)
24 | print(ds)
25 |
26 | # we extract time, longitude and latitude
27 | t = np.array(ds['time'])
28 | x1 = np.array(ds['longitude'])
29 | x2 = np.array(ds['latitude'])
30 | x3 = np.array(ds['level'])
31 | nt = t.shape[0]
32 | print('shape of t (time): ', t.shape)
33 | print('shape of x1 (longitude): ', x1.shape)
34 | print('shape of x2 (latitude) : ', x2.shape)
35 | print('shape of x3 (level) : ', x3.shape)
36 |
37 | # we set the variables we want to use for the analysis
38 | # (we select all the variables present) and load them in RAM
39 | variables = ['u']
40 | X = np.empty([t.shape[0], x1.shape[0], x2.shape[0], x3.shape[0], len(variables)])
41 | for i,var in enumerate(variables):
42 | X[...,i] = np.einsum('tijk->tkji', np.array(ds[var]))
43 | X[...,i] = np.nan_to_num(X[...,i])
44 | print('shape of data matrix X: ', X.shape)
45 |
46 | # define required and optional parameters
47 | params = dict()
48 |
49 | # -- required parameters
50 | params['time_step' ] = 744 # data time-sampling
51 | params['n_space_dims'] = X[0,...,0].ndim # number of spatial dimensions (longitude, latitude and pressure level)
52 | params['n_variables' ] = len(variables) # number of variables
53 | params['n_dft'       ] = np.ceil(12 * 12) # length of FFT blocks (144 time snapshots = 12 years of monthly data)
54 |
55 | # -- optional parameters
56 | params['overlap' ] = 0 # dimension block overlap region
57 | params['mean_type'        ] = 'longtime' # type of mean to subtract from the data
58 | params['normalize_weights'] = False # normalization of weights by data variance
59 | params['normalize_data' ] = False # normalize data by data variance
60 | params['n_modes_save' ] = 5 # modes to be saved
61 | params['conf_level' ] = 0.95 # calculate confidence level
62 | params['reuse_blocks' ] = False # whether to reuse blocks if present
63 | params['savefft' ] = False # save FFT blocks to reuse them in the future (saves time)
64 | params['savedir' ] = os.path.join(CWD, 'results', Path(file).stem) # folder where to save results
65 |
66 | # Set weights
67 | weights = utils_weights.geo_trapz_3D(
68 | x1_dim=x2.shape[0], x2_dim=x1.shape[0], x3_dim=x3.shape[0],
69 | n_vars=len(variables), R=1)
70 |
71 | # Perform SPOD analysis using the standard module
72 | SPOD_analysis = spod_standard(
73 | params=params,
74 | weights=weights)
75 |
76 | # Fit SPOD
77 | spod = SPOD_analysis.fit(data_list=X)
78 |
79 | # Show results
80 | T_approx = 744 # approximate period (in days)
81 | freq_found, freq_idx = spod.find_nearest_freq(freq_req=1/T_approx, freq=spod.freq)
82 | modes_at_freq = spod.get_modes_at_freq(freq_idx=freq_idx)
83 |
84 | freq = spod.freq*24
85 | spod.plot_eigs()
86 | spod.plot_eigs_vs_frequency(freq=freq)
87 | spod.plot_eigs_vs_period (freq=freq, xticks=[1, 7, 30, 365, 740, 1825])
88 | spod.plot_3d_modes_slice_at_frequency(
89 | freq_req=freq_found,
90 | freq=freq,
91 | x1=x1-180,
92 | x2=x2,
93 | x3=x3,
94 | slice_dim=2,
95 | slice_id=2,
96 | coastlines='centred',
97 | modes_idx=[0,1,2],
98 | vars_idx=[0])
99 | spod.plot_mode_tracers(
100 | freq_req=freq_found,
101 | freq=freq,
102 | coords_list=[(100,0,2)],
103 | modes_idx=[0,1,2])
104 | data = spod.get_data(X)
105 | spod.plot_data_tracers(data, coords_list=[(100,0,2),(200,10,10)])
106 |
107 |
--------------------------------------------------------------------------------
/tutorials/tutorial1/tutorial1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import numpy as np
6 |
7 | # Current, parent and file paths
8 | CWD = os.getcwd()
9 | CFD = os.path.abspath('')
10 |
11 | # project libraries
12 | sys.path.append(os.path.join(CFD,"../../"))
13 |
14 | # Import library specific modules
15 | from pyspod.spod.standard import Standard as spod_standard
16 | from pyspod.spod.streaming import Streaming as spod_streaming
17 | import pyspod.spod.utils as utils_spod
18 | import pyspod.utils.weights as utils_weights
19 | import pyspod.utils.errors as utils_errors
20 | import pyspod.utils.io as utils_io
21 | import pyspod.utils.postproc as post
22 |
23 |
24 | ## -------------------------------------------------------------------
25 | ## initialize MPI
26 | ## -------------------------------------------------------------------
27 | try:
28 | from mpi4py import MPI
29 | comm = MPI.COMM_WORLD
30 | rank = comm.rank
31 | except:
32 | comm = None
33 | rank = 0
34 | ## -------------------------------------------------------------------
35 |
36 |
37 |
38 | ## -------------------------------------------------------------------
39 | ## read data and params
40 | ## -------------------------------------------------------------------
41 | ## data
42 | data_file = os.path.join(CFD, '../../tests/data', 'fluidmechanics_data.mat')
43 | data_dict = utils_io.read_data(data_file=data_file)
44 | data = data_dict['p'].T
45 | dt = data_dict['dt'][0,0]
46 | nt = data.shape[0]
47 | x1 = data_dict['r'].T; x1 = x1[:,0]
48 | x2 = data_dict['x'].T; x2 = x2[0,:]
49 | ## params
50 | config_file = os.path.join(CFD, '../../tests/data', 'input_tutorial1.yaml')
51 | params = utils_io.read_config(config_file)
52 | params['time_step'] = dt
53 | ## -------------------------------------------------------------------
54 |
55 |
56 |
57 | ## -------------------------------------------------------------------
58 | ## compute spod modes and check orthogonality
59 | ## -------------------------------------------------------------------
60 | standard = spod_standard (params=params, comm=comm)
61 | streaming = spod_streaming(params=params, comm=comm)
62 | spod = standard.fit(data_list=data)
63 | results_dir = spod.savedir_sim
64 | flag, ortho = utils_spod.check_orthogonality(
65 | results_dir=results_dir, mode_idx1=[1],
66 | mode_idx2=[0], freq_idx=[5], dtype='double',
67 | comm=comm)
68 | print(f'flag = {flag}, ortho = {ortho}')
69 | ## -------------------------------------------------------------------
70 |
71 |
72 |
73 | ## -------------------------------------------------------------------
74 | ## compute coefficients
75 | ## -------------------------------------------------------------------
76 | file_coeffs, coeffs_dir = utils_spod.compute_coeffs_op(
77 | data=data, results_dir=results_dir, comm=comm)
78 | ## -------------------------------------------------------------------
79 |
80 |
81 |
82 | ## -------------------------------------------------------------------
83 | ## compute reconstruction
84 | ## -------------------------------------------------------------------
85 | file_dynamics, coeffs_dir = utils_spod.compute_reconstruction(
86 | coeffs_dir=coeffs_dir, time_idx='all', comm=comm)
87 | ## -------------------------------------------------------------------
88 |
89 |
90 |
91 | ## only rank 0
92 | if rank == 0:
93 | ## ---------------------------------------------------------------
94 | ## postprocessing
95 | ## ---------------------------------------------------------------
96 | ## plot eigenvalues
97 | spod.plot_eigs(filename='eigs.jpg')
98 | spod.plot_eigs_vs_frequency(filename='eigs_freq.jpg')
99 | spod.plot_eigs_vs_period(filename='eigs_period.jpg')
100 |
101 | ## identify frequency of interest
102 | T1 = 0.9; T2 = 4
103 | f1, f1_idx = spod.find_nearest_freq(freq_req=1/T1, freq=spod.freq)
104 | f2, f2_idx = spod.find_nearest_freq(freq_req=1/T2, freq=spod.freq)
105 |
106 | ## plot 2d modes at frequency of interest
107 | spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq,
108 | modes_idx=[0,1,2], x1=x2, x2=x1,
109 | equal_axes=True, filename='modes_f1.jpg')
110 |
111 | ## plot 2d modes at frequency of interest
112 | spod.plot_2d_modes_at_frequency(freq_req=f2, freq=spod.freq,
113 | modes_idx=[0,1,2], x1=x2, x2=x1,
114 | equal_axes=True, filename='modes_f2.jpg')
115 |
116 | ## plot coefficients
117 | coeffs = np.load(file_coeffs)
118 | post.plot_coeffs(coeffs, coeffs_idx=[0,1],
119 | path=results_dir, filename='coeffs.jpg')
120 |
121 | ## plot reconstruction
122 | recons = np.load(file_dynamics)
123 | post.plot_2d_data(recons, time_idx=[0,10], filename='recons.jpg',
124 | path=results_dir, x1=x2, x2=x1, equal_axes=True)
125 |
126 | ## plot data
127 | data = spod.get_data(data)
128 | post.plot_2d_data(data, time_idx=[0,10], filename='data.jpg',
129 | path=results_dir, x1=x2, x2=x1, equal_axes=True)
130 | post.plot_data_tracers(data, coords_list=[(5,0.5)],
131 | time_limits=[0,nt], path=results_dir, filename='data_tracers.jpg')
132 | post.generate_2d_data_video(
133 | data, sampling=5, time_limits=[0,nt], x1=x2, x2=x1,
134 | path=results_dir, filename='data_movie1.mp4')
135 | ## -------------------------------------------------------------
136 |
--------------------------------------------------------------------------------
/tutorials/tutorial2/tutorial2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import sys
5 | import numpy as np
6 |
7 | # Current, parent and file paths
8 | CWD = os.getcwd()
9 | CFD = os.path.abspath('')
10 |
11 | # project libraries
12 | sys.path.append(os.path.join(CFD,"../../"))
13 |
14 | # Import library specific modules
15 | from pyspod.spod.standard import Standard as spod_standard
16 | from pyspod.spod.streaming import Streaming as spod_streaming
17 | import pyspod.spod.utils as utils_spod
18 | import pyspod.utils.weights as utils_weights
19 | import pyspod.utils.errors as utils_errors
20 | import pyspod.utils.io as utils_io
21 | import pyspod.utils.postproc as post
22 |
23 |
24 |
25 | ## -------------------------------------------------------------------
26 | ## initialize MPI
27 | ## -------------------------------------------------------------------
28 | try:
29 | from mpi4py import MPI
30 | comm = MPI.COMM_WORLD
31 | rank = comm.rank
32 | except:
33 | comm = None
34 | rank = 0
35 | ## -------------------------------------------------------------------
36 |
37 |
38 |
39 | ## -------------------------------------------------------------------
40 | ## read data and params
41 | ## -------------------------------------------------------------------
42 | ## data
43 | data_file = os.path.join(CFD, '../../tests/data/', 'era_interim_data.nc')
44 | ds = utils_io.read_data(data_file=data_file)
45 | print(ds)
46 | ## we extract time, longitude and latitude
47 | t = np.array(ds['time'])
48 | x1 = np.array(ds['longitude']) - 180
49 | x2 = np.array(ds['latitude'])
50 | data = ds['tp']
51 | nt = len(t)
52 | print('shape of t (time): ', t.shape)
53 | print('shape of x1 (longitude): ', x1.shape)
54 | print('shape of x2 (latitude) : ', x2.shape)
55 | ## params
56 | config_file = os.path.join(CFD, '../../tests/data', 'input_tutorial2.yaml')
57 | params = utils_io.read_config(config_file)
58 | ## set weights
59 | weights = utils_weights.geo_trapz_2D(
60 | x1_dim=x2.shape[0], x2_dim=x1.shape[0],
61 | n_vars=params['n_variables'])
62 | ## -------------------------------------------------------------------
63 |
64 |
65 |
66 | ## -------------------------------------------------------------------
67 | ## compute spod modes and check orthogonality
68 | ## -------------------------------------------------------------------
69 | standard = spod_standard (params=params, weights=weights, comm=comm)
70 | streaming = spod_streaming(params=params, weights=weights, comm=comm)
71 | spod = standard.fit(data_list=data)
72 | results_dir = spod.savedir_sim
73 | flag, ortho = utils_spod.check_orthogonality(
74 | results_dir=results_dir, mode_idx1=[1],
75 | mode_idx2=[0], freq_idx=[5], dtype='single',
76 | comm=comm)
77 | print(f'flag = {flag}, ortho = {ortho}')
78 | ## -------------------------------------------------------------------
79 |
80 |
81 |
82 | ## -------------------------------------------------------------------
83 | ## compute coefficients
84 | ## -------------------------------------------------------------------
85 | file_coeffs, coeffs_dir = utils_spod.compute_coeffs_op(
86 | data=data, results_dir=results_dir, comm=comm)
87 | ## -------------------------------------------------------------------
88 |
89 |
90 |
91 | ## -------------------------------------------------------------------
92 | ## compute reconstruction
93 | ## -------------------------------------------------------------------
94 | file_dynamics, coeffs_dir = utils_spod.compute_reconstruction(
95 | coeffs_dir=coeffs_dir, time_idx=[0,1,2,3,4,5,6,7,8,9,10],
96 | comm=comm)
97 | ## -------------------------------------------------------------------
98 |
99 |
100 |
101 | ## only rank 0
102 | if rank == 0:
103 | ## plot eigenvalues
104 | spod.plot_eigs(filename='eigs.jpg')
105 | spod.plot_eigs_vs_frequency(filename='eigs_freq.jpg')
106 | spod.plot_eigs_vs_period(filename='eigs_period.jpg',
107 | xticks=[24*10,24*20,24*40,24*60,24*90])
108 |
109 | ## identify frequency of interest
110 | T1 = 960; T2 = 1008
111 | f1, f1_idx = spod.find_nearest_freq(freq_req=1/T1, freq=spod.freq)
112 | f2, f2_idx = spod.find_nearest_freq(freq_req=1/T2, freq=spod.freq)
113 |
114 | ## plot 2d modes at frequency of interest
115 | spod.plot_2d_modes_at_frequency(freq_req=f1, freq=spod.freq,
116 | modes_idx=[0,1,2], x1=x1, x2=x2, coastlines='centred',
117 | equal_axes=True, filename='modes_f1.jpg')
118 |
119 | ## plot 2d modes at frequency of interest
120 | spod.plot_2d_modes_at_frequency(freq_req=f2, freq=spod.freq,
121 | modes_idx=[0,1,2], x1=x1, x2=x2, coastlines='centred',
122 | equal_axes=True, filename='modes_f2.jpg')
123 |
124 | ## plot coefficients
125 | coeffs = np.load(file_coeffs)
126 | post.plot_coeffs(coeffs, coeffs_idx=[0,1], path=results_dir,
127 | filename='coeffs.jpg')
128 |
129 | # plot reconstruction
130 | recons = np.load(file_dynamics)
131 | post.plot_2d_data(recons, time_idx=[0,10], filename='recons.jpg',
132 | path=results_dir, x1=x1, x2=x2, coastlines='centred',
133 | equal_axes=True)
134 |
135 | ## plot data
136 | data = spod.get_data(data)
137 | post.plot_2d_data(data, time_idx=[0,10], filename='data.jpg',
138 | path=results_dir, x1=x1, x2=x2, coastlines='centred',
139 | equal_axes=True)
140 | post.plot_data_tracers(data, coords_list=[(5,0.5)],
141 | time_limits=[0,nt], path=results_dir, filename='data_tracers.jpg')
142 | post.generate_2d_data_video(
143 | data, sampling=5, time_limits=[0,nt],
144 | x1=x1, x2=x2, coastlines='centred',
145 | path=results_dir, filename='data_movie1.mp4')
146 | ## -------------------------------------------------------------
147 |
--------------------------------------------------------------------------------