├── .git_archival.txt ├── .gitattributes ├── .github └── workflows │ ├── draft-pdf.yml │ ├── release.yml │ └── run-tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── LICENSE ├── README.rst ├── codecov.yml ├── docs ├── JOSS │ ├── Fig0.png │ ├── Fig1.png │ ├── Fig2.png │ ├── Fig3.png │ ├── codemeta.json │ ├── default.csl │ ├── drawing.svg │ ├── generate.rb │ ├── paper.bib │ └── paper.md ├── Makefile ├── conf.py ├── index.rst ├── make.bat ├── tutorial_compose_observables.ipynb ├── tutorial_compute_differentiation.ipynb ├── tutorial_dmd_failed_for_pde_examples.ipynb ├── tutorial_dmd_separating_two_mixed_signals_400d_system.ipynb ├── tutorial_dmd_succeeds_pde_examples.ipynb ├── tutorial_dmd_with_control_128d_system.ipynb ├── tutorial_dmd_with_control_2d_system.ipynb ├── tutorial_koopman_edmd_with_rbf.ipynb ├── tutorial_koopman_edmdc_for_chaotic_duffing_oscillator.ipynb ├── tutorial_koopman_edmdc_for_vdp_system.ipynb ├── tutorial_koopman_eigenfunction_model_slow_manifold.ipynb ├── tutorial_koopman_hankel_dmdc_for_vdp_system.ipynb ├── tutorial_koopman_havok_3d_lorenz.ipynb ├── tutorial_koopman_kdmd_on_slow_manifold.ipynb ├── tutorial_koopman_nndmd_examples.ipynb ├── tutorial_linear_random_control_system.ipynb ├── tutorial_linear_system_koopman_eigenfunctions_with_edmd_and_nndmd.ipynb └── tutorial_sparse_modes_selection_2d_linear_system.ipynb ├── pyproject.toml ├── requirements-dev.txt ├── requirements-torch.txt ├── requirements.txt ├── setup.cfg ├── src └── pykoopman │ ├── __init__.py │ ├── analytics │ ├── __init__.py │ ├── _base_analyzer.py │ ├── _ms_pd21.py │ └── _pruned_koopman.py │ ├── common │ ├── __init__.py │ ├── cqgle.py │ ├── examples.py │ ├── ks.py │ ├── nlse.py │ ├── validation.py │ └── vbe.py │ ├── differentiation │ ├── __init__.py │ ├── _derivative.py │ └── _finite_difference.py │ ├── koopman.py │ ├── koopman_continuous.py │ ├── observables │ ├── __init__.py │ ├── _base.py │ ├── _custom_observables.py │ ├── _identity.py │ ├── 
_polynomial.py │ ├── _radial_basis_functions.py │ ├── _random_fourier_features.py │ └── _time_delay.py │ └── regression │ ├── __init__.py │ ├── _base.py │ ├── _base_ensemble.py │ ├── _dmd.py │ ├── _dmdc.py │ ├── _edmd.py │ ├── _edmdc.py │ ├── _havok.py │ ├── _kdmd.py │ └── _nndmd.py └── test ├── __init__.py ├── analytics └── test_analytics.py ├── conftest.py ├── differentiation └── test_differentiation.py ├── observables └── test_observables.py ├── regression └── test_regressors.py ├── test_koopman.py └── test_koopman_continuous.py /.git_archival.txt: -------------------------------------------------------------------------------- 1 | ref-names: HEAD -> master 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Interpret Jupyter notebooks as Python 2 | *.ipynb linguist-language=Python 3 | 4 | # For automatic versioning via setuptools_scm_git_archive 5 | .git_archival.txt export-subst 6 | -------------------------------------------------------------------------------- /.github/workflows/draft-pdf.yml: -------------------------------------------------------------------------------- 1 | on: [push] 2 | 3 | jobs: 4 | paper: 5 | runs-on: ubuntu-latest 6 | name: Paper Draft 7 | steps: 8 | - name: Checkout 9 | uses: actions/checkout@v3 10 | - name: Build draft PDF 11 | uses: openjournals/openjournals-draft-action@master 12 | with: 13 | journal: joss 14 | # This should be the path to the paper within your repo. 15 | paper-path: docs/JOSS/paper.md 16 | - name: Upload 17 | uses: actions/upload-artifact@v4 18 | with: 19 | name: paper 20 | # This is the output path where Pandoc will write the compiled 21 | # PDF. 
Note, this should be the same directory as the input 22 | # paper.md 23 | path: docs/JOSS/paper.pdf 24 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | release: 4 | types: 5 | - published 6 | 7 | jobs: 8 | release: 9 | name: Deploy release to PyPI 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout source 13 | uses: actions/checkout@v1 14 | - name: Set up Python 15 | uses: actions/setup-python@v1 16 | with: 17 | python-version: 3.10.13 18 | - name: Install dependencies 19 | run: pip install wheel build 20 | - name: Build package 21 | run: python setup.py sdist bdist_wheel 22 | - name: Upload package 23 | uses: pypa/gh-action-pypi-publish@master 24 | with: 25 | user: __token__ 26 | password: ${{ secrets.PYPI_TOKEN }} 27 | -------------------------------------------------------------------------------- /.github/workflows/run-tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | Linting: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Set up Python 3.10.13 12 | uses: actions/setup-python@v3 13 | with: 14 | python-version: 3.10.13 15 | - name: Linting 16 | run: | 17 | pip install pre-commit 18 | pre-commit run --all-files 19 | Linux: 20 | needs: Linting 21 | runs-on: ubuntu-latest 22 | strategy: 23 | max-parallel: 8 24 | matrix: 25 | python-version: [3.10.13] 26 | 27 | steps: 28 | - uses: actions/checkout@v3 29 | - name: Set up Python ${{ matrix.python-version }} 30 | uses: actions/setup-python@v3 31 | with: 32 | python-version: ${{ matrix.python-version }} 33 | - name: Install dependencies 34 | run: | 35 | pip install -e .[dev] 36 | # pip install -r requirements-dev.txt 37 | - name: Test with pytest 38 | run: | 39 | py.test test 40 | - name: 
Generate coverage report 41 | run: | 42 | pip install pytest==7.4.4 43 | pip install pytest-cov==4.1.0 44 | pytest --cov=./ --cov-report=xml 45 | - name: Upload coverage reports to Codecov 46 | uses: codecov/codecov-action@v3 47 | 48 | # - uses: actions/cache@v1 49 | # with: 50 | # path: ~/.cache/pip 51 | # key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements-dev.txt') }} 52 | # restore-keys: | 53 | # ${{ runner.os }}-pip- 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | dist 3 | *.egg-info 4 | # automatically generated by setuptools-scm 5 | pysindy/version.py 6 | # sphinx gallery files 7 | docs/examples 8 | # virtual environment 9 | venv 10 | 11 | # eggs 12 | .eggs 13 | 14 | dev 15 | 16 | **/*cache* 17 | 18 | .coverage 19 | coverage.xml 20 | 21 | .idea 22 | 23 | docs/api 24 | docs/_build 25 | 26 | .ipynb_checkpoints 27 | */.ipynb_checkpoints/* 28 | */lightning_logs/* 29 | lightning_logs 30 | .DS_Store 31 | .vscode 32 | 33 | *.pyc 34 | 35 | *.sublime* 36 | 37 | Pipfile 38 | Pipfile.lock 39 | /htmlcov/ 40 | 41 | *~ 42 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # Settings for pre-commit 2 | fail_fast: false 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.3.0 6 | hooks: 7 | - id: check-added-large-files 8 | args: ["--maxkb=102400"] 9 | - id: check-merge-conflict 10 | - id: trailing-whitespace 11 | - id: end-of-file-fixer 12 | - repo: https://github.com/asottile/reorder_python_imports 13 | rev: v3.9.0 14 | hooks: 15 | - id: reorder-python-imports 16 | exclude: ^(pre_commit/resources/|testing/resources/python3_hooks_repo/) 17 | args: [--py37-plus, --add-import, 'from __future__ import annotations'] 18 | - repo: https://github.com/ambv/black 
19 | rev: 22.8.0 20 | hooks: 21 | - id: black 22 | - repo: https://github.com/PyCQA/flake8 23 | rev: 5.0.4 24 | hooks: 25 | - id: flake8 26 | args: ["--config=setup.cfg"] 27 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.10" 13 | # You can also specify other tool versions: 14 | # nodejs: "19" 15 | # rust: "1.64" 16 | # golang: "1.19" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | 26 | # Optionally declare the Python requirements required to build your docs 27 | #python: 28 | # install: 29 | # - requirements: requirements-dev.txt 30 | # - method: pip 31 | # path: . 32 | python: 33 | install: 34 | - method: pip 35 | path: .[dev] 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright 2023 Shaowu Pan, Eurika Kaiser, Brian de Silva, J. Nathan Kutz and Steven L. 
Brunton 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | PyKoopman 2 | ========= 3 | 4 | |Build| |Docs| |PyPI| |Codecov| |DOI| |JOSS| 5 | 6 | **PyKoopman** is a Python package for computing data-driven approximations to the Koopman operator. 7 | 8 | Data-driven approximation of Koopman operator 9 | --------------------------------------------- 10 | 11 | .. figure:: docs/JOSS/Fig1.png 12 | 13 | Given a nonlinear dynamical system, 14 | 15 | .. math:: 16 | 17 | x'(t) = f(x(t)), 18 | 19 | the Koopman operator governs the temporal evolution of the measurement function. 20 | Unfortunately, it is an infinite-dimensional linear operator. 
Most of the time, one has to 21 | project the Koopman operator onto a finite-dimensional subspace that is spanned by user-defined/data-adaptive functions. 22 | 23 | .. math:: 24 | z = \Phi(x). 25 | 26 | If the system state is also contained in such subspace, then effectively, the nonlinear dynamical system is (approximately) 27 | linearized in a global sense. 28 | 29 | The goal of data-driven approximation of Koopman operator is to find such a set of 30 | functions that span such lifted space and the transition matrix associated with the 31 | lifted system. 32 | 33 | Structure of PyKoopman 34 | ^^^^^^^^^^^^^^^^^^^^^^ 35 | 36 | .. figure:: docs/JOSS/Fig2.png 37 | 38 | PyKoopman package is centered around the ``Koopman`` class and ``KoopmanContinuous`` class. It consists of two key components 39 | 40 | * ``observables``: a set of observables functions, which spans the subspace for projection. 41 | 42 | * ``regressor``: the optimization algorithm to find the best ``fit`` for the 43 | projection of Koopman operator. 44 | 45 | After ``Koopman``/``KoopmanContinuous`` object has been created, it must be fit to data, similar to a ``scikit-learn`` model. 46 | We design ``PyKoopman`` such that it is compatible to ``scikit-learn`` objects and methods as much as possible. 
47 | 48 | 49 | Features implemented 50 | ^^^^^^^^^^^^^^^^^^^^ 51 | 52 | - Observable library for lifting the state into the observable space 53 | 54 | - Identity (for DMD/DMDc or in case users want to compute observables themselves): 55 | ``Identity`` 56 | - Multivariate polynomials: ``Polynomial`` 57 | - Time delay coordinates: ``TimeDelay`` 58 | - Radial basis functions: ``RadialBasisFunctions`` 59 | - Random Fourier features: ``RandomFourierFeatures`` 60 | - Custom library (defined by user-supplied functions): ``CustomObservables`` 61 | - Concatenation of observables: ``ConcatObservables`` 62 | 63 | 64 | - System identification method for performing regression 65 | 66 | - Dynamic mode decomposition: ``PyDMDRegressor`` 67 | - Dynamic mode decomposition with control: ``DMDc`` 68 | - Extended dynamic mode decomposition: ``EDMD`` 69 | - Extended dynamic mode decomposition with control: ``EDMDc`` 70 | - Kernel dynamic mode decomposition: ``KDMD`` 71 | - Hankel Alternative View of Koopman Analysis: ``HAVOK`` 72 | - Neural Network DMD: ``NNDMD`` 73 | 74 | - Sparse construction of Koopman invariant subspace 75 | 76 | - Multi-task learning based on linearity consistency 77 | 78 | 79 | Examples 80 | ^^^^^^^^ 81 | 82 | 1. `Learning how to create observables `__ 83 | 84 | 2. `Learning how to compute time derivatives `__ 85 | 86 | 3. `Dynamic mode decomposition on two mixed spatial signals `__ 87 | 88 | 4. `Dynamic mode decomposition with control on a 2D linear system `__ 89 | 90 | 5. `Dynamic mode decomposition with control (DMDc) for a 128D system `__ 91 | 92 | 6. `Dynamic mode decomposition with control on a high-dimensional linear system `__ 93 | 94 | 7. `Successful examples of using Dynamic mode decomposition on PDE system `__ 95 | 96 | 8. `Unsuccessful examples of using Dynamic mode decomposition on PDE system `__ 97 | 98 | 9. `Extended DMD for Van Der Pol System `__ 99 | 100 | 10. `Learning Koopman eigenfunctions on Slow manifold `__ 101 | 102 | 11. 
`Comparing DMD and KDMD for Slow manifold dynamics `__ 103 | 104 | 12. `Extended DMD with control for chaotic duffing oscillator `__ 105 | 106 | 13. `Extended DMD with control for Van der Pol oscillator `__ 107 | 108 | 14. `Hankel Alternative View of Koopman Operator for Lorenz System `__ 109 | 110 | 15. `Hankel DMD with control for Van der Pol Oscillator `__ 111 | 112 | 16. `Neural Network DMD on Slow Manifold `__ 113 | 114 | 17. `EDMD and NNDMD for a simple linear system `__ 115 | 116 | 18. `Sparsifying a minimal Koopman invariant subspace from EDMD for a simple linear system `__ 117 | 118 | Installation 119 | ------------- 120 | 121 | Language 122 | ^^^^^^^^^^^^^^^^^^^^ 123 | - Python == 3.10 124 | 125 | 126 | Installing with pip 127 | ^^^^^^^^^^^^^^^^^^^ 128 | 129 | If you are using Linux or macOS you can install PyKoopman with pip: 130 | 131 | .. code-block:: bash 132 | 133 | pip install pykoopman 134 | 135 | Installing from source 136 | ^^^^^^^^^^^^^^^^^^^^^^ 137 | First clone this repository: 138 | 139 | .. code-block:: bash 140 | 141 | git clone https://github.com/dynamicslab/pykoopman 142 | 143 | Second, it is highly recommended to use `venv` to get a local python environment 144 | 145 | .. code-block:: bash 146 | 147 | python -m venv venv 148 | source ./venv/bin/activate 149 | 150 | On Windows, you activate the virtual environment in a different way 151 | 152 | .. code-block:: bash 153 | 154 | .\venv\Scripts\activate.ps1 155 | 156 | Then, to install the package, run 157 | 158 | .. code-block:: bash 159 | 160 | python -m pip install -e . 161 | 162 | If you do not have root access, you should add the ``--user`` option to the above lines. 163 | 164 | 165 | Installing with GPU support 166 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 167 | 168 | After you download the Github package, go to the directory, type 169 | 170 | ..
code-block:: bash 171 | 172 | python -m pip install -r requirements-dev.txt 173 | 174 | Documentation 175 | ------------- 176 | The documentation for PyKoopman is hosted on `Read the Docs `__. 177 | 178 | Community guidelines 179 | -------------------- 180 | 181 | Contributing code 182 | ^^^^^^^^^^^^^^^^^ 183 | We welcome contributions to PyKoopman. To contribute a new feature please submit a 184 | pull request. To get started we recommend installing the packages in "developer mode" 185 | via 186 | 187 | .. code-block:: bash 188 | 189 | python -m pip install -e .[dev] 190 | 191 | This will allow you to run unit tests and automatically format your code. To be accepted your code should conform to PEP8 and pass all unit tests. Code can be tested by invoking 192 | 193 | .. code-block:: bash 194 | 195 | pytest 196 | 197 | We recommend using ``pre-commit`` to format your code. Once you have staged changes to commit 198 | 199 | .. code-block:: bash 200 | 201 | git add path/to/changed/file.py 202 | 203 | you can run the following to automatically reformat your staged code 204 | 205 | .. code-block:: bash 206 | 207 | pre-commit -a -v 208 | 209 | Note that you will then need to re-stage any changes ``pre-commit`` made to your code. 210 | 211 | Reporting issues or bugs 212 | ^^^^^^^^^^^^^^^^^^^^^^^^ 213 | If you find a bug in the code or want to request a new feature, please open an issue. 214 | 215 | Known issues: 216 | 217 | - Python 3.12 might cause unexpected problems. 218 | 219 | Citing PyKoopman 220 | ---------------- 221 | 222 | .. code-block:: text 223 | 224 | @article{Pan2024, doi = {10.21105/joss.05881}, 225 | url = {https://doi.org/10.21105/joss.05881}, 226 | year = {2024}, 227 | publisher = {The Open Journal}, 228 | volume = {9}, 229 | number = {94}, 230 | pages = {5881}, 231 | author = {Shaowu Pan and Eurika Kaiser and Brian M. de Silva and J. Nathan Kutz and Steven L.
Brunton}, 232 | title = {PyKoopman: A Python Package for Data-Driven Approximation of the Koopman Operator}, 233 | journal = {Journal of Open Source Software}} 234 | 235 | Related packages 236 | ---------------- 237 | * `PySINDy `_ - A Python library for the Sparse Identification of Nonlinear Dynamical 238 | systems (SINDy) method introduced in Brunton et al. (2016a). 239 | * `Deeptime `_ - A Python library for the analysis of time series data with methods for dimension reduction, clustering, and Markov model estimation. 240 | * `PyDMD `_ - A Python package using the Dynamic Mode Decomposition (DMD) for a data-driven model simplification based on spatiotemporal coherent structures. DMD is a great alternative to SINDy. 241 | * `pykoop `_ - a Koopman operator identification library written in Python 242 | * `DLKoopman `_ - a deep learning library for 243 | Koopman operator 244 | 245 | .. |Build| image:: https://github.com/dynamicslab/pykoopman/actions/workflows/run-tests.yml/badge.svg 246 | :target: https://github.com/dynamicslab/pykoopman/actions?query=workflow%3ATests 247 | 248 | .. |Docs| image:: https://readthedocs.org/projects/pykoopman/badge/?version=master 249 | :target: https://pykoopman.readthedocs.io/en/master/?badge=master 250 | :alt: Documentation Status 251 | 252 | .. |PyPI| image:: https://badge.fury.io/py/pykoopman.svg 253 | :target: https://badge.fury.io/py/pykoopman 254 | 255 | .. |Codecov| image:: https://codecov.io/github/dynamicslab/pykoopman/coverage.svg 256 | :target: https://app.codecov.io/gh/dynamicslab/pykoopman 257 | 258 | .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.8060893.svg 259 | :target: https://doi.org/10.5281/zenodo.8060893 260 | 261 | ..
|JOSS| image:: https://joss.theoj.org/papers/10.21105/joss.05881/status.svg 262 | :target: https://doi.org/10.21105/joss.05881 263 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | precision: 2 3 | round: down 4 | range: "70...100" 5 | 6 | status: 7 | project: no 8 | patch: yes 9 | changes: no 10 | 11 | comment: 12 | layout: "header, diff, changes, tree" 13 | behavior: default 14 | 15 | ignore: 16 | - "*/tests/*" 17 | - "*/common/*" 18 | -------------------------------------------------------------------------------- /docs/JOSS/Fig0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamicslab/pykoopman/e3b010297a02ed8a24fbd147db7d22a53a50f724/docs/JOSS/Fig0.png -------------------------------------------------------------------------------- /docs/JOSS/Fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamicslab/pykoopman/e3b010297a02ed8a24fbd147db7d22a53a50f724/docs/JOSS/Fig1.png -------------------------------------------------------------------------------- /docs/JOSS/Fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamicslab/pykoopman/e3b010297a02ed8a24fbd147db7d22a53a50f724/docs/JOSS/Fig2.png -------------------------------------------------------------------------------- /docs/JOSS/Fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamicslab/pykoopman/e3b010297a02ed8a24fbd147db7d22a53a50f724/docs/JOSS/Fig3.png -------------------------------------------------------------------------------- /docs/JOSS/codemeta.json: -------------------------------------------------------------------------------- 1 | { 2 | "@context": 
"https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", 3 | "@type": "Code", 4 | "author": [ 5 | { 6 | "@id": "https://orcid.org/0000-0002-2462-362X", 7 | "@type": "Person", 8 | "email": "shawnpan@uw.edu", 9 | "name": "Shaowu Pan", 10 | "affiliation": "Department of Applied Mathematics, University of Washington" 11 | }, 12 | { 13 | "@id": "https://orcid.org/0000-0001-6049-0812", 14 | "@type": "Person", 15 | "email": "eurika.kaiser@gmail.com", 16 | "name": "Eurika Kaiser", 17 | "affiliation": "Department of Applied Mathematics, University of Washington" 18 | }, 19 | { 20 | "@id": "https://orcid.org/0000-0003-0944-900X", 21 | "@type": "Person", 22 | "email": "briandesilva1@gmail.com", 23 | "name": "Brian M. de Silva", 24 | "affiliation": "Department of Mechanical Engineering, University of Washington" 25 | }, 26 | { 27 | "@id": "https://orcid.org/0000-0002-6004-2275", 28 | "@type": "Person", 29 | "email": "kutz@uw.edu", 30 | "name": "J. Nathan Kutz", 31 | "affiliation": "Department of Applied Mathematics, University of Washington" 32 | }, 33 | { 34 | "@id": "https://orcid.org/0000-0002-6565-5118", 35 | "@type": "Person", 36 | "email": "sbrunton@uw.edu", 37 | "name": "Steven L. 
Brunton", 38 | "affiliation": "Department of Mechanical Engineering, University of Washington" 39 | } 40 | ], 41 | "identifier": "", 42 | "codeRepository": "https://github.com/dynamicslab/pykoopman", 43 | "datePublished": "2021-10-21", 44 | "dateModified": "2021-10-21", 45 | "dateCreated": "2021-10-21", 46 | "description": "A Python Package for Data-Driven Approximation of the Koopman Operator", 47 | "keywords": "python, dynamical systems, Koopman operator, machine learning", 48 | "license": "MIT", 49 | "title": "PyKoopman", 50 | "version": "v0.2.0" 51 | } 52 | -------------------------------------------------------------------------------- /docs/JOSS/generate.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/ruby 2 | 3 | # For an OO language, this is distinctly procedural. Should probably fix that. 4 | require 'json' 5 | 6 | details = Hash.new({}) 7 | 8 | capture_params = [ 9 | { :name => "title", :message => "Enter project name." }, 10 | { :name => "url", :message => "Enter the URL of the project repository." }, 11 | { :name => "description", :message => "Enter the (short) project description." }, 12 | { :name => "license", :message => "Enter the license this software shared under. (hit enter to skip)\nFor example MIT, BSD, GPL v3.0, Apache 2.0" }, 13 | { :name => "doi", :message => "Enter the DOI of the archived version of this code. (hit enter to skip)\nFor example http://dx.doi.org/10.6084/m9.figshare.828487" }, 14 | { :name => "keywords", :message => "Enter keywords that should be associated with this project (hit enter to skip)\nComma-separated, for example: turkey, chicken, pot pie" }, 15 | { :name => "version", :message => "Enter the version of your software (hit enter to skip)\nSEMVER preferred: http://semver.org e.g. v1.0.0" } 16 | ] 17 | 18 | puts "I'm going to try and help you prepare some things for your JOSS submission" 19 | puts "If all goes well then we'll have a nice codemeta.json file soon..." 
20 | puts "" 21 | puts "************************************" 22 | puts "* First, some basic details *" 23 | puts "************************************" 24 | puts "" 25 | 26 | # Loop through the desired captures and print out for clarity 27 | capture_params.each do |param| 28 | puts param[:message] 29 | print "> " 30 | input = gets 31 | 32 | details[param[:name]] = input.chomp 33 | 34 | puts "" 35 | puts "OK, your project has #{param[:name]}: #{input}" 36 | puts "" 37 | end 38 | 39 | puts "" 40 | puts "************************************" 41 | puts "* Experimental stuff *" 42 | puts "************************************" 43 | puts "" 44 | 45 | puts "Would you like me to try and build a list of authors for you?" 46 | puts "(You need to be running this script in a git repository for this to work)" 47 | print "> (Y/N)" 48 | answer = gets.chomp 49 | 50 | case answer.downcase 51 | when "y", "yes" 52 | 53 | # Use git shortlog to extract a list of author names and commit counts. 54 | # Note we don't extract emails here as there's often different emails for 55 | # each user. Instead we capture emails at the end. 56 | 57 | git_log = `git shortlog --summary --numbered --no-merges` 58 | 59 | # ["252\tMichael Jackson", "151\tMC Hammer"] 60 | authors_and_counts = git_log.split("\n").map(&:strip) 61 | 62 | authors_and_counts.each do |author_count| 63 | count, author = author_count.split("\t").map(&:strip) 64 | 65 | puts "Looks like #{author} made #{count} commits" 66 | puts "Add them to the output?" 67 | print "> (Y/N)" 68 | answer = gets.chomp 69 | 70 | # If a user chooses to add this author to the output then we ask for some 71 | # additional information including their email, ORCID and affiliation. 72 | case answer.downcase 73 | when "y", "yes" 74 | puts "What is #{author}'s email address? (hit enter to skip)" 75 | print "> " 76 | email = gets.chomp 77 | 78 | puts "What is #{author}'s ORCID? 
(hit enter to skip)" 79 | puts "For example: http://orcid.org/0000-0000-0000-0000" 80 | print "> " 81 | orcid = gets.chomp 82 | 83 | puts "What is #{author}'s affiliation? (hit enter to skip)" 84 | print "> " 85 | affiliation = gets.chomp 86 | 87 | 88 | details['authors'].merge!(author => { 'commits' => count, 89 | 'email' => email, 90 | 'orcid' => orcid, 91 | 'affiliation' => affiliation }) 92 | 93 | when "n", "no" 94 | puts "OK boss..." 95 | puts "" 96 | end 97 | end 98 | when "n", "no" 99 | puts "OK boss..." 100 | puts "" 101 | end 102 | 103 | puts "Reticulating splines" 104 | 105 | 5.times do 106 | print "." 107 | sleep 0.5 108 | end 109 | 110 | puts "" 111 | puts "Generating some JSON goodness..." 112 | 113 | # TODO: work out how to use some kind of JSON template here. 114 | # Build the output list of authors from the inputs we've collected. 115 | output_authors = [] 116 | 117 | details['authors'].each do |author_name, values| 118 | entry = { 119 | "@id" => values['orcid'], 120 | "@type" => "Person", 121 | "email" => values['email'], 122 | "name" => author_name, 123 | "affiliation" => values['affiliation'] 124 | } 125 | output_authors << entry 126 | end 127 | 128 | # TODO: this is currently a static template (written out here). It would be good 129 | # to do something smarter here. 
130 | output = { 131 | "@context" => "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", 132 | "@type" => "Code", 133 | "author" => output_authors, 134 | "identifier" => details['doi'], 135 | "codeRepository" => details['url'], 136 | "datePublished" => Time.now.strftime("%Y-%m-%d"), 137 | "dateModified" => Time.now.strftime("%Y-%m-%d"), 138 | "dateCreated" => Time.now.strftime("%Y-%m-%d"), 139 | "description" => details['description'], 140 | "keywords" => details['keywords'], 141 | "license" => details['license'], 142 | "title" => details['title'], 143 | "version" => details['version'] 144 | } 145 | 146 | File.open('codemeta.json', 'w') {|f| f.write(JSON.pretty_generate(output)) } 147 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import importlib 4 | import pathlib 5 | 6 | author = "Shaowu Pan, Eurika Kaiser and Brian de Silva" 7 | project = "pykoopman" # package name 8 | 9 | # no need to edit below this line 10 | 11 | copyright = f"2023, {author}" 12 | 13 | module = importlib.import_module(project) 14 | version = release = getattr(module, "__version__") 15 | # version = "0.0.1" 16 | 17 | # The master toctree document. 18 | master_doc = "index" 19 | 20 | extensions = [ 21 | "nbsphinx", 22 | "sphinx_codeautolink", 23 | "sphinxcontrib.apidoc", 24 | "sphinx.ext.autodoc", 25 | "sphinx.ext.viewcode", 26 | "sphinx.ext.autosummary", 27 | "sphinx.ext.napoleon", 28 | "sphinx.ext.mathjax", 29 | "sphinx_nbexamples", 30 | "sphinx.ext.intersphinx", 31 | ] 32 | 33 | apidoc_module_dir = f"../{project}" 34 | apidoc_excluded_paths = ["tests"] 35 | apidoc_toc_file = False 36 | 37 | autodoc_default_options = {"members": True} 38 | autodoc_member_order = "bysource" 39 | autoclass_content = "init" 40 | 41 | language = "en" 42 | 43 | here = pathlib.Path(__file__).parent 44 | 45 | if (here / "static/custom.css").exists(): 46 | 47 | html_static_path = ["static"] 48 | 49 | def setup(app): 50 | app.add_stylesheet("custom.css") 51 | 52 | 53 | exclude_patterns = ["build", "_build", "Thumbs.db", ".DS_Store"] 54 | # pygments_style = "sphinx" 55 | 56 | add_module_names = True 57 | add_function_parentheses = False 58 | 59 | html_theme = "sphinx_rtd_theme" 60 | html_show_sourcelink = False 61 | html_show_sphinx = False 62 | html_show_copyright = True 63 | 64 | default_role = "any" 65 | html_sourcelink_suffix = "" 66 | 67 | example_gallery_config = dict( 68 | dont_preprocess=True, 69 | 
examples_dirs=["../examples"], 70 | gallery_dirs=["examples"], 71 | pattern=".+.ipynb", 72 | urls="https://github.com/dynamicslab/pykoopman/blob/master/examples", 73 | ) 74 | 75 | 76 | intersphinx_mapping = { 77 | "derivative": ("https://derivative.readthedocs.io/en/latest/", None) 78 | } 79 | 80 | # -- Options for manual page output --------------------------------------- 81 | 82 | # One entry per manual page. List of tuples 83 | # (source start file, name, description, authors, manual section). 84 | man_pages = [(master_doc, "pykoopman", "pykoopman Documentation", [author], 1)] 85 | 86 | # If true, show URL addresses after external links. 87 | # man_show_urls = False 88 | 89 | 90 | # -- Extensions to the Napoleon GoogleDocstring class --------------------- 91 | # michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html 92 | from sphinx.ext.napoleon.docstring import GoogleDocstring  # noqa: E402 93 | 94 | 95 | def parse_keys_section(self, section): 96 | return self._format_fields("Keys", self._consume_fields()) 97 | 98 | 99 | GoogleDocstring._parse_keys_section = parse_keys_section 100 | 101 | 102 | def parse_attributes_section(self, section): 103 | return self._format_fields("Attributes", self._consume_fields()) 104 | 105 | 106 | GoogleDocstring._parse_attributes_section = parse_attributes_section 107 | 108 | 109 | def parse_class_attributes_section(self, section): 110 | return self._format_fields("Class Attributes", self._consume_fields()) 111 | 112 | 113 | GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section 114 | 115 | 116 | def patched_parse(self): 117 | """ 118 | we now patch the parse method to guarantee that the above methods are 119 | assigned to the _section dict 120 | """ 121 | self._sections["keys"] = self._parse_keys_section 122 | self._sections["class attributes"] = self._parse_class_attributes_section 123 | self._unpatched_parse() 124 | 125 | 126 | GoogleDocstring._unpatched_parse =
GoogleDocstring._parse 127 | GoogleDocstring._parse = patched_parse 128 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | .. toctree:: 4 | :maxdepth: 0 5 | :caption: Tutorials 6 | :glob: 7 | 8 | tutorial_compose_observables 9 | tutorial_compute_differentiation 10 | tutorial_dmd_separating_two_mixed_signals_400d_system 11 | tutorial_dmd_with_control_2d_system 12 | tutorial_dmd_with_control_128d_system 13 | tutorial_linear_random_control_system 14 | tutorial_dmd_succeeds_pde_examples 15 | tutorial_dmd_failed_for_pde_examples 16 | tutorial_koopman_edmd_with_rbf 17 | tutorial_koopman_eigenfunction_model_slow_manifold 18 | tutorial_koopman_kdmd_on_slow_manifold 19 | tutorial_koopman_edmdc_for_chaotic_duffing_oscillator 20 | tutorial_koopman_edmdc_for_vdp_system 21 | tutorial_koopman_havok_3d_lorenz 22 | tutorial_koopman_hankel_dmdc_for_vdp_system 23 | tutorial_koopman_nndmd_examples 24 | tutorial_linear_system_koopman_eigenfunctions_with_edmd_and_nndmd 25 | tutorial_sparse_modes_selection_2d_linear_system 26 | 27 | .. toctree:: 28 | :maxdepth: 4 29 | :caption: User Guide 30 | 31 | API Documentation 32 | 33 | .. toctree:: 34 | :maxdepth: 1 35 | :caption: Useful links 36 | 37 | PyKoopman @ PyPI 38 | Issue Tracker 39 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=61.0", 4 | "wheel"] 5 | build-backend = "setuptools.build_meta" 6 | 7 | [project] 8 | name = "pykoopman" 9 | version = "1.1.1" 10 | authors = [ 11 | { name = "Shaowu Pan", email = "shawnpan@uw.edu" }, 12 | { name = "Eurika Kaiser", email = "eurika@uw.edu" }, 13 | { name = "Brian de Silva", email = "bdesilva@uw.edu" }, 14 | { name = "J. Nathan Kutz", email = "kutz@uw.edu" }, 15 | { name = "Steven L. Brunton", email = "sbrunton@uw.edu" }, 16 | ] 17 | description = "Python package for data-driven approximations to the Koopman operator." 
18 | readme = "README.rst" 19 | requires-python = ">=3.10" 20 | classifiers = [ 21 | "Programming Language :: Python", 22 | "Programming Language :: Python :: 3.10", 23 | "Development Status :: 4 - Beta", 24 | "Intended Audience :: Science/Research", 25 | "License :: OSI Approved :: MIT License", 26 | "Topic :: Scientific/Engineering :: Mathematics", 27 | ] 28 | dependencies = [ 29 | "matplotlib >=3.6.0", 30 | "derivative ~= 0.6.0", 31 | "scikit-learn >= 1.1.3, <= 1.1.3", 32 | "numpy >=1.20, <= 1.26", 33 | "scipy >1.6.0, <= 1.11.2", 34 | "pydmd >0.4, <= 0.4.1", 35 | "optht ~= 0.2.0", 36 | "prettytable >3.0.0, <= 3.9.0", 37 | "torch ~= 2.1.0", 38 | "torchvision ~= 0.16.0", 39 | "torchaudio ~= 2.1.0", 40 | "lightning ~= 2.0.9", 41 | ] 42 | 43 | [project.optional-dependencies] 44 | dev = [ 45 | "pytest <= 7.4.4", 46 | "pytest-cov ~= 4.1.0", 47 | "pytest-lazy-fixture ~= 0.6.3", 48 | "flake8-builtins-unleashed ~= 1.3.1", 49 | "setuptools_scm ~= 8.0.2", 50 | "setuptools_scm_git_archive", 51 | "jupyter >= 1.0.0", 52 | "notebook >7.0.0, <=7.0.4", 53 | "nbsphinx", 54 | "sphinx-codeautolink", 55 | "sphinx >= 3,<=7.0.0", 56 | "sphinxcontrib-apidoc", 57 | "sphinx_rtd_theme", 58 | "pre-commit", 59 | "sphinx-nbexamples", 60 | "jupyter_contrib_nbextensions", 61 | ] 62 | 63 | 64 | 65 | [project.urls] 66 | "Homepage" = "https://github.com/dynamicslab/pykoopman" 67 | "Bug Tracker" = "https://github.com/dynamicslab/pykoopman/issues" 68 | 69 | [tool.setuptools_scm] 70 | #[tool.setuptools.dynamic] 71 | #dependencies = { file = ["requirements.txt"] } 72 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -e . 
2 | -r requirements.txt 3 | # -r requirements-torch.txt --extra-index-url https://download.pytorch.org/whl/cu121/ 4 | -f https://download.pytorch.org/whl/cu121/torch_stable.html 5 | -r requirements-torch.txt 6 | 7 | pytest <= 7.4.4 8 | pytest-cov ~= 4.1.0 9 | pytest-lazy-fixture ~= 0.6.3 10 | flake8-builtins-unleashed ~= 1.3.1 11 | setuptools_scm ~= 8.0.2 12 | setuptools_scm_git_archive 13 | jupyter >= 1.0.0 14 | notebook > 7.0.0, <= 7.0.4 15 | nbsphinx 16 | sphinx-codeautolink 17 | sphinx >= 3, <= 7.0.0 18 | sphinxcontrib-apidoc 19 | sphinx_rtd_theme 20 | pre-commit 21 | sphinx-nbexamples 22 | jupyter_contrib_nbextensions 23 | PyQt5 24 | osqp 25 | -------------------------------------------------------------------------------- /requirements-torch.txt: -------------------------------------------------------------------------------- 1 | torch == 2.1.0+cu121 2 | torchvision 3 | lightning 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib >= 3.6.0 2 | derivative ~= 0.6.0 3 | scikit-learn == 1.1.3 4 | numpy >= 1.20, <= 1.26 5 | scipy > 1.6.0, <= 1.11.2 6 | pydmd > 0.4, <= 0.4.1 7 | optht ~= 0.2.0 8 | prettytable > 3.0.0, <= 3.9.0 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # flake8 settings 2 | [flake8] 3 | exclude = 4 | .git, 5 | .venv, 6 | dist, 7 | build, 8 | __pycache__ 9 | ignore = 10 | W503 # Line break before binary operator - Conflicts black 11 | E203 # Whitespace before ':' - Conflicts black 12 | per-file-ignores = 13 | __init__.py:F401,F403 14 | max-line-length = 88 15 | import-order-style = smarkets 16 | statistics = True 17 | count = True 18 | verbose = 1 19 | # format = [%(code)s] %(text)s @ %(path)s:%(row)d:%(col)d 20 | 
-------------------------------------------------------------------------------- /src/pykoopman/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pkg_resources import DistributionNotFound 4 | from pkg_resources import get_distribution 5 | 6 | try: 7 | __version__ = get_distribution(__name__).version 8 | except DistributionNotFound: 9 | pass 10 | 11 | from .koopman import Koopman 12 | from .koopman_continuous import KoopmanContinuous 13 | 14 | 15 | __all__ = [ 16 | "Koopman", 17 | "KoopmanContinuous", 18 | "common", 19 | "differentiation", 20 | "observables", 21 | "regression", 22 | "analytics", 23 | ] 24 | -------------------------------------------------------------------------------- /src/pykoopman/analytics/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from ._base_analyzer import BaseAnalyzer 4 | from ._ms_pd21 import ModesSelectionPAD21 5 | from ._pruned_koopman import PrunedKoopman 6 | 7 | __all__ = ["BaseAnalyzer", "ModesSelectionPAD21", "PrunedKoopman"] 8 | -------------------------------------------------------------------------------- /src/pykoopman/analytics/_base_analyzer.py: -------------------------------------------------------------------------------- 1 | """module for implement modes analyzer for Koopman approximation""" 2 | from __future__ import annotations 3 | 4 | import abc 5 | 6 | import numpy as np 7 | 8 | 9 | class BaseAnalyzer(object): 10 | """Base class for Koopman model analyzer. 11 | 12 | Attributes: 13 | model (Koopman): An instance of `pykoopman.koopman.Koopman`. 14 | eigenfunction (Koopman.compute_eigenfunction): A function that evaluates Koopman 15 | psi. 16 | eigenvalues_cont (numpy.ndarray): Koopman lamda in continuous-time. 17 | eigenvalues_discrete (numpy.ndarray): Koopman lamda in discrete-time. 
18 | """ 19 | 20 | def __init__(self, model): 21 | """Initialize the BaseAnalyzer object. 22 | 23 | Args: 24 | model (Koopman): An instance of `pykoopman.koopman.Koopman`. 25 | """ 26 | self.model = model 27 | self.eigenfunction = self.model.psi 28 | self.eigenvalues_cont = self.model.continuous_lamda_array 29 | self.eigenvalues_discrete = self.model.lamda_array 30 | 31 | def _compute_phi_minus_phi_evolved(self, t, validate_data_one_traj): 32 | """Compute the difference between psi evolved and psi observed. 33 | 34 | Args: 35 | t (numpy.ndarray): Time stamp of this validation trajectory. 36 | validate_data_one_traj (numpy.ndarray): Data matrix of this validation 37 | trajectory. 38 | 39 | Returns: 40 | list: Linear residual for each mode. 41 | """ 42 | 43 | # shape of phi = (num_samples, num_modes) 44 | psi = self.eigenfunction(validate_data_one_traj.T).T 45 | 46 | linear_residual_list = [] 47 | for i in range(len(self.eigenvalues_cont)): 48 | linear_residual_list.append( 49 | psi[:, i] - np.exp(self.eigenvalues_cont[i] * t) * psi[0:1, i] 50 | ) 51 | return linear_residual_list 52 | 53 | def validate(self, t, validate_data_one_traj): 54 | """Validate Koopman psi. 55 | 56 | Given a single trajectory, compute the norm of the difference 57 | between observed psi and evolved psi for each mode. 58 | 59 | Args: 60 | t (numpy.ndarray): Time stamp of this validation trajectory. 61 | validate_data_one_traj (numpy.ndarray): Data matrix of this validation 62 | trajectory. 63 | 64 | Returns: 65 | list: Difference in norm for each mode. 66 | """ 67 | 68 | linear_residual_list = self._compute_phi_minus_phi_evolved( 69 | t, validate_data_one_traj 70 | ) 71 | linear_residual_norm_list = [ 72 | np.linalg.norm(tmp) for tmp in linear_residual_list 73 | ] 74 | return linear_residual_norm_list 75 | 76 | @abc.abstractmethod 77 | def prune_model(self, *params, **kwargs): 78 | """Prune the model. 79 | 80 | This method should be implemented by the derived classes. 
81 | 82 | Args: 83 | *params: Variable length argument list. 84 | **kwargs: Arbitrary keyword arguments. 85 | 86 | Raises: 87 | NotImplementedError: If the method is not implemented by the derived class. 88 | """ 89 | raise NotImplementedError 90 | -------------------------------------------------------------------------------- /src/pykoopman/analytics/_pruned_koopman.py: -------------------------------------------------------------------------------- 1 | """Module for pruning Koopman models.""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from pykoopman.koopman import Koopman 6 | from sklearn.utils.validation import check_is_fitted 7 | 8 | 9 | class PrunedKoopman: 10 | """Prune the given original Koopman `model` at `sweep_index`. 11 | 12 | Parameters: 13 | model (Koopman): An instance of `pykoopman.koopman.Koopman`. 14 | sweep_index (np.ndarray): Selected indices in the original Koopman model. 15 | dt (float): Time step used in the original model. 16 | 17 | Attributes: 18 | sweep_index (np.ndarray): Selected indices in the original Koopman model. 19 | lamda_ (np.ndarray): Diagonal matrix that contains the selected eigenvalues. 20 | original_model (Koopman): An instance of `pykoopman.koopman.Koopman`. 21 | W_ (np.ndarray): Matrix that maps selected Koopman eigenfunctions back to the 22 | system state. 23 | 24 | Methods: 25 | fit(x): Fit the pruned model to the training data `x`. 26 | predict(x): Predict the system state at the next time stamp given `x`. 27 | psi(x_col): Evaluate the selected eigenfunctions at a given state `x`. 28 | phi(x_col): **Not implemented**. 29 | ur: **Not implemented**. 30 | A: **Not implemented**. 31 | B: **Not implemented**. 32 | C: Property. Returns `NotImplementedError`. 33 | W: Property. Returns the matrix that maps the selected Koopman eigenfunctions 34 | back to the system state. 35 | lamda: Property. Returns the diagonal matrix of selected eigenvalues. 36 | lamda_array: Property. 
Returns the selected eigenvalues as a 1D array. 37 | continuous_lamda_array: Property. Returns the selected eigenvalues in 38 | continuous-time as a 1D array. 39 | """ 40 | 41 | def __init__(self, model: Koopman, sweep_index: np.ndarray, dt): 42 | # construct lambda 43 | self.sweep_index = sweep_index 44 | # self.lamda_ = np.diag(np.diag(model.lamda)[self.sweep_index]) 45 | self.original_model = model 46 | self.time = {"dt": dt} 47 | 48 | # no support for controllable for now 49 | if self.original_model.n_control_features_ > 0: 50 | raise NotImplementedError 51 | 52 | self.A_ = None 53 | 54 | def fit(self, x): 55 | """Fit the pruned model given data matrix `x` 56 | 57 | Parameters 58 | ---------- 59 | x : numpy.ndarray 60 | Training data for refitting the Koopman V 61 | 62 | Returns 63 | ------- 64 | self : PrunedKoopman 65 | """ 66 | 67 | # pruned V 68 | selected_eigenphi = self.psi(x.T).T 69 | result = np.linalg.lstsq(selected_eigenphi, x) 70 | # print('refit residual = {}'.format(result[1])) 71 | self.W_ = result[0].T 72 | 73 | # lamda, W = np.linalg.eig(self.original_model.A) 74 | 75 | self.lamda_ = np.diag(np.diag(self.original_model.lamda)[self.sweep_index]) + 0j 76 | # evecs = self.original_model._regressor_eigenvectors 77 | 78 | return self 79 | 80 | def predict(self, x): 81 | """Predict system state at the next time stamp given `x` 82 | 83 | Parameters 84 | ---------- 85 | x : numpy.ndarray 86 | System state `x` in row-wise 87 | 88 | Returns 89 | ------- 90 | xnext : numpy.ndarray 91 | System state at the next time stamp 92 | """ 93 | 94 | if x.ndim == 1: 95 | x = x.reshape(1, -1) 96 | gnext = self.lamda @ self.psi(x.T) 97 | # xnext = self.compute_state_from_psi(gnext) 98 | xnext = self.W @ gnext 99 | return np.real(xnext.T) 100 | 101 | def psi(self, x_col): 102 | """Evaluate the selected psi at given state `x` 103 | 104 | Parameters 105 | ---------- 106 | x : numpy.ndarray 107 | System state `x` in column-wise 108 | 109 | Returns 110 | ------- 111 | 
eigenphi : numpy.ndarray 112 | Selected eigenfunctions' value at given state `x` 113 | """ 114 | 115 | # eigenphi_ori = self.original_model.psi(x_col).T 116 | # eigenphi_selected = eigenphi_ori[:, self.sweep_index] 117 | 118 | eigenphi_ori = self.original_model.psi(x_col) 119 | eigenphi_selected = eigenphi_ori[self.sweep_index] 120 | return eigenphi_selected 121 | 122 | def phi(self, x_col): 123 | # return self.original_model._regressor_eigenvectors @ self.psi(x_col) 124 | raise NotImplementedError("Pruned model does not have `phi` but only `psi`") 125 | 126 | @property 127 | def ur(self): 128 | raise NotImplementedError("Pruned model does not have `ur`") 129 | 130 | @property 131 | def A(self): 132 | raise NotImplementedError( 133 | "Pruning only happen in eigen-space. So no self.A " "but only self.lamda" 134 | ) 135 | 136 | @property 137 | def B(self): 138 | raise NotImplementedError( 139 | "Pruning only for autonomous system rather than " "controlled system" 140 | ) 141 | 142 | @property 143 | def C(self): 144 | return NotImplementedError("Pruning model does not have `C`") 145 | 146 | @property 147 | def W(self): 148 | check_is_fitted(self, "W_") 149 | return self.W_ 150 | 151 | @property 152 | def lamda(self): 153 | return self.lamda_ 154 | 155 | @property 156 | def lamda_array(self): 157 | return np.diag(self.lamda) + 0j 158 | 159 | @property 160 | def continuous_lamda_array(self): 161 | check_is_fitted(self, "_pipeline") 162 | return np.log(self.lamda_array) / self.time["dt"] 163 | -------------------------------------------------------------------------------- /src/pykoopman/common/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from .cqgle import cqgle 4 | from .examples import advance_linear_system 5 | from .examples import drss 6 | from .examples import Linear2Ddynamics 7 | from .examples import lorenz 8 | from .examples import rev_dvdp 9 | from .examples import 
rk4 10 | from .examples import slow_manifold 11 | from .examples import torus_dynamics 12 | from .examples import vdp_osc 13 | from .ks import ks 14 | from .nlse import nlse 15 | from .validation import check_array 16 | from .validation import drop_nan_rows 17 | from .validation import validate_input 18 | from .vbe import vbe 19 | 20 | __all__ = [ 21 | "check_array", 22 | "drop_nan_rows", 23 | "validate_input", 24 | "drss", 25 | "advance_linear_system", 26 | "torus_dynamics", 27 | "lorenz", 28 | "vdp_osc", 29 | "rk4", 30 | "rev_dvdp", 31 | "Linear2Ddynamics", 32 | "slow_manifold", 33 | "nlse", 34 | "vbe", 35 | "cqgle", 36 | "ks", 37 | ] 38 | -------------------------------------------------------------------------------- /src/pykoopman/common/cqgle.py: -------------------------------------------------------------------------------- 1 | """Module for cubic-quintic Ginzburg-Landau equation.""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from matplotlib import pyplot as plt 6 | from mpl_toolkits.mplot3d import Axes3D 7 | from pykoopman.common.examples import rk4 8 | from scipy.fft import fft 9 | from scipy.fft import fftfreq 10 | from scipy.fft import ifft 11 | 12 | 13 | class cqgle: 14 | """ 15 | Cubic-quintic Ginzburg-Landau equation solver. 16 | 17 | Solves the equation: 18 | i*u_t + (0.5 - i * tau) u_{xx} - i * kappa u_{xxxx} + (1-i * beta)|u|^2 u + 19 | (nu - i * sigma)|u|^4 u - i * gamma u = 0 20 | 21 | Solves the periodic boundary conditions PDE using spectral methods. 22 | 23 | Attributes: 24 | n_states (int): Number of states. 25 | x (numpy.ndarray): x-coordinates. 26 | dt (float): Time step. 27 | tau (float): Parameter tau. 28 | kappa (float): Parameter kappa. 29 | beta (float): Parameter beta. 30 | nu (float): Parameter nu. 31 | sigma (float): Parameter sigma. 32 | gamma (float): Parameter gamma. 33 | k (numpy.ndarray): Wave numbers. 34 | dk (float): Wave number spacing. 
35 | 36 | Methods: 37 | sys(t, x, u): System dynamics function. 38 | simulate(x0, n_int, n_sample): Simulate the system for a given initial 39 | condition. 40 | collect_data_continuous(x0): Collect training data pairs in continuous sense. 41 | collect_one_step_data_discrete(x0): Collect training data pairs in discrete 42 | sense. 43 | collect_one_trajectory_data(x0, n_int, n_sample): Collect data for one 44 | trajectory. 45 | visualize_data(x, t, X): Visualize the data in physical space. 46 | visualize_state_space(X): Visualize the data in state space. 47 | """ 48 | 49 | def __init__( 50 | self, 51 | n, 52 | x, 53 | dt, 54 | tau=0.08, 55 | kappa=0, 56 | beta=0.66, 57 | nu=-0.1, 58 | sigma=-0.1, 59 | gamma=-0.1, 60 | L=2 * np.pi, 61 | ): 62 | self.n_states = n 63 | self.x = x 64 | 65 | self.tau = tau 66 | self.kappa = kappa 67 | self.beta = beta 68 | self.nu = nu 69 | self.sigma = sigma 70 | self.gamma = gamma 71 | 72 | dk = 2 * np.pi / L 73 | self.k = fftfreq(self.n_states, 1.0 / self.n_states) * dk 74 | self.dt = dt 75 | 76 | def sys(self, t, x, u): 77 | xk = fft(x) 78 | 79 | # 1/3 truncation rule 80 | xk[self.n_states // 6 : 5 * self.n_states // 6] = 0j 81 | x = ifft(xk) 82 | 83 | tmp_1_k = (0.5 - 1j * self.tau) * (-self.k**2) * xk 84 | tmp_2_k = -1j * self.kappa * self.k**4 * xk 85 | tmp_3_k = fft( 86 | (1 - 1j * self.beta) * abs(x) ** 2 * x 87 | + (self.nu - 1j * self.sigma) * abs(x) ** 4 * x 88 | ) 89 | tmp_4_k = -1j * self.gamma * xk 90 | 91 | # return back to physical space 92 | y = ifft(1j * (tmp_1_k + tmp_2_k + tmp_3_k + tmp_4_k)) 93 | return y 94 | 95 | def simulate(self, x0, n_int, n_sample): 96 | # n_traj = x0.shape[1] 97 | x = x0 98 | u = np.zeros((n_int, 1), dtype=complex) 99 | X = np.zeros((n_int // n_sample, self.n_states), dtype=complex) 100 | t = 0 101 | j = 0 102 | t_list = [] 103 | for step in range(n_int): 104 | t += self.dt 105 | y = rk4(0, x, u[step], self.dt, self.sys) 106 | if (step + 1) % n_sample == 0: 107 | X[j] = y 108 | j += 1 109 | 
t_list.append(t) 110 | x = y 111 | return X, np.array(t_list) 112 | 113 | def collect_data_continuous(self, x0): 114 | """ 115 | collect training data pairs - continuous sense. 116 | 117 | given x0, with shape (n_dim, n_traj), the function 118 | returns dx/dt with shape (n_dim, n_traj) 119 | """ 120 | 121 | n_traj = x0.shape[0] 122 | u = np.zeros((n_traj, 1)) 123 | X = x0 124 | Y = [] 125 | for i in range(n_traj): 126 | y = self.sys(0, x0[i], u[i]) 127 | Y.append(y) 128 | Y = np.vstack(Y) 129 | return X, Y 130 | 131 | def collect_one_step_data_discrete(self, x0): 132 | """ 133 | collect training data pairs - discrete sense. 134 | 135 | given x0, with shape (n_dim, n_traj), the function 136 | returns system state x1 after self.dt with shape 137 | (n_dim, n_traj) 138 | """ 139 | 140 | n_traj = x0.shape[0] 141 | X = x0 142 | Y = [] 143 | for i in range(n_traj): 144 | y, _ = self.simulate(x0[i], n_int=1, n_sample=1) 145 | Y.append(y) 146 | Y = np.vstack(Y) 147 | return X, Y 148 | 149 | def collect_one_trajectory_data(self, x0, n_int, n_sample): 150 | x = x0 151 | y, _ = self.simulate(x, n_int, n_sample) 152 | return y 153 | 154 | def visualize_data(self, x, t, X): 155 | plt.figure(figsize=(6, 6)) 156 | ax = plt.axes(projection=Axes3D.name) 157 | for i in range(X.shape[0]): 158 | ax.plot(x, abs(X[i]), zs=t[i], zdir="t", label="time = " + str(i * self.dt)) 159 | # plt.legend(loc='best') 160 | ax.view_init(elev=35.0, azim=-65, vertical_axis="y") 161 | ax.set(ylabel=r"$mag. of. 
u(x,t)$", xlabel=r"$x$", zlabel=r"time $t$") 162 | plt.title("CQGLE (Kutz et al., Complexity, 2018)") 163 | plt.show() 164 | 165 | def visualize_state_space(self, X): 166 | u, s, vt = np.linalg.svd(X, full_matrices=False) 167 | # this is a pde problem so the number of snapshots are smaller than dof 168 | pca_1_r, pca_1_i = np.real(u[:, 0]), np.imag(u[:, 0]) 169 | pca_2_r, pca_2_i = np.real(u[:, 1]), np.imag(u[:, 1]) 170 | pca_3_r, pca_3_i = np.real(u[:, 2]), np.imag(u[:, 2]) 171 | 172 | plt.figure(figsize=(6, 6)) 173 | plt.semilogy(s) 174 | plt.xlabel("number of SVD terms") 175 | plt.ylabel("singular values") 176 | plt.title("PCA singular value decays") 177 | plt.show() 178 | 179 | plt.figure(figsize=(6, 6)) 180 | ax = plt.axes(projection=Axes3D.name) 181 | ax.plot3D(pca_1_r, pca_2_r, pca_3_r, "k-o") 182 | ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3") 183 | plt.title("PCA visualization (real)") 184 | plt.show() 185 | 186 | plt.figure(figsize=(6, 6)) 187 | ax = plt.axes(projection=Axes3D.name) 188 | ax.plot3D(pca_1_i, pca_2_i, pca_3_i, "k-o") 189 | ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3") 190 | plt.title("PCA visualization (imag)") 191 | plt.show() 192 | 193 | 194 | if __name__ == "__main__": 195 | n = 512 196 | x = np.linspace(-10, 10, n, endpoint=False) 197 | u0 = np.exp(-((x) ** 2)) 198 | # u0 = 2.0 / np.cosh(x) 199 | # u0 = u0.reshape(-1,1) 200 | n_int = 9000 201 | n_snapshot = 300 202 | dt = 40.0 / n_int 203 | n_sample = n_int // n_snapshot 204 | 205 | model = cqgle(n, x, dt, L=20) 206 | X, t = model.simulate(u0, n_int, n_sample) 207 | 208 | print(X.shape) 209 | print(X[:, -1].max()) 210 | 211 | # usage: visualize the data in physical space 212 | model.visualize_data(x, t, X) 213 | print(t) 214 | 215 | # usage: visualize the data in state space 216 | model.visualize_state_space(X) 217 | 218 | # usage: collect continuous data pair: x and dx/dt 219 | x0_array = np.vstack([u0, u0, u0]) 220 | X, Y = model.collect_data_continuous(x0_array) 221 | 222 | 
print(X.shape) 223 | print(Y.shape) 224 | 225 | # usage: collect discrete data pair 226 | x0_array = np.vstack([u0, u0, u0]) 227 | X, Y = model.collect_one_step_data_discrete(x0_array) 228 | 229 | print(X.shape) 230 | print(Y.shape) 231 | 232 | # usage: collect one trajectory data 233 | X = model.collect_one_trajectory_data(u0, n_int, n_sample) 234 | print(X.shape) 235 | -------------------------------------------------------------------------------- /src/pykoopman/common/ks.py: -------------------------------------------------------------------------------- 1 | """module for 1D KS equation""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from matplotlib import pyplot as plt 6 | from mpl_toolkits.mplot3d import Axes3D 7 | from scipy.fft import fft 8 | from scipy.fft import fftfreq 9 | from scipy.fft import ifft 10 | 11 | 12 | class ks: 13 | """ 14 | Solving 1D KS equation 15 | 16 | u_t = -u*u_x + u_{xx} + nu*u_{xxxx} 17 | 18 | Periodic B.C. between 0 and 2*pi. This PDE is solved 19 | using spectral methods. 
20 | """ 21 | 22 | def __init__(self, n, x, nu, dt, M=16): 23 | self.n_states = n 24 | self.dt = dt 25 | self.x = x 26 | dk = 1 27 | k = fftfreq(self.n_states, 1.0 / self.n_states) * dk 28 | k[n // 2] = 0.0 29 | L = k**2 - nu * k**4 30 | self.E = np.exp(self.dt * L) 31 | self.E2 = np.exp(self.dt * L / 2.0) 32 | # self.M = M 33 | r = np.exp(1j * np.pi * (np.arange(1, M + 1) - 0.5) / M) 34 | r = r.reshape(1, -1) 35 | r_on_circle = np.repeat(r, n, axis=0) 36 | LR = self.dt * L 37 | LR = LR.reshape(-1, 1) 38 | LR = LR.astype("complex") 39 | LR = np.repeat(LR, M, axis=1) 40 | LR += r_on_circle 41 | self.g = -0.5j * k 42 | 43 | self.Q = self.dt * np.real(np.mean((np.exp(LR / 2.0) - 1) / LR, axis=1)) 44 | self.f1 = self.dt * np.real( 45 | np.mean( 46 | (-4.0 - LR + np.exp(LR) * (4.0 - 3.0 * LR + LR**2)) / LR**3, axis=1 47 | ) 48 | ) 49 | self.f2 = self.dt * np.real( 50 | np.mean((2.0 + LR + np.exp(LR) * (-2.0 + LR)) / LR**3, axis=1) 51 | ) 52 | self.f3 = self.dt * np.real( 53 | np.mean( 54 | (-4.0 - 3.0 * LR - LR**2 + np.exp(LR) * (4.0 - LR)) / LR**3, axis=1 55 | ) 56 | ) 57 | 58 | @staticmethod 59 | def compute_u2k_zeropad_dealiased(uk_): 60 | # three over two law 61 | N = uk_.size 62 | # map uk to uk_fine 63 | uk_fine = ( 64 | np.hstack((uk_[0 : int(N / 2)], np.zeros((int(N / 2))), uk_[int(-N / 2) :])) 65 | * 3.0 66 | / 2.0 67 | ) 68 | # convert uk_fine to physical space 69 | u_fine = np.real(ifft(uk_fine)) 70 | # compute square 71 | u2_fine = np.square(u_fine) 72 | # compute fft on u2_fine 73 | u2k_fine = fft(u2_fine) 74 | # convert u2k_fine to u2k 75 | u2k = np.hstack((u2k_fine[0 : int(N / 2)], u2k_fine[int(-N / 2) :])) / 3.0 * 2.0 76 | return u2k 77 | 78 | def sys(self, t, x, u): 79 | raise NotImplementedError 80 | 81 | def simulate(self, x0, n_int, n_sample): 82 | xk = fft(x0) 83 | u = np.zeros((n_int, 1)) 84 | X = np.zeros((n_int // n_sample, self.n_states)) 85 | t = 0 86 | j = 0 87 | t_list = [] 88 | for step in range(n_int): 89 | t += self.dt 90 | Nv = self.g * 
self.compute_u2k_zeropad_dealiased(xk) 91 | a = self.E2 * xk + self.Q * Nv 92 | Na = self.g * self.compute_u2k_zeropad_dealiased(a) 93 | b = self.E2 * xk + self.Q * Na 94 | Nb = self.g * self.compute_u2k_zeropad_dealiased(b) 95 | c = self.E2 * a + self.Q * (2.0 * Nb - Nv) 96 | Nc = self.g * self.compute_u2k_zeropad_dealiased(c) 97 | xk = self.E * xk + Nv * self.f1 + 2.0 * (Na + Nb) * self.f2 + Nc * self.f3 98 | 99 | if (step + 1) % n_sample == 0: 100 | y = np.real(ifft(xk)) + self.dt * u[j] 101 | X[j, :] = y 102 | j += 1 103 | t_list.append(t) 104 | xk = fft(y) 105 | 106 | return X, np.array(t_list) 107 | 108 | def collect_data_continuous(self, x0): 109 | raise NotImplementedError 110 | 111 | def collect_one_step_data_discrete(self, x0): 112 | """ 113 | collect training data pairs - discrete sense. 114 | 115 | given x0, with shape (n_dim, n_traj), the function 116 | returns system state x1 after self.dt with shape 117 | (n_dim, n_traj) 118 | """ 119 | n_traj = x0.shape[0] 120 | X = x0 121 | Y = [] 122 | for i in range(n_traj): 123 | y, _ = self.simulate(x0[i], n_int=1, n_sample=1) 124 | Y.append(y) 125 | Y = np.vstack(Y) 126 | return X, Y 127 | 128 | def collect_one_trajectory_data(self, x0, n_int, n_sample): 129 | x = x0 130 | y, _ = self.simulate(x, n_int, n_sample) 131 | return y 132 | 133 | def visualize_data(self, x, t, X): 134 | plt.figure(figsize=(6, 6)) 135 | ax = plt.axes(projection=Axes3D.name) 136 | for i in range(X.shape[0]): 137 | ax.plot(x, X[i], zs=t[i], zdir="t", label="time = " + str(i * self.dt)) 138 | ax.view_init(elev=35.0, azim=-65, vertical_axis="y") 139 | ax.set(ylabel=r"$u(x,t)$", xlabel=r"$x$", zlabel=r"time $t$") 140 | plt.title("1D K-S equation") 141 | plt.show() 142 | 143 | def visualize_state_space(self, X): 144 | u, s, vt = np.linalg.svd(X, full_matrices=False) 145 | plt.figure(figsize=(6, 6)) 146 | plt.semilogy(s) 147 | plt.xlabel("number of SVD terms") 148 | plt.ylabel("singular values") 149 | plt.title("PCA singular value decays") 
150 | plt.show() 151 | 152 | # this is a pde problem so the number of snapshots are smaller than dof 153 | pca_1, pca_2, pca_3 = u[:, 0], u[:, 1], u[:, 2] 154 | plt.figure(figsize=(6, 6)) 155 | ax = plt.axes(projection=Axes3D.name) 156 | ax.plot3D(pca_1, pca_2, pca_3, "k-o") 157 | ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3") 158 | plt.title("PCA visualization") 159 | plt.show() 160 | 161 | 162 | if __name__ == "__main__": 163 | n = 256 164 | x = np.linspace(0, 2.0 * np.pi, n, endpoint=False) 165 | u0 = np.sin(x) 166 | nu = 0.01 167 | n_int = 1000 168 | n_snapshot = 500 169 | dt = 4.0 / n_int 170 | n_sample = n_int // n_snapshot 171 | 172 | model = ks(n, x, nu=nu, dt=dt) 173 | X, t = model.simulate(u0, n_int, n_sample) 174 | print(X.shape) 175 | model.visualize_data(x, t, X) 176 | 177 | # usage: visualize the data in state space 178 | model.visualize_state_space(X) 179 | 180 | # usage: collect discrete data pair 181 | x0_array = np.vstack([u0, u0, u0]) 182 | X, Y = model.collect_one_step_data_discrete(x0_array) 183 | 184 | print(X.shape) 185 | print(Y.shape) 186 | 187 | # usage: collect one trajectory data 188 | X = model.collect_one_trajectory_data(u0, n_int, n_sample) 189 | print(X.shape) 190 | -------------------------------------------------------------------------------- /src/pykoopman/common/nlse.py: -------------------------------------------------------------------------------- 1 | """module for nonlinear schrodinger equation""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from matplotlib import pyplot as plt 6 | from mpl_toolkits.mplot3d import Axes3D 7 | from pykoopman.common.examples import rk4 8 | from scipy.fft import fft 9 | from scipy.fft import fftfreq 10 | from scipy.fft import ifft 11 | 12 | 13 | class nlse: 14 | """ 15 | nonlinear schrodinger equation 16 | 17 | iu_t + 0.5u_xx + u*|u|^2 = 0 18 | 19 | periodic B.C. 
PDE is solved with Spectral methods using FFT
    """

    def __init__(self, n, dt, L=2 * np.pi):
        # n: number of spatial grid points; dt: integration time step;
        # L: domain length (sets the wavenumber spacing dk).
        self.n_states = n
        # assert self.u0.size == self.n_states, 'check the size of initial
        # condition and mesh size n'

        dk = 2 * np.pi / L
        # FFT wavenumbers scaled to the physical domain length.
        self.k = fftfreq(self.n_states, 1.0 / self.n_states) * dk
        self.dt = dt

    def sys(self, t, x, u):
        """the RHS for the governing equation using FFT"""
        xk = fft(x)

        # 4/3 truncation rule
        # dealiasing due to triple nonlinearity
        # note: you could do zero-padding to improve memory
        # efficiency
        xk[self.n_states // 4 : 3 * self.n_states // 4] = 0j
        x = ifft(xk)

        # linear dispersion term i * (-k^2/2) * u_hat, evaluated in Fourier space
        yk = (-self.k**2 * xk.ravel() / 2) * 1j
        # cubic nonlinearity i*|u|^2*u evaluated in physical space, plus forcing u
        y = ifft(yk) + 1j * abs(x) ** 2 * x + u
        return y

    def simulate(self, x0, n_int, n_sample):
        # Integrate n_int RK4 steps of size self.dt, storing every
        # n_sample-th state; returns (snapshots, snapshot times).
        # n_traj = x0.shape[1]
        x = x0
        u = np.zeros((n_int, 1), dtype=complex)
        X = np.zeros((n_int // n_sample, self.n_states), dtype=complex)
        t = 0
        j = 0
        t_list = []
        for step in range(n_int):
            t += self.dt
            y = rk4(0, x, u[step], self.dt, self.sys)
            if (step + 1) % n_sample == 0:
                X[j] = y
                j += 1
                t_list.append(t)
            x = y
        return X, np.array(t_list)

    def collect_data_continuous(self, x0):
        """
        collect training data pairs - continuous sense.

        given x0, with shape (n_dim, n_traj), the function
        returns dx/dt with shape (n_dim, n_traj)
        """

        n_traj = x0.shape[0]
        u = np.zeros((n_traj, 1))
        X = x0
        Y = []
        for i in range(n_traj):
            y = self.sys(0, x0[i], u[i])
            Y.append(y)
        Y = np.vstack(Y)
        return X, Y

    def collect_one_step_data_discrete(self, x0):
        """
        collect training data pairs - discrete sense.

        given x0, with shape (n_dim, n_traj), the function
        returns system state x1 after self.dt with shape
        (n_dim, n_traj)
        """

        n_traj = x0.shape[0]
        X = x0
        Y = []
        for i in range(n_traj):
            # one RK4 step of size self.dt per initial condition
            y, _ = self.simulate(x0[i], n_int=1, n_sample=1)
            # for j in range(int(delta_t // self.dt)):
            #     y = rk4(0, x, u[:, i], self.dt, self.sys)
            #     x = y
            Y.append(y)
        Y = np.vstack(Y)
        return X, Y

    def collect_one_trajectory_data(self, x0, n_int, n_sample):
        x = x0
        y, _ = self.simulate(x, n_int, n_sample)
        return y

    def visualize_data(self, x, t, X):
        # Waterfall plot of |u(x, t)| for each stored snapshot.
        plt.figure(figsize=(6, 6))
        ax = plt.axes(projection=Axes3D.name)
        for i in range(X.shape[0]):
            ax.plot(x, abs(X[i]), zs=t[i], zdir="t", label="time = " + str(i * self.dt))
        # plt.legend(loc='best')
        ax.view_init(elev=35.0, azim=-65, vertical_axis="y")
        ax.set(ylabel=r"$mag. of u(x,t)$", xlabel=r"$x$", zlabel=r"time $t$")
        plt.title("Nonlinear schrodinger equation (Kutz et al., Complexity, 2018)")
        plt.show()

    def visualize_state_space(self, X):
        # SVD of the (complex) snapshot matrix; plot singular value decay and
        # the leading three left singular vectors (real and imaginary parts).
        u, s, vt = np.linalg.svd(X, full_matrices=False)
        # this is a pde problem so the number of snapshots are smaller than dof
        pca_1_r, pca_1_i = np.real(u[:, 0]), np.imag(u[:, 0])
        pca_2_r, pca_2_i = np.real(u[:, 1]), np.imag(u[:, 1])
        pca_3_r, pca_3_i = np.real(u[:, 2]), np.imag(u[:, 2])

        plt.figure(figsize=(6, 6))
        plt.semilogy(s)
        plt.xlabel("number of SVD terms")
        plt.ylabel("singular values")
        plt.title("PCA singular value decays")
        plt.show()

        plt.figure(figsize=(6, 6))
        ax = plt.axes(projection=Axes3D.name)
        ax.plot3D(pca_1_r, pca_2_r, pca_3_r, "k-o")
        ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3")
        plt.title("PCA visualization (real)")
        plt.show()

        plt.figure(figsize=(6, 6))
        ax = plt.axes(projection=Axes3D.name)
        ax.plot3D(pca_1_i, pca_2_i, pca_3_i, "k-o")
        ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3")
        plt.title("PCA visualization (imag)")
        plt.show()


if __name__ == "__main__":
    # Demo/smoke-test driver for the nlse model.
    n = 512
    x = np.linspace(-15, 15, n, endpoint=False)
    u0 = 2.0 / np.cosh(x)
    # u0 = u0.reshape(-1,1)
    n_int = 10000
    n_snapshot = 80  # in the original paper, it is 20, but I think too small
    dt = np.pi / n_int
    n_sample = n_int // n_snapshot

    model = nlse(n, dt=dt, L=30)
    X, t = model.simulate(u0, n_int, n_sample)

    # usage: visualize the data in physical space
    model.visualize_data(x, t, X)

    # usage: visualize the data in state space
    model.visualize_state_space(X)

    print(X.shape)
    print(t[1] - t[0])

    # usage: collect continuous data pair: x and dx/dt
    x0_array = np.vstack([u0, u0, u0])
    X, Y = model.collect_data_continuous(x0_array)

    print(X.shape)
    print(Y.shape)

    # usage: collect discrete data pair
    x0_array = np.vstack([u0, u0, u0])
    X, Y = model.collect_one_step_data_discrete(x0_array)

    print(X.shape)
    print(Y.shape)

    # usage: collect one trajectory data
    X = model.collect_one_trajectory_data(u0, n_int, n_sample)
    print(X.shape)
--------------------------------------------------------------------------------
/src/pykoopman/common/validation.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import numpy as np
from sklearn.utils import check_array as skl_check_array

# Sentinel meaning "t was not supplied" so that an explicit t=None can be
# rejected with a clear error message.
T_DEFAULT = object()


def validate_input(x, t=T_DEFAULT):
    # Validate measurement data x (and optionally time t). Accepts a single
    # 2D array, a 1D array (promoted to a column), or a list of trajectories
    # (validated recursively).
    if not isinstance(x, np.ndarray) and not isinstance(x, list):
        raise ValueError("x must be array-like OR a list of array-like")
    elif isinstance(x, list):
        # NOTE(review): the list is validated element-wise *in place*,
        # mutating the caller's list.
        for i in range(len(x)):
            x[i] = validate_input(x[i], t)
        return x
    elif x.ndim == 1:
        # promote a single time series to a column vector
        x = x.reshape(-1, 1)
    x = check_array(x)

    # add another case if x is a list of trajectory

    if t is not T_DEFAULT:
        if t is None:
            raise ValueError("t must be a scalar or array-like.")
        # Apply this check if t is a scalar
        elif np.ndim(t) == 0 and (isinstance(t, int) or isinstance(t, float)):
            if t <= 0:
                raise ValueError("t must be positive")
        # Only apply these tests if t is array-like
        elif isinstance(t, np.ndarray):
            if not len(t) == x.shape[0]:
                raise ValueError("Length of t should match x.shape[0].")
            if not np.all(t[:-1] < t[1:]):
                raise ValueError("Values in t should be in strictly increasing order.")
        else:
            raise ValueError("t must be a scalar or array-like.")

    return x


def check_array(x, **kwargs):
    """sklearn's check_array with complex support: validate real and
    imaginary parts separately and recombine."""
    if np.iscomplexobj(x):
        return skl_check_array(x.real, **kwargs) + 1j * skl_check_array(
            x.imag, **kwargs
        )
    else:
        return skl_check_array(x, **kwargs)


def drop_nan_rows(arr, *args):
    """
    Remove rows in all inputs for which `arr` has `_np.nan` entries.

    Parameters
    ----------
    arr : numpy.ndarray
        Array whose rows are checked for nan entries.
        Any rows containing nans are removed from ``arr`` and all arguments
        passed via ``args``.
    *args : variable length argument list of numpy.ndarray
        Additional arrays from which to remove rows.
        Each argument should have the same number of rows as ``arr``.

    Returns
    -------
    arrays : tuple of numpy.ndarray
        Arrays with nan rows dropped.
        The first entry corresponds to ``arr`` and all following entries
        to ``*args``.
    """
    nan_inds = np.isnan(arr).any(axis=1)
    return (arr[~nan_inds], *[arg[~nan_inds] for arg in args])
--------------------------------------------------------------------------------
/src/pykoopman/common/vbe.py:
--------------------------------------------------------------------------------
"""module for 1D viscous burgers"""
from __future__ import annotations

import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pykoopman.common.examples import rk4
from scipy.fft import fft
from scipy.fft import fftfreq
from scipy.fft import ifft


class vbe:
    r"""
    1D viscous Burgers equation

    u_t = -u*u_x + \nu u_{xx}

    periodic B.C. PDE is solved using spectral methods
    """

    def __init__(self, n, x, dt, nu=0.1, L=2 * np.pi):
        # n: number of grid points; x: spatial grid; dt: time step;
        # nu: viscosity; L: domain length (sets the wavenumber spacing).
        self.n_states = n
        self.x = x
        self.nu = nu
        dk = 2 * np.pi / L
        self.k = fftfreq(self.n_states, 1.0 / self.n_states) * dk
        self.dt = dt

    def sys(self, t, x, u):
        # RHS of the PDE evaluated pseudo-spectrally.
        xk = fft(x)

        # 3/2 truncation rule
        xk[self.n_states // 3 : 2 * self.n_states // 3] = 0j
        x = ifft(xk)

        # nonlinear advection
        tmp_nl_k = fft(-0.5 * x * x)
        tmp_nl_x_k = 1j * self.k * tmp_nl_k

        # linear viscous term
        tmp_vis_k = -self.nu * self.k**2 * xk

        # return back to physical space
        y = np.real(ifft(tmp_nl_x_k + tmp_vis_k))
        return y

    def simulate(self, x0, n_int, n_sample):
        # Integrate n_int RK4 steps, storing every n_sample-th state.
        # n_traj = x0.shape[1]
        x = x0
        u = np.zeros((n_int, 1))
        X = np.zeros((n_int // n_sample, self.n_states))
        t = 0
        j = 0
        t_list = []
        for step in range(n_int):
            t += self.dt
            y = rk4(0, x, u[step, :], self.dt, self.sys)
            if (step + 1) % n_sample == 0:
                X[j, :] = y
                j += 1
                t_list.append(t)
            x = y
        return X, np.array(t_list)

    def collect_data_continuous(self, x0):
        """
        collect
training data pairs - continuous sense.

        given x0, with shape (n_dim, n_traj), the function
        returns dx/dt with shape (n_dim, n_traj)
        """

        n_traj = x0.shape[0]
        u = np.zeros((n_traj, 1))
        X = x0
        Y = []
        for i in range(n_traj):
            y = self.sys(0, x0[i], u[i])
            Y.append(y)
        Y = np.vstack(Y)
        return X, Y

    def collect_one_step_data_discrete(self, x0):
        """
        collect training data pairs - discrete sense.

        given x0, with shape (n_dim, n_traj), the function
        returns system state x1 after self.dt with shape
        (n_dim, n_traj)
        """

        n_traj = x0.shape[0]
        X = x0
        Y = []
        for i in range(n_traj):
            # one RK4 step of size self.dt per initial condition
            y, _ = self.simulate(x0[i], n_int=1, n_sample=1)
            Y.append(y)
        Y = np.vstack(Y)
        return X, Y

    def collect_one_trajectory_data(self, x0, n_int, n_sample):
        x = x0
        y, _ = self.simulate(x, n_int, n_sample)
        return y

    def visualize_data(self, x, t, X):
        # Waterfall plot of u(x, t) for each stored snapshot.
        plt.figure(figsize=(6, 6))
        ax = plt.axes(projection=Axes3D.name)
        for i in range(X.shape[0]):
            ax.plot(x, X[i], zs=t[i], zdir="t", label="time = " + str(i * self.dt))
        # plt.legend(loc='best')
        ax.view_init(elev=35.0, azim=-65, vertical_axis="y")
        ax.set(ylabel=r"$u(x,t)$", xlabel=r"$x$", zlabel=r"time $t$")
        plt.title("1D Viscous Burgers equation (Kutz et al., Complexity, 2018)")
        plt.show()

    def visualize_state_space(self, X):
        # SVD of the snapshot matrix; plot singular value decay and the
        # trajectory in the leading three left singular vectors.
        u, s, vt = np.linalg.svd(X, full_matrices=False)
        plt.figure(figsize=(6, 6))
        plt.semilogy(s)
        plt.xlabel("number of SVD terms")
        plt.ylabel("singular values")
        plt.title("PCA singular value decays")
        plt.show()

        # this is a pde problem so the number of snapshots are smaller than dof
        pca_1, pca_2, pca_3 = u[:, 0], u[:, 1], u[:, 2]
        plt.figure(figsize=(6, 6))
        ax = plt.axes(projection=Axes3D.name)
        ax.plot3D(pca_1, pca_2, pca_3, "k-o")
        ax.set(xlabel="pc1", ylabel="pc2", zlabel="pc3")
        plt.title("PCA visualization")
        plt.show()


if __name__ == "__main__":
    # Demo/smoke-test driver for the vbe model.
    n = 256
    x = np.linspace(-15, 15, n, endpoint=False)
    u0 = np.exp(-((x + 2) ** 2))
    # u0 = 2.0 / np.cosh(x)
    # u0 = u0.reshape(-1,1)
    n_int = 3000
    n_snapshot = 30
    dt = 30.0 / n_int
    n_sample = n_int // n_snapshot

    model = vbe(n, x, dt=dt, L=30)
    X, t = model.simulate(u0, n_int, n_sample)

    print(X.shape)
    # print(X[:,-1].max())

    # usage: visualize the data in physical space
    model.visualize_data(x, t, X)
    print(t)

    # usage: visualize the data in state space
    model.visualize_state_space(X)

    # usage: collect continuous data pair: x and dx/dt
    x0_array = np.vstack([u0, u0, u0])
    X, Y = model.collect_data_continuous(x0_array)

    print(X.shape)
    print(Y.shape)

    # usage: collect discrete data pair
    x0_array = np.vstack([u0, u0, u0])
    X, Y = model.collect_one_step_data_discrete(x0_array)

    print(X.shape)
    print(Y.shape)

    # usage: collect one trajectory data
    X = model.collect_one_trajectory_data(u0, n_int, n_sample)
    print(X.shape)
--------------------------------------------------------------------------------
/src/pykoopman/differentiation/__init__.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from ._derivative import Derivative
from ._finite_difference import FiniteDifference

__all__ = ["Derivative", "FiniteDifference"]
--------------------------------------------------------------------------------
/src/pykoopman/differentiation/_derivative.py:
--------------------------------------------------------------------------------
"""Wrapper classes for differentiation methods from the
:doc:`derivative:index` package.

Some default values used here may differ from those used in :doc:`derivative:index`.
"""
from __future__ import annotations

from derivative import dxdt
from numpy import arange
from sklearn.base import BaseEstimator

from ..common import validate_input


class Derivative(BaseEstimator):
    """
    Wrapper class for differentiation classes from the :doc:`derivative:index` package.
    This class is meant to provide all the same functionality as the
    `dxdt `_ method.

    This class includes a :meth:`__call__` method.

    Parameters
    ----------
    derivative_kws: dictionary, optional
        Keyword arguments to be passed to the
        `dxdt `_
        method.

    Notes
    -----
    See the `derivative documentation `_
    for acceptable keywords.
    """

    def __init__(self, **kwargs):
        # All keyword arguments are forwarded verbatim to derivative.dxdt.
        self.kwargs = kwargs

    def set_params(self, **params):
        """
        Set the parameters of this estimator.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimization to gain speed (inspect is slow)
            return self
        else:
            self.kwargs.update(params)

        return self

    def get_params(self, deep=True):
        """Get parameters."""
        params = super().get_params(deep)

        if isinstance(self.kwargs, dict):
            params.update(self.kwargs)

        return params

    def __call__(self, x, t):
        """
        Perform numerical differentiation by calling the ``dxdt`` method.

        Parameters
        ----------
        x: np.ndarray, shape (n_samples, n_features)
            Data to be differentiated. Rows should correspond to different
            points in time and columns to different features.

        t: np.ndarray, shape (n_samples, )
            Time points for each sample (row) in ``x``.
77 | 78 | Returns 79 | ------- 80 | x_dot: np.ndarray, shape (n_samples, n_features) 81 | """ 82 | x = validate_input(x, t=t) 83 | 84 | if isinstance(t, (int, float)): 85 | if t < 0: 86 | raise ValueError("t must be a positive constant or an array") 87 | t = arange(x.shape[0]) * t 88 | 89 | return dxdt(x, t, axis=0, **self.kwargs) 90 | -------------------------------------------------------------------------------- /src/pykoopman/differentiation/_finite_difference.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import numpy as np 4 | from sklearn.base import BaseEstimator 5 | 6 | 7 | class FiniteDifference(BaseEstimator): 8 | def __init__(self, order=1): 9 | self.order = order 10 | 11 | def __call__(self, x, t=1): 12 | return np.gradient(x) 13 | -------------------------------------------------------------------------------- /src/pykoopman/koopman_continuous.py: -------------------------------------------------------------------------------- 1 | """module for continuous time Koopman class""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from sklearn.utils.validation import check_is_fitted 6 | 7 | from .differentiation import Derivative 8 | from .koopman import Koopman 9 | 10 | 11 | class KoopmanContinuous(Koopman): 12 | """ 13 | Continuous-time Koopman class. 14 | 15 | Args: 16 | observables: Observables object, optional 17 | (default: pykoopman.observables.Identity) 18 | Map(s) to apply to raw measurement data before 19 | estimating the Koopman operator. Must extend 20 | pykoopman.observables.BaseObservables. The default 21 | option, pykoopman.observables.Identity, leaves the 22 | input untouched. 23 | differentiator: Callable, optional 24 | (default: centered difference) 25 | Function used to compute numerical derivatives. 
            The function must have the call signature
            differentiator(x, t), where x is a 2D numpy ndarray
            of shape (n_samples, n_features) and t is a 1D numpy
            ndarray of shape (n_samples,).
        regressor: Regressor object, optional
            (default: DMD)
            The regressor used to learn the Koopman operator from
            the observables. regressor can either extend
            pykoopman.regression.BaseRegressor, or the
            pydmd.DMDBase class. In the latter case, the pydmd
            object must have both a fit and a predict method.
    """

    def __init__(
        self,
        observables=None,
        # NOTE(review): this default Derivative instance is created once at
        # class-definition time and therefore shared by every
        # KoopmanContinuous constructed without an explicit differentiator.
        differentiator=Derivative(kind="finite_difference", k=1),
        regressor=None,
    ):
        """
        Continuous-time Koopman class.

        Args:
            observables: Observables object, optional
                (default: pykoopman.observables.Identity)
                Map(s) to apply to raw measurement data before
                estimating the Koopman operator. Must extend
                pykoopman.observables.BaseObservables. The default
                option, pykoopman.observables.Identity, leaves the
                input untouched.
            differentiator: Callable, optional
                (default: centered difference)
                Function used to compute numerical derivatives.
                The function must have the call signature
                differentiator(x, t), where x is a 2D numpy ndarray
                of shape (n_samples, n_features) and t is a 1D numpy
                ndarray of shape (n_samples,).
            regressor: Regressor object, optional
                (default: DMD)
                The regressor used to learn the Koopman operator from
                the observables. regressor can either extend
                pykoopman.regression.BaseRegressor, or the
                pydmd.DMDBase class. In the latter case, the pydmd
                object must have both a fit and a predict method.
        """
        super().__init__(observables, regressor)
        self.differentiator = differentiator

    def predict(self, x, dt=0, u=None):
        """
        Predict using continuous-time Koopman model.

        Args:
            x: numpy.ndarray
                State measurements. Each row should correspond to
                the system state at some point in time.
            dt: float, optional (default: 0)
                Time step between measurements. If specified, the
                prediction is made for the given time step in the
                future.
            u: numpy.ndarray, optional (default: None)
                Control input/actuation data. Each row should
                correspond to one sample and each column a control
                variable or feature.

        Returns:
            output: numpy.ndarray
                Predicted state using the continuous-time Koopman
                model. Each row corresponds to the predicted state
                for the corresponding row in x.
        """
        check_is_fitted(self, "_pipeline")

        if u is None:
            ypred = self._pipeline.predict(X=x, t=dt)
        else:
            ypred = self._pipeline.predict(X=x, u=u, t=dt)

        # map predicted observables back to state space
        output = self.observables.inverse(ypred)

        return output

    def simulate(self, x, t=0, u=None):
        """
        Simulate continuous-time Koopman model.

        Args:
            x: numpy.ndarray
                Initial state from which to simulate. Each row
                corresponds to the system state at some point in time.
            t: float, optional (default: 0)
                Time at which to simulate the system. If specified,
                the simulation is performed for the given time.
            u: numpy.ndarray, optional (default: None)
                Control input/actuation data. Each row should
                correspond to one sample and each column a control
                variable or feature.

        Returns:
            output: numpy.ndarray
                Simulated states of the system. Each row corresponds
                to the simulated state at a specific time point.
        """
        check_is_fitted(self, "_pipeline")

        if u is None:
            ypred = self._pipeline.predict(X=x, t=t)
        else:
            ypred = self._pipeline.predict(X=x, u=u, t=t)

        # invert the observables row by row to recover the state trajectory
        output = []
        for k in range(ypred.shape[0]):
            output.append(np.squeeze(self.observables.inverse(ypred[k][np.newaxis, :])))

        return np.array(output)

    def _step(self, x, u=None):
        """
        Placeholder method for step function.

        This method is not implemented in the ContinuousKoopman class
        as there is no explicit step function for continuous-time
        Koopman models.

        Raises:
            NotImplementedError: This method is not implemented
                in the ContinuousKoopman class.
        """
        raise NotImplementedError("ContinuousKoopman does not have a step function.")
--------------------------------------------------------------------------------
/src/pykoopman/observables/__init__.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from ._custom_observables import CustomObservables
from ._identity import Identity
from ._polynomial import Polynomial
from ._radial_basis_functions import RadialBasisFunction
from ._random_fourier_features import RandomFourierFeatures
from ._time_delay import TimeDelay

__all__ = [
    "CustomObservables",
    "Identity",
    "Polynomial",
    "RadialBasisFunction",
    "RandomFourierFeatures",
    "TimeDelay",
]
--------------------------------------------------------------------------------
/src/pykoopman/observables/_custom_observables.py:
--------------------------------------------------------------------------------
"""Module for customized observables"""
from __future__ import annotations

from itertools import combinations
from itertools import combinations_with_replacement

import numpy as np
from numpy import empty
from sklearn.utils.validation import check_is_fitted

from ..common import validate_input
from ._base import BaseObservables


class CustomObservables(BaseObservables):
    """
    A class to map state variables using custom observables.

    This class allows the user to specify a list of functions that map state variables
    to observables. The identity map is automatically included. It can be configured to
    include or exclude self-interaction terms.

    Attributes:
        observables (list of callable): List of functions mapping state variables to
            observables. Univariate functions are applied to each state variable,
            and multivariable functions are applied to combinations of state
            variables. The identity map is automatically included in this list.
        observable_names (list of callable, optional): List of functions mapping from
            names of state variables to names of observables. For example,
            the observable name lambda x: f"{x}^2" would correspond to the function
            x^2. If None, the names "f0(...)", "f1(...)", ... will be used. Default
            is None.
        interaction_only (bool, optional): If True, omits self-interaction terms.
            Function evaluations of the form f(x,x) and f(x,y,x) will be omitted,
            but those of the form f(x,y) and f(x,y,z) will be included. If False,
            all combinations will be included. Default is True.
        n_input_features_ (int): Number of input features.
        n_output_features_ (int): Number of output features.
    """

    def __init__(self, observables, observable_names=None, interaction_only=True):
        """
        Initialize a CustomObservables instance.

        Args:
            observables (list of callable): List of functions mapping state variables
                to observables. Univariate functions are applied to each state
                variable, and multivariable functions are applied to combinations of
                state variables. The identity map is automatically included in this
                list.
            observable_names (list of callable, optional): List of functions mapping
                from names of state variables to names of observables. For example,
                the observable name lambda x: f"{x}^2" would correspond to the
                function x^2. If None, the names "f0(...)", "f1(...)", ... will
                be used. Default is None.
            interaction_only (bool, optional): If True, omits self-interaction terms.
                Function evaluations of the form f(x,x) and f(x,y,x) will be omitted,
                but those of the form f(x,y) and f(x,y,z) will be included. If False,
                all combinations will be included. Default is True.
        """
        super(CustomObservables, self).__init__()
        # the identity map is always the first observable
        self.observables = [identity, *observables]
        # names (if given) must match the *user-supplied* observables;
        # the identity's name is added later in fit()
        if observable_names and (len(observables) != len(observable_names)):
            raise ValueError(
                "observables and observable_names must have the same length"
            )
        self.observable_names = observable_names
        self.interaction_only = interaction_only
        self.include_state = True

    def fit(self, x, y=None):
        """
        Fit the model to the measurement data.

        This method calculates the number of input and output features and generates
        default values for 'observable_names' if necessary. It also prepares the
        measurement matrix for data transformation.

        Args:
            x (array-like, shape (n_samples, n_input_features)): Measurement data to be
                fitted.
            y (None): This is a dummy parameter added for compatibility with sklearn's
                API. Default is None.

        Returns:
            self (CustomObservables): This method returns the fitted instance.
87 | """ 88 | x = validate_input(x) 89 | n_samples, n_features = x.shape 90 | 91 | n_output_features = 0 92 | for f in self.observables: 93 | n_args = f.__code__.co_argcount 94 | n_output_features += len( 95 | list(self._combinations(n_features, n_args, self.interaction_only)) 96 | ) 97 | 98 | self.n_input_features_ = n_features 99 | self.n_output_features_ = n_output_features 100 | self.n_consumed_samples = 0 101 | 102 | if self.observable_names is None: 103 | self.observable_names = list( 104 | map( 105 | lambda i: (lambda *x: "f" + str(i) + "(" + ",".join(x) + ")"), 106 | range(len(self.observables)), 107 | ) 108 | ) 109 | 110 | # First map is the identity 111 | self.observable_names.insert(0, identity_name) 112 | 113 | # since the first map is identity 114 | self.measurement_matrix_ = np.zeros( 115 | (self.n_input_features_, self.n_output_features_) 116 | ) 117 | self.measurement_matrix_[ 118 | : self.n_input_features_, : self.n_input_features_ 119 | ] = np.eye(self.n_input_features_) 120 | 121 | return self 122 | 123 | def transform(self, x): 124 | """ 125 | Apply custom transformations to data, computing observables. 126 | 127 | This method applies the user-defined observables functions to the input data, 128 | effectively transforming the state variables into observable ones. 129 | 130 | Args: 131 | x (array-like, shape (n_samples, n_input_features)): The measurement data 132 | to be transformed. 133 | 134 | Returns: 135 | x_transformed (array-like, shape (n_samples, n_output_features)): The 136 | transformed data, i.e., the computed observables. 
137 | """ 138 | check_is_fitted(self, "n_input_features_") 139 | check_is_fitted(self, "n_output_features_") 140 | x = validate_input(x) 141 | 142 | n_samples, n_features = x.shape 143 | 144 | if n_features != self.n_input_features_: 145 | raise ValueError("x.shape[1] does not match n_input_features_") 146 | 147 | x_transformed = empty((n_samples, self.n_output_features_), dtype=x.dtype) 148 | observables_idx = 0 149 | for f in self.observables: 150 | for c in self._combinations( 151 | self.n_input_features_, f.__code__.co_argcount, self.interaction_only 152 | ): 153 | x_transformed[:, observables_idx] = f(*[x[:, j] for j in c]) 154 | observables_idx += 1 155 | 156 | return x_transformed 157 | 158 | def get_feature_names(self, input_features=None): 159 | """ 160 | Get the names of the output features. 161 | 162 | This method returns the names of the output features as defined by the 163 | observable functions. If names for the input features are provided, they are 164 | used in the output feature names. Otherwise, default names ("x0", "x1", ..., 165 | "xn_input_features") are used. 166 | 167 | Args: 168 | input_features (list of string, length n_input_features, optional): 169 | String names for input features, if available. By default, the names 170 | "x0", "x1", ... ,"xn_input_features" are used. 171 | 172 | Returns: 173 | output_feature_names (list of string, length n_output_features): 174 | Output feature names. 
175 | """ 176 | check_is_fitted(self, "n_input_features_") 177 | if input_features is None: 178 | input_features = [f"x{i}" for i in range(self.n_input_features_)] 179 | else: 180 | if len(input_features) != self.n_input_features_: 181 | raise ValueError( 182 | "input_features must have n_input_features_ " 183 | f"({self.n_input_features_}) elements" 184 | ) 185 | 186 | feature_names = [] 187 | for i, f in enumerate(self.observables): 188 | feature_names.extend( 189 | [ 190 | self.observable_names[i](*[input_features[j] for j in c]) 191 | for c in self._combinations( 192 | self.n_input_features_, 193 | f.__code__.co_argcount, 194 | self.interaction_only, 195 | ) 196 | ] 197 | ) 198 | 199 | return feature_names 200 | 201 | @staticmethod 202 | def _combinations(n_features, n_args, interaction_only): 203 | """ 204 | Get the combinations of features to be passed to observable functions. 205 | 206 | This static method generates all possible combinations or combinations with 207 | replacement (depending on the `interaction_only` flag) of features that are to 208 | be passed to the observable functions. The combinations are represented as 209 | tuples of indices. 210 | 211 | Args: 212 | n_features (int): The total number of features. 213 | n_args (int): The number of arguments that the observable function accepts. 214 | interaction_only (bool): If True, combinations of the same feature 215 | (self-interactions) are omitted. If False, all combinations including 216 | self-interactions are included. 217 | 218 | Returns: 219 | iterable of tuples: An iterable over all combinations of feature indices 220 | to be passed to the observable functions. 
221 | """ 222 | comb = combinations if interaction_only else combinations_with_replacement 223 | return comb(range(n_features), n_args) 224 | 225 | 226 | def identity(x): 227 | """Identity map.""" 228 | return x 229 | 230 | 231 | def identity_name(x): 232 | """Name for identity map.""" 233 | return str(x) 234 | -------------------------------------------------------------------------------- /src/pykoopman/observables/_identity.py: -------------------------------------------------------------------------------- 1 | """module for Linear observables""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from sklearn.utils.validation import check_is_fitted 6 | 7 | from ..common import validate_input 8 | from ._base import BaseObservables 9 | 10 | 11 | class Identity(BaseObservables): 12 | """ 13 | A dummy observables class that simply returns its input. 14 | """ 15 | 16 | def __init__(self): 17 | """ 18 | Initialize the Identity class. 19 | 20 | This constructor initializes the Identity class which simply returns its input 21 | when transformed. 22 | """ 23 | super().__init__() 24 | self.include_state = True 25 | 26 | def fit(self, x, y=None): 27 | """ 28 | Fit the model to the provided measurement data. 29 | 30 | Args: 31 | x (array-like): The measurement data to be fit. It must have a shape of 32 | (n_samples, n_input_features). 33 | y (None): This parameter is retained for sklearn compatibility. 34 | 35 | Returns: 36 | self: Returns a fit instance of the class `pykoopman.observables.Identity`. 

        Note:
            only identity mapping is supported for list of arb trajectories
        """
        x = validate_input(x)
        if not isinstance(x, list):
            self.n_input_features_ = self.n_output_features_ = x.shape[1]
            self.measurement_matrix_ = np.eye(x.shape[1]).T
        else:
            # list of trajectories: infer the feature count from the first one
            self.n_input_features_ = self.n_output_features_ = x[0].shape[1]
            self.measurement_matrix_ = np.eye(x[0].shape[1]).T

        self.n_consumed_samples = 0

        return self

    def transform(self, x):
        """
        Apply Identity transformation to the provided data.

        Args:
            x (array-like): The measurement data to be transformed. It must have a
                shape of (n_samples, n_input_features).

        Returns:
            array-like: Returns the transformed data which is the same as the input
                data in this case.
        """
        check_is_fitted(self, "n_input_features_")
        return x

    def get_feature_names(self, input_features=None):
        """
        Get the names of the output features.

        Args:
            input_features (list of string, optional): The string names for input
                features, if available. By default, the names "x0", "x1", ... ,
                "xn_input_features" are used.

        Returns:
            list of string: Returns the output feature names.
class Polynomial(PolynomialFeatures, BaseObservables):
    """Polynomial observables.

    Essentially ``sklearn.preprocessing.PolynomialFeatures`` with support for
    complex numbers.

    Args:
        degree (int, optional): Degree of the polynomial features.
            Default is 2.
        interaction_only (bool, optional): If True, only interaction features
            are produced: products of at most ``degree`` *distinct* input
            features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
            Default is False.
        include_bias (bool, optional): If True, include a bias column — the
            feature in which all polynomial powers are zero (a column of
            ones, acting as an intercept in a linear model). Default is True.
        order (str in {'C', 'F'}, optional): Order of the output array in the
            dense case. 'F' is faster to compute but may slow down subsequent
            estimators. Default is 'C'.

    Raises:
        ValueError: If ``degree`` is less than 1.
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True, order="C"):
        """Initialize the Polynomial observable; see the class docstring."""
        # Reject any degree below 1, not just degree == 0: the documented
        # contract is ValueError for degree < 1, and a negative degree would
        # otherwise slip past the guard.
        if degree < 1:
            raise ValueError(
                "degree must be at least 1, otherwise inverse cannot be computed"
            )
        super(Polynomial, self).__init__(
            degree=degree,
            interaction_only=interaction_only,
            include_bias=include_bias,
            order=order,
        )
        self.include_state = True

    def fit(self, x, y=None):
        """Fit to measurement data and compute the number of output features.

        Delegates to ``PolynomialFeatures.fit`` (applied to the real part of
        ``x``), which sets ``n_input_features_`` and ``n_output_features_``,
        then builds ``measurement_matrix_`` so the original state can be
        recovered from the lifted feature vector. ``y`` is unused and kept
        only for sklearn interface compatibility.

        Args:
            x (np.ndarray): Measurement data, shape (n_samples, n_features).
            y (array-like, optional): Dummy input. Defaults to None.

        Returns:
            Polynomial: A fitted instance.

        Raises:
            ValueError: If the input data is not valid.
        """
        x = validate_input(x)
        self.n_consumed_samples = 0

        y_poly_out = super(Polynomial, self).fit(x.real, y)

        # The state occupies the feature columns immediately after the
        # optional bias column, so the measurement matrix is an identity
        # block placed there.
        self.measurement_matrix_ = np.zeros([x.shape[1], y_poly_out.n_output_features_])
        if self.include_bias:
            self.measurement_matrix_[:, 1 : 1 + x.shape[1]] = np.eye(x.shape[1])
        else:
            self.measurement_matrix_[:, : x.shape[1]] = np.eye(x.shape[1])

        return y_poly_out

    def transform(self, x):
        """Transform data to polynomial features.

        CSR input with degree < 4 uses the fast expansion of Nystrom & Hughes
        ("Leveraging Sparsity to Speed Up Polynomial Feature Expansions of
        CSR Matrices Using K-Simplex Numbers"); CSC input with degree < 4 is
        round-tripped through that CSR path. Dense input, or sparse input
        with degree >= 4, falls back to explicit per-combination products.

        Args:
            x (array-like or CSR/CSC sparse matrix): Data to transform, shape
                (n_samples, n_features). Prefer CSR for speed; CSC is
                required when the degree is 4 or higher.

        Returns:
            np.ndarray or sparse matrix: Feature matrix of shape
            (n_samples, n_output_features).

        Raises:
            ValueError: If the input is invalid or its shape does not match
                the training shape.
        """
        check_is_fitted(self, "n_input_features_")

        x = check_array(x, order="F", dtype=FLOAT_DTYPES, accept_sparse=("csr", "csc"))

        n_samples, n_features = x.shape

        if n_features != self.n_input_features_:
            raise ValueError("x shape does not match training shape")

        if sparse.isspmatrix_csr(x):
            if self.degree > 3:
                # High degrees only have a CSC implementation.
                return self.transform(x.tocsc()).tocsr()
            to_stack = []
            if self.include_bias:
                to_stack.append(np.ones(shape=(n_samples, 1), dtype=x.dtype))
            to_stack.append(x)
            for deg in range(2, self.degree + 1):
                xp_next = _csr_polynomial_expansion(
                    x.data,
                    x.indices,
                    x.indptr,
                    x.shape[1],
                    self.interaction_only,
                    deg,
                )
                if xp_next is None:
                    break
                to_stack.append(xp_next)
            xp = sparse.hstack(to_stack, format="csr")
        elif sparse.isspmatrix_csc(x) and self.degree < 4:
            # Low-degree CSC: use the faster CSR expansion and convert back.
            return self.transform(x.tocsr()).tocsc()
        else:
            combinations = self._combinations(
                n_features,
                self.degree,
                self.interaction_only,
                self.include_bias,
            )
            if sparse.isspmatrix(x):
                columns = []
                for comb in combinations:
                    if comb:
                        out_col = 1
                        for col_idx in comb:
                            out_col = x[:, col_idx].multiply(out_col)
                        columns.append(out_col)
                    else:
                        # The empty combination is the bias column of ones.
                        bias = sparse.csc_matrix(np.ones((x.shape[0], 1)))
                        columns.append(bias)
                xp = sparse.hstack(columns, dtype=x.dtype).tocsc()
            else:
                xp = np.empty(
                    (n_samples, self.n_output_features_),
                    dtype=x.dtype,
                    order=self.order,
                )
                for i, comb in enumerate(combinations):
                    xp[:, i] = x[:, comb].prod(1)

        return xp

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        """Generate index combinations for all polynomial terms.

        Args:
            n_features (int): Number of input features.
            degree (int): Polynomial degree.
            interaction_only (bool): If True, only products of distinct
                features are produced.
            include_bias (bool): If True, the empty combination (bias term)
                is included.

        Returns:
            itertools.chain: Iterable over all index combinations.
        """
        comb = combinations if interaction_only else combinations_w_r
        start = int(not include_bias)
        return chain.from_iterable(
            comb(range(n_features), i) for i in range(start, degree + 1)
        )

    @property
    def powers_(self):
        """Exponent of each input feature in each output feature.

        Returns:
            np.ndarray: One row per output feature; entry (i, j) is the power
            of input feature j in output feature i.

        Raises:
            NotFittedError: If the model has not been fitted.
        """
        check_is_fitted(self)

        combinations = self._combinations(
            n_features=self.n_input_features_,
            degree=self.degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        return np.vstack(
            [np.bincount(c, minlength=self.n_input_features_) for c in combinations]
        )
It first checks whether the model has been fitted, then 236 | uses the `_combinations` method to get the combinations of features, and 237 | finally calculates the exponents for each input feature. 238 | 239 | Returns: 240 | np.ndarray: A 2D array where each row represents a feature and each 241 | column represents an output of the polynomial transformation. The 242 | values are the exponents of the input features. 243 | 244 | Raises: 245 | NotFittedError: If the model has not been fitted. 246 | """ 247 | check_is_fitted(self) 248 | 249 | combinations = self._combinations( 250 | n_features=self.n_input_features_, 251 | degree=self.degree, 252 | interaction_only=self.interaction_only, 253 | include_bias=self.include_bias, 254 | ) 255 | return np.vstack( 256 | [np.bincount(c, minlength=self.n_input_features_) for c in combinations] 257 | ) 258 | -------------------------------------------------------------------------------- /src/pykoopman/observables/_radial_basis_functions.py: -------------------------------------------------------------------------------- 1 | """module for Radial basis function observables""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from numpy import empty 6 | from numpy import random 7 | from sklearn.utils.validation import check_is_fitted 8 | 9 | from ..common import validate_input 10 | from ._base import BaseObservables 11 | 12 | 13 | class RadialBasisFunction(BaseObservables): 14 | """ 15 | This class represents Radial Basis Functions (RBF) used as observables. 16 | Observables are formed as RBFs of the state variables, interpreted as new state 17 | variables. 18 | 19 | For instance, a single state variable :math:`[x(t)]` could be evaluated using 20 | multiple centers, yielding a new set of observables. This implementation supports 21 | various types of RBFs including 'gauss', 'thinplate', 'invquad', 'invmultquad', 22 | and 'polyharmonic'. 
23 | 24 | Attributes: 25 | rbf_type (str): The type of radial basis functions to be used. 26 | n_centers (int): The number of centers to compute RBF with. 27 | centers (numpy array): The centers to compute RBF with. 28 | kernel_width (float): The kernel width for Gaussian RBFs. 29 | polyharmonic_coeff (float): The polyharmonic coefficient for polyharmonic RBFs. 30 | include_state (bool): Whether to include the input coordinates as additional 31 | coordinates in the observable. 32 | n_input_features_ (int): Number of input features. 33 | n_output_features_ (int): Number of output features = Number of centers plus 34 | number of input features. 35 | 36 | Note: 37 | The implementation is based on the following references: 38 | - Williams, Matthew O and Kevrekidis, Ioannis G and Rowley, Clarence W 39 | "A data-driven approximation of the {K}oopman operator: extending dynamic 40 | mode decomposition." 41 | Journal of Nonlinear Science 6 (2015): 1307-1346 42 | - Williams, Matthew O and Rowley, Clarence W and Kevrekidis, Ioannis G 43 | "A Kernel Approach to Data-Driven {K}oopman Spectral Analysis." 44 | Journal of Computational Dynamics 2.2 (2015): 247-265 45 | - Korda, Milan and Mezic, Igor 46 | "Linear predictors for nonlinear dynamical systems: Koopman operator meets 47 | model predictive control." 
48 | Automatica 93 (2018): 149-160 49 | """ 50 | 51 | def __init__( 52 | self, 53 | rbf_type="gauss", 54 | n_centers=10, 55 | centers=None, 56 | kernel_width=1.0, 57 | polyharmonic_coeff=1.0, 58 | include_state=True, 59 | ): 60 | super().__init__() 61 | if type(rbf_type) != str: 62 | raise TypeError("rbf_type must be a string") 63 | if type(n_centers) != int: 64 | raise TypeError("n_centers must be an int") 65 | if n_centers < 0: 66 | raise ValueError("n_centers must be a nonnegative int") 67 | if kernel_width < 0: 68 | raise ValueError("kernel_width must be a nonnegative float") 69 | if polyharmonic_coeff < 0: 70 | raise ValueError("polyharmonic_coeff must be a nonnegative float") 71 | if rbf_type not in [ 72 | "thinplate", 73 | "gauss", 74 | "invquad", 75 | "invmultquad", 76 | "polyharmonic", 77 | ]: 78 | raise ValueError("rbf_type not of available type") 79 | if type(include_state) != bool: 80 | raise TypeError("include_states must be a boolean") 81 | if centers is not None: 82 | if int(n_centers) not in centers.shape: 83 | raise ValueError( 84 | "n_centers is not equal to centers.shape[1]. " 85 | "centers must be of shape (n_input_features, " 86 | "n_centers). " 87 | ) 88 | self.rbf_type = rbf_type 89 | self.n_centers = int(n_centers) 90 | self.centers = centers 91 | self.kernel_width = kernel_width 92 | self.polyharmonic_coeff = polyharmonic_coeff 93 | self.include_state = include_state 94 | 95 | def fit(self, x, y=None): 96 | """ 97 | Initializes the RadialBasisFunction with specified parameters. 98 | 99 | Args: 100 | rbf_type (str, optional): The type of radial basis functions to be used. 101 | Options are: 'gauss', 'thinplate', 'invquad', 'invmultquad', 102 | 'polyharmonic'. Defaults to 'gauss'. 103 | n_centers (int, optional): The number of centers to compute RBF with. 104 | Must be a non-negative integer. Defaults to 10. 105 | centers (numpy array, optional): The centers to compute RBF with. 
106 | If provided, it should have a shape of (n_input_features, n_centers). 107 | Defaults to None, in which case the centers are uniformly distributed 108 | over input data. 109 | kernel_width (float, optional): The kernel width for Gaussian RBFs. 110 | Must be a non-negative float. Defaults to 1.0. 111 | polyharmonic_coeff (float, optional): The polyharmonic coefficient for 112 | polyharmonic RBFs. Must be a non-negative float. Defaults to 1.0. 113 | include_state (bool, optional): Whether to include the input coordinates 114 | as additional coordinates in the observable. Defaults to True. 115 | 116 | Raises: 117 | TypeError: If rbf_type is not a string, n_centers is not an int, or 118 | include_state is not a bool. 119 | ValueError: If n_centers, kernel_width or polyharmonic_coeff is negative, 120 | rbf_type is not of available type, or centers is provided but 121 | n_centers is not equal to centers.shape[1]. 122 | """ 123 | x = validate_input(x) 124 | n_samples, n_features = x.shape 125 | self.n_consumed_samples = 0 126 | 127 | self.n_samples_ = n_samples 128 | self.n_input_features_ = n_features 129 | if self.include_state is True: 130 | self.n_output_features_ = n_features * 1 + self.n_centers 131 | elif self.include_state is False: 132 | self.n_output_features_ = self.n_centers 133 | 134 | x = validate_input(x) 135 | 136 | if x.shape[1] != self.n_input_features_: 137 | raise ValueError( 138 | "Wrong number of input features. " 139 | f"Expected x.shape[1] = {self.n_input_features_}; " 140 | f"instead x.shape[1] = {x.shape[1]}." 
141 | ) 142 | 143 | if self.centers is None: 144 | # Uniformly distributed centers 145 | self.centers = random.rand(self.n_input_features_, self.n_centers) 146 | # Change range to range of input features' range 147 | for feat in range(self.n_input_features_): 148 | xminmax = self._minmax(x[:, feat]) 149 | 150 | # Map to range [0,1] 151 | self.centers[feat, :] = ( 152 | self.centers[feat, :] - min(self.centers[feat, :]) 153 | ) / (max(self.centers[feat, :]) - min(self.centers[feat, :])) 154 | # Scale to input features' range 155 | self.centers[feat, :] = ( 156 | self.centers[feat, :] * (xminmax[1] - xminmax[0]) + xminmax[0] 157 | ) 158 | 159 | xlift = self._rbf_lifting(x) 160 | # self.measurement_matrix_ = x.T @ np.linalg.pinv(xlift.T) 161 | self.measurement_matrix_ = np.linalg.lstsq(xlift, x)[0].T 162 | 163 | return self 164 | 165 | def transform(self, x): 166 | """ 167 | Apply radial basis function transformation to the data. 168 | 169 | Args: 170 | x (array-like): Measurement data to be transformed, with shape (n_samples, 171 | n_input_features). It is assumed that rows correspond to examples, 172 | which are not required to be equi-spaced in time or in sequential order. 173 | 174 | Returns: 175 | array-like: Transformed data, with shape (n_samples, n_output_features). 176 | 177 | Raises: 178 | NotFittedError: If the 'fit' method has not been called before the 179 | 'transform' method. 180 | ValueError: If the number of features in 'x' does not match the number of 181 | input features expected by the transformer. 182 | """ 183 | check_is_fitted(self, ["n_input_features_", "centers"]) 184 | x = validate_input(x) 185 | 186 | if x.shape[1] != self.n_input_features_: 187 | raise ValueError( 188 | "Wrong number of input features. " 189 | f"Expected x.shape[1] = {self.n_input_features_}; " 190 | f"instead x.shape[1] = {x.shape[1]}." 
191 | ) 192 | 193 | y = self._rbf_lifting(x) 194 | return y 195 | 196 | def get_feature_names(self, input_features=None): 197 | """ 198 | Get the names of the output features. 199 | 200 | Args: 201 | input_features (list of str, optional): String names for input features, 202 | if available. By default, the names "x0", "x1", ... , 203 | "xn_input_features" are used. 204 | 205 | Returns: 206 | list of str: Output feature names. 207 | 208 | Raises: 209 | NotFittedError: If the 'fit' method has not been called before the 210 | 'get_feature_names' method. 211 | ValueError: If the length of 'input_features' does not match the number of 212 | input features expected by the transformer. 213 | """ 214 | 215 | check_is_fitted(self, "n_input_features_") 216 | if input_features is None: 217 | input_features = [f"x{i}" for i in range(self.n_input_features_)] 218 | else: 219 | if len(input_features) != self.n_input_features_: 220 | raise ValueError( 221 | "input_features must have n_input_features_ " 222 | f"({self.n_input_features_}) elements" 223 | ) 224 | 225 | output_features = [] 226 | if self.include_state is True: 227 | output_features.extend([f"{xi}(t)" for xi in input_features]) 228 | output_features.extend([f"phi(x(t)-c{i})" for i in range(self.n_centers)]) 229 | return output_features 230 | 231 | def _rbf_lifting(self, x): 232 | """ 233 | Internal method that performs Radial Basis Function (RBF) transformation. 234 | 235 | Args: 236 | x (numpy.ndarray): Input data of shape (n_samples, n_input_features) 237 | 238 | Returns: 239 | y (numpy.ndarray): Transformed data of shape (n_samples, n_output_features) 240 | 241 | Raises: 242 | ValueError: If 'rbf_type' is not one of the available types. 243 | 244 | Notes: 245 | This method should not be called directly. It is used internally by the 246 | 'transform' method. 
247 | """ 248 | n_samples = x.shape[0] 249 | y = empty( 250 | (n_samples, self.n_output_features_), 251 | dtype=x.dtype, 252 | ) 253 | 254 | y_index = 0 255 | if self.include_state is True: 256 | y[:, : self.n_input_features_] = x 257 | y_index = self.n_input_features_ 258 | 259 | for index_of_center in range(self.n_centers): 260 | C = self.centers[:, index_of_center] 261 | r_squared = np.sum((x - C[np.newaxis, :]) ** 2, axis=1) 262 | 263 | match self.rbf_type: 264 | case "thinplate": 265 | y_ = r_squared * np.log(np.sqrt(r_squared)) 266 | y_[np.isnan(y_)] = 0 267 | case "gauss": 268 | y_ = np.exp(-self.kernel_width**2 * r_squared) 269 | case "invquad": 270 | y_ = np.reciprocal(1 + self.kernel_width**2 * r_squared) 271 | case "invmultquad": 272 | y_ = np.reciprocal(np.sqrt(1 + self.kernel_width**2 * r_squared)) 273 | case "polyharmonic": 274 | y_ = r_squared ** (self.polyharmonic_coeff / 2) * np.log( 275 | np.sqrt(r_squared) 276 | ) 277 | case _: 278 | # if none of the above cases match: 279 | raise ValueError("provided rbf_type not available") 280 | 281 | y[:, y_index + index_of_center] = y_ 282 | 283 | return y 284 | 285 | def _minmax(self, x): 286 | min_val = min(x) 287 | max_val = max(x) 288 | return (min_val, max_val) 289 | -------------------------------------------------------------------------------- /src/pykoopman/observables/_random_fourier_features.py: -------------------------------------------------------------------------------- 1 | """module for random fourier features observables""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | from sklearn.utils.validation import check_is_fitted 6 | 7 | from ..common import validate_input 8 | from ._base import BaseObservables 9 | 10 | 11 | class RandomFourierFeatures(BaseObservables): 12 | """ 13 | Random Fourier Features for observables. 14 | 15 | This class applies the random Fourier features method for kernel approximation. 16 | It can include the system state in the kernel function. 
class RandomFourierFeatures(BaseObservables):
    """Random Fourier features observables for kernel approximation.

    Applies the random Fourier features method (Gaussian kernel by default),
    optionally including the raw system state in the feature vector.

    Args:
        include_state (bool, optional): If True, include the system state.
            Defaults to True.
        gamma (float, optional): Scale of the Gaussian kernel.
            Defaults to 1.0.
        D (int, optional): Number of random samples in the Monte Carlo
            approximation. Defaults to 100.
        random_state (int or None, optional): Seed for repeatable
            experiments. Defaults to None.

    Attributes:
        measurement_matrix_ (np.ndarray): Right-multiplying a row feature
            vector by this matrix recovers the system state.
        n_input_features_ (int): Dimension of the input features.
        n_output_features_ (int): Dimension of the lifted features.
        w (np.ndarray): Randomly sampled frequencies, shape
            (n_input_features_, D).
    """

    def __init__(self, include_state=True, gamma=1.0, D=100, random_state=None):
        """Initialize the RandomFourierFeatures observable; see class docs."""
        super(RandomFourierFeatures, self).__init__()
        self.include_state = include_state
        self.gamma = gamma
        self.D = D
        self.random_state = random_state

    def fit(self, x, y=None):
        """Set up the observable from measurement data.

        Args:
            x (np.ndarray): Measurement data, shape (n_samples,
                n_input_features_).
            y (np.ndarray, optional): Unused. Defaults to None.

        Returns:
            RandomFourierFeatures: The fitted instance.
        """
        x = validate_input(x)
        # Draw frequencies from a local RandomState instead of reseeding the
        # global numpy RNG (np.random.seed), so fitting has no side effect on
        # callers; for a fixed integer seed the draws are identical to the
        # previous global-seed behavior.
        rng = np.random.RandomState(self.random_state)
        self.n_consumed_samples = 0

        self.n_input_features_ = x.shape[1]
        # cos and sin halves double the output dimension; the Monte Carlo
        # convergence rate is governed by D alone.
        self.n_output_features_ = 2 * self.D
        if self.include_state is True:
            self.n_output_features_ += self.n_input_features_

        # 1. generate (n_feature, n_component) random frequencies w.
        self.w = np.sqrt(2.0 * self.gamma) * rng.normal(
            0, 1, [self.n_input_features_, self.D]
        )

        # 2. build the measurement matrix C that maps features to the state.
        if self.include_state:
            self.measurement_matrix_ = np.zeros(
                (self.n_input_features_, self.n_output_features_)
            )
            self.measurement_matrix_[
                : self.n_input_features_, : self.n_input_features_
            ] = np.eye(self.n_input_features_)
        else:
            # Without the state among the features, recover it from the
            # lifted data by least squares. rcond=None uses the modern
            # default cutoff and silences numpy's FutureWarning.
            z = self._rff_lifting(x)
            self.measurement_matrix_ = np.linalg.lstsq(z, x, rcond=None)[0].T

        return self

    def transform(self, x):
        """Evaluate the observables at ``x``.

        Args:
            x (np.ndarray): Measurement data, shape (n_samples,
                n_input_features_).

        Returns:
            np.ndarray: Observables evaluated at ``x``, shape (n_samples,
            n_output_features_).
        """
        check_is_fitted(self, "n_input_features_")
        z_rff = self._rff_lifting(x)
        if self.include_state:
            z = np.zeros((x.shape[0], self.n_output_features_))
            z[:, : x.shape[1]] = x
            z[:, x.shape[1] :] = z_rff
        else:
            z = z_rff
        return z

    def get_feature_names(self, input_features=None):
        """Return the names of the observables.

        Args:
            input_features (list of str, optional): Names for the input
                features. Defaults to "x0", "x1", ....

        Returns:
            list of str: Observable names, length n_output_features_.
        """
        check_is_fitted(self, "n_input_features_")

        if input_features is None:
            input_features = [f"x{i}" for i in range(self.n_input_features_)]
        elif len(input_features) != self.n_input_features_:
            raise ValueError(
                "input_features must have n_input_features_ "
                f"({self.n_input_features_}) elements"
            )

        # Copy so the caller's list is never mutated.
        output_features = list(input_features) if self.include_state else []
        output_features += [f"cos(w_{i}'x)/sqrt({self.D})" for i in range(self.D)]
        output_features += [f"sin(w_{i}'x)/sqrt({self.D})" for i in range(self.D)]
        return output_features

    def _rff_lifting(self, x):
        """Compute the random Fourier features of ``x``.

        Features are ``[cos(x w), sin(x w)] / sqrt(D)``.

        Args:
            x (np.ndarray): System state, shape (n_samples,
                n_input_features_).

        Returns:
            np.ndarray: Features, shape (n_samples, 2 * D).
        """
        xw = np.dot(x, self.w)
        return np.hstack([np.cos(xw), np.sin(xw)]) / np.sqrt(self.D)
class TimeDelay(BaseObservables):
    """Time-delay observables.

    Supplements each state variable with time-lagged copies of itself. With
    :code:`delay =` :math:`\\Delta t` and :code:`n_delays = 2`, the state
    :math:`[x(t), y(t)]` becomes

    .. math::
        [x(t), y(t), x(t-\\Delta t), y(t-\\Delta t),
        x(t-2\\Delta t), y(t - 2\\Delta t)]

    When transforming data the first :code:`delay * n_delays` rows/samples
    are dropped, as they lack sufficient time history.

    References:
        Brunton, Steven L., et al. "Chaos as an intermittently forced linear
        system." Nature communications 8.1 (2017): 1-9.

        Susuki, Yoshihiko, and Igor Mezić. "A prony approximation of Koopman
        mode decomposition." 2015 54th IEEE CDC (2015).

        Arbabi, Hassan, and Igor Mezic. "Ergodic theory, dynamic mode
        decomposition, and computation of spectral properties of the Koopman
        operator." SIAM J. Applied Dynamical Systems 16.4 (2017): 2096-2126.

    Args:
        delay (int, optional): Stride between consecutive delays.
            Defaults to 1.
        n_delays (int, optional): Number of delays per variable.
            Defaults to 2.

    Attributes:
        include_state (bool): Always True; the undelayed state is included.
        delay (int): Stride between consecutive delays.
        n_delays (int): Number of delays per variable.
    """

    def __init__(self, delay=1, n_delays=2):
        """Initialize the TimeDelay observable.

        Args:
            delay (int, optional): Stride of each delay. Defaults to 1.
            n_delays (int, optional): Number of delays per variable.
                Defaults to 2.

        Raises:
            ValueError: If ``delay`` or ``n_delays`` is negative.
        """
        super(TimeDelay, self).__init__()
        if delay < 0:
            raise ValueError("delay must be a nonnegative int")
        if n_delays < 0:
            raise ValueError("n_delays must be a nonnegative int")

        self.include_state = True
        self.delay = int(delay)
        self.n_delays = int(n_delays)
        # Leading samples consumed as history before the first output sample.
        self._n_consumed_samples = self.delay * self.n_delays

    def fit(self, x, y=None):
        """Fit the observable to measurement data.

        Args:
            x (array-like): Input data, shape (n_samples, n_input_features).
            y (None): Unused; kept for sklearn compatibility.

        Returns:
            TimeDelay: The fitted instance.
        """
        x = validate_input(x)
        _, n_features = x.shape

        self.n_input_features_ = n_features
        self.n_output_features_ = n_features * (1 + self.n_delays)

        # Identity in the leading block recovers the undelayed state;
        # np.eye(n, M) is the (n, M) matrix with ones on the main diagonal.
        self.measurement_matrix_ = np.eye(
            self.n_input_features_, self.n_output_features_
        )

        return self

    def transform(self, x):
        """Add time-delay features, dropping the first ``delay * n_delays``
        samples (which lack sufficient history). The previous docstring
        mistakenly described this count as ``delay - n_delays``.

        Args:
            x (array-like): Input data, shape (n_samples, n_input_features).
                Rows must be equi-spaced in time and in sequential order.

        Returns:
            array-like: Transformed data, shape
            (n_samples - delay * n_delays, n_output_features).

        Raises:
            ValueError: If ``x`` has the wrong number of features or too few
                rows to form any delays.
        """
        check_is_fitted(self, "n_input_features_")
        x = validate_input(x)

        if x.shape[1] != self.n_input_features_:
            raise ValueError(
                "Wrong number of input features. "
                f"Expected x.shape[1] = {self.n_input_features_}; "
                f"instead x.shape[1] = {x.shape[1]}."
            )

        self._n_consumed_samples = self.delay * self.n_delays
        if len(x) < self._n_consumed_samples + 1:
            raise ValueError(
                "x has too few rows. To compute time-delay features with "
                f"delay = {self.delay} and n_delays = {self.n_delays} "
                f"x must have at least {self._n_consumed_samples + 1} rows."
            )

        # Vectorized construction (replaces a per-row Python loop): column
        # block j holds x shifted back by j * delay samples, so stacking the
        # blocks yields [x(t), x(t-dt), ..., x(t-k*dt)] per row.
        head = self._n_consumed_samples
        blocks = [x[head:]]
        for j in range(1, self.n_delays + 1):
            shift = j * self.delay
            blocks.append(x[head - shift : x.shape[0] - shift])
        return np.hstack(blocks)

    def get_feature_names(self, input_features=None):
        """Return the names of the output features.

        Args:
            input_features (list of str, optional): Names for the input
                features. Defaults to "x0", "x1", ....

        Returns:
            list of str: Output feature names.
        """
        check_is_fitted(self, "n_input_features_")
        if input_features is None:
            input_features = [f"x{i}" for i in range(self.n_input_features_)]
        elif len(input_features) != self.n_input_features_:
            raise ValueError(
                "input_features must have n_input_features_ "
                f"({self.n_input_features_}) elements"
            )

        output_features = [f"{xi}(t)" for xi in input_features]
        output_features.extend(
            [
                f"{xi}(t-{i * self.delay}dt)"
                for i in range(1, self.n_delays + 1)
                for xi in input_features
            ]
        )

        return output_features

    def _delay_inds(self, index):
        """Indices of the ``n_delays`` delayed samples preceding ``index``.

        Args:
            index (int): Index from which to compute the delay indices.

        Returns:
            array-like: The delay indices, nearest lag first.
        """
        return index - self.delay * arange(1, self.n_delays + 1)

    @property
    def n_consumed_samples(self):
        """Number of leading samples consumed as "initial conditions", i.e.
        samples for which time delays cannot be computed.

        Returns:
            int: The number of consumed samples.
        """
        return self._n_consumed_samples

        Returns:
            int: The number of consumed samples.
        """
        return self._n_consumed_samples
--------------------------------------------------------------------------------
/src/pykoopman/regression/__init__.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from ._base import BaseRegressor
from ._base_ensemble import EnsembleBaseRegressor
from ._dmd import PyDMDRegressor
from ._dmdc import DMDc
from ._edmd import EDMD
from ._edmdc import EDMDc
from ._havok import HAVOK
from ._kdmd import KDMD
from ._nndmd import NNDMD

__all__ = [
    "PyDMDRegressor",
    "EDMD",
    "KDMD",
    "DMDc",
    "EDMDc",
    "EnsembleBaseRegressor",
    "HAVOK",
    "NNDMD",
]
--------------------------------------------------------------------------------
/src/pykoopman/regression/_base.py:
--------------------------------------------------------------------------------
"""module for base class of regressor"""
from __future__ import annotations

from abc import ABC
from abc import abstractmethod

from sklearn.base import BaseEstimator


class BaseRegressor(BaseEstimator, ABC):
    """
    Base class for PyKoopman regressors.

    This class provides a wrapper for regressors used in the PyKoopman package.
    It's designed to be used with any regressor object that implements `fit`
    and `predict` methods following the `sklearn.base.BaseEstimator` interface.

    Note: This is an abstract base class, and should not be instantiated directly.
    Instead, a subclass should be created that implements the required abstract methods.

    Args:
        regressor (BaseEstimator): A regressor object implementing `fit` and `predict`
            methods.

    Attributes:
        regressor (BaseEstimator): The regressor object passed during initialization.

    Abstract methods:
        coef_ : Should return the coefficients of the regression model.

        state_matrix_ : Should return the state matrix of the dynamic system.

        eigenvectors_ : Should return the eigenvectors of the system.

        eigenvalues_ : Should return the eigenvalues of the system.

        _compute_phi(x_col) : Should compute and return the phi function on given data.

        _compute_psi(x_col) : Should compute and return the psi function on given data.

        ur : Should return the u_r of the system.

        unnormalized_modes : Should return the unnormalized modes of the system.
    """

    def __init__(self, regressor):
        # Duck-typed validation: accept any object exposing callable
        # fit/predict rather than insisting on a sklearn subclass.
        # check .fit
        if not hasattr(regressor, "fit") or not callable(getattr(regressor, "fit")):
            raise AttributeError("regressor does not have a callable fit method")
        # check .predict
        if not hasattr(regressor, "predict") or not callable(
            getattr(regressor, "predict")
        ):
            raise AttributeError("regressor does not have a callable predict method")
        self.regressor = regressor

    def fit(self, x, y=None):
        # Concrete subclasses override this; kept non-abstract so the
        # signature can vary (e.g. extra control input u) in subclasses.
        raise NotImplementedError

    def predict(self, x):
        raise NotImplementedError

    @abstractmethod
    def coef_(self):
        pass

    @abstractmethod
    def state_matrix_(self):
        pass

    @abstractmethod
    def eigenvectors_(self):
        pass

    @abstractmethod
    def eigenvalues_(self):
        pass

    @abstractmethod
    def _compute_phi(self, x_col):
        pass

    @abstractmethod
    def _compute_psi(self, x_col):
        pass

    @abstractmethod
    def ur(self):
        pass

    @abstractmethod
    def unnormalized_modes(self):
        pass
--------------------------------------------------------------------------------
/src/pykoopman/regression/_dmd.py:
--------------------------------------------------------------------------------
"""module for dmd"""
# from warnings import warn
from __future__ import annotations

import numpy as np
from pydmd import DMDBase
from pydmd.dmdbase import DMDTimeDict
from pydmd.utils import compute_svd
from pydmd.utils import compute_tlsq
from scipy.linalg import eig
from scipy.linalg import sqrtm
from sklearn.utils.validation import check_is_fitted

from ._base import BaseRegressor


class PyDMDRegressor(BaseRegressor):
    """
    PyDMDRegressor is a wrapper for `pydmd` regressors.

    This class provides a wrapper for the `pydmd` regressor. The details about
    `pydmd` can be found in the reference:

    Demo, N., Tezzele, M., & Rozza, G. (2018). PyDMD: Python dynamic mode decomposition.
    Journal of Open Source Software, 3(22), 530.
    `_

    Args:
        regressor (DMDBase): A regressor instance from DMDBase in `pydmd`.
        tikhonov_regularization (bool or None, optional): Indicates if Tikhonov
            regularization should be applied. Defaults to None.

    Attributes:
        tlsq_rank (int): Rank for truncation in TLSQ method. If 0, no noise reduction
            is computed. If positive, it is used for SVD truncation.
        svd_rank (int): Rank for truncation. If 0, optimal rank is computed and used
            for truncation. If positive integer, it is used for truncation. If float
            between 0 and 1, the rank is the number of the biggest singular values
            that are needed to reach the 'energy' specified by `svd_rank`. If -1, no
            truncation is computed.
        forward_backward (bool): If True, the low-rank operator is computed like in
            fbDMD.
        tikhonov_regularization (bool or None, optional): If None, no regularization
            is applied. If float, it is used as the Tikhonov regularization parameter.
        flag_xy (bool): If True, the regressor is operating on multiple trajectories
            instead of just one.
        n_samples_ (int): Number of samples.
        n_input_features_ (int): Number of features, i.e., the dimension of phi.
        _unnormalized_modes (ndarray): Raw DMD V with each column as one DMD mode.
        _state_matrix_ (ndarray): DMD state transition matrix.
        _reduced_state_matrix_ (ndarray): Reduced DMD state transition matrix.
        _eigenvalues_ (ndarray): Identified Koopman lambda.
        _eigenvectors_ (ndarray): Identified Koopman eigenvectors.
        _coef_ (ndarray): Weight vectors of the regression problem. Corresponds to
            either [A] or [A,B].
        C (ndarray): Matrix that maps psi to the input features.
    """

    def __init__(self, regressor, tikhonov_regularization=None):
        """
        Initializes a PyDMDRegressor instance.

        Args:
            regressor (DMDBase): A regressor instance from DMDBase in `pydmd`.
            tikhonov_regularization (bool or None, optional): Indicates if Tikhonov
                regularization should be applied. Defaults to None.

        Raises:
            ValueError: If regressor is not a subclass of DMDBase from pydmd.
        """
        if not isinstance(regressor, DMDBase):
            raise ValueError("regressor must be a subclass of DMDBase from pydmd.")
        # Assigned directly; the BaseRegressor fit/predict duck-type check is
        # bypassed here (see commented-out super() call below).
        self.regressor = regressor
        # super(PyDMDRegressor, self).__init__(regressor)
        # Pull the configuration off the wrapped pydmd object (private
        # attributes of the pydmd operator).
        self.tlsq_rank = regressor._tlsq_rank
        self.svd_rank = regressor._Atilde._svd_rank
        self.forward_backward = regressor._Atilde._forward_backward
        self.tikhonov_regularization = tikhonov_regularization
        self.flag_xy = False
        self._ur = None

    def fit(self, x, y=None, dt=1):
        """
        Fit the PyDMDRegressor model according to the given training data.

        Args:
            x (np.ndarray): Measurement data input. Should be of shape (n_samples,
                n_features).
            y (np.ndarray, optional): Measurement data output to be fitted. Should be
                of shape (n_samples, n_features). Defaults to None.
            dt (float, optional): Time interval between `x` and `y`. Defaults to 1.

        Returns:
            self : Returns the instance itself.
        """

        self.n_samples_, self.n_input_features_ = x.shape

        if y is None:
            # single trajectory
            self.flag_xy = False
            X = x[:-1].T
            Y = x[1:].T
        else:
            # multiple segments of trajectories
            self.flag_xy = True
            X = x.T
            Y = y.T

        X, Y = compute_tlsq(X, Y, self.tlsq_rank)
        U, s, V = compute_svd(X, self.svd_rank)

        if self.tikhonov_regularization is not None:
            _norm_X = np.linalg.norm(X)
        else:
            _norm_X = 0

        atilde = self._least_square_operator(
            U, s, V, Y, self.tikhonov_regularization, _norm_X
        )
        if self.forward_backward:
            # b stands for "backward"
            bU, bs, bV = compute_svd(Y, svd_rank=len(s))
            atilde_back = self._least_square_operator(
                bU, bs, bV, X, self.tikhonov_regularization, _norm_X
            )
            # fbDMD: geometric mean of forward and inverse-backward operators.
            atilde = sqrtm(atilde @ np.linalg.inv(atilde_back))

        # - V, lamda, eigenvectors
        self._coef_ = atilde
        self._state_matrix_ = atilde
        [self._eigenvalues_, self._eigenvectors_] = eig(self._state_matrix_)

        # self._coef_ = U @ atilde @ U.conj().T
        # self._state_matrix_ = self._coef_
        # self._reduced_state_matrix_ = atilde
        # [self._eigenvalues_, self._eigenvectors_] = eig(self._reduced_state_matrix_)
        self._ur = U
        self._unnormalized_modes = self._ur @ self._eigenvectors_

        # Pseudo-inverse of the modes, cached for _compute_psi.
        self._tmp_compute_psi = np.linalg.pinv(self.unnormalized_modes)

        # self.C = np.linalg.inv(self._eigenvectors_) @ U.conj().T
        # self._modes_ = U.dot(self._eigenvectors_)

        return self

    def predict(self, x):
        """
        Predict the future values based on the input measurement data.

        Args:
            x (np.ndarray): Measurement data upon which to base the prediction.
                Should be of shape (n_samples, n_features).

        Returns:
            np.ndarray: Predicted values of `x` one timestep in the future. The shape
            is (n_samples, n_features).
        """
        if x.ndim == 1:
            x = x.reshape(1, -1)
        check_is_fitted(self, "coef_")
        # Row-wise form of x' = (Ur A Ur^H) x.
        y = np.linalg.multi_dot([self.ur, self._coef_, self.ur.conj().T, x.T]).T
        return y

    def _compute_phi(self, x_col):
        """
        Compute the `phi(x)` value given `x`.

        Args:
            x_col (np.ndarray): Input data, if one-dimensional it will be reshaped
                to (-1, 1).

        Returns:
            np.ndarray: Computed `phi(x)` value.
        """
        if x_col.ndim == 1:
            x_col = x_col.reshape(-1, 1)
        # NOTE(review): uses ur.T here, whereas EDMD._compute_phi uses
        # ur.conj().T; for complex U these differ -- confirm intended.
        phi = self.ur.T @ x_col
        return phi

    def _compute_psi(self, x_col):
        """
        Compute the `psi(x)` value given `x`.

        Args:
            x_col (np.ndarray): Input data, if one-dimensional it will be reshaped
                to (-1, 1).

        Returns:
            np.ndarray: Value of Koopman eigenfunction psi at x.
        """

        # compute psi - one column if x is a row
        if x_col.ndim == 1:
            x_col = x_col.reshape(-1, 1)
        psi = self._tmp_compute_psi @ x_col
        return psi

    def _set_initial_time_dictionary(self, time_dict):
        """
        Sets the initial values for `time_dict` and `original_time`.
        Typically called in `fit()` and not used again afterwards.

        Args:
            time_dict (dict): Initial time dictionary for this DMD instance. Must
                contain the keys "t0", "tend", and "dt".

        Raises:
            ValueError: If the time_dict does not contain the keys "t0", "tend" and
                "dt" or if it contains more than these keys.
        """

        if not ("t0" in time_dict and "tend" in time_dict and "dt" in time_dict):
            raise ValueError(
                'time_dict must contain the keys "t0", ' '"tend" and "dt".'
            )
        if len(time_dict) > 3:
            raise ValueError(
                'time_dict must contain only the keys "t0", ' '"tend" and "dt".'
            )

        self._original_time = DMDTimeDict(dict(time_dict))
        self._dmd_time = DMDTimeDict(dict(time_dict))

    def _least_square_operator(self, U, s, V, Y, tikhonov_regularization, _norm_X):
        """
        Calculates the least square estimation 'A' using the provided parameters.

        Args:
            U (numpy.ndarray): Left singular vectors, shape (n_features, svd_rank).
            s (numpy.ndarray): Singular values, shape (svd_rank, ).
            V (numpy.ndarray): Right singular vectors, shape (n_features, svd_rank).
            Y (numpy.ndarray): Measurement data for prediction, shape (n_samples,
                n_features).
            tikhonov_regularization (bool or NoneType): Tikhonov parameter for
                regularization. If `None`, no regularization is applied, if `float`,
                it is used as the :math:`\\lambda` tikhonov parameter.
            _norm_X (numpy.ndarray): Norm of `X` for Tikhonov regularization, shape
                (n_samples, n_features).

        Returns:
            numpy.ndarray: The least square estimation 'A', shape (svd_rank, svd_rank).
        """

        if tikhonov_regularization is not None:
            # Regularized singular values: (s^2 + lambda*||X||) / s.
            s = (s**2 + tikhonov_regularization * _norm_X) * np.reciprocal(s)
        A = np.linalg.multi_dot([U.T.conj(), Y, V]) * np.reciprocal(s)
        return A

    @property
    def coef_(self):
        """
        The weight vectors of the regression problem.

        This method checks if the regressor is fitted before returning the coefficient.

        Returns:
            numpy.ndarray: The coefficient matrix.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_coef_")
        return self._coef_

    @property
    def state_matrix_(self):
        """
        The DMD state transition matrix.

        This method checks if the regressor is fitted before returning the state matrix.

        Returns:
            numpy.ndarray: The state transition matrix.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_state_matrix_")
        return self._state_matrix_

    @property
    def eigenvalues_(self):
        """
        The identified Koopman eigenvalues.

        This method checks if the regressor is fitted before returning the eigenvalues.

        Returns:
            numpy.ndarray: The Koopman eigenvalues.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_eigenvalues_")
        return self._eigenvalues_

    @property
    def eigenvectors_(self):
        """
        The identified Koopman eigenvectors.

        This method checks if the regressor is fitted before returning the eigenvectors.

        Returns:
            numpy.ndarray: The Koopman eigenvectors.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_eigenvectors_")
        return self._eigenvectors_

    @property
    def unnormalized_modes(self):
        """
        The raw DMD V with each column as one DMD mode.

        This method checks if the regressor is fitted before returning the unnormalized
        modes.

        Returns:
            numpy.ndarray: The unnormalized modes.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_unnormalized_modes")
        return self._unnormalized_modes

    @property
    def ur(self):
        """
        The left singular vectors 'U'.

        This method checks if the regressor is fitted before returning 'U'.

        Returns:
            numpy.ndarray: The left singular vectors 'U'.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_ur")
        return self._ur
--------------------------------------------------------------------------------
/src/pykoopman/regression/_edmd.py:
--------------------------------------------------------------------------------
"""module for extended dmd"""
# from warnings import warn
from __future__ import annotations

import numpy as np
import scipy
from pydmd.dmdbase import DMDTimeDict
from pydmd.utils import compute_svd
from pydmd.utils import compute_tlsq
from sklearn.utils.validation import check_is_fitted

from ._base import BaseRegressor


class EDMD(BaseRegressor):
    """Extended DMD (EDMD) regressor.

    Aims to determine the system matrices A,C that satisfy y' = Ay and x = Cy,
    where y' is the time-shifted observable with y0 = phi(x0). C is the measurement
    matrix that maps back to the state.

    The objective functions, \\|Y'-AY\\|_F, are minimized using least-squares regression
    and singular value decomposition.

    See the following reference for more details:
    `M.O. Williams, I.G. Kevrekidis, C.W. Rowley
    "A Data–Driven Approximation of the Koopman Operator:
    Extending Dynamic Mode Decomposition."
    Journal of Nonlinear Science, Vol. 25, 1307-1346, 2015.
    `_

    Attributes:
        _coef_ (numpy.ndarray): Weight vectors of the regression problem. Corresponds
            to either [A] or [A,B].
        _state_matrix_ (numpy.ndarray): Identified state transition matrix A of the
            underlying system.
        _eigenvalues_ (numpy.ndarray): Identified Koopman lambda.
        _eigenvectors_ (numpy.ndarray): Identified Koopman eigenvectors.
        _unnormalized_modes_ (numpy.ndarray): Identified Koopman eigenvectors.
        n_samples_ (int): Number of samples.
        n_input_features_ (int): Number of input features.
        C (numpy.ndarray): Matrix that maps psi to the input features.
    """

    def __init__(self, svd_rank=1.0, tlsq_rank=0):
        """Initialize the EDMD regressor.

        Args:
            svd_rank (float): Rank parameter for singular value decomposition.
                Default is 1.0.
            tlsq_rank (int): Rank parameter for total least squares. Default is 0.
        """
        self.svd_rank = svd_rank
        self.tlsq_rank = tlsq_rank

    def fit(self, x, y=None, dt=None):
        """Fit the EDMD regressor to the given data.

        Args:
            x (numpy.ndarray): Measurement data to be fit.
            y (numpy.ndarray, optional): Time-shifted measurement data to be fit.
                Defaults to None.
            dt (scalar, optional): Discrete time-step. Defaults to None.

        Returns:
            self: Fitted EDMD instance.
        """
        self.n_samples_, self.n_input_features_ = x.shape

        if y is None:
            # Single trajectory: build the shift pair from consecutive rows.
            X1 = x[:-1, :]
            X2 = x[1:, :]
        else:
            X1 = x
            X2 = y

        # perform SVD
        X1T, X2T = compute_tlsq(X1.T, X2.T, self.tlsq_rank)
        U, s, V = compute_svd(X1T, self.svd_rank)

        # X1, X2 are row-wise data, so there is a transpose in the end.
        self._coef_ = U.conj().T @ X2T @ V @ np.diag(np.reciprocal(s))
        # self._coef_ = np.linalg.lstsq(X1, X2)[0].T  # [0:Nlift, 0:Nlift]
        self._state_matrix_ = self._coef_
        [self._eigenvalues_, self._eigenvectors_] = scipy.linalg.eig(self.state_matrix_)
        # self._ur = np.eye(self.n_input_features_)
        self._ur = U
        # self._unnormalized_modes = self._eigenvectors_
        self._unnormalized_modes = self._ur @ self._eigenvectors_

        # np.linalg.pinv(self._unnormalized_modes)
        self._tmp_compute_psi = np.linalg.inv(self._eigenvectors_) @ self._ur.conj().T

        return self

    def predict(self, x):
        """Predict the next timestep based on the given data.

        Args:
            x (numpy.ndarray): Measurement data upon which to base prediction.

        Returns:
            y (numpy.ndarray): Prediction of x one timestep in the future.
        """
        check_is_fitted(self, "coef_")
        # Row-wise form of x' = (Ur A Ur^H) x.
        y = x @ self.ur.conj() @ self.state_matrix_.T @ self.ur.T
        return y

    def _compute_phi(self, x_col):
        """Compute phi(x) given x.

        Args:
            x_col (numpy.ndarray): Input data x.

        Returns:
            phi (numpy.ndarray): Value of phi(x).
        """
        if x_col.ndim == 1:
            x_col = x_col.reshape(-1, 1)
        phi = self._ur.conj().T @ x_col
        return phi

    def _compute_psi(self, x_col):
        """Compute psi(x) given x.

        Args:
            x_col (numpy.ndarray): Input data x.

        Returns:
            psi (numpy.ndarray): Value of psi(x).
        """
        # compute psi - one column if x is a row
        if x_col.ndim == 1:
            x_col = x_col.reshape(-1, 1)
        psi = self._tmp_compute_psi @ x_col
        return psi

    def _set_initial_time_dictionary(self, time_dict):
        """Set the initial values for the class fields time_dict and original_time.

        Args:
            time_dict (dict): Initial time dictionary for this DMD instance.
        """
        if not ("t0" in time_dict and "tend" in time_dict and "dt" in time_dict):
            raise ValueError('time_dict must contain the keys "t0", "tend" and "dt".')
        if len(time_dict) > 3:
            raise ValueError(
                'time_dict must contain only the keys "t0", "tend" and "dt".'
            )

        self._original_time = DMDTimeDict(dict(time_dict))
        self._dmd_time = DMDTimeDict(dict(time_dict))

    @property
    def coef_(self):
        """
        Weight vectors of the regression problem. Corresponds to either [A] or
        [A,B].

        """
        check_is_fitted(self, "_coef_")
        return self._coef_

    @property
    def state_matrix_(self):
        """
        The EDMD state transition matrix.

        This method checks if the regressor is fitted before returning the state matrix.

        Returns:
            numpy.ndarray: The state transition matrix.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_state_matrix_")
        return self._state_matrix_

    @property
    def eigenvalues_(self):
        """
        The identified Koopman eigenvalues.

        This method checks if the regressor is fitted before returning the eigenvalues.

        Returns:
            numpy.ndarray: The Koopman eigenvalues.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_eigenvalues_")
        return self._eigenvalues_

    @property
    def eigenvectors_(self):
        """
        The identified Koopman eigenvectors.

        This method checks if the regressor is fitted before returning the eigenvectors.

        Returns:
            numpy.ndarray: The Koopman eigenvectors.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_eigenvectors_")
        return self._eigenvectors_

    @property
    def unnormalized_modes(self):
        """
        The raw EDMD V with each column as one EDMD mode.

        This method checks if the regressor is fitted before returning the unnormalized
        modes. Note that this will combined with the measurement matrix from the
        observer to give you the true Koopman modes

        Returns:
            numpy.ndarray: The unnormalized modes.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_unnormalized_modes")
        return self._unnormalized_modes

    @property
    def ur(self):
        """
        The left singular vectors 'U'.

        This method checks if the regressor is fitted before returning 'U'.

        Returns:
            numpy.ndarray: The left singular vectors 'U'.

        Raises:
            NotFittedError: If the regressor is not fitted yet.
        """
        check_is_fitted(self, "_ur")
        return self._ur
--------------------------------------------------------------------------------
/src/pykoopman/regression/_edmdc.py:
--------------------------------------------------------------------------------
"""module for extended dmd with control"""
from __future__ import annotations

import numpy as np
from sklearn.utils.validation import check_is_fitted

from ._base import BaseRegressor

# TODO: add support for time delay observables, so we will
# have n_consumption_.


class EDMDc(BaseRegressor):
    """Module for Extended DMD with control (EDMDc) regressor.

    Aims to determine the system matrices A, B, C that satisfy y' = Ay + Bu and x = Cy,
    where y' is the time-shifted observable with y0 = phi(x0) and u is the control
    input. B and C are the unknown control and measurement matrices, respectively.

    The objective functions, \\|Y'-AY-BU\\|_F and \\|X-CY\\|_F, are minimized using
    least-squares regression and singular value decomposition.

    See the following reference for more details:
    Korda, M. and Mezic, I. "Linear predictors for nonlinear dynamical systems:
    Koopman operator meets model predictive control." Automatica, Vol. 93, 149–160.


    Attributes:
        coef_ (numpy.ndarray):
            Weight vectors of the regression problem. Corresponds to either [A] or
            [A,B].
        state_matrix_ (numpy.ndarray):
            Identified state transition matrix A of the underlying system.
        control_matrix_ (numpy.ndarray):
            Identified control matrix B of the underlying system.
        projection_matrix_ (numpy.ndarray):
            Projection matrix into low-dimensional subspace of shape (n_input_features
            +n_control_features, svd_rank).
        projection_matrix_output_ (numpy.ndarray):
            Projection matrix into low-dimensional subspace of shape (n_input_features
            +n_control_features, svd_output_rank).
42 | """ 43 | 44 | def __init__(self): 45 | """Initialize the EDMDc regressor.""" 46 | pass 47 | 48 | def fit(self, x, y=None, u=None, dt=None): 49 | """Fit the EDMDc regressor to the given data. 50 | 51 | Args: 52 | x (numpy.ndarray): 53 | Measurement data to be fit. 54 | y (numpy.ndarray, optional): 55 | Time-shifted measurement data to be fit. Defaults to None. 56 | u (numpy.ndarray, optional): 57 | Time series of external actuation/control. Defaults to None. 58 | dt (scalar, optional): 59 | Discrete time-step. Defaults to None. 60 | 61 | Returns: 62 | self: Fitted EDMDc instance. 63 | """ 64 | self.n_samples_, self.n_input_features_ = x.shape 65 | if y is None: 66 | X1 = x[:-1, :] 67 | X2 = x[1:, :] 68 | else: 69 | X1 = x 70 | X2 = y 71 | 72 | if u.ndim == 1: 73 | if len(u) > X1.shape[0]: 74 | u = u[:-1] 75 | C = u[np.newaxis, :] 76 | else: 77 | if u.shape[0] > X1.shape[0]: 78 | u = u[:-1, :] 79 | C = u 80 | self.n_control_features_ = C.shape[1] 81 | 82 | self._fit_with_unknown_b(X1, X2, C) 83 | return self 84 | 85 | def _fit_with_unknown_b(self, X1, X2, U): 86 | """Fit the EDMDc regressor with unknown control matrix B. 87 | 88 | Args: 89 | X1 (numpy.ndarray): 90 | Measurement data given as input. 91 | X2 (numpy.ndarray): 92 | Measurement data given as target. 93 | U (numpy.ndarray): 94 | Time series of external actuation/control. 
95 | """ 96 | Nlift = X1.shape[1] 97 | W = X2.T 98 | V = np.vstack([X1.T, U.T]) 99 | VVt = V @ V.T 100 | WVt = W @ V.T 101 | M = WVt @ np.linalg.pinv(VVt) # Matrix [A B] 102 | self._state_matrix_ = M[0:Nlift, 0:Nlift] 103 | self._control_matrix_ = M[0:Nlift, Nlift:] 104 | self._coef_ = M 105 | 106 | # Compute Koopman V, eigenvectors, lamda 107 | [self._eigenvalues_, self._eigenvectors_] = np.linalg.eig(self.state_matrix_) 108 | self._unnormalized_modes = self._eigenvectors_ 109 | self._ur = np.eye(self.n_input_features_) 110 | self._tmp_compute_psi = np.linalg.inv(self._eigenvectors_) 111 | 112 | def predict(self, x, u): 113 | """Predict the next timestep based on the given data. 114 | 115 | Args: 116 | x (numpy.ndarray): 117 | Measurement data upon which to base prediction. 118 | u (numpy.ndarray): 119 | Time series of external actuation/control. 120 | 121 | Returns: 122 | y (numpy.ndarray): 123 | Prediction of x one timestep in the future. 124 | """ 125 | check_is_fitted(self, "coef_") 126 | y = x @ self.state_matrix_.T + u @ self.control_matrix_.T 127 | return y 128 | 129 | def _compute_phi(self, x_col): 130 | """Compute psi(x) given x. 131 | 132 | Args: 133 | x_col (numpy.ndarray): 134 | Input data x. 135 | 136 | Returns: 137 | psi (numpy.ndarray): 138 | Value of psi(x). 139 | """ 140 | if x_col.ndim == 1: 141 | x_col = x_col.reshape(-1, 1) 142 | phi = self._ur.T @ x_col 143 | return phi 144 | 145 | def _compute_psi(self, x_col): 146 | """Compute psi(x) given x. 147 | 148 | Args: 149 | x_col (numpy.ndarray): 150 | Input data x. 151 | 152 | Returns: 153 | psi (numpy.ndarray): 154 | Value of psi(x). 155 | """ 156 | # compute psi - one column if x is a row 157 | if x_col.ndim == 1: 158 | x_col = x_col.reshape(-1, 1) 159 | psi = self._tmp_compute_psi @ x_col 160 | return psi 161 | 162 | @property 163 | def coef_(self): 164 | """Weight vectors of the regression problem. 
Corresponds to either [A] or 165 | [A,B].""" 166 | check_is_fitted(self, "_coef_") 167 | return self._coef_ 168 | 169 | @property 170 | def state_matrix_(self): 171 | """Identified state transition matrix A of the underlying system. 172 | 173 | Returns: 174 | state_matrix (numpy.ndarray): 175 | State transition matrix A. 176 | """ 177 | check_is_fitted(self, "_state_matrix_") 178 | return self._state_matrix_ 179 | 180 | @property 181 | def control_matrix_(self): 182 | """Identified control matrix B of the underlying system. 183 | 184 | Returns: 185 | control_matrix (numpy.ndarray): 186 | Control matrix B. 187 | """ 188 | check_is_fitted(self, "_control_matrix_") 189 | return self._control_matrix_ 190 | 191 | @property 192 | def eigenvalues_(self): 193 | """Identified Koopman lambda. 194 | 195 | Returns: 196 | eigenvalues (numpy.ndarray): 197 | Koopman eigenvalues. 198 | """ 199 | check_is_fitted(self, "_eigenvalues_") 200 | return self._eigenvalues_ 201 | 202 | @property 203 | def eigenvectors_(self): 204 | """Identified Koopman eigenvectors. 205 | 206 | Returns: 207 | eigenvectors (numpy.ndarray): 208 | Koopman eigenvectors. 209 | """ 210 | check_is_fitted(self, "_eigenvectors_") 211 | return self._eigenvectors_ 212 | 213 | @property 214 | def unnormalized_modes(self): 215 | """Identified Koopman eigenvectors. 216 | 217 | Returns: 218 | unnormalized_modes (numpy.ndarray): 219 | Koopman eigenvectors. 220 | """ 221 | check_is_fitted(self, "_unnormalized_modes") 222 | return self._unnormalized_modes 223 | 224 | @property 225 | def ur(self): 226 | """Matrix U that is part of the SVD. 227 | 228 | Returns: 229 | ur (numpy.ndarray): 230 | Matrix U. 
231 | """ 232 | check_is_fitted(self, "_ur") 233 | return self._ur 234 | -------------------------------------------------------------------------------- /src/pykoopman/regression/_havok.py: -------------------------------------------------------------------------------- 1 | """module for havok""" 2 | from __future__ import annotations 3 | 4 | from warnings import warn 5 | 6 | import numpy as np 7 | from matplotlib import pyplot as plt 8 | from optht import optht 9 | from scipy.signal import lsim 10 | from scipy.signal import lti 11 | from sklearn.utils.validation import check_is_fitted 12 | 13 | from ..common import drop_nan_rows 14 | from ..differentiation._derivative import Derivative 15 | from ._base import BaseRegressor 16 | 17 | 18 | class HAVOK(BaseRegressor): 19 | """ 20 | HAVOK (Hankel Alternative View of Koopman) regressor. 21 | 22 | Aims to determine the system matrices A, B that satisfy d/dt v = Av + Bu, 23 | where v is the vector of the leading delay coordinates and u is a low-energy 24 | delay coordinate acting as forcing. A and B are the unknown system and control 25 | matrices, respectively. The delay coordinates are obtained by computing the 26 | SVD from a Hankel matrix. 27 | 28 | The objective function, \\|dV-AV-BU\\|_F, is minimized using least-squares 29 | regression. 30 | 31 | See the following reference for more details: 32 | Brunton, S.L., Brunton, B.W., Proctor, J.L., Kaiser, E. & Kutz, J.N. 33 | "Chaos as an intermittently forced linear system." 34 | Nature Communications, Vol. 8(19), 2017. 35 | 36 | 37 | Parameters: 38 | svd_rank (int, optional): 39 | Rank of the SVD used for model reduction. Defaults to None. 40 | differentiator (Derivative, optional): 41 | Differentiation method to compute the time derivative. Defaults to 42 | Derivative(kind="finite_difference", k=1). 43 | plot_sv (bool, optional): 44 | Whether to plot the singular values. Defaults to False. 
45 | 46 | Attributes: 47 | coef_ (array): 48 | Weight vectors of the regression problem. Corresponds to either [A] or 49 | [A,B]. 50 | state_matrix_ (array): 51 | Identified state transition matrix A of the underlying system. 52 | control_matrix_ (array): 53 | Identified control matrix B of the underlying system. 54 | projection_matrix_ (array): 55 | Projection matrix into low-dimensional subspace of shape (n_input_features 56 | +n_control_features, svd_rank). 57 | projection_matrix_output_ (array): 58 | Projection matrix into low-dimensional subspace of shape (n_input_features 59 | +n_control_features, svd_output_rank). 60 | """ 61 | 62 | def __init__( 63 | self, 64 | svd_rank=None, 65 | differentiator=Derivative(kind="finite_difference", k=1), 66 | plot_sv=False, 67 | ): 68 | """ 69 | Initialize the HAVOK regressor. 70 | 71 | Args: 72 | svd_rank (int, optional): 73 | Rank of the SVD used for model reduction. Defaults to None. 74 | differentiator (Derivative, optional): 75 | Differentiation method to compute the time derivative. Defaults to 76 | Derivative(kind="finite_difference", k=1). 77 | plot_sv (bool, optional): 78 | Whether to plot the singular values. Defaults to False. 79 | """ 80 | self.svd_rank = svd_rank 81 | self.differentiator = differentiator 82 | self.plot_sv = plot_sv 83 | 84 | def fit(self, x, y=None, dt=None): 85 | """ 86 | Fit the HAVOK regressor to the given data. 87 | 88 | Args: 89 | x (numpy.ndarray): 90 | Measurement data to be fit. 91 | y (not used): 92 | Time-shifted measurement data to be fit. Ignored. 93 | dt (scalar): 94 | Discrete time-step. 95 | 96 | Returns: 97 | self: Fitted HAVOK instance. 
98 | """ 99 | 100 | if y is not None: 101 | warn("havok regressor does not require the y argument when fitting.") 102 | 103 | if dt is None: 104 | raise ValueError("havok regressor requires a timestep dt when fitting.") 105 | 106 | self.dt_ = dt 107 | self.n_samples_, self.n_input_features_ = x.shape 108 | self.n_control_features_ = 1 109 | 110 | # Create time vector 111 | t = np.arange(0, self.dt_ * self.n_samples_, self.dt_) 112 | 113 | # SVD to calculate intrinsic observables 114 | U, s, Vh = np.linalg.svd(x.T, full_matrices=False) 115 | 116 | if self.plot_sv: 117 | plt.figure() 118 | plt.semilogy(s) 119 | plt.xlabel("number of terms") 120 | plt.ylabel("singular values") 121 | plt.show() 122 | 123 | # calculate rank using optimal hard threshold by Gavish & Donoho 124 | if self.svd_rank is None: 125 | self.svd_rank = optht(x, sv=s, sigma=None) 126 | Vrh = Vh[: self.svd_rank, :] 127 | Vr = Vrh.T 128 | Ur = U[:, : self.svd_rank] 129 | sr = s[: self.svd_rank] 130 | 131 | # calculate time derivative dxdt of only the first rank-1 & normalize 132 | dVr = self.differentiator(Vr[:, :-1], t) 133 | # this line actually makes vh and dvh transposed 134 | dVr, t, V = drop_nan_rows(dVr, t, Vh.T) 135 | 136 | # regression on intrinsic variables v 137 | # xi = np.zeros((self.svd_rank - 1, self.svd_rank)) 138 | # for i in range(self.svd_rank - 1): 139 | # # here, we use rank terms in V to fit the rank-1 terms dV/dt 140 | # # we perform column wise 141 | # xi[i, :] = np.linalg.lstsq(Vr, dVr[:, i], rcond=None)[0] 142 | 143 | xi = np.linalg.lstsq(Vr, dVr, rcond=None)[0].T 144 | assert xi.shape == (self.svd_rank - 1, self.svd_rank) 145 | 146 | self.forcing_signal = Vr[:, -1] 147 | self._state_matrix_ = xi[:, :-1] 148 | self._control_matrix_ = xi[:, -1].reshape(-1, 1) 149 | 150 | self.svals = s 151 | self._ur = Ur[:, :-1] @ np.diag(sr[:-1]) 152 | self._coef_ = np.hstack([self.state_matrix_, self.control_matrix_]) 153 | 154 | eigenvalues_, self._eigenvectors_ = 
np.linalg.eig(self.state_matrix_) 155 | # because we fit the model in continuous time, 156 | # so we need to convert to discrete time 157 | self._eigenvalues_ = np.exp(eigenvalues_ * dt) 158 | 159 | self._unnormalized_modes = self._ur @ self.eigenvectors_ 160 | self._tmp_compute_psi = np.linalg.inv(self.eigenvectors_) @ self._ur.T 161 | 162 | # self.C = np.linalg.multi_dot( 163 | # [ 164 | # np.linalg.inv(self.eigenvectors_), 165 | # np.diag(np.reciprocal(s[: self.svd_rank - 1])), 166 | # U[:, : self.svd_rank - 1].T, 167 | # ] 168 | # ) 169 | 170 | def predict(self, x, u, t): 171 | """ 172 | Predict the output based on the input data. 173 | 174 | Args: 175 | x (numpy.ndarray): 176 | Measurement data upon which to base prediction. 177 | u (numpy.ndarray): 178 | Time series of external actuation/control, which is sampled at time 179 | instances in `t`. 180 | t (numpy.ndarray): 181 | Time vector. Instances at which the solution vector shall be provided. 182 | Note: The time vector must start at 0. 183 | 184 | Returns: 185 | y (numpy.ndarray): 186 | Prediction of `x` at the time instances provided in `t`. 187 | """ 188 | # if t[0] != 0: 189 | # raise ValueError("the time vector must start at 0.") 190 | 191 | check_is_fitted(self, "coef_") 192 | y0 = ( 193 | # np.linalg.inv(np.diag(self.svals[: self.svd_rank - 1])) 194 | # @ 195 | np.linalg.pinv(self._ur) 196 | @ x.T 197 | ) 198 | sys = lti( 199 | self.state_matrix_, 200 | self.control_matrix_, 201 | self._ur, 202 | np.zeros((self.n_input_features_, self.n_control_features_)), 203 | ) 204 | tout, ypred, xpred = lsim(sys, U=u, T=t, X0=y0.T) 205 | return ypred 206 | 207 | def _compute_phi(self, x_col): 208 | """ 209 | Compute the feature vector `phi(x)` given `x`. 210 | 211 | Args: 212 | x_col (numpy.ndarray): 213 | Input data `x` for computing `phi(x)`. 214 | 215 | Returns: 216 | phi (numpy.ndarray): 217 | Value of `phi(x)`. 
218 | 219 | """ 220 | if x_col.ndim == 1: 221 | x_col = x_col.reshape(-1, 1) 222 | phi = self._ur.T @ x_col 223 | return phi 224 | 225 | def _compute_psi(self, x_col): 226 | """ 227 | Compute the feature vector `psi(x)` given `x`. 228 | 229 | Args: 230 | x_col (numpy.ndarray): 231 | Input data `x` for computing `psi(x)`. 232 | 233 | Returns: 234 | psi (numpy.ndarray): 235 | Value of `psi(x)`. 236 | 237 | """ 238 | # compute psi - one column if x is a row 239 | if x_col.ndim == 1: 240 | x_col = x_col.reshape(-1, 1) 241 | psi = self._tmp_compute_psi @ x_col 242 | return psi 243 | 244 | @property 245 | def coef_(self): 246 | """ 247 | Get the weight vectors of the regression problem. 248 | 249 | Returns: 250 | coef (numpy.ndarray): 251 | Weight vectors of the regression problem. Corresponds to either [A] 252 | or [A,B]. 253 | """ 254 | check_is_fitted(self, "_coef_") 255 | return self._coef_ 256 | 257 | @property 258 | def state_matrix_(self): 259 | """ 260 | Get the identified state transition matrix A of the underlying system. 261 | 262 | Returns: 263 | state_matrix (numpy.ndarray): 264 | Identified state transition matrix A. 265 | """ 266 | check_is_fitted(self, "_state_matrix_") 267 | return self._state_matrix_ 268 | 269 | @property 270 | def control_matrix_(self): 271 | """ 272 | Get the identified control matrix B of the underlying system. 273 | 274 | Returns: 275 | control_matrix (numpy.ndarray): 276 | Identified control matrix B. 277 | """ 278 | check_is_fitted(self, "_control_matrix_") 279 | return self._control_matrix_ 280 | 281 | @property 282 | def eigenvectors_(self): 283 | """ 284 | Get the identified eigenvectors of the state matrix A. 285 | 286 | Returns: 287 | eigenvectors (numpy.ndarray): 288 | Identified eigenvectors of the state matrix A. 289 | """ 290 | check_is_fitted(self, "_eigenvectors_") 291 | return self._eigenvectors_ 292 | 293 | @property 294 | def eigenvalues_(self): 295 | """ 296 | Get the identified eigenvalues of the state matrix A. 
297 | 298 | Returns: 299 | eigenvalues (numpy.ndarray): 300 | Identified eigenvalues of the state matrix A. 301 | """ 302 | check_is_fitted(self, "_eigenvalues_") 303 | return self._eigenvalues_ 304 | 305 | @property 306 | def unnormalized_modes(self): 307 | """ 308 | Get the identified unnormalized modes. 309 | 310 | Returns: 311 | unnormalized_modes (numpy.ndarray): 312 | Identified unnormalized modes. 313 | """ 314 | check_is_fitted(self, "_unnormalized_modes") 315 | return self._unnormalized_modes 316 | 317 | @property 318 | def ur(self): 319 | """ 320 | Get the matrix UR. 321 | 322 | Returns: 323 | ur (numpy.ndarray): 324 | Matrix UR. 325 | """ 326 | check_is_fitted(self, "_ur") 327 | return self._ur 328 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamicslab/pykoopman/e3b010297a02ed8a24fbd147db7d22a53a50f724/test/__init__.py -------------------------------------------------------------------------------- /test/analytics/test_analytics.py: -------------------------------------------------------------------------------- 1 | """Test for pykoopman.analytics""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | import pykoopman as pk 6 | import pytest 7 | from pykoopman.analytics import ModesSelectionPAD21 8 | from pykoopman.common import Linear2Ddynamics 9 | 10 | 11 | @pytest.fixture 12 | def data_linear_dynamics(): 13 | # Create instance of the dynamical system 14 | sys = Linear2Ddynamics() 15 | 16 | # Collect training data 17 | n_pts = 51 18 | n_int = 1 19 | xx, yy = np.meshgrid(np.linspace(-1, 1, n_pts), np.linspace(-1, 1, n_pts)) 20 | x = np.vstack((xx.flatten(), yy.flatten())) 21 | n_traj = x.shape[1] 22 | 23 | X, Y = sys.collect_data(x, n_int, n_traj) 24 | return X, Y, sys 25 | 26 | 27 | def test_sparse_selection(data_linear_dynamics): 28 | X, Y, sys = 
data_linear_dynamics 29 | 30 | # run a vanilla model with polynomial features 31 | regressor = pk.regression.EDMD() 32 | obsv = pk.observables.Polynomial(degree=3) 33 | model = pk.Koopman(observables=obsv, regressor=regressor) 34 | model.fit(X.T, y=Y.T) 35 | 36 | # generate some validation trajectories 37 | # first trajectory 38 | n_int_val = 41 39 | # n_traj_val = 1 40 | xval = np.array([[-0.3], [-0.3]]) 41 | xval_list = [] 42 | for i in range(n_int_val): 43 | xval_list.append(xval) 44 | xval = sys.linear_map(xval) 45 | Xval1 = np.hstack(xval_list).T 46 | 47 | # second trajectory 48 | n_int_val = 17 49 | # n_traj_val = 1 50 | xval = np.array([[-0.923], [0.59]]) 51 | xval_list = [] 52 | for i in range(n_int_val): 53 | xval_list.append(xval) 54 | xval = sys.linear_map(xval) 55 | Xval2 = np.hstack(xval_list).T 56 | 57 | n_int_val = 23 58 | # n_traj_val = 1 59 | xval = np.array([[-2.5], [1.99]]) 60 | xval_list = [] 61 | for i in range(n_int_val): 62 | xval_list.append(xval) 63 | xval = sys.linear_map(xval) 64 | Xval3 = np.hstack(xval_list).T 65 | 66 | # combine three trajectories together 67 | Xval = [Xval1, Xval2, Xval3] 68 | # assemble them as a dictionary 69 | validate_data_traj = [{"t": np.arange(tmp.shape[0]), "x": tmp} for tmp in Xval] 70 | 71 | # perform analysis -- just to check if everything is running 72 | analysis = ModesSelectionPAD21( 73 | model, validate_data_traj, truncation_threshold=1e-3, plot=False 74 | ) 75 | analysis.sweep_among_best_L_modes( 76 | L=6, ALPHA_RANGE=np.logspace(-7, 1, 10), save_figure=False, plot=False 77 | ) 78 | analysis.prune_model(i_alpha=6, x_train=X.T) 79 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared pytest fixtures for unit tests. 3 | 4 | Put any datasets that are used by multiple unit test files here. 
5 | """ 6 | from __future__ import annotations 7 | 8 | import os.path 9 | 10 | import numpy as np 11 | import pytest 12 | import scipy 13 | from pykoopman.common import advance_linear_system 14 | from pykoopman.common import drss 15 | from pykoopman.common import lorenz 16 | from pykoopman.common import rev_dvdp 17 | from pykoopman.common import torus_dynamics 18 | from pykoopman.observables import CustomObservables 19 | 20 | my_path = os.path.abspath(os.path.dirname(__file__)) 21 | 22 | 23 | @pytest.fixture 24 | def data_random(): 25 | x = np.random.randn(50, 10) 26 | return x 27 | 28 | 29 | @pytest.fixture 30 | def data_random_complex(): 31 | x = np.random.randn(50, 10) + 1j * np.random.randn(50, 10) 32 | return x 33 | 34 | 35 | @pytest.fixture 36 | def data_2D_superposition(): 37 | t = np.linspace(0, 2 * np.pi, 200) 38 | x = np.linspace(-5, 5, 100) 39 | [x_grid, t_grid] = np.meshgrid(x, t) 40 | 41 | def sech(x): 42 | return 1 / np.cosh(x) 43 | 44 | f1 = sech(x_grid + 3) * np.exp(1j * 2.3 * t_grid) 45 | f2 = 2 * (sech(x_grid) * np.tanh(x_grid)) * np.exp(1j * 2.8 * t_grid) 46 | return f1 + f2 47 | 48 | 49 | @pytest.fixture 50 | def data_2D_linear_real_system(): 51 | A = np.array([[1.5, 0], [0, 0.1]]) 52 | B = np.array([[1], [0]]) 53 | x0 = np.array([4, 7]) 54 | u = 0 * np.array([-4, -2, -1, -0.5, 0, 0.5, 1, 3, 5]) 55 | n = len(u) + 1 56 | x = np.zeros([n, len(x0)]) 57 | x[0, :] = x0 58 | for i in range(n - 1): 59 | x[i + 1, :] = A.dot(x[i, :]) + B.dot(u[np.newaxis, i]) 60 | X = x 61 | return X 62 | 63 | 64 | @pytest.fixture 65 | def data_1D_cosine(): 66 | t = np.linspace(0, 2 * np.pi, 200) 67 | x = np.cos(3 * t) 68 | return x 69 | 70 | 71 | @pytest.fixture 72 | def data_custom_observables(): 73 | observables = [lambda x: x, lambda x: x**2, lambda x: 0 * x, lambda x, y: x * y] 74 | observable_names = [ 75 | lambda s: str(s), 76 | lambda s: f"{s}^2", 77 | lambda s: str(0), 78 | lambda s, t: f"{s} {t}", 79 | ] 80 | 81 | return CustomObservables(observables, 
observable_names=observable_names) 82 | 83 | 84 | @pytest.fixture 85 | def data_realistic_custom_observables(): 86 | observables = [lambda x: x**2, lambda x, y: x * y] 87 | observable_names = [ 88 | lambda s: f"{s}^2", 89 | lambda s, t: f"{s} {t}", 90 | ] 91 | 92 | return CustomObservables(observables, observable_names=observable_names) 93 | 94 | 95 | @pytest.fixture 96 | def data_2D_linear_control_system(): 97 | A = np.array([[1.5, 0], [0, 0.1]]) 98 | B = np.array([[1], [0]]) 99 | x0 = np.array([4, 7]) 100 | u = np.array([-4, -2, -1, -0.5, 0, 0.5, 1, 3, 5]) 101 | n = len(u) + 1 102 | x = np.zeros([n, len(x0)]) 103 | x[0, :] = x0 104 | for i in range(n - 1): 105 | x[i + 1, :] = A.dot(x[i, :]) + B.dot(u[np.newaxis, i]) 106 | X = x 107 | C = u[:, np.newaxis] 108 | 109 | return X, C, A, B 110 | 111 | 112 | @pytest.fixture 113 | def data_drss(): 114 | # Seed random generator for reproducibility 115 | np.random.seed(0) 116 | 117 | n_states = 5 118 | n_controls = 2 119 | n_measurements = 50 120 | A, B, C = drss(n_states, n_controls, n_measurements) 121 | 122 | x0 = np.array([4, 7, 2, 8, 0]) 123 | u = np.array( 124 | [ 125 | [ 126 | -4, 127 | -2, 128 | -1, 129 | -0.5, 130 | 0, 131 | 0.5, 132 | 1, 133 | 3, 134 | 5, 135 | 9, 136 | 8, 137 | 4, 138 | 3.5, 139 | 1, 140 | 2, 141 | 3, 142 | 1.5, 143 | 0.5, 144 | 0, 145 | 1, 146 | -1, 147 | -0.5, 148 | -2, 149 | -4, 150 | -5, 151 | -7, 152 | -9, 153 | -6, 154 | -5, 155 | -5.5, 156 | ], 157 | [ 158 | 4, 159 | 1, 160 | -1, 161 | -0.5, 162 | 0, 163 | 1, 164 | 2, 165 | 4, 166 | 3, 167 | 1.5, 168 | 1, 169 | 0, 170 | -1, 171 | -1.5, 172 | -2, 173 | -1, 174 | -3, 175 | -5, 176 | -9, 177 | -7, 178 | -5, 179 | -6, 180 | -8, 181 | -6, 182 | -4, 183 | -3, 184 | -2, 185 | -0.5, 186 | 0.5, 187 | 3, 188 | ], 189 | ] 190 | ) 191 | n = u.shape[1] 192 | X, Y = advance_linear_system(x0, u, n, A, B, C) 193 | U = u.T 194 | 195 | return Y, U, A, B, C 196 | 197 | 198 | @pytest.fixture 199 | def data_torus_unforced(): 200 | T = 20 # integration time 
201 | dt = 0.05 # time step 202 | n_samples = int(T / dt) 203 | 204 | np.random.seed(1) # Seed random generator for reproducibility 205 | torus = torus_dynamics() 206 | torus.advance(n_samples, dt) 207 | xhat_nonzero = torus.Xhat[torus.mask.reshape(torus.n_states**2) == 1, :] 208 | 209 | return xhat_nonzero, torus.frequencies, dt 210 | 211 | 212 | @pytest.fixture 213 | def data_torus_ct(): 214 | T = 4 # integration time 215 | dt = 0.01 # time step 216 | n_samples = int(T / dt) 217 | 218 | np.random.seed(1) # for reproducibility 219 | torus = torus_dynamics() 220 | torus.advance(n_samples, dt) 221 | xhat = torus.Xhat[torus.mask.reshape(torus.n_states**2) == 1, :] 222 | 223 | return xhat 224 | 225 | 226 | @pytest.fixture 227 | def data_torus_dt(): 228 | T = 4 # integration time 229 | dt = 0.01 # time step 230 | n_samples = int(T / dt) 231 | 232 | np.random.seed(1) # for reproducibility 233 | torus = torus_dynamics() 234 | torus.advance_discrete_time(n_samples, dt) 235 | xhat = torus.Xhat[torus.mask.reshape(torus.n_states**2) == 1, :] 236 | 237 | return xhat 238 | 239 | 240 | @pytest.fixture 241 | def data_lorenz(): 242 | x0 = [-8, 8, 27] # initial condition 243 | dt = 0.001 244 | t = np.linspace(dt, 200, 200000) 245 | x = scipy.integrate.odeint(lorenz, x0, t, atol=1e-12, rtol=1e-12) 246 | 247 | return t, x, dt 248 | 249 | 250 | @pytest.fixture 251 | def data_rev_dvdp(): 252 | np.random.seed(42) # for reproducibility 253 | n_states = 2 # Number of states 254 | dT = 0.1 # Timestep 255 | n_traj = 51 # Number of trajectories 256 | n_int = 1 # Integration length 257 | 258 | # Uniform distribution of initial conditions 259 | x = X0 = 2 * np.random.random([n_states, n_traj]) - 1 260 | 261 | # training data 262 | Xtrain = np.zeros((n_states, n_int * n_traj)) 263 | Ytrain = np.zeros((n_states, n_int * n_traj)) 264 | for step in range(n_int): 265 | y = rev_dvdp(0, x, 0, dT) 266 | Xtrain[:, (step) * n_traj : (step + 1) * n_traj] = x 267 | Ytrain[:, (step) * n_traj : (step + 1) 
* n_traj] = y 268 | x = y 269 | 270 | x0 = np.array([-0.3, -0.2]) 271 | t = np.arange(0, 10, dT) 272 | 273 | # test data 274 | Xtest = np.zeros((len(t), n_states)) 275 | Xtest[0, :] = x0 276 | for step in range(len(t) - 1): 277 | y = rev_dvdp(0, Xtest[step, :][:, np.newaxis], 0, dT) 278 | Xtest[step + 1, :] = y.ravel() 279 | 280 | return dT, X0, Xtrain, Ytrain, Xtest 281 | 282 | 283 | @pytest.fixture 284 | def data_for_validty_check(): 285 | A = np.array([[-0.9, -0.3], [0.2, -0.7]]) 286 | B = np.zeros((2, 1)) 287 | C = np.eye(2) 288 | x0 = np.array([-2, 2]) 289 | N = 50 290 | X, Y = advance_linear_system(x0, np.zeros((1, N)), N, A, B, C) 291 | return X, np.arange(N) 292 | 293 | 294 | @pytest.fixture 295 | def data_vdp_edmdc(): 296 | # path = os.path.join(my_path, "../data/test.csv") 297 | xpred = np.loadtxt(os.path.join(my_path, "data_vdp_for_edmdc.txt"), delimiter=",") 298 | # xpred = np.loadtxt("./test/data_vdp_for_edmdc.txt", delimiter=",") 299 | return xpred 300 | -------------------------------------------------------------------------------- /test/differentiation/test_differentiation.py: -------------------------------------------------------------------------------- 1 | """Tests for pykoopman.differentiation methods.""" 2 | from __future__ import annotations 3 | 4 | import numpy as np 5 | import pytest 6 | from derivative import dxdt 7 | from pykoopman.differentiation import Derivative 8 | 9 | 10 | @pytest.fixture 11 | def data_1D_quadratic(): 12 | t = np.linspace(0, 5, 100) 13 | x = t.reshape(-1, 1) ** 2 14 | x_dot = 2 * t.reshape(-1, 1) 15 | 16 | return x, t, x_dot 17 | 18 | 19 | @pytest.fixture 20 | def data_1D_bad_shape(): 21 | t = np.linspace(0, 5, 100) 22 | x = t**2 23 | x_dot = 2 * t 24 | 25 | return x, t, x_dot 26 | 27 | 28 | @pytest.fixture 29 | def data_2D_quadratic(): 30 | t = np.linspace(0, 5, 100) 31 | x = np.zeros((len(t), 2)) 32 | x[:, 0] = t**2 33 | x[:, 1] = -(t**2) 34 | 35 | x_dot = np.zeros_like(x) 36 | x_dot[:, 0] = 2 * t 37 | x_dot[:, 1] 
= -2 * t 38 | 39 | return x, t, x_dot 40 | 41 | 42 | @pytest.fixture(params=["data_1D_quadratic", "data_1D_bad_shape", "data_2D_quadratic"]) 43 | def data(request): 44 | return request.getfixturevalue(request.param) 45 | 46 | 47 | @pytest.mark.parametrize( 48 | "kws", 49 | [ 50 | dict(kind="spectral"), 51 | dict(kind="spline", s=1e-2), 52 | dict(kind="trend_filtered", order=0, alpha=1e-2), 53 | dict(kind="finite_difference", k=1), 54 | dict(kind="savitzky_golay", order=3, left=1, right=1), 55 | ], 56 | ) 57 | def test_derivative_package_equivalence(data, kws): 58 | x, t, _ = data 59 | 60 | x_dot_pykoopman = Derivative(**kws)(x, t) 61 | x_dot_derivative = dxdt(x, t, axis=0, **kws).reshape(x_dot_pykoopman.shape) 62 | 63 | np.testing.assert_array_equal(x_dot_pykoopman, x_dot_derivative) 64 | 65 | 66 | def test_bad_t_values(data_1D_quadratic): 67 | x, t, _ = data_1D_quadratic 68 | 69 | method = Derivative(kind="finite_difference", k=1) 70 | 71 | with pytest.raises(ValueError): 72 | method(x, t=-1) 73 | 74 | with pytest.raises(ValueError): 75 | method(x, t[:5]) 76 | 77 | with pytest.raises(ValueError): 78 | inds = np.arange(len(t)) 79 | # Swap two time entries 80 | inds[[0, 1]] = inds[[1, 0]] 81 | method(x, t[inds]) 82 | 83 | 84 | def test_accuracy(data): 85 | x, t, x_dot = data 86 | 87 | method = Derivative(kind="finite_difference", k=1) 88 | x_dot_method = method(x, t) 89 | 90 | if x_dot.ndim == 1: 91 | x_dot = x_dot.reshape(-1, 1) 92 | 93 | # Ignore endpoints 94 | np.testing.assert_allclose(x_dot[1:-1], x_dot_method[1:-1]) 95 | 96 | method.set_params() 97 | method.get_params() 98 | -------------------------------------------------------------------------------- /test/observables/test_observables.py: -------------------------------------------------------------------------------- 1 | """Tests for pykoopman.observables objects.""" 2 | from __future__ import annotations 3 | 4 | import pytest 5 | from numpy import hstack 6 | from numpy import iscomplexobj 7 | from numpy 
import linspace 8 | from numpy import stack 9 | from numpy.testing import assert_allclose 10 | from numpy.testing import assert_array_equal 11 | from pykoopman.observables import CustomObservables 12 | from pykoopman.observables import Identity 13 | from pykoopman.observables import Polynomial 14 | from pykoopman.observables import RadialBasisFunction 15 | from pykoopman.observables import RandomFourierFeatures 16 | from pykoopman.observables import TimeDelay 17 | from sklearn.exceptions import NotFittedError 18 | from sklearn.utils.validation import check_is_fitted 19 | 20 | 21 | @pytest.fixture 22 | def data_small(): 23 | t = linspace(0, 5, 10) 24 | return stack((t, t**2), axis=1) 25 | 26 | 27 | @pytest.mark.parametrize( 28 | "observables", 29 | [ 30 | Identity(), 31 | Polynomial(), 32 | TimeDelay(), 33 | RadialBasisFunction(), 34 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 35 | pytest.lazy_fixture("data_custom_observables"), 36 | ], 37 | ) 38 | def test_if_fitted(observables, data_random): 39 | """ 40 | we iterate over each observable object, first we 41 | test if it correctly raise NotFittedError when it is not fitted 42 | but called to .transform, .inverse, and .get_feature_names 43 | then we fit it, and check if it is fitted at the final step. 
44 | """ 45 | x = data_random 46 | with pytest.raises(NotFittedError): 47 | observables.transform(x) 48 | 49 | with pytest.raises(NotFittedError): 50 | observables.inverse(x) 51 | 52 | with pytest.raises(NotFittedError): 53 | observables.get_feature_names() 54 | 55 | observables.fit(x) 56 | check_is_fitted(observables) 57 | 58 | 59 | @pytest.mark.parametrize( 60 | "observables_1", 61 | [ 62 | Identity(), 63 | Polynomial(), 64 | TimeDelay(), 65 | RadialBasisFunction(), 66 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 67 | pytest.lazy_fixture("data_custom_observables"), 68 | ], 69 | ) 70 | @pytest.mark.parametrize( 71 | "observables_2", 72 | [ 73 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 74 | Identity(), 75 | Polynomial(), 76 | TimeDelay(), 77 | RadialBasisFunction(), 78 | pytest.lazy_fixture("data_custom_observables"), 79 | ], 80 | ) 81 | def test_if_fitted_two_obs(observables_1, observables_2, data_random): 82 | """ 83 | we iterate over each observable object, first we 84 | test if it correctly raise NotFittedError when it is not fitted 85 | but called to .transform, .inverse, and .get_feature_names 86 | then we fit it, and check if it is fitted at the final step. 
87 | """ 88 | observables = observables_1 + observables_2 89 | test_if_fitted(observables, data_random) 90 | 91 | 92 | @pytest.mark.parametrize( 93 | "observables_1", 94 | [ 95 | Identity(), 96 | Polynomial(), 97 | TimeDelay(), 98 | RadialBasisFunction(), 99 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 100 | pytest.lazy_fixture("data_custom_observables"), 101 | ], 102 | ) 103 | @pytest.mark.parametrize( 104 | "observables_2", 105 | [ 106 | Identity(), 107 | Polynomial(), 108 | TimeDelay(), 109 | RadialBasisFunction(), 110 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 111 | pytest.lazy_fixture("data_custom_observables"), 112 | ], 113 | ) 114 | @pytest.mark.parametrize( 115 | "observables_3", 116 | [ 117 | Identity(), 118 | Polynomial(), 119 | TimeDelay(), 120 | RadialBasisFunction(), 121 | RandomFourierFeatures(include_state=False, gamma=0.01, D=2), 122 | pytest.lazy_fixture("data_custom_observables"), 123 | ], 124 | ) 125 | def test_if_fitted_three_obs(observables_1, observables_2, observables_3, data_random): 126 | """ 127 | we iterate over each observable object, first we 128 | test if it correctly raise NotFittedError when it is not fitted 129 | but called to .transform, .inverse, and .get_feature_names 130 | then we fit it, and check if it is fitted at the final step. 
131 | """ 132 | observables = observables_1 + observables_2 + observables_3 133 | test_if_fitted(observables, data_random) 134 | 135 | 136 | @pytest.mark.parametrize( 137 | "observables", 138 | [ 139 | Identity(), 140 | Polynomial(), 141 | Polynomial(degree=1), 142 | Polynomial(degree=4), 143 | Polynomial(include_bias=False), 144 | Polynomial(degree=3, include_bias=False), 145 | RadialBasisFunction(), 146 | RandomFourierFeatures(include_state=True, gamma=0.01, D=2), 147 | pytest.lazy_fixture("data_custom_observables"), 148 | ], 149 | ) 150 | def test_inverse(observables, data_random): 151 | """ 152 | we iterate over all obs to check if the fit_transform works, 153 | and if the output of fit_transform can be reverse back to x nicely 154 | with .inverse 155 | """ 156 | x = data_random 157 | assert_allclose(observables.inverse(observables.fit_transform(x)), x) 158 | 159 | 160 | @pytest.mark.parametrize( 161 | "observables_1", 162 | [ 163 | Identity(), 164 | Polynomial(), 165 | Polynomial(degree=1), 166 | Polynomial(degree=4), 167 | Polynomial(include_bias=False), 168 | Polynomial(degree=3, include_bias=False), 169 | RadialBasisFunction(), 170 | RandomFourierFeatures(include_state=True, gamma=0.01, D=2), 171 | pytest.lazy_fixture("data_custom_observables"), 172 | ], 173 | ) 174 | @pytest.mark.parametrize( 175 | "observables_2", 176 | [ 177 | Identity(), 178 | Polynomial(), 179 | Polynomial(degree=1), 180 | Polynomial(degree=4), 181 | Polynomial(include_bias=False), 182 | Polynomial(degree=3, include_bias=False), 183 | RandomFourierFeatures(include_state=True, gamma=0.01, D=2), 184 | RadialBasisFunction(), 185 | pytest.lazy_fixture("data_custom_observables"), 186 | ], 187 | ) 188 | def test_inverse_two_obs(observables_1, observables_2, data_random): 189 | """ 190 | we iterate over each observable object, first we 191 | test if it correctly raise NotFittedError when it is not fitted 192 | but called to .transform, .inverse, and .get_feature_names 193 | then we fit it, 
    and check if it is fitted at the final step.
    """
    observables = observables_1 + observables_2
    test_inverse(observables, data_random)


# Compose every 3-way combination of observables with `+` and verify that the
# combined observable behaves correctly before and after fitting.
@pytest.mark.parametrize(
    "observables_1",
    [
        Identity(),
        Polynomial(),
        Polynomial(degree=1),
        Polynomial(degree=4),
        Polynomial(include_bias=False),
        Polynomial(degree=3, include_bias=False),
        RadialBasisFunction(),
        RandomFourierFeatures(include_state=True, gamma=0.01, D=2),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
@pytest.mark.parametrize(
    "observables_2",
    [
        Identity(),
        Polynomial(),
        Polynomial(degree=1),
        Polynomial(degree=4),
        Polynomial(include_bias=False),
        RadialBasisFunction(),
        Polynomial(degree=3, include_bias=False),
        RandomFourierFeatures(include_state=True, gamma=0.01, D=2),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
@pytest.mark.parametrize(
    "observables_3",
    [
        Identity(),
        Polynomial(),
        Polynomial(degree=1),
        Polynomial(degree=4),
        Polynomial(include_bias=False),
        Polynomial(degree=3, include_bias=False),
        RadialBasisFunction(),
        RandomFourierFeatures(include_state=True, gamma=0.01, D=2),
        RandomFourierFeatures(include_state=False, gamma=0.01, D=2),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
def test_inverse_three_obs(observables_1, observables_2, observables_3, data_random):
    """
    For each composed observable object: first check that it correctly raises
    NotFittedError when .transform, .inverse, or .get_feature_names is called
    before fitting; then fit it and check that it is fitted at the final step.
    """
    observables = observables_1 + observables_2 + observables_3
    test_inverse(observables, data_random)


def test_time_delay_inverse(data_random):
    """TimeDelay.inverse should recover the input minus the consumed rows."""
    x = data_random
    delay = 2
    n_delays = 3
    # the first delay * n_delays samples lack enough history to be embedded
    n_deleted_rows = delay * n_delays

    observables = TimeDelay(delay=delay, n_delays=n_delays)
    y = observables.fit_transform(x)
    # First few rows of x are deleted which don't have enough
    # time history
    assert_array_equal(observables.inverse(y), x[n_deleted_rows:])


@pytest.mark.parametrize(
    "observables_1",
    [
        RadialBasisFunction(include_state=False),
        Identity(),
        Polynomial(),
        Polynomial(degree=1),
        Polynomial(degree=4),
        Polynomial(include_bias=False),
        Polynomial(degree=3, include_bias=False),
        TimeDelay(delay=1, n_delays=2),
        RadialBasisFunction(),
        RandomFourierFeatures(include_state=True, gamma=0.01, D=2),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
@pytest.mark.parametrize(
    "observables_2",
    [
        RadialBasisFunction(kernel_width=1.0, include_state=True),
        RadialBasisFunction(),
        TimeDelay(delay=3, n_delays=4),
        TimeDelay(delay=1, n_delays=6),
        RandomFourierFeatures(include_state=True, gamma=0.01, D=2),
    ],
)
def test_time_delay_inverse_two_obs(observables_1, observables_2, data_random):
    """Inverse of a two-observable composition that may include time delays."""
    x = data_random
    observables = observables_1 + observables_2
    y = observables.fit_transform(x)
    # the composition reports how many leading samples were consumed overall
    n_deleted_rows = observables.n_consumed_samples
    # First few rows of x are deleted which don't have enough
    # time history
    assert_allclose(observables.inverse(y), x[n_deleted_rows:], rtol=1e-7)
    # assert_array_equal(observables.inverse(y), x[n_deleted_rows:])


@pytest.mark.parametrize(
    "observables_1",
    [
        Identity(),
        Polynomial(),
        Polynomial(degree=1),
        Polynomial(degree=4),
        RadialBasisFunction(),
        Polynomial(include_bias=False),
        Polynomial(degree=3, include_bias=False),
        TimeDelay(delay=1, n_delays=2),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
@pytest.mark.parametrize(
    "observables_2",
    [
        TimeDelay(delay=2, n_delays=3),
        TimeDelay(delay=1, n_delays=6),
        RadialBasisFunction(),
        RandomFourierFeatures(include_state=True, gamma=0.3, D=3),
    ],
)
@pytest.mark.parametrize(
    "observables_3",
    [
        TimeDelay(delay=2, n_delays=3),
        RadialBasisFunction(),
        TimeDelay(delay=1, n_delays=6) + TimeDelay(delay=3, n_delays=3),
        RandomFourierFeatures(include_state=False, gamma=0.01, D=2),
    ],
)
def test_time_delay_inverse_three_obs(
    observables_1, observables_2, observables_3, data_random
):
    """Inverse of a three-observable composition that may include time delays."""
    x = data_random
    observables = observables_1 + observables_2 + observables_3
    y = observables.fit_transform(x)
    n_deleted_rows = observables.n_consumed_samples
    # First few rows of x are deleted which don't have enough
    # time history
    # assert_array_equal(observables.inverse(y), x[n_deleted_rows:])
    assert_allclose(observables.inverse(y), x[n_deleted_rows:], rtol=1e-7)


def test_bad_polynomial_inputs():
    """Polynomial must reject degree < 1."""
    with pytest.raises(ValueError):
        Polynomial(degree=0)


def test_bad_custom_observables_inputs():
    """CustomObservables must reject mismatched observables/names lengths."""
    # One too few names
    observables = [lambda x: x, lambda x: x**2, lambda x: 0 * x, lambda x, y: x * y]
    observable_names = [lambda s: f"{s}^2", lambda: str(0), lambda s, t: f"{s} {t}"]

    with pytest.raises(ValueError):
        CustomObservables(observables, observable_names=observable_names)


def test_custom_observables_transform(data_small):
    """fit_transform output should stack the state with each custom observable."""
    x = data_small

    observables = [lambda x: x**2]
    y = CustomObservables(observables).fit_transform(x)

    # Identity is automatically prepended to custom observables
    assert_array_equal(y, hstack((x, x**2)))


@pytest.mark.parametrize("delay, n_delays", [(3, 2), (1, 5)])
def test_time_delay_output_shape(data_random, delay, n_delays):
    """Rows shrink by delay * n_delays; columns grow by a factor of n_delays + 1."""
    x = data_random
    y = TimeDelay(delay=delay, n_delays=n_delays).fit_transform(x)

    assert y.shape == (x.shape[0] - delay * n_delays, (n_delays + 1) * x.shape[1])


def test_time_delay_transform_matches_input(data_random):
    """Each transformed row stacks the current sample with its delayed copies."""
    x = data_random

    observables = TimeDelay(delay=2, n_delays=4)
    observables.fit(x)

    y = observables.transform(x)
    # row 1 corresponds to sample 9 followed by its delayed samples 7, 5, 3, 1
    assert_array_equal(y[1], x[[9, 7, 5, 3, 1]].flatten())


@pytest.mark.parametrize(
    "observables, expected_default_names, expected_custom_names",
    [
        (Identity(), ["x0", "x1"], ["x", "y"]),
        (
            Polynomial(degree=2),
            ["1", "x0", "x1", "x0^2", "x0 x1", "x1^2"],
            ["1", "x", "y", "x^2", "x y", "y^2"],
        ),
        (
            TimeDelay(delay=2, n_delays=2),
            ["x0(t)", "x1(t)", "x0(t-2dt)", "x1(t-2dt)", "x0(t-4dt)", "x1(t-4dt)"],
            ["x(t)", "y(t)", "x(t-2dt)", "y(t-2dt)", "x(t-4dt)", "y(t-4dt)"],
        ),
        (
            pytest.lazy_fixture("data_custom_observables"),
            ["x0", "x1", "x0", "x1", "x0^2", "x1^2", "0", "0", "x0 x1"],
            ["x", "y", "x", "y", "x^2", "y^2", "0", "0", "x y"],
        ),
    ],
)
def test_feature_names(
    observables, expected_default_names, expected_custom_names, data_small
):
    """get_feature_names must honor both default and user-supplied input names."""
    x = data_small

    observables.fit(x)
    assert observables.get_feature_names() == expected_default_names

    custom_names = ["x", "y"]
    assert (
        observables.get_feature_names(input_features=custom_names)
        == expected_custom_names
    )


# so far it does not support complex number for random fourier features.
# shaowu does not think complex numbers are necessary at all
@pytest.mark.parametrize(
    "observables",
    [
        Identity(),
        Polynomial(),
        TimeDelay(),
        pytest.lazy_fixture("data_custom_observables"),
    ],
)
def test_complex_data(data_random_complex, observables):
    """These observables should preserve complex-valued input data."""
    x = data_random_complex
    y = observables.fit_transform(x)

    assert iscomplexobj(y)
--------------------------------------------------------------------------------
/test/regression/test_regressors.py:
--------------------------------------------------------------------------------
"""Tests for pykoopman.regression objects and methods."""
from __future__ import annotations

import numpy as np
import pykoopman as pk
import pytest
from pydmd import DMD
from pykoopman.regression import BaseRegressor
from pykoopman.regression import EDMD
from pykoopman.regression import KDMD
from pykoopman.regression import NNDMD
from pykoopman.regression import PyDMDRegressor
from sklearn.gaussian_process.kernels import RBF


class RegressorWithoutFit:
    """Minimal object lacking .fit, used to exercise BaseRegressor validation."""

    def __init__(self):
        pass

    def predict(self, x):
        return x


class RegressorWithoutPredict:
    """Minimal object lacking .predict, used to exercise BaseRegressor validation."""

    def __init__(self):
        pass

    def fit(self, x):
        return self


@pytest.mark.parametrize(
    "regressor", [RegressorWithoutFit(), RegressorWithoutPredict()]
)
def test_bad_regressor_input(regressor):
    """test if BaseRegressor is going to raise TypeError for wrong input"""
    with pytest.raises(TypeError):
        BaseRegressor(regressor)


@pytest.mark.parametrize(
    "data_xy",
    [
        # case 1,2 only work for pykoopman class
        # case 1: single step single traj, no validation
        (np.random.rand(200, 3), None),
        # case 2: single step multiple traj, no validation
        (np.random.rand(200, 3), np.random.rand(200, 3)),
    ],
)
@pytest.mark.parametrize(
    "regressor",
    [
        EDMD(svd_rank=10),
        PyDMDRegressor(DMD(svd_rank=10)),
        KDMD(svd_rank=10, kernel=RBF(length_scale=1)),
    ],
)
def test_fit_regressors(data_xy, regressor):
    """test if each linear regressor (EDMD, PyDMDRegressor, KDMD) alone will
    run the fit without error

    Note:
        `pydmd.DMD` cannot be used to fit nonconsecutive data
    """
    x, y = data_xy
    regressor.fit(x, y)


@pytest.mark.parametrize(
    "data_xy",
    [
        # case 1,2 only work for pykoopman class
        # case 1: single step single traj, no validation
        (np.random.rand(200, 3), None),
        # case 2: single step multiple traj, no validation
        (
            np.random.rand(200, 3),
            np.random.rand(200, 3)  # because "x" is not a list, so we think this
            # is single step
        ),
        # case 3,4 works for regressor directly
        # case 3: multiple traj, no validation
        (
            [np.random.rand(200, 3), np.random.rand(100, 3)],  # this is training
            None,  # no validation
        ),
        # case 4: multiple traj, with validation
        (
            [np.random.rand(100, 3), np.random.rand(100, 3)],  # this is training
            [np.random.rand(300, 3), np.random.rand(400, 3)],  # this is validation
        ),
    ],
)
@pytest.mark.parametrize(
    "regressor",
    [
        NNDMD(
            mode="Dissipative",
            look_forward=2,
            config_encoder=dict(
                input_size=3, hidden_sizes=[32] * 2, output_size=4, activations="swish"
            ),
            config_decoder=dict(
                input_size=4, hidden_sizes=[32] * 2, output_size=3, activations="linear"
            ),
            batch_size=512,
            lbfgs=True,
            normalize=False,
            normalize_mode="max",
            # single epoch on CPU keeps the test fast
            trainer_kwargs=dict(max_epochs=1, accelerator="cpu"),
        )
    ],
)
def test_fit_nndmd_regressor(data_xy, regressor):
    """test if using nndmd regressor alone will run the fit without error"""
    x, y = data_xy
    regressor.fit(x, y)


@pytest.mark.parametrize(
    "data_xy",
    [
        # # case 1,2 only work for pykoopman class
        # # case 1: single step single traj, no validation
        # (
        #     np.random.rand(200, 3),
        #     None
        # ),
        # # case 2: single step multiple traj, no validation
        # (
        #     np.random.rand(200, 3),
        #     np.random.rand(200, 3)  # because "x" is not a list, so we think this
        #     # is single step
        # ),
        # # case 3,4 works for regressor directly
        # # case 3: multiple traj, no validation
        # (
        #     [np.random.rand(200, 3), np.random.rand(100, 3)],  # this is training
        #     None  # no validation
        # ),
        # case 4: multiple traj, with validation
        (
            [np.random.rand(100, 3), np.random.rand(100, 3)],  # this is training
            [np.random.rand(300, 3), np.random.rand(400, 3)],  # this is validation
        ),
    ],
)
@pytest.mark.parametrize(
    "regressor",
    [
        NNDMD(
            mode="Dissipative",
            look_forward=2,
            config_encoder=dict(
                input_size=3, hidden_sizes=[32] * 2, output_size=4, activations="swish"
            ),
            config_decoder=dict(
                input_size=4, hidden_sizes=[32] * 2, output_size=3, activations="linear"
            ),
            batch_size=512,
            lbfgs=True,
            normalize=False,
            normalize_mode="max",
            # single epoch on CPU keeps the test fast
            trainer_kwargs=dict(max_epochs=1, accelerator="cpu"),
        )
    ],
)
def test_fit_dlkoopman(data_xy, regressor):
    """test that the NNDMD regressor works inside the pykoopman Koopman class"""
    model_d = pk.Koopman(regressor=regressor)
    model_d.fit(data_xy[0], data_xy[1], dt=1)
--------------------------------------------------------------------------------
/test/test_koopman_continuous.py:
--------------------------------------------------------------------------------
"""Tests for pykoopman.koopman_continuous methods."""
from __future__ import annotations

import pytest
from numpy.testing import assert_allclose
from pydmd import DMD
from pykoopman import KoopmanContinuous
from pykoopman import observables
from pykoopman import regression
from pykoopman.differentiation import Derivative
from sklearn.utils.validation import check_is_fitted


@pytest.mark.parametrize(
    "data",
    [pytest.lazy_fixture("data_random"), pytest.lazy_fixture("data_random_complex")],
)
def test_derivative_integration(data):
    """KoopmanContinuous with a finite-difference differentiator and a DMD
    regressor should fit both real and complex data without error."""
    x = data

    diff = Derivative(kind="finite_difference", k=1)
    dmd = DMD(svd_rank=2)
    model = KoopmanContinuous(differentiator=diff, regressor=dmd)

    model.fit(x)
    check_is_fitted(model)


def test_havok_prediction(data_lorenz):
    """HAVOK on the first Lorenz coordinate: verify one-step prediction and
    multi-step simulation against reference values."""
    t, x, dt = data_lorenz

    n_delays = 99
    TDC = observables.TimeDelay(delay=1, n_delays=n_delays)
    HAVOK = regression.HAVOK(svd_rank=15)
    Diff = Derivative(kind="finite_difference", k=2)
    model = KoopmanContinuous(observables=TDC, differentiator=Diff, regressor=HAVOK)
    model.fit(x[:, 0], dt=dt)

    # HAVOK exposes the identified forcing term as a known external input
    known_external_input = model.regressor.forcing_signal

    # one step prediction
    xpred_one_step = model.predict(x[: n_delays + 1, 0], dt, u=known_external_input[0])
    assert_allclose(x[n_delays + 1, 0], xpred_one_step, atol=1e-3)

    # simulate: multi-step prediction
    xpred = model.simulate(
        x=x[: n_delays + 1, 0], t=t[n_delays:] - t[n_delays], u=known_external_input
    )

    # reference value obtained from a trusted run of this model
    assert_allclose(xpred[50], 3.54512034, atol=1e-3)
--------------------------------------------------------------------------------