├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── doc
│   └── sphinx
│       ├── Makefile
│       ├── make.bat
│       └── source
│           ├── conf.py
│           ├── datasets.rst
│           ├── dynonet.rst
│           ├── index.rst
│           ├── metrics.rst
│           ├── requirements.txt
│           ├── statespace_ct.rst
│           └── statespace_dt.rst
├── examples
│   ├── dynonet
│   │   ├── README.txt
│   │   ├── RLC
│   │   │   ├── RLC_train.py
│   │   │   ├── RLC_train_FIR.py
│   │   │   └── data
│   │   │       └── RLC_data_id_lin.csv
│   │   └── WH2009
│   │       ├── README.txt
│   │       ├── WH2009_train_binary.py
│   │       ├── WH2009_train_quantized.py
│   │       ├── WH_test.py
│   │       ├── WH_test_FIR.py
│   │       ├── WH_train.py
│   │       ├── WH_train_FIR.py
│   │       └── download_dataset.py
│   ├── io
│   │   └── RLC
│   │       ├── RLC_IO_fit_1step.py
│   │       ├── RLC_IO_fit_multistep.py
│   │       ├── RLC_IO_test.py
│   │       ├── RLC_generate_id.py
│   │       ├── RLC_generate_test.py
│   │       └── data
│   │           ├── RLC_data_id.csv
│   │           └── RLC_data_test.csv
│   ├── statespace
│   │   ├── CTS
│   │   │   ├── CTS_test.py
│   │   │   ├── CTS_train_full.py
│   │   │   ├── CTS_train_truncated.py
│   │   │   ├── README.txt
│   │   │   └── download_dataset.py
│   │   ├── README.txt
│   │   ├── RLC
│   │   │   ├── README.txt
│   │   │   ├── RLC_generate_test.py
│   │   │   ├── RLC_generate_train.py
│   │   │   ├── RLC_test.py
│   │   │   ├── RLC_train_1step.py
│   │   │   ├── RLC_train_multistep.py
│   │   │   ├── data
│   │   │   │   ├── RLC_data_id_lin.csv
│   │   │   │   ├── RLC_data_test_nl.csv
│   │   │   │   ├── RLC_data_train_lin.csv
│   │   │   │   └── RLC_data_train_nl.csv
│   │   │   ├── loader.py
│   │   │   ├── old
│   │   │   │   ├── RLC_OE_comparison.m
│   │   │   │   ├── RLC_subspace_comparison.m
│   │   │   │   ├── RLC_test.py
│   │   │   │   ├── RLC_train_ae.py
│   │   │   │   ├── RLC_train_full.py
│   │   │   │   ├── RLC_train_soft.py
│   │   │   │   └── RLC_train_truncated.py
│   │   │   └── symbolic_RLC.py
│   │   ├── WH2009
│   │   │   ├── download_dataset.py
│   │   │   ├── loader.py
│   │   │   ├── wh_test.py
│   │   │   └── wh_train.py
│   │   └── silverbox
│   │       ├── README.txt
│   │       ├── SilverboxFiles
│   │       │   ├── SNLS80mV.csv
│   │       │   └── Schroeder80mV.csv
│   │       ├── loader.py
│   │       ├── silverbox_plot.py
│   │       ├── silverbox_test.py
│   │       ├── silverbox_train_lin.py
│   │       └── silverbox_train_poly.py
│   └── util
│       ├── __init__.py
│       ├── benchmark_url.py
│       └── metrics.py
├── setup.py
└── torchid
    ├── __init__.py
    ├── datasets.py
    ├── dynonet
    │   ├── __init__.py
    │   ├── filtering.py
    │   ├── functional
    │   │   ├── __init__.py
    │   │   └── lti.py
    │   └── module
    │       ├── __init__.py
    │       ├── lti.py
    │       └── static.py
    ├── io
    │   ├── __init__.py
    │   └── module
    │       ├── __init__.py
    │       ├── io_simulator.py
    │       └── iomodels.py
    ├── metrics.py
    ├── ss
    │   ├── __init__.py
    │   ├── ct
    │   │   ├── __init__.py
    │   │   ├── models.py
    │   │   └── simulators.py
    │   ├── dt
    │   │   ├── __init__.py
    │   │   ├── estimators.py
    │   │   ├── models.py
    │   │   └── simulator.py
    │   └── poly_utils.py
    └── tests
        ├── __init__.py
        └── ss_ct_test.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105 | __pypackages__/
106 |
107 | # Celery stuff
108 | celerybeat-schedule
109 | celerybeat.pid
110 |
111 | # SageMath parsed files
112 | *.sage.py
113 |
114 | # Environments
115 | .env
116 | .venv
117 | env/
118 | venv/
119 | ENV/
120 | env.bak/
121 | venv.bak/
122 |
123 | # Spyder project settings
124 | .spyderproject
125 | .spyproject
126 |
127 | # Rope project settings
128 | .ropeproject
129 |
130 | # mkdocs documentation
131 | /site
132 |
133 | # mypy
134 | .mypy_cache/
135 | .dmypy.json
136 | dmypy.json
137 |
138 | # Pyre type checker
139 | .pyre/
140 |
141 | # pytype static type analyzer
142 | .pytype/
143 |
144 | # Cython debug symbols
145 | cython_debug/
146 |
147 | # PyCharm
148 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
149 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
150 | # and can be added to the global gitignore or merged into this file. For a more nuclear
151 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
152 | #.idea/
153 |
154 | /doc/sphinx/build/
155 | /examples/dynonet/WH2009/models/
156 | /examples/statespace/CTS/data/
157 | /examples/statespace/CTS/models/
158 | /examples/statespace/RLC/models/
159 | /examples/statespace/RLC/fig/
160 | /examples/dynonet/RLC/models/
161 | /examples/dynonet/WH2009/data/
162 | /examples/IO/fig/
163 | /examples/IO/models/
164 |
165 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the version of Python and other tools you might need
9 | build:
10 | os: ubuntu-20.04
11 | tools:
12 | python: "3.9"
13 | # You can also specify other tool versions:
14 | # nodejs: "16"
15 | # rust: "1.55"
16 | # golang: "1.17"
17 |
18 | # Build documentation in the docs/ directory with Sphinx
19 | sphinx:
20 | configuration: doc/sphinx/source/conf.py
21 |
22 | # If using Sphinx, optionally build your docs in additional formats such as PDF
23 | # formats:
24 | # - pdf
25 |
26 | # Optionally declare the Python requirements required to build your docs
27 | python:
28 | install:
29 | - requirements: doc/sphinx/source/requirements.txt
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Marco Forgione
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # System identification tools in PyTorch
2 | A collection of system identification tools implemented in PyTorch.
3 |
4 | * State-space identification methods (see [1], [2], [3], [6])
5 | * Differentiable transfer functions (see [4], [5])
6 |
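For example, a dynoNet-style model is built by composing differentiable linear dynamical operators (G-blocks) with static non-linearities. A minimal sketch mirroring the Wiener-Hammerstein example in [examples/dynonet/WH2009](examples/dynonet/WH2009) (the random input below is only a placeholder for real data):

```python
import torch
from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
from torchid.dynonet.module.static import SisoStaticNonLinearity

# Wiener-Hammerstein structure: G1 -> static non-linearity -> G2
G1 = SisoLinearDynamicalOperator(n_b=8, n_a=8, n_k=1)
F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
G2 = SisoLinearDynamicalOperator(n_b=8, n_a=8)

u = torch.randn(1, 1000, 1)   # input sequence with shape (batch, time, channel)
y_hat = G2(F_nl(G1(u)))       # end-to-end differentiable simulation
```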
7 | ## Examples and Documentation
8 |
9 | * Examples are provided in the [**examples**](examples) folder of this repo.
10 | * The API documentation is available at https://pytorch-ident.readthedocs.io/en/latest.
11 |
12 |
13 | ## Installation:
14 |
15 | ### Requirements:
16 | A Python 3.9 conda environment with the following packages:
17 |
18 | * numpy
19 | * scipy
20 | * matplotlib
21 | * pandas
22 | * pytorch
23 |
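One possible way to set up such an environment (the environment name and channel choice below are just an example):

```
conda create -n pytorch-ident python=3.9
conda activate pytorch-ident
conda install numpy scipy matplotlib pandas
conda install pytorch -c pytorch
```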
24 | ### Stable version from PyPI
25 |
26 | Run the command
27 |
28 | ```
29 | pip install pytorch-ident
30 | ```
31 | This will install the current [stable version](https://pypi.org/project/pytorch-ident/) from the PyPI package repository.
32 |
33 | ### Latest version from GitHub
34 | 1. Get a local copy of the project. For instance, run
35 | ```
36 | git clone https://github.com/forgi86/pytorch-ident.git
37 | ```
38 | in a terminal to clone the project using git. Alternatively, download the zipped project from [this link](https://github.com/forgi86/pytorch-ident/zipball/master) and extract it in a local folder
39 |
40 | 2. Install pytorch-ident by running
41 | ```
42 | pip install .
43 | ```
44 | in the project root folder (where the file setup.py is located).
45 |
46 | # Bibliography
47 | [1] M. Forgione and D. Piga. Model structures and fitting criteria for system identification with neural networks. In Proceedings of the 14th IEEE International Conference on Application of Information and Communication Technologies, 2020.
48 | [2] B. Mavkov, M. Forgione, D. Piga. Integrated Neural Networks for Nonlinear Continuous-Time System Identification. IEEE Control Systems Letters, 4(4), pp 851-856, 2020.
49 | [3] M. Forgione and D. Piga. Continuous-time system identification with neural networks: model structures and fitting criteria. European Journal of Control, 59:68-81, 2021.
50 | [4] M. Forgione and D. Piga. dynoNet: a neural network architecture for learning dynamical systems. International Journal of Adaptive Control and Signal Processing, 2021.
51 | [5] D. Piga, M. Forgione and M. Mejari. Deep learning with transfer functions: new applications in system identification. In Proceedings of the 2021 SysId Conference, 2021.
52 | [6] G. Beintema, R. Toth and M. Schoukens. Nonlinear state-space identification using deep encoder networks. Learning for Dynamics and Control. PMLR, 2021.
53 |
--------------------------------------------------------------------------------
/doc/sphinx/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = source
8 | BUILDDIR = build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/doc/sphinx/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/doc/sphinx/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import os
16 | import sys
17 | # sys.path.insert(0, os.path.abspath('.'))
18 |
19 | #sys.path.insert(0, os.path.abspath(os.path.join('..', 'torchid')))
20 | sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..')))
21 |
22 | # -- Project information -----------------------------------------------------
23 |
24 | project = 'pytorch-ident'
25 | copyright = '2020, Marco Forgione'
26 | author = 'Marco Forgione'
27 |
28 | # The short X.Y version
29 | version = ''
30 | # The full version, including alpha/beta/rc tags
31 | release = '0.2.2'
32 |
33 |
34 | # -- General configuration ---------------------------------------------------
35 |
36 | # If your documentation needs a minimal Sphinx version, state it here.
37 | #
38 | # needs_sphinx = '1.0'
39 |
40 | # Add any Sphinx extension module names here, as strings. They can be
41 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
42 | # ones.
43 | extensions = [
44 | 'sphinx.ext.autodoc',
45 | 'sphinx.ext.autosummary',
46 | 'sphinx.ext.doctest',
47 | 'sphinx.ext.intersphinx',
48 | 'sphinx.ext.todo',
49 | 'sphinx.ext.coverage',
50 | 'sphinx.ext.napoleon',
51 | 'sphinx.ext.viewcode',
52 | 'sphinx.ext.autosectionlabel',
53 | ]
54 |
55 |
56 | # Add any paths that contain templates here, relative to this directory.
57 | templates_path = ['_templates']
58 |
59 | # The suffix(es) of source filenames.
60 | # You can specify multiple suffix as a list of string:
61 | #
62 | # source_suffix = ['.rst', '.md']
63 | source_suffix = '.rst'
64 |
65 | # The master toctree document.
66 | master_doc = 'index'
67 |
68 | # The language for content autogenerated by Sphinx. Refer to documentation
69 | # for a list of supported languages.
70 | #
71 | # This is also used if you do content translation via gettext catalogs.
72 | # Usually you set "language" from the command line for these cases.
73 | language = None
74 |
75 | # List of patterns, relative to source directory, that match files and
76 | # directories to ignore when looking for source files.
77 | # This pattern also affects html_static_path and html_extra_path.
78 | exclude_patterns = []
79 |
80 | # The name of the Pygments (syntax highlighting) style to use.
81 | pygments_style = None
82 |
83 |
84 | # -- Options for HTML output -------------------------------------------------
85 |
86 | # The theme to use for HTML and HTML Help pages. See the documentation for
87 | # a list of builtin themes.
88 | #
89 | html_theme = 'sphinxdoc'
90 |
91 | # Theme options are theme-specific and customize the look and feel of a theme
92 | # further. For a list of options available for each theme, see the
93 | # documentation.
94 | #
95 | # html_theme_options = {}
96 |
97 | # Add any paths that contain custom static files (such as style sheets) here,
98 | # relative to this directory. They are copied after the builtin static files,
99 | # so a file named "default.css" will overwrite the builtin "default.css".
100 | html_static_path = [] #'_static']
101 |
102 | # Custom sidebar templates, must be a dictionary that maps document names
103 | # to template names.
104 | #
105 | # The default sidebars (for documents that don't match any pattern) are
106 | # defined by theme itself. Builtin themes are using these templates by
107 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
108 | # 'searchbox.html']``.
109 | #
110 | # html_sidebars = {}
111 |
112 |
113 | # -- Options for HTMLHelp output ---------------------------------------------
114 |
115 | # Output file base name for HTML help builder.
116 | htmlhelp_basename = 'torchiddoc'
117 |
118 |
119 | # -- Options for LaTeX output ------------------------------------------------
120 |
121 | latex_elements = {
122 | # The paper size ('letterpaper' or 'a4paper').
123 | #
124 | # 'papersize': 'letterpaper',
125 |
126 | # The font size ('10pt', '11pt' or '12pt').
127 | #
128 | # 'pointsize': '10pt',
129 |
130 | # Additional stuff for the LaTeX preamble.
131 | #
132 | # 'preamble': '',
133 |
134 | # Latex figure (float) alignment
135 | #
136 | # 'figure_align': 'htbp',
137 | }
138 |
139 | # Grouping the document tree into LaTeX files. List of tuples
140 | # (source start file, target name, title,
141 | # author, documentclass [howto, manual, or own class]).
142 | latex_documents = [
143 | (master_doc, 'torchid.tex', 'torchid Documentation',
144 | 'Marco Forgione', 'manual'),
145 | ]
146 |
147 |
148 | # -- Options for manual page output ------------------------------------------
149 |
150 | # One entry per manual page. List of tuples
151 | # (source start file, name, description, authors, manual section).
152 | man_pages = [
153 | (master_doc, 'torchid', 'torchid Documentation',
154 | [author], 1)
155 | ]
156 |
157 |
158 | # -- Options for Texinfo output ----------------------------------------------
159 |
160 | # Grouping the document tree into Texinfo files. List of tuples
161 | # (source start file, target name, title, author,
162 | # dir menu entry, description, category)
163 | texinfo_documents = [
164 | (master_doc, 'torchid', 'torchid Documentation',
165 | author, 'torchid', 'One line description of project.',
166 | 'Miscellaneous'),
167 | ]
168 |
169 |
170 | # -- Options for Epub output -------------------------------------------------
171 |
172 | # Bibliographic Dublin Core info.
173 | epub_title = project
174 |
175 | # The unique identifier of the text. This can be a ISBN number
176 | # or the project homepage.
177 | #
178 | # epub_identifier = ''
179 |
180 | # A unique identification for the text.
181 | #
182 | # epub_uid = ''
183 |
184 | # A list of files that should not be packed into the epub file.
185 | epub_exclude_files = ['search.html']
186 |
--------------------------------------------------------------------------------
/doc/sphinx/source/datasets.rst:
--------------------------------------------------------------------------------
1 | Datasets
2 | ========
3 |
4 | .. automodule:: torchid.datasets
5 | :members:
6 | :special-members:
7 | :member-order: bysource
8 | :exclude-members: __init__, forward
9 |
10 |
--------------------------------------------------------------------------------
/doc/sphinx/source/dynonet.rst:
--------------------------------------------------------------------------------
1 | dynoNet
2 | ==================================================
3 |
4 | -----------------------
5 | Linear dynamical blocks
6 | -----------------------
7 |
8 | .. automodule:: torchid.dynonet.module.lti
9 | :members:
10 | :special-members:
11 | :member-order: bysource
12 | :exclude-members: __init__, forward
13 |
14 | ------------------------
15 | Static non-linear blocks
16 | ------------------------
17 |
18 | .. automodule:: torchid.dynonet.module.static
19 | :members:
20 | :special-members:
21 | :member-order: bysource
22 | :exclude-members: __init__
23 |
--------------------------------------------------------------------------------
/doc/sphinx/source/index.rst:
--------------------------------------------------------------------------------
1 | .. pytorch-ident documentation master file, created by
2 | sphinx-quickstart on Fri Apr 10 01:50:34 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | pytorch-ident
7 | ===========================
8 | ------------------------------------------------
9 | System Identification with PyTorch.
10 | ------------------------------------------------
11 |
12 | pytorch-ident is an open-source python library for system identification
13 | with deep learning tools based on the PyTorch framework.
14 | The project is hosted on this `GitHub repository <https://github.com/forgi86/pytorch-ident>`_.
15 |
16 | Requirements
17 | ------------
18 |
19 | In order to run pytorch-ident, you need a python 3.x environment and the following packages:
20 |
21 | * `PyTorch <https://pytorch.org>`_
22 | * `numpy <https://numpy.org>`_
23 | * `scipy <https://scipy.org>`_
24 | * `matplotlib <https://matplotlib.org>`_
25 |
26 | Installation
27 | ------------
28 | 1. Copy or clone the pytorch-ident project into a local folder. For instance, run
29 |
30 | .. code-block:: bash
31 |
32 | git clone https://github.com/forgi86/pytorch-ident.git
33 |
34 | from the command line
35 |
36 | 2. Navigate to your local pytorch-ident folder
37 |
38 | .. code-block:: bash
39 |
40 | cd LOCAL_FOLDER
41 |
42 | where LOCAL_FOLDER is the folder where you have just downloaded the code in step 1
43 |
44 | 3. Install pytorch-ident in your python environment: run
45 |
46 | .. code-block:: bash
47 |
48 | pip install .
49 |
50 | from the command line, in the working folder LOCAL_FOLDER
51 |
52 |
53 | Getting started
54 | ---------------
55 | The best way to get started with pytorch-ident is to run the
56 | `examples <https://github.com/forgi86/pytorch-ident/tree/master/examples>`_.
57 |
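As a flavor of the workflow, here is a condensed sketch of the RLC example, in which a SISO
transfer function is fitted by gradient descent (the random tensors below only stand in for
the dataset loaded in the example script):

.. code-block:: python

   import torch
   from torchid.dynonet.module.lti import SisoLinearDynamicalOperator

   G = SisoLinearDynamicalOperator(n_b=2, n_a=2)   # second-order IIR G-block
   optimizer = torch.optim.Adam(G.parameters(), lr=1e-4)

   u = torch.randn(1, 4000, 1)       # input, shape (batch, time, channel)
   y_meas = torch.randn(1, 4000, 1)  # measured output (placeholder data)

   for itr in range(2000):
       optimizer.zero_grad()
       y_hat = G(u)                              # simulate the model
       loss = torch.mean((y_meas - y_hat)**2)    # MSE fit loss
       loss.backward()
       optimizer.step()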
58 | API Documentation
59 | ---------------------
60 | .. toctree::
61 | :maxdepth: 2
62 |
63 | datasets
64 | statespace_dt
65 | statespace_ct
66 | dynonet
67 | metrics
68 |
69 |
70 |
71 | Indices and tables
72 | ==================
73 |
74 | * :ref:`genindex`
75 | * :ref:`modindex`
76 | * :ref:`search`
77 |
--------------------------------------------------------------------------------
/doc/sphinx/source/metrics.rst:
--------------------------------------------------------------------------------
1 | -------------------
2 | Performance metrics
3 | -------------------
4 |
5 | .. automodule:: torchid.metrics
6 | :members:
7 | :special-members:
8 | :member-order: bysource
9 |
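For instance, the metrics can be applied directly to NumPy arrays of measured and simulated
outputs, as done in the example scripts (a short sketch with synthetic data; the ``[0]``
selects the first output channel):

.. code-block:: python

   import numpy as np
   import torchid.metrics as metrics

   y_meas = np.random.randn(1000, 1)                  # measured output
   y_sim = y_meas + 0.1 * np.random.randn(1000, 1)    # simulated model output

   e_rms = metrics.rmse(y_meas, y_sim)[0]        # root mean square error
   fit = metrics.fit_index(y_meas, y_sim)[0]     # fit index (%)
   r_sq = metrics.r_squared(y_meas, y_sim)[0]    # R-squared
   print(f"RMSE: {e_rms:.3f} FIT: {fit:.1f}% R_sq: {r_sq:.3f}")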
--------------------------------------------------------------------------------
/doc/sphinx/source/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | numpy
3 | scipy
4 | pandas
5 |
--------------------------------------------------------------------------------
/doc/sphinx/source/statespace_ct.rst:
--------------------------------------------------------------------------------
1 | Continuous-time state-space identification
2 | ==========================================
3 |
4 |
5 | Continuous-time models
6 | ----------------------
7 |
8 | .. automodule:: torchid.ss.ct.models
9 | :members:
10 | :special-members:
11 | :member-order: bysource
12 | :exclude-members: __init__, forward
13 |
14 | Continuous-time simulators
15 | --------------------------
16 |
17 | .. automodule:: torchid.ss.ct.simulators
18 | :members:
19 | :special-members:
20 | :member-order: bysource
21 | :exclude-members: __init__, forward
--------------------------------------------------------------------------------
/doc/sphinx/source/statespace_dt.rst:
--------------------------------------------------------------------------------
1 | Discrete-time state-space identification
2 | ========================================
3 |
4 |
5 | Discrete-time models
6 | --------------------
7 | .. automodule:: torchid.ss.dt.models
8 | :members:
9 | :special-members:
10 | :member-order: bysource
11 | :exclude-members: __init__, forward
12 |
13 |
14 | Discrete-time simulators
15 | ------------------------
16 |
17 | .. automodule:: torchid.ss.dt.simulator
18 | :members:
19 | :special-members:
20 | :member-order: bysource
21 | :exclude-members: __init__, forward
--------------------------------------------------------------------------------
/examples/dynonet/README.txt:
--------------------------------------------------------------------------------
1 | RLC: Simulated linear RLC series circuit
2 | WH2009: Wiener-Hammerstein system from www.nonlinearbenchmark.org
3 |
--------------------------------------------------------------------------------
/examples/dynonet/RLC/RLC_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 |
9 |
10 | if __name__ == '__main__':
11 |
12 | # In[Set seed for reproducibility]
13 | np.random.seed(0)
14 | torch.manual_seed(0)
15 |
16 | # In[Settings]
17 | model_name = 'IIR'
18 | add_noise = False
19 | lr = 1e-4
20 | num_iter = 20000
21 | test_freq = 100
22 | n_batch = 1
23 | n_b = 2
24 | n_a = 2
25 |
26 | # In[Column names in the dataset]
27 | COL_T = ['time']
28 | COL_X = ['V_C', 'I_L']
29 | COL_U = ['V_IN']
30 | COL_Y = ['V_C']
31 |
32 | # In[Load dataset]
33 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
34 | t = np.array(df_X[COL_T], dtype=np.float32)
35 | y = np.array(df_X[COL_Y], dtype=np.float32)
36 | x = np.array(df_X[COL_X], dtype=np.float32)
37 | u = np.array(df_X[COL_U], dtype=np.float32)
38 |
39 | # In[Add measurement noise]
40 | std_noise_V = add_noise * 10.0
41 | std_noise_I = add_noise * 1.0
42 | std_noise = np.array([std_noise_V, std_noise_I])
43 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
44 | x_noise = x_noise.astype(np.float32)
45 |
46 | # In[Output]
47 | y_noise = np.copy(x_noise[:, [0]])
48 | y_nonoise = np.copy(x[:, [0]])
49 |
50 |
51 | # Prepare data
52 | u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
53 | y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
54 | y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)
55 |
56 | # In[Second-order dynamical system custom defined]
57 | G = SisoLinearDynamicalOperator(n_b, n_a)
58 |
59 | with torch.no_grad():
60 | G.b_coeff[0, 0, 0] = 0.01
61 | G.b_coeff[0, 0, 1] = 0.0
62 |
63 | G.a_coeff[0, 0, 0] = -0.9
64 | G.a_coeff[0, 0, 1] = 0.01
65 |
66 | # In[Setup optimizer]
67 | optimizer = torch.optim.Adam([
68 | {'params': G.parameters(), 'lr': lr},
69 | ], lr=lr)
70 |
71 | # In[Train]
72 | LOSS = []
73 | start_time = time.time()
74 | for itr in range(0, num_iter):
75 |
76 | optimizer.zero_grad()
77 |
78 | # Simulate
79 | y_hat = G(u_torch)
80 |
81 | # Compute fit loss
82 | err_fit = y_meas_torch - y_hat
83 | loss_fit = torch.mean(err_fit**2)
84 | loss = loss_fit
85 |
86 | LOSS.append(loss.item())
87 | if itr % test_freq == 0:
88 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
89 |
90 | # Optimize
91 | loss.backward()
92 | optimizer.step()
93 |
94 | train_time = time.time() - start_time
95 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
96 |
97 | # In[Save model]
98 |
99 | model_folder = os.path.join("models", model_name)
100 | if not os.path.exists(model_folder):
101 | os.makedirs(model_folder)
102 | torch.save(G.state_dict(), os.path.join(model_folder, "G.pt"))
103 | # In[Detach and reshape]
104 | y_hat = y_hat.detach().numpy()[0, ...]
105 | # In[Plot]
106 | plt.figure()
107 | plt.plot(t, y_nonoise, 'k', label="$y$")
108 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
109 | plt.plot(t, y_hat, 'b', label="$\hat y$")
110 | plt.legend()
111 |
112 | plt.figure()
113 | plt.plot(LOSS)
114 | plt.grid(True)
115 |
116 |
117 |
--------------------------------------------------------------------------------
/examples/dynonet/RLC/RLC_train_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoFirLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 |
9 |
10 | if __name__ == '__main__':
11 |
12 | # In[Set seed for reproducibility]
13 | np.random.seed(0)
14 | torch.manual_seed(0)
15 |
16 | # In[Settings]
17 | add_noise = False
18 | lr = 1e-4
19 | num_iter = 20000
20 | test_freq = 100
21 | n_batch = 1
22 | n_b = 256 # number of FIR coefficients
23 |
24 | # In[Column names in the dataset]
25 | COL_T = ['time']
26 | COL_X = ['V_C', 'I_L']
27 | COL_U = ['V_IN']
28 | COL_Y = ['V_C']
29 |
30 | # In[Load dataset]
31 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
32 | t = np.array(df_X[COL_T], dtype=np.float32)
33 | y = np.array(df_X[COL_Y], dtype=np.float32)
34 | x = np.array(df_X[COL_X], dtype=np.float32)
35 | u = np.array(df_X[COL_U], dtype=np.float32)
36 |
37 | # In[Add measurement noise]
38 | std_noise_V = add_noise * 10.0
39 | std_noise_I = add_noise * 1.0
40 | std_noise = np.array([std_noise_V, std_noise_I])
41 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
42 | x_noise = x_noise.astype(np.float32)
43 |
44 | # In[Output]
45 | y_noise = np.copy(x_noise[:, [0]])
46 | y_nonoise = np.copy(x[:, [0]])
47 |
48 |
49 | # Prepare data
50 | u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
51 | y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
52 | y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)
53 |
54 | # In[Second-order dynamical system custom defined]
55 | G = SisoFirLinearDynamicalOperator(n_b)
56 |
57 |
58 | # In[Setup optimizer]
59 | optimizer = torch.optim.Adam([
60 | {'params': G.parameters(), 'lr': lr},
61 | ], lr=lr)
62 |
63 | # In[Train]
64 | LOSS = []
65 | start_time = time.time()
66 | for itr in range(0, num_iter):
67 |
68 | optimizer.zero_grad()
69 |
70 | # Simulate
71 | y_hat = G(u_torch)
72 |
73 | # Compute fit loss
74 | err_fit = y_meas_torch - y_hat
75 | loss_fit = torch.mean(err_fit**2)
76 | loss = loss_fit
77 |
78 | LOSS.append(loss.item())
79 | if itr % test_freq == 0:
80 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
81 |
82 | # Optimize
83 | loss.backward()
84 | optimizer.step()
85 |
86 | train_time = time.time() - start_time
87 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
88 |
89 | # In[Detach and reshape]
90 | y_hat = y_hat.detach().numpy()[0, ...]
91 |
92 | # In[Plot]
93 | plt.figure()
94 | plt.plot(t, y_nonoise, 'k', label="$y$")
95 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
96 | plt.plot(t, y_hat, 'b', label="$\hat y$")
97 | plt.legend()
98 |
99 | plt.figure()
100 | plt.plot(LOSS)
101 | plt.grid(True)
102 |
103 |
104 | # In[FIR coefficients]
105 | g_pars, _ = G.get_tfdata()
106 | fig, ax = plt.subplots()
107 | ax.plot(g_pars)
108 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/README.txt:
--------------------------------------------------------------------------------
1 | * download_dataset.py: download the Wiener-Hammerstein System (2009) benchmark from www.nonlinearbenchmark.org
2 | * WH_train.py: Train a WH model for the system, exploiting the differentiable linear dynamical block (aka G-block)
3 | introduced in "dynoNet: a neural network architecture for learning dynamical systems"
4 | * WH_test.py: Test the model above
5 | * WH_train_FIR.py: Train a WH model for the system, using FIR blocks for the linear dynamical sections
6 | * WH_test_FIR.py: Test the model above
7 |
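A typical workflow, run from this folder (trained model parameters are saved under models/):

    python download_dataset.py
    python WH_train.py
    python WH_test.py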
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH2009_train_binary.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
6 | from torchid.dynonet.module.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import examples.util.metrics
10 |
11 |
12 | def normal_standard_cdf(val):
13 | return 1/2 * (1 + torch.erf(val/np.sqrt(2)))
14 |
15 | # In[Main]
16 | if __name__ == '__main__':
17 |
18 | # In[Set seed for reproducibility]
19 | np.random.seed(0)
20 | torch.manual_seed(0)
21 |
22 | # In[Settings]
23 | lr = 1e-4
24 | num_iter = 200000
25 | msg_freq = 100
26 | n_skip = 5000
27 | n_fit = 20000
28 | decimate = 1
29 | n_batch = 1
30 | n_b = 3
31 | n_a = 3
32 | C = 0.2 # threshold
33 | model_name = "model_WH_comparisons"
34 |
35 | # In[Column names in the dataset]
36 | COL_F = ['fs']
37 | COL_U = ['uBenchMark']
38 | COL_Y = ['yBenchMark']
39 |
40 | # In[Load dataset]
41 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
42 |
43 | # Extract data
44 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
45 | u = np.array(df_X[COL_U], dtype=np.float32)
46 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
47 | N = y.size
48 | ts = 1/fs
49 | t = np.arange(N)*ts
50 |
51 | # In[Compute v signal]
52 | v = np.empty(y.shape, dtype=np.float32)
53 | v[y > C] = 1.0
54 | v[y <= C] = -1.0
55 |
56 | # In[Fit data]
57 | v_fit = v[0:n_fit:decimate]
58 | y_fit = y[0:n_fit:decimate]
59 | u_fit = u[0:n_fit:decimate]
60 | t_fit = t[0:n_fit:decimate]
61 |
62 | # In[Prepare training tensors]
63 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
64 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
65 | v_fit_torch = torch.tensor(v_fit[None, :, :], dtype=torch.float)
66 |
67 | # In[Prepare model]
68 | G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
69 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
70 | G2 = SisoLinearDynamicalOperator(n_b, n_a)
71 |
72 | sigma_hat = torch.tensor(0.1, requires_grad=True) # torch.randn(1, requires_grad = True)
73 |
74 | def model(u_in):
75 | y1_lin = G1(u_fit_torch)
76 | y1_nl = F_nl(y1_lin)
77 | y_hat = G2(y1_nl)
78 | return y_hat, y1_nl, y1_lin
79 |
80 | # In[Setup optimizer]
81 | optimizer = torch.optim.Adam([
82 | {'params': G1.parameters(), 'lr': lr},
83 | {'params': G2.parameters(), 'lr': lr},
84 | {'params': F_nl.parameters(), 'lr': lr},
85 | {'params': sigma_hat, 'lr': 1e-4},
86 | ], lr=lr)
87 |
88 |
89 | # In[Train]
90 | LOSS = []
91 | SIGMA = []
92 | start_time = time.time()
93 | for itr in range(0, num_iter):
94 |
95 | optimizer.zero_grad()
96 |
97 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
98 | y_Phi_hat = normal_standard_cdf(v_fit_torch*((y_hat-C)/sigma_hat)) #: #(1 + torch.erf(-v_fit_torch * (C - y_hat) / torch.abs(sigma_hat+1e-12) / np.sqrt(2))) / 2 # Cumulative
99 | y_hat_log = y_Phi_hat.log()
100 | loss_train = - y_hat_log.mean()
101 |
102 | LOSS.append(loss_train.item())
103 | SIGMA.append(sigma_hat.item())
104 |
105 | if itr % msg_freq == 0:
106 | with torch.no_grad():
107 | RMSE = torch.sqrt(loss_train)
108 | print(f'Iter {itr} | Fit Loss {loss_train:.5f} sigma_hat:{sigma_hat:.5f} ')
109 |
110 | loss_train.backward()
111 | optimizer.step()
112 |
113 | train_time = time.time() - start_time
114 | print(f"\nTrain time: {train_time:.2f}")
115 |
116 | # In[Save model]
117 | model_folder = os.path.join("models", model_name)
118 | if not os.path.exists(model_folder):
119 | os.makedirs(model_folder)
120 |
121 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pt"))
122 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pt"))
123 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pt"))
124 |
125 |
126 | # In[Simulate one more time]
127 | with torch.no_grad():
128 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
129 |
130 | # In[Detach]
131 | y_hat = y_hat.detach().numpy()[0, :, :]
132 | y1_lin = y1_lin.detach().numpy()[0, :, :]
133 | y1_nl = y1_nl.detach().numpy()[0, :, :]
134 |
135 | # In[Plot]
136 | plt.figure()
137 | plt.plot(t_fit, y_fit, 'k', label="$y$")
138 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
139 | plt.legend()
140 |
141 | # In[Plot loss]
142 | plt.figure()
143 | plt.plot(LOSS)
144 | plt.grid(True)
145 |
146 | # In[Plot sigma]
147 | plt.figure()
148 | plt.plot(SIGMA)
149 | plt.grid(True)
150 |
151 | # In[Plot static non-linearity]
152 |
153 | y1_lin_min = np.min(y1_lin)
154 | y1_lin_max = np.max(y1_lin)
155 |
156 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
157 |
158 | with torch.no_grad():
159 | out_nl = F_nl(torch.as_tensor(in_nl))
160 |
161 | plt.figure()
162 | plt.plot(in_nl, out_nl, 'b')
163 | plt.plot(in_nl, out_nl, 'b')
164 | #plt.plot(y1_lin, y1_nl, 'b*')
165 | plt.xlabel('Static non-linearity input (-)')
166 | plt.ylabel('Static non-linearity output (-)')
167 | plt.grid(True)
168 |
169 | # In[RMSE]
170 | e_rms = examples.util.metrics.error_rmse(y_hat, y_fit)[0]
171 | print(f"RMSE: {e_rms:.2f}") # target: 1mv
172 |
173 | # In[v_hat]
174 | v_hat = np.empty_like(y_hat)
175 | v_hat[y_hat > C] = 1.0
176 | v_hat[y_hat <= C] = -1.0
177 |
178 | acc = np.sum(v_hat == v_fit)/(v_fit.shape[0])
179 | print(f"Binary accuracy: {acc:.3f}")
180 |
181 |
182 |
183 |
184 |
185 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH2009_train_quantized.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
6 | from torchid.dynonet.module.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import examples.util.metrics
10 |
11 |
12 | def normal_standard_cdf(val):
13 | """Returns the value of the cumulative distribution function for a standard normal variable"""
14 | return 1/2 * (1 + torch.erf(val/np.sqrt(2)))
15 |
16 |
17 | # In[Main]
18 | if __name__ == '__main__':
19 |
20 | # In[Set seed for reproducibility]
21 | np.random.seed(0)
22 | torch.manual_seed(0)
23 |
24 | # In[Settings]
25 | lr = 1e-4
26 | num_iter = 200000
27 | msg_freq = 100
28 | n_skip = 5000
29 | n_fit = 20000
30 | decimate = 1
31 | n_batch = 1
32 | n_b = 3
33 | n_a = 3
34 |
35 | meas_intervals = np.array([-1.0, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0], dtype=np.float32)
36 | meas_intervals_full = np.r_[-1000, meas_intervals, 1000]
37 |
38 | model_name = "model_WH_digit"
39 |
40 | # In[Column names in the dataset]
41 | COL_F = ['fs']
42 | COL_U = ['uBenchMark']
43 | COL_Y = ['yBenchMark']
44 |
45 | # In[Load dataset]
46 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
47 |
48 | # Extract data
49 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
50 | u = np.array(df_X[COL_U], dtype=np.float32)
51 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
52 | N = y.size
53 | ts = 1/fs
54 | t = np.arange(N)*ts
55 |
56 | # In[Compute v signal]
57 | v = np.digitize(y, bins=meas_intervals)
58 | bins = meas_intervals_full[np.c_[v, v+1]] # bins of the measurement
59 |
60 | # In[Fit data]
61 | bins_fit = bins[0:n_fit:decimate, :]
62 | v_fit = v[0:n_fit:decimate]
63 | y_fit = y[0:n_fit:decimate]
64 | u_fit = u[0:n_fit:decimate]
65 | t_fit = t[0:n_fit:decimate]
66 |
67 | # In[Prepare training tensors]
68 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
69 | bins_fit_torch = torch.tensor(bins_fit[None, :, :], dtype=torch.float, requires_grad=False)
70 | v_fit_torch = torch.tensor(v_fit[None, :, :], dtype=torch.float)
71 |
72 | # In[Prepare model]
73 | G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
74 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
75 | G2 = SisoLinearDynamicalOperator(n_b, n_a)
76 |
77 | log_sigma_hat = torch.tensor(np.log(1.0), requires_grad=True) # torch.randn(1, requires_grad = True)
78 |
79 | def model(u_in):
80 | y1_lin = G1(u_fit_torch)
81 | y1_nl = F_nl(y1_lin)
82 | y_hat = G2(y1_nl)
83 | return y_hat, y1_nl, y1_lin
84 |
85 | # In[Setup optimizer]
86 | optimizer = torch.optim.Adam([
87 | {'params': G1.parameters(), 'lr': lr},
88 | {'params': G2.parameters(), 'lr': lr},
89 | {'params': F_nl.parameters(), 'lr': lr},
90 | {'params': log_sigma_hat, 'lr': 2e-5},
91 | ], lr=lr)
92 |
93 |
94 | # In[Train]
95 | LOSS = []
96 | SIGMA = []
97 | start_time = time.time()
98 | #num_iter = 20
99 | for itr in range(0, num_iter):
100 |
101 | optimizer.zero_grad()
102 |
103 | sigma_hat = torch.exp(log_sigma_hat)
104 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
105 | Phi_hat = normal_standard_cdf((bins_fit_torch - y_hat)/(sigma_hat + 1e-6))
106 | y_Phi_hat = Phi_hat[..., [1]] - Phi_hat[..., [0]]
107 | y_hat_log = y_Phi_hat.log()
108 | loss_train = - y_hat_log.mean()
109 |
110 | LOSS.append(loss_train.item())
111 | SIGMA.append(sigma_hat.item())
112 |
113 | if itr % msg_freq == 0:
114 | with torch.no_grad():
115 | pass
116 | #RMSE = torch.sqrt(loss_train)
117 | print(f'Iter {itr} | Fit Loss {loss_train:.5f} sigma_hat:{sigma_hat:.5f}')
118 |
119 | loss_train.backward()
120 | optimizer.step()
121 |
122 | train_time = time.time() - start_time
123 | print(f"\nTrain time: {train_time:.2f}")
124 |
125 | # In[Save model]
126 | model_folder = os.path.join("models", model_name)
127 | if not os.path.exists(model_folder):
128 | os.makedirs(model_folder)
129 |
130 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pt"))
131 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pt"))
132 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pt"))
133 |
134 | # In[Simulate one more time]
135 | with torch.no_grad():
136 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
137 |
138 | # In[Detach]
139 | y_hat = y_hat.detach().numpy()[0, :, :]
140 | y1_lin = y1_lin.detach().numpy()[0, :, :]
141 | y1_nl = y1_nl.detach().numpy()[0, :, :]
142 |
143 | # In[Plot]
144 | plt.figure()
145 | plt.plot(t_fit, y_fit, 'k', label="$y$")
146 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
147 | plt.legend()
148 |
149 | # In[Plot loss]
150 | plt.figure()
151 | plt.plot(LOSS)
152 | plt.grid(True)
153 |
154 | # In[Plot sigma]
155 | plt.figure()
156 | plt.plot(SIGMA)
157 | plt.grid(True)
158 |
159 | # In[Plot static non-linearity]
160 |
161 | y1_lin_min = np.min(y1_lin)
162 | y1_lin_max = np.max(y1_lin)
163 |
164 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max - y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
165 |
166 | with torch.no_grad():
167 | out_nl = F_nl(torch.as_tensor(in_nl))
168 |
169 | plt.figure()
170 | plt.plot(in_nl, out_nl, 'b')
171 | plt.plot(in_nl, out_nl, 'b')
172 | #plt.plot(y1_lin, y1_nl, 'b*')
173 | plt.xlabel('Static non-linearity input (-)')
174 | plt.ylabel('Static non-linearity output (-)')
175 | plt.grid(True)
176 |
177 | # In[Plot]
178 | e_rms = examples.util.metrics.error_rmse(y_hat, y_fit)[0]
179 | print(f"RMSE: {e_rms:.2f}") # target: 1mv
180 |
181 |
182 |
183 |
184 |
185 |
186 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import torch
5 | import matplotlib
6 | import matplotlib.pyplot as plt
7 | import control
8 | import torchid.metrics as metrics
9 | from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
10 | from torchid.dynonet.module.static import SisoStaticNonLinearity
11 |
12 |
13 | # In[Main]
14 | if __name__ == '__main__':
15 |
16 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
17 | # In[Settings]
18 | model_name = 'model_WH'
19 |
20 | # Settings
21 | n_b = 8
22 | n_a = 8
23 |
24 | # Column names in the dataset
25 | COL_F = ['fs']
26 | COL_U = ['uBenchMark']
27 | COL_Y = ['yBenchMark']
28 |
29 | # Load dataset
30 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
31 |
32 | # Extract data
33 | y_meas = np.array(df_X[COL_Y], dtype=np.float32)
34 | u = np.array(df_X[COL_U], dtype=np.float32)
35 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32).item()
36 | N = y_meas.size
37 | ts = 1/fs
38 | t = np.arange(N)*ts
39 |
40 | t_fit_start = 0
41 | t_fit_end = 100000
42 | t_test_start = 100000
43 | t_test_end = 188000
44 | t_skip = 1000 # skip for statistics
45 |
46 | # In[Instantiate models]
47 |
48 | # Create models
49 | G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
50 | G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=0)
51 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
52 |
53 | model_folder = os.path.join("models", model_name)
54 | # Create model parameters
55 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pt")))
56 | F_nl.load_state_dict(torch.load(os.path.join(model_folder, "F_nl.pt")))
57 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pt")))
58 |
59 | # In[Predict]
60 |
61 | u_torch = torch.tensor(u[None, :, :])
62 | y1_lin = G1(u_torch)
63 | y1_nl = F_nl(y1_lin)
64 | y_hat = G2(y1_nl)
65 |
66 | # In[Detach]
67 | y_hat = y_hat.detach().numpy()[0, :, :]
68 | y1_lin = y1_lin.detach().numpy()[0, :, :]
69 | y1_nl = y1_nl.detach().numpy()[0, :, :]
70 |
71 | # In[Plot]
72 | plt.figure()
73 | plt.plot(t, y_meas, 'k', label="$y$")
74 | plt.plot(t, y_hat, 'b', label="$\hat y$")
75 | plt.plot(t, y_meas - y_hat, 'r', label="$e$")
76 | plt.grid(True)
77 | plt.xlabel('Time (s)')
78 | plt.ylabel('Voltage (V)')
79 | plt.legend(loc='upper right')
80 | plt.savefig('WH_fit.pdf')
81 |
82 | # In[Inspect linear model]
83 |
84 | n_imp = 128
85 | G1_num, G1_den = G1.get_tfdata()
86 | G1_sys = control.TransferFunction(G1_num, G1_den, ts)
87 | plt.figure()
88 | plt.title("$G_1$ impulse response")
89 | _, y_imp = control.impulse_response(G1_sys, np.arange(n_imp) * ts)
90 | # plt.plot(G1_num)
91 | plt.plot(y_imp)
92 | plt.savefig(os.path.join("models", model_name, "G1_imp.pdf"))
93 | plt.figure()
94 | mag_G1, phase_G1, omega_G1 = control.bode(G1_sys, omega_limits=[1e2, 1e5])
95 | plt.suptitle("$G_1$ bode plot")
96 | plt.savefig(os.path.join("models", model_name, "G1_bode.pdf"))
97 |
98 | # G2_b = G2.G.weight.detach().numpy()[0, 0, ::-1]
99 | G2_num, G2_den = G2.get_tfdata()
100 | G2_sys = control.TransferFunction(G2_num, G2_den, ts)
101 | plt.figure()
102 | plt.title("$G_2$ impulse response")
103 | _, y_imp = control.impulse_response(G2_sys, np.arange(n_imp) * ts)
104 | plt.plot(y_imp)
105 | plt.savefig(os.path.join("models", model_name, "G2_imp.pdf"))
106 | plt.figure()
107 | mag_G2, phase_G2, omega_G2 = control.bode(G2_sys, omega_limits=[1e2, 1e5])
108 | plt.suptitle("$G_2$ bode plot")
109 | plt.savefig(os.path.join("models", model_name, "G2_bode.pdf"))
110 |
111 | # In[Inspect static non-linearity]
112 |
113 | y1_lin_min = np.min(y1_lin)
114 | y1_lin_max = np.max(y1_lin)
115 |
116 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
117 |
118 | with torch.no_grad():
119 | out_nl = F_nl(torch.as_tensor(in_nl))
120 |
121 | plt.figure()
122 | plt.plot(in_nl, out_nl, 'b')
123 | plt.plot(in_nl, out_nl, 'b')
124 | plt.xlabel('Static non-linearity input (-)')
125 | plt.ylabel('Static non-linearity output (-)')
126 | plt.grid(True)
127 |
128 | # In[Metrics]
129 | idx_test = range(t_test_start + t_skip, t_test_end)
130 | e_rms = 1000 * metrics.rmse(y_meas[idx_test], y_hat[idx_test])[0]
131 | fit_idx = metrics.fit_index(y_meas[idx_test], y_hat[idx_test])[0]
132 | r_sq = metrics.r_squared(y_meas[idx_test], y_hat[idx_test])[0]
133 |
134 | print(f"RMSE: {e_rms:.1f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.4f}")
135 |
136 |
137 | # In[Plot for paper]
138 |
139 | t_test_start = 140000
140 | len_plot = 1000
141 |
142 | plt.figure(figsize=(4, 3))
143 | plt.plot(t[t_test_start:t_test_start+len_plot], y_meas[t_test_start:t_test_start+len_plot], 'k', label="$\mathbf{y}^{\mathrm{meas}}$")
144 | plt.plot(t[t_test_start:t_test_start+len_plot], y_hat[t_test_start:t_test_start+len_plot], 'b--', label="$\mathbf{y}$")
145 | plt.plot(t[t_test_start:t_test_start+len_plot], y_meas[t_test_start:t_test_start+len_plot] - y_hat[t_test_start:t_test_start+len_plot], 'r', label="$\mathbf{e}$")
146 | plt.grid(True)
147 | plt.xlabel('Time (s)')
148 | plt.ylabel('Voltage (V)')
149 | plt.legend(loc='upper right')
150 | plt.tight_layout()
151 | plt.savefig('WH_timetrace.pdf')
152 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH_test_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoFirLinearDynamicalOperator
6 | from torchid.dynonet.module.static import SisoStaticNonLinearity
7 |
8 | import matplotlib as mpl
9 | import matplotlib.pyplot as plt
10 | import control
11 | import torchid.metrics as metrics
12 |
13 |
14 | if __name__ == '__main__':
15 |
16 | mpl.rc('text', usetex=True)
17 | mpl.rcParams['axes.grid'] = True
18 |
19 | model_name = 'model_WH_FIR'
20 |
21 | # Settings
22 | n_b = 128
23 |
24 | # Column names in the dataset
25 | COL_F = ['fs']
26 | COL_U = ['uBenchMark']
27 | COL_Y = ['yBenchMark']
28 |
29 | # Load dataset
30 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
31 |
32 | # Extract data
33 | y_meas = np.array(df_X[COL_Y], dtype=np.float32)
34 | u = np.array(df_X[COL_U], dtype=np.float32)
35 | fs = np.array(df_X[COL_F].iloc[0], dtype = np.float32).item()
36 | N = y_meas.size
37 | ts = 1/fs
38 | t = np.arange(N)*ts
39 |
40 | t_fit_start = 0
41 | t_fit_end = 100000
42 | t_test_start = 100000
43 | t_test_end = 188000
44 | t_skip = 1000
45 |
46 | # In[Instantiate models]
47 |
48 | # Create models
49 | G1 = SisoFirLinearDynamicalOperator(n_b=n_b)
50 | G2 = SisoFirLinearDynamicalOperator(n_b=n_b)
51 | F_nl = SisoStaticNonLinearity()
52 |
53 | model_folder = os.path.join("models", model_name)
54 | # Create model parameters
55 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pt")))
56 | F_nl.load_state_dict(torch.load(os.path.join(model_folder, "F_nl.pt")))
57 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pt")))
58 |
59 | # In[Predict]
60 |
61 | u_torch = torch.tensor(u[None, :, :])
62 | y1_lin = G1(u_torch)
63 | y1_nl = F_nl(y1_lin)
64 | y_hat = G2(y1_nl)
65 |
66 | # In[Detach]
67 | y_hat = y_hat.detach().numpy()[0, :, :]
68 | y1_lin = y1_lin.detach().numpy()[0, :, :]
69 | y1_nl = y1_nl.detach().numpy()[0, :, :]
70 |
71 | # In[Plot]
72 | plt.figure()
73 | plt.plot(t, y_meas, 'k', label="$y$")
74 | plt.plot(t, y_hat, 'b', label="$\hat y$")
75 | plt.plot(t, y_meas - y_hat, 'r', label="$e$")
76 | plt.legend(loc='upper left')
77 |
78 | # In[Inspect linear model]
79 |
80 | n_imp = n_b
81 | G1_num, G1_den = G1.get_tfdata()
82 | G1_sys = control.TransferFunction(G1_num, G1_den, ts)
83 | plt.figure()
84 | plt.title("$G_1$ impulse response")
85 | _, y_imp = control.impulse_response(G1_sys, np.arange(n_imp) * ts)
86 | # plt.plot(G1_num)
87 | plt.plot(y_imp)
88 | plt.savefig(os.path.join("models", model_name, "G1_imp.pdf"))
89 | plt.figure()
90 | mag_G1, phase_G1, omega_G1 = control.bode(G1_sys, omega_limits=[1e2, 1e5])
91 | plt.suptitle("$G_1$ bode plot")
92 | plt.savefig(os.path.join("models", model_name, "G1_bode.pdf"))
93 |
94 |
95 | #G2_b = G2.G.weight.detach().numpy()[0, 0, ::-1]
96 | G2_num, G2_den = G2.get_tfdata()
97 | G2_sys = control.TransferFunction(G2_num, G2_den, ts)
98 | plt.figure()
99 | plt.title("$G_2$ impulse response")
100 | _, y_imp = control.impulse_response(G2_sys, np.arange(n_imp) * ts)
101 | plt.plot(y_imp)
102 | plt.savefig(os.path.join("models", model_name, "G2_imp.pdf"))
103 | plt.figure()
104 | mag_G2, phase_G2, omega_G2 = control.bode(G2_sys, omega_limits=[1e2, 1e5])
105 | plt.suptitle("$G_2$ bode plot")
106 | plt.savefig(os.path.join("models", model_name, "G2_bode.pdf"))
107 |
108 | #mag_G2, phase_G2, omega_G2 = control.bode(G2_sys)
109 |
110 | # In[Inspect static non-linearity]
111 |
112 | y1_lin_min = np.min(y1_lin)
113 | y1_lin_max = np.max(y1_lin)
114 |
115 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
116 |
117 | with torch.no_grad():
118 | out_nl = F_nl(torch.as_tensor(in_nl))
119 |
120 | plt.figure()
121 | plt.plot(in_nl, out_nl, 'b')
122 | plt.plot(in_nl, out_nl, 'b')
123 | plt.xlabel('Static non-linearity input (-)')
124 | plt.ylabel('Static non-linearity output (-)')
125 | plt.grid(True)
126 |
127 | # In[Metrics]
128 | idx_test = range(t_test_start + t_skip, t_test_end)
129 | e_rms = 1000 * metrics.rmse(y_meas[idx_test], y_hat[idx_test])[0]
130 | fit_idx = metrics.fit_index(y_meas[idx_test], y_hat[idx_test])[0]
131 | r_sq = metrics.r_squared(y_meas[idx_test], y_hat[idx_test])[0]
132 |
133 | print(f"RMSE: {e_rms:.1f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.2f}")
134 |
135 |
136 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoLinearDynamicalOperator
6 | from torchid.dynonet.module.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import examples.util.metrics
10 |
11 |
12 | # In[Main]
13 | if __name__ == '__main__':
14 |
15 | # In[Set seed for reproducibility]
16 | np.random.seed(0)
17 | torch.manual_seed(0)
18 |
19 | # In[Settings]
20 | lr_ADAM = 2e-4
21 | lr_BFGS = 1e0
22 | num_iter_ADAM = 40000 # ADAM iterations 20000
23 | num_iter_BFGS = 0 # final BFGS iterations
24 | msg_freq = 100
25 | n_skip = 5000
26 | n_fit = 20000
27 | decimate = 1
28 | n_batch = 1
29 | n_b = 8
30 | n_a = 8
31 | model_name = "model_WH"
32 |
33 | num_iter = num_iter_ADAM + num_iter_BFGS
34 |
35 | # In[Column names in the dataset]
36 | COL_F = ['fs']
37 | COL_U = ['uBenchMark']
38 | COL_Y = ['yBenchMark']
39 |
40 | # In[Load dataset]
41 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
42 |
43 | # Extract data
44 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
45 | u = np.array(df_X[COL_U], dtype=np.float32)
46 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
47 | N = y.size
48 | ts = 1/fs
49 | t = np.arange(N)*ts
50 |
51 | # In[Fit data]
52 | y_fit = y[0:n_fit:decimate]
53 | u_fit = u[0:n_fit:decimate]
54 | t_fit = t[0:n_fit:decimate]
55 |
56 | # In[Prepare training tensors]
57 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
58 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
59 |
60 | # In[Prepare model]
61 | G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
62 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
63 | G2 = SisoLinearDynamicalOperator(n_b, n_a)
64 |
65 | def model(u_in):
66 | y1_lin = G1(u_in)
67 | y1_nl = F_nl(y1_lin)
68 | y_hat = G2(y1_nl)
69 | return y_hat, y1_nl, y1_lin
70 |
71 | # In[Setup optimizer]
72 | optimizer_ADAM = torch.optim.Adam([
73 | {'params': G1.parameters(), 'lr': lr_ADAM},
74 | {'params': G2.parameters(), 'lr': lr_ADAM},
75 | {'params': F_nl.parameters(), 'lr': lr_ADAM},
76 | ], lr=lr_ADAM)
77 |
78 | optimizer_LBFGS = torch.optim.LBFGS(list(G1.parameters()) + list(G2.parameters()) + list(F_nl.parameters()), lr=lr_BFGS)
79 |
80 |
81 | def closure():
82 | optimizer_LBFGS.zero_grad()
83 |
84 | # Simulate
85 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
86 |
87 | # Compute fit loss
88 | err_fit = y_fit_torch[:, n_skip:, :] - y_hat[:, n_skip:, :]
89 | loss = torch.mean(err_fit**2)*1000
90 |
91 | # Backward pass
92 | loss.backward()
93 | return loss
94 |
95 |
96 | # In[Train]
97 | LOSS = []
98 | start_time = time.time()
99 | for itr in range(0, num_iter):
100 |
101 | if itr < num_iter_ADAM:
102 | msg_freq = 10
103 | loss_train = optimizer_ADAM.step(closure)
104 | else:
105 | msg_freq = 10
106 | loss_train = optimizer_LBFGS.step(closure)
107 |
108 | LOSS.append(loss_train.item())
109 | if itr % msg_freq == 0:
110 | with torch.no_grad():
111 | RMSE = torch.sqrt(loss_train)
112 | print(f'Iter {itr} | Fit Loss {loss_train:.6f} | RMSE:{RMSE:.4f}')
113 |
114 | train_time = time.time() - start_time
115 | print(f"\nTrain time: {train_time:.2f}")
116 |
117 | # In[Save model]
118 | model_folder = os.path.join("models", model_name)
119 | if not os.path.exists(model_folder):
120 | os.makedirs(model_folder)
121 |
122 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pt"))
123 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pt"))
124 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pt"))
125 |
126 |
127 | # In[Simulate one more time]
128 | with torch.no_grad():
129 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
130 |
131 | # In[Detach]
132 | y_hat = y_hat.detach().numpy()[0, :, :]
133 | y1_lin = y1_lin.detach().numpy()[0, :, :]
134 | y1_nl = y1_nl.detach().numpy()[0, :, :]
135 |
136 | # In[Plot]
137 | plt.figure()
138 | plt.plot(t_fit, y_fit, 'k', label="$y$")
139 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
140 | plt.legend()
141 |
142 | # In[Plot loss]
143 | plt.figure()
144 | plt.plot(LOSS)
145 | plt.grid(True)
146 |
147 | # In[Plot static non-linearity]
148 |
149 | y1_lin_min = np.min(y1_lin)
150 | y1_lin_max = np.max(y1_lin)
151 |
152 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
153 |
154 | with torch.no_grad():
155 | out_nl = F_nl(torch.as_tensor(in_nl))
156 |
157 | plt.figure()
158 | plt.plot(in_nl, out_nl, 'b')
159 | plt.plot(in_nl, out_nl, 'b')
160 | #plt.plot(y1_lin, y1_nl, 'b*')
161 | plt.xlabel('Static non-linearity input (-)')
162 | plt.ylabel('Static non-linearity output (-)')
163 | plt.grid(True)
164 |
165 | # In[Metrics]
166 | e_rms = examples.util.metrics.error_rmse(y_hat, y_fit)[0]
167 | print(f"RMSE: {e_rms:.2f}")  # target: about 1 mV
168 |
169 |
170 |
171 |
172 |
173 |
174 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/WH_train_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.dynonet.module.lti import SisoFirLinearDynamicalOperator
6 | from torchid.dynonet.module.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import examples.util.metrics
10 |
11 | # In[Main]
12 | if __name__ == '__main__':
13 |
14 | # In[Set seed for reproducibility]
15 | np.random.seed(0)
16 | torch.manual_seed(0)
17 |
18 | # In[Settings]
19 | lr_ADAM = 1e-4
20 | lr_BFGS = 1e-1
21 | num_iter_ADAM = 100000
22 | num_iter_BFGS = 0
23 | test_freq = 100
24 | n_fit = 100000
25 | decimate = 1
26 | n_batch = 1
27 | n_b = 128
28 | model_name = "model_WH_FIR"
29 |
30 | num_iter = num_iter_ADAM + num_iter_BFGS
31 |
32 | # In[Column names in the dataset]
33 | COL_F = ['fs']
34 | COL_U = ['uBenchMark']
35 | COL_Y = ['yBenchMark']
36 |
37 | # In[Load dataset]
38 | df_X = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
39 |
40 | # Extract data
41 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
42 | u = np.array(df_X[COL_U], dtype=np.float32)
43 | fs = np.array(df_X[COL_F].iloc[0], dtype = np.float32)
44 | N = y.size
45 | ts = 1/fs
46 | t = np.arange(N)*ts
47 |
48 | # In[Fit data]
49 | y_fit = y[0:n_fit:decimate]
50 | u_fit = u[0:n_fit:decimate]
51 | t_fit = t[0:n_fit:decimate]
52 |
53 |
54 | # In[Prepare training tensors]
55 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
56 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
57 |
58 | # In[Prepare model]
59 | G1 = SisoFirLinearDynamicalOperator(n_b=n_b)
60 | F_nl = SisoStaticNonLinearity()
61 | G2 = SisoFirLinearDynamicalOperator(n_b=n_b)
62 |
63 | def model(u_in):
64 | y1_lin = G1(u_in)
65 | y1_nl = F_nl(y1_lin)
66 | y_hat = G2(y1_nl)
67 | return y_hat, y1_nl, y1_lin
68 |
69 | # In[Setup optimizer]
70 | optimizer_ADAM = torch.optim.Adam([
71 | {'params': G1.parameters(), 'lr': lr_ADAM},
72 | {'params': G2.parameters(), 'lr': lr_ADAM},
73 | {'params': F_nl.parameters(), 'lr': lr_ADAM},
74 | ], lr=lr_ADAM)
75 |
76 | optimizer_LBFGS = torch.optim.LBFGS(list(G1.parameters())
77 | + list(G2.parameters()) + list(F_nl.parameters()), lr=lr_BFGS)
78 |
79 |
80 | def closure():
81 | optimizer_LBFGS.zero_grad()
82 |
83 | # Simulate
84 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
85 |
86 | # Compute fit loss
87 | err_fit = y_fit_torch - y_hat
88 | loss = torch.mean(err_fit**2)
89 |
90 | # Backward pass
91 | loss.backward()
92 | return loss
93 |
94 |
95 | # In[Train]
96 | LOSS = []
97 | start_time = time.time()
98 | for itr in range(0, num_iter):
99 |
100 | if itr < num_iter_ADAM:
101 | test_freq = 10
102 | loss_train = optimizer_ADAM.step(closure)
103 | else:
104 | test_freq = 10
105 | loss_train = optimizer_LBFGS.step(closure)
106 |
107 | LOSS.append(loss_train.item())
108 | if itr % test_freq == 0:
109 | with torch.no_grad():
110 | RMSE = torch.sqrt(loss_train)
111 | print(f'Iter {itr} | Fit Loss {loss_train:.6f} | RMSE:{RMSE:.4f}')
112 |
113 | train_time = time.time() - start_time
114 | print(f"\nTrain time: {train_time:.2f}")
115 |
116 | # In[Save model]
117 | model_folder = os.path.join("models", model_name)
118 | if not os.path.exists(model_folder):
119 | os.makedirs(model_folder)
120 |
121 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pt"))
122 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pt"))
123 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pt"))
124 |
125 |
126 | # In[Simulate one more time]
127 | with torch.no_grad():
128 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
129 |
130 | # In[Detach]
131 | y_hat = y_hat.detach().numpy()[0, :, :]
132 | y1_lin = y1_lin.detach().numpy()[0, :, :]
133 | y1_nl = y1_nl.detach().numpy()[0, :, :]
134 |
135 | # In[Plot]
136 | plt.figure()
137 | plt.plot(t_fit, y_fit, 'k', label="$y$")
138 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
139 | plt.legend()
140 |
141 | # In[Plot loss]
142 | plt.figure()
143 | plt.plot(LOSS)
144 | plt.grid(True)
145 |
146 | # In[Plot static non-linearity]
147 |
148 | y1_lin_min = np.min(y1_lin)
149 | y1_lin_max = np.max(y1_lin)
150 |
151 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
152 |
153 | with torch.no_grad():
154 | out_nl = F_nl(torch.as_tensor(in_nl))
155 |
156 | plt.figure()
157 | plt.plot(in_nl, out_nl, 'b')
158 | plt.plot(in_nl, out_nl, 'b')
159 | #plt.plot(y1_lin, y1_nl, 'b*')
160 | plt.xlabel('Static non-linearity input (-)')
161 | plt.ylabel('Static non-linearity output (-)')
162 | plt.grid(True)
163 |
164 | # In[Metrics]
165 | e_rms = examples.util.metrics.error_rmse(y_hat, y_fit)[0]
166 | print(f"RMSE: {e_rms:.2f}")
167 |
--------------------------------------------------------------------------------
/examples/dynonet/WH2009/download_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from google_drive_downloader import GoogleDriveDownloader as gdd
3 |
4 |
5 | if __name__ == '__main__':
6 |
7 | DATA_FOLDER = 'data'
8 |
9 | # %% Make data folder if it does not exist
10 | if not os.path.exists(DATA_FOLDER):
11 | os.makedirs(DATA_FOLDER)
12 |
13 | # %% Download dataset from www.nonlinearbenchmark.com
14 | # https://drive.google.com/file/d/16ipySVfKfxkwqWmbO9Z19-VjDoC2S6hx/view?usp=sharing
15 | gdd.download_file_from_google_drive(file_id='16ipySVfKfxkwqWmbO9Z19-VjDoC2S6hx',
16 | dest_path='./data/data.zip',
17 | unzip=True)
18 |
--------------------------------------------------------------------------------
/examples/io/RLC/RLC_IO_fit_1step.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import torch.optim as optim
5 | import time
6 | import matplotlib.pyplot as plt
7 | import os
8 | import scipy.linalg
9 | from torchid.io.module.io_simulator import NeuralIOSimulator
10 | from torchid.io.module.iomodels import NeuralIOModel
11 |
12 | if __name__ == '__main__':
13 |
14 | # Set seed for reproducibility
15 | np.random.seed(0)
16 | torch.manual_seed(0)
17 |
18 | # Overall parameters
19 | t_fit = 2e-3 # fitting on t_fit ms of data
20 | n_a = 2 # autoregressive coefficients for y
21 | n_b = 2 # autoregressive coefficients for u
22 | lr = 1e-4 # learning rate
23 | num_iter = 40000 # gradient-based optimization steps
24 | test_freq = 500 # print message every test_freq iterations
25 | add_noise = False
26 |
27 | # Column names in the dataset
28 | COL_T = ['time']
29 | COL_X = ['V_C', 'I_L']
30 | COL_U = ['V_IN']
31 | COL_Y = ['V_C']
32 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
33 |
34 | # Load dataset
35 | t = np.array(df_X[COL_T], dtype=np.float32)
36 | x = np.array(df_X[COL_X], dtype=np.float32)
37 | u = np.array(df_X[COL_U], dtype=np.float32)
38 | y_var_idx = 0 # 0: voltage 1: current
39 | y = np.copy(x[:, [y_var_idx]])
40 |
41 | # Add measurement noise
42 | std_noise_V = add_noise * 10.0
43 | std_noise_I = add_noise * 1.0
44 | std_noise = np.array([std_noise_V, std_noise_I])
45 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
46 | x_noise = x_noise.astype(np.float32)
47 | y_noise = x_noise[:, [y_var_idx]]
48 |
49 | # Build fit data
50 | n_max = np.max((n_a, n_b)) # delay
51 | N = np.shape(y)[0]
52 | Ts = t[1] - t[0]
53 | n_fit = int(t_fit // Ts) # x.shape[0]
54 | u_fit = u[0:n_fit]
55 | y_fit = y[0:n_fit]
56 | y_meas_fit = y_noise[0:n_fit]
57 | phi_fit_y = scipy.linalg.toeplitz(y_meas_fit, y_meas_fit[0:n_a])[n_max - 1:-1, :]  # past outputs y[k-1], ..., y[k-n_a]
58 | phi_fit_u = scipy.linalg.toeplitz(u_fit, u_fit[0:n_b])[n_max - 1:-1, :]  # past inputs u[k-1], ..., u[k-n_b]
59 | phi_fit = np.hstack((phi_fit_y, phi_fit_u))  # full regressor matrix
60 |
61 | # Neglect initial values
62 | y_fit = y_fit[n_max:, :]
63 | y_meas_fit = y_meas_fit[n_max:, :]
64 | u_fit = u_fit[n_max:, :]
65 |
66 | # Build fit data
67 | phi_fit_torch = torch.from_numpy(phi_fit)
68 | y_meas_fit_torch = torch.from_numpy(y_meas_fit)
69 |
70 | # Setup neural model structure
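# NeuralIOModel is a NARX-type network mapping the regressor
# [y[k-1], ..., y[k-n_a], u[k-1], ..., u[k-n_b]] to a one-step-ahead prediction of y[k];
# NeuralIOSimulator wraps it to provide one-step-ahead prediction (f_onestep, used for
# training below) and free-run simulation (f_sim, used for validation).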
71 | io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64, small_init=True)
72 | io_solution = NeuralIOSimulator(io_model)
73 |
74 | # Setup optimizer
75 | optimizer = optim.Adam(io_solution.io_model.parameters(), lr=lr)
76 |
77 | LOSS = []
78 | start_time = time.time()
79 | # Training loop
80 | for itr in range(1, num_iter + 1):
81 | optimizer.zero_grad()
82 |
83 | # Perform one-step ahead prediction
84 | y_est_torch = io_solution.f_onestep(phi_fit_torch)
85 |
86 | # Compute fit loss
87 | err = y_est_torch - y_meas_fit_torch
88 | loss = torch.mean(err ** 2)
89 |
90 | # Statistics
91 | LOSS.append(loss.item())
92 | if itr % test_freq == 0:
93 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
94 |
95 | # Optimization step
96 | loss.backward()
97 | optimizer.step()
98 |
99 | train_time = time.time() - start_time # 73 seconds
100 | print(f"\nTrain time: {train_time:.2f}")
101 |
102 | # Save model
103 | if not os.path.exists("models"):
104 | os.makedirs("models")
105 | if add_noise:
106 | model_filename = "model_IO_1step_noise.pt"
107 | else:
108 | model_filename = "model_IO_1step_nonoise.pt"
109 |
110 | torch.save(io_solution.io_model.state_dict(), os.path.join("models", model_filename))
111 |
112 | # In[Validate model]
113 | t_val_start = 0
114 | t_val_end = t[-1]
115 | idx_val_start = int(t_val_start // Ts) # x.shape[0]
116 | idx_val_end = int(t_val_end // Ts) # x.shape[0]
117 |
118 | n_val = idx_val_end - idx_val_start
119 | u_val = np.copy(u[idx_val_start:idx_val_end])
120 | y_val = np.copy(y[idx_val_start:idx_val_end])
121 | y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
122 |
123 | y_seq = np.array(np.flip(y_val[0:n_a].ravel()))
124 | u_seq = np.array(np.flip(u_val[0:n_b].ravel()))
125 |
126 | # Neglect initial values
127 | y_val = y_val[n_max:, :]
128 | y_meas_val = y_meas_val[n_max:, :]
129 | u_val = u_val[n_max:, :]
130 |
131 | y_meas_val_torch = torch.tensor(y_meas_val)
132 |
133 | with torch.no_grad():
134 | y_seq_torch = torch.tensor(y_seq)
135 | u_seq_torch = torch.tensor(u_seq)
136 |
137 | u_torch = torch.tensor(u_val)
138 | y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)
139 |
140 | err_val = y_val_sim_torch - y_meas_val_torch
141 | loss_val = torch.mean((err_val) ** 2)
142 |
143 | # Plot
144 | if not os.path.exists("fig"):
145 | os.makedirs("fig")
146 | y_val_sim = np.array(y_val_sim_torch)
147 | fig, ax = plt.subplots(2, 1, sharex=True)
148 | ax[0].plot(y_val, 'b', label='True')
149 | ax[0].plot(y_val_sim, 'r', label='Sim')
150 | ax[0].legend()
151 | ax[0].grid(True)
152 |
153 | ax[1].plot(u_val, label='Input')
154 | ax[1].legend()
155 | ax[1].grid(True)
156 |
157 | if add_noise:
158 | fig_name = "RLC_IO_loss_1step_noise.pdf"
159 | else:
160 | fig_name = "RLC_IO_loss_1step_nonoise.pdf"
161 |
162 | fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
163 | ax.plot(LOSS)
164 | ax.grid(True)
165 | ax.set_ylabel("Loss (-)")
166 | ax.set_xlabel("Iteration (-)")
167 |
168 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
169 |
--------------------------------------------------------------------------------
/examples/io/RLC/RLC_IO_test.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | from torchid.io.module.io_simulator import NeuralIOSimulator
7 | from torchid.io.module.iomodels import NeuralIOModel
8 | import torchid.metrics as metrics
9 |
10 | if __name__ == '__main__':
11 |
12 | n_a = 2 # autoregressive coefficients for y
13 | n_b = 2 # autoregressive coefficients for u
14 | plot_input = False
15 |
16 | #dataset_type = 'id'
17 | dataset_type = 'test'
18 | # model_type = '16step_noise'
19 | # model_type = '32step_noise'
20 | # model_type = '64step_noise'
21 | model_type = '1step_nonoise'
22 | # model_type = '1step_noise'
23 |
24 | # Create fig folder if it does not exist
25 | if not os.path.exists("fig"):
26 | os.makedirs("fig")
27 |
28 | # Column names in the dataset
29 | COL_T = ['time']
30 | COL_X = ['V_C', 'I_L']
31 | COL_U = ['V_IN']
32 | COL_Y = ['V_C']
33 |
34 | # Load dataset
35 | dataset_filename = f"RLC_data_{dataset_type}.csv"
36 | df_X = pd.read_csv(os.path.join("data", dataset_filename))
37 | time_data = np.array(df_X[COL_T], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 | y_var_idx = 0 # 0: voltage 1: current
41 | y = np.copy(x[:, [y_var_idx]])
42 | N = np.shape(y)[0]
43 | Ts = time_data[1] - time_data[0]
44 |
45 | # Add measurement noise
46 | std_noise_V = 10.0
47 | std_noise_I = 1.0
48 | std_noise = np.array([std_noise_V, std_noise_I])
49 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
50 | x_noise = x_noise.astype(np.float32)
51 | y_noise = x_noise[:, [y_var_idx]]
52 |
53 | # Build validation data
54 | t_val_start = 0
55 | t_val_end = time_data[-1]
56 | idx_val_start = int(t_val_start//Ts)#x.shape[0]
57 | idx_val_end = int(t_val_end//Ts)#x.shape[0]
58 | n_val = idx_val_end - idx_val_start
59 | u_val = np.copy(u[idx_val_start:idx_val_end])
60 | y_val = np.copy(y[idx_val_start:idx_val_end])
61 | y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
62 | time_val = time_data[idx_val_start:idx_val_end]
63 |
64 | # Setup neural model structure and load fitted model parameters
65 | io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
66 | io_solution = NeuralIOSimulator(io_model)
67 | model_filename = f"model_IO_{model_type}.pt"
68 | io_solution.io_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
69 |
70 | # Evaluate the model in open-loop simulation against validation data
71 | y_seq = np.zeros(n_a, dtype=np.float32)
72 | u_seq = np.zeros(n_b, dtype=np.float32 )
73 | y_meas_val_torch = torch.tensor(y_meas_val)
74 | with torch.no_grad():
75 | y_seq_torch = torch.tensor(y_seq)
76 | u_seq_torch = torch.tensor(u_seq)
77 |
78 | u_torch = torch.tensor(u_val)
79 | y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)
80 |
81 | err_val = y_val_sim_torch - y_meas_val_torch
82 | loss_val = torch.mean(err_val**2)
83 |
84 | # Plot results
85 | if dataset_type == 'id':
86 | t_plot_start = 0.2e-3
87 | else:
88 | t_plot_start = 1.0e-3
89 | t_plot_end = t_plot_start + 0.3e-3
90 |
91 | idx_plot_start = int(t_plot_start//Ts)#x.shape[0]
92 | idx_plot_end = int(t_plot_end//Ts)#x.shape[0]
93 |
94 | y_val_sim = np.array(y_val_sim_torch)
95 | time_val_us = time_val*1e6
96 |
97 | if plot_input:
98 | fig, ax = plt.subplots(2, 1, sharex=True)
99 | else:
100 | fig, ax = plt.subplots(1, 1, sharex=True)
101 | ax = [ax]
102 |
103 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val[idx_plot_start:idx_plot_end], 'k', label='True')
104 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val_sim[idx_plot_start:idx_plot_end], 'r--', label='Model simulation')
105 | ax[0].legend(loc='upper right')
106 | ax[0].grid(True)
107 | ax[0].set_xlabel("Time ($\mu$s)")
108 | ax[0].set_ylabel("Capacitor voltage $v_C$ (V)")
109 | ax[0].set_ylim([-400, 400])
110 |
111 | if plot_input:
112 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k', label='Input')
113 | #ax[1].legend()
114 | ax[1].grid(True)
115 | ax[1].set_xlabel("Time ($\mu$s)")
116 | ax[1].set_ylabel("Input voltage $v_{in}$ (V)")
117 |
118 | fig_name = f"RLC_IO_{dataset_type}_{model_type}.pdf"
119 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
120 |
121 | # R-squared metrics
122 | R_sq = metrics.r_squared(y_val, y_val_sim)
123 | print(f"R-squared metrics: {R_sq}")
124 |
--------------------------------------------------------------------------------
/examples/io/RLC/RLC_generate_id.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.statespace.RLC.symbolic_RLC import fxu_ODE, fxu_ODE_nl
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 150e3
21 | std_input = 80
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip the initial samples so that the filtered input u reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u = control.forced_response(Hu, te, e, return_x=False)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
39 |
40 |
41 | def f_ODE(t,x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 | def f_ODE_mod(t,x):
46 | u = u_func(t).ravel()
47 | return fxu_ODE_nl(t, x, u)
48 |
49 |
50 | x0 = np.zeros(2)
51 | f_ODE(0.0,x0)
52 | t_span = (t_sim[0],t_sim[-1])
53 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
54 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
55 |
56 | x1 = y1.y.T
57 | x2 = y2.y.T
58 |
59 | # In[plot]
60 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
61 | ax[0].plot(t_sim, x1[:,0],'b')
62 | ax[0].plot(t_sim, x2[:,0],'r')
63 | ax[0].set_xlabel('time (s)')
64 | ax[0].set_ylabel('Capacitor voltage (V)')
65 |
66 | ax[1].plot(t_sim, x1[:,1],'b')
67 | ax[1].plot(t_sim, x2[:,1],'r')
68 | ax[1].set_xlabel('time (s)')
69 | ax[1].set_ylabel('Inductor current (A)')
70 |
71 | ax[2].plot(t_sim, u,'b')
72 | ax[2].set_xlabel('time (s)')
73 | ax[2].set_ylabel('Input voltage (V)')
74 |
75 | ax[0].grid(True)
76 | ax[1].grid(True)
77 | ax[2].grid(True)
78 |
79 | # In[Save]
80 | if not os.path.exists("data"):
81 | os.makedirs("data")
82 |
83 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
84 | COL_T = ['time']
85 | COL_X = ['V_C', 'I_L']
86 | COL_U = ['V_IN']
87 | COL_Y = ['V_C']
88 | COL = COL_T + COL_X + COL_U + COL_Y
89 | df_X = pd.DataFrame(X, columns=COL)
90 | # df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
91 |
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
101 |
--------------------------------------------------------------------------------
/examples/io/RLC/RLC_generate_test.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.statespace.RLC.symbolic_RLC import fxu_ODE, fxu_ODE_nl
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 200e3
21 | std_input = 60
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip the initial samples so that the filtered input u reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u = control.forced_response(Hu, te, e, return_x=False)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
39 |
40 |
41 | def f_ODE(t,x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 | def f_ODE_mod(t,x):
46 | u = u_func(t).ravel()
47 | return fxu_ODE_nl(t, x, u)
48 |
49 |
50 | x0 = np.zeros(2)
51 | f_ODE(0.0,x0)
52 | t_span = (t_sim[0],t_sim[-1])
53 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
54 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
55 |
56 | x1 = y1.y.T
57 | x2 = y2.y.T
58 |
59 | # In[plot]
60 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
61 | ax[0].plot(t_sim, x1[:,0],'b')
62 | ax[0].plot(t_sim, x2[:,0],'r')
63 | ax[0].set_xlabel('time (s)')
64 | ax[0].set_ylabel('Capacitor voltage (V)')
65 |
66 | ax[1].plot(t_sim, x1[:,1],'b')
67 | ax[1].plot(t_sim, x2[:,1],'r')
68 | ax[1].set_xlabel('time (s)')
69 | ax[1].set_ylabel('Inductor current (A)')
70 |
71 | ax[2].plot(t_sim, u,'b')
72 | ax[2].set_xlabel('time (s)')
73 | ax[2].set_ylabel('Input voltage (V)')
74 |
75 | ax[0].grid(True)
76 | ax[1].grid(True)
77 | ax[2].grid(True)
78 |
79 | # In[Save]
80 | if not os.path.exists("data"):
81 | os.makedirs("data")
82 |
83 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
84 | COL_T = ['time']
85 | COL_X = ['V_C', 'I_L']
86 | COL_U = ['V_IN']
87 | COL_Y = ['V_C']
88 | COL = COL_T + COL_X + COL_U + COL_Y
89 | df_X = pd.DataFrame(X, columns=COL)
90 | # df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
91 |
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_test.csv"), index=False)
101 |
--------------------------------------------------------------------------------
/examples/statespace/CTS/CTS_test.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | from torchid.ss.dt.simulator import StateSpaceSimulator
7 | from torchid.ss.dt.models import CTSNeuralStateSpace
8 | from examples.util import metrics
9 |
10 | if __name__ == '__main__':
11 |
12 | plot_input = False
13 |
14 | dataset_type = 'val'
15 | #model_name = 'model_ss_256step'
16 | #hidden_name = 'hidden_ss_256step'
17 |
18 | model_name = 'model_ss_full'
19 | hidden_name = 'hidden_ss_full'
20 |
21 | # Load dataset
22 | df_data = pd.read_csv(os.path.join("data", "CascadedTanksFiles", "dataBenchmark.csv"))
23 | if dataset_type == 'id':
24 | u = np.array(df_data[['uEst']]).astype(np.float32)
25 | y = np.array(df_data[['yEst']]).astype(np.float32)
26 | else:
27 | u = np.array(df_data[['uVal']]).astype(np.float32)
28 | y = np.array(df_data[['yVal']]).astype(np.float32)
29 |
30 | ts = df_data['Ts'][0].astype(np.float32)
31 | time_exp = np.arange(y.size).astype(np.float32) * ts
32 |
33 | # Build validation data
34 | t_val_start = 0
35 | t_val_end = time_exp[-1]
36 | idx_val_start = int(t_val_start//ts)
37 | idx_val_end = int(t_val_end//ts)
38 |
39 | y_meas_val = y[idx_val_start:idx_val_end]
40 | u_val = u[idx_val_start:idx_val_end]
41 | time_val = time_exp[idx_val_start:idx_val_end]
42 |
43 | # Setup neural model structure
44 | ss_model = CTSNeuralStateSpace(n_x=2, n_u=1, hidden_size=64)
45 | nn_solution = StateSpaceSimulator(ss_model)
46 | nn_solution.f_xu.load_state_dict(torch.load(os.path.join("models", model_name + ".pt")))
47 | x_hidden_fit = torch.load(os.path.join("models", hidden_name + ".pt"))
48 |
49 | # Evaluate the model in open-loop simulation against validation data
50 | # initial state had to be estimated, according to the dataset description
51 | x_0 = x_hidden_fit[0, :].detach().numpy()
52 | with torch.no_grad():
53 | x_sim_val_torch = nn_solution(torch.tensor(x_0), torch.tensor(u_val))
54 |
55 | # Transform to numpy arrays
56 | x_sim_val = x_sim_val_torch.detach().numpy()
57 | y_sim_val = x_sim_val[:, [0]]
58 |
59 | # Plot results
60 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
61 | idx_plot_start = 0
62 | idx_plot_end = time_val.size
63 |
64 | ax[0].plot(time_val[idx_plot_start:idx_plot_end], y_meas_val[idx_plot_start:idx_plot_end, 0], 'k', label='$y$')
65 | ax[0].plot(time_val[idx_plot_start:idx_plot_end], y_sim_val[idx_plot_start:idx_plot_end, 0], 'r--', label='$\hat{y}^{\mathrm{sim}}$')
66 | ax[0].legend(loc='upper right')
67 | ax[0].set_xlabel("Time (s)")
68 | ax[0].set_ylabel("Voltage (V)")
69 | ax[0].grid(True)
70 |
71 | ax[1].plot(time_val[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end, 0], 'k', label='$u$')
72 | ax[1].legend(loc='upper right')
73 | ax[1].set_xlabel("Time (s)")
74 | ax[1].set_ylabel("Voltage (V)")
75 | #ax[1].set_ylim([-5, 5])
76 | ax[1].grid(True)
77 |
78 | # Plot all
79 | if not os.path.exists("fig"):
80 | os.makedirs("fig")
81 |
82 | fig_name = f"CTS_SS_{dataset_type}_{model_name}.pdf"
83 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
84 |
85 | # R-squared metrics
86 | R_sq = metrics.r_squared(y_sim_val, y_meas_val)
87 | rmse_sim = metrics.error_rmse(y_sim_val, y_meas_val)
88 |
89 | print(f"R-squared metrics: {R_sq}")
90 | print(f"RMSE-squared metrics: {rmse_sim}")
91 |
--------------------------------------------------------------------------------
/examples/statespace/CTS/CTS_train_full.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import torch
5 | import torch.optim as optim
6 | import time
7 | import matplotlib.pyplot as plt
8 | from torchid.ss.dt.simulator import StateSpaceSimulator
9 | from torchid.ss.dt.models import CTSNeuralStateSpace
10 |
11 |
12 | if __name__ == '__main__':
13 |
14 | # Set seed for reproducibility
15 | np.random.seed(0)
16 | torch.manual_seed(0)
17 |
18 | # Overall parameters
19 | num_iter = 10000 # gradient-based optimization steps
20 | lr = 1e-4 # learning rate
21 | test_freq = 10 # print message every test_freq iterations
22 |
23 | # Load dataset
24 | df_data = pd.read_csv(os.path.join("data", "CascadedTanksFiles", "dataBenchmark.csv"))
25 | u_id = np.array(df_data[['uEst']]).astype(np.float32)
26 | y_id = np.array(df_data[['yEst']]).astype(np.float32)
27 | ts = df_data['Ts'][0].astype(np.float32)
28 | time_exp = np.arange(y_id.size).astype(np.float32)*ts
29 |
30 | x_est = np.zeros((time_exp.shape[0], 2), dtype=np.float32)
31 | x_est[:, 0] = np.copy(y_id[:, 0])
32 |
33 | # Hidden state variable
34 | x_hidden_fit = torch.tensor(x_est, dtype=torch.float32, requires_grad=True) # hidden state is an optimization variable
35 | y_fit = y_id
36 | u_fit = u_id
37 | u_fit_torch = torch.tensor(u_fit)
38 | y_fit_torch = torch.tensor(y_fit)
39 | time_fit = time_exp
40 |
41 | # Setup neural model structure
42 | ss_model = CTSNeuralStateSpace(n_x=2, n_u=1, hidden_size=64)
43 | nn_solution = StateSpaceSimulator(ss_model)
44 |
45 | model_name = 'model_SS_256step'
46 | hidden_name = 'hidden_SS_256step'
47 | #nn_solution.load_state_dict(torch.load(os.path.join("models", model_name + ".pkl")))
48 | #x_hidden_fit = torch.load(os.path.join("models", hidden_name + ".pkl"))
49 |
50 |
51 | # Setup optimizer
52 | params_net = list(nn_solution.parameters())
53 | params_hidden = [x_hidden_fit]
54 | optimizer = optim.Adam([
55 | {'params': params_net, 'lr': lr},
56 | {'params': params_hidden, 'lr': lr},
57 | ], lr=lr)
58 |
59 | # Scale loss with respect to the initial one
60 | with torch.no_grad():
61 | x0_torch = x_hidden_fit[0, :]
62 | x_est_torch = nn_solution(x0_torch, u_fit_torch)
63 | err_init = x_est_torch[:, [0]] - y_fit_torch
64 | scale_error = torch.sqrt(torch.mean(err_init**2, dim=0))
65 |
66 | LOSS_TOT = []
67 | LOSS_FIT = []
68 | LOSS_CONSISTENCY = []
69 | start_time = time.time()
70 |
71 | # Training loop
72 | for itr in range(0, num_iter):
73 |
74 | optimizer.zero_grad()
75 |
76 | x0_torch = x_hidden_fit[0, :]
77 |
78 | # Perform open-loop simulation
79 | x_sim = nn_solution(x0_torch, u_fit_torch)
80 |
81 | # Compute fit loss
82 | err_fit = x_sim[:, [0]] - y_fit_torch
83 | err_fit_scaled = err_fit/scale_error[0]
84 | loss_fit = torch.mean(err_fit_scaled**2)
85 |
86 |
87 | # Compute trade-off loss
88 | loss = loss_fit
89 |
90 | LOSS_TOT.append(loss.item())
91 | LOSS_FIT.append(loss_fit.item())
92 | if itr % test_freq == 0:
93 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
94 |
95 | # Optimize
96 | loss.backward()
97 | optimizer.step()
98 |
99 | train_time = time.time() - start_time
100 | print(f"\nTrain time: {train_time:.2f}")
101 |
102 | if not os.path.exists("models"):
103 | os.makedirs("models")
104 |
105 | # Save model
106 | if not os.path.exists("models"):
107 | os.makedirs("models")
108 |
109 | model_filename = f"model_ss_full.pt"
110 | hidden_filename = f"hidden_ss_full.pt"
111 |
112 | torch.save(nn_solution.f_xu.state_dict(), os.path.join("models", model_filename))
113 | torch.save(x_hidden_fit, os.path.join("models", hidden_filename))
114 |
115 | # Plot figures
116 | if not os.path.exists("fig"):
117 | os.makedirs("fig")
118 |
119 | # Loss plot
120 | fig, ax = plt.subplots(1, 1)
121 | ax.plot(LOSS_TOT, 'k', label='TOT')
122 | ax.plot(LOSS_CONSISTENCY, 'r', label='CONSISTENCY')
123 | ax.plot(LOSS_FIT, 'b', label='FIT')
124 | ax.grid(True)
125 | ax.legend(loc='upper right')
126 | ax.set_ylabel("Loss (-)")
127 | ax.set_xlabel("Iteration (-)")
128 |
129 | fig_name = f"CTS_SS_loss_{'simerr'}_noise.pdf"
130 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
131 |
132 | # Hidden variable plot
133 | x_hidden_fit_np = x_hidden_fit.detach().numpy()
134 | fig, ax = plt.subplots(2, 1, sharex=True)
135 | ax[0].plot(y_id[:, 0], 'b', label='Measured')
136 | ax[0].plot(x_hidden_fit_np[:, 0], 'r', label='Hidden')
137 | ax[0].legend()
138 | ax[0].grid(True)
139 |
140 | ax[1].plot(x_hidden_fit_np[:, 1], 'r', label='Hidden')
141 | ax[1].legend()
142 | ax[1].grid(True)
143 |
144 | # Simulate
145 | y_val = np.copy(y_fit)
146 | u_val = np.copy(u_fit)
147 |
148 | x0_val = x_hidden_fit[0, :].detach().numpy() # initial state had to be estimated, according to the dataset description
149 | x0_torch_val = torch.from_numpy(x0_val)
150 | u_torch_val = torch.tensor(u_val)
151 |
152 | with torch.no_grad():
153 | x_sim_torch = nn_solution(x0_torch_val[None, :], u_torch_val[:, None, :])
154 | y_sim_torch = x_sim_torch[:, 0]
155 | x_sim = y_sim_torch.detach().numpy()
156 |
157 | # Simulation plot
158 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 7.5))
159 | ax[0].plot(time_exp, y_val, 'k', label='$y_{\mathrm{meas}}$')
160 | ax[0].plot(time_exp, x_sim[:, 0], 'r', label='$\hat y_{\mathrm{sim}}$')
161 | ax[0].legend(loc='upper right')
162 | ax[0].grid(True)
163 | ax[0].set_ylabel("Voltage (V)")
164 |
165 | ax[1].plot(time_exp, u_id, 'k', label='$u_{in}$')
166 | ax[1].set_xlabel("Time (s)")
167 | ax[1].set_ylabel("Voltage (V)")
168 | ax[1].grid(True)
169 | ax[1].set_xlabel("Time (s)")
170 |
--------------------------------------------------------------------------------
/examples/statespace/CTS/CTS_train_truncated.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import torch
5 | import torch.optim as optim
6 | import time
7 | import matplotlib.pyplot as plt
8 | from torchid.ss.dt.simulator import StateSpaceSimulator
9 | from torchid.ss.dt.models import CTSNeuralStateSpace
10 |
11 |
12 | if __name__ == '__main__':
13 |
14 | # Set seed for reproducibility
15 | np.random.seed(0)
16 | torch.manual_seed(0)
17 |
18 | # Overall parameters
19 | num_iter = 40000 # gradient-based optimization steps
20 | seq_len = 256 # subsequence length m
21 | batch_size = 32 # batch size
22 | alpha = 0.5 # fit/consistency trade-off constant
23 | lr = 1e-4 # learning rate
24 | test_freq = 100 # print message every test_freq iterations
25 |
26 | # Load dataset
27 | df_data = pd.read_csv(os.path.join("data", "CascadedTanksFiles", "dataBenchmark.csv"))
28 | u_id = np.array(df_data[['uEst']]).astype(np.float32)
29 | y_id = np.array(df_data[['yEst']]).astype(np.float32)
30 | ts = df_data['Ts'][0].astype(np.float32)
31 | time_exp = np.arange(y_id.size).astype(np.float32)*ts
32 |
33 | x_est = np.zeros((time_exp.shape[0], 2), dtype=np.float32)
34 | x_est[:, 0] = np.copy(y_id[:, 0])
35 |
36 | # Hidden state variable
37 | x_hidden_fit = torch.tensor(x_est, dtype=torch.float32, requires_grad=True) # hidden state is an optimization variable
38 | y_fit = y_id
39 | u_fit = u_id
40 | time_fit = time_exp
41 |
42 | # Setup neural model structure
43 | f_xu = CTSNeuralStateSpace(n_x=2, n_u=1, hidden_size=64)
44 | nn_solution = StateSpaceSimulator(f_xu)
45 |
46 | # Setup optimizer
47 | params_net = list(nn_solution.parameters())
48 | params_hidden = [x_hidden_fit]
49 | optimizer = optim.Adam([
50 | {'params': params_net, 'lr': lr},
51 | {'params': params_hidden, 'lr': lr},
52 | ], lr=10*lr)
53 |
54 | # Batch extraction function
55 | def get_batch(batch_size, seq_len):
56 |
57 | # Select batch indexes
58 | num_train_samples = u_fit.shape[0]
59 | batch_start = np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64),
60 | batch_size, replace=False) # batch start indices
61 | batch_idx = batch_start[:, np.newaxis] + np.arange(seq_len) # batch samples indices
62 | batch_idx = batch_idx.T # transpose indexes to obtain batches with structure (m, q, n_x) batch_first=False
63 |
64 | # Extract batch data
65 | batch_t = torch.tensor(time_fit[batch_idx])
66 | batch_x0_hidden = x_hidden_fit[batch_start, :]
67 | batch_x_hidden = x_hidden_fit[[batch_idx]]
68 | batch_u = torch.tensor(u_fit[batch_idx])
69 | batch_y = torch.tensor(y_fit[batch_idx])
70 |
71 | return batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden
72 |
73 | # Scale loss with respect to the initial one
74 | with torch.no_grad():
75 | batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden = get_batch(batch_size, seq_len)
76 | batch_x_sim = nn_solution(batch_x0_hidden, batch_u)
77 | err_init = batch_x_sim - batch_y
78 | scale_error = torch.sqrt(torch.mean(err_init**2, dim=(0, 1)))
79 |
80 | LOSS_TOT = []
81 | LOSS_FIT = []
82 | LOSS_CONSISTENCY = []
83 | start_time = time.time()
84 | # Training loop
85 |
86 | for itr in range(0, num_iter):
87 |
88 | optimizer.zero_grad()
89 |
90 | # Simulate
91 | batch_t, batch_x0_hidden, batch_u, batch_y, batch_x_hidden = get_batch(batch_size, seq_len)
92 | batch_x_sim = nn_solution(batch_x0_hidden, batch_u)
93 |
94 | # Compute fit loss
95 | err_fit = batch_x_sim[:, :, [0]] - batch_y
96 | err_fit_scaled = err_fit/scale_error[0]
97 | loss_fit = torch.mean(err_fit_scaled**2)
98 |
99 | # Compute consistency loss
100 | err_consistency = batch_x_sim - batch_x_hidden
101 | err_consistency_scaled = err_consistency/scale_error
102 | loss_consistency = torch.mean(err_consistency_scaled**2)
103 |
104 | # Compute trade-off loss
105 | loss = alpha*loss_fit + (1.0-alpha)*loss_consistency
106 |
107 | # Statistics
108 | LOSS_TOT.append(loss.item())
109 | LOSS_FIT.append(loss_fit.item())
110 | LOSS_CONSISTENCY.append(loss_consistency.item())
111 | if itr % test_freq == 0:
112 | print(f'Iter {itr} | Tradeoff Loss {loss:.4f} '
113 | f'Consistency Loss {loss_consistency:.4f} Fit Loss {loss_fit:.4f}')
114 |
115 | # Optimize
116 | loss.backward()
117 | optimizer.step()
118 |
119 | train_time = time.time() - start_time
120 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
121 |
122 | if not os.path.exists("models"):
123 | os.makedirs("models")
124 |
125 | # Save model
126 | if not os.path.exists("models"):
127 | os.makedirs("models")
128 |
129 | model_filename = f"model_ss_{seq_len}step.pt"
130 | hidden_filename = f"hidden_ss_{seq_len}step.pt"
131 |
132 | torch.save(nn_solution.f_xu.state_dict(), os.path.join("models", model_filename))
133 | torch.save(x_hidden_fit, os.path.join("models", hidden_filename))
134 |
135 | # Plot figures
136 | if not os.path.exists("fig"):
137 | os.makedirs("fig")
138 |
139 | # Loss plot
140 | fig, ax = plt.subplots(1, 1)
141 | ax.plot(LOSS_TOT, 'k', label='TOT')
142 | ax.plot(LOSS_CONSISTENCY, 'r', label='CONSISTENCY')
143 | ax.plot(LOSS_FIT, 'b', label='FIT')
144 | ax.grid(True)
145 | ax.legend(loc='upper right')
146 | ax.set_ylabel("Loss (-)")
147 | ax.set_xlabel("Iteration (-)")
148 |
149 | fig_name = f"WT_SS_loss_{seq_len}step_noise.pdf"
150 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
151 |
152 | # Hidden variable plot
153 | x_hidden_fit_np = x_hidden_fit.detach().numpy()
154 | fig, ax = plt.subplots(2, 1, sharex=True)
155 | ax[0].plot(y_id[:, 0], 'b', label='Measured')
156 | ax[0].plot(x_hidden_fit_np[:, 0], 'r', label='Hidden')
157 | ax[0].legend()
158 | ax[0].grid(True)
159 |
160 | #ax[1].plot(x_est[:, 1], 'k', label='Estimated')
161 | ax[1].plot(x_hidden_fit_np[:, 1], 'r', label='Hidden')
162 | ax[1].legend()
163 | ax[1].grid(True)
164 |
165 | # Simulate
166 | y_val = np.copy(y_fit)
167 | u_val = np.copy(u_fit)
168 |
169 | # initial state had to be estimated, according to the dataset description
170 | x0_val = x_hidden_fit[0, :].detach().numpy()
171 | x0_torch_val = torch.from_numpy(x0_val)
172 | u_torch_val = torch.tensor(u_val)
173 |
174 | with torch.no_grad():
175 | x_sim_torch = nn_solution(x0_torch_val[None, :], u_torch_val[:, None, :])
176 | y_sim_torch = x_sim_torch[:, 0]
177 | x_sim = y_sim_torch.detach().numpy()
178 |
179 |
180 | # Simulation plot
181 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 7.5))
182 | #ax[0].plot(time_exp, q_ref, 'k', label='$q_{\mathrm{ref}}$')
183 | ax[0].plot(time_exp, y_val, 'k', label='$y_{\mathrm{meas}}$')
184 | ax[0].plot(time_exp, x_sim[:, 0], 'r', label='$\hat y_{\mathrm{sim}}$')
185 | ax[0].legend(loc='upper right')
186 | ax[0].grid(True)
187 | ax[0].set_ylabel("Voltage (V)")
188 |
189 | ax[1].plot(time_exp, u_id, 'k', label='$u_{in}$')
190 | ax[1].set_xlabel("Time (s)")
191 | ax[1].set_ylabel("Voltage (V)")
192 | ax[1].grid(True)
193 | ax[1].set_xlabel("Time (s)")
194 |
--------------------------------------------------------------------------------
/examples/statespace/CTS/README.txt:
--------------------------------------------------------------------------------
1 | Main scripts:
2 | -------------
3 |
4 | - CTS_train_truncated.py: train with a truncated multi-step-ahead method, estimating the full hidden state
5 |   sequence along with the model parameters, as done in [1] and [2] (see the loss sketch below)
6 | - CTS_train_full.py: train by minimizing the simulation error over the full training sequence, estimating the initial hidden state along with the model parameters
7 | - CTS_test.py: plot the model simulation on a test dataset and compute metrics
8 |
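For reference, the loss minimized at each iteration of CTS_train_truncated.py trades off a fit term
(simulated output vs. measured output) against a consistency term (simulated state vs. estimated hidden
state). A minimal sketch, with illustrative names and alpha = 0.5 as in the script:

    import torch

    def tradeoff_loss(x_sim, y_meas, x_hidden, scale_error, alpha=0.5):
        # fit term: channel 0 of the simulated state is the model output
        loss_fit = torch.mean(((x_sim[:, :, [0]] - y_meas) / scale_error[0]) ** 2)
        # consistency term: simulated state vs. hidden state sequence (an optimization variable)
        loss_consistency = torch.mean(((x_sim - x_hidden) / scale_error) ** 2)
        return alpha * loss_fit + (1.0 - alpha) * loss_consistency
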
9 | Bibliography
10 | ------------
11 | [1] M. Forgione and D. Piga. Model structures and fitting criteria for system identification with neural networks. In Proceedings of the 14th IEEE International Conference Application of Information and Communication Technologies, 2020.
12 | [2] M. Forgione and D. Piga. Continuous-time system identification with neural networks: model structures and fitting criteria. European Journal of Control, 59:68-81, 2021.
13 |
--------------------------------------------------------------------------------
/examples/statespace/CTS/download_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | import zipfile
4 | import examples.util.benchmark_url
5 |
6 |
7 | if __name__ == '__main__':
8 |
9 | DATA_FOLDER = 'data'
10 |
11 | # In[Make data folder if it does not exist]
12 | if not os.path.exists(DATA_FOLDER):
13 | os.makedirs(DATA_FOLDER)
14 |
15 | # In[Download dataset from www.nonlinearbenchmark.com]
16 | r = requests.get(examples.util.benchmark_url.CTS, allow_redirects=True)
17 |
18 | # In[Save zipped file]
19 | zipped_dataset_path = os.path.join('data', 'data.zip')
20 | with open(zipped_dataset_path, 'wb') as f:
21 | f.write(r.content)
22 |
23 | # In[Extract zipped file]
24 | with zipfile.ZipFile(zipped_dataset_path, 'r') as zip_ref:
25 | zip_ref.extractall('data')
26 |
27 | with zipfile.ZipFile(os.path.join("data", "CascadedTanksFiles.zip"), 'r') as zip_ref:
28 | zip_ref.extractall("data")
29 |
--------------------------------------------------------------------------------
/examples/statespace/README.txt:
--------------------------------------------------------------------------------
1 | RLC: Simulated non-linear RLC series circuit
2 | CTS: Cascaded Tanks system from www.nonlinearbenchmark.org
3 | silverbox: Electronic implementation of the Duffing oscillator from www.nonlinearbenchmark.org
--------------------------------------------------------------------------------
/examples/statespace/RLC/README.txt:
--------------------------------------------------------------------------------
1 | Main scripts:
2 | -------------
3 |
4 | - RLC_generate_train.py: generate the training dataset
5 | - RLC_generate_test.py: generate the test dataset
6 | - RLC_train_1step.py: train with a 1-step-ahead method, as discussed in [1]
7 | - RLC_train_multistep.py: train with a multi-step-ahead method, using an LSTM encoder for state estimation.
8 |   Similar to [3] and [4], but with a recurrent encoder network
9 | - RLC_test.py: plot model simulation on a test dataset and compute metrics
10 |
11 | Bibliography
12 | ------------
13 | [1] M. Forgione and D. Piga. Model structures and fitting criteria for system identification with neural networks. In Proceedings of the 14th IEEE International Conference Application of Information and Communication Technologies, 2020.
14 | [2] M. Forgione and D. Piga. Continuous-time system identification with neural networks: model structures and fitting criteria. European Journal of Control, 59:68-81, 2021.
15 | [3] D. Masti and A. Bemporad. Learning nonlinear state-space models using autoencoders.
16 | [4] G. Beintema, R. Toth and M. Schoukens. Nonlinear State-Space Identification using Deep Encoder Networks. Submitted to L4DC, 2021.
--------------------------------------------------------------------------------
/examples/statespace/RLC/RLC_generate_test.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.statespace.RLC.symbolic_RLC import fxu_ODE, fxu_ODE_nl
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 200e3
21 | std_input = 60
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip the initial samples so that the filtered input u reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u = control.forced_response(Hu, te, e, return_x=False)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
39 |
40 |
41 | def f_ODE(t, x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 |
46 | def f_ODE_nl(t, x):
47 | u = u_func(t).ravel()
48 | return fxu_ODE_nl(t, x, u)
49 |
50 |
51 | x0 = np.zeros(2)
52 | f_ODE(0.0, x0)
53 | t_span = (t_sim[0], t_sim[-1])
54 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval=t_sim)
55 | y2 = solve_ivp(f_ODE_nl, t_span, x0, t_eval=t_sim)
56 |
57 | x1 = y1.y.T
58 | x2 = y2.y.T
59 |
60 | # In[plot]
61 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
62 | ax[0].plot(t_sim, x1[:,0],'b')
63 | ax[0].plot(t_sim, x2[:,0],'r')
64 | ax[0].set_xlabel('time (s)')
65 | ax[0].set_ylabel('Capacitor voltage (V)')
66 |
67 | ax[1].plot(t_sim, x1[:,1],'b')
68 | ax[1].plot(t_sim, x2[:,1],'r')
69 | ax[1].set_xlabel('time (s)')
70 | ax[1].set_ylabel('Inductor current (A)')
71 |
72 | ax[2].plot(t_sim, u,'b')
73 | ax[2].set_xlabel('time (s)')
74 | ax[2].set_ylabel('Input voltage (V)')
75 |
76 | ax[0].grid(True)
77 | ax[1].grid(True)
78 | ax[2].grid(True)
79 |
80 | # In[Save]
81 | if not os.path.exists("data"):
82 | os.makedirs("data")
83 |
84 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
85 | COL_T = ['time']
86 | COL_X = ['V_C', 'I_L']
87 | COL_U = ['V_IN']
88 | COL_Y = ['V_C']
89 | COL = COL_T + COL_X + COL_U + COL_Y
90 | df_X = pd.DataFrame(X, columns=COL)
91 | df_X.to_csv(os.path.join("data", "RLC_data_id_lin.csv"), index=False)
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_test_nl.csv"), index=False)
101 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/RLC_generate_train.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.statespace.RLC.symbolic_RLC import fxu_ODE, fxu_ODE_nl
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 150e3
21 | std_input = 80
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip the initial samples so that the filtered input u reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u = control.forced_response(Hu, te, e, return_x=False)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
39 |
40 |
41 | def f_ODE(t, x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 |
46 | def f_ODE_nl(t, x):
47 | u = u_func(t).ravel()
48 | return fxu_ODE_nl(t, x, u)
49 |
50 |
51 | x0 = np.zeros(2)
52 | f_ODE(0.0, x0)
53 | t_span = (t_sim[0],t_sim[-1])
54 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval=t_sim)
55 | y2 = solve_ivp(f_ODE_nl, t_span, x0, t_eval=t_sim)
56 |
57 | x1 = y1.y.T
58 | x2 = y2.y.T
59 |
60 | # In[plot]
61 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
62 | ax[0].plot(t_sim, x1[:,0],'b')
63 | ax[0].plot(t_sim, x2[:,0],'r')
64 | ax[0].set_xlabel('time (s)')
65 | ax[0].set_ylabel('Capacitor voltage (V)')
66 |
67 | ax[1].plot(t_sim, x1[:,1],'b')
68 | ax[1].plot(t_sim, x2[:,1],'r')
69 | ax[1].set_xlabel('time (s)')
70 | ax[1].set_ylabel('Inductor current (A)')
71 |
72 | ax[2].plot(t_sim, u,'b')
73 | ax[2].set_xlabel('time (s)')
74 | ax[2].set_ylabel('Input voltage (V)')
75 |
76 | ax[0].grid(True)
77 | ax[1].grid(True)
78 | ax[2].grid(True)
79 |
80 | # In[Save]
81 | if not os.path.exists("data"):
82 | os.makedirs("data")
83 |
84 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
85 | COL_T = ['time']
86 | COL_X = ['V_C', 'I_L']
87 | COL_U = ['V_IN']
88 | COL_Y = ['V_C']
89 | COL = COL_T + COL_X + COL_U + COL_Y
90 | df_X = pd.DataFrame(X, columns=COL)
91 | df_X.to_csv(os.path.join("data", "RLC_data_train_lin.csv"), index=False)
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_train_nl.csv"), index=False)
101 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/RLC_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import torch
4 | import matplotlib
5 | import matplotlib.pyplot as plt
6 | from torchid.ss.dt.models import NeuralStateUpdate, ChannelsOutput
7 | from torchid.ss.dt.simulator import StateSpaceSimulator
8 | from torchid import metrics
9 | from loader import rlc_loader
10 |
11 | if __name__ == '__main__':
12 |
13 | model_filename = "ss_model_ms.pt"
14 | #model_filename = "ss_model_1step.pt"
15 | model_data = torch.load(os.path.join("models", model_filename))
16 | n_feat = model_data["n_feat"]
17 |
18 | # Column names in the dataset
19 | t, u, y, x = rlc_loader("test", "nl", noise_std=0.0)
20 | n_x = x.shape[-1]
21 | ts = t[1, 0] - t[0, 0]
22 |
23 | # Setup neural model structure and load fitted model parameters
24 | f_xu = NeuralStateUpdate(n_x=2, n_u=1, hidden_size=n_feat)
25 | g_x = ChannelsOutput(channels=[0]) # output mapping corresponding to channel 0
26 | model = StateSpaceSimulator(f_xu, g_x)
27 | model.load_state_dict(model_data["model"])
28 |
29 | # Evaluate the model in open-loop simulation against validation data
30 | x_0 = torch.zeros((1, n_x), dtype=torch.float32)
31 | with torch.no_grad():
32 | y_sim = model(x_0, torch.tensor(u)[:, None, :]).squeeze(1)
33 | y_sim = y_sim.detach().numpy()
34 |
35 | # Plot results
36 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
37 |
38 | ax[0].plot(t, y, 'k', label='$v_C$')
39 | ax[0].plot(t, y_sim, 'b', label='$\hat v_C$')
40 | ax[0].plot(t, y-y_sim, 'r', label='e')
41 | ax[0].legend(loc='upper right')
42 | ax[0].grid(True)
43 | ax[0].set_xlabel("Time (mu_s)")
44 | ax[0].set_ylabel("Voltage (V)")
45 |
46 | ax[1].plot(t, u, 'k', label='$v_{in}$')
47 | ax[1].legend(loc='upper right')
48 | ax[1].grid(True)
49 | ax[1].set_xlabel("Time (mu_s)")
50 | ax[1].set_ylabel("Voltage (V)")
51 |
52 | plt.show()
53 |
54 | # R-squared metrics
55 | R_sq = metrics.r_squared(y, y_sim)
56 | print(f"R-squared: {R_sq}")
57 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/RLC_train_1step.py:
--------------------------------------------------------------------------------
1 | """
2 | Train with a 1-step-ahead prediction model.
3 |
4 | + Very simple and efficient
5 | - Requires full state measurement
6 | - It is not very robust to noise.
7 | """
8 |
9 | import os
10 | import numpy as np
11 | import torch
12 | import torch.optim as optim
13 | import time
14 | import matplotlib.pyplot as plt
15 | from torchid.ss.dt.models import NeuralStateUpdate
16 | from torchid.ss.dt.simulator import StateSpaceSimulator
17 | from loader import rlc_loader
18 |
19 | if __name__ == '__main__':
20 |
21 | # Set seed for reproducibility
22 | np.random.seed(0)
23 | torch.manual_seed(0)
24 |
25 | # Overall parameters
26 | t_fit = 2e-3 # fitting on t_fit ms of data
27 | lr = 1e-4 # learning rate
28 | num_iter = 40000 # gradient-based optimization steps
29 | test_freq = 500 # print message every test_freq iterations
30 | n_feat = 50
31 |
32 | # Load dataset
33 | t, u, y, x = rlc_loader("train", "nl", noise_std=0.1, dtype=np.float32)
34 | n_x = x.shape[-1]
35 | n_u = u.shape[-1]
36 | n_y = y.shape[-1]
37 |
38 | ts = t[1] - t[0]
39 | n_fit = int(t_fit // ts)
40 |
41 | # Fit data to pytorch tensors #
42 | u_train = torch.tensor(u, dtype=torch.float32)
43 | x_train = torch.tensor(x, dtype=torch.float32)
44 |
45 | # Setup neural model structure
46 | f_xu = NeuralStateUpdate(n_x=2, n_u=1, hidden_size=n_feat)
47 | model = StateSpaceSimulator(f_xu)
48 |
49 | # Setup optimizer
50 | optimizer = optim.Adam(model.parameters(), lr=lr)
51 |
52 | # Scale loss with respect to the initial one
53 | with torch.no_grad():
54 | delta_x = x_train[1:, :] - x_train[0:-1, :]
55 | scale_error = torch.sqrt(torch.mean(delta_x ** 2, dim=0))
56 |
57 | LOSS = []
58 | start_time = time.time()
59 | # Training loop
60 | for itr in range(0, num_iter):
61 | optimizer.zero_grad()
62 |
63 | # Perform one-step ahead prediction
64 | delta_x_hat = model.f_xu(x_train[0:-1, :], u_train[0:-1, :])
65 | delta_x = x_train[1:, :] - x_train[0:-1, :]
66 |
67 | err = delta_x - delta_x_hat
68 | err_scaled = err/scale_error
69 |
70 | # Compute fit loss
71 | loss = torch.mean(err_scaled**2)
72 |
73 | # Statistics
74 | LOSS.append(loss.item())
75 | if itr % test_freq == 0:
76 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
77 |
78 | # Optimize
79 | loss.backward()
80 | optimizer.step()
81 |
82 | train_time = time.time() - start_time # 114 seconds
83 | print(f"\nTrain time: {train_time:.2f}")
84 |
85 | #%% Save model
86 | if not os.path.exists("models"):
87 | os.makedirs("models")
88 | model_filename = "ss_model_1step.pt"
89 | torch.save({"n_x": 2,
90 | "n_y": 1,
91 | "n_u": 1,
92 | "model": model.state_dict(),
93 | "n_feat": n_feat
94 | },
95 | os.path.join("models", model_filename))
96 |
97 | #%% Simulate model
98 |
99 | t_test, u_test, y_test, x_test = rlc_loader("test", "nl", noise_std=0.0, dtype=np.float32)
100 |
101 | with torch.no_grad():
102 | x0 = torch.zeros((1, n_x), dtype=torch.float32)
103 | y_sim, x_sim = model(x0, torch.tensor(u_test)[:, None, :], return_x=True)
104 |
105 | y_sim = y_sim.squeeze(1).detach().numpy()
106 | x_sim = x_sim.squeeze(1).detach().numpy()
107 |
108 | fig, ax = plt.subplots(2, 1, sharex=True)
109 | ax[0].plot(x_test[:, 0], 'k+', label='True')
110 | ax[0].plot(x_sim[:, 0], 'r', label='Sim')
111 | ax[0].legend()
112 | ax[1].plot(x_test[:, 1], 'k+', label='True')
113 | ax[1].plot(x_sim[:, 1], 'r', label='Sim')
114 | ax[1].legend()
115 | ax[0].grid(True)
116 | ax[1].grid(True)
117 |
118 | #%% Plot loss
119 |
120 | if not os.path.exists("fig"):
121 | os.makedirs("fig")
122 |
123 | fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
124 | ax.plot(LOSS)
125 | ax.grid(True)
126 | ax.set_ylabel("Loss (-)")
127 | ax.set_xlabel("Iteration (-)")
--------------------------------------------------------------------------------
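The script above trains f_xu on scaled one-step residuals: the network predicts the state increment x[k+1] - x[k] from (x[k], u[k]), and the error is normalized channel-wise by the RMS of the measured increments. A minimal self-contained sketch of that objective (not part of the repository, shown here only to summarize the loss):

    import torch

    def one_step_loss(f_xu, x, u):
        # Measured state increments and their per-channel RMS, used as a fixed scale factor
        delta_x = x[1:, :] - x[:-1, :]
        scale = torch.sqrt(torch.mean(delta_x ** 2, dim=0))
        # Increments predicted by the state-update network from the current state and input
        delta_x_hat = f_xu(x[:-1, :], u[:-1, :])
        # Mean squared error of the scaled one-step residual
        return torch.mean(((delta_x - delta_x_hat) / scale) ** 2)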
/examples/statespace/RLC/RLC_train_multistep.py:
--------------------------------------------------------------------------------
1 | """
2 | Use an LSTM encoder network to estimate the initial state, then simulate forward in time.
3 |
4 | - Slightly more complex than 1-step-ahead prediction
5 | + Can handle partial state observation
6 | + Works well with measurement noise
7 | """
8 |
9 |
10 | import os
11 | import time
12 | import numpy as np
13 | import torch
14 | import torch.optim as optim
15 | from torch.utils.data import DataLoader
16 | import matplotlib.pyplot as plt
17 | from torchid.ss.dt.models import NeuralStateUpdate, ChannelsOutput
18 | from torchid.ss.dt.simulator import StateSpaceSimulator
19 | from torchid.ss.dt.estimators import LSTMStateEstimator
20 | from torchid.datasets import SubsequenceDataset
21 | from loader import rlc_loader
22 |
23 |
24 | # Truncated simulation error minimization method
25 | if __name__ == '__main__':
26 |
27 | # Set seed for reproducibility
28 | np.random.seed(0)
29 | torch.manual_seed(0)
30 |
31 | no_cuda = False # no GPU, CPU only training
32 | threads = 6 # max number of CPU threads
33 |
34 | # Overall parameters
35 | epochs = 100 # training epochs
36 | seq_sim_len = 64 # simulation sequence length
37 | seq_est_len = 16 # estimation sequence length
38 | batch_size = 32 # batch size q
39 | lr = 1e-4 # learning rate
40 | n_fit = 5000
41 | hidden_size = 50
42 | n_x = 2
43 |
44 | # CPU/GPU resources
45 | use_cuda = not no_cuda and torch.cuda.is_available()
46 | device = torch.device("cuda" if use_cuda else "cpu")
47 | torch.set_num_threads(threads)
48 |
49 | # Load dataset
50 | t, u, y, _ = rlc_loader("train", "nl", noise_std=0.1, n_data=n_fit) # state not used
51 |
52 | # Setup neural model structure
53 | f_xu = NeuralStateUpdate(n_x=2, n_u=1, hidden_size=hidden_size).to(device)
54 | g_x = ChannelsOutput(channels=[0]).to(device) # output is channel 0
55 | model = StateSpaceSimulator(f_xu, g_x).to(device)
56 | estimator = LSTMStateEstimator(n_u=1, n_y=1, n_x=2).to(device)
57 |
58 | load_len = seq_sim_len + seq_est_len
59 | train_dataset = SubsequenceDataset(torch.from_numpy(u), torch.from_numpy(y), subseq_len=load_len)
60 | train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
61 |
62 | # Setup optimizer
63 | optimizer = optim.Adam([
64 | {'params': model.parameters(), 'lr': lr},
65 | {'params': estimator.parameters(), 'lr': lr},
66 | ], lr=lr)
67 |
68 | LOSS = []
69 | LOSS_CONSISTENCY = []
70 | LOSS_FIT = []
71 | start_time = time.time()
72 |
73 | # Training loop
74 | for epoch in range(epochs):
75 |
76 | for batch_idx, (batch_u, batch_y) in enumerate(train_loader):
77 | optimizer.zero_grad()
78 |
79 | batch_u = batch_u.transpose(0, 1).to(device) # transpose to time_first
80 | batch_y = batch_y.transpose(0, 1).to(device) # transpose to time_first
81 |
82 | # Estimate initial state
83 | batch_u_est = batch_u[:seq_est_len]
84 | batch_y_est = batch_y[:seq_est_len]
85 | batch_x0 = estimator(batch_u_est, batch_y_est)
86 |
87 | # Simulate
88 | batch_u_fit = batch_u[seq_est_len:]
89 | batch_y_fit = batch_y[seq_est_len:]
90 | batch_y_sim = model(batch_x0, batch_u_fit)
91 |
92 | # Compute loss
93 | loss = torch.nn.functional.mse_loss(batch_y_fit, batch_y_sim)
94 |
95 | # Statistics
96 | LOSS.append(loss.item())
97 |
98 | # Optimize
99 | loss.backward()
100 | optimizer.step()
101 |
102 | print(f'Epoch {epoch} | Train Loss {loss:.4f} ')
103 |
104 | train_time = time.time() - start_time
105 | print(f"\nTrain time: {train_time:.2f}")
106 |
107 | #%%
108 |
109 | # Save model
110 | if not os.path.exists("models"):
111 | os.makedirs("models")
112 |
113 | model = model.to("cpu")
114 | estimator = estimator.to("cpu")
115 | model_filename = "ss_model_ms.pt"
116 | torch.save({
117 | "n_x": n_x,
118 | "n_feat": hidden_size,
119 | "model": model.state_dict(),
120 | "estimator": estimator.state_dict()
121 | },
122 | os.path.join("models", model_filename))
123 |
124 | #%% Simulate
125 | with torch.no_grad():
126 | u_v = torch.tensor(u[:, None, :])
127 | y_v = torch.tensor(y[:, None, :])
128 | x0 = torch.zeros(1, n_x, dtype=torch.float32) # initial state set to 0 for simplicity
129 | y_sim = model(x0, u_v)
130 |
131 | #%% Test
132 | fig, ax = plt.subplots(1, 1)
133 | ax.plot(LOSS, 'k', label='ALL')
134 | ax.grid(True)
135 | ax.legend()
136 | ax.set_ylabel("Loss (-)")
137 | ax.set_xlabel("Iteration (-)")
138 |
139 | fig, ax = plt.subplots(1, 1, sharex=True)
140 | ax.plot(y_v[:, 0, 0], 'k', label='meas')
141 | ax.grid(True)
142 | ax.plot(y_sim[:, 0, 0], 'b', label='sim')
143 |
--------------------------------------------------------------------------------
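Each batch drawn from SubsequenceDataset above is a window of seq_est_len + seq_sim_len samples: the first seq_est_len samples are fed to the LSTM estimator to produce the initial state, and the remaining samples are simulated in open loop and compared with the measurements. A minimal sketch of that split (illustrative only; batch_u and batch_y are time-first tensors as in the training loop above):

    import torch

    def estimate_then_simulate_loss(model, estimator, batch_u, batch_y, seq_est_len):
        # Estimation window: infer the initial state from the first seq_est_len samples
        x0 = estimator(batch_u[:seq_est_len], batch_y[:seq_est_len])
        # Simulation window: run the state-space model in open loop from x0
        y_sim = model(x0, batch_u[seq_est_len:])
        # Fit loss on the simulated portion of the window
        return torch.nn.functional.mse_loss(batch_y[seq_est_len:], y_sim)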
/examples/statespace/RLC/loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pandas as pd
4 |
5 | COL_T = ['time']
6 | COL_X = ['V_C', 'I_L']
7 | COL_U = ['V_IN']
8 | COL_Y = ['V_C']
9 |
10 |
11 | def rlc_loader(dataset, dataset_type="nl", output='V_C', noise_std=0.1, dtype=np.float32, scale=True, n_data=-1):
12 | filename = f"RLC_data_{dataset}_{dataset_type}.csv"
13 | df_data = pd.read_csv(os.path.join("data", filename))
14 | t = np.array(df_data[['time']], dtype=dtype)
15 | u = np.array(df_data[['V_IN']], dtype=dtype)
16 | y = np.array(df_data[[output]], dtype=dtype)
17 | x = np.array(df_data[['V_C', 'I_L']], dtype=dtype)
18 |
19 | if scale:
20 | u = u/100
21 | y = y/100
22 | x = x/[100, 6]
23 |
24 | y += np.random.randn(*y.shape) * noise_std
25 |
26 | if n_data > 0:
27 | t = t[:n_data, :]
28 | u = u[:n_data, :]
29 | y = y[:n_data, :]
30 | x = x[:n_data, :]
31 | return t, u, y, x
32 |
33 |
34 | if __name__ == "__main__":
35 | t, u, y, x = rlc_loader("train", "lin")
36 |
--------------------------------------------------------------------------------
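For reference, a minimal usage sketch of rlc_loader as called by the training scripts above (the shapes follow from the column selection in the function; run from the RLC example folder so that the relative data/ path resolves):

    import numpy as np
    from loader import rlc_loader

    # Nonlinear training set, scaled, with output noise of standard deviation 0.1
    t, u, y, x = rlc_loader("train", "nl", noise_std=0.1, dtype=np.float32)
    print(t.shape, u.shape, y.shape, x.shape)  # (N, 1), (N, 1), (N, 1), (N, 2)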
/examples/statespace/RLC/old/RLC_OE_comparison.m:
--------------------------------------------------------------------------------
1 | clear;
2 | clc;
3 |
4 | %% Read data table
5 |
6 | data_path = fullfile("data", "RLC_data_id.csv");
7 | data_table = readtable(data_path);
8 |
9 |
10 | vin = data_table.V_IN;
11 | vC = data_table.V_C;
12 | iL = data_table.I_L;
13 | y = [vC iL];
14 | t = data_table.time;
15 | Ts = t(2) - t(1);
16 |
17 | %% Add noise %%
18 | add_noise = 0;
19 | STD_V = add_noise*10;
20 | STD_I = add_noise*1;
21 | vC_meas = vC + randn(size(vC))*STD_V;
22 | iL_meas = iL + randn(size(iL))*STD_I;
23 | y_meas = [vC_meas iL_meas];
24 |
25 | %% Identification data %%
26 | data_id = iddata(y_meas,vin,Ts);
27 | model_subs = oe(data_id, 'nb',[2; 2], 'nf', [2; 2]);
28 |
29 | y_sim_id = sim(model_subs, data_id);
30 | y_sim_id = y_sim_id.OutputData;
31 |
32 |
33 | %% Plot data %%
34 |
35 | figure()
36 | plot(t, vC, 'k');
37 | hold on;
38 | plot(t, y_sim_id(:,1), 'b');
39 | legend('True', 'Model');
40 |
41 | figure()
42 | plot(t, iL, 'k');
43 | hold on;
44 | plot(t, y_sim_id(:,2), 'b');
45 | legend('True', 'Model');
46 |
47 | %%
48 | SSE_v = sum((vC - y_sim_id(:,1)).^2);
49 | y_mean_v = mean(vC);
50 | SST_v = sum((vC - y_mean_v).^2);
51 | R_sq_v = 1 - SSE_v/SST_v;
52 |
53 | SSE_i = sum((iL - y_sim_id(:,2)).^2);
54 | y_mean_i = mean(iL);
55 | SST_i = sum((iL - y_mean_i).^2);
56 | R_sq_i = 1 - SSE_i/SST_i;
57 |
58 | fprintf("OE fitting performance\n");
59 | fprintf("Identification dataset:\nR-squared vC: %.3f\nR-squared iL: %.3f\n", R_sq_v, R_sq_i)
60 |
61 | %% Read data table val
62 |
63 | data_path = fullfile("data", "RLC_data_val.csv");
64 | data_table_val = readtable(data_path);
65 |
66 |
67 | vin = data_table_val.V_IN;
68 | vC = data_table_val.V_C;
69 | iL = data_table_val.I_L;
70 | y = [vC iL];
71 | t = data_table_val.time;
72 | Ts = t(2) - t(1);
73 |
74 | %% Validation data %%
75 | data_val = iddata(y,vin,Ts);
76 |
77 | y_sim_val = sim(model_subs, data_val);
78 | y_sim_val = y_sim_val.OutputData;
79 |
80 | loss = mean((vC - y_sim_val).^2);
81 |
82 | %%
83 | SSE_v = sum((vC - y_sim_val(:,1)).^2);
84 | y_mean_v = mean(vC);
85 | SST_v = sum((vC - y_mean_v).^2);
86 | R_sq_v = 1 - SSE_v/SST_v;
87 |
88 | SSE_i = sum((iL - y_sim_val(:,2)).^2);
89 | y_mean_i = mean(iL);
90 | SST_i = sum((iL - y_mean_i).^2);
91 | R_sq_i = 1 - SSE_i/SST_i;
92 |
93 | fprintf("Validation dataset:\nR-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
94 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/old/RLC_subspace_comparison.m:
--------------------------------------------------------------------------------
1 | clear;
2 | clc;
3 | close all;
4 |
5 | %% Read data table
6 |
7 | data_path = fullfile("data", "RLC_data_id.csv");
8 | data_table = readtable(data_path);
9 |
10 |
11 | vin = data_table.V_IN;
12 | vC = data_table.V_C;
13 | iL = data_table.I_L;
14 | y = [vC iL];
15 | t = data_table.time;
16 | Ts = t(2) - t(1);
17 |
18 | %% Add noise %%
19 | add_noise = 0;
20 | STD_V = add_noise*10;
21 | STD_I = add_noise*1;
22 | vC_meas = vC + randn(size(vC))*STD_V;
23 | iL_meas = iL + randn(size(iL))*STD_I;
24 | y_meas = [vC_meas iL_meas];
25 |
26 | %% Identification data %%
27 | data_id = iddata(y_meas,vin,Ts);
28 | model_subs = n4sid(data_id);%, 2)
29 |
30 | y_sim = sim(model_subs, data_id);
31 | y_sim_val = y_sim.OutputData;
32 |
33 | loss = mean((vC - y_sim_val).^2);
34 |
35 | %% Plot data %%
36 |
37 | figure()
38 | plot(t, vC, 'k');
39 | hold on;
40 | plot(t, y_sim_val(:,1), 'b');
41 | legend('True', 'Model');
42 |
43 | figure()
44 | plot(t, iL, 'k');
45 | hold on;
46 | plot(t, y_sim_val(:,2), 'b');
47 | legend('True', 'Model');
48 |
49 | %%
50 | SSE_v = sum((vC - y_sim_val(:,1)).^2);
51 | y_mean_v = mean(vC);
52 | SST_v = sum((vC - y_mean_v).^2);
53 | R_sq_v = 1 - SSE_v/SST_v;
54 |
55 | SSE_i = sum((iL - y_sim_val(:,2)).^2);
56 | y_mean_i = mean(iL);
57 | SST_i = sum((iL - y_mean_i).^2);
58 | R_sq_i = 1 - SSE_i/SST_i;
59 |
60 | fprintf("Subspace fitting performance\n");
61 | fprintf("R-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
62 |
63 |
64 | %% Read data table val
65 |
66 | data_path = fullfile("data", "RLC_data_val.csv");
67 | data_table_val = readtable(data_path);
68 |
69 |
70 | vin = data_table_val.V_IN;
71 | vC = data_table_val.V_C;
72 | iL = data_table_val.I_L;
73 | y = [vC iL];
74 | t = data_table_val.time;
75 | Ts = t(2) - t(1);
76 |
77 | %% Validation data %%
78 | data_val = iddata(y,vin,Ts);
79 |
80 | y_sim_val = sim(model_subs, data_val);
81 | y_sim_val = y_sim_val.OutputData;
82 |
83 | loss = mean((vC - y_sim_val).^2);
84 |
85 | %%
86 | SSE_v = sum((vC - y_sim_val(:,1)).^2);
87 | y_mean_v = mean(vC);
88 | SST_v = sum((vC - y_mean_v).^2);
89 | R_sq_v = 1 - SSE_v/SST_v;
90 |
91 | SSE_i = sum((iL - y_sim_val(:,2)).^2);
92 | y_mean_i = mean(iL);
93 | SST_i = sum((iL - y_mean_i).^2);
94 | R_sq_i = 1 - SSE_i/SST_i;
95 |
96 | fprintf("Validation dataset:\nR-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
97 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/old/RLC_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import matplotlib
5 | import matplotlib.pyplot as plt
6 | import torch
7 | from torchid.ss.ct.ssmodels_ct import NeuralStateSpaceModel
8 | from torchid.ss.ct.ss_simulator_ct import ForwardEulerSimulator
9 | import torchid.metrics as metrics
10 |
11 | if __name__ == '__main__':
12 |
13 | matplotlib.rc('text', usetex=True)
14 |
15 | plot_input = False
16 |
17 | dataset_type = 'test'
18 | #dataset_type = 'id'
19 |
20 | model_type = '64step_noise'
21 | #model_type = 'fullsim_noise'
22 | #model_type = '1step_noise'
23 | #model_type = '1step_nonoise'
24 | #model_type = 'soft_noise'
25 |
26 | # Column names in the dataset
27 | COL_T = ['time']
28 | COL_X = ['V_C', 'I_L']
29 | COL_U = ['V_IN']
30 | COL_Y = ['V_C']
31 |
32 | # Load dataset
33 | dataset_filename = f"RLC_data_{dataset_type}.csv"
34 | df_X = pd.read_csv(os.path.join("data", dataset_filename))
35 | time_data = np.array(df_X[COL_T], dtype=np.float32)
36 | x = np.array(df_X[COL_X], dtype=np.float32)
37 | u = np.array(df_X[COL_U], dtype=np.float32)
38 | y_var_idx = 0 # 0: voltage 1: current
39 | y = np.copy(x[:, [y_var_idx]])
40 | N = np.shape(y)[0]
41 | ts = time_data[1, 0] - time_data[0, 0]
42 | ts_integ = 1.0
43 |
44 | # Add measurement noise
45 | std_noise_V = 0.0 * 5.0
46 | std_noise_I = 0.0 * 0.5
47 | std_noise = np.array([std_noise_V, std_noise_I])
48 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
49 | x_noise = x_noise.astype(np.float32)
50 | y_noise = x_noise[:, [y_var_idx]]
51 |
52 | # Scale dataset
53 | #scale_vector = np.array([100.0, 10.0]).astype(np.float32)
54 | #x = x/scale_vector
55 | #x_noise = x_noise/scale_vector
56 |
57 | # Build validation data
58 | t_val_start = 0
59 | t_val_end = time_data[-1]
60 | idx_val_start = int(t_val_start // ts)
61 | idx_val_end = int(t_val_end // ts)
62 | u_val = u[idx_val_start:idx_val_end]
63 | x_meas_val = x_noise[idx_val_start:idx_val_end]
64 | x_true_val = x[idx_val_start:idx_val_end]
65 | y_val = y[idx_val_start:idx_val_end]
66 | time_val = time_data[idx_val_start:idx_val_end]
67 |
68 | # Setup neural model structure and load fitted model parameters
69 | #scale_dx = 800000.0
70 | #scale_dx = 100
71 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)#, scale_dx=scale_dx)
72 | nn_solution = ForwardEulerSimulator(ss_model, ts=ts_integ)
73 | model_filename = f"model_SS_{model_type}.pt"
74 | nn_solution.f_xu.load_state_dict(torch.load(os.path.join("models", model_filename)))
75 |
76 | # Evaluate the model in open-loop simulation against validation data
77 | x_0 = x_meas_val[0, :]
78 | with torch.no_grad():
79 | x_sim_torch = nn_solution(torch.tensor(x_0), torch.tensor(u_val))
80 | loss = torch.mean(torch.abs(x_sim_torch - torch.tensor(x_true_val)))
81 |
82 | # Plot results
83 | x_sim = np.array(x_sim_torch)
84 | if not plot_input:
85 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
86 | else:
87 | fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
88 | time_val_us = time_val*1e6
89 |
90 | if dataset_type == 'id':
91 | t_plot_start = 0.0e-3#0.2e-3
92 | else:
93 | t_plot_start = 0.0e-3#1.9e-3
94 | t_plot_end = t_plot_start + 1.0#0.32e-3
95 |
96 | idx_plot_start = int(t_plot_start // ts)
97 | idx_plot_end = int(t_plot_end // ts)
98 |
99 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], x_true_val[idx_plot_start:idx_plot_end,0], 'k', label='$v_C$')
100 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], x_sim[idx_plot_start:idx_plot_end,0],'r--', label='$\hat{v}^{\mathrm{sim}}_C$')
101 | ax[0].legend(loc='upper right')
102 | ax[0].grid(True)
103 | ax[0].set_xlabel("Time ($\mu$s)")
104 | ax[0].set_ylabel("Voltage (V)")
105 | ax[0].set_ylim([-300, 300])
106 |
107 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], np.array(x_true_val[idx_plot_start:idx_plot_end:,1]), 'k', label='$i_L$')
108 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], x_sim[idx_plot_start:idx_plot_end:,1],'r--', label='$\hat i_L^{\mathrm{sim}}$')
109 | ax[1].legend(loc='upper right')
110 | ax[1].grid(True)
111 | ax[1].set_xlabel("Time ($\mu$s)")
112 | ax[1].set_ylabel("Current (A)")
113 | ax[1].set_ylim([-25, 25])
114 |
115 | if plot_input:
116 | ax[2].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k')
117 | #ax[2].legend(loc='upper right')
118 | ax[2].grid(True)
119 | ax[2].set_xlabel("Time ($\mu$s)")
120 | ax[2].set_ylabel("Input voltage $v_C$ (V)")
121 | ax[2].set_ylim([-400, 400])
122 |
123 | fig_name = f"RLC_SS_{dataset_type}_{model_type}.pdf"
124 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
125 |
126 | # R-squared metrics
127 | R_sq = metrics.r_squared(x_true_val, x_sim)
128 | print(f"R-squared metrics: {R_sq}")
129 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/old/RLC_train_ae.py:
--------------------------------------------------------------------------------
1 | """
2 | Use an LSTM encoder network, run backward in time, to estimate the initial state; then simulate the system forward in time.
3 | Overall, the combination of state_estimator + nn_solution may be seen as an autoencoder.
4 | """
5 |
6 | import os
7 | import time
8 | import numpy as np
9 | import pandas as pd
10 | import torch
11 | import torch.optim as optim
12 | from torchid.ss.ct.models import NeuralStateUpdate
13 | from torchid.ss.ct.simulator import StateSpaceSimulator
14 | from torchid.ss.ct.estimators import FlippedLSTMStateEstimator
15 | import matplotlib.pyplot as plt
16 |
17 |
18 | # Truncated simulation error minimization method
19 | if __name__ == '__main__':
20 |
21 | # Set seed for reproducibility
22 | np.random.seed(0)
23 | torch.manual_seed(0)
24 |
25 | # Overall parameters
26 | num_iter = 20000 # gradient-based optimization steps
27 | seq_len = 64 # subsequence length m
28 | batch_size = 32 # batch size q
29 | t_fit = 2e-3 # fit on the first t_fit seconds (2 ms) of data
30 | lr = 1e-4 # learning rate
31 | test_freq = 100 # print message every test_freq iterations
32 | add_noise = True
33 |
34 | # Column names in the dataset
35 | COL_T = ['time']
36 | COL_X = ['V_C', 'I_L']
37 | COL_U = ['V_IN']
38 | idx_out = 0 # output=vc
39 |
40 | scale_u = np.array(80., dtype=np.float32)
41 | scale_x = np.array([90., 3.], dtype=np.float32)
42 |
43 | # Load dataset
44 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
45 | time_data = np.array(df_X[COL_T], dtype=np.float32)
46 | t = np.array(df_X[COL_T], dtype=np.float32)
47 |
48 | x = np.array(df_X[COL_X], dtype=np.float32)
49 | u = np.array(df_X[COL_U], dtype=np.float32)
50 |
51 | # Add measurement noise
52 | std_noise_V = add_noise * 10.0
53 | std_noise_I = add_noise * 1.0
54 | std_noise = np.array([std_noise_V, std_noise_I])
55 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
56 | x_noise = x_noise.astype(np.float32)
57 | y_noise = x_noise[:, [idx_out]]
58 |
59 | x = x/scale_x
60 | u = u/scale_u
61 | x_noise = x_noise/scale_x
62 | y = np.copy(x[:, [idx_out]])
63 |
64 | # Get fit data #
65 | ts = t[1] - t[0]
66 | n_fit = int(t_fit // ts) # x.shape[0]
67 | u_fit = u[0:n_fit]
68 | x_fit = x_noise[0:n_fit]
69 | x_fit_nonoise = x[0:n_fit] # not used, just for reference
70 | time_fit = t[0:n_fit]
71 | y_fit = x_fit[:, [idx_out]]
72 |
73 | # Fit data to pytorch tensors #
74 | u_torch_fit = torch.from_numpy(u_fit)
75 | time_torch_fit = torch.from_numpy(time_fit)
76 |
77 | # Setup initial state estimator
78 | state_estimator = FlippedLSTMStateEstimator(n_u=1, n_y=1, n_x=2)
79 | # Setup neural model structure
80 | ss_model = NeuralStateUpdate(n_x=2, n_u=1, n_feat=50)
81 | nn_solution = StateSpaceSimulator(ss_model)
82 |
83 | # Setup optimizer
84 | optimizer = optim.Adam([
85 | {'params': state_estimator.parameters(), 'lr': lr},
86 | {'params': nn_solution.parameters(), 'lr': lr},
87 | ], lr=lr)
88 |
89 | # Batch extraction function
90 | def get_batch(batch_size, seq_len):
91 |
92 | # Select batch indexes
93 | num_train_samples = y_fit.shape[0]
94 | batch_start = np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64),
95 | batch_size, replace=False) # batch start indices
96 | batch_idx = batch_start[:, np.newaxis] + np.arange(seq_len) # batch samples indices
97 | batch_idx = batch_idx.T # transpose indexes to obtain batches with structure (m, q, n_x)
98 |
99 | # Extract batch data
100 | batch_t = torch.tensor(time_fit[batch_idx])
101 | batch_u = torch.tensor(u_fit[batch_idx])
102 | batch_y = torch.tensor(y_fit[batch_idx])
103 |
104 | return batch_t, batch_u, batch_y
105 |
106 |
107 | LOSS = []
108 | LOSS_CONSISTENCY = []
109 | LOSS_FIT = []
110 | start_time = time.time()
111 | # Training loop
112 |
113 | for itr in range(0, num_iter):
114 |
115 | optimizer.zero_grad()
116 |
117 | # Simulate
118 | batch_t, batch_u, batch_y = get_batch(batch_size, seq_len)
119 |
120 | # Estimate the initial state and simulate forward
121 | batch_x0 = state_estimator(batch_u, batch_y)[0, :, :]
122 | batch_x_sim = nn_solution(batch_x0, batch_u)
123 | batch_y_sim = batch_x_sim[..., [0]]
124 |
125 | # Compute fit (autoencoder reconstruction) loss
126 | err_ae = batch_y - batch_y_sim
127 | loss_ae = torch.mean(err_ae**2)
128 |
129 | # Total loss (reconstruction only)
130 | loss = loss_ae
131 |
132 | # Statistics
133 | LOSS.append(loss.item())
134 | if itr % test_freq == 0:
135 | with torch.no_grad():
136 | print(f'Iter {itr} | AE Loss {loss:.4f} ')
137 |
138 | # Optimize
139 | loss.backward()
140 | optimizer.step()
141 |
142 | train_time = time.time() - start_time
143 | print(f"\nTrain time: {train_time:.2f}")
144 |
145 | #%%
146 |
147 | # Save model
148 | if not os.path.exists("models"):
149 | os.makedirs("models")
150 |
151 | model_filename = "ss_model_ae.pt"
152 | torch.save(ss_model.state_dict(), os.path.join("models", model_filename))
153 |
154 | t_val = 5e-3
155 | n_val = int(t_val // ts) # x.shape[0]
156 |
157 | #%%
158 | with torch.no_grad():
159 | u_v = torch.tensor(u[:, None, :])
160 | y_v = torch.tensor(y[:, None, :])
161 | x0 = state_estimator(u_v, y_v)[0, :, :]
162 | y_sim = nn_solution(x0, u_v)
163 |
164 |
165 | #%%
166 | fig, ax = plt.subplots(1, 1)
167 | ax.plot(LOSS, 'k', label='ALL')
168 | ax.grid(True)
169 | ax.legend()
170 | ax.set_ylabel("Loss (-)")
171 | ax.set_xlabel("Iteration (-)")
172 |
173 | fig, ax = plt.subplots(1, 1, sharex=True)
174 | ax.plot(y_v[:, 0, 0], 'k', label='meas')
175 | ax.grid(True)
176 | ax.plot(y_sim[:, 0, 0], 'b', label='sim')
177 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/old/RLC_train_full.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use("TkAgg")
3 | import os
4 | import pandas as pd
5 | import numpy as np
6 | import torch
7 | import torch.optim as optim
8 | import time
9 | import matplotlib.pyplot as plt
10 | import sys
11 | sys.path.append(os.path.join("../..", ".."))
12 | from torchid.ssmodels_ct import NeuralStateSpaceModel
13 | from torchid.ss_simulator_ct import ForwardEulerSimulator
14 |
15 | # Full simulation error minimization method
16 | if __name__ == '__main__':
17 |
18 | # Set seed for reproducibility
19 | np.random.seed(0)
20 | torch.manual_seed(0)
21 |
22 | # Overall parameters
23 | num_iter = 10000 # gradient-based optimization steps
24 | t_fit = 2e-3 # fit on the first t_fit seconds (2 ms) of data
25 | lr = 1e-3 # learning rate
26 | test_freq = 10 # print message every test_freq iterations
27 | add_noise = True
28 |
29 | # Column names in the dataset
30 | COL_T = ['time']
31 | COL_X = ['V_C', 'I_L']
32 | COL_U = ['V_IN']
33 | COL_Y = ['V_C']
34 |
35 | # Load dataset
36 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
37 | t = np.array(df_X[COL_T], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 |
41 | # Add measurement noise
42 | std_noise_V = add_noise * 10.0
43 | std_noise_I = add_noise * 1.0
44 | std_noise = np.array([std_noise_V, std_noise_I])
45 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
46 | x_noise = x_noise.astype(np.float32)
47 |
48 | # Compute SNR
49 | P_x = np.mean(x ** 2, axis=0)
50 | P_n = std_noise**2
51 | SNR = P_x/(P_n+1e-10)
52 | SNR_db = 10*np.log10(SNR)
53 |
54 | # Get fit data #
55 | Ts = t[1] - t[0]
56 | n_fit = int(t_fit // Ts) # x.shape[0]
57 | u_fit = u[0:n_fit]
58 | x_fit = x_noise[0:n_fit]
59 | x_fit_nonoise = x[0:n_fit] # not used, just for reference
60 | time_fit = t[0:n_fit]
61 |
62 | # Fit data to pytorch tensors #
63 | u_torch_fit = torch.from_numpy(u_fit)
64 | x_meas_torch_fit = torch.from_numpy(x_fit)
65 | time_torch_fit = torch.from_numpy(time_fit)
66 |
67 | # Setup neural model structure
68 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
69 | nn_solution = ForwardEulerSimulator(ss_model)
70 |
71 | # Scale loss with respect to the initial one
72 | with torch.no_grad():
73 | x0_torch = torch.tensor([0.0, 0.0])
74 | x_sim_torch_fit = nn_solution(x0_torch[None, :], u_torch_fit[:, None, :])
75 | x_sim_torch_fit = x_sim_torch_fit.squeeze(1)
76 | err_init = x_meas_torch_fit - x_sim_torch_fit
77 | scale_error = torch.sqrt(torch.mean((err_init)**2, dim=(0)))
78 |
79 | scripted_nn_solution = torch.jit.script(nn_solution)
80 |
81 | # Setup optimizer
82 | params_net = list(scripted_nn_solution.parameters())
83 | optimizer = optim.Adam([
84 | {'params': params_net, 'lr': lr},
85 | ], lr=lr)
86 |
87 | LOSS = []
88 | start_time = time.time()
89 | # Training loop
90 | for itr in range(0, num_iter):
91 |
92 | optimizer.zero_grad()
93 |
94 | # Simulate
95 | x0_torch = torch.tensor([0.0, 0.0])
96 | x_sim_torch_fit = nn_solution(x0_torch[None, :], u_torch_fit[:, None, :])
97 | x_sim_torch_fit = x_sim_torch_fit.squeeze(1)
98 |
99 | # Compute fit loss
100 | err_fit = x_sim_torch_fit - x_meas_torch_fit
101 | err_fit_scaled = err_fit/scale_error
102 | loss = torch.mean(err_fit_scaled**2)
103 |
104 | # Statistics
105 | LOSS.append(loss.item())
106 | if itr % test_freq == 0:
107 | with torch.no_grad():
108 | print(f'Iter {itr} | Loss {loss:.4f}')
109 |
110 | # Optimize
111 | loss.backward()
112 | optimizer.step()
113 |
114 | train_time = time.time() - start_time
115 | print(f"\nTrain time: {train_time:.2f}") # 8043.92 seconds
116 |
117 | # Save model
118 | if not os.path.exists("models"):
119 | os.makedirs("models")
120 | if add_noise:
121 | model_filename = f"model_SS_fullsim_noise.pkl"
122 | hidden_filename = f"hidden_SS_fullsim_noise.pkl"
123 | else:
124 | model_filename = f"model_SS_fullsim_nonoise.pkl"
125 | hidden_filename = f"hidden_SS_fullsim_nonoise.pkl"
126 |
127 | torch.save(nn_solution.f_xu.state_dict(), os.path.join("models", model_filename))
128 |
129 |
130 | # In[Simulate]
131 |
132 | t_val = 5e-3
133 | n_val = int(t_val // Ts) # x.shape[0]
134 |
135 | input_data_val = u[0:n_val]
136 | state_data_val = x[0:n_val]
137 |
138 | x0_val = np.zeros(2, dtype=np.float32)
139 | x0_torch_val = torch.from_numpy(x0_val)
140 | u_torch_val = torch.tensor(input_data_val)
141 | x_true_torch_val = torch.from_numpy(state_data_val)
142 |
143 |
144 | with torch.no_grad():
145 | x_sim_torch_val = nn_solution(x0_torch_val[None, :], u_torch_val[:, None, :])
146 | x_sim_torch_val = x_sim_torch_val.squeeze(1)
147 |
148 | if not os.path.exists("fig"):
149 | os.makedirs("fig")
150 |
151 | fig, ax = plt.subplots(3, 1, sharex=True)
152 | ax[0].plot(np.array(x_true_torch_val[:, 0]), label='True')
153 | ax[0].plot(np.array(x_sim_torch_val[:, 0]), label='Fit')
154 | ax[0].legend()
155 | ax[0].grid(True)
156 |
157 | ax[1].plot(np.array(x_true_torch_val[:, 1]), label='True')
158 | ax[1].plot(np.array(x_sim_torch_val[:, 1]), label='Fit')
159 | ax[1].legend()
160 | ax[1].grid(True)
161 |
162 | ax[2].plot(np.array(u_torch_val), label='Input')
163 | ax[2].grid(True)
164 |
165 | fig, ax = plt.subplots(1, 1)
166 | ax.plot(LOSS, 'k', label='ALL')
167 | ax.grid(True)
168 | ax.legend()
169 | ax.set_ylabel("Loss (-)")
170 | ax.set_xlabel("Iteration (-)")
171 |
172 | if add_noise:
173 | fig_name = f"RLC_SS_loss_fullsim_noise.pdf"
174 | else:
175 | fig_name = f"RLC_SS_loss_fullsim_nonoise.pdf"
176 |
177 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
178 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/old/RLC_train_soft.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import torch
5 | import torch.optim as optim
6 | import time
7 | import matplotlib.pyplot as plt
8 | from torchid.ss.ct.ssmodels_ct import NeuralStateSpaceModel
9 | from torchid.ss.ct.ss_simulator_ct import ForwardEulerSimulator
10 |
11 |
12 | # Soft-constrained integration method
13 | if __name__ == '__main__':
14 |
15 | # Set seed for reproducibility
16 | np.random.seed(0)
17 | torch.manual_seed(0)
18 |
19 | # Overall parameters
20 | num_iter = 50000 # gradient-based optimization steps
21 | t_fit = 2e-3 # fit on the first t_fit seconds (2 ms) of data
22 | alpha = 1e1 # fit/consistency trade-off constant
23 | lr = 5e-4 # learning rate
24 | test_freq = 100 # print message every test_freq iterations
25 | add_noise = True
26 |
27 | # Column names in the dataset
28 | COL_T = ['time']
29 | COL_X = ['V_C', 'I_L']
30 | COL_U = ['V_IN']
31 | COL_Y = ['V_C']
32 |
33 | # Load dataset
34 | #df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
35 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
36 | t = np.array(df_X[COL_T], dtype=np.float32)
37 | y = np.array(df_X[COL_Y], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 |
41 | # Add measurement noise
42 | std_noise_V = add_noise * 10.0
43 | std_noise_I = add_noise * 1.0
44 | std_noise = np.array([std_noise_V, std_noise_I])
45 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
46 | x_noise = x_noise.astype(np.float32)
47 |
48 | # Compute SNR
49 | P_x = np.mean(x ** 2, axis=0)
50 | P_n = std_noise**2
51 | SNR = P_x/(P_n+1e-10)
52 | SNR_db = 10*np.log10(SNR)
53 |
54 | # Get fit data #
55 | Ts = t[1] - t[0]
56 | n_fit = int(t_fit // Ts) # x.shape[0]
57 | u_fit = u[0:n_fit]
58 | x_fit = x_noise[0:n_fit]
59 | x_fit_nonoise = x[0:n_fit] # not used, just for reference
60 | y_fit = y[0:n_fit]
61 | time_fit = t[0:n_fit]
62 |
63 | # Fit data to pytorch tensors #
64 | time_torch_fit = torch.from_numpy(time_fit[:, 0])
65 | u_torch_fit = torch.from_numpy(u_fit)
66 | y_true_torch_fit = torch.from_numpy(y_fit)
67 | x_meas_torch_fit = torch.from_numpy(x_fit)
68 | time_torch_fit = torch.from_numpy(time_fit)
69 | x_hidden_init = x_fit + 0*np.random.randn(*x_fit.shape)*std_noise
70 | x_hidden_init = x_hidden_init.astype(np.float32)
71 | x_hidden_fit = torch.tensor(x_hidden_init, requires_grad=True) # hidden state is an optimization variable
72 |
73 | ts_integ = 1.0 # better for numerical reasons
74 | # Setup neural model structure
75 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64, activation='relu')
76 | nn_solution = ForwardEulerSimulator(ss_model, ts=ts_integ)
77 | #nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_SS_64step_noise.pkl")))
78 |
79 | # Setup optimizer
80 | params_net = list(ss_model.parameters())
81 | params_hidden = [x_hidden_fit]
82 | optimizer = optim.Adam([
83 | {'params': params_net, 'lr': lr},
84 | {'params': params_hidden, 'lr': lr},
85 | ], lr=lr)
86 |
87 | # Scale loss with respect to the initial one
88 | scale_error = torch.tensor([20.0, 1.0]).float()
89 |
90 |
91 | LOSS = []
92 | LOSS_SIM = []
93 | start_time = time.time()
94 | # Training loop
95 |
96 | for itr in range(0, num_iter):
97 |
98 | optimizer.zero_grad()
99 |
100 | # Compute fit loss
101 | x_hidden = x_hidden_fit[:]
102 | err_fit = x_hidden - x_meas_torch_fit
103 | err_fit_scaled = err_fit/scale_error
104 | loss_fit = torch.mean(err_fit_scaled**2)
105 |
106 | # Compute consistency loss
107 | DX = ts_integ*ss_model(x_hidden[0:-1, :], u_torch_fit[0:-1, :])
108 | err_consistency = x_hidden[1:, :] - x_hidden[0:-1, :] - DX
109 | err_consistency_scaled = err_consistency/scale_error
110 | loss_consistency = torch.mean(err_consistency_scaled**2)
111 |
112 | # Compute trade-off loss
113 | loss = loss_fit + alpha*loss_consistency
114 |
115 | # Statistics
116 | LOSS.append(loss.item())
117 | if itr % test_freq == 0:
118 | with torch.no_grad():
119 | x0_torch_fit = x_hidden_fit[0, :]
120 | x_sim_torch_fit = nn_solution(x0_torch_fit[None, :], torch.tensor(u_fit)[:, None, :])
121 | x_sim_torch_fit = x_sim_torch_fit.squeeze(1)
122 | err_sim_torch_fit = x_sim_torch_fit - torch.tensor(x_fit)
123 | loss_sim = torch.sqrt(torch.mean(err_sim_torch_fit**2))
124 | LOSS_SIM.append(loss_sim.item())
125 | print(f'Iter {itr} | Tradeoff Loss {loss:.6f} '
126 | f'Consistency Loss {loss_consistency:.6f} Fit Loss {loss_fit:.6f} Sim Loss {loss_sim:.6f}')
127 |
128 | # Optimize
129 | loss.backward()
130 | optimizer.step()
131 |
132 | train_time = time.time() - start_time
133 | print(f"\nTrain time: {train_time:.2f}")
134 |
135 | # Save model
136 | if not os.path.exists("models"):
137 | os.makedirs("models")
138 | if add_noise:
139 | model_filename = f"model_SS_soft_noise.pt"
140 | else:
141 | model_filename = f"model_SS_soft_nonoise.pt"
142 |
143 | torch.save(ss_model.state_dict(), os.path.join("models", model_filename))
144 |
145 | if not os.path.exists("fig"):
146 | os.makedirs("fig")
147 |
148 | fig, ax = plt.subplots(1, 1)
149 | ax.plot(LOSS)
150 | ax.grid(True)
151 | ax.set_ylabel("Loss (-)")
152 | ax.set_xlabel("Iteration (-)")
153 |
154 | # In[Val]
155 | t_val = 5e-3
156 | n_val = int(t_val // Ts) # x.shape[0]
157 |
158 | time_val = t[0:n_val]
159 | input_data_val = u[0:n_val]
160 | state_data_val = x[0:n_val]
161 | output_data_val = y[0:n_val]
162 |
163 | x0_val = np.zeros(2, dtype=np.float32)
164 | x0_torch_val = torch.from_numpy(x0_val)
165 | u_torch_val = torch.tensor(input_data_val)
166 | x_true_torch_val = torch.from_numpy(state_data_val)
167 | time_torch_val = torch.from_numpy(time_val[:, 0])
168 |
169 | with torch.no_grad():
170 | x_sim_torch_val = nn_solution(x0_torch_val[None, :], u_torch_val[:, None, :])
171 | x_sim_torch_val = x_sim_torch_val.squeeze(1)
172 |
173 | fig, ax = plt.subplots(3, 1, sharex=True)
174 | ax[0].plot(np.array(x_true_torch_val[:, 0]), label='True')
175 | ax[0].plot(np.array(x_sim_torch_val[:, 0]), label='Fit')
176 | ax[0].legend()
177 | ax[0].grid(True)
178 |
179 | ax[1].plot(np.array(x_true_torch_val[:, 1]), label='True')
180 | ax[1].plot(np.array(x_sim_torch_val[:, 1]), label='Fit')
181 | ax[1].legend()
182 | ax[1].grid(True)
183 |
184 | ax[2].plot(np.array(u_torch_val), label='Input')
185 | ax[2].grid(True)
186 |
187 | x_hidden_fit_np = x_hidden_fit.detach().numpy()
188 | fig, ax = plt.subplots(2, 1, sharex=True)
189 | ax[0].plot(x_fit_nonoise[:, 0], 'k', label='True')
190 | ax[0].plot(x_fit[:, 0], 'b', label='Measured')
191 | ax[0].plot(x_hidden_fit_np[:, 0], 'r', label='Hidden')
192 | ax[0].legend()
193 | ax[0].grid(True)
194 |
195 | ax[1].plot(x_fit_nonoise[:, 1], 'k', label='True')
196 | ax[1].plot(x_fit[:, 1], 'b', label='Measured')
197 | ax[1].plot(x_hidden_fit_np[:, 1], 'r', label='Hidden')
198 | ax[1].legend()
199 | ax[1].grid(True)
200 |
--------------------------------------------------------------------------------
/examples/statespace/RLC/symbolic_RLC.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import os
4 |
5 | R_val = 3
6 | L_val = 50e-6
7 | C_val = 270e-9
8 | Td_val = 1e-6
9 |
10 |
11 | def saturation_formula(current_abs):
12 | sat_ratio = (1 / np.pi * np.arctan(-5 * (current_abs - 5)) + 0.5) * 0.9 + 0.1
13 | return sat_ratio
14 |
15 |
16 | def fxu_ODE(t, x, u):
17 | A = np.array([[0.0, 1.0 / C_val],
18 | [-1 / (L_val), -R_val / L_val]
19 | ])
20 | B = np.array([[0.0], [1.0 / (L_val)]])
21 | dx = np.zeros(2, dtype=np.float64)
22 | dx[0] = A[0, 0] * x[0] + A[0, 1] * x[1] + B[0, 0] * u[0]
23 | dx[1] = A[1, 0] * x[0] + A[1, 1] * x[1] + B[1, 0] * u[0]
24 | return dx
25 |
26 |
27 | def fxu_ODE_nl(t, x, u):
28 | I_abs = np.abs(x[1])
29 | L_val_mod = L_val * saturation_formula(I_abs)
30 | R_val_mod = R_val
31 | C_val_mod = C_val
32 |
33 | A = np.array([[0.0, 1.0 / C_val_mod],
34 | [-1 / (L_val_mod), -R_val_mod / L_val_mod]
35 | ])
36 | B = np.array([[0.0], [1.0 / (L_val_mod)]])
37 |
38 | dx = np.zeros(2, dtype=np.float64)
39 | dx[0] = A[0, 0] * x[0] + A[0, 1] * x[1] + B[0, 0] * u[0]
40 | dx[1] = A[1, 0] * x[0] + A[1, 1] * x[1] + B[1, 0] * u[0]
41 | return dx
42 |
43 |
44 | A_nominal = np.array([[0.0, 1.0 / C_val],
45 | [-1 / L_val, -R_val / L_val]
46 | ])
47 |
48 | B_nominal = np.array([[0.0], [1.0 / L_val]])
49 |
50 | if __name__ == '__main__':
51 |
52 | x = np.zeros(2)
53 | u = np.zeros(1)
54 | dx = fxu_ODE_nl(0.0, x, u)
55 |
56 | I = np.arange(0., 20., 0.1)
57 |
58 | # Save figure
59 | if not os.path.exists("fig"):
60 | os.makedirs("fig")
61 |
62 | fig, ax = plt.subplots(1, 1, sharex=True, figsize=(4, 3))
63 | ax.plot(I, L_val * 1e6 * saturation_formula(I), 'k')
64 | ax.grid(True)
65 | ax.set_xlabel('Inductor current $i_L$ (A)', fontsize=14)
66 | ax.set_ylabel('Inductance $L$ ($\mu$H)', fontsize=14)
67 | fig.savefig(os.path.join("fig", "RLC_characteristics.pdf"), bbox_inches='tight')
68 |
--------------------------------------------------------------------------------
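symbolic_RLC.py only defines the continuous-time RLC dynamics; the shipped CSV datasets are produced by the RLC_generate_* scripts, which are not reproduced in this section. As an illustration only, a trajectory can be rolled out from fxu_ODE_nl with a simple forward-Euler loop; the step size and the white-noise input below are assumptions of this sketch, not the settings used to generate the data:

    import numpy as np
    from symbolic_RLC import fxu_ODE_nl

    ts = 0.5e-6                       # integration step in seconds (assumption)
    N = 4000
    u = 80.0 * np.random.randn(N, 1)  # input voltage sequence (assumption)
    x = np.zeros((N, 2))              # state: [v_C, i_L]
    for k in range(N - 1):
        # Explicit forward-Euler step of the nonlinear RLC dynamics
        x[k + 1] = x[k] + ts * fxu_ODE_nl(k * ts, x[k], u[k])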
/examples/statespace/WH2009/download_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from google_drive_downloader import GoogleDriveDownloader as gdd
3 |
4 |
5 | if __name__ == '__main__':
6 |
7 | DATA_FOLDER = 'data'
8 |
9 | # %% Make data folder if it does not exist
10 | if not os.path.exists(DATA_FOLDER):
11 | os.makedirs(DATA_FOLDER)
12 |
13 | # %% Download dataset from www.nonlinearbenchmark.com
14 | # https://drive.google.com/file/d/16ipySVfKfxkwqWmbO9Z19-VjDoC2S6hx/view?usp=sharing
15 | gdd.download_file_from_google_drive(file_id='16ipySVfKfxkwqWmbO9Z19-VjDoC2S6hx',
16 | dest_path='./data/data.zip',
17 | unzip=True)
18 |
--------------------------------------------------------------------------------
/examples/statespace/WH2009/loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pandas as pd
4 |
5 | # Column names in the dataset
6 | COL_F = ['fs']
7 | COL_U = ['uBenchMark']
8 | COL_Y = ['yBenchMark']
9 | idx_train = 100000
10 |
11 |
12 | def wh2009_loader(dataset, scale=True, dtype=np.float32):
13 | df_data = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
14 | y = np.array(df_data[COL_Y], dtype=dtype)
15 | u = np.array(df_data[COL_U], dtype=dtype)
16 | fs = np.array(df_data[COL_F].iloc[0], dtype=np.float32)
17 | N = y.size
18 | ts = 1/fs
19 | t = np.arange(N)*ts
20 |
21 | if scale:
22 | u_train = u[:idx_train]
23 | y_train = y[:idx_train]
24 | u_mean, u_std = np.mean(u_train), np.std(u_train)
25 | y_mean, y_std = np.mean(y_train), np.std(y_train)
26 | u = (u-u_mean)/u_std
27 | y = (y-y_mean)/y_std
28 |
29 | if dataset == "full":
30 | return t, u, y
31 | elif dataset == "train":
32 | t_train = t[:idx_train]
33 | u_train = u[:idx_train]
34 | y_train = y[:idx_train]
35 | return t_train, u_train, y_train
36 | elif dataset == "test":
37 | t_test = t[idx_train:] - t[idx_train]
38 | u_test = u[idx_train:]
39 | y_test = y[idx_train:]
40 | return t_test, u_test, y_test
41 |
42 |
43 | def wh2009_scaling():
44 | df_data = pd.read_csv(os.path.join("data", "WienerHammerstein2009Files", "WienerHammerBenchmark.csv"))
45 | y = np.array(df_data[COL_Y])
46 | u = np.array(df_data[COL_U])
47 | fs = np.array(df_data[COL_F].iloc[0], dtype=np.float32)
48 | N = y.size
49 | ts = 1/fs
50 | t = np.arange(N)*ts
51 |
52 | u_train = u[:idx_train]
53 | y_train = y[:idx_train]
54 | y_mean, y_std = np.mean(y_train), np.std(y_train)
55 | return y_mean, y_std
56 |
57 |
58 | if __name__ == "__main__":
59 | import matplotlib.pyplot as plt
60 | for dataset in ["full", "train", "test"]:
61 | t, u, y = wh2009_loader(dataset)
62 | fig, ax = plt.subplots(2, 1, sharex=True)
63 | ax[0].plot(t, u)
64 | ax[1].plot(t, y)
65 | plt.suptitle(dataset)
66 |
--------------------------------------------------------------------------------
/examples/statespace/WH2009/wh_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import torchid.ss.dt.models as models
4 | from torchid.ss.dt.simulator import StateSpaceSimulator
5 | from torchid.ss.dt.estimators import FeedForwardStateEstimator
6 | from loader import wh2009_loader, wh2009_scaling
7 | import matplotlib
8 |
9 | matplotlib.use("TKAgg")
10 | import matplotlib.pyplot as plt
11 | from torchid import metrics
12 |
13 | if __name__ == '__main__':
14 | model_data = torch.load(os.path.join("models", "model.pt"))
15 |
16 | seq_sim_len = 64 # simulation sequence length
17 | seq_est_len = 16 # estimation sequence length
18 | n_x = 6
19 | n_u = 1
20 | n_y = 1
21 | est_hidden_size = 16
22 | hidden_size = 16
23 |
24 | # Load dataset
25 | t, u, y = wh2009_loader("test", scale=True)
26 | y_mean, y_std = wh2009_scaling()
27 |
28 | # %% Load models and parameters
29 | f_xu = models.NeuralLinStateUpdate(n_x, n_u, hidden_size=hidden_size)
30 | g_x = models.NeuralLinOutput(n_x, n_u, hidden_size=hidden_size)
31 | model = StateSpaceSimulator(f_xu, g_x)
32 | estimator = FeedForwardStateEstimator(n_u=n_u, n_y=n_y, n_x=n_x,
33 | hidden_size=est_hidden_size,
34 | seq_len=seq_est_len)
35 | model.load_state_dict(model_data["model"])
36 | # state_estimator.load_state_dict(model_data["estimator"])
37 |
38 | # %% Simulate
39 | with torch.no_grad():
40 | u_v = torch.tensor(u[:, None, :])
41 | y_v = torch.tensor(y[:, None, :])
42 | # x0 = estimator(u_v, y_v)
43 | # initial state here set to 0 for simplicity. The effect on the long simulation is negligible
44 | x0 = torch.zeros((1, n_x), dtype=u_v.dtype, device=u_v.device)
45 | y_sim = model(x0, u_v).squeeze(1) # remove batch dimension
46 | y_sim = y_sim.detach().numpy()
47 |
48 | y = y * y_std + y_mean
49 | y_sim = y_sim * y_std + y_mean
50 |
51 | # %% Test
52 | fig, ax = plt.subplots(1, 1, sharex=True)
53 | ax.plot(y[:, 0], 'k', label='meas')
54 | ax.grid(True)
55 | ax.plot(y_sim[:, 0], 'b', label='sim')
56 | ax.plot(y[:, 0] - y_sim[:, 0], 'r', label='err')
57 |
58 | # %% Metrics
59 |
60 | n_skip = 0
61 | e_rms = 1000 * metrics.rmse(y[n_skip:], y_sim[n_skip:])[0]
62 | fit_idx = metrics.fit_index(y[n_skip:], y_sim[n_skip:])[0]
63 | r_sq = metrics.r_squared(y[n_skip:], y_sim[n_skip:])[0]
64 |
65 | print(f"RMSE: {e_rms:.1f}mV\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.4f}")
66 |
--------------------------------------------------------------------------------
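wh_test.py (and the silverbox scripts further below) report RMSE, the fit index and R-squared through torchid.metrics. For orientation, the conventional definitions of these quantities are sketched below in plain NumPy; this mirrors the R-squared computation of the MATLAB comparison scripts above, but it is not necessarily the exact torchid.metrics implementation:

    import numpy as np

    def rmse(y, y_hat):
        # Root mean squared error, computed per output channel
        return np.sqrt(np.mean((y - y_hat) ** 2, axis=0))

    def r_squared(y, y_hat):
        # Coefficient of determination: 1 - SSE/SST
        sse = np.sum((y - y_hat) ** 2, axis=0)
        sst = np.sum((y - np.mean(y, axis=0)) ** 2, axis=0)
        return 1.0 - sse / sst

    def fit_index(y, y_hat):
        # Percentage fit: 100 * (1 - ||y - y_hat|| / ||y - mean(y)||)
        num = np.linalg.norm(y - y_hat, axis=0)
        den = np.linalg.norm(y - np.mean(y, axis=0), axis=0)
        return 100.0 * (1.0 - num / den)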
/examples/statespace/WH2009/wh_train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import torch
5 | import torch.optim as optim
6 | from torch.utils.data import DataLoader
7 | from torchid.datasets import SubsequenceDataset
8 | import torchid.ss.dt.models as models
9 | import torchid.ss.dt.estimators as estimators
10 | from torchid.ss.dt.simulator import StateSpaceSimulator
11 | from loader import wh2009_loader
12 | import matplotlib.pyplot as plt
13 |
14 |
15 | if __name__ == '__main__':
16 |
17 | save_folder = "models"
18 |
19 | epochs_adam = 100
20 | epochs_bfgs = 5
21 | epochs_lin = 20
22 | batch_size = 1024
23 | seq_len = 80
24 | seq_est_len = 50
25 | est_hidden_size = 16
26 | hidden_size = 16
27 | lr = 1e-3
28 |
29 | no_cuda = False
30 | log_interval = 20
31 |
32 | torch.manual_seed(10)
33 |
34 | # CPU/GPU resources
35 | use_cuda = not no_cuda and torch.cuda.is_available()
36 | device = torch.device("cuda" if use_cuda else "cpu")
37 |
38 | # Constants
39 | n_x = 6
40 | n_u = 1
41 | n_y = 1
42 | n_fit = 80000
43 |
44 | epochs = epochs_adam + epochs_bfgs
45 |
46 | # %% Load dataset
47 | t_train, u_train, y_train = wh2009_loader("train", scale=True)
48 | t_fit, u_fit, y_fit = t_train[:n_fit], u_train[:n_fit], y_train[:n_fit]
49 | t_val, u_val, y_val = t_train[n_fit:] - t_train[n_fit], u_train[n_fit:], y_train[n_fit:]
50 |
51 | # %% Prepare dataset, models, optimizer
52 | train_data = SubsequenceDataset(u_fit, y_fit, subseq_len=seq_len + seq_est_len)
53 | train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
54 | u_val_t = torch.tensor(u_val[:, None, :]).to(device)
55 | y_val_t = torch.tensor(y_val[:, None, :]).to(device)
56 |
57 | f_xu = models.NeuralLinStateUpdate(n_x, n_u, hidden_size=hidden_size).to(device)
58 | g_x = models.NeuralLinOutput(n_x, n_u, hidden_size=hidden_size).to(device)
59 | model = StateSpaceSimulator(f_xu, g_x).to(device)
60 | estimator = estimators.FeedForwardStateEstimator(n_u=n_u, n_y=n_y, n_x=n_x,
61 | hidden_size=est_hidden_size,
62 | seq_len=seq_est_len).to(device)
63 |
64 | # Setup optimizer
65 | optimizer = optim.Adam(list(model.parameters())+list(estimator.parameters()), lr=lr)
66 |
67 | optimizer_LBFGS = torch.optim.LBFGS(
68 | list(model.parameters())+list(estimator.parameters()),
69 | line_search_fn="strong_wolfe",
70 | lr=1.0
71 | )
72 |
73 | # %% Other initializations
74 | if not os.path.exists(save_folder):
75 | os.makedirs(save_folder)
76 |
77 | model_filename = "model.pt"
78 | model_path = os.path.join(save_folder, model_filename)
79 |
80 | VAL_LOSS, TRAIN_LOSS = [], []
81 | min_loss = np.inf # for early stopping
82 |
83 |
84 | def closure():
85 | optimizer_LBFGS.zero_grad()
86 |
87 | # State is estimated on the first seq_est_len samples
88 | batch_u_est = batch_u[:seq_est_len]
89 | batch_y_est = batch_y[:seq_est_len]
90 | batch_x0 = estimator(batch_u_est, batch_y_est)
91 |
92 | # fit only after seq_est_len
93 | batch_u_fit = batch_u[seq_est_len:]
94 | batch_y_fit = batch_y[seq_est_len:]
95 | batch_y_sim = model(batch_x0, batch_u_fit)
96 |
97 | # Compute fit loss
98 | loss = torch.nn.functional.mse_loss(batch_y_fit, batch_y_sim)
99 | loss.backward()
100 | return loss
101 |
102 | start_time = time.time()
103 | # %% Training loop
104 | itr = 0
105 | model.f_xu.disable_nl()
106 | model.f_xu.freeze_nl()
107 | model.g_x.disable_nl()
108 | model.g_x.freeze_nl()
109 | for epoch in range(epochs):
110 | train_loss = 0 # train loss for the whole epoch
111 | model.train()
112 | estimator.train()
113 |
114 | if epoch == epochs_lin:
115 | model.f_xu.enable_nl()
116 | model.f_xu.unfreeze_nl()
117 | model.f_xu.freeze_lin()
118 | model.g_x.enable_nl()
119 | model.g_x.unfreeze_nl()
120 | model.g_x.freeze_lin()
121 |
122 | for batch_idx, (batch_u, batch_y) in enumerate(train_loader):
123 |
124 | batch_u = batch_u.transpose(0, 1).to(device) # transpose to time_first
125 | batch_y = batch_y.transpose(0, 1).to(device) # transpose to time_first
126 |
127 | if epoch < epochs_adam:
128 | loss = optimizer.step(closure)
129 | else:
130 | loss = optimizer_LBFGS.step(closure)
131 |
132 | train_loss += loss.item()
133 |
134 | itr += 1
135 |
136 | train_loss = train_loss / len(train_loader)
137 | TRAIN_LOSS.append(train_loss)
138 |
139 | # Validation loss: full simulation error
140 | with torch.no_grad():
141 | model.eval()
142 | estimator.eval()
143 | x0 = torch.zeros((1, n_x), dtype=u_val_t.dtype,
144 | device=u_val_t.device)
145 | # x0 = state_estimator(u_val_t, y_val_t)
146 | y_val_sim = model(x0, u_val_t)
147 | val_loss = torch.nn.functional.mse_loss(y_val_t, y_val_sim)
148 |
149 | VAL_LOSS.append(val_loss.item())
150 | print(f'==== Epoch {epoch} | Train Loss {train_loss:.4f} Val (sim) Loss {val_loss:.4f} ====')
151 |
152 | # best model so far, save it
153 | if val_loss < min_loss:
154 | torch.save({
155 | "epoch": epoch,
156 | "model": model.state_dict(),
157 | "estimator": estimator.state_dict()
158 | },
159 | os.path.join(model_path)
160 | )
161 | min_loss = val_loss.item()
162 |
163 | train_time = time.time() - start_time
164 | print(f"\nTrain time: {train_time:.2f}")
165 |
166 | if not np.isfinite(min_loss): # model never saved as it was never giving a finite simulation loss
167 | torch.save({
168 | "epoch": epoch,
169 | "model": model.state_dict(),
170 | "estimator": estimator.state_dict()
171 | },
172 | os.path.join(model_path)
173 | )
174 | # %% Simulate
175 |
176 | # Also save total training time (up to last epoch)
177 | model_data = torch.load(model_path)
178 | model_data["total_time"] = time.time() - start_time
179 | torch.save(model_data, model_path)
180 |
181 | # Reload optimal parameters (best on validation)
182 | model.load_state_dict(model_data["model"])
183 | estimator.load_state_dict(model_data["estimator"])
184 |
185 | t_full, u_full, y_full = wh2009_loader("full", scale=True)
186 | with torch.no_grad():
187 | model.eval()
188 | estimator.eval()
189 | u_v = torch.tensor(u_full[:, None, :]).to(device)
190 | y_v = torch.tensor(y_full[:, None, :]).to(device)
191 | x0 = torch.zeros((1, n_x), dtype=u_v.dtype, device=u_v.device)
192 | y_sim = model(x0, u_v).squeeze(1).to("cpu").detach().numpy()
193 |
194 | # %% Metrics
195 |
196 | from torchid import metrics
197 | e_rms = 1000 * metrics.rmse(y_full, y_sim)[0]
198 | fit_idx = metrics.fit_index(y_full, y_sim)[0]
199 | r_sq = metrics.r_squared(y_full, y_sim)[0]
200 |
201 | print(f"RMSE: {e_rms:.1f} mV\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.4f}")
202 |
203 | # %% Plots
204 |
205 | fig, ax = plt.subplots(1, 1)
206 | ax.plot(TRAIN_LOSS, 'k', label='TRAIN')
207 | ax.plot(VAL_LOSS, 'r', label='VAL')
208 | ax.grid(True)
209 | ax.legend()
210 | ax.set_ylabel("Loss (-)")
211 | ax.set_xlabel("Iteration (-)")
212 |
213 | fig, ax = plt.subplots(1, 1, sharex=True)
214 | ax.plot(y_full[:, 0], 'k', label='meas')
215 | ax.grid(True)
216 | ax.plot(y_sim[:, 0], 'b', label='sim')
217 |
--------------------------------------------------------------------------------
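wh_train.py above alternates two phases: for the first epochs_lin epochs only the linear parts of f_xu and g_x are trained (the nonlinear blocks are disabled and frozen), after which the nonlinear blocks are enabled and trained while the linear parts are frozen; Adam is used for the first epochs_adam epochs and LBFGS with a strong-Wolfe line search afterwards. A compact restatement of the freeze/enable schedule, assuming the helper methods used in the script are idempotent so that calling this once per epoch is equivalent:

    def configure_phase(model, epoch, epochs_lin):
        # Phase 1 (epoch < epochs_lin): linear-only training, nonlinear blocks off and frozen.
        # Phase 2 (epoch >= epochs_lin): nonlinear blocks on and trainable, linear blocks frozen.
        for block in (model.f_xu, model.g_x):
            if epoch < epochs_lin:
                block.disable_nl()
                block.freeze_nl()
            else:
                block.enable_nl()
                block.unfreeze_nl()
                block.freeze_lin()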
/examples/statespace/silverbox/README.txt:
--------------------------------------------------------------------------------
1 | Main scripts:
2 | -------------
3 |
4 | - silverbox_train_lin.py: train a linear state-space model on the silverbox dataset
5 | - silverbox_train_poly.py: train a polynomial state-space model on the silverbox dataset. Results seem to be in line with [1]
6 | - silverbox_test.py: test the polynomial state-space model and compute metrics
7 |
8 | [1] J. Paduart, L. Lauwers, J. Swevers, K. Smolders, J. Schoukens, and R. Pintelon. Identification of the Silverbox Benchmark Using Nonlinear State-Space Models.
--------------------------------------------------------------------------------
/examples/statespace/silverbox/loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pandas as pd
4 |
5 | # Column names in the dataset
6 | COL_U = ['V1']
7 | COL_Y = ['V2']
8 | idx_train = 40700
9 |
10 |
11 | def silverbox_loader(dataset, scale=True, dtype=np.float32):
12 | filename = "SNLS80mV.csv"
13 | df_data = pd.read_csv(os.path.join("SilverboxFiles", filename))
14 | y = np.array(df_data[COL_Y], dtype=dtype)
15 | u = np.array(df_data[COL_U], dtype=dtype)
16 | fs = 10**7/2**14
17 | N = y.size
18 | ts = 1/fs
19 | t = np.arange(N)*ts
20 |
21 | if scale:
22 | u_train = u[idx_train:]
23 | y_train = y[idx_train:]
24 | u_mean, u_std = np.mean(u_train), np.std(u_train)
25 | y_mean, y_std = np.mean(y_train), np.std(y_train)
26 | u = (u-u_mean)/u_std
27 | y = (y-y_mean)/y_std
28 |
29 | if dataset == "full":
30 | return t, u, y
31 | elif dataset == "train":
32 | t_train = t[idx_train:] - t[idx_train]
33 | u_train = u[idx_train:]
34 | y_train = y[idx_train:]
35 | return t_train, u_train, y_train
36 | elif dataset == "test":
37 | t_test = t[:idx_train]
38 | u_test = u[:idx_train]
39 | y_test = y[:idx_train]
40 | return t_test, u_test, y_test
41 |
42 |
43 | if __name__ == "__main__":
44 | import matplotlib.pyplot as plt
45 | for dataset in ["full", "train", "test"]:
46 | t, u, y = silverbox_loader(dataset)
47 | fig, ax = plt.subplots(2, 1, sharex=True)
48 | ax[0].plot(t, u)
49 | ax[1].plot(t, y)
50 | plt.suptitle(dataset)
51 |
--------------------------------------------------------------------------------
/examples/statespace/silverbox/silverbox_plot.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import os
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | if __name__ == '__main__':
8 |
9 | # Column names in the dataset
10 | COL_U = ['V1']
11 | COL_Y = ['V2']
12 |
13 | # Load dataset
14 | df_X = pd.read_csv(os.path.join("SilverboxFiles", "SNLS80mV.csv"))
15 |
16 | # Extract data
17 | y = np.array(df_X[COL_Y], dtype=np.float32)
18 | u = np.array(df_X[COL_U], dtype=np.float32)
19 | u = u-np.mean(u)
20 | y = y-np.mean(y)
21 | fs = 10**7/2**14
22 | N = y.size
23 | ts = 1/fs
24 | t = np.arange(N)*ts
25 |
26 | #%% Plot
27 | fig, ax = plt.subplots(2, 1, sharex=True)
28 | ax[0].plot(t, y, 'k', label="$y$")
29 | ax[0].grid()
30 | ax[1].plot(t, u, 'k', label="$u$")
31 | ax[1].grid()
32 | plt.legend()
33 | plt.show()
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/examples/statespace/silverbox/silverbox_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from torchid.ss.dt.models import PolynomialStateUpdate, LinearStateUpdate, LinearOutput
4 | from torchid.ss.dt.simulator import StateSpaceSimulator
5 | from torchid.ss.dt.estimators import LSTMStateEstimator
6 | from loader import silverbox_loader
7 | import matplotlib.pyplot as plt
8 | from torchid import metrics
9 |
10 |
11 | if __name__ == '__main__':
12 |
13 | model_filename = "ss_poly.pt"
14 | model_data = torch.load(os.path.join("models", model_filename))
15 | n_x = model_data["n_x"]
16 | n_y = model_data["n_y"]
17 | n_u = model_data["n_u"]
18 | d_max = model_data["d_max"]
19 |
20 | # Load dataset
21 | t, u, y = silverbox_loader("test", scale=True)
22 |
23 | #%% Load models and parameters
24 | f_xu = PolynomialStateUpdate(n_x, n_u, d_max)
25 | g_x = LinearOutput(n_x, n_y)
26 | model = StateSpaceSimulator(f_xu, g_x)
27 | state_estimator = LSTMStateEstimator(n_u=n_u, n_y=n_y, n_x=n_x)
28 | model.load_state_dict(model_data["model"])
29 | #state_estimator.load_state_dict(model_data["estimator"])
30 |
31 | #%% Simulate
32 | with torch.no_grad():
33 | u_v = torch.tensor(u[:, None, :])
34 | # y_v = torch.tensor(y[:, None, :])
35 | # x0 = state_estimator(u_v, y_v)
36 | x0 = torch.zeros(1, n_x, dtype=torch.float32) # initial state set to 0 for simplicity
37 | y_sim = model(x0, u_v).squeeze(1) # remove batch dimension
38 | y_sim = y_sim.detach().numpy()
39 |
40 | #%% Metrics
41 | e_rms = 1000 * metrics.rmse(y, y_sim)[0]
42 | fit_idx = metrics.fit_index(y, y_sim)[0]
43 | r_sq = metrics.r_squared(y, y_sim)[0]
44 |
45 | print(f"RMSE: {e_rms:.1f}mV\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.4f}")
46 |
47 | #%% Plots
48 | fig, ax = plt.subplots(1, 1, sharex=True)
49 | ax.plot(y[:, 0], 'k', label='meas')
50 | ax.grid(True)
51 | ax.plot(y_sim[:, 0], 'b', label='sim')
52 | ax.plot(y[:, 0] - y_sim[:, 0], 'r', label='err')
53 |
54 |
55 |
--------------------------------------------------------------------------------
/examples/statespace/silverbox/silverbox_train_lin.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import torch
4 | import torch.optim as optim
5 | from torch.utils.data import DataLoader
6 | from torchid.datasets import SubsequenceDataset
7 | from torchid.ss.dt.models import LinearStateUpdate, LinearOutput
8 | from torchid.ss.dt.simulator import StateSpaceSimulator
9 | from torchid.ss.dt.estimators import LSTMStateEstimator
10 | from loader import silverbox_loader
11 | import matplotlib.pyplot as plt
12 |
13 |
14 | if __name__ == '__main__':
15 |
16 | no_cuda = False # no GPU, CPU only training
17 | threads = 6 # max number of CPU threads
18 |
19 | # Parameters
20 | n_fit = 40000
21 | seq_sim_len = 256
22 | seq_est_len = 32 # estimation sequence length
23 | batch_size = 64
24 | lr = 1e-5
25 | epochs = 10
26 | n_x = 2
27 | n_u = 1
28 | n_y = 1
29 | d_max = 3
30 |
31 | # CPU/GPU resources
32 | use_cuda = not no_cuda and torch.cuda.is_available()
33 | device = torch.device("cuda" if use_cuda else "cpu")
34 | torch.set_num_threads(threads)
35 |
36 | # Load dataset
37 | t_train, u_train, y_train = silverbox_loader("train", scale=True)
38 |
39 | #%% Prepare dataset
40 | load_len = seq_sim_len + seq_est_len
41 | train_data = SubsequenceDataset(u_train, y_train, subseq_len=load_len)
42 | train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
43 |
44 | f_xu = LinearStateUpdate(n_x, n_u, d_max).to(device)
45 | g_x = LinearOutput(n_x, n_y).to(device)
46 | model = StateSpaceSimulator(f_xu, g_x).to(device)
47 | estimator = LSTMStateEstimator(n_u=1, n_y=1, n_x=2).to(device)
48 |
49 | # Setup optimizer
50 | optimizer = optim.Adam([
51 | {'params': model.parameters(), 'lr': 1e-3},
52 | {'params': estimator.parameters(), 'lr': 1e-3},
53 | ], lr=lr)
54 |
55 | LOSS = []
56 | LOSS_CONSISTENCY = []
57 | LOSS_FIT = []
58 |
59 | start_time = time.time()
60 |
61 | # Training loop
62 | itr = 0
63 | for epoch in range(epochs):
64 | for batch_idx, (batch_u, batch_y) in enumerate(train_loader):
65 | optimizer.zero_grad()
66 |
67 | # Compute fit loss
68 | batch_u = batch_u.transpose(0, 1).to(device) # transpose to time_first
69 | batch_y = batch_y.transpose(0, 1).to(device) # transpose to time_first
70 |
71 | # Estimate initial state
72 | batch_u_est = batch_u[:seq_est_len]
73 | batch_y_est = batch_y[:seq_est_len]
74 | batch_x0 = estimator(batch_u_est, batch_y_est)
75 |
76 | # Simulate
77 | batch_u_fit = batch_u[seq_est_len:]
78 | batch_y_fit = batch_y[seq_est_len:]
79 | batch_y_sim = model(batch_x0, batch_u_fit)
80 |
81 | # Compute loss
82 | loss = torch.nn.functional.mse_loss(batch_y_fit, batch_y_sim)
83 |
84 | # Statistics
85 | LOSS.append(loss.item())
86 |
87 | # Optimize
88 | loss.backward()
89 | optimizer.step()
90 |
91 | if itr % 10 == 0:
92 | print(f'Iteration {itr} | Train Loss {loss:.4f} ')
93 | itr += 1
94 |
95 | print(f'Epoch {epoch} | Train Loss {loss:.4f} ')
96 |
97 | train_time = time.time() - start_time
98 | print(f"\nTrain time: {train_time:.2f}")
99 |
100 | #%% Save model
101 |
102 | if not os.path.exists("models"):
103 | os.makedirs("models")
104 |
105 | model = model.to("cpu")
106 | estimator = estimator.to("cpu")
107 | model_filename = "ss_lin.pt"
108 | torch.save({"n_x": n_x,
109 | "n_y": n_y,
110 | "n_u": n_u,
111 | "d_max": d_max,
112 | "model": model.to("cpu").state_dict(),
113 | "estimator": estimator.to("cpu").state_dict()
114 | },
115 | os.path.join("models", model_filename))
116 |
117 | #%% Simulate
118 | t_full, u_full, y_full = silverbox_loader("full", scale=True)
119 | with torch.no_grad():
120 | u_v = torch.tensor(u_full[:, None, :])
121 | y_v = torch.tensor(y_full[:, None, :])
122 | x0 = estimator(u_v, y_v)
123 | y_sim = model(x0, u_v).squeeze(1)
124 |
125 | #%% Test
126 | fig, ax = plt.subplots(1, 1)
127 | ax.plot(LOSS, 'k', label='ALL')
128 | ax.grid(True)
129 | ax.legend()
130 | ax.set_ylabel("Loss (-)")
131 | ax.set_xlabel("Iteration (-)")
132 |
133 | fig, ax = plt.subplots(1, 1, sharex=True)
134 | ax.plot(y_full[:, 0], 'k', label='meas')
135 | ax.grid(True)
136 | ax.plot(y_sim[:, 0], 'b', label='sim')
137 |
--------------------------------------------------------------------------------
/examples/statespace/silverbox/silverbox_train_poly.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import torch
4 | import torch.optim as optim
5 | from torch.utils.data import DataLoader
6 | from torchid.datasets import SubsequenceDataset
7 | from torchid.ss.dt.models import PolynomialStateUpdate, LinearOutput
8 | from torchid.ss.dt.simulator import StateSpaceSimulator
9 | from torchid.ss.dt.estimators import LSTMStateEstimator
10 | from loader import silverbox_loader
11 | import matplotlib.pyplot as plt
12 |
13 |
14 | if __name__ == '__main__':
15 |
16 |     no_cuda = False  # set to True to force CPU-only training
17 | threads = 6 # max number of CPU threads
18 |
19 | # Parameters
20 | n_fit = 40000
21 | seq_sim_len = 256
22 | seq_est_len = 32 # estimation sequence length
23 | batch_size = 64
24 | lr = 1e-3
25 | epochs = 4
26 | epochs_lin = 2
27 | n_x = 2
28 | n_u = 1
29 | n_y = 1
30 | d_max = 3
31 |
32 | # CPU/GPU resources
33 | use_cuda = not no_cuda and torch.cuda.is_available()
34 | device = torch.device("cuda" if use_cuda else "cpu")
35 | torch.set_num_threads(threads)
36 |
37 | # Load dataset
38 | t_train, u_train, y_train = silverbox_loader("train", scale=True)
39 |
40 | #%% Prepare dataset
41 | load_len = seq_sim_len + seq_est_len
42 | train_data = SubsequenceDataset(u_train, y_train, subseq_len=load_len)
43 | train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
44 |
45 | f_xu = PolynomialStateUpdate(n_x, n_u, d_max).to(device)
46 | g_x = LinearOutput(n_x, n_y).to(device)
47 |     f_xu.poly_coeffs = f_xu.poly_coeffs.to(device)  # TODO find a better way to do this automatically
48 | model = StateSpaceSimulator(f_xu, g_x).to(device)
49 | estimator = LSTMStateEstimator(n_u=1, n_y=1, n_x=2).to(device)
50 |
51 | # Setup optimizer
52 | optimizer = optim.Adam([
53 | {'params': model.parameters(), 'lr': lr},
54 | {'params': estimator.parameters(), 'lr': lr},
55 | ], lr=lr)
56 |
57 | LOSS = []
58 | start_time = time.time()
59 |
60 | # Training loop
61 | itr = 0
62 | model.f_xu.freeze_nl()
63 | model.f_xu.disable_nl()
64 | for epoch in range(epochs):
65 | if epoch >= epochs_lin:
66 | model.f_xu.enable_nl()
67 | model.f_xu.unfreeze_nl()
68 | # model.f_xu.freeze_lin() # do not further train the linear part
69 | for batch_idx, (batch_u, batch_y) in enumerate(train_loader):
70 | optimizer.zero_grad()
71 |
72 | # Compute fit loss
73 | batch_u = batch_u.transpose(0, 1).to(device) # transpose to time_first
74 | batch_y = batch_y.transpose(0, 1).to(device) # transpose to time_first
75 |
76 | # Estimate initial state
77 | batch_u_est = batch_u[:seq_est_len]
78 | batch_y_est = batch_y[:seq_est_len]
79 | batch_x0 = estimator(batch_u_est, batch_y_est)
80 |
81 | # Simulate
82 | batch_u_fit = batch_u[seq_est_len:]
83 | batch_y_fit = batch_y[seq_est_len:]
84 | batch_y_sim = model(batch_x0, batch_u_fit)
85 |
86 | # Compute loss
87 | loss = torch.nn.functional.mse_loss(batch_y_fit, batch_y_sim)
88 |
89 | # Statistics
90 | LOSS.append(loss.item())
91 |
92 | # Optimize
93 | loss.backward()
94 | optimizer.step()
95 |
96 | if itr % 10 == 0:
97 | print(f'Iteration {itr} | Train Loss {loss:.4f} ')
98 | itr += 1
99 |
100 | print(f'Epoch {epoch} | Train Loss {loss:.4f} ')
101 |
102 | train_time = time.time() - start_time
103 | print(f"\nTrain time: {train_time:.2f}")
104 |
105 | #%% Save model
106 | if not os.path.exists("models"):
107 | os.makedirs("models")
108 |
109 | model = model.to("cpu")
110 | model.f_xu.poly_coeffs = f_xu.poly_coeffs.to("cpu")
111 | estimator = estimator.to("cpu")
112 | model_filename = "ss_poly.pt"
113 | torch.save({"n_x": n_x,
114 | "n_y": n_y,
115 | "n_u": n_u,
116 | "d_max": d_max,
117 | "model": model.state_dict(),
118 | "estimator": estimator.state_dict()
119 | },
120 | os.path.join("models", model_filename))
121 |
122 | #%% Simulate
123 | t_full, u_full, y_full = silverbox_loader("full", scale=True)
124 | with torch.no_grad():
125 | u_v = torch.tensor(u_full[:, None, :])
126 | y_v = torch.tensor(y_full[:, None, :])
127 | x0 = estimator(u_v, y_v)
128 | y_sim = model(x0, u_v).squeeze(1)
129 |
130 | #%% Test
131 | fig, ax = plt.subplots(1, 1)
132 | ax.plot(LOSS, 'k', label='Training loss')
133 | ax.grid(True)
134 | ax.legend()
135 | ax.set_ylabel("Loss")
136 | ax.set_xlabel("Iteration (-)")
137 |
138 | fig, ax = plt.subplots(1, 1, sharex=True)
139 | ax.plot(y_full[:, 0], 'k', label='meas')
140 | ax.grid(True)
141 | ax.plot(y_sim[:, 0], 'b', label='sim')
142 |
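A possible follow-up for the script above (not in the original code): scoring the open-loop simulation with the metrics module of the package. A minimal sketch, assuming it is appended after the simulation cell, where y_full is a NumPy array and y_sim a torch tensor:

import torchid.metrics as metrics

y_sim_np = y_sim.numpy()
e_rms = metrics.rmse(y_full, y_sim_np)[0]        # per-channel indices; [0] selects the single output
fit_idx = metrics.fit_index(y_full, y_sim_np)[0]
r_sq = metrics.r_squared(y_full, y_sim_np)[0]
print(f"RMSE: {e_rms:.4f} | FIT: {fit_idx:.1f}% | R_sq: {r_sq:.4f}")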
--------------------------------------------------------------------------------
/examples/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/pytorch-ident/f00d79ff8c6cb96d094d9ea1c079d4dcbf9a67e0/examples/util/__init__.py
--------------------------------------------------------------------------------
/examples/util/benchmark_url.py:
--------------------------------------------------------------------------------
1 | WH2009 = 'https://drive.google.com/file/d/16ipySVfKfxkwqWmbO9Z19-VjDoC2S6hx/view?usp=sharing'
2 | CTS = 'https://data.4tu.nl/ndownloader/articles/12960104/versions/1'
--------------------------------------------------------------------------------
/examples/util/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 |
5 | def r_squared(y_true, y_pred, time_axis=0):
6 | """ Computes the R-square index.
7 |
8 | The R-squared index is computed separately on each channel.
9 |
10 | Parameters
11 | ----------
12 | y_true : np.array
13 |         Array of true values. It must be at least 2D.
14 |     y_pred : np.array
15 |         Array of predicted values. It must be compatible with y_true.
16 | time_axis : int
17 | Time axis. All other axes define separate channels.
18 |
19 | Returns
20 | -------
21 | r_squared_val : np.array
22 | Array of r_squared value.
23 | """
24 |
25 | SSE = np.sum((y_pred - y_true)**2, axis=time_axis)
26 | y_mean = np.mean(y_true, axis=time_axis, keepdims=True)
27 | SST = np.sum((y_true - y_mean)**2, axis=time_axis)
28 |
29 | return 1.0 - SSE/SST
30 |
31 |
32 | def error_rmse(y_true, y_pred, time_axis=0):
33 | """ Computes the Root Mean Square Error (RMSE).
34 |
35 | The RMSE index is computed separately on each channel.
36 |
37 | Parameters
38 | ----------
39 | y_true : np.array
40 |         Array of true values. It must be at least 2D.
41 |     y_pred : np.array
42 |         Array of predicted values. It must be compatible with y_true.
43 |     time_axis : int
44 |         Time axis. All other axes define separate channels.
45 |
46 |     Returns
47 |     -------
48 |     RMSE : np.array
49 |         Array of RMSE values.
50 |
51 | """
52 |
53 | SSE = np.mean((y_pred - y_true)**2, axis=time_axis)
54 | RMSE = np.sqrt(SSE)
55 | return RMSE
56 |
57 |
58 | def error_mean(y_true, y_pred, time_axis=0):
59 | """ Computes the error mean value.
60 |
61 |     The error mean is computed separately on each channel.
62 |
63 |     Parameters
64 |     ----------
65 |     y_true : np.array
66 |         Array of true values. It must be at least 2D.
67 |     y_pred : np.array
68 |         Array of predicted values. It must be compatible with y_true.
69 | time_axis : int
70 | Time axis. All other axes define separate channels.
71 |
72 | Returns
73 | -------
74 | e_mean : np.array
75 | Array of error means.
76 | """
77 |
78 | e_mean = np.mean(y_true - y_pred, axis=time_axis)
79 | return e_mean
80 |
81 |
82 | def error_mae(y_true, y_pred, time_axis=0):
83 | """ Computes the error Mean Absolute Value (MAE)
84 |
85 | The RMSE index is computed separately on each channel.
86 |
87 | Parameters
88 | ----------
89 | y_true : np.array
90 | Array of true values. If must be at least 2D.
91 | y_pred : np.array
92 | Array of predicted values. If must be compatible with y_true'
93 | time_axis : int
94 | Time axis. All other axes define separate channels.
95 |
96 | Returns
97 | -------
98 | e_mean : np.array
99 | Array of error mean absolute values.
100 | """
101 |
102 | e_mean = np.mean(np.abs(y_true - y_pred), axis=time_axis)
103 | return e_mean
104 |
105 | def fit_index(y_true, y_pred, time_axis=0):
106 | """ Computes the per-channel fit index.
107 |
108 |     The fit index is commonly used in System Identification. See the definition in the System Identification Toolbox
109 | or in the paper 'Nonlinear System Identification: A User-Oriented Road Map',
110 | https://arxiv.org/abs/1902.00683, page 31.
111 | The fit index is computed separately on each channel.
112 |
113 | Parameters
114 | ----------
115 | y_true : np.array
116 |         Array of true values. It must be at least 2D.
117 |     y_pred : np.array
118 |         Array of predicted values. It must be compatible with y_true.
119 | time_axis : int
120 | Time axis. All other axes define separate channels.
121 |
122 | Returns
123 | -------
124 |     fit_val : np.array
125 |         Array of fit index values.
126 |
127 | """
128 |
129 | err_norm = np.linalg.norm(y_true - y_pred, axis=time_axis, ord=2) # || y - y_pred ||
130 |     y_mean = np.mean(y_true, axis=time_axis, keepdims=True)
131 |     err_mean_norm = np.linalg.norm(y_true - y_mean, axis=time_axis, ord=2) # || y - y_mean ||
132 | fit_val = 100*(1 - err_norm/err_mean_norm)
133 |
134 | return fit_val
135 |
136 |
137 | if __name__ == '__main__':
138 | N = 20
139 | ny = 2
140 | SNR = 10
141 |     y_true = SNR*np.random.randn(N, ny)
142 |     y_pred = np.copy(y_true) + np.random.randn(N, ny)
143 |     err_rmse_val = error_rmse(y_true, y_pred)
144 | r_squared_val = r_squared(y_true, y_pred)
145 | fit_val = fit_index(y_true, y_pred)
146 |
147 | print(f"RMSE: {err_rmse_val}")
148 | print(f"R-squared: {r_squared_val}")
149 | print(f"fit index: {fit_val}")
150 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name='pytorch-ident',
5 | version='0.2.5',
6 | url='https://github.com/forgi86/pytorch-ident.git',
7 | author='Marco Forgione',
8 | author_email='marco.forgione1986@gmail.com',
9 | description='A library for system identification with PyTorch',
10 | packages=find_packages(),
11 | install_requires=['numpy', 'scipy', 'matplotlib', 'torch'], # to be checked
12 | extras_require={
13 |         'download-datasets': ["requests", "googledrivedownloader"],
14 |         'open-datasets': ["pandas"],
15 |         'generate-plots': ['matplotlib'],
16 |         'generate-documentation': ["sphinx"]
17 | }
18 | )
19 |
--------------------------------------------------------------------------------
/torchid/__init__.py:
--------------------------------------------------------------------------------
1 | from . import dynonet
2 | from . import io
3 | from . import ss
4 |
--------------------------------------------------------------------------------
/torchid/datasets.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data import Dataset
3 |
4 |
5 | class SubsequenceDataset(Dataset):
6 | r"""A dataset returning sub-sequences extracted from longer sequences.
7 |
8 | Args:
9 |         *tensors (Tensor): tensors that have the same size along the first dimension.
10 |         subseq_len (int): length of the sub-sequences to be extracted.
11 | Examples:
12 |
13 | >>> u = torch.randn(1000, 2) # 2 inputs
14 | >>> y = torch.randn(1000, 3) # 3 outputs
15 | >>> train_dataset = SubsequenceDataset(u, y, subseq_len=100)
16 | """
17 |
18 | def __init__(self, *tensors, subseq_len):
19 | self.tensors = tensors
20 | self.subseq_len = subseq_len
21 | self.length = self.tensors[0].shape[0]
22 |
23 | def __len__(self):
24 | return self.length - self.subseq_len + 1
25 |
26 | def __getitem__(self, idx):
27 | subsequences = [tensor[idx:idx+self.subseq_len] for tensor in self.tensors]
28 | return subsequences
29 |
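A brief usage sketch for SubsequenceDataset together with a DataLoader: batches come out batch-first, and the training scripts under examples/ transpose them to time-first before feeding the simulators.

import torch
from torch.utils.data import DataLoader
from torchid.datasets import SubsequenceDataset

u = torch.randn(1000, 2)  # 2 inputs
y = torch.randn(1000, 3)  # 3 outputs
train_data = SubsequenceDataset(u, y, subseq_len=100)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)

batch_u, batch_y = next(iter(train_loader))
print(batch_u.shape, batch_y.shape)  # torch.Size([32, 100, 2]) torch.Size([32, 100, 3])
batch_u = batch_u.transpose(0, 1)    # to time-first, as expected by the simulators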
--------------------------------------------------------------------------------
/torchid/dynonet/__init__.py:
--------------------------------------------------------------------------------
1 | from . import module
--------------------------------------------------------------------------------
/torchid/dynonet/filtering.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy as sp
3 | import scipy.signal
4 | from multiprocessing.pool import ThreadPool as Pool
5 |
6 |
7 | def lfiltic_vec(b, a, y, x=None):
8 | """
9 | Construct initial conditions for lfilter given input and output vectors.
10 |
11 | Given a linear filter (b, a) and initial conditions on the output `y`
12 | and the input `x`, return the initial conditions on the state vector zi
13 | which is used by `lfilter` to generate the output given the input.
14 |
15 | Parameters
16 | ----------
17 | b : array_like
18 | Linear filter term.
19 | a : array_like
20 | Linear filter term.
21 | y : array_like
22 | Initial conditions.
23 |
24 | If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
25 |
26 | If `y` is too short, it is padded with zeros.
27 | x : array_like, optional
28 | Initial conditions.
29 |
30 | If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
31 |
32 | If `x` is not given, its initial conditions are assumed zero.
33 |
34 | If `x` is too short, it is padded with zeros.
35 |
36 | Returns
37 | -------
38 | zi : ndarray
39 | The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
40 | where ``K = max(M, N)``.
41 |
42 | See Also
43 | --------
44 | lfilter, lfilter_zi
45 |
46 | """
47 | N = np.size(a) - 1
48 | M = np.size(b) - 1
49 | K = max(M, N)
50 | y = np.asarray(y)
51 | batch_size = y.shape[0]
52 |
53 | if y.dtype.kind in 'bui':
54 | # ensure calculations are floating point
55 | y = y.astype(np.float64)
56 | zi = np.zeros((batch_size, K), y.dtype)
57 | if x is None:
58 | x = np.zeros((batch_size, M), y.dtype)
59 | else:
60 | x = np.asarray(x)
61 | L = np.shape(x)[1]
62 | if L < M:
63 |             x = np.c_[x, np.zeros((batch_size, M - L))]  # pad each row (lag axis) with zeros
64 |     L = np.shape(y)[1]
65 |     if L < N:
66 |         y = np.c_[y, np.zeros((batch_size, N - L))]  # pad each row (lag axis) with zeros
67 |
68 | for m in range(M):
69 | zi[:, m] = np.sum(b[m + 1:] * x[:, :M - m], axis=1)
70 |
71 | for m in range(N):
72 | zi[:, m] -= np.sum(a[m + 1:] * y[:, :N - m], axis=1)
73 |
74 | return zi
75 |
76 |
77 | def lfilter_mimo_channels_first(b, a, u_in):
78 | batch_size, in_ch, seq_len = u_in.shape
79 | out_ch, _, _ = a.shape
80 | y_out = np.zeros_like(u_in, shape=(batch_size, out_ch, seq_len))
81 | for out_idx in range(out_ch):
82 | for in_idx in range(in_ch):
83 | y_out[:, out_idx, :] += scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :],
84 | u_in[:, in_idx, :], axis=-1)
85 | return y_out
86 |
87 |
88 | def lfilter_mimo_components_channels_first(b, a, u_in):
89 | batch_size, in_ch, seq_len = u_in.shape
90 | out_ch, _, _ = a.shape
91 | y_comp_out = np.zeros_like(u_in, shape=(batch_size, out_ch, in_ch, seq_len))
92 | for out_idx in range(out_ch):
93 | for in_idx in range(in_ch):
94 | y_comp_out[:, out_idx, in_idx, :] = scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :], u_in[:, in_idx, :], axis=-1)
95 | return y_comp_out # [B, O, I, T]
96 |
97 |
98 | def lfilter_mimo(b, a, u_in):
99 | batch_size, seq_len, in_ch = u_in.shape
100 | out_ch, _, _ = a.shape
101 | y_out = np.zeros_like(u_in, shape=(batch_size, seq_len, out_ch))
102 | for out_idx in range(out_ch):
103 | for in_idx in range(in_ch):
104 | y_out[:, :, out_idx] += scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :],
105 | u_in[:, :, in_idx], axis=-1)
106 | return y_out # [B, T, O]
107 |
108 |
109 | def lfilter_mimo_components(b, a, u_in):
110 | batch_size, seq_len, in_ch = u_in.shape
111 | out_ch, _, _ = a.shape
112 | y_comp_out = np.zeros_like(u_in, shape=(batch_size, seq_len, out_ch, in_ch))
113 | for out_idx in range(out_ch):
114 | for in_idx in range(in_ch):
115 | y_comp_out[:, :, out_idx, in_idx] = scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :], u_in[:, :, in_idx], axis=-1)
116 | return y_comp_out # [B, T, O, I]
117 |
118 |
119 | if __name__ == '__main__':
120 |
121 | batch_size = 5
122 | n_b = 2
123 | n_f = 2
124 | N = 100
125 |     u_in = np.random.rand(batch_size, N, 1).astype(np.float64)
126 |
127 |     y_init = np.random.rand(*(batch_size, n_f)).astype(np.float64)
128 |     u_init = np.random.rand(*(batch_size, n_b)).astype(np.float64)
129 |
130 |     # coefficients of a 2nd order oscillator
131 |     b_coeff = np.array([0.0706464146944544, 0], dtype=np.float64)
132 |     f_coeff = np.array([-1.87212998940304, 0.942776404097492], dtype=np.float64)
133 |
134 | f_poly = np.r_[1.0, f_coeff]
135 | b_poly = np.r_[0.0, b_coeff]
136 |
137 | zi = lfiltic_vec(b_poly, f_poly, y_init, u_init) # initial condition
138 |
139 | zi_loop = np.empty_like(zi)
140 | for batch_idx in range(batch_size):
141 | zi_loop[batch_idx, :] = sp.signal.lfiltic(b_poly, f_poly, y_init[batch_idx, :], u_init[batch_idx, :]) # initial condition
142 |
143 |
144 | # y_out = lfilter_mimo_components_channels_last
145 | # yout_vec, _ = sp.signal.lfilter(b_poly, f_poly, u_in, axis=0, zi=zi.T)
146 |
147 | # yout_loop = np.empty_like(yout_vec)
148 | # for batch_idx in range(batch_size):
149 | # yout_loop[:, batch_idx] = sp.signal.lfilter(b_poly, f_poly, u_in[:, batch_idx], axis=0, zi=zi[batch_idx, :])[0]
150 |
151 |
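A small usage sketch for the MIMO filtering helpers above (the shapes follow the comments in the code: coefficients are indexed as [out_channel, in_channel, tap], inputs as [batch, time, in_channel]); the FIR coefficients below are arbitrary illustrative values.

import numpy as np
from torchid.dynonet.filtering import lfilter_mimo, lfilter_mimo_components

batch_size, seq_len, in_ch, out_ch, n_taps = 4, 200, 2, 3, 3
u_in = np.random.randn(batch_size, seq_len, in_ch)

b = 0.1 * np.random.randn(out_ch, in_ch, n_taps)  # numerator coefficients
a = np.zeros((out_ch, in_ch, n_taps))              # denominator coefficients
a[:, :, 0] = 1.0                                   # monic denominators (FIR filters here)

y = lfilter_mimo(b, a, u_in)                  # [B, T, O], contributions summed over inputs
y_comp = lfilter_mimo_components(b, a, u_in)  # [B, T, O, I], one component per (out, in) pair
assert np.allclose(y, y_comp.sum(axis=-1))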
--------------------------------------------------------------------------------
/torchid/dynonet/functional/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/pytorch-ident/f00d79ff8c6cb96d094d9ea1c079d4dcbf9a67e0/torchid/dynonet/functional/__init__.py
--------------------------------------------------------------------------------
/torchid/dynonet/module/__init__.py:
--------------------------------------------------------------------------------
1 | from . import lti
2 | from . import static
3 |
4 |
--------------------------------------------------------------------------------
/torchid/dynonet/module/static.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class MimoStaticNonLinearity(nn.Module):
6 | r"""Applies a Static MIMO non-linearity.
7 | The non-linearity is implemented as a feed-forward neural network.
8 |
9 | Args:
10 | in_channels (int): Number of input channels
11 | out_channels (int): Number of output channels
12 | n_hidden (int, optional): Number of nodes in the hidden layer. Default: 20
13 | activation (str): Activation function. Either 'tanh', 'relu', or 'sigmoid'. Default: 'tanh'
14 |
15 | Shape:
16 | - Input: (..., in_channels)
17 | - Output: (..., out_channels)
18 |
19 | Examples::
20 |
21 | >>> in_channels, out_channels = 2, 4
22 | >>> F = MimoStaticNonLinearity(in_channels, out_channels)
23 | >>> batch_size, seq_len = 32, 100
24 | >>> u_in = torch.ones((batch_size, seq_len, in_channels))
25 |         >>> y_out = F(u_in) # shape: (batch_size, seq_len, out_channels)
26 | """
27 |
28 | def __init__(self, in_channels, out_channels, n_hidden=20, activation='tanh'):
29 | super(MimoStaticNonLinearity, self).__init__()
30 |
31 | activation_dict = {'tanh': nn.Tanh, 'relu': nn.ReLU, 'sigmoid': nn.Sigmoid}
32 |
33 | self.net = nn.Sequential(
34 | nn.Linear(in_channels, n_hidden),
35 | activation_dict[activation](), #nn.Tanh(),
36 | nn.Linear(n_hidden, out_channels)
37 | )
38 |
39 | def forward(self, u_lin):
40 | y_nl = self.net(u_lin)
41 | return y_nl
42 |
43 |
44 | class SisoStaticNonLinearity(MimoStaticNonLinearity):
45 | r"""Applies a Static SISO non-linearity.
46 | The non-linearity is implemented as a feed-forward neural network.
47 |
48 | Args:
49 | n_hidden (int, optional): Number of nodes in the hidden layer. Default: 20
50 | activation (str): Activation function. Either 'tanh', 'relu', or 'sigmoid'. Default: 'tanh'
51 |
52 |     Shape:
53 |         - Input: (..., 1)
54 |         - Output: (..., 1)
55 |
56 |     Examples::
57 |
58 |         >>> F = SisoStaticNonLinearity(n_hidden=20)
59 |         >>> batch_size, seq_len = 32, 100
60 |         >>> u_in = torch.ones((batch_size, seq_len, 1))
61 |         >>> y_out = F(u_in) # shape: (batch_size, seq_len, 1)
62 | """
63 | def __init__(self, n_hidden=20, activation='tanh'):
64 | super(SisoStaticNonLinearity, self).__init__(in_channels=1, out_channels=1, n_hidden=n_hidden, activation=activation)
65 |
66 |
67 | class MimoChannelWiseNonLinearity(nn.Module):
68 | r"""Applies a Channel-wise non-linearity.
69 | The non-linearity is implemented as a set of feed-forward neural networks (each one operating on a different channel).
70 |
71 | Args:
72 | channels (int): Number of both input and output channels
73 | n_hidden (int, optional): Number of nodes in the hidden layer of each network. Default: 10
74 |
75 | Shape:
76 | - Input: (..., channels)
77 | - Output: (..., channels)
78 |
79 | Examples::
80 |
81 | >>> channels = 4
82 | >>> F = MimoChannelWiseNonLinearity(channels)
83 | >>> batch_size, seq_len = 32, 100
84 | >>> u_in = torch.ones((batch_size, seq_len, channels))
85 |         >>> y_out = F(u_in) # shape: (batch_size, seq_len, channels)
86 |
87 | """
88 |
89 | def __init__(self, channels, n_hidden=10):
90 | super(MimoChannelWiseNonLinearity, self).__init__()
91 |
92 | self.net = nn.ModuleList()
93 | for channel_idx in range(channels):
94 | channel_net = nn.Sequential(
95 |                 nn.Linear(1, n_hidden), # one scalar input per channel
96 | nn.ReLU(),
97 | nn.Linear(n_hidden, 1)
98 | )
99 | self.net.append(channel_net)
100 |
101 | def forward(self, u_lin):
102 |
103 | y_nl = []
104 | for channel_idx, u_channel in enumerate(u_lin.split(1, dim=-1)): # split over the last dimension (input channel)
105 | y_nl_channel = self.net[channel_idx](u_channel) # Process blocks individually
106 | y_nl.append(y_nl_channel)
107 |
108 | y_nl = torch.cat(y_nl, -1) # concatenate all output channels
109 | return y_nl
110 |
111 |
112 | if __name__ == '__main__':
113 |
114 | channels = 4
115 | nn1 = MimoChannelWiseNonLinearity(channels)
116 | in_data = torch.randn(100, 10, channels)
117 |     net_out = nn1(in_data)  # shape: (100, 10, channels)
--------------------------------------------------------------------------------
/torchid/io/__init__.py:
--------------------------------------------------------------------------------
1 | from . import module
2 |
--------------------------------------------------------------------------------
/torchid/io/module/__init__.py:
--------------------------------------------------------------------------------
1 | from . import io_simulator
2 | from . import iomodels
3 |
--------------------------------------------------------------------------------
/torchid/io/module/io_simulator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | class NeuralIOSimulator:
7 | """ This class implements prediction/simulation methods for the io model structure
8 |
9 | Attributes
10 | ----------
11 | io_model: nn.Module
12 | The neural io model to be fitted
13 | """
14 |
15 | def __init__(self, io_model):
16 | self.io_model = io_model
17 |
18 | def f_onestep(self, PHI):
19 | """ Naive one-step prediction
20 |
21 | Parameters
22 | ----------
23 | PHI : Tensor. Size: (N, n_a + n_b)
24 | Measured io regressor tensor
25 |
26 | Returns
27 | -------
28 | Tensor. Size: (N, n_y)
29 | One-step prediction of the output
30 |
31 | """
32 |
33 | Y_pred = self.io_model(PHI)
34 | return Y_pred
35 |
36 | def f_sim(self, y_seq, u_seq, U):
37 | """ Open-loop simulation
38 |
39 | Parameters
40 | ----------
41 | y_seq: Tensor. Size: (n_a)
42 | Initial regressor with past values of y
43 |
44 | u_seq: Tensor. Size: (n_b)
45 | Initial regressor with past values of u
46 |
47 | U : Tensor. Size: (N, n_u)
48 | Input sequence tensor
49 |
50 | Returns
51 | -------
52 | Tensor. Size: (N, n_y)
53 | Open-loop simulation of the output
54 |
55 | """
56 | N = np.shape(U)[0]
57 | Y_list = []
58 |
59 | for i in range(N):
60 | phi = torch.cat((y_seq, u_seq))
61 | yi = self.io_model(phi)
62 | Y_list += [yi]
63 |
64 | if i < N-1:
65 | # y shift
66 | y_seq[1:] = y_seq[0:-1]
67 | y_seq[0] = yi
68 |
69 | # u shift
70 | u_seq[1:] = u_seq[0:-1]
71 | u_seq[0] = U[i]
72 |
73 | Y = torch.stack(Y_list, 0)
74 | return Y
75 |
76 | def f_sim_multistep(self, batch_u, batch_y_seq, batch_u_seq):
77 | """ Multi-step simulation over (mini)batches
78 |
79 | Parameters
80 | ----------
81 | batch_u: Tensor. Size: (q, m, n_u)
82 | Input sequence for each subsequence in the minibatch
83 |
84 | batch_y_seq: Tensor. Size: (q, n_a)
85 | Initial regressor with past values of y for each subsequence in the minibatch
86 |
87 | batch_u_seq: Tensor. Size: (q, n_b)
88 | Initial regressor with past values of u for each subsequence in the minibatch
89 |
90 | Returns
91 | -------
92 | Tensor. Size: (q, m, n_y)
93 | Simulated output for all subsequences in the minibatch
94 |
95 | """
96 |
97 | batch_size = batch_u.shape[0] # number of training samples in the batch
98 | seq_len = batch_u.shape[1] # length of the training sequences
99 | n_a = batch_y_seq.shape[1] # number of autoregressive terms on y
100 | n_b = batch_u_seq.shape[1] # number of autoregressive terms on u
101 |
102 | Y_sim_list = []
103 | for i in range(seq_len):
104 | phi = torch.cat((batch_y_seq, batch_u_seq), -1)
105 | yi = self.io_model(phi)
106 | Y_sim_list += [yi]
107 |
108 | # y shift
109 | batch_y_seq[:, 1:] = batch_y_seq[:, 0:-1]
110 | batch_y_seq[:, [0]] = yi[:]
111 |
112 | # u shift
113 | batch_u_seq[:, 1:] = batch_u_seq[:, 0:-1]
114 | batch_u_seq[:, [0]] = batch_u[:, i]
115 |
116 | Y_sim = torch.stack(Y_sim_list, 1)
117 | return Y_sim
118 |
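A minimal sketch of how the simulator above can be combined with NeuralIOModel (defined in iomodels.py); the orders n_a, n_b and the signal length are illustrative.

import torch
from torchid.io.module.iomodels import NeuralIOModel
from torchid.io.module.io_simulator import NeuralIOSimulator

n_a, n_b, N = 2, 2, 500
io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
io_sim = NeuralIOSimulator(io_model)

y_seq = torch.zeros(n_a)   # past outputs y[-1], ..., y[-n_a]
u_seq = torch.zeros(n_b)   # past inputs  u[-1], ..., u[-n_b]
U = torch.randn(N, 1)      # input sequence

with torch.no_grad():
    Y_sim = io_sim.f_sim(y_seq, u_seq, U)  # open-loop simulation, shape (N, 1)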
--------------------------------------------------------------------------------
/torchid/io/module/iomodels.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | class NeuralIOModel(nn.Module):
7 | """ This class implements an io neural model
8 |
9 | Attributes
10 | ----------
11 | n_a : int.
12 | number of autoregressive lags in y
13 | n_b : int.
14 | number of autoregressive lags in u
15 | n_feat : int.
16 | number of units in the hidden layer
17 | """
18 | def __init__(self, n_a, n_b, n_feat=64, small_init=True):
19 | super(NeuralIOModel, self).__init__()
20 | self.n_a = n_a
21 | self.n_b = n_b
22 | self.n_feat = n_feat
23 |
24 | const_np = np.zeros((n_a + n_b, 1), dtype=np.float32)
25 | const_np[0, 0] = 1.0
26 | self.const = torch.tensor(const_np)
27 |
28 | self.net = nn.Sequential(
29 |             nn.Linear(n_a + n_b, n_feat), # regressor with n_a + n_b lagged values
30 | nn.ReLU(),
31 | nn.Linear(n_feat, 1),
32 | )
33 |
34 | if small_init:
35 | for m in self.net.modules():
36 | if isinstance(m, nn.Linear):
37 | nn.init.normal_(m.weight, mean=0, std=1e-4)
38 | nn.init.constant_(m.bias, val=0)
39 |
40 | def forward(self, phi):
41 | Y = self.net(phi) + torch.matmul(phi, self.const)
42 | return Y
43 |
44 |
45 | class NeuralIOModelComplex(nn.Module):
46 | def __init__(self, n_a, n_b, n_feat=64, small_init=True):
47 | super(NeuralIOModelComplex, self).__init__()
48 | self.n_a = n_a
49 | self.n_b = n_b
50 |         self.n_feat = n_feat
51 |
52 | const_np = np.zeros((n_a + n_b, 1), dtype=np.float32)
53 | const_np[0, 0] = 1.0
54 | self.const = torch.tensor(const_np)
55 |
56 | self.net = nn.Sequential(
57 |             nn.Linear(n_a + n_b, n_feat), # regressor with n_a + n_b lagged values
58 | nn.ELU(),
59 | nn.Linear(n_feat, n_feat),
60 | nn.ELU(),
61 | nn.Linear(n_feat, 1)
62 | )
63 |
64 | if small_init:
65 | for m in self.net.modules():
66 | if isinstance(m, nn.Linear):
67 | nn.init.normal_(m.weight, mean=0, std=1e-3)
68 | nn.init.constant_(m.bias, val=0)
69 |
70 | def forward(self, phi):
71 | Y = self.net(phi) + torch.matmul(phi, self.const)
72 | return Y
73 |
--------------------------------------------------------------------------------
/torchid/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def r_squared(y_true, y_pred, time_axis=0):
5 | """ Computes the R-square index.
6 |
7 | The R-squared index is computed separately on each channel.
8 |
9 | Parameters
10 | ----------
11 | y_true : np.array
12 |         Array of true values. It must be at least 2D.
13 |     y_pred : np.array
14 |         Array of predicted values. It must be compatible with y_true.
15 | time_axis : int
16 | Time axis. All other axes define separate channels.
17 |
18 | Returns
19 | -------
20 | r_squared_val : np.array
21 | Array of r_squared value.
22 | """
23 |
24 | SSE = np.sum((y_pred - y_true)**2, axis=time_axis)
25 | y_mean = np.mean(y_true, axis=time_axis, keepdims=True)
26 | SST = np.sum((y_true - y_mean)**2, axis=time_axis)
27 |
28 | return 1.0 - SSE/SST
29 |
30 |
31 | def rmse(y_true, y_pred, time_axis=0):
32 | """ Computes the Root Mean Square Error (RMSE).
33 |
34 | The RMSE index is computed separately on each channel.
35 |
36 | Parameters
37 | ----------
38 | y_true : np.array
39 |         Array of true values. It must be at least 2D.
40 |     y_pred : np.array
41 |         Array of predicted values. It must be compatible with y_true.
42 |     time_axis : int
43 |         Time axis. All other axes define separate channels.
44 |
45 |     Returns
46 |     -------
47 |     RMSE : np.array
48 |         Array of RMSE values.
49 |
50 | """
51 |
52 | SSE = np.mean((y_pred - y_true)**2, axis=time_axis)
53 | RMSE = np.sqrt(SSE)
54 | return RMSE
55 |
56 |
57 | def nrmse(y_true, y_pred, time_axis=0):
58 | """ Computes the Normalized Root Mean Square Error (NRMSE).
59 |
60 | The NRMSE index is computed separately on each channel.
61 |
62 | Parameters
63 | ----------
64 | y_true : np.array
65 |         Array of true values. It must be at least 2D.
66 |     y_pred : np.array
67 |         Array of predicted values. It must be compatible with y_true.
68 |     time_axis : int
69 |         Time axis. All other axes define separate channels.
70 |
71 |     Returns
72 |     -------
73 |     NRMSE : np.array
74 |         Array of NRMSE values.
75 |
76 | """
77 |
78 | SSE = np.mean((y_pred - y_true)**2, axis=time_axis)
79 | RMSE = np.sqrt(SSE)
80 | NRMSE = RMSE/np.std(y_true, axis=time_axis)
81 | return NRMSE
82 |
83 |
84 | def error_mean(y_true, y_pred, time_axis=0):
85 | """ Computes the error mean value.
86 |
87 | The error mean is computed separately on each channel.
88 |
89 | Parameters
90 | ----------
91 | y_true : np.array
92 |         Array of true values. It must be at least 2D.
93 |     y_pred : np.array
94 |         Array of predicted values. It must be compatible with y_true.
95 | time_axis : int
96 | Time axis. All other axes define separate channels.
97 |
98 | Returns
99 | -------
100 | e_mean : np.array
101 | Array of error means.
102 | """
103 |
104 | e_mean = np.mean(y_true - y_pred, axis=time_axis)
105 | return e_mean
106 |
107 |
108 | def mae(y_true, y_pred, time_axis=0):
109 | """ Computes the error Mean Absolute Value (MAE)
110 |
111 | The MAE index is computed separately on each channel.
112 |
113 | Parameters
114 | ----------
115 | y_true : np.array
116 |         Array of true values. It must be at least 2D.
117 |     y_pred : np.array
118 |         Array of predicted values. It must be compatible with y_true.
119 | time_axis : int
120 | Time axis. All other axes define separate channels.
121 |
122 | Returns
123 | -------
124 | e_mae : np.array
125 | Array of error mean absolute values.
126 | """
127 |
128 | e_mae = np.mean(np.abs(y_true - y_pred), axis=time_axis)
129 | return e_mae
130 |
131 |
132 | def fit_index(y_true, y_pred, time_axis=0):
133 | """ Computes the per-channel fit index.
134 |
135 | The fit index is commonly used in System Identification. See the definition in the System Identification Toolbox
136 | or in the paper 'Nonlinear System Identification: A User-Oriented Road Map',
137 | https://arxiv.org/abs/1902.00683, page 31.
138 | The fit index is computed separately on each channel.
139 |
140 | Parameters
141 | ----------
142 | y_true : np.array
143 |         Array of true values. It must be at least 2D.
144 |     y_pred : np.array
145 |         Array of predicted values. It must be compatible with y_true.
146 | time_axis : int
147 | Time axis. All other axes define separate channels.
148 |
149 | Returns
150 | -------
151 | fit : np.array
152 |         Array of fit index values.
153 |
154 | """
155 |
156 | err_norm = np.linalg.norm(y_true - y_pred, axis=time_axis, ord=2) # || y - y_pred ||
157 | y_mean = np.mean(y_true, axis=time_axis, keepdims=True)
158 | err_mean_norm = np.linalg.norm(y_true - y_mean, axis=time_axis, ord=2) # || y - y_mean ||
159 | fit = 100*(1 - err_norm/err_mean_norm)
160 |
161 | return fit
162 |
163 |
164 | if __name__ == '__main__':
165 | N = 20
166 | ny = 2
167 | SNR = 10
168 |     y_true = SNR*np.random.randn(N, ny)
169 |     y_pred = np.copy(y_true) + np.random.randn(N, ny)
170 |     err_rmse_val = rmse(y_true, y_pred)
171 | r_squared_val = r_squared(y_true, y_pred)
172 | fit_val = fit_index(y_true, y_pred)
173 |
174 | print(f"RMSE: {err_rmse_val}")
175 | print(f"R-squared: {r_squared_val}")
176 | print(f"fit index: {fit_val}")
177 |
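As a consistency note (not in the original module): since ||y - y_pred|| = sqrt(SSE) and ||y - y_mean|| = sqrt(SST), the fit index defined above relates to R-squared as fit = 100 * (1 - sqrt(1 - R^2)). A quick numerical check:

import numpy as np
import torchid.metrics as metrics

y_true = np.random.randn(200, 1)
y_pred = y_true + 0.1 * np.random.randn(200, 1)

r2 = metrics.r_squared(y_true, y_pred)
fit = metrics.fit_index(y_true, y_pred)
assert np.allclose(fit, 100 * (1 - np.sqrt(1 - r2)))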
--------------------------------------------------------------------------------
/torchid/ss/__init__.py:
--------------------------------------------------------------------------------
1 | from . import ct
2 | from . import dt
3 |
--------------------------------------------------------------------------------
/torchid/ss/ct/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import simulators
3 |
--------------------------------------------------------------------------------
/torchid/ss/ct/simulators.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from typing import List
5 |
6 |
7 | class ForwardEulerSimulator(nn.Module):
8 |
9 | r""" Forward Euler integration of a continuous-time neural state space model.
10 |
11 | Args:
12 | ss_model (nn.Module): The neural state-space model.
13 |         ts (float): Sampling time for simulation.
14 |         batch_first (bool): If True, first dimension is batch.
15 |
16 |     Inputs: x_0, input
17 |         * **x_0**: tensor of shape :math:`(N, n_{x})` containing the
18 |           initial state for each element in the batch.
19 |           It must be provided explicitly (no default value).
20 |         * **input**: tensor of shape :math:`(L, N, n_{u})` when ``batch_first=False`` or
21 |           :math:`(N, L, n_{u})` when ``batch_first=True`` containing the input sequence
22 |
23 | Outputs: states
24 | * **states**: tensor of shape :math:`(L, N, n_{x})` corresponding to
25 | the simulated state sequence.
26 |
27 | Examples::
28 |
29 | >>> ss_model = NeuralStateSpaceModel(n_x=3, n_u=2)
30 | >>> nn_solution = ForwardEulerSimulator(ss_model)
31 | >>> x0 = torch.randn(64, 3)
32 | >>> u = torch.randn(100, 64, 2)
33 | >>> x = nn_solution(x0, u)
34 | >>> print(x.size())
35 | torch.Size([100, 64, 3])
36 | """
37 |
38 | def __init__(self, ss_model, ts=1.0, batch_first=False):
39 | super(ForwardEulerSimulator, self).__init__()
40 | self.ss_model = ss_model
41 | self.ts = ts
42 | self.batch_first = batch_first
43 |
44 | def forward(self, x_0: torch.Tensor, input: torch.Tensor) -> torch.Tensor:
45 |
46 | states: List[torch.Tensor] = []
47 | x_step = x_0
48 | dim_time = 1 if self.batch_first else 0
49 |
50 | for u_step in input.split(1, dim=dim_time): # split along the time axis
51 | u_step = u_step.squeeze(dim_time)
52 | states += [x_step]
53 | dx = self.ss_model(x_step, u_step)
54 | x_step = x_step + self.ts*dx
55 |
56 | states = torch.stack(states, dim_time)
57 | return states
58 |
59 |
60 | class RK4Simulator(nn.Module):
61 | """ This class implements prediction/simulation methods for a continuous SS model structure
62 |
63 | Attributes
64 | ----------
65 | ss_model: nn.Module
66 | The neural SS model to be fitted
67 | ts: float
68 | model sampling time (when it is fixed)
69 |
70 |     The integration scheme is the classic fixed-step fourth-order
71 |     Runge-Kutta (RK4) method, with step size equal to ts.
72 | """
73 |
74 | def __init__(self, ss_model, ts=1.0):
75 | super(RK4Simulator, self).__init__()
76 | self.ss_model = ss_model
77 | self.ts = ts
78 |
79 | def forward(self, x0_batch, u_batch):
80 | """ Multi-step simulation over (mini)batches
81 |
82 | Parameters
83 | ----------
84 | x0_batch: Tensor. Size: (q, n_x)
85 | Initial state for each subsequence in the minibatch
86 |
87 | u_batch: Tensor. Size: (m, q, n_u)
88 | Input sequence for each subsequence in the minibatch
89 |
90 | Returns
91 | -------
92 | Tensor. Size: (m, q, n_x)
93 | Simulated state for all subsequences in the minibatch
94 |
95 | """
96 |
97 | X_sim_list = []
98 | x_step = x0_batch
99 | for u_step in u_batch.split(1):
100 |
101 | u_step = u_step.squeeze(0)
102 | X_sim_list += [x_step]
103 |
104 | dt2 = self.ts / 2.0
105 | k1 = self.ss_model(x_step, u_step)
106 | k2 = self.ss_model(x_step + dt2 * k1, u_step)
107 | k3 = self.ss_model(x_step + dt2 * k2, u_step)
108 | k4 = self.ss_model(x_step + self.ts * k3, u_step)
109 | dx = self.ts / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
110 | x_step = x_step + dx
111 |
112 | X_sim = torch.stack(X_sim_list, 0)
113 |
114 | return X_sim
115 |
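A usage sketch for RK4Simulator, analogous to the ForwardEulerSimulator example above; it assumes NeuralStateSpaceModel from torchid.ss.ct.models (the continuous-time model referenced in that docstring), but any nn.Module mapping (x, u) to the state derivative works.

import torch
from torchid.ss.ct.models import NeuralStateSpaceModel
from torchid.ss.ct.simulators import RK4Simulator

ss_model = NeuralStateSpaceModel(n_x=3, n_u=2)
rk4_solution = RK4Simulator(ss_model, ts=0.01)

x0 = torch.randn(64, 3)      # (batch, n_x) initial states
u = torch.randn(100, 64, 2)  # (time, batch, n_u), time-first
x = rk4_solution(x0, u)      # (100, 64, 3) simulated state sequence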
--------------------------------------------------------------------------------
/torchid/ss/dt/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import simulator
3 | from . import estimators
4 |
--------------------------------------------------------------------------------
/torchid/ss/dt/estimators.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class LSTMStateEstimator(nn.Module):
6 | """ Black-box estimator from the sequences of (u, y) to x[N-1].
7 | The estimation is performed by processing (u, y) forward in time.
8 | """
9 |
10 | def __init__(self, n_u, n_y, n_x, hidden_size=16, batch_first=False, flipped=False):
11 | super(LSTMStateEstimator, self).__init__()
12 | self.n_u = n_u
13 | self.n_y = n_y
14 | self.n_x = n_x
15 | self.batch_first = batch_first
16 | self.flipped = flipped
17 |
18 | self.lstm = nn.LSTM(input_size=n_y + n_u, hidden_size=hidden_size, batch_first=batch_first)
19 | self.lstm_output = nn.Linear(hidden_size, n_x)
20 | self.dim_time = 1 if self.batch_first else 0
21 |
22 | def forward(self, u, y):
23 | uy = torch.cat((u, y), -1)
24 | if self.flipped:
25 | uy = uy.flip(self.dim_time)
26 | _, (hN, cN) = self.lstm(uy)
27 | xN = self.lstm_output(hN).squeeze(0)
28 | return xN
29 |
30 |
31 | class FeedForwardStateEstimator(nn.Module):
32 | def __init__(self, n_u, n_y, n_x, seq_len, hidden_size=64, batch_first=False):
33 | super(FeedForwardStateEstimator, self).__init__()
34 | self.n_u = n_u
35 | self.n_y = n_y
36 | self.n_x = n_x
37 | self.batch_first = batch_first
38 | self.seq_len = seq_len
39 |
40 | self.est_net = nn.Sequential(
41 | nn.Linear((n_u + n_y)*seq_len, hidden_size),
42 | nn.Tanh(),
43 | nn.Linear(hidden_size, n_x),
44 | )
45 |
46 | def forward(self, u, y):
47 | uy = torch.cat((u, y), -1)
48 | if not self.batch_first:
49 | uy = uy.transpose(0, 1)
50 | feat = uy.flatten(start_dim=1)
51 |
52 | x_est = self.est_net(feat)
53 | return x_est
54 |
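A short shape sketch for the estimators above, matching how the training scripts in examples/ call them (time-first tensors by default):

import torch
from torchid.ss.dt.estimators import LSTMStateEstimator

n_u, n_y, n_x = 1, 1, 2
estimator = LSTMStateEstimator(n_u=n_u, n_y=n_y, n_x=n_x)

seq_est_len, batch_size = 32, 64
u_est = torch.randn(seq_est_len, batch_size, n_u)  # (time, batch, n_u)
y_est = torch.randn(seq_est_len, batch_size, n_y)  # (time, batch, n_y)

x0 = estimator(u_est, y_est)  # estimated state at the end of the window
print(x0.shape)               # torch.Size([64, 2])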
--------------------------------------------------------------------------------
/torchid/ss/dt/simulator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from typing import List
4 |
5 |
6 | class StateSpaceSimulator(nn.Module):
7 | r""" Discrete-time state-space simulator.
8 |
9 | Args:
10 | f_xu (nn.Module): The neural state-space model.
11 | batch_first (bool): If True, first dimension is batch.
12 |
13 | Inputs: x_0, u
14 | * **x_0**: tensor of shape :math:`(N, n_{x})` containing the
15 | initial hidden state for each element in the batch.
16 |           It must be provided explicitly (no default value).
17 |         * **input**: tensor of shape :math:`(L, N, n_{u})` when ``batch_first=False`` or
18 |           :math:`(N, L, n_{u})` when ``batch_first=True`` containing the input sequence
19 |
20 |     Outputs: y
21 |         * **y**: tensor of shape :math:`(L, N, n_{y})` corresponding to
22 |           the simulated output sequence (or the state sequence if ``g_x`` is None).
23 |
24 | Examples::
25 |
26 | >>> ss_model = NeuralStateSpaceModel(n_x=3, n_u=2)
27 | >>> nn_solution = StateSpaceSimulator(ss_model)
28 | >>> x0 = torch.randn(64, 3)
29 | >>> u = torch.randn(100, 64, 2)
30 | >>> x = nn_solution(x0, u)
31 | >>> print(x.size())
32 | torch.Size([100, 64, 3])
33 | """
34 |
35 | def __init__(self, f_xu, g_x=None, batch_first=False):
36 | super().__init__()
37 | self.f_xu = f_xu
38 | self.g_x = g_x
39 | self.batch_first = batch_first
40 |
41 | def simulate_state(self, x_0, u):
42 | x: List[torch.Tensor] = []
43 | x_step = x_0
44 | dim_time = 1 if self.batch_first else 0
45 |
46 | for u_step in u.split(1, dim=dim_time): # split along the time axis
47 | u_step = u_step.squeeze(dim_time)
48 | x += [x_step]
49 | dx = self.f_xu(x_step, u_step)
50 | x_step = x_step + dx
51 |
52 | x = torch.stack(x, dim_time)
53 | return x
54 |
55 | def forward(self, x_0, u, return_x=False):
56 | x = self.simulate_state(x_0, u)
57 | if self.g_x is not None:
58 | y = self.g_x(x)
59 | else:
60 | y = x
61 | if not return_x:
62 | return y
63 | else:
64 | return y, x
65 |
66 |
67 |
--------------------------------------------------------------------------------
/torchid/ss/poly_utils.py:
--------------------------------------------------------------------------------
1 | def digits_repr(num, base, min_digits):
2 | res = []
3 | while num:
4 | res.append(num % base)
5 | num //= base
6 |
7 | if len(res) < min_digits:
8 | res = res + [0] * (min_digits - len(res))
9 | res.reverse()
10 | return res
11 |
12 |
13 | def valid_coeffs(n_feat, p):
14 | n_comb = (p+1)**n_feat # combination of possible monomials
15 | pows = []
16 | for comb in range(1, n_comb+1):
17 | pow = digits_repr(comb, base=p+1, min_digits=n_feat)
18 | if 1 < sum(pow) < p+1:
19 | pows.append(pow)
20 | return pows
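A small worked example for the helpers above: with n_feat=2 features and maximum degree p=2, valid_coeffs returns the exponent vectors of all monomials whose total degree is strictly between 1 and p+1, i.e. the degree-2 terms.

from torchid.ss.poly_utils import valid_coeffs

print(valid_coeffs(n_feat=2, p=2))
# [[0, 2], [1, 1], [2, 0]]  ->  x2**2, x1*x2, x1**2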
--------------------------------------------------------------------------------
/torchid/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/pytorch-ident/f00d79ff8c6cb96d094d9ea1c079d4dcbf9a67e0/torchid/tests/__init__.py
--------------------------------------------------------------------------------
/torchid/tests/ss_ct_test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.ss.dt.models import NeuralStateUpdate
3 | from torchid.ss.dt.simulator import StateSpaceSimulator
4 |
5 |
6 | def test_batchfirst():
7 | """Test batch-first option of ForwardEulerSimulator"""
8 |
9 | L = 100 # sequence length
10 | N = 64 # batch size
11 | n_x = 2 # states
12 | n_u = 3
13 |
14 | f_xu = NeuralStateUpdate(n_x=n_x, n_u=n_u)
15 | model_tf = StateSpaceSimulator(f_xu) # batch_first=False, thus time first, default
16 | model_bf = StateSpaceSimulator(f_xu, batch_first=True)
17 |
18 | x0 = torch.randn(N, n_x)
19 | u_tf = torch.randn(L, N, n_u)
20 | u_bf = torch.transpose(u_tf, 0, 1) # transpose time/batch dimensions
21 |
22 | x_tf = model_tf(x0, u_tf)
23 | x_bf = model_bf(x0, u_bf)
24 |
25 | assert(torch.allclose(x_bf.transpose(0, 1), x_tf))
26 |
27 |
--------------------------------------------------------------------------------