├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── csv-plugin.xml
│   ├── dynonet.iml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── other.xml
│   └── vcs.xml
├── LICENSE
├── README.md
├── doc
│   ├── dense_dynonet.png
│   ├── paper
│   │   ├── fig
│   │   │   ├── BW_timetrace.pdf
│   │   │   ├── EMPS_timetrace.pdf
│   │   │   ├── WH_timetrace.pdf
│   │   │   ├── backprop_tf_ab.pdf
│   │   │   ├── backprop_tf_ab.svg
│   │   │   ├── backprop_tf_ab_stable.pdf
│   │   │   ├── backprop_tf_ab_stable.svg
│   │   │   ├── backprop_tf_ab_stable_mod.pdf
│   │   │   ├── backprop_tf_ab_stable_mod.svg
│   │   │   ├── coeff_2ndorder.svg
│   │   │   ├── generalized_HW.pdf
│   │   │   ├── generalized_HW.svg
│   │   │   ├── hammer.pdf
│   │   │   ├── hammer.svg
│   │   │   ├── hammerstein_wiener.pdf
│   │   │   ├── hammerstein_wiener.svg
│   │   │   ├── parallel_WH.pdf
│   │   │   ├── parallel_WH.svg
│   │   │   ├── rho.svg
│   │   │   ├── rhopsi.png
│   │   │   ├── stable_2ndorder.pdf
│   │   │   ├── transf.svg
│   │   │   ├── wiener.pdf
│   │   │   ├── wiener.svg
│   │   │   ├── wiener_hammerstein.pdf
│   │   │   └── wiener_hammerstein.svg
│   │   ├── ms.bib
│   │   ├── ms.pdf
│   │   └── ms.tex
│   └── slides
│       ├── img
│       │   ├── BW_timetrace.pdf
│       │   ├── EMPS_timetrace.pdf
│       │   ├── WH_timetrace.pdf
│       │   ├── backprop_tf_ab.pdf
│       │   ├── backprop_tf_ab_empty.pdf
│       │   ├── forward_tf_ab.pdf
│       │   ├── generalized_HW.pdf
│       │   ├── generic_dynonet.pdf
│       │   ├── generic_dynonet.svg
│       │   ├── hammer.pdf
│       │   ├── hammerstein_wiener.pdf
│       │   ├── parallel_WH.pdf
│       │   ├── stable_2ndorder.pdf
│       │   ├── wiener.pdf
│       │   └── wiener_hammerstein.pdf
│       ├── preamble.tex
│       ├── presentation_main.pdf
│       └── presentation_main.tex
├── examples
│   ├── BW
│   │   ├── BW_import.py
│   │   ├── BW_plot.py
│   │   ├── BW_test.py
│   │   ├── BW_timetrace.pdf
│   │   ├── BW_train.py
│   │   ├── README.md
│   │   ├── __init__.py
│   │   └── data
│   │       ├── BoucWen_generate_train.m
│   │       ├── README.txt
│   │       └── Test signals
│   │           ├── test.h5
│   │           └── train.h5
│   ├── EMPS
│   │   ├── EMPS_plot.py
│   │   ├── EMPS_test.py
│   │   ├── EMPS_timetrace.pdf
│   │   ├── EMPS_train.py
│   │   ├── README.md
│   │   ├── __init__.py
│   │   └── data
│   │       └── README.txt
│   ├── ParWH
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── common.py
│   │   ├── data
│   │   │   └── README.txt
│   │   ├── parWH_plot_id.py
│   │   ├── parWH_plot_test.py
│   │   ├── parWH_test.py
│   │   ├── parWH_train.py
│   │   ├── parWH_train_refine.py
│   │   └── parWH_train_single_dataset.py
│   ├── RLC
│   │   ├── README.md
│   │   ├── RLC_MIMO_wiener_fit.py
│   │   ├── RLC_SIMO_WH.py
│   │   ├── RLC_generate_id.py
│   │   ├── RLC_generate_test.py
│   │   ├── RLC_train.py
│   │   ├── RLC_train_FIR.py
│   │   ├── RLC_train_process_noise.py
│   │   ├── RLC_wiener_fit.py
│   │   ├── old
│   │   │   └── RLC_secondorder_fit.py
│   │   └── symbolic_RLC.py
│   ├── Silverbox
│   │   ├── README.md
│   │   ├── data
│   │   │   └── README.txt
│   │   ├── silverbox_plot.py
│   │   ├── silverbox_train_W.py
│   │   ├── silverbox_train_WH.py
│   │   └── silverbox_train_feedback.py
│   ├── WH2009
│   │   ├── README.md
│   │   ├── WH2009_test.py
│   │   ├── WH2009_test_FIR.py
│   │   ├── WH2009_train.py
│   │   ├── WH2009_train_FIR.py
│   │   ├── WH2009_train_comparisons.py
│   │   ├── WH2009_train_process_noise.py
│   │   ├── WH2009_train_process_noise_PEM.py
│   │   ├── WH2009_train_quantized.py
│   │   ├── WH_fit.pdf
│   │   ├── WH_timetrace.pdf
│   │   ├── __init__.py
│   │   └── data
│   │       └── README.txt
│   ├── Wiener
│   │   ├── .gitignore
│   │   ├── README.txt
│   │   ├── W_generate_noise.py
│   │   ├── W_generate_nonoise.py
│   │   ├── W_test.py
│   │   ├── W_train_ML_refine.py
│   │   ├── W_train_NLS.py
│   │   ├── W_train_NLS_nonoise.py
│   │   ├── data
│   │   │   └── dataset.h5
│   │   └── models
│   │       ├── ML_noise
│   │       │   ├── F.pkl
│   │       │   └── G.pkl
│   │       └── NLS_noise
│   │           ├── F.pkl
│   │           └── G.pkl
│   ├── __init__.py
│   └── coupled_drive
│       ├── README.md
│       ├── data
│       │   └── README.txt
│       ├── drive_plot.py
│       └── drive_train_W.py
├── setup.cfg
├── setup.py
├── sphinx
│   ├── Makefile
│   ├── make.bat
│   └── source
│       ├── code.rst
│       ├── conf.py
│       └── index.rst
├── src
│   └── dynonet
│       ├── __init__.py
│       ├── filtering.py
│       ├── functional.py
│       ├── lti.py
│       ├── metrics.py
│       └── static.py
└── test_code
    ├── .gitignore
    ├── correlate_example.py
    ├── einsum_example.py
    ├── filter_example.py
    ├── filter_initial_cond_ab.py
    ├── filter_mimo.py
    ├── filter_mimo_channels_last.py
    ├── find_initial_cond_ab.py
    ├── install_pypy.txt
    ├── linearsiso_fun_example.py
    ├── linearsiso_module_example.py
    ├── linearsiso_module_time.py
    ├── mimo_module_example.py
    ├── mimo_secondorder_module_example.py
    ├── run_tests.bat
    ├── secondorder_module_example.py
    ├── stability_second_order.py
    ├── stable_ocs_param.py
    ├── stable_param.py
    ├── tmp
    │   ├── backproptest1.py
    │   ├── backproptest2.py
    │   ├── new-relu.py
    │   ├── new_convolution.py
    │   ├── new_optional.py
    │   └── new_staticlinear.py
    ├── torch_convolutional.py
    └── torch_convolutional_FIR.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | dynonet.egg-info
3 |
4 | /examples/WH2009/data/
5 | /examples/WH2009/models/
6 | /examples/BW/data/
7 | /examples/BW/models/
8 | /examples/EMPS/data/
9 | /examples/EMPS/models/
10 | /examples/RLC/data/
11 | /examples/RLC/models/
12 | /examples/ParWH/data/
13 | /examples/ParWH/models/model_PWH/
14 | /examples/coupled_drive/data/
15 | /examples/coupled_drive/models/
16 | /examples/Silverbox/data/
17 | /examples/Silverbox/models/
18 | /torchid/old/
19 | /sphinx/build/
20 |
21 | /doc/slides/presentation_main.aux
22 | /doc/slides/presentation_main.nav
23 | /doc/slides/presentation_main.out
24 | /doc/slides/presentation_main.log
25 | /doc/slides/presentation_main.snm
26 | /doc/slides/presentation_main.toc
27 | /doc/slides/presentation_main.vrb
28 | /doc/paper/ms.synctex.gz
29 | /doc/paper/ms.log
30 | /doc/paper/ms.dvi
31 | /doc/paper/ms.blg
32 | /doc/paper/ms.bbl
33 | /doc/paper/ms.aux
34 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/.idea/csv-plugin.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/dynonet.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Marco Forgione
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dynoNet: A neural network architecture for learning dynamical systems
2 |
3 | This repository contains the Python code to reproduce the results of the paper [dynoNet: A neural network architecture for learning dynamical systems](https://arxiv.org/pdf/2006.02250.pdf) by Marco Forgione and Dario Piga.
4 |
5 | In this work, we introduce the linear dynamical operator as a differentiable layer compatible with back-propagation-based training.
6 | The operator is parametrized as a rational transfer function and thus can represent an infinite impulse response (IIR)
7 | filtering operation, as opposed to the Convolutional layer of 1D-CNNs that is equivalent to finite impulse response (FIR) filtering.
8 |
 9 | In the dynoNet architecture, linear dynamical operators are combined with static (i.e., memoryless) non-linearities, which can be elementary
10 | activation functions applied channel-wise, fully connected feed-forward neural networks, or other differentiable operators.
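As a minimal sketch of how these blocks compose (the block sizes below are illustrative and simply mirror those used in the examples), a Wiener model is a G-block followed by a static non-linearity:

```python
import torch
from dynonet.lti import MimoLinearDynamicalOperator
from dynonet.static import MimoStaticNonLinearity

G = MimoLinearDynamicalOperator(1, 4, n_b=3, n_a=3)  # 1 input, 4 output channels, IIR dynamics
F = MimoStaticNonLinearity(4, 1, n_hidden=10)        # memoryless map from 4 channels to 1

u = torch.randn(1, 1000, 1)  # (batch, time, channels)
y_hat = F(G(u))              # simulate the Wiener model; differentiable end-to-end
```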
11 |
12 |
13 | 
14 |
15 | A 15-min presentation about dynoNet is available [here](https://www.youtube.com/watch?v=SrrlhGPLBrA&t=55s).
16 |
17 | # Folders:
18 | * [dynonet](src/dynonet): PyTorch implementation of the linear dynamical operator (aka G-block in the paper) used in dynoNet
19 | * [examples](examples): examples using dynoNet for system identification
20 | * [metrics](src/dynonet/metrics.py): definition of the R-squared, RMSE, and fit index metrics
21 | * [doc](doc): paper & slides
22 |
23 | Three [examples](examples) discussed in the paper are:
24 |
25 | * [WH2009](examples/WH2009): A circuit with Wiener-Hammerstein behavior. Experimental dataset from http://www.nonlinearbenchmark.org
26 | * [BW](examples/BW): Bouc-Wen, a nonlinear dynamical system describing hysteretic effects in mechanical engineering. Experimental dataset from http://www.nonlinearbenchmark.org
27 | * [EMPS](examples/EMPS): A controlled prismatic joint (Electro Mechanical Positioning System). Experimental dataset from http://www.nonlinearbenchmark.org
28 |
29 | For the [WH2009](examples/WH2009) example, the main scripts are:
30 |
31 | * ``WH2009_train.py``: Training of the dynoNet model
32 | * ``WH2009_test.py``: Evaluation of the dynoNet model on the test dataset, computation of metrics.
33 |
34 | Similar scripts are provided for the other examples.
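For orientation, the training scripts share the same basic pattern: simulate the model, compute an MSE loss on the simulated output, and back-propagate through the G-blocks. A condensed, hypothetical sketch (reusing the `G` and `F` blocks from the sketch above, with `u` and `y` standing for training tensors of shape (1, T, 1)):

```python
import torch
import dynonet.metrics

optimizer = torch.optim.Adam(list(G.parameters()) + list(F.parameters()), lr=1e-4)
for itr in range(10000):
    optimizer.zero_grad()
    y_hat = F(G(u))                      # simulated model output
    loss = torch.mean((y - y_hat) ** 2)  # MSE on the simulated output
    loss.backward()                      # gradients flow through the IIR filtering operations
    optimizer.step()

# the metrics operate on (time, channels) arrays and return one value per channel
fit = dynonet.metrics.fit_index(y[0].numpy(), y_hat[0].detach().numpy())[0]
```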
35 |
36 | NOTE: the original datasets are not included in this project. They have to be manually downloaded from
37 | http://www.nonlinearbenchmark.org and copied into the data sub-folder of the corresponding example.
38 | # Software requirements:
39 | Simulations were performed in a Python 3.7 conda environment with
40 |
41 | * numpy
42 | * scipy
43 | * matplotlib
44 | * pandas
45 | * pytorch (version 1.4)
46 |
47 | These dependencies may be installed through the commands:
48 |
49 | ```
50 | conda install numpy scipy pandas matplotlib
51 | conda install pytorch torchvision cudatoolkit=10.2 -c pytorch
52 | ```
53 |
54 | # Local installation:
55 |
56 | ## From PyPI
57 | Type in terminal:
58 | ```
59 | pip install dynonet
60 | ```
61 | This will install the latest stable version packaged on PyPI: https://pypi.org/project/dynonet/
62 |
63 | ## From a local copy of this repository
64 | Navigate to a local copy of this repository, where setup.py and setup.cfg are located.
65 | Then, type in terminal:
66 |
67 | ```
68 | pip install -e .
69 | ```
70 |
71 |
72 | # Citing
73 |
74 | If you find this project useful, we encourage you to
75 |
76 | * Star this repository :star:
77 | * Cite the [paper](https://onlinelibrary.wiley.com/doi/abs/10.1002/acs.3216)
78 | ```
79 | @article{forgione2021dyno,
80 | title={\textit{dyno{N}et}: A neural network architecture for learning dynamical systems},
81 | author={Forgione, M. and Piga, D.},
82 | journal={International Journal of Adaptive Control and Signal Processing},
83 | volume={35},
84 | number={4},
85 | pages={612--626},
86 | year={2021},
87 | publisher={Wiley}
88 | }
89 | ```
90 |
--------------------------------------------------------------------------------
/doc/dense_dynonet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/dense_dynonet.png
--------------------------------------------------------------------------------
/doc/paper/fig/BW_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/BW_timetrace.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/EMPS_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/EMPS_timetrace.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/WH_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/WH_timetrace.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/backprop_tf_ab.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/backprop_tf_ab.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/backprop_tf_ab_stable.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/backprop_tf_ab_stable.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/backprop_tf_ab_stable_mod.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/backprop_tf_ab_stable_mod.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/generalized_HW.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/generalized_HW.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/hammer.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/hammer.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/hammerstein_wiener.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/hammerstein_wiener.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/parallel_WH.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/parallel_WH.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/rho.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/doc/paper/fig/rhopsi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/rhopsi.png
--------------------------------------------------------------------------------
/doc/paper/fig/stable_2ndorder.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/stable_2ndorder.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/transf.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/doc/paper/fig/wiener.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/wiener.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/wiener_hammerstein.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/fig/wiener_hammerstein.pdf
--------------------------------------------------------------------------------
/doc/paper/ms.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/paper/ms.pdf
--------------------------------------------------------------------------------
/doc/slides/img/BW_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/BW_timetrace.pdf
--------------------------------------------------------------------------------
/doc/slides/img/EMPS_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/EMPS_timetrace.pdf
--------------------------------------------------------------------------------
/doc/slides/img/WH_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/WH_timetrace.pdf
--------------------------------------------------------------------------------
/doc/slides/img/backprop_tf_ab.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/backprop_tf_ab.pdf
--------------------------------------------------------------------------------
/doc/slides/img/backprop_tf_ab_empty.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/backprop_tf_ab_empty.pdf
--------------------------------------------------------------------------------
/doc/slides/img/forward_tf_ab.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/forward_tf_ab.pdf
--------------------------------------------------------------------------------
/doc/slides/img/generalized_HW.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/generalized_HW.pdf
--------------------------------------------------------------------------------
/doc/slides/img/generic_dynonet.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/generic_dynonet.pdf
--------------------------------------------------------------------------------
/doc/slides/img/hammer.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/hammer.pdf
--------------------------------------------------------------------------------
/doc/slides/img/hammerstein_wiener.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/hammerstein_wiener.pdf
--------------------------------------------------------------------------------
/doc/slides/img/parallel_WH.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/parallel_WH.pdf
--------------------------------------------------------------------------------
/doc/slides/img/stable_2ndorder.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/stable_2ndorder.pdf
--------------------------------------------------------------------------------
/doc/slides/img/wiener.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/wiener.pdf
--------------------------------------------------------------------------------
/doc/slides/img/wiener_hammerstein.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/img/wiener_hammerstein.pdf
--------------------------------------------------------------------------------
/doc/slides/preamble.tex:
--------------------------------------------------------------------------------
 1 | % If the fonts do not look nice, try deleting the line with the fontenc.
2 | \usepackage[english]{babel}
3 | \usepackage{amsmath}
4 | \usepackage[latin1]{inputenc}
5 | \usepackage{units}
6 | \usepackage{colortbl}
7 | \usepackage{multimedia}
8 | \usepackage{bm}
9 |
10 | \mode<presentation>
11 | {
12 | \usetheme{Boadilla}
13 | \useoutertheme{infolines}
14 | \setbeamercovered{transparent}
15 | }
16 |
17 | \newcommand{\Name}{\emph{dynoNet}}
18 |
19 | \title[\Name]{\Name: a neural network architecture for learning dynamical systems}
20 |
21 | %\subtitle{Industrial-scale experimental results\\} % (optional)
22 |
23 | \author[]{Marco Forgione, Dario Piga}
24 |
25 | \institute[IDSIA]{
26 | \inst{1}IDSIA Dalle Molle Institute for Artificial Intelligence SUPSI-USI, Lugano, Switzerland
27 | }
28 |
29 |
30 | \date[]{\today}
31 |
32 |
33 | \subject{System identification with neural networks}
34 |
35 |
36 | %% MATH DEFINITIONS %%
37 | \newcommand{\q}{q} % shift operator
38 | \newcommand{\A}{A} % autoregressive polynomial
39 | \newcommand{\ac}{a} % autoregressive polynomial coefficient
40 | \newcommand{\B}{B} % exogenous polynomial
41 | \newcommand{\bb}{b} % exogenous polynomial coefficient
42 | \newcommand{\Gmat}{\mathbb{G}} % transfer function operator in matrix form
43 | \newcommand{\tvec}[1]{\mathbf{#1}}
44 | \newcommand{\mat}[1]{\bm{#1}}
45 | \newcommand{\sens}[1]{\tilde{#1}}
46 | \newcommand{\adjoint}[1]{\overline{#1}}
47 | \newcommand{\loss}{\mathcal{L}}
48 | \newcommand{\pdiff}[2]{\frac{\partial #1}{\partial #2}}
49 | \newcommand{\nsamp}{T}
50 |
51 | \newcommand{\conv}{*}
52 | \newcommand{\ccorr}{\star}
53 | %\newcommand{\R}{\mathcal{R}}
54 | %\newcommand{\du}{\delta u}
55 | %\newcommand{\dy}{\delta y}
56 | %\newcommand{\DU}{\Delta U}
57 | %\newcommand{\DY}{\Delta Y}
58 | %\newcommand{\abs}[1]{\left | #1 \right |}
59 | \newcommand{\norm}[1]{\left \lVert #1 \right \rVert}
60 | %\newcommand{\relphantom}[1]{\mathrel{\phantom{#1}}}
61 | %\newenvironment{matrixc}{\begin{array}{c}\left[}{\end{array}\right]}
62 | \DeclareMathOperator*\argmin{arg \, min}
63 | %\DeclareMathOperator*\argmax{arg \, max}
64 | %\DeclareMathOperator*\fit{fit}
65 | %\DeclareMathOperator*\RMSE{RMSE}
66 | %\DeclareMathOperator*\diag{diag}
67 | %\DeclareMathOperator*\diet{diet}
68 | %\DeclareMathOperator*\Risk{Risk}
69 | %\DeclareMathOperator*\Num{Num}
70 | %\DeclareMathOperator*\Den{Den}
71 | %\DeclareMathOperator*\Rat{Rat}
72 | \DeclareMathOperator*\cov{cov}
73 | %\DeclareMathOperator*\Var{Var}
74 | %\DeclareMathOperator*\SSR{SSR}
75 | %\setcounter{MaxMatrixCols}{20}
76 | %\newcommand{\pdiff}[2]{\frac{\partial #1}{\partial #2}}
77 | %\definecolor{mypink1}{rgb}{0.858, 0.188, 0.478}
78 | %\definecolor{olive}{RGB}{85, 107, 47}
79 | \definecolor{orange}{RGB}{204, 85, 0}
80 |
81 | %\definecolor{mypink3}{cmyk}{0, 0.7808, 0.4429, 0.1412}
82 | %\definecolor{mygray}{gray}{0.6}
83 | %\definecolor{olivegreen}[RGB]{85, 107, 47}
84 |
85 | \newcommand{\K}{K}
86 | \newcommand{\M}{M}
87 | \newcommand{\Mo}{M_o}
88 | \newcommand{\So}{S_o}
89 | \newcommand{\Smod}{S}
90 | \newcommand{\parcolor}[1]{{\color{orange}#1}}
91 | \newcommand{\Ts}{T_s^{\rm MPC}}
92 | \newcommand{\cites}[1]{\begin{small}(#1)\end{small}}
93 | %\newcommand{\Ts}{T_s}
94 |
--------------------------------------------------------------------------------
/doc/slides/presentation_main.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/doc/slides/presentation_main.pdf
--------------------------------------------------------------------------------
/examples/BW/BW_import.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.io
3 | import matplotlib.pyplot as plt
4 | import os
5 | import h5py
6 |
7 | if __name__ == '__main__':
8 |
9 | #signal_name = 'multisine'
10 | signal_name = 'sinesweep'
11 |
12 | # In[Load dataset]
13 | u_name = 'uval_' + signal_name
14 | u_filename = u_name + '.mat'
15 |
16 | y_name = 'yval_' + signal_name
17 | y_filename = y_name + '.mat'
18 |
19 | u = scipy.io.loadmat(os.path.join("data", "Test signals", "Validation signals", u_filename))[u_name]\
20 | .reshape(1, -1)
21 | y = scipy.io.loadmat(os.path.join("data", "Test signals", "Validation signals", y_filename))[y_name]\
22 | .reshape(1, -1)
23 |
24 | fs = np.array([750.0])
25 |
26 | # In[Plot dataset]
27 | fig, ax = plt.subplots(2, 1, sharex=True)
28 | ax[0].plot(y[0, :])
29 | ax[0].set_xlabel('Sample index')
30 | ax[0].set_ylabel('Displacement (mm)')
31 | ax[0].grid(True)
32 | ax[1].plot(u[0, :])
33 | ax[1].set_xlabel('Sample index')
34 | ax[1].set_ylabel('Force (N)')
35 | ax[1].grid(True)
36 |
37 | # In[Save in an hdf file]
38 |
39 | # can only write a group once, delete file to re-write the same group
40 | filename = os.path.join('data', 'Test signals', 'test.h5')
41 | hf = h5py.File(filename, 'a')
42 | ds_signal = hf.create_group(signal_name) # signal group
43 | ds_signal.create_dataset('y', data=y.transpose())
44 | ds_signal.create_dataset('u', data=u.transpose())
45 | ds_signal.create_dataset('fs', data=fs)
46 | hf.close()
47 |
--------------------------------------------------------------------------------
/examples/BW/BW_plot.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import os
5 | import h5py
6 |
7 | if __name__ == '__main__':
8 |
9 | # In[Load dataset]
10 |
11 | h5_filename = 'train.h5'
12 | #h5_filename = 'test.h5'
13 |
14 | signal_name = 'multisine'
15 | #signal_name = 'sinesweep' # available in test
16 |
17 | h5_data = h5py.File(os.path.join("data", "Test signals", h5_filename), 'r')
18 | dataset_list = h5_data.keys()
19 | y = np.array(h5_data[signal_name]['y']).transpose() # MATLAB saves data in column major order...
20 | if y.ndim == 2:
21 | y = y[..., None]
22 | u = np.array(h5_data[signal_name]['u']).transpose()
23 | if u.ndim == 2:
24 | u = u[..., None]
25 |
26 | fs = np.array(h5_data[signal_name]['fs']).item()
27 |
28 | N = y.shape[1]
29 | ts = 1.0/fs
30 | t = np.arange(N)*ts
31 |
32 | # In[Plot dataset]
33 | fig, ax = plt.subplots(2, 1, sharex=True)
34 | ax[0].plot(t, y[0, :, 0])
35 | ax[0].set_xlabel('Time (s)')
36 | ax[0].set_ylabel('Displacement (mm)')
37 | ax[0].grid(True)
38 | ax[1].plot(t, u[0, :, 0])
39 | ax[1].set_xlabel('Time (s)')
40 | ax[1].set_ylabel('Force (N)')
41 | ax[1].grid(True)
42 | plt.show()
43 |
--------------------------------------------------------------------------------
/examples/BW/BW_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import h5py
3 | import numpy as np
4 | import torch
5 | import matplotlib
6 | import matplotlib.pyplot as plt
7 | from dynonet.lti import MimoLinearDynamicalOperator
8 | import dynonet.metrics
9 | from dynonet.static import MimoStaticNonLinearity
10 |
11 | if __name__ == '__main__':
12 |
13 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
14 |
15 | # In[Settings]
16 | #h5_filename = 'train.h5'
17 | h5_filename = 'test.h5'
18 | signal_name = 'multisine'
19 | #signal_name = 'sinesweep' # available in test
20 |
21 | model_name = "model_BW"
22 |
23 |
24 | # In[Load dataset]
25 |
26 | h5_data = h5py.File(os.path.join("data", "Test signals", h5_filename), 'r')
27 | dataset_list = h5_data.keys()
28 | y = np.array(h5_data[signal_name]['y']).transpose() # MATLAB saves data in column major order...
29 | if y.ndim == 2:
30 | y = y[..., None]
31 | u = np.array(h5_data[signal_name]['u']).transpose()
32 | if u.ndim == 2:
33 | u = u[..., None]
34 |
35 | fs = np.array(h5_data[signal_name]['fs']).item()
36 |
37 | N = y.shape[1]
38 | ts = 1.0/fs
39 | t = np.arange(N)*ts
40 |
41 |
42 | # In[Scale data]
43 | scaler_y = 0.0006 # approx std(y_train)
44 | scaler_u = 50 # approx std(u_train)
45 |
46 | y = y/scaler_y
47 | u = u/scaler_u
48 |
49 | # In[Data to float 32]
50 | y = y.astype(np.float32)
51 | u = u.astype(np.float32)
52 | t = t.astype(np.float32)
53 |
54 | # In[Instantiate models]
55 |
56 | # Model blocks
57 | G1 = MimoLinearDynamicalOperator(1, 8, n_b=3, n_a=3, n_k=1)
58 | F1 = MimoStaticNonLinearity(8, 4, n_hidden=10)
59 | G2 = MimoLinearDynamicalOperator(4, 4, n_b=3, n_a=3)
60 | F2 = MimoStaticNonLinearity(4, 1, n_hidden=10)
61 | G3 = MimoLinearDynamicalOperator(1, 1, n_b=2, n_a=2, n_k=1)
62 |
63 | # Load identified model parameters
64 | model_folder = os.path.join("models", model_name)
65 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pkl")))
66 | F1.load_state_dict(torch.load(os.path.join(model_folder, "F1.pkl")))
67 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pkl")))
68 | F2.load_state_dict(torch.load(os.path.join(model_folder, "F2.pkl")))
69 | G3.load_state_dict(torch.load(os.path.join(model_folder, "G3.pkl")))
70 |
71 | # Model structure
72 | def model(u_in):
73 | y1_lin = G1(u_in)
74 | y1_nl = F1(y1_lin)
75 | y2_lin = G2(y1_nl)
76 | y_branch1 = F2(y2_lin)
77 |
78 | y_branch2 = G3(u_in)
79 | y_hat = y_branch1 + y_branch2
80 | return y_hat
81 |
82 | # In[Simulate]
83 | u_torch = torch.tensor(u)
84 | y_hat = model(u_torch)
85 |
86 | # In[Detach & organize]
87 | y_hat = y_hat.detach().numpy()[0, :, :]
88 | y = y[0, :, :]
89 | u = u[0, :, :]
90 |
91 | # In[Plot]
92 | e = y - y_hat
93 | plt.figure()
94 | plt.plot(t, y, 'k', label="$y$")
95 | plt.plot(t, y_hat, 'b', label="$\hat y$")
96 | plt.plot(t, e, 'r', label="$e$")
97 | plt.legend(loc='upper left')
98 | plt.grid(True)
99 |
100 | # In[Metrics]
101 | n_skip = 300
102 | e_rms = dynonet.metrics.error_rmse(scaler_y*y[n_skip:], scaler_y*y_hat[n_skip:])[0]
103 | fit_idx = dynonet.metrics.fit_index(y[n_skip:], y_hat[n_skip:])[0]
104 | r_sq = dynonet.metrics.r_squared(y[n_skip:], y_hat[n_skip:])[0]
105 |
106 | print(f"RMSE: {e_rms:.2E} mm\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.2f}")
107 |
108 |
109 | # In[Plot for paper]
110 | t_test_start = 5900
111 | len_plot = 400
112 |
113 | plt.figure(figsize=(4, 3))
114 | plt.plot(t[t_test_start:t_test_start+len_plot], y[t_test_start:t_test_start+len_plot], 'k', label="$\mathbf{y}^{\mathrm{meas}}$")
115 | plt.plot(t[t_test_start:t_test_start+len_plot], y_hat[t_test_start:t_test_start+len_plot], 'b--', label="$\mathbf{y}$")
116 | plt.plot(t[t_test_start:t_test_start+len_plot], e[t_test_start:t_test_start+len_plot], 'r', label="$\mathbf{e}$")
117 | plt.xlabel('Time (s)')
118 | plt.ylabel('Displacement (mm)')
119 | plt.legend(loc='upper right')
120 | plt.tight_layout()
121 | plt.grid(True)
122 | plt.show()
123 | # plt.savefig('BW_timetrace.pdf')
--------------------------------------------------------------------------------
/examples/BW/BW_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/BW/BW_timetrace.pdf
--------------------------------------------------------------------------------
/examples/BW/README.md:
--------------------------------------------------------------------------------
1 | # BW Example
2 |
3 | Bouc-Wen: nonlinear dynamical system describing hysteretic effects in mechanical engineering. Steps to run:
4 |
5 | 1. Install the h5py package, if necessary.
6 | 2. ```python BW_train.py```
7 | 3. ```python BW_test.py```
8 | 4. ```python BW_plot.py```
9 |
10 |
11 |
--------------------------------------------------------------------------------
/examples/BW/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/BW/__init__.py
--------------------------------------------------------------------------------
/examples/BW/data/BoucWen_generate_train.m:
--------------------------------------------------------------------------------
1 | %
 2 | % Generation of the training set for the Bouc-Wen benchmark.
 3 | % It is adapted from the example file BoucWen_ExampleIntegration.m
 4 | % by J.P. Noel and M. Schoukens, available within the Bouc-Wen benchmark
 5 | % at http://nonlinearbenchmark.org/FILES/BENCHMARKS/BOUCWEN/BoucWenFiles.zip
 6 | % However, we save the result in hdf5 format for easier use in Python.
 7 | % To run this script, you also need to copy into this folder the file BoucWen_NewmarkIntegration.p,
 8 | % which is also available in the same zip file.
9 |
10 |
11 | %% Clean workspace
12 | clear;
13 | clc;
14 |
15 | %% Time integration parameters.
16 |
17 | fs = 750; % working sampling frequency.
18 | upsamp = 20; % upsampling factor to ensure integration accuracy.
19 | fsint = fs*upsamp; % integration sampling frequency.
20 | h = 1/fsint; % integration time step.
21 |
22 | %% Dataset path
23 | filename = 'train.h5';
24 | data_folder = 'Test signals';
25 |
26 | %% Excitation signal design.
27 |
28 | P = 5; % number of excitation periods.
29 | N = 8192; % number of points per period.
30 | Nint = N*upsamp; % number of points per period during integration.
31 |
32 | fmin = 5; % excitation bandwidth.
33 | fmax = 150;
34 |
35 | A = 50; % excitation amplitude.
36 |
37 | Pfilter = 1; % extra period to avoid edge effects during low-pass filtering (see line 59).
38 | P = P + Pfilter;
39 |
40 | U = zeros(Nint,1); % definition of the multisine excitation.
41 | fres = fsint/Nint;
42 | exclines = 1:ceil(fmax/fres);
43 | exclines(exclines < floor(fmin/fres)) = [];
44 |
45 | U(exclines+1) = exp(complex(0,2*pi*rand(size(exclines))));
46 | u = 2*real(ifft(U));
47 | u = A*u/std(u);
48 | u = repmat(u,[P 1]);
49 |
50 | %% Time integration.
51 |
52 | y0 = 0; % initial conditions.
53 | dy0 = 0;
54 | z0 = 0;
55 |
56 | start = tic;
57 | y = BoucWen_NewmarkIntegration(h,u,y0,dy0,z0);
58 | stop = toc(start);disp(['Duration of the time integration: ',num2str(stop),' s.']);
59 |
60 | %% Low-pass filtering and downsampling.
61 |
62 | drate = factor(upsamp); % prime factor decomposition.
63 |
64 | for k=1:length(drate)
65 | y = decimate(y,drate(k),'fir');
66 | end %k
67 |
68 | u = downsample(u,upsamp);
69 |
70 | %% Removal of the last simulated period to eliminate the edge effects due to the low-pass filter.
71 |
72 | y = y(1:(P-1)*N);
73 | u = u(1:(P-1)*N,:);
74 | P = P-1;
75 |
76 |
77 | %% Prepare data %%
78 | N = length(y);
79 |
80 | u = transpose(u);
81 | %% Save results in an hdf file %%
82 |
83 |
84 | filepath = fullfile(data_folder, filename);
85 | h5create(filepath, '/multisine/u', size(u))
86 | h5write(filepath, '/multisine/u', u)
87 |
88 | h5create(filepath, '/multisine/y', size(y))
89 | h5write(filepath, '/multisine/y', y)
90 |
91 | h5create(filepath, '/multisine/fs', size(fs))
92 | h5write(filepath, '/multisine/fs', fs)
93 |
94 | h5disp(filepath)
95 |
--------------------------------------------------------------------------------
/examples/BW/data/README.txt:
--------------------------------------------------------------------------------
1 | For this benchmark, training and test datasets in hdf5 format are available in the 'Test signals' subfolder.
2 | The original test dataset in .mat format and the Matlab script BoucWen_NewmarkIntegration.p, required
3 | by BoucWen_generate_train.m to create the training set, may be downloaded from:
4 |
5 | http://nonlinearbenchmark.org/FILES/BENCHMARKS/BOUCWEN/BoucWenFiles.zip
--------------------------------------------------------------------------------
/examples/BW/data/Test signals/test.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/BW/data/Test signals/test.h5
--------------------------------------------------------------------------------
/examples/BW/data/Test signals/train.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/BW/data/Test signals/train.h5
--------------------------------------------------------------------------------
/examples/EMPS/EMPS_plot.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use("TkAgg")
3 | import os
4 | import pandas as pd
5 | import numpy as np
6 | import scipy as sp
7 | import scipy.io
8 | import scipy.signal
9 | import torch
10 | import torch.optim as optim
11 | import time
12 | import matplotlib.pyplot as plt
13 | import sys
14 | sys.path.append(os.path.join("..", ".."))
15 |
16 |
17 | if __name__ == '__main__':
18 |
19 | # In[Set seed for reproducibility]
20 | np.random.seed(0)
21 | torch.manual_seed(0)
22 |
23 | # In[Load dataset]
24 | emps_data = sp.io.loadmat(os.path.join("data", "DATA_EMPS.mat"))
25 | q_ref = emps_data['qg'].astype(np.float32)
26 | q_meas = emps_data['qm'].astype(np.float32)
27 | u_in = emps_data['vir'].astype(np.float32)
28 | time_exp = emps_data['t'].astype(np.float32)
29 | # d_N = emps_data['pulses_N']
30 | ts = np.mean(np.diff(time_exp.ravel()))#time_exp[1] - time_exp[0]
31 |
32 | # Design a differentiator filter to estimate unmeasured velocities from noisy, measured positions
33 | fs = 1/ts # Sample rate, Hz
34 | cutoff = 10.0 # Desired cutoff frequency, Hz
35 | trans_width = 100 # Width of transition from pass band to stop band, Hz
36 | n_taps = 32 # Size of the FIR filter.
37 | taps = scipy.signal.remez(n_taps, [0, cutoff, cutoff + trans_width, 0.5 * fs], [2 * np.pi * 2 * np.pi * 10 * 1.5, 0], Hz=fs, type='differentiator')
38 |
39 | # Filter positions to estimate velocities
40 | x_est = np.zeros((q_ref.shape[0], 2), dtype=np.float32)
41 | x_est[:, 0] = q_meas[:, 0]
42 | v_est = np.convolve(x_est[:, 0], taps, 'same') # signal.lfilter(taps, 1, y_meas[:,0])*2*np.pi
43 | x_est[:, 1] = np.copy(v_est)
44 | x_est[0:n_taps, [1]] = x_est[n_taps + 1, [1]]
45 | x_est[-n_taps:, [1]] = x_est[-n_taps - 1, [1]]
46 |
47 |
48 | # Simulation plot
49 | fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
50 | ax[0].plot(time_exp, q_ref, 'k--', label='$q_{\mathrm{ref}}$')
51 | ax[0].plot(time_exp, q_meas, 'k', label='$q_{\mathrm{meas}}$')
52 | ax[0].legend(loc='upper right')
53 | ax[0].grid(True)
54 | ax[0].set_ylabel("Position (m)")
55 |
56 | ax[1].plot(time_exp, x_est[:, 1], 'k--', label='$v_{\mathrm{est}}$')
57 | ax[1].grid(True)
58 | ax[1].set_ylabel("Velocity (m/s)")
59 |
60 | ax[2].plot(time_exp, u_in, 'k*', label='$u_{in}$')
61 |
62 | ax[2].set_ylabel("Input (V)")
63 | ax[2].grid(True)
64 | ax[2].set_xlabel("Time (s)")
65 | plt.show()
66 |
--------------------------------------------------------------------------------
/examples/EMPS/EMPS_test.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use("TkAgg")
3 | import os
4 | import numpy as np
5 | import scipy as sp
6 | import scipy.io
7 | import torch
8 | import matplotlib.pyplot as plt
 9 | from dynonet.lti import MimoLinearDynamicalOperator
10 | from dynonet.static import MimoStaticNonLinearity
11 | import dynonet.metrics
12 |
13 | if __name__ == '__main__':
14 |
15 |
16 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
17 | #matplotlib.rc('text', usetex=True)
18 |
19 | # In[Set seed for reproducibility]
20 | np.random.seed(0)
21 | torch.manual_seed(0)
22 |
23 | # In[Settings]
24 |
25 | model_name = 'EMPS_model'
26 | dataset = 'test'
27 | if dataset == 'id':
28 | dataset_filename = 'DATA_EMPS.mat'
29 | elif dataset == 'test':
30 | dataset_filename = 'DATA_EMPS_PULSES.mat'
31 |
32 | # In[Load dataset]
33 |
34 | emps_data = sp.io.loadmat(os.path.join("data", dataset_filename))
35 | y_ref = emps_data['qg'].astype(np.float32)
36 | y_meas = emps_data['qm'].astype(np.float32)
37 | u_in = emps_data['vir'].astype(np.float32)
38 | time_exp = emps_data['t'].astype(np.float32)
39 | # d_N = emps_data['pulses_N']
40 | ts = np.mean(np.diff(time_exp.ravel())) #time_exp[1] - time_exp[0]
41 |
42 | v_est = np.diff(y_meas, axis=0) / ts
43 | v_est = np.r_[[[0]], v_est]
44 |
45 |
46 | # In[Instantiate models]
47 |
48 | # Model blocks
49 | G1 = MimoLinearDynamicalOperator(1, 10, n_b=2, n_a=2, n_k=1)
50 | # Static non-linearity sandwiched between the two linear blocks
51 | F1 = MimoStaticNonLinearity(10, 5, activation='tanh')
52 | G2 = MimoLinearDynamicalOperator(5, 1, n_b=2, n_a=2, n_k=0)
53 |
54 | # Load identified model parameters
55 | model_folder = os.path.join("models", model_name)
56 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pkl")))
57 | F1.load_state_dict(torch.load(os.path.join(model_folder, "F1.pkl")))
58 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pkl")))
59 |
60 | # Model structure
61 | def model(u_in):
62 | y_lin_1 = G1(u_in)
63 | v_hat = F1(y_lin_1)
64 | v_hat = G2(v_hat)
65 | y_hat = torch.cumsum(v_hat, dim=1) * ts  # integrate the estimated velocity to get position
66 | return y_hat, v_hat
67 |
68 | # In[Simulate]
69 | u_fit_torch = torch.tensor(u_in[None, :, :])
70 | y_hat, v_hat = model(u_fit_torch)
71 |
72 | # In[Detach]
73 | y_hat = y_hat.detach().numpy()[0, :, :]
74 | v_hat = v_hat.detach().numpy()[0, :, :]
75 |
76 | # In[Plot]
77 | # Simulation plot
78 | fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
79 | ax[0].plot(time_exp, y_meas, 'k', label='$y_{\mathrm{meas}}$')
80 | ax[0].plot(time_exp, y_hat, 'r', label='$y_{\mathrm{sim}}$')
81 | ax[0].legend(loc='upper right')
82 | ax[0].grid(True)
83 | ax[0].set_ylabel("Position (m)")
84 |
85 | ax[1].plot(time_exp, v_est, 'k', label='$v_{\mathrm{est}}$')
86 | #ax[1].plot(time_exp, v_hat_np, 'r', label='$v_{\mathrm{sim}}$')
87 | ax[1].grid(True)
88 | ax[1].legend(loc='upper right')
89 | ax[1].set_ylabel("Velocity (m/s)")
90 |
91 | ax[2].plot(time_exp, u_in, 'k*', label='$u_{in}$')
92 |
93 | ax[2].set_ylabel("Input (V)")
94 | ax[2].grid(True)
95 | ax[2].set_xlabel("Time (s)")
96 | plt.show()
97 |
98 | # In[Metrics]
99 | e_rms = dynonet.metrics.error_rmse(y_meas, y_hat)[0]
100 | fit_idx = dynonet.metrics.fit_index(y_meas, y_hat)[0]
101 | r_sq = dynonet.metrics.r_squared(y_meas, y_hat)[0]
102 |
103 | print(f"RMSE: {e_rms:.2E} mm\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.2f}")
104 |
105 | # In[Plot for paper]
106 | t_test_start = 5900
107 | len_plot = 400
108 |
109 | plt.figure(figsize=(4, 3))
110 | plt.plot(time_exp, y_meas, 'k', label='$\mathbf{y}^{\mathrm{meas}}$')
111 | plt.plot(time_exp, y_hat, 'b', label='$\mathbf{y}$')
112 | plt.plot(time_exp, y_meas - y_hat, 'r', label='$\mathbf{e}$')
113 | plt.legend(loc='upper right')
114 | plt.grid(True)
115 | plt.ylabel("Position (m)")
116 | plt.xlabel("Time (s)")
117 | plt.tight_layout()
118 | plt.show()
119 | # plt.savefig('EMPS_timetrace.pdf')
120 |
121 |
--------------------------------------------------------------------------------
/examples/EMPS/EMPS_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/EMPS/EMPS_timetrace.pdf
--------------------------------------------------------------------------------
/examples/EMPS/EMPS_train.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | matplotlib.use("TkAgg")
3 | import os
4 | import numpy as np
5 | import scipy as sp
6 | import scipy.io
7 | import torch
8 | import time
9 | import matplotlib.pyplot as plt
10 | from dynonet.lti import MimoLinearDynamicalOperator
11 | from dynonet.static import MimoStaticNonLinearity
12 |
13 | if __name__ == '__main__':
14 |
15 | # In[Set seed for reproducibility]
16 | np.random.seed(0)
17 | torch.manual_seed(0)
18 |
19 | # In[]
20 | lr = 1e-4
21 | num_iter = 50000
22 | msg_freq = 100
23 | model_name = 'EMPS_model'
24 |
25 | # In[Load dataset]
26 | emps_data = sp.io.loadmat(os.path.join("data", "DATA_EMPS.mat"))
27 | y_ref = emps_data['qg'].astype(np.float32)
28 | y_meas = emps_data['qm'].astype(np.float32)
29 | u_in = emps_data['vir'].astype(np.float32)
30 | time_exp = emps_data['t'].astype(np.float32)
31 | # d_N = emps_data['pulses_N']
32 | ts = np.mean(np.diff(time_exp.ravel())) #time_exp[1] - time_exp[0]
33 |
34 | v_est = np.diff(y_meas, axis=0) / ts
35 | v_est = np.r_[[[0]], v_est]
36 |
37 |
38 | # In[Instantiate models]
39 |
40 | # Model blocks
41 | G1 = MimoLinearDynamicalOperator(1, 10, n_b=2, n_a=2, n_k=1)
42 | # Static non-linearity sandwiched between the two linear blocks
43 | F1 = MimoStaticNonLinearity(10, 5, activation='tanh')
44 | G2 = MimoLinearDynamicalOperator(5, 1, n_b=2, n_a=2, n_k=0)
45 |
46 | # Model structure
47 | def model(u_in):
48 | y_lin_1 = G1(u_in)
49 | v_hat = F1(y_lin_1)
50 | v_hat = G2(v_hat)
51 | y_hat = torch.cumsum(v_hat, dim=1) * ts  # integrate the estimated velocity to get position
52 | return y_hat, v_hat
53 |
54 | # In[Optimizer]
55 | optimizer = torch.optim.Adam([
56 | {'params': G1.parameters(), 'lr': lr},
57 | {'params': F1.parameters(), 'lr': lr},
58 | {'params': G2.parameters(), 'lr': lr},
59 | ], lr=lr)
60 |
61 | # In[Prepare tensors]
62 |
63 | #q_meas = (q_meas - 1.23)/0.08
64 | u_fit_torch = torch.tensor(u_in[None, :, :])
65 | y_fit_torch = torch.tensor(y_meas[None, :, :])
66 |
67 | # In[Train]
68 | LOSS = []
69 | start_time = time.time()
70 | for itr in range(0, num_iter):
71 |
72 | optimizer.zero_grad()
73 |
74 | y_hat, v_hat = model(u_fit_torch)
75 |
76 | err_fit = y_fit_torch - y_hat
77 | loss = torch.mean(err_fit ** 2) * 10
78 |
79 | LOSS.append(loss.item())
80 | if itr % msg_freq == 0:
81 | print(f'Iter {itr} | Fit Loss {loss:.6f}')
82 |
83 | loss.backward()
84 | optimizer.step()
85 |
86 | train_time = time.time() - start_time
87 |
88 | print(f"\nTrain time: {train_time:.2f}") # 1900 seconds, loss was still going down
89 |
90 | # In[Save model]
91 |
92 | if model_name is not None:
93 | model_folder = os.path.join("models", model_name)
94 | if not os.path.exists(model_folder):
95 | os.makedirs(model_folder)
96 |
97 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pkl"))
98 | torch.save(F1.state_dict(), os.path.join(model_folder, "F1.pkl"))
99 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pkl"))
100 |
101 | # In[Detach]
102 | y_hat_np = y_hat.detach().numpy()[0, :, 0]
103 | v_hat_np = v_hat.detach().numpy()[0, :, 0]
104 |
105 | # In[Plot loss]
106 | fig, ax = plt.subplots(figsize=(6, 7.5))
107 | ax.plot(LOSS)
108 |
109 | # In[Plot]
110 | # Simulation plot
111 | fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
112 | ax[0].plot(time_exp, y_meas, 'k', label='$q_{\mathrm{meas}}$')
113 | ax[0].plot(time_exp, y_hat_np, 'r', label='$q_{\mathrm{sim}}$')
114 | ax[0].legend(loc='upper right')
115 | ax[0].grid(True)
116 | ax[0].set_ylabel("Position (m)")
117 |
118 | ax[1].plot(time_exp, v_est, 'k', label='$v_{\mathrm{est}}$')
119 | #ax[1].plot(time_exp, v_hat_np, 'r', label='$v_{\mathrm{sim}}$')
120 | ax[1].grid(True)
121 | ax[1].legend(loc='upper right')
122 | ax[1].set_ylabel("Velocity (m/s)")
123 |
124 | ax[2].plot(time_exp, u_in, 'k*', label='$u_{in}$')
125 |
126 | ax[2].set_ylabel("Input (V)")
127 | ax[2].grid(True)
128 | ax[2].set_xlabel("Time (s)")
129 | plt.show()
--------------------------------------------------------------------------------
/examples/EMPS/README.md:
--------------------------------------------------------------------------------
1 | # EMPS Example
2 |
3 | Example of an electromechanical positioning system, which can drive a prismatic joint of robots or machine tools. Steps to run:
4 |
5 | 1. Obtain the data, which includes a description, as directed in [data/README.txt](data/README.txt).
6 | 2. ```python EMPS_train.py```
7 | 3. ```python EMPS_test.py```
8 | 4. ```python EMPS_plot.py```
9 |
10 |
11 |
--------------------------------------------------------------------------------
/examples/EMPS/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/EMPS/__init__.py
--------------------------------------------------------------------------------
/examples/EMPS/data/README.txt:
--------------------------------------------------------------------------------
1 | Copy in this folder the files DATA_EMPS.mat and DATA_EMPS_PULSES.mat contained in the zip file:
2 | http://nonlinearbenchmark.org/FILES/BENCHMARKS/EMPS/EMPS.zip
--------------------------------------------------------------------------------
/examples/ParWH/.gitignore:
--------------------------------------------------------------------------------
1 | models
2 |
--------------------------------------------------------------------------------
/examples/ParWH/README.md:
--------------------------------------------------------------------------------
1 | # ParWH Example
2 |
3 | Steps to run:
4 |
5 | 1. Obtain the data as directed in [data/README.txt](data/README.txt).
6 | 2. ```python parWH_train.py```
7 | 3. ```python parWH_train_refine.py```
8 | 4. ```python parWH_train_single_dataset.py```
9 | 5. ```python parWH_test.py```
10 | 6. ```python parWH_plot_test.py```
11 | 7. ```python parWH_plot_id.py```
12 |
13 |
14 |
--------------------------------------------------------------------------------
/examples/ParWH/common.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class ParallelWHDataset(torch.utils.data.Dataset):
6 | """Face Landmarks dataset."""
7 |
8 | def __init__(self, data):
9 | """
10 | Args:
11 | data (torch.Tensor): Tensor with data organized in.
12 | """
13 | self.data = torch.tensor(data)
14 | self.n_amp, self.n_real, self.seq_len, self.n_channels = data.shape
15 | self.len = self.n_amp * self.n_real
16 | self._data = self.data.view(self.n_amp * self.n_real, self.seq_len, self.n_channels)
17 |
18 | def __len__(self):
19 | return self.len
20 |
21 | def __getitem__(self, idx):
22 | return self._data[idx, :, [0]], self._data[idx, :, [1]]
23 |
24 |
25 | class StaticNonLin(nn.Module):
26 |
27 | def __init__(self):
28 | super(StaticNonLin, self).__init__()
29 |
30 | self.net_1 = nn.Sequential(
31 | nn.Linear(1, 20),  # 1 input channel, 20 hidden units
32 | nn.ReLU(),
33 | nn.Linear(20, 1)
34 | )
35 |
36 | self.net_2 = nn.Sequential(
37 | nn.Linear(1, 20),  # 1 input channel, 20 hidden units
38 | nn.ReLU(),
39 | nn.Linear(20, 1)
40 | )
41 |
42 | def forward(self, u_lin):
43 |
44 | y_nl_1 = self.net_1(u_lin[..., [0]]) # Process blocks individually
45 | y_nl_2 = self.net_2(u_lin[..., [1]]) # Process blocks individually
46 | y_nl = torch.cat((y_nl_1, y_nl_2), dim=-1)
47 |
48 | return y_nl
49 |
50 |
51 | class StaticMimoNonLin(nn.Module):
52 |
53 | def __init__(self):
54 | super(StaticMimoNonLin, self).__init__()
55 |
56 | self.net = nn.Sequential(
57 | nn.Linear(2, 20),  # 2 input channels, 20 hidden units
58 | nn.ReLU(),
59 | nn.Linear(20, 2)
60 | )
61 |
62 | def forward(self, u_lin):
63 |
64 | y_nl = self.net(u_lin)  # Process the two channels jointly
65 | return y_nl
--------------------------------------------------------------------------------
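A minimal usage sketch for the `ParallelWHDataset` class above (the random array is purely illustrative; the shape follows the constants `nAmp`, `M`, `N` used in the training scripts, with `u` assumed on channel 0 and `y` on channel 1):

```python
import numpy as np
import torch
from common import ParallelWHDataset  # assuming the working directory is examples/ParWH

# (n_amp, n_real, seq_len, n_channels)
data = np.random.randn(5, 20, 16384, 2).astype(np.float32)
ds = ParallelWHDataset(data)           # len(ds) == n_amp * n_real == 100
loader = torch.utils.data.DataLoader(ds, batch_size=4, shuffle=True)
u_batch, y_batch = next(iter(loader))  # each of shape (4, 16384, 1)
```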
/examples/ParWH/data/README.txt:
--------------------------------------------------------------------------------
1 | Copy in this folder all .csv files from the .zip file:
2 |
3 | http://www.nonlinearbenchmark.org/FILES/BENCHMARKS/PARWH/ParWHFiles.zip
4 |
--------------------------------------------------------------------------------
/examples/ParWH/parWH_plot_id.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import os
4 | import matplotlib.pyplot as plt
5 |
6 | if __name__ == '__main__':
7 |
8 | N = 16384 # number of samples per period
9 | M = 20 # number of random phase multisine realizations
10 | P = 2 # number of periods
11 | nAmp = 5 # number of different amplitudes
12 |
13 | # Column names in the dataset
14 | COL_F = ['fs']
15 | TAG_U = 'u'
16 | TAG_Y = 'y'
17 |
18 | # Load dataset
19 | #df_X = pd.read_csv(os.path.join("data", "WH_CombinedZeroMultisineSinesweep.csv"))
20 | df_X = pd.read_csv(os.path.join("data", "ParWHData_Estimation_Level2.csv"))
21 | df_X.columns = ['amplitude', 'fs', 'lines'] + [TAG_U + str(i) for i in range(M)] + [TAG_Y + str(i) for i in range(M)] + ['?']
22 |
23 | # Extract data
24 | y = np.array(df_X['y0'], dtype=np.float32)
25 | u = np.array(df_X['u0'], dtype=np.float32)
26 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
27 | N = y.size
28 | ts = 1/fs
29 | t = np.arange(N)*ts
30 |
31 |
32 | # In[Plot]
33 | fig, ax = plt.subplots(2, 1, sharex=True)
34 | ax[0].plot(t, y, 'k', label="$y$")
35 | ax[0].legend()
36 | ax[0].grid()
37 |
38 | ax[1].plot(t, u, 'k', label="$u$")
39 | ax[1].legend()
40 | ax[1].grid()
41 | plt.show()
42 |
43 |
--------------------------------------------------------------------------------
/examples/ParWH/parWH_plot_test.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import os
4 | import matplotlib.pyplot as plt
5 |
6 | if __name__ == '__main__':
7 |
8 | N = 16384 # number of samples per period
9 | #M = 20 # number of random phase multisine realizations
10 | P = 2 # number of periods
11 | nAmp = 5 # number of different amplitudes
12 |
13 | # Column names in the dataset
14 | COL_F = ['fs']
15 | TAG_U = 'u'
16 | TAG_Y = 'y'
17 |
18 | # Load dataset
19 |
20 | #df_X = pd.read_csv(os.path.join("data", "ParWHData_Validation_Level1.csv"))
21 | df_X = pd.read_csv(os.path.join("data", "ParWHData_ValidationArrow.csv"))
22 |
23 | #df_X.columns = ['amplitude', 'fs', 'lines'] + [TAG_U + str(i) for i in range(M)] + [TAG_Y + str(i) for i in range(M)] + ['?']
24 |
25 | # Extract data
26 | y = np.array(df_X['y'], dtype=np.float32)
27 | u = np.array(df_X['u'], dtype=np.float32)
28 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
29 | N = y.size
30 | ts = 1/fs
31 | t = np.arange(N)*ts
32 |
33 |
34 | # In[Plot]
35 | fig, ax = plt.subplots(2, 1, sharex=True)
36 | ax[0].plot(t, y, 'k', label="$y$")
37 | ax[0].legend()
38 | ax[0].grid()
39 |
40 | ax[1].plot(t, u, 'k', label="$u$")
41 | ax[1].legend()
42 | ax[1].grid()
43 | plt.show()
44 |
45 |
--------------------------------------------------------------------------------
/examples/ParWH/parWH_train_single_dataset.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import os
4 | import torch
5 | from dynonet.lti import MimoLinearDynamicalOperator
6 | import torch.nn as nn
7 |
8 | import matplotlib.pyplot as plt
9 | import time
10 | import dynonet.metrics
11 |
12 | class StaticNonLin(nn.Module):
13 |
14 | def __init__(self):
15 | super(StaticNonLin, self).__init__()
16 |
17 | self.net_1 = nn.Sequential(
18 | nn.Linear(1, 10), # 1 input channel, 10 hidden units
19 | nn.ReLU(),
20 | nn.Linear(10, 1)
21 | )
22 |
23 | self.net_2 = nn.Sequential(
24 | nn.Linear(1, 10), # 1 input channel, 10 hidden units
25 | nn.ReLU(),
26 | nn.Linear(10, 1)
27 | )
28 |
29 | def forward(self, u_lin):
30 |
31 | y_nl_1 = self.net_1(u_lin[..., [0]]) # Process blocks individually
32 | y_nl_2 = self.net_2(u_lin[..., [1]]) # Process blocks individually
33 | y_nl = torch.cat((y_nl_1, y_nl_2), dim=-1)
34 |
35 | return y_nl
36 |
37 |
38 | if __name__ == '__main__':
39 |
40 | lr = 1e-4
41 | num_iter = 100000
42 | test_freq = 100
43 | n_batch = 1
44 |
45 | N = 16384 # number of samples per period
46 | M = 20 # number of random phase multisine realizations
47 | P = 2 # number of periods
48 | nAmp = 5 # number of different amplitudes
49 |
50 | # Column names in the dataset
51 | COL_F = ['fs']
52 | TAG_U = 'u'
53 | TAG_Y = 'y'
54 |
55 | # Load dataset
56 | #df_X = pd.read_csv(os.path.join("data", "WH_CombinedZeroMultisineSinesweep.csv"))
57 | df_X = pd.read_csv(os.path.join("data", "ParWHData_Estimation_Level1.csv"))
58 | df_X.columns = ['amplitude', 'fs', 'lines'] + [TAG_U + str(i) for i in range(M)] + [TAG_Y + str(i) for i in range(M)] + ['?']
59 |
60 | # Extract data
61 | y = np.array(df_X['y0'], dtype=np.float32)
62 | u = np.array(df_X['u0'], dtype=np.float32)
63 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
64 | N = y.size
65 | ts = 1/fs
66 | t = np.arange(N)*ts
67 |
68 | u_torch = torch.tensor(u[None, :, None], dtype=torch.float, requires_grad=False)
69 | y_meas_torch = torch.tensor(y[None, :, None], dtype=torch.float, requires_grad=False)
70 |
71 | # In[Set-up model]
72 |
73 | # First linear section
74 | in_channels_1 = 1
75 | out_channels_1 = 2
76 | nb_1 = 3
77 | na_1 = 3
78 | y0_1 = torch.zeros((n_batch, na_1), dtype=torch.float)
79 | u0_1 = torch.zeros((n_batch, nb_1), dtype=torch.float)
80 | G1 = MimoLinearDynamicalOperator(in_channels_1, out_channels_1, nb_1, na_1)
81 |
82 | # Non-linear section
83 | F_nl = StaticNonLin()
84 |
85 | # Second linear section
86 | in_channels_2 = 2
87 | out_channels_2 = 1
88 | nb_2 = 3
89 | na_2 = 3
90 | y0_2 = torch.zeros((n_batch, na_2), dtype=torch.float)
91 | u0_2 = torch.zeros((n_batch, nb_2), dtype=torch.float)
92 | G2 = MimoLinearDynamicalOperator(in_channels_2, out_channels_2, nb_2, na_2)
93 |
94 | # In[Initialize linear systems]
95 | with torch.no_grad():
96 | G1.a_coeff[:, :, 0] = -0.9
97 | G1.b_coeff[:, :, 0] = 0.1
98 | G1.b_coeff[:, :, 1] = 0.1
99 |
100 | G2.a_coeff[:, :, 0] = -0.9
101 | G2.b_coeff[:, :, 0] = 0.1
102 | G2.b_coeff[:, :, 1] = 0.1
103 |
104 | # In[Setup optimizer]
105 | optimizer = torch.optim.Adam([
106 | {'params': G1.parameters(), 'lr': lr},
107 | {'params': F_nl.parameters(), 'lr': lr},
108 | {'params': G2.parameters(), 'lr': lr},
109 | ], lr=lr)
110 |
111 | # In[Training loop]
112 | LOSS = []
113 | start_time = time.time()
114 | for itr in range(0, num_iter):
115 |
116 | optimizer.zero_grad()
117 |
118 | # Simulate
119 | y_lin_1 = G1(u_torch, y0_1, u0_1)
120 | y_nl_1 = F_nl(y_lin_1)
121 | y_lin_2 = G2(y_nl_1, y0_2, u0_2)
122 |
123 | y_hat = y_lin_2
124 |
125 | # Compute fit loss
126 | err_fit = y_meas_torch - y_hat
127 | loss_fit = torch.mean(err_fit**2)
128 | loss = loss_fit
129 |
130 | LOSS.append(loss.item())
131 | if itr % test_freq == 0:
132 | print(f'Iter {itr} | Fit Loss {loss_fit:.8f}')
133 |
134 | # Optimize
135 | loss.backward()
136 | optimizer.step()
137 |
138 | train_time = time.time() - start_time
139 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
140 |
141 |
142 | # In[Save model]
143 | if not os.path.exists("models"):
144 | os.makedirs("models")
145 | model_filename = "model_WH"
146 |
147 | torch.save(G1.state_dict(), os.path.join("models", f"{model_filename}_G1.pkl"))
148 | torch.save(F_nl.state_dict(), os.path.join("models", f"{model_filename}_F_nl.pkl"))
149 | torch.save(G2.state_dict(), os.path.join("models", f"{model_filename}_G2.pkl"))
150 |
151 |
152 | # In[detach]
153 | y_hat_np = y_hat.detach().numpy()[0, :, 0]
154 |
155 | # In[Plot]
156 | fig, ax = plt.subplots(2, 1, sharex=True)
157 | ax[0].plot(t, y, 'k', label="$y$")
158 | ax[0].plot(t, y_hat_np, 'r', label="$\hat y$")
159 |
160 | ax[0].legend()
161 | ax[0].grid()
162 |
163 | ax[1].plot(t, u, 'k', label="$u$")
164 | ax[1].legend()
165 | ax[1].grid()
166 | plt.show()
167 |
168 | plt.figure()
169 | plt.plot(LOSS)
170 |
171 |
172 | # In[Metrics]
173 |
174 | idx_metric = range(0, N)
175 | e_rms = dynonet.metrics.error_rmse(y[idx_metric], y_hat_np[idx_metric])
176 | fit_idx = dynonet.metrics.fit_index(y[idx_metric], y_hat_np[idx_metric])
177 | r_sq = dynonet.metrics.r_squared(y[idx_metric], y_hat_np[idx_metric])
178 |
179 | print(f"RMSE: {e_rms:.4f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.1f}")
--------------------------------------------------------------------------------
/examples/RLC/README.md:
--------------------------------------------------------------------------------
1 | # RLC Example
2 |
3 | System identification example based on a simulated RLC circuit (the generation scripts produce both a linear and a nonlinear dataset). Steps to run (a minimal fitting sketch follows the list):
4 |
5 | 1. ```python RLC_generate_test.py```
6 | 2. ```python RLC_generate_id.py```
7 | 3. ```python RLC_train.py```
8 |
9 |
10 |
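11 | Below is a minimal sketch of the fitting step, condensed from `RLC_train.py` (file and column names are the ones produced by the generation scripts; the learning rate and iteration count are illustrative):
12 |
13 | ```python
14 | import os
15 | import numpy as np
16 | import pandas as pd
17 | import torch
18 | from dynonet.lti import SisoLinearDynamicalOperator
19 |
20 | # Load the dataset produced by RLC_generate_id.py
21 | df = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
22 | u = np.array(df[['V_IN']], dtype=np.float32)  # input voltage
23 | y = np.array(df[['V_C']], dtype=np.float32)   # output: capacitor voltage
24 |
25 | # dynoNet works on (batch, time, channel) tensors
26 | u_torch = torch.tensor(u[None, ...])
27 | y_torch = torch.tensor(y[None, ...])
28 |
29 | G = SisoLinearDynamicalOperator(n_b=2, n_a=2)  # second-order IIR block
30 | optimizer = torch.optim.Adam(G.parameters(), lr=1e-4)
31 | for itr in range(1000):  # RLC_train.py runs 20000 iterations
32 |     optimizer.zero_grad()
33 |     loss = torch.mean((y_torch - G(u_torch))**2)  # simulation error MSE
34 |     loss.backward()
35 |     optimizer.step()
36 | ```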
--------------------------------------------------------------------------------
/examples/RLC/RLC_MIMO_wiener_fit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import MimoLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 | import torch.nn as nn
9 |
10 |
11 | class StaticNonLin(nn.Module):
12 |
13 | def __init__(self):
14 | super(StaticNonLin, self).__init__()
15 |
16 | self.net = nn.Sequential(
17 | nn.Linear(1, 20), # 1 input channel, 20 hidden units
18 | nn.ReLU(),
19 | nn.Linear(20, 1)
20 | )
21 |
22 | def forward(self, u_lin):
23 | #u_lin = torch.transpose(u_lin, (-2), (-1))
24 | y_nl = u_lin + self.net(u_lin)
25 | #y_nl = torch.transpose(y_nl, (-2), (-1))
26 | return y_nl
27 |
28 |
29 | if __name__ == '__main__':
30 |
31 | # Set seed for reproducibility
32 | np.random.seed(0)
33 | torch.manual_seed(0)
34 |
35 | # Settings
36 | add_noise = True
37 | lr = 1e-4
38 | num_iter = 40000
39 | test_freq = 100
40 | n_batch = 1
41 | in_channels = 1
42 | out_channels = 1
43 | n_b = 2
44 | n_a = 2
45 |
46 | # Column names in the dataset
47 | COL_T = 'time'
48 | COL_X = ['V_C', 'I_L']
49 | COL_U = 'V_IN'
50 | COL_Y = 'V_C'
51 |
52 | # Load dataset
53 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_nl.csv"))
54 | t = np.array(df_X[COL_T], dtype=np.float32)
55 | #y = np.array(df_X[COL_Y], dtype=np.float32)
56 | x = np.array(df_X[COL_X], dtype=np.float32)
57 | u = np.array(df_X[COL_U], dtype=np.float32)
58 |
59 | # scale state
60 | x = x/np.array([100.0, 10.0])
61 |
62 | # Add measurement noise
63 | std_noise_V = add_noise * 0.1
64 | #y_nonoise = np.copy(1 + x[:, [0]] + x[:, [0]]**2)
65 | y_nonoise = np.copy(1 + x[:, 0] ** 3)
66 | y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V
67 |
68 |
69 | # Prepare data
70 | u_torch = torch.tensor(u[None, :, None], dtype=torch.float, requires_grad=False) # B, C, T
71 | y_meas_torch = torch.tensor(y_noise[None, :, None], dtype=torch.float)
72 | y_true_torch = torch.tensor(y_nonoise[None, :, None], dtype=torch.float)
73 | y_0 = torch.zeros((n_batch, n_a), dtype=torch.float)
74 | u_0 = torch.zeros((n_batch, n_b), dtype=torch.float)
75 |
76 |
77 | G = MimoLinearDynamicalOperator(in_channels, out_channels, n_b, n_a)
78 | nn_static = StaticNonLin()
79 |
80 | # Setup optimizer
81 | params_lin = G.parameters()
82 | optimizer = torch.optim.Adam([
83 | {'params': params_lin, 'lr': lr},
84 | {'params': nn_static.parameters(), 'lr': lr}
85 | ], lr=lr)
86 |
87 |
88 | # In[Train]
89 | LOSS = []
90 | start_time = time.time()
91 | for itr in range(0, num_iter):
92 |
93 | optimizer.zero_grad()
94 |
95 | # Simulate
96 | y_lin = G(u_torch) # zero initial conditions (y_0, u_0 are not passed)
97 | y_hat = nn_static(y_lin)
98 |
99 |
100 | # Compute fit loss
101 | err_fit = y_meas_torch - y_hat
102 | loss_fit = torch.mean(err_fit**2)
103 | loss = loss_fit
104 |
105 | LOSS.append(loss.item())
106 | if itr % test_freq == 0:
107 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
108 |
109 | # Optimize
110 | loss.backward()
111 | optimizer.step()
112 |
113 | train_time = time.time() - start_time
114 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
115 |
116 | # In[Plot]
117 | plt.figure()
118 | plt.plot(t, y_nonoise, 'k', label="$y$")
119 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
120 | plt.plot(t, y_hat.detach().numpy()[0, :, 0], 'b', label="$\hat y$")
121 | plt.legend()
122 |
123 | plt.figure()
124 | plt.plot(LOSS)
125 | plt.grid(True)
126 |
127 | # In[Plot]
128 | plt.figure()
129 | plt.plot(y_lin.detach(), y_hat.detach())
130 |
131 | plt.figure()
132 | plt.plot(x[:, [0]], y_nonoise)
133 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_SIMO_WH.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import MimoLinearDynamicalOperator
6 | from dynonet.static import MimoChannelWiseNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import torch.nn as nn
10 |
11 |
12 | if __name__ == '__main__':
13 |
14 | # Set seed for reproducibility
15 | np.random.seed(0)
16 | torch.manual_seed(0)
17 |
18 | # Settings
19 | add_noise = True
20 | lr = 1e-3
21 | num_iter = 40000
22 | test_freq = 100
23 | n_batch = 1
24 | n_b = 2
25 | n_a = 2
26 |
27 | # Column names in the dataset
28 | COL_T = ['time']
29 | COL_X = ['V_C', 'I_L']
30 | COL_U = ['V_IN']
31 | COL_Y = ['V_C']
32 |
33 | # Load dataset
34 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_nl.csv"))
35 | t = np.array(df_X[COL_T], dtype=np.float32)
36 | #y = np.array(df_X[COL_Y], dtype=np.float32)
37 | x = np.array(df_X[COL_X], dtype=np.float32)
38 | u = np.array(df_X[COL_U], dtype=np.float32)
39 |
40 | # scale state
41 | x = x/np.array([100.0, 10.0])
42 |
43 | # Add measurement noise
44 | std_noise_V = add_noise * 0.1
45 | #y_nonoise = np.copy(1 + x[:, [0]] + x[:, [0]]**2)
46 | y_nonoise = np.copy(x[:, [0, 1]]) #np.copy(1 + x[:, [0]] ** 3)
47 | y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V
48 |
49 | # Prepare data
50 | u_torch = torch.tensor(u[None, :, :], dtype=torch.float, requires_grad=False)
51 | y_meas_torch = torch.tensor(y_noise[None, :, :], dtype=torch.float)
52 | y_true_torch = torch.tensor(y_nonoise[None, :, :], dtype=torch.float)
53 | G1 = MimoLinearDynamicalOperator(in_channels=1, out_channels=2, n_b=n_b, n_a=n_a, n_k=1)
54 | nn_static = MimoChannelWiseNonLinearity(channels=2, n_hidden=10) #StaticChannelWiseNonLin(in_channels=2, out_channels=2, n_hidden=10)
55 | G2 = MimoLinearDynamicalOperator(in_channels=2, out_channels=2, n_b=n_b, n_a=n_a, n_k=1)
56 |
57 | # Setup optimizer
58 |
59 | optimizer = torch.optim.Adam([
60 | {'params': G1.parameters(), 'lr': lr},
61 | {'params': nn_static.parameters(), 'lr': lr},
62 | {'params': G2.parameters(), 'lr': lr},
63 | ], lr=lr)
64 |
65 |
66 | # In[Train]
67 | LOSS = []
68 | start_time = time.time()
69 | for itr in range(0, num_iter):
70 |
71 | optimizer.zero_grad()
72 |
73 | # Simulate
74 | y_lin = G1(u_torch)
75 | y_nl = nn_static(y_lin)
76 | # y_hat = G2(y_nl)  # second linear block currently bypassed
77 | y_hat = y_nl
78 |
79 | # Compute fit loss
80 | err_fit = y_meas_torch - y_hat
81 | loss_fit = torch.mean(err_fit**2)
82 | loss = loss_fit
83 |
84 | LOSS.append(loss.item())
85 | if itr % test_freq == 0:
86 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
87 |
88 | # Optimize
89 | loss.backward()
90 | optimizer.step()
91 |
92 | train_time = time.time() - start_time
93 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
94 |
95 | # In[Detach]
96 | y_hat = y_hat.detach().numpy()[0, :, :]
97 |
98 | # In[Plot]
99 | fig, ax = plt.subplots(2, 1)
100 | ax[0].plot(t, y_nonoise[:, 0], 'k', label="$y$")
101 | ax[0].plot(t, y_noise[:, 0], 'r', label="$y_{noise}$")
102 | ax[0].plot(t, y_hat[:, 0], 'b', label="$\hat y$")
103 | ax[0].legend()
104 | ax[0].grid()
105 |
106 | ax[1].plot(t, y_nonoise[:, 1], 'k', label="$y$")
107 | ax[1].plot(t, y_noise[:, 1], 'r', label="$y_{noise}$")
108 | ax[1].plot(t, y_hat[:, 1], 'b', label="$\hat y$")
109 | ax[1].legend()
110 | ax[1].grid()
111 |
112 | plt.figure()
113 | plt.plot(LOSS)
114 | plt.grid(True)
115 |
116 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_generate_id.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 1e-6
19 |
20 | omega_input = 150e3
21 | std_input = 80
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts) # skip initial transient so the filtered input reaches a steady-state regime
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | # _, u, _ = control.forced_response(Hu, te, e)  # 3-tuple return in older python-control
34 | _, u = control.forced_response(Hu, te, e)
35 |
36 |
37 | u = u[N_skip:]
38 | u = u /np.std(u) * std_input
39 |
40 | t_sim = np.arange(N_sim) * Ts
41 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
42 |
43 |
44 | def f_ODE(t,x):
45 | u = u_func(t).ravel()
46 | return fxu_ODE(t, x, u)
47 |
48 | def f_ODE_mod(t,x):
49 | u = u_func(t).ravel()
50 | return fxu_ODE_mod(t, x, u)
51 |
52 |
53 | x0 = np.zeros(2)
54 | f_ODE(0.0,x0)
55 | t_span = (t_sim[0],t_sim[-1])
56 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
57 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
58 |
59 | x1 = y1.y.T
60 | x2 = y2.y.T
61 |
62 | # In[plot]
63 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
64 | ax[0].plot(t_sim, x1[:,0],'b')
65 | ax[0].plot(t_sim, x2[:,0],'r')
66 | ax[0].set_xlabel('time (s)')
67 | ax[0].set_ylabel('Capacitor voltage (V)')
68 |
69 | ax[1].plot(t_sim, x1[:,1],'b')
70 | ax[1].plot(t_sim, x2[:,1],'r')
71 | ax[1].set_xlabel('time (s)')
72 | ax[1].set_ylabel('Inductor current (A)')
73 |
74 | ax[2].plot(t_sim, u,'b')
75 | ax[2].set_xlabel('time (s)')
76 | ax[2].set_ylabel('Input voltage (V)')
77 |
78 | ax[0].grid(True)
79 | ax[1].grid(True)
80 | ax[2].grid(True)
81 | plt.show()
82 |
83 | # In[Save]
84 | if not os.path.exists("data"):
85 | os.makedirs("data")
86 |
87 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
88 | COL_T = ['time']
89 | COL_X = ['V_C', 'I_L']
90 | COL_U = ['V_IN']
91 | COL_Y = ['V_C']
92 | COL = COL_T + COL_X + COL_U + COL_Y
93 | df_X = pd.DataFrame(X, columns=COL)
94 | df_X.to_csv(os.path.join("data", "RLC_data_id_lin.csv"), index=False)
95 |
96 |
97 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
98 | COL_T = ['time']
99 | COL_X = ['V_C', 'I_L']
100 | COL_U = ['V_IN']
101 | COL_Y = ['V_C']
102 | COL = COL_T + COL_X + COL_U + COL_Y
103 | df_X = pd.DataFrame(X, columns=COL)
104 | df_X.to_csv(os.path.join("data", "RLC_data_id_nl.csv"), index=False)
105 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_generate_test.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 200e3
21 | std_input = 60
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts) # skip initial transient so the filtered input reaches a steady-state regime
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | # _, u, _ = control.forced_response(Hu, te, e)  # 3-tuple return in older python-control
34 | _, u = control.forced_response(Hu, te, e)
35 |
36 |
37 | u = u[N_skip:]
38 | u = u /np.std(u) * std_input
39 |
40 | t_sim = np.arange(N_sim) * Ts
41 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
42 |
43 |
44 | def f_ODE(t,x):
45 | u = u_func(t).ravel()
46 | return fxu_ODE(t, x, u)
47 |
48 | def f_ODE_mod(t,x):
49 | u = u_func(t).ravel()
50 | return fxu_ODE_mod(t, x, u)
51 |
52 |
53 | x0 = np.zeros(2)
54 | f_ODE(0.0,x0)
55 | t_span = (t_sim[0],t_sim[-1])
56 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
57 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
58 |
59 | x1 = y1.y.T
60 | x2 = y2.y.T
61 |
62 | # In[plot]
63 | fig, ax = plt.subplots(3,1, figsize=(8,8), sharex=True)
64 | ax[0].plot(t_sim, x1[:,0],'b')
65 | ax[0].plot(t_sim, x2[:,0],'r')
66 | ax[0].set_xlabel('time (s)')
67 | ax[0].set_ylabel('Capacitor voltage (V)')
68 |
69 | ax[1].plot(t_sim, x1[:,1],'b')
70 | ax[1].plot(t_sim, x2[:,1],'r')
71 | ax[1].set_xlabel('time (s)')
72 | ax[1].set_ylabel('Inductor current (A)')
73 |
74 | ax[2].plot(t_sim, u,'b')
75 | ax[2].set_xlabel('time (s)')
76 | ax[2].set_ylabel('Input voltage (V)')
77 |
78 | ax[0].grid(True)
79 | ax[1].grid(True)
80 | ax[2].grid(True)
81 | plt.show()
82 |
83 | # In[Save]
84 | if not os.path.exists("data"):
85 | os.makedirs("data")
86 |
87 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
88 | COL_T = ['time']
89 | COL_X = ['V_C', 'I_L']
90 | COL_U = ['V_IN']
91 | COL_Y = ['V_C']
92 | COL = COL_T + COL_X + COL_U + COL_Y
93 | df_X = pd.DataFrame(X, columns=COL)
94 | # df_X.to_csv(os.path.join("data", "RLC_data_id_nl.csv"), index=False)
95 |
96 |
97 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
98 | COL_T = ['time']
99 | COL_X = ['V_C', 'I_L']
100 | COL_U = ['V_IN']
101 | COL_Y = ['V_C']
102 | COL = COL_T + COL_X + COL_U + COL_Y
103 | df_X = pd.DataFrame(X, columns=COL)
104 | df_X.to_csv(os.path.join("data", "RLC_data_test.csv"), index=False)
105 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 |
9 |
10 | if __name__ == '__main__':
11 |
12 | # In[Set seed for reproducibility]
13 | np.random.seed(0)
14 | torch.manual_seed(0)
15 |
16 | # In[Settings]
17 | model_name = 'IIR'
18 | add_noise = False
19 | lr = 1e-4
20 | num_iter = 20000
21 | test_freq = 100
22 | n_batch = 1
23 | n_b = 2
24 | n_a = 2
25 |
26 | # In[Column names in the dataset]
27 | COL_T = ['time']
28 | COL_X = ['V_C', 'I_L']
29 | COL_U = ['V_IN']
30 | COL_Y = ['V_C']
31 |
32 | # In[Load dataset]
33 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
34 | t = np.array(df_X[COL_T], dtype=np.float32)
35 | y = np.array(df_X[COL_Y], dtype=np.float32)
36 | x = np.array(df_X[COL_X], dtype=np.float32)
37 | u = np.array(df_X[COL_U], dtype=np.float32)
38 |
39 | # In[Add measurement noise]
40 | std_noise_V = add_noise * 10.0
41 | std_noise_I = add_noise * 1.0
42 | std_noise = np.array([std_noise_V, std_noise_I])
43 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
44 | x_noise = x_noise.astype(np.float32)
45 |
46 | # In[Output]
47 | y_noise = np.copy(x_noise[:, [0]])
48 | y_nonoise = np.copy(x[:, [0]])
49 |
50 |
51 | # Prepare data
52 | u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
53 | y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
54 | y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)
55 |
56 | # In[Second-order dynamical system custom defined]
57 | G = SisoLinearDynamicalOperator(n_b, n_a)
58 |
59 | with torch.no_grad():
60 | G.b_coeff[0, 0, 0] = 0.01
61 | G.b_coeff[0, 0, 1] = 0.0
62 |
63 | G.a_coeff[0, 0, 0] = -0.9
64 | G.b_coeff[0, 0, 1] = 0.01 # note: this overrides the 0.0 assigned above
65 |
66 | # In[Setup optimizer]
67 | optimizer = torch.optim.Adam([
68 | {'params': G.parameters(), 'lr': lr},
69 | ], lr=lr)
70 |
71 | # In[Train]
72 | LOSS = []
73 | start_time = time.time()
74 | for itr in range(0, num_iter):
75 |
76 | optimizer.zero_grad()
77 |
78 | # Simulate
79 | y_hat = G(u_torch)
80 |
81 | # Compute fit loss
82 | err_fit = y_meas_torch - y_hat
83 | loss_fit = torch.mean(err_fit**2)
84 | loss = loss_fit
85 |
86 | LOSS.append(loss.item())
87 | if itr % test_freq == 0:
88 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
89 |
90 | # Optimize
91 | loss.backward()
92 | optimizer.step()
93 |
94 | train_time = time.time() - start_time
95 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
96 |
97 | # In[Save model]
98 |
99 | model_folder = os.path.join("models", model_name)
100 | if not os.path.exists(model_folder):
101 | os.makedirs(model_folder)
102 | torch.save(G.state_dict(), os.path.join(model_folder, "G.pkl"))
103 | # In[Detach and reshape]
104 | y_hat = y_hat.detach().numpy()[0, ...]
105 | # In[Plot]
106 | plt.figure()
107 | plt.plot(t, y_nonoise, 'k', label="$y$")
108 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
109 | plt.plot(t, y_hat, 'b', label="$\hat y$")
110 | plt.legend()
111 | plt.show()
112 |
113 | plt.figure()
114 | plt.plot(LOSS)
115 | plt.grid(True)
116 | plt.show()
117 |
118 |
119 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_train_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoFirLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 |
9 |
10 | if __name__ == '__main__':
11 |
12 | # In[Set seed for reproducibility]
13 | np.random.seed(0)
14 | torch.manual_seed(0)
15 |
16 | # In[Settings]
17 | add_noise = False
18 | lr = 1e-4
19 | num_iter = 20000
20 | test_freq = 100
21 | n_batch = 1
22 | n_b = 256 # number of FIR coefficients
23 |
24 | # In[Column names in the dataset]
25 | COL_T = ['time']
26 | COL_X = ['V_C', 'I_L']
27 | COL_U = ['V_IN']
28 | COL_Y = ['V_C']
29 |
30 | # In[Load dataset]
31 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
32 | t = np.array(df_X[COL_T], dtype=np.float32)
33 | y = np.array(df_X[COL_Y], dtype=np.float32)
34 | x = np.array(df_X[COL_X], dtype=np.float32)
35 | u = np.array(df_X[COL_U], dtype=np.float32)
36 |
37 | # In[Add measurement noise]
38 | std_noise_V = add_noise * 10.0
39 | std_noise_I = add_noise * 1.0
40 | std_noise = np.array([std_noise_V, std_noise_I])
41 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
42 | x_noise = x_noise.astype(np.float32)
43 |
44 | # In[Output]
45 | y_noise = np.copy(x_noise[:, [0]])
46 | y_nonoise = np.copy(x[:, [0]])
47 |
48 |
49 | # Prepare data
50 | u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
51 | y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
52 | y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)
53 |
54 | # In[Second-order dynamical system custom defined]
55 | G = SisoFirLinearDynamicalOperator(n_b)
56 |
57 |
58 | # In[Setup optimizer]
59 | optimizer = torch.optim.Adam([
60 | {'params': G.parameters(), 'lr': lr},
61 | ], lr=lr)
62 |
63 | # In[Train]
64 | LOSS = []
65 | start_time = time.time()
66 | for itr in range(0, num_iter):
67 |
68 | optimizer.zero_grad()
69 |
70 | # Simulate
71 | y_hat = G(u_torch)
72 |
73 | # Compute fit loss
74 | err_fit = y_meas_torch - y_hat
75 | loss_fit = torch.mean(err_fit**2)
76 | loss = loss_fit
77 |
78 | LOSS.append(loss.item())
79 | if itr % test_freq == 0:
80 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
81 |
82 | # Optimize
83 | loss.backward()
84 | optimizer.step()
85 |
86 | train_time = time.time() - start_time
87 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
88 |
89 | # In[Detach and reshape]
90 | y_hat = y_hat.detach().numpy()[0, ...]
91 |
92 | # In[Plot]
93 | plt.figure()
94 | plt.plot(t, y_nonoise, 'k', label="$y$")
95 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
96 | plt.plot(t, y_hat, 'b', label="$\hat y$")
97 | plt.legend()
98 |
99 | plt.figure()
100 | plt.plot(LOSS)
101 | plt.grid(True)
102 |
103 |
104 | # In[FIR coefficients]
105 |
106 | g_pars = G.b_coeff[0, 0, :].detach().numpy()
107 | g_pars = g_pars[::-1]
108 | fig, ax = plt.subplots()
109 | ax.plot(g_pars)
110 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_train_process_noise.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | import control.matlab
6 | from dynonet.lti import SisoLinearDynamicalOperator
7 | import matplotlib.pyplot as plt
8 | import time
9 |
10 |
11 | if __name__ == '__main__':
12 |
13 | # In[Set seed for reproducibility]
14 | np.random.seed(0)
15 | torch.manual_seed(0)
16 |
17 | # In[Settings]
18 | model_name = 'IIR_proc_noise'
19 | add_noise = False
20 | lr = 1e-4
21 | num_iter = 20000
22 | test_freq = 100
23 | n_batch = 1
24 | n_b = 2
25 | n_a = 2
26 | do_PEM = False
27 |
28 | # In[Column names in the dataset]
29 | COL_T = ['time']
30 | COL_X = ['V_C', 'I_L']
31 | COL_U = ['V_IN']
32 | COL_Y = ['V_C']
33 |
34 | # In[Load dataset]
35 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_lin.csv"))
36 | t = np.array(df_X[COL_T], dtype=np.float32)
37 | y = np.array(df_X[COL_Y], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 |
41 | # In[Add measurement noise]
42 | #std_noise_V = add_noise * 10.0
43 | #std_noise_I = add_noise * 1.0
44 | #std_noise = np.array([std_noise_V, std_noise_I])
45 | #x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
46 | #x_noise = x_noise.astype(np.float32)
47 | # In[Add process noise]
48 |
49 | ts = t[1, 0] - t[0, 0]
50 | n_fit = t.shape[0]
51 |
52 | std_v = 100
53 | w_v = 5e4
54 | tau_v = 1/w_v
55 |
56 | Hu = control.TransferFunction([1], [1 / w_v, 1])
57 | Hu = Hu * Hu
58 | Hud = control.matlab.c2d(Hu, ts)
59 | t_imp = np.arange(1000) * ts
60 | t_imp, y_imp = control.impulse_response(Hud, t_imp)
61 | #y = y[0]
62 | std_tmp = np.sqrt(np.sum(y_imp ** 2)) # np.sqrt(trapz(y**2,t))
63 | Hud = Hud / std_tmp * std_v
64 |
65 | n_skip_d = 0
66 | N_sim_d = n_fit + n_skip_d
67 | e = np.random.randn(N_sim_d)
68 | te = np.arange(N_sim_d) * ts
69 | _, d, _ = control.forced_response(Hud, te, e)
70 | d_fast = d[n_skip_d:]
71 | d_fast = d_fast.reshape(-1, 1)
72 | y_nonoise = np.copy(y)
73 | y_noise = y + d_fast
74 |
75 | # Prepare data
76 | u_torch = torch.tensor(u[None, ...], dtype=torch.float, requires_grad=False)
77 | y_meas_torch = torch.tensor(y_noise[None, ...], dtype=torch.float)
78 | y_true_torch = torch.tensor(y_nonoise[None, ...], dtype=torch.float)
79 |
80 | # In[Second-order dynamical system custom defined]
81 | G = SisoLinearDynamicalOperator(n_b, n_a)
82 | H_inv = SisoLinearDynamicalOperator(2, 2, n_k=1)
83 |
84 | with torch.no_grad():
85 | G.b_coeff[0, 0, 0] = 0.01
86 | G.b_coeff[0, 0, 1] = 0.0
87 |
88 | G.a_coeff[0, 0, 0] = -0.9
89 | G.b_coeff[0, 0, 1] = 0.01 # note: this overrides the 0.0 assigned above
90 |
91 | # In[Setup optimizer]
92 | optimizer = torch.optim.Adam([
93 | {'params': G.parameters(), 'lr': lr},
94 | {'params': H_inv.parameters(), 'lr': lr},
95 | ], lr=lr)
96 |
97 | # In[Train]
98 | LOSS = []
99 | start_time = time.time()
100 | for itr in range(0, num_iter):
101 |
102 | optimizer.zero_grad()
103 |
104 | # Simulate
105 | y_hat = G(u_torch)
106 |
107 | # Compute fit loss
108 | err_fit_v = y_meas_torch - y_hat
109 |
110 | if do_PEM:
111 | err_fit_e = err_fit_v + H_inv(err_fit_v)
112 | err_fit = err_fit_e
113 | else:
114 | err_fit = err_fit_v
115 |
116 | loss_fit = torch.mean(err_fit**2)
117 | loss = loss_fit
118 |
119 | LOSS.append(loss.item())
120 | if itr % test_freq == 0:
121 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
122 |
123 | # Optimize
124 | loss.backward()
125 | optimizer.step()
126 |
127 | train_time = time.time() - start_time
128 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
129 |
130 | # In[Save model]
131 |
132 | model_folder = os.path.join("models", model_name)
133 | if not os.path.exists(model_folder):
134 | os.makedirs(model_folder)
135 | torch.save(G.state_dict(), os.path.join(model_folder, "G.pkl"))
136 | # In[Detach and reshape]
137 | y_hat = y_hat.detach().numpy()[0, ...]
138 | # In[Plot]
139 | plt.figure()
140 | plt.plot(t, y_nonoise, 'k', label="$y$")
141 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
142 | plt.plot(t, y_hat, 'b', label="$\hat y$")
143 | plt.legend()
144 |
145 | plt.figure()
146 | plt.plot(LOSS)
147 | plt.grid(True)
148 |
149 |
150 |
--------------------------------------------------------------------------------
/examples/RLC/RLC_wiener_fit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import torch.nn as nn
10 |
11 |
12 | # class StaticNonLin(nn.Module):
13 | #
14 | # def __init__(self):
15 | # super(StaticNonLin, self).__init__()
16 | #
17 | # self.net = nn.Sequential(
18 | # nn.Linear(1, 20), # 2 states, 1 input
19 | # nn.ReLU(),
20 | # nn.Linear(20, 1)
21 | # )
22 | #
23 | # def forward(self, y_lin):
24 | # y_nonlin = y_lin + self.net(y_lin)
25 | # return y_nonlin
26 |
27 |
28 | if __name__ == '__main__':
29 |
30 | # Set seed for reproducibility
31 | np.random.seed(0)
32 | torch.manual_seed(0)
33 |
34 | # Settings
35 | add_noise = True
36 | lr = 1e-4
37 | num_iter = 40000
38 | test_freq = 100
39 | n_batch = 1
40 | n_b = 2
41 | n_a = 2
42 |
43 | # Column names in the dataset
44 | COL_T = ['time']
45 | COL_X = ['V_C', 'I_L']
46 | COL_U = ['V_IN']
47 | COL_Y = ['V_C']
48 |
49 | # Load dataset
50 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_nl.csv"))
51 | t = np.array(df_X[COL_T], dtype=np.float32)
52 | #y = np.array(df_X[COL_Y], dtype=np.float32)
53 | x = np.array(df_X[COL_X], dtype=np.float32)
54 | u = np.array(df_X[COL_U], dtype=np.float32)
55 |
56 | # scale state
57 | x = x/np.array([100.0, 10.0])
58 |
59 | # Add measurement noise
60 | std_noise_V = add_noise * 0.1
61 | #y_nonoise = np.copy(1 + x[:, [0]] + x[:, [0]]**2)
62 | y_nonoise = np.copy(x[:, [0, 1]]) #np.copy(1 + x[:, [0]] ** 3)
63 | y_noise = y_nonoise + np.random.randn(*y_nonoise.shape) * std_noise_V
64 |
65 | # Prepare data
66 | u_torch = torch.tensor(u[None, :, :], dtype=torch.float, requires_grad=False)
67 | y_meas_torch = torch.tensor(y_noise[None, :, :], dtype=torch.float)
68 | y_true_torch = torch.tensor(y_nonoise[None, :, :], dtype=torch.float)
69 | G = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
70 | nn_static = SisoStaticNonLinearity()
71 |
72 | # Setup optimizer
73 | params_lin = G.parameters()
74 | optimizer = torch.optim.Adam([
75 | {'params': params_lin, 'lr': lr},
76 | {'params': nn_static.parameters(), 'lr': lr}
77 | ], lr=lr)
78 |
79 |
80 | # In[Train]
81 | LOSS = []
82 | start_time = time.time()
83 | for itr in range(0, num_iter):
84 |
85 | optimizer.zero_grad()
86 |
87 | # Simulate
88 | y_lin = G(u_torch)
89 | y_hat = nn_static(y_lin)
90 |
91 | # Compute fit loss
92 | err_fit = y_meas_torch - y_hat
93 | loss_fit = torch.mean(err_fit**2)
94 | loss = loss_fit
95 |
96 | LOSS.append(loss.item())
97 | if itr % test_freq == 0:
98 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
99 |
100 | # Optimize
101 | loss.backward()
102 | optimizer.step()
103 |
104 | train_time = time.time() - start_time
105 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
106 |
107 | # In[Plot]
108 | plt.figure()
109 | plt.plot(t, y_nonoise, 'k', label="$y$")
110 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
111 | plt.plot(t, y_hat.detach().numpy(), 'b', label="$\hat y$")
112 | plt.legend()
113 |
114 | plt.figure()
115 | plt.plot(LOSS)
116 | plt.grid(True)
117 |
118 | # In[Plot]
119 | plt.figure()
120 | plt.plot(y_lin.detach(), y_hat.detach())
121 |
122 | plt.figure()
123 | plt.plot(x[:, [0]], y_nonoise)
124 |
--------------------------------------------------------------------------------
/examples/RLC/old/RLC_secondorder_fit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from torchid.old.linearsiso_TB import SecondOrderOscillator
6 | import matplotlib.pyplot as plt
7 | import time
8 |
9 |
10 | if __name__ == '__main__':
11 |
12 | # Set seed for reproducibility
13 | np.random.seed(0)
14 | torch.manual_seed(0)
15 |
16 | # Settings
17 | add_noise = True
18 | lr = 1e-3
19 | num_iter = 5000
20 | test_freq = 100
21 | n_batch = 1
22 | n_b = 2
23 | n_f = 2
24 |
25 | # Column names in the dataset
26 | COL_T = ['time']
27 | COL_X = ['V_C', 'I_L']
28 | COL_U = ['V_IN']
29 | COL_Y = ['V_C']
30 |
31 | # Load dataset
32 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id_nl.csv"))
33 | t = np.array(df_X[COL_T], dtype=np.float32)
34 | y = np.array(df_X[COL_Y], dtype=np.float32)
35 | x = np.array(df_X[COL_X], dtype=np.float32)
36 | u = np.array(df_X[COL_U], dtype=np.float32)
37 |
38 | # Add measurement noise
39 | std_noise_V = add_noise * 10.0
40 | std_noise_I = add_noise * 1.0
41 | std_noise = np.array([std_noise_V, std_noise_I])
42 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
43 | x_noise = x_noise.astype(np.float32)
44 |
45 | # Output
46 | y_noise = np.copy(x_noise[:, [0]])
47 | y_nonoise = np.copy(x[:, [0]])
48 |
49 |
50 | # Prepare data
51 | u_torch = torch.tensor(u, dtype=torch.float, requires_grad=False)
52 | y_meas_torch = torch.tensor(y_noise, dtype=torch.float)
53 | y_true_torch = torch.tensor(y_nonoise, dtype=torch.float)
54 | y_0 = torch.zeros((n_batch, n_f), dtype=torch.float)
55 | u_0 = torch.zeros((n_batch, n_b), dtype=torch.float)
56 | # coefficients of a 2nd order oscillator
57 | # b_coeff = torch.tensor([0.0706464146944544, 0], dtype=torch.float, requires_grad=True) # b_1, b_2
58 | # f_coeff = torch.tensor([-1.87212998940304, 0.942776404097492], dtype=torch.float, requires_grad=True) # f_1, f_2
59 | b_coeff = torch.tensor([0.01, 0], dtype=torch.float, requires_grad=True) # b_1, b_2
60 | rho = np.array(0.0)
61 | psi = np.array(0.0)
62 | G = SecondOrderOscillator(b_coeff, rho, psi)
63 |
64 | # Setup optimizer
65 | params_net = G.parameters()
66 | optimizer = torch.optim.Adam([
67 | {'params': params_net, 'lr': lr},
68 | ], lr=lr)
69 |
70 | # In[Train]
71 | LOSS = []
72 | start_time = time.time()
73 | for itr in range(0, num_iter):
74 |
75 | optimizer.zero_grad()
76 |
77 | # Simulate
78 | y_hat = G(u_torch, y_0, u_0)
79 |
80 | # Compute fit loss
81 | err_fit = y_meas_torch - y_hat
82 | loss_fit = torch.mean(err_fit**2)
83 | loss = loss_fit
84 |
85 | LOSS.append(loss.item())
86 | if itr % test_freq == 0:
87 | print(f'Iter {itr} | Fit Loss {loss_fit:.4f}')
88 |
89 | # Optimize
90 | loss.backward()
91 | optimizer.step()
92 |
93 | train_time = time.time() - start_time
94 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
95 |
96 | # In[Plot]
97 | plt.figure()
98 | plt.plot(t, y_nonoise, 'k', label="$y$")
99 | plt.plot(t, y_noise, 'r', label="$y_{noise}$")
100 | plt.plot(t, y_hat.detach().numpy(), 'b', label="$\hat y$")
101 | plt.legend()
102 |
103 | plt.figure()
104 | plt.plot(LOSS)
105 | plt.grid(True)
106 |
107 |
108 |
--------------------------------------------------------------------------------
/examples/RLC/symbolic_RLC.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Tue Dec 25 12:27:55 2018
5 |
6 | @author: marco
7 | """
8 | import numba as nb
9 | from sympy import symbols, collect, cancel, init_printing, fraction
10 | import numpy as np
11 | from matplotlib import rc
12 | rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
13 | rc('text', usetex=True)
14 |
15 | import matplotlib.pyplot as plt
16 | import os
17 | # In[Symbols of the RLC circuit]
18 |
19 | R = symbols('R')
20 | L = symbols('L')
21 | C = symbols('C')
22 | s = symbols('s')
23 |
24 | # In[Impedances]
25 |
26 | ZR = R
27 | ZL = s*L
28 | ZC = 1/(s*C)
29 |
30 | ZRL = ZR + ZL # series R and L
31 |
32 | G1 = 1/(ZRL)
33 |
34 |
35 | G2 = ZC/(ZRL + ZC)
36 | G2sym = 1/(L*C)/(s**2 + R/L*s + 1/(L*C))
37 |
38 |
39 | # In[Impedances]
40 | z = symbols('z')
41 | Td = symbols('Td')
42 |
43 | s_subs = 2/Td * (z-1)/(z+1) # Tustin transform of the Laplace variable s
44 |
45 | G2d = G2.subs(s,s_subs)
46 | G2d_simple = collect(cancel(G2d),z)
47 |
48 |
49 | # In[Substitution]
50 | R_val = 3
51 | L_val = 50e-6
52 | C_val = 270e-9
53 | Td_val = 1e-6
54 |
55 |
56 | @nb.jit(["float64(float64)", "float64[:](float64[:])"], nopython=True)
57 | def saturation_formula(current_abs):
58 | sat_ratio = (1/np.pi*np.arctan(-5*(current_abs-5))+0.5)*0.9 + 0.1
59 | return sat_ratio
60 |
61 | @nb.jit("float64[:](float64,float64[:],float64[:])",nopython=True)
62 | def fxu_ODE(t,x,u):
63 | A = np.array([[0.0, 1.0/C_val],
64 | [-1/(L_val), -R_val/L_val]
65 | ])
66 | B = np.array([[0.0], [1.0/(L_val)]])
67 | dx = np.zeros(2, dtype=np.float64)
68 | dx[0] = A[0,0]*x[0] + A[0,1]*x[1] + B[0,0]*u[0]
69 | dx[1] = A[1,0]*x[0] + A[1,1]*x[1] + B[1,0]*u[0]
70 | return dx
71 |
72 | @nb.jit("float64[:](float64,float64[:],float64[:])", nopython=True)
73 | def fxu_ODE_mod(t,x,u):
74 |
75 | I_abs = np.abs(x[1])
76 | L_val_mod = L_val*saturation_formula(I_abs)
77 | R_val_mod = R_val
78 | C_val_mod = C_val
79 |
80 | A = np.array([[0.0, 1.0/C_val_mod],
81 | [-1/(L_val_mod), -R_val_mod/L_val_mod]
82 | ])
83 | B = np.array([[0.0], [1.0/(L_val_mod)]])
84 | dx = np.zeros(2, dtype=np.float64)
85 | dx[0] = A[0,0]*x[0] + A[0,1]*x[1] + B[0,0]*u[0]
86 | dx[1] = A[1,0]*x[0] + A[1,1]*x[1] + B[1,0]*u[0]
87 | #dx = A @ x + B @ u
88 | return dx
89 |
90 |
91 | A_nominal = np.array([[0.0, 1.0/C_val],
92 | [-1/(L_val), -R_val/L_val]
93 | ])
94 |
95 | B_nominal = np.array([[0.0], [1.0/(L_val)]])
96 |
97 | if __name__ == '__main__':
98 |
99 | init_printing(use_unicode=True)
100 |
101 | x = np.zeros(2)
102 | u = np.zeros(1)
103 | dx = fxu_ODE_mod(0.0, x, u)
104 |
105 | sym = [R, L, C, Td]
106 | vals = [R_val, L_val, C_val, Td_val]
107 |
108 | G2d_val = G2d_simple.subs(zip(sym, vals))
109 | G2d_num, G2d_den = fraction(G2d_val)
110 |
111 | # In[Get coefficients]
112 |
113 | num_coeff = G2d_num.collect(z).as_coefficients_dict()
114 | den_coeff = G2d_den.collect(z).as_coefficients_dict()
115 |
116 | G2d_num = G2d_num / den_coeff[z**2] # normalize by the leading denominator coefficient
117 | G2d_den = G2d_den / den_coeff[z**2] # denominator is now monic
118 | G2d_monic = G2d_num/G2d_den # transfer function with monic denominator
119 |
120 |
121 | I = np.arange(0.,20.,0.1)
122 |
123 | # Save model
124 | if not os.path.exists("fig"):
125 | os.makedirs("fig")
126 |
127 | fig, ax = plt.subplots(1, 1, sharex=True, figsize=(4, 3))
128 | ax.plot(I, L_val*1e6*saturation_formula(I), 'k')
129 | ax.grid(True)
130 | ax.set_xlabel('Inductor current $i_L$ (A)', fontsize=14)
131 | ax.set_ylabel('Inductance $L$ ($\mu$H)', fontsize=14)
132 | fig.savefig(os.path.join("fig", "RLC_characteristics.pdf"), bbox_inches='tight')
133 |
--------------------------------------------------------------------------------
/examples/Silverbox/README.md:
--------------------------------------------------------------------------------
1 | # Silverbox Example
2 |
3 | Damped harmonic oscillator example (the Silverbox benchmark). Steps to run (a minimal model sketch follows the list):
4 |
5 | 1. Obtain the data as directed in [data/README.txt](data/README.txt).
6 | 2. ```python silverbox_train_feedback.py```
7 | 3. ```python silverbox_train_W.py```
8 | 4. ```python silverbox_train_WH.py```
9 | 5. ```python silverbox_plot.py```
10 |
11 |
12 |
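13 | For reference, the Wiener model structure fitted in `silverbox_train_W.py` boils down to the following sketch (block sizes are those used in the script; the input here is a dummy signal, not the benchmark data):
14 |
15 | ```python
16 | import torch
17 | import torch.nn as nn
18 | from dynonet.lti import SisoLinearDynamicalOperator
19 |
20 | G1 = SisoLinearDynamicalOperator(n_b=3, n_a=3)  # linear dynamics
21 | # static non-linearity: identity plus a small MLP correction
22 | net = nn.Sequential(nn.Linear(1, 10), nn.Tanh(), nn.Linear(10, 1))
23 |
24 | u = torch.randn(1, 1000, 1)  # (batch, time, channel) dummy input
25 | y_lin = G1(u)
26 | y_hat = y_lin + net(y_lin)   # Wiener model output
27 | ```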
--------------------------------------------------------------------------------
/examples/Silverbox/data/README.txt:
--------------------------------------------------------------------------------
1 | Copy into this folder all the .csv files from the .zip archive:
2 |
3 | http://www.nonlinearbenchmark.org/FILES/BENCHMARKS/SILVERBOX/SilverboxFiles.zip
4 |
--------------------------------------------------------------------------------
/examples/Silverbox/silverbox_plot.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | import matplotlib.pyplot as plt
6 |
7 |
8 | if __name__ == '__main__':
9 |
10 | # Set seed for reproducibility
11 | np.random.seed(0)
12 | torch.manual_seed(0)
13 |
14 | # Settings
15 | add_noise = True
16 | lr = 2e-4
17 | num_iter = 1
18 | test_freq = 100
19 | n_fit = 100000
20 | decimate = 1
21 | n_batch = 1
22 | n_b = 3
23 | n_f = 3
24 |
25 | # Column names in the dataset
26 | COL_U = ['V1']
27 | COL_Y = ['V2']
28 |
29 | # Load dataset
30 | df_X = pd.read_csv(os.path.join("data", "SNLS80mV.csv"))
31 |
32 | # Extract data
33 | y = np.array(df_X[COL_Y], dtype=np.float32)
34 | u = np.array(df_X[COL_U], dtype=np.float32)
35 | u = u-np.mean(u)
36 | fs = 10**7/2**14
37 | N = y.size
38 | ts = 1/fs
39 | t = np.arange(N)*ts
40 |
41 | # Fit data
42 | y_fit = y[:n_fit:decimate]
43 | u_fit = u[:n_fit:decimate]
44 | t_fit = t[0:n_fit:decimate]
45 |
46 | # In[Plot]
47 | fig, ax = plt.subplots(2, 1, sharex=True)
48 | ax[0].plot(t_fit, y_fit, 'r', label="$y$")
49 | ax[0].grid()
50 | ax[0].set_ylabel('$y$')
51 | ax[1].plot(t_fit, u_fit, 'k', label="$u$")
52 | ax[1].grid()
53 | ax[1].set_ylabel('$u$')
54 | # plt.legend()
55 | plt.show()
56 |
57 |
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/examples/Silverbox/silverbox_train_W.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 | import torch.nn as nn
9 |
10 | import dynonet.metrics
11 |
12 |
13 | class StaticNonLin(nn.Module):
14 |
15 | def __init__(self):
16 | super(StaticNonLin, self).__init__()
17 |
18 | self.net = nn.Sequential(
19 | nn.Linear(1, 10), # 1 input channel, 10 hidden units
20 | nn.Tanh(),
21 | nn.Linear(10, 1)
22 | )
23 |
24 | for m in self.net.modules():
25 | if isinstance(m, nn.Linear):
26 | nn.init.normal_(m.weight, mean=0, std=1e-3)
27 | nn.init.constant_(m.bias, val=0)
28 |
29 | def forward(self, y_lin):
30 | y_nl = y_lin + self.net(y_lin)
31 | return y_nl
32 |
33 |
34 | if __name__ == '__main__':
35 |
36 | # Set seed for reproducibility
37 | np.random.seed(0)
38 | torch.manual_seed(0)
39 |
40 | # Settings
41 | add_noise = True
42 | lr = 1e-4
43 | num_iter = 10000
44 | test_freq = 100
45 | n_fit = 40000
46 | decimate = 1
47 | n_batch = 1
48 | n_b = 3
49 | n_a = 3
50 |
51 | # Column names in the dataset
52 | COL_U = ['V1']
53 | COL_Y = ['V2']
54 |
55 | # Load dataset
56 | df_X = pd.read_csv(os.path.join("data", "SNLS80mV.csv"))
57 |
58 | # Extract data
59 | y = np.array(df_X[COL_Y], dtype=np.float32)
60 | u = np.array(df_X[COL_U], dtype=np.float32)
61 | u = u - np.mean(u)
62 | fs = 10**7/2**14
63 | N = y.size
64 | ts = 1/fs
65 | t = np.arange(N)*ts
66 |
67 | # Fit data
68 | y_fit = y[:n_fit:decimate]
69 | u_fit = u[:n_fit:decimate]
70 | t_fit = t[0:n_fit:decimate]
71 |
72 | # Prepare data
73 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
74 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
75 |
76 |
77 | # Second-order dynamical system custom defined
78 | G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
79 | y_init_1 = torch.zeros((n_batch, n_a), dtype=torch.float)
80 | u_init_1 = torch.zeros((n_batch, n_b), dtype=torch.float)
81 |
82 | # Static non-linearity
83 | F_nl = StaticNonLin()
84 |
85 | # Setup optimizer
86 | optimizer = torch.optim.Adam([
87 | {'params': G1.parameters(), 'lr': 1e-4},
88 | {'params': F_nl.parameters(), 'lr': 1e-4},
89 | ], lr=lr)
90 |
91 | # In[Train]
92 | LOSS = []
93 | start_time = time.time()
94 | for itr in range(0, num_iter):
95 |
96 | optimizer.zero_grad()
97 |
98 | # Simulate
99 | y1_lin = G1(u_fit_torch, y_init_1, u_init_1)
100 | y_hat = F_nl(y1_lin)
101 |
102 | # Compute fit loss
103 | err_fit = y_fit_torch - y_hat
104 | loss_fit = torch.mean(err_fit**2)
105 | loss = loss_fit
106 |
107 | LOSS.append(loss.item())
108 | if itr % test_freq == 0:
109 | with torch.no_grad():
110 | RMSE = torch.sqrt(loss)
111 | print(f'Iter {itr} | Fit Loss {loss_fit:.6f} | RMSE:{RMSE:.4f}')
112 |
113 | # Optimize
114 | loss.backward()
115 |
116 |
117 |
118 | optimizer.step()
119 |
120 | train_time = time.time() - start_time
121 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
122 |
123 | # In[To numpy]
124 |
125 | y_hat = y_hat.detach().numpy()[0, :, :]
126 | y1_lin = y1_lin.detach().numpy()[0, :, :]
127 |
128 | # In[Plot]
129 | plt.figure()
130 | plt.plot(t_fit, y_fit, 'k', label="$y$")
131 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
132 | plt.legend()
133 | plt.show()
134 |
135 | plt.figure()
136 | plt.plot(LOSS)
137 | plt.grid(True)
138 | plt.show()
139 |
140 | # In[Plot static non-linearity]
141 |
142 | y1_lin_min = np.min(y1_lin) - 1e-6
143 | y1_lin_max = np.max(y1_lin) + 1e-6
144 |
145 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
146 |
147 | with torch.no_grad():
148 | out_nl = F_nl(torch.as_tensor(in_nl))
149 |
150 | plt.figure()
151 | plt.plot(in_nl, out_nl, 'b')
152 |
153 | #plt.plot(y1_lin, y1_nl, 'b*')
154 | plt.xlabel('Static non-linearity input (-)')
155 | plt.ylabel('Static non-linearity output (-)')
156 | plt.grid(True)
157 | plt.show()
158 |
159 | # In[Plot]
160 | e_rms = dynonet.metrics.error_rmse(y_fit, y_hat)[0]
161 | fit_idx = dynonet.metrics.fit_index(y_fit, y_hat)[0]
162 | r_sq = dynonet.metrics.r_squared(y_fit, y_hat)[0]
163 | print(f"RMSE: {e_rms:.4f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.1f}")
164 |
165 |
166 |
167 |
168 |
169 |
170 |
--------------------------------------------------------------------------------
/examples/Silverbox/silverbox_train_WH.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | import matplotlib.pyplot as plt
7 | import time
8 | import torch.nn as nn
9 |
10 | import dynonet.metrics
11 |
12 |
13 | class StaticNonLin(nn.Module):
14 |
15 | def __init__(self):
16 | super(StaticNonLin, self).__init__()
17 |
18 | self.net = nn.Sequential(
19 | nn.Linear(1, 20), # 1 input channel, 20 hidden units
20 | nn.Tanh(),
21 | nn.Linear(20, 1)
22 | )
23 |
24 | for m in self.net.modules():
25 | if isinstance(m, nn.Linear):
26 | nn.init.normal_(m.weight, mean=0, std=1e-3)
27 | nn.init.constant_(m.bias, val=0)
28 |
29 | def forward(self, y_lin):
30 | y_nl = y_lin + self.net(y_lin)
31 | return y_nl
32 |
33 |
34 | if __name__ == '__main__':
35 |
36 | # Set seed for reproducibility
37 | np.random.seed(0)
38 | torch.manual_seed(0)
39 |
40 | # Settings
41 | add_noise = True
42 | lr = 1e-3
43 | num_iter = 10000
44 | test_freq = 100
45 | n_fit = 40000
46 | decimate = 1
47 | n_batch = 1
48 | n_b = 3
49 | n_a = 3
50 |
51 | # Column names in the dataset
52 | COL_U = ['V1']
53 | COL_Y = ['V2']
54 |
55 | # Load dataset
56 | df_X = pd.read_csv(os.path.join("data", "SNLS80mV.csv"))
57 |
58 | # Extract data
59 | y = np.array(df_X[COL_Y], dtype=np.float32)
60 | u = np.array(df_X[COL_U], dtype=np.float32)
61 | u = u - np.mean(u)
62 | fs = 10**7/2**14
63 | N = y.size
64 | ts = 1/fs
65 | t = np.arange(N)*ts
66 |
67 | # Fit data
68 | y_fit = y[:n_fit:decimate]
69 | u_fit = u[:n_fit:decimate]
70 | t_fit = t[0:n_fit:decimate]
71 |
72 | # Prepare data
73 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
74 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
75 |
76 |
77 | # First dynamical system custom defined
78 | G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
79 | y_init_1 = torch.zeros((n_batch, n_a), dtype=torch.float)
80 | u_init_1 = torch.zeros((n_batch, n_b), dtype=torch.float)
81 |
82 | # Second dynamical system custom defined
83 | G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a)
84 | y_init_2 = torch.zeros((n_batch, n_a), dtype=torch.float)
85 | u_init_2 = torch.zeros((n_batch, n_b), dtype=torch.float)
86 |
87 | # Static non-linearity
88 | F_nl = StaticNonLin()
89 |
90 | # Setup optimizer
91 | optimizer = torch.optim.Adam([
92 | {'params': G1.parameters(), 'lr': lr},
93 | {'params': G2.parameters(), 'lr': lr},
94 | {'params': F_nl.parameters(), 'lr': lr},
95 | ], lr=lr)
96 |
97 | # In[Train]
98 | LOSS = []
99 | start_time = time.time()
100 | for itr in range(0, num_iter):
101 |
102 | optimizer.zero_grad()
103 |
104 | # Simulate
105 | y1_lin = G1(u_fit_torch, y_init_1, u_init_1)
106 | y1_nl = F_nl(y1_lin)
107 | y_hat = G2(y1_nl, y_init_2, u_init_2)
108 |
109 | # Compute fit loss
110 | err_fit = y_fit_torch - y_hat
111 | loss_fit = torch.mean(err_fit**2)
112 | loss = loss_fit
113 |
114 | LOSS.append(loss.item())
115 | if itr % test_freq == 0:
116 | with torch.no_grad():
117 | RMSE = torch.sqrt(loss)
118 | print(f'Iter {itr} | Fit Loss {loss_fit:.6f} | RMSE:{RMSE:.4f}')
119 |
120 | # Optimize
121 | loss.backward()
122 |
123 |
124 |
125 | optimizer.step()
126 |
127 | train_time = time.time() - start_time
128 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
129 |
130 | # In[To numpy]
131 |
132 | y_hat = y_hat.detach().numpy()[0, :, :]
133 | y1_lin = y1_lin.detach().numpy()[0, :, :]
134 |
135 | # In[Plot]
136 | plt.figure()
137 | plt.plot(t_fit, y_fit, 'k', label="$y$")
138 | plt.plot(t_fit, y_hat, 'b', label="$\hat y$")
139 | plt.legend()
140 | plt.show()
141 |
142 | plt.figure()
143 | plt.plot(LOSS)
144 | plt.grid(True)
145 | plt.show()
146 |
147 | # In[Plot static non-linearity]
148 |
149 | y1_lin_min = np.min(y1_lin) - 1e-6
150 | y1_lin_max = np.max(y1_lin) + 1e-6
151 |
152 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
153 |
154 | with torch.no_grad():
155 | out_nl = F_nl(torch.as_tensor(in_nl))
156 |
157 | plt.figure()
158 | plt.plot(in_nl, out_nl, 'b')
159 |
160 | #plt.plot(y1_lin, y1_nl, 'b*')
161 | plt.xlabel('Static non-linearity input (-)')
162 | plt.ylabel('Static non-linearity output (-)')
163 | plt.grid(True)
164 | plt.show()
165 |
166 | # In[Plot]
167 | e_rms = dynonet.metrics.error_rmse(y_fit, y_hat)[0]
168 | print(f"RMSE: {e_rms:.2f}")
169 |
170 |
171 |
172 |
173 |
174 |
175 |
--------------------------------------------------------------------------------
/examples/WH2009/README.md:
--------------------------------------------------------------------------------
1 | # WH2009 Example
2 |
3 | Wiener-Hammerstein benchmark: a nonlinear circuit example from the paper by Schoukens, Suykens and Ljung. Steps to run (a minimal model sketch follows the list):
4 |
5 | 1. Obtain the data (which includes a description of the benchmark) as directed in [data/README.txt](data/README.txt).
6 | 2. ```python WH2009_train.py```
7 | 3. ```python WH2009_train_quantized.py```
8 | 4. ```python WH2009_train_FIR.py```
9 | 5. ```python WH2009_train_comparisons.py```
10 | 6. ```python WH2009_train_process_noise.py```
11 | 7. ```python WH2009_train_process_noise_PEM.py```
12 | 8. ```python WH2009_test_FIR.py```
13 | 9. ```python WH2009_test.py```
14 |
15 |
16 |
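17 | For reference, the Wiener-Hammerstein model assembled by the scripts above reduces to this sketch (block sizes are those used in `WH2009_test.py`; the input here is a dummy signal, not the benchmark data):
18 |
19 | ```python
20 | import torch
21 | from dynonet.lti import SisoLinearDynamicalOperator
22 | from dynonet.static import SisoStaticNonLinearity
23 |
24 | G1 = SisoLinearDynamicalOperator(n_b=8, n_a=8, n_k=1)  # input linear block
25 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
26 | G2 = SisoLinearDynamicalOperator(n_b=8, n_a=8, n_k=0)  # output linear block
27 |
28 | u = torch.randn(1, 1000, 1)  # (batch, time, channel) dummy input
29 | y_hat = G2(F_nl(G1(u)))      # G1 -> static non-linearity -> G2
30 | ```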
--------------------------------------------------------------------------------
/examples/WH2009/WH2009_test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 |
8 | import matplotlib
9 | import matplotlib.pyplot as plt
10 | import control
11 | import dynonet.metrics
12 |
13 |
14 | # In[Main]
15 | if __name__ == '__main__':
16 |
17 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
18 | # In[Settings]
19 | #model_name = 'model_WH_digit'
20 | model_name = "model_WH_proc_noise"
21 |
22 | # Settings
23 | n_b = 8
24 | n_a = 8
25 |
26 | # Column names in the dataset
27 | COL_F = ['fs']
28 | COL_U = ['uBenchMark']
29 | COL_Y = ['yBenchMark']
30 |
31 | # Load dataset
32 | df_X = pd.read_csv(os.path.join("data", "WienerHammerBenchmark.csv"))
33 |
34 | # Extract data
35 | y_meas = np.array(df_X[COL_Y], dtype=np.float32)
36 | u = np.array(df_X[COL_U], dtype=np.float32)
37 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32).item()
38 | N = y_meas.size
39 | ts = 1/fs
40 | t = np.arange(N)*ts
41 |
42 | t_fit_start = 0
43 | t_fit_end = 100000
44 | t_test_start = 100000
45 | t_test_end = 188000
46 | t_skip = 1000 # skip for statistics
47 |
48 | # In[Instantiate models]
49 |
50 | # Create models
51 | G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
52 | G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=0)
53 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
54 |
55 | model_folder = os.path.join("models", model_name)
56 | # Load model parameters
57 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pkl")))
58 | F_nl.load_state_dict(torch.load(os.path.join(model_folder, "F_nl.pkl")))
59 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pkl")))
60 |
61 | # In[Predict]
62 |
63 | u_torch = torch.tensor(u[None, :, :])
64 | y1_lin = G1(u_torch)
65 | y1_nl = F_nl(y1_lin)
66 | y_hat = G2(y1_nl)
67 |
68 | # In[Detach]
69 | y_hat = y_hat.detach().numpy()[0, :, :]
70 | y1_lin = y1_lin.detach().numpy()[0, :, :]
71 | y1_nl = y1_nl.detach().numpy()[0, :, :]
72 |
73 | # In[Plot]
74 | plt.figure()
75 | plt.plot(t, y_meas, 'k', label="$y$")
76 | plt.plot(t, y_hat, 'b', label="$\hat y$")
77 | plt.plot(t, y_meas - y_hat, 'r', label="$e$")
78 | plt.grid(True)
79 | plt.xlabel('Time (s)')
80 | plt.ylabel('Voltage (V)')
81 | plt.legend(loc='upper right')
82 | # plt.savefig('WH_fit.pdf')
83 | plt.show()
84 |
85 | # In[Inspect linear model]
86 |
87 | n_imp = 128
88 | G1_num, G1_den = G1.get_tfdata()
89 | G1_sys = control.TransferFunction(G1_num, G1_den, ts)
90 | plt.figure()
91 | plt.title("$G_1$ impulse response")
92 | _, y_imp = control.impulse_response(G1_sys, np.arange(n_imp) * ts)
93 | # plt.plot(G1_num)
94 | plt.plot(y_imp)
95 | # plt.savefig(os.path.join("models", model_name, "G1_imp.pdf"))
96 | plt.show()
97 | plt.figure()
98 | mag_G1, phase_G1, omega_G1 = control.bode(G1_sys, omega_limits=[1e2, 1e5])
99 | plt.suptitle("$G_1$ bode plot")
100 | # plt.savefig(os.path.join("models", model_name, "G1_bode.pdf"))
101 | plt.show()
102 |
103 | # G2_b = G2.G.weight.detach().numpy()[0, 0, ::-1]
104 | G2_num, G2_den = G2.get_tfdata()
105 | G2_sys = control.TransferFunction(G2_num, G2_den, ts)
106 | plt.figure()
107 | plt.title("$G_2$ impulse response")
108 | _, y_imp = control.impulse_response(G2_sys, np.arange(n_imp) * ts)
109 | plt.plot(y_imp)
110 | # plt.savefig(os.path.join("models", model_name, "G2_imp.pdf"))
111 | plt.show()
112 | plt.figure()
113 | mag_G2, phase_G2, omega_G2 = control.bode(G2_sys, omega_limits=[1e2, 1e5])
114 | plt.suptitle("$G_2$ bode plot")
115 | # plt.savefig(os.path.join("models", model_name, "G2_bode.pdf"))
116 | plt.show()
117 |
118 | # In[Inspect static non-linearity]
119 |
120 | y1_lin_min = np.min(y1_lin)
121 | y1_lin_max = np.max(y1_lin)
122 |
123 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
124 |
125 | with torch.no_grad():
126 | out_nl = F_nl(torch.as_tensor(in_nl))
127 |
128 | plt.figure()
129 | plt.plot(in_nl, out_nl, 'b')
130 |
131 | plt.xlabel('Static non-linearity input (-)')
132 | plt.ylabel('Static non-linearity output (-)')
133 | plt.grid(True)
134 | plt.show()
135 |
136 | # In[Metrics]
137 | idx_test = range(t_test_start + t_skip, t_test_end)
138 | e_rms = 1000*dynonet.metrics.error_rmse(y_meas[idx_test], y_hat[idx_test])[0]
139 | fit_idx = dynonet.metrics.fit_index(y_meas[idx_test], y_hat[idx_test])[0]
140 | r_sq = dynonet.metrics.r_squared(y_meas[idx_test], y_hat[idx_test])[0]
141 |
142 | print(f"RMSE: {e_rms:.1f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.4f}")
143 |
144 |
145 | # In[Plot for paper]
146 |
147 | t_test_start = 140000
148 | len_plot = 1000
149 |
150 | plt.figure(figsize=(4, 3))
151 | plt.plot(t[t_test_start:t_test_start+len_plot], y_meas[t_test_start:t_test_start+len_plot], 'k', label=r"$\mathbf{y}^{\mathrm{meas}}$")
152 | plt.plot(t[t_test_start:t_test_start+len_plot], y_hat[t_test_start:t_test_start+len_plot], 'b--', label=r"$\mathbf{y}$")
153 | plt.plot(t[t_test_start:t_test_start+len_plot], y_meas[t_test_start:t_test_start+len_plot] - y_hat[t_test_start:t_test_start+len_plot], 'r', label=r"$\mathbf{e}$")
154 | plt.grid(True)
155 | plt.xlabel('Time (s)')
156 | plt.ylabel('Voltage (V)')
157 | plt.legend(loc='upper right')
158 | plt.tight_layout()
159 | # plt.savefig('WH_timetrace.pdf')
160 | plt.show()
161 |
--------------------------------------------------------------------------------
/examples/WH2009/WH2009_test_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoFirLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 |
8 | import matplotlib as mpl
9 | import matplotlib.pyplot as plt
10 | import torch.nn as nn
11 | import control
12 | import dynonet.metrics
13 |
14 |
15 | if __name__ == '__main__':
16 |
17 | mpl.rc('text', usetex=True)
18 | mpl.rcParams['axes.grid'] = True
19 |
20 | model_name = 'model_WH_FIR'
21 |
22 | # Settings
23 | n_b = 128
24 |
25 | # Column names in the dataset
26 | COL_F = ['fs']
27 | COL_U = ['uBenchMark']
28 | COL_Y = ['yBenchMark']
29 |
30 | # Load dataset
31 | df_X = pd.read_csv(os.path.join("data", "WienerHammerBenchmark.csv"))
32 |
33 | # Extract data
34 | y_meas = np.array(df_X[COL_Y], dtype=np.float32)
35 | u = np.array(df_X[COL_U], dtype=np.float32)
36 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32).item()
37 | N = y_meas.size
38 | ts = 1/fs
39 | t = np.arange(N)*ts
40 |
41 | t_fit_start = 0
42 | t_fit_end = 100000
43 | t_test_start = 100000
44 | t_test_end = 188000
45 | t_skip = 1000
46 |
47 | # In[Instantiate models]
48 |
49 | # Create models
50 | G1 = SisoFirLinearDynamicalOperator(n_b=n_b)
51 | G2 = SisoFirLinearDynamicalOperator(n_b=n_b)
52 | F_nl = SisoStaticNonLinearity()
53 |
54 | model_folder = os.path.join("models", model_name)
55 | # Load model parameters
56 | G1.load_state_dict(torch.load(os.path.join(model_folder, "G1.pkl")))
57 | F_nl.load_state_dict(torch.load(os.path.join(model_folder, "F_nl.pkl")))
58 | G2.load_state_dict(torch.load(os.path.join(model_folder, "G2.pkl")))
59 |
60 | # In[Predict]
61 |
62 | u_torch = torch.tensor(u[None, :, :])
63 | y1_lin = G1(u_torch)
64 | y1_nl = F_nl(y1_lin)
65 | y_hat = G2(y1_nl)
66 |
67 | # In[Detach]
68 | y_hat = y_hat.detach().numpy()[0, :, :]
69 | y1_lin = y1_lin.detach().numpy()[0, :, :]
70 | y1_nl = y1_nl.detach().numpy()[0, :, :]
71 |
72 | # In[Plot]
73 | plt.figure()
74 | plt.plot(t, y_meas, 'k', label="$y$")
75 | plt.plot(t, y_hat, 'b', label=r"$\hat y$")
76 | plt.plot(t, y_meas - y_hat, 'r', label="$e$")
77 | plt.legend(loc='upper left')
78 |
79 | # In[Inspect linear model]
80 |
81 | n_imp = n_b
82 | G1_num, G1_den = G1.get_tfdata()
83 | G1_sys = control.TransferFunction(G1_num, G1_den, ts)
84 | plt.figure()
85 | plt.title("$G_1$ impulse response")
86 | _, y_imp = control.impulse_response(G1_sys, np.arange(n_imp) * ts)
87 | # plt.plot(G1_num)
88 | plt.plot(y_imp)
89 | plt.savefig(os.path.join("models", model_name, "G1_imp.pdf"))
90 | plt.figure()
91 | mag_G1, phase_G1, omega_G1 = control.bode(G1_sys, omega_limits=[1e2, 1e5])
92 | plt.suptitle("$G_1$ bode plot")
93 | plt.savefig(os.path.join("models", model_name, "G1_bode.pdf"))
94 |
95 |
96 | #G2_b = G2.G.weight.detach().numpy()[0, 0, ::-1]
97 | G2_num, G2_den = G2.get_tfdata()
98 | G2_sys = control.TransferFunction(G2_num, G2_den, ts)
99 | plt.figure()
100 | plt.title("$G_2$ impulse response")
101 | _, y_imp = control.impulse_response(G2_sys, np.arange(n_imp) * ts)
102 | plt.plot(y_imp)
103 | plt.savefig(os.path.join("models", model_name, "G2_imp.pdf"))
104 | plt.figure()
105 | mag_G2, phase_G2, omega_G2 = control.bode(G2_sys, omega_limits=[1e2, 1e5])
106 | plt.suptitle("$G_2$ bode plot")
107 | plt.savefig(os.path.join("models", model_name, "G2_bode.pdf"))
108 |
109 | #mag_G2, phase_G2, omega_G2 = control.bode(G2_sys)
110 |
111 | # In[Inspect static non-linearity]
112 |
113 | y1_lin_min = np.min(y1_lin)
114 | y1_lin_max = np.max(y1_lin)
115 |
116 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
117 |
118 | with torch.no_grad():
119 | out_nl = F_nl(torch.as_tensor(in_nl))
120 |
121 | plt.figure()
122 | plt.plot(in_nl, out_nl, 'b')
123 |
124 | plt.xlabel('Static non-linearity input (-)')
125 | plt.ylabel('Static non-linearity output (-)')
126 | plt.grid(True)
127 |
128 | # In[Metrics]
129 | idx_test = range(t_test_start + t_skip, t_test_end)
130 | e_rms = 1000*dynonet.metrics.error_rmse(y_meas[idx_test], y_hat[idx_test])[0]
131 | fit_idx = dynonet.metrics.fit_index(y_meas[idx_test], y_hat[idx_test])[0]
132 | r_sq = dynonet.metrics.r_squared(y_meas[idx_test], y_hat[idx_test])[0]
133 |
134 | print(f"RMSE: {e_rms:.1f}V\nFIT: {fit_idx:.1f}%\nR_sq: {r_sq:.2f}")
135 |
136 |
137 |
--------------------------------------------------------------------------------
/examples/WH2009/WH2009_train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import torch.nn as nn
10 |
11 |
12 | import dynonet.metrics
13 |
14 | # In[Main]
15 | if __name__ == '__main__':
16 |
17 | # In[Set seed for reproducibility]
18 | np.random.seed(0)
19 | torch.manual_seed(0)
20 |
21 | # In[Settings]
22 | lr_ADAM = 2e-4
23 | lr_BFGS = 1e0
24 | num_iter_ADAM = 40000  # ADAM iterations
25 | num_iter_BFGS = 0 # final BFGS iterations
26 | msg_freq = 100
27 | n_skip = 5000
28 | n_fit = 20000
29 | decimate = 1
30 | n_batch = 1
31 | n_b = 8
32 | n_a = 8
33 | model_name = "model_WH"
34 |
35 | num_iter = num_iter_ADAM + num_iter_BFGS
36 |
37 | # In[Column names in the dataset]
38 | COL_F = ['fs']
39 | COL_U = ['uBenchMark']
40 | COL_Y = ['yBenchMark']
41 |
42 | # In[Load dataset]
43 | df_X = pd.read_csv(os.path.join("data", "WienerHammerBenchmark.csv"))
44 |
45 | # Extract data
46 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
47 | u = np.array(df_X[COL_U], dtype=np.float32)
48 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
49 | N = y.size
50 | ts = 1/fs
51 | t = np.arange(N)*ts
52 |
53 | # In[Fit data]
54 | y_fit = y[0:n_fit:decimate]
55 | u_fit = u[0:n_fit:decimate]
56 | t_fit = t[0:n_fit:decimate]
57 |
58 | # In[Prepare training tensors]
59 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
60 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
61 |
62 | # In[Prepare model]
63 | G1 = SisoLinearDynamicalOperator(n_b, n_a, n_k=1)
64 | F_nl = SisoStaticNonLinearity(n_hidden=10, activation='tanh')
65 | G2 = SisoLinearDynamicalOperator(n_b, n_a)
66 |
67 | def model(u_in):
68 | y1_lin = G1(u_in)
69 | y1_nl = F_nl(y1_lin)
70 | y_hat = G2(y1_nl)
71 | return y_hat, y1_nl, y1_lin
72 |
73 | # In[Setup optimizer]
74 | optimizer_ADAM = torch.optim.Adam([
75 | {'params': G1.parameters(), 'lr': lr_ADAM},
76 | {'params': G2.parameters(), 'lr': lr_ADAM},
77 | {'params': F_nl.parameters(), 'lr': lr_ADAM},
78 | ], lr=lr_ADAM)
79 |
80 | optimizer_LBFGS = torch.optim.LBFGS(list(G1.parameters()) + list(G2.parameters()) + list(F_nl.parameters()), lr=lr_BFGS)
81 |
82 |
83 | def closure():
84 | optimizer_LBFGS.zero_grad()
85 |
86 | # Simulate
87 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
88 |
89 | # Compute fit loss
90 | err_fit = y_fit_torch[:, n_skip:, :] - y_hat[:, n_skip:, :]
91 | loss = torch.mean(err_fit**2)*1000
92 |
93 | # Backward pass
94 | loss.backward()
95 | return loss
96 |
97 |
98 | # In[Train]
99 | LOSS = []
100 | start_time = time.time()
101 | for itr in range(0, num_iter):
102 |
103 | if itr < num_iter_ADAM:
104 | msg_freq = 10
105 | loss_train = optimizer_ADAM.step(closure)
106 | else:
107 | msg_freq = 10
108 | loss_train = optimizer_LBFGS.step(closure)
109 |
110 | LOSS.append(loss_train.item())
111 | if itr % msg_freq == 0:
112 | with torch.no_grad():
113 | RMSE = torch.sqrt(loss_train)
114 | print(f'Iter {itr} | Fit Loss {loss_train:.6f} | RMSE:{RMSE:.4f}')
115 |
116 | train_time = time.time() - start_time
117 | print(f"\nTrain time: {train_time:.2f}")
118 |
119 | # In[Save model]
120 | model_folder = os.path.join("models", model_name)
121 | if not os.path.exists(model_folder):
122 | os.makedirs(model_folder)
123 |
124 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pkl"))
125 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pkl"))
126 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pkl"))
127 |
128 |
129 | # In[Simulate one more time]
130 | with torch.no_grad():
131 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
132 |
133 | # In[Detach]
134 | y_hat = y_hat.detach().numpy()[0, :, :]
135 | y1_lin = y1_lin.detach().numpy()[0, :, :]
136 | y1_nl = y1_nl.detach().numpy()[0, :, :]
137 |
138 | # In[Plot]
139 | plt.figure()
140 | plt.plot(t_fit, y_fit, 'k', label="$y$")
141 | plt.plot(t_fit, y_hat, 'b', label=r"$\hat y$")
142 | plt.legend()
143 | plt.show()
144 |
145 | # In[Plot loss]
146 | plt.figure()
147 | plt.plot(LOSS)
148 | plt.grid(True)
149 | plt.show()
150 |
151 | # In[Plot static non-linearity]
152 |
153 | y1_lin_min = np.min(y1_lin)
154 | y1_lin_max = np.max(y1_lin)
155 |
156 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
157 |
158 | with torch.no_grad():
159 | out_nl = F_nl(torch.as_tensor(in_nl))
160 |
161 | plt.figure()
162 | plt.plot(in_nl, out_nl, 'b')
163 |
164 | #plt.plot(y1_lin, y1_nl, 'b*')
165 | plt.xlabel('Static non-linearity input (-)')
166 | plt.ylabel('Static non-linearity output (-)')
167 | plt.grid(True)
168 | plt.show()
169 |
170 | # In[Metrics]
171 | e_rms = dynonet.metrics.error_rmse(y_hat, y_fit)[0]
172 | print(f"RMSE: {e_rms*1000:.2f} mV")  # benchmark target: about 1 mV
173 |
174 |
175 |
176 |
177 |
178 |
179 |
--------------------------------------------------------------------------------
/examples/WH2009/WH2009_train_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoFirLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 | import matplotlib.pyplot as plt
8 | import time
9 | import torch.nn as nn
10 |
11 | import dynonet.metrics
12 |
13 | # In[Main]
14 | if __name__ == '__main__':
15 |
16 | # In[Set seed for reproducibility]
17 | np.random.seed(0)
18 | torch.manual_seed(0)
19 |
20 | # In[Settings]
21 | lr_ADAM = 1e-4
22 | lr_BFGS = 1e-1
23 | num_iter_ADAM = 100000
24 | num_iter_BFGS = 0
25 | test_freq = 100
26 | n_fit = 100000
27 | decimate = 1
28 | n_batch = 1
29 | n_b = 128
30 | model_name = "model_WH_FIR"
31 |
32 | num_iter = num_iter_ADAM + num_iter_BFGS
33 |
34 | # In[Column names in the dataset]
35 | COL_F = ['fs']
36 | COL_U = ['uBenchMark']
37 | COL_Y = ['yBenchMark']
38 |
39 | # In[Load dataset]
40 | df_X = pd.read_csv(os.path.join("data", "WienerHammerBenchmark.csv"))
41 |
42 | # Extract data
43 | y = np.array(df_X[COL_Y], dtype=np.float32) # batch, time, channel
44 | u = np.array(df_X[COL_U], dtype=np.float32)
45 | fs = np.array(df_X[COL_F].iloc[0], dtype=np.float32)
46 | N = y.size
47 | ts = 1/fs
48 | t = np.arange(N)*ts
49 |
50 | # In[Fit data]
51 | y_fit = y[0:n_fit:decimate]
52 | u_fit = u[0:n_fit:decimate]
53 | t_fit = t[0:n_fit:decimate]
54 |
55 |
56 | # In[Prepare training tensors]
57 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float, requires_grad=False)
58 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
59 |
60 | # In[Prepare model]
61 | G1 = SisoFirLinearDynamicalOperator(n_b=n_b)
62 | F_nl = SisoStaticNonLinearity()
63 | G2 = SisoFirLinearDynamicalOperator(n_b=n_b)
64 |
65 | def model(u_in):
66 | y1_lin = G1(u_in)
67 | y1_nl = F_nl(y1_lin)
68 | y_hat = G2(y1_nl)
69 | return y_hat, y1_nl, y1_lin
70 |
71 | # In[Setup optimizer]
72 | optimizer_ADAM = torch.optim.Adam([
73 | {'params': G1.parameters(), 'lr': lr_ADAM},
74 | {'params': G2.parameters(), 'lr': lr_ADAM},
75 | {'params': F_nl.parameters(), 'lr': lr_ADAM},
76 | ], lr=lr_ADAM)
77 |
78 | optimizer_LBFGS = torch.optim.LBFGS(list(G1.parameters()) + list(G2.parameters()) + list(F_nl.parameters()), lr=lr_BFGS)
79 |
80 |
81 | def closure():
82 | optimizer_LBFGS.zero_grad()
83 |
84 | # Simulate
85 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
86 |
87 | # Compute fit loss
88 | err_fit = y_fit_torch - y_hat
89 | loss = torch.mean(err_fit**2)
90 |
91 | # Backward pass
92 | loss.backward()
93 | return loss
94 |
95 |
96 | # In[Train]
97 | LOSS = []
98 | start_time = time.time()
99 | for itr in range(0, num_iter):
100 |
101 | if itr < num_iter_ADAM:
102 | test_freq = 10
103 | loss_train = optimizer_ADAM.step(closure)
104 | else:
105 | test_freq = 10
106 | loss_train = optimizer_LBFGS.step(closure)
107 |
108 | LOSS.append(loss_train.item())
109 | if itr % test_freq == 0:
110 | with torch.no_grad():
111 | RMSE = torch.sqrt(loss_train)
112 | print(f'Iter {itr} | Fit Loss {loss_train:.6f} | RMSE:{RMSE:.4f}')
113 |
114 | train_time = time.time() - start_time
115 | print(f"\nTrain time: {train_time:.2f}")
116 |
117 | # In[Save model]
118 | model_folder = os.path.join("models", model_name)
119 | if not os.path.exists(model_folder):
120 | os.makedirs(model_folder)
121 |
122 | torch.save(G1.state_dict(), os.path.join(model_folder, "G1.pkl"))
123 | torch.save(F_nl.state_dict(), os.path.join(model_folder, "F_nl.pkl"))
124 | torch.save(G2.state_dict(), os.path.join(model_folder, "G2.pkl"))
125 |
126 |
127 | # In[Simulate one more time]
128 | with torch.no_grad():
129 | y_hat, y1_nl, y1_lin = model(u_fit_torch)
130 |
131 | # In[Detach]
132 | y_hat = y_hat.detach().numpy()[0, :, :]
133 | y1_lin = y1_lin.detach().numpy()[0, :, :]
134 | y1_nl = y1_nl.detach().numpy()[0, :, :]
135 |
136 | # In[Plot]
137 | plt.figure()
138 | plt.plot(t_fit, y_fit, 'k', label="$y$")
139 | plt.plot(t_fit, y_hat, 'b', label=r"$\hat y$")
140 | plt.legend()
141 | plt.show()
142 |
143 | # In[Plot loss]
144 | plt.figure()
145 | plt.plot(LOSS)
146 | plt.grid(True)
147 | plt.show()
148 |
149 | # In[Plot static non-linearity]
150 |
151 | y1_lin_min = np.min(y1_lin)
152 | y1_lin_max = np.max(y1_lin)
153 |
154 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
155 |
156 | with torch.no_grad():
157 | out_nl = F_nl(torch.as_tensor(in_nl))
158 |
159 | plt.figure()
160 | plt.plot(in_nl, out_nl, 'b')
161 |
162 | #plt.plot(y1_lin, y1_nl, 'b*')
163 | plt.xlabel('Static non-linearity input (-)')
164 | plt.ylabel('Static non-linearity output (-)')
165 | plt.grid(True)
166 | plt.show()
167 |
168 | # In[Metrics]
169 | e_rms = dynonet.metrics.error_rmse(y_hat, y_fit)[0]
170 | print(f"RMSE: {e_rms*1000:.2f} mV")
171 |
--------------------------------------------------------------------------------
/examples/WH2009/WH_fit.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/WH2009/WH_fit.pdf
--------------------------------------------------------------------------------
/examples/WH2009/WH_timetrace.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/WH2009/WH_timetrace.pdf
--------------------------------------------------------------------------------
/examples/WH2009/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/WH2009/__init__.py
--------------------------------------------------------------------------------
/examples/WH2009/data/README.txt:
--------------------------------------------------------------------------------
1 | Copy in this folder the file WienerHammerBenchmark.csv contained in the .zip file:
2 | http://nonlinearbenchmark.org/FILES/BENCHMARKS/WIENERHAMMERSTEIN/WienerHammerstein2009Files.zip
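To automate the download, a minimal sketch (assuming the URL above is still reachable; the CSV may sit in a subfolder inside the archive):

    import io
    import urllib.request
    import zipfile

    URL = ("http://nonlinearbenchmark.org/FILES/BENCHMARKS/WIENERHAMMERSTEIN/"
           "WienerHammerstein2009Files.zip")
    with urllib.request.urlopen(URL) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))
    for member in archive.namelist():
        if member.endswith("WienerHammerBenchmark.csv"):
            # extract the CSV into this folder, flattening any subfolder
            with archive.open(member) as src, open("WienerHammerBenchmark.csv", "wb") as dst:
                dst.write(src.read())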
--------------------------------------------------------------------------------
/examples/Wiener/.gitignore:
--------------------------------------------------------------------------------
1 | models/NLS_nonoise
2 |
--------------------------------------------------------------------------------
/examples/Wiener/README.txt:
--------------------------------------------------------------------------------
1 | An example inspired by the paper [1].
2 |
3 | It concerns the training of a Wiener system (G-F) with process noise entering the system before the non-linearity.
4 | In such a setting, the non-linear least-squares estimate is generally biased.
5 |
6 | To deal with this issue, we perform an approximate Maximum Likelihood (ML) estimation.
7 | We approximate the integral (27) in [1] using the rectangular integration rule and differentiate through it
8 | using plain back-propagation (see W_train_ML_refine.py). A minimal sketch of this approximation is given at the end of this file.
9 |
10 | We use the non-linear least-squares estimate (see W_train_NLS.py)
11 | to initialize the estimate for the heavier ML estimation task.
12 |
13 | To run the example:
14 |
15 | 1. install pyro, e.g., "pip install pyro-ppl"
16 | 2. python W_train_NLS_nonoise.py
17 | 3. python W_train_NLS.py
18 | 4. python W_train_ML_refine.py
19 | 5. python W_test.py
20 |
21 | [1] A. Hagenblad, L. Ljung, and A. Wills. Maximum likelihood identification of Wiener models. Automatica, 44 (2008) 2697–2705
22 |
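As promised above, a minimal sketch of the rectangular-rule ML approximation. This is an
illustration, not the code of W_train_ML_refine.py: the names (wiener_ml_nll, n_grid, n_std)
are ours, torch.tanh stands in for the trained non-linearity F, and the noise variances
match those used in W_generate_noise.py.

    import math
    import torch

    def wiener_ml_nll(y, x0, F, var_w, var_e, n_grid=100, n_std=4.0):
        # Marginalize the white process noise w (entering before F) on a uniform
        # grid, i.e., approximate integral (27) in [1] with the rectangular rule,
        # independently for each time sample.
        std_w = math.sqrt(var_w)
        w = torch.linspace(-n_std*std_w, n_std*std_w, n_grid)   # grid for w
        dw = w[1] - w[0]
        x = x0.unsqueeze(-2) + w.view(1, 1, -1, 1)              # (batch, time, n_grid, 1)
        y0 = F(x)                                               # non-linearity at each grid point
        lik_y = torch.exp(-(y.unsqueeze(-2) - y0)**2/(2*var_e))/math.sqrt(2*math.pi*var_e)
        p_w = torch.exp(-w**2/(2*var_w))/math.sqrt(2*math.pi*var_w)
        marginal = torch.sum(lik_y.squeeze(-1)*p_w, dim=-1)*dw  # (batch, time)
        return -torch.log(marginal + 1e-12).sum()

    # Illustrative usage with a stand-in non-linearity:
    x0 = torch.randn(1, 200, 1, requires_grad=True)  # output of the linear block G
    y = torch.tanh(x0).detach() + 0.1*torch.randn(1, 200, 1)
    nll = wiener_ml_nll(y, x0, torch.tanh, var_w=4.0, var_e=1.0)
    nll.backward()  # gradients flow back through the approximation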
--------------------------------------------------------------------------------
/examples/Wiener/W_generate_noise.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.signal
3 | import matplotlib.pyplot as plt
4 | import pandas as pd
5 | import os
6 | import h5py
7 |
8 |
9 | #a = [1.0, 0.3, -0.3]
10 | #b = [0.0, -0.3, 0.3]
11 | #c = []
12 |
13 | a = [1, 0.5]
14 | b = [0.0, 1.0]
15 | c = [1.0, 1.0, 0.0]
16 |
17 | var_w = 4.0
18 | var_e = 1.0
19 |
20 |
21 | def static_nl(y_lin):
22 | y_nl = np.polyval(c, y_lin)  # polynomial in y_lin, highest-order coefficient first
23 | return y_nl
24 |
25 |
26 | if __name__ == '__main__':
27 |
28 | n_real = 50
29 | N = 1000
30 | add_noise = True # add process noise
31 | output_filename = 'dataset.h5'
32 | dataset_name = 'train_noise'
33 |
34 | # In[]
35 | var_w = add_noise*var_w  # process noise variance (zero when add_noise is False)
36 |
37 | std_w = np.sqrt(var_w)
38 | std_e = np.sqrt(var_e)
39 |
40 | # In[Wiener with noise model]
41 | u = np.random.randn(n_real, N)
42 | x0 = scipy.signal.lfilter(b, a, u, axis=-1)
43 | w = std_w*np.random.randn(n_real, N)
44 | x = x0 + w
45 | y0 = static_nl(x)
46 | e = std_e*np.random.randn(n_real, N)
47 | y = y0+e
48 |
49 |
50 | # In[Plot]
51 | plt.figure()
52 | plt.plot(y[0, :], 'r', label='y')
53 | plt.plot(y0[0, :], 'g', label='y0')
54 | plt.legend()
55 | plt.show()
56 |
57 | plt.figure()
58 | plt.plot(x[0, :], 'g', label='x')
59 | plt.plot(x0[0, :], 'r', label='x0')
60 | plt.legend()
61 | plt.show()
62 |
63 | # In[Save]
64 | if not (os.path.exists('data')):
65 | os.makedirs('data')
66 | filename = os.path.join('data', output_filename)
67 | hf = h5py.File(filename, 'a')
68 | ds_signal = hf.create_group(dataset_name) # signal group
69 | ds_signal.create_dataset('u', data=u[..., None])
70 | ds_signal.create_dataset('x0', data=x0[..., None])
71 | ds_signal.create_dataset('w', data=w[..., None])
72 | ds_signal.create_dataset('y0', data=y0[..., None])
73 | ds_signal.create_dataset('y', data=y[..., None])
74 | hf.close()
75 |
76 |
77 |
--------------------------------------------------------------------------------
/examples/Wiener/W_generate_nonoise.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.signal
3 | import matplotlib.pyplot as plt
4 | import pandas as pd
5 | import os
6 | import h5py
7 |
8 |
9 | #a = [1.0, 0.3, -0.3]
10 | #b = [0.0, -0.3, 0.3]
11 | #c = []
12 |
13 | a = [1, 0.5]
14 | b = [0.0, 1.0]
15 | c = [1.0, 1.0, 0.0]
16 |
17 | var_w = 4.0
18 | var_e = 1.0
19 |
20 |
21 | def static_nl(y_lin):
22 | y_nl = np.polyval(c, y_lin)  # polynomial in y_lin, highest-order coefficient first
23 | return y_nl
24 |
25 |
26 | if __name__ == '__main__':
27 |
28 | n_real = 50
29 | N = 1000
30 | add_noise = False # add process noise
31 | output_filename = 'dataset.h5'
32 | dataset_name = 'train_nonoise'
33 |
34 | # In[]
35 | var_w = add_noise*var_w  # process noise variance (zero when add_noise is False)
36 |
37 | std_w = np.sqrt(var_w)
38 | std_e = np.sqrt(var_e)
39 |
40 | # In[Wiener with noise model]
41 | u = np.random.randn(n_real, N)
42 | x0 = scipy.signal.lfilter(b, a, u, axis=-1)
43 | w = std_w*np.random.randn(n_real, N)
44 | x = x0 + w
45 | y0 = static_nl(x)
46 | e = std_e*np.random.randn(n_real, N)
47 | y = y0+e
48 |
49 |
50 | # In[Plot]
51 | plt.figure()
52 | plt.plot(y[0, :], 'r', label='y')
53 | plt.plot(y0[0, :], 'g', label='y0')
54 | plt.legend()
55 | plt.show()
56 |
57 | plt.figure()
58 | plt.plot(x[0, :], 'g', label='x')
59 | plt.plot(x0[0, :], 'r', label='x0')
60 | plt.legend()
61 | plt.show()
62 |
63 | # In[Save]
64 | if not (os.path.exists('data')):
65 | os.makedirs('data')
66 | filename = os.path.join('data', output_filename)
67 | hf = h5py.File(filename, 'a')
68 | ds_signal = hf.create_group(dataset_name) # signal group
69 | ds_signal.create_dataset('u', data=u[..., None])
70 | ds_signal.create_dataset('x0', data=x0[..., None])
71 | ds_signal.create_dataset('w', data=w[..., None])
72 | ds_signal.create_dataset('y0', data=y0[..., None])
73 | ds_signal.create_dataset('y', data=y[..., None])
74 | hf.close()
75 |
76 |
77 |
--------------------------------------------------------------------------------
/examples/Wiener/W_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import pyro
4 | import pyro.distributions as dist
5 | import scipy.signal
6 | import matplotlib.pyplot as plt
7 | import pandas as pd
8 | import os
9 | import h5py
10 | from dynonet.lti import SisoLinearDynamicalOperator
11 | from dynonet.static import SisoStaticNonLinearity
12 | import time
13 |
14 | if __name__ == '__main__':
15 |
16 | n_a = 1
17 | n_b = 1
18 | n_k = 1
19 |
20 | model_name = 'NLS_nonoise'
21 | dataset_name = 'train_nonoise'
22 | # In[Load data]
23 | filename = os.path.join('data', 'dataset.h5')
24 | h5_data = h5py.File(filename, 'r')
25 | u = np.array(h5_data[dataset_name]['u'])
26 | y = np.array(h5_data[dataset_name]['y'])
27 | y0 = np.array(h5_data[dataset_name]['y0'])
28 |
29 | # Train on a single example
30 | u = u[[0], ...]
31 | y = y[[0], ...]
32 |
33 | batch_size = u.shape[0]
34 | seq_len = u.shape[1]
35 | n_u = u.shape[2]
36 | n_y = y.shape[2]
37 |
38 | # In[To tensors]
39 | u_torch = torch.tensor(u, dtype=torch.float32)
40 | y_torch = torch.tensor(y, dtype=torch.float32)
41 |
42 | # In[Deterministic model]
43 | G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
44 | F = SisoStaticNonLinearity(n_hidden=10)
45 | model_folder = os.path.join("models", model_name)
46 | G.load_state_dict(torch.load(os.path.join(model_folder, "G.pkl")))
47 | F.load_state_dict(torch.load(os.path.join(model_folder, "F.pkl")))
48 |
49 | # In[Simulate]
50 | y_lin = G(u_torch)
51 | y_nl = F(y_lin)
52 | y_hat = y_nl
53 |
54 | # In[Detach]
55 | y_hat = y_hat.detach().numpy()
56 |
57 | # In[Predict]
58 | plt.plot(y0[0, :, 0], 'k', label='y0')
59 | plt.plot(y_hat[0, :, 0], 'g', label=r'$\hat y$')
60 | plt.plot(y0[0, :, 0]-y_hat[0, :, 0], 'r', label='e')
61 | plt.grid()
62 | plt.legend()
63 | plt.show()
64 |
65 |
66 | # In[Plot loss]
67 | #plt.figure()
68 | #plt.plot(y0[0, :, 0], 'k', label='y')
69 | #plt.plot(y[0, :, 0], 'r', label='y0')
70 |
71 | #plt.grid()
72 | #plt.legend()
--------------------------------------------------------------------------------
/examples/Wiener/W_train_NLS.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import matplotlib.pyplot as plt
4 | import os
5 | import h5py
6 | from dynonet.lti import SisoLinearDynamicalOperator
7 | from dynonet.static import SisoStaticNonLinearity
8 | import time
9 |
10 | if __name__ == '__main__':
11 |
12 | n_a = 1
13 | n_b = 1
14 | n_k = 1
15 |
16 | num_iter = 20000
17 | test_freq = 100
18 | lr = 1e-3
19 |
20 | #model_name = 'ML_noise'
21 | model_name = 'NLS_noise'
22 | dataset_name = 'train_noise'
23 | # In[Load data]
24 | filename = os.path.join('data', 'dataset.h5')
25 | h5_data = h5py.File(filename, 'r')
26 | u = np.array(h5_data[dataset_name]['u'])
27 | y = np.array(h5_data[dataset_name]['y'])
28 | y0 = np.array(h5_data[dataset_name]['y0'])
29 |
30 | # y = (y - np.mean(y[[0], :, :], axis=-2))/(np.std(y[[0], :, :], axis=-2))
31 | batch_size = u.shape[0]
32 | seq_len = u.shape[1]
33 | n_u = u.shape[2]
34 | n_y = y.shape[2]
35 |
36 | # In[To tensors]
37 | u_torch = torch.tensor(u, dtype=torch.float32)
38 | y_torch = torch.tensor(y, dtype=torch.float32)
39 |
40 | # In[Deterministic model]
41 | G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
42 | F = SisoStaticNonLinearity(n_hidden=10)
43 |
44 | # In[Log-likelihood]
45 |
46 |
47 | optimizer = torch.optim.Adam([
48 | {'params': G.parameters(), 'lr': lr},
49 | {'params': F.parameters(), 'lr': lr},
50 | ], lr=lr)
51 |
52 | # In[Train]
53 | LOSS = []
54 | start_time = time.time()
55 | for itr in range(0, num_iter):
56 |
57 | optimizer.zero_grad()
58 |
59 | # Simulate
60 | y_lin = G(u_torch)
61 | y_nl = F(y_lin)
62 | y_hat = y_nl
63 |
64 | # Compute fit loss
65 | err_fit = y_torch - y_hat
66 | loss_fit = torch.mean(err_fit**2)
67 | loss = loss_fit
68 |
69 | LOSS.append(loss.item())
70 | if itr % test_freq == 0:
71 | print(f'Iter {itr} | Fit Loss {loss:.4f}')
72 |
73 | # Optimize
74 | loss.backward()
75 | optimizer.step()
76 |
77 | train_time = time.time() - start_time
78 |
79 |
80 | # In[Save model]
81 | model_folder = os.path.join("models", model_name)
82 | if not os.path.exists(model_folder):
83 | os.makedirs(model_folder)
84 |
85 | torch.save(G.state_dict(), os.path.join(model_folder, "G.pkl"))
86 | torch.save(F.state_dict(), os.path.join(model_folder, "F.pkl"))
87 |
88 | # In[Simulate noise-free]
89 | with torch.no_grad():
90 | y_lin = G(u_torch)
91 | y_nl = F(y_lin)
92 | y_hat = y_nl
93 |
94 | # In[Numpy]
95 | y_lin = y_lin.numpy()
96 | y_nl = y_nl.numpy()
97 | y_hat = y_hat.numpy()
98 |
99 | # In[Predict]
100 | plt.figure()
101 | plt.plot(y0[0, :, 0], 'k')
102 | plt.plot(y_hat[0, :, 0], 'g')
103 | plt.plot(y0[0, :, 0] - y_hat[0, :, 0], 'r')
104 | plt.show()
105 |
106 | plt.figure()
107 | plt.plot(y_lin[0, :], y_hat[0, :], '*k', label='estimated static non-linearity')
108 | plt.legend()
109 |
110 | # In[Compare with noisy measurements]
111 | plt.plot(y_torch[0, :, 0], 'k')
112 | plt.plot(y_hat[0, :, 0], 'g')
113 | plt.plot(y_torch[0, :, 0]-y_hat[0, :, 0], 'r')
114 | plt.show()
115 |
116 | # In[Plot loss]
117 | plt.figure()
118 | plt.plot(LOSS)
119 | plt.show()
120 |
121 |
--------------------------------------------------------------------------------
/examples/Wiener/W_train_NLS_nonoise.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import pyro
4 | import pyro.distributions as dist
5 | import scipy.signal
6 | import matplotlib.pyplot as plt
7 | import pandas as pd
8 | import os
9 | import h5py
10 | from dynonet.lti import SisoLinearDynamicalOperator
11 | from dynonet.static import SisoStaticNonLinearity
12 | import time
13 |
14 | if __name__ == '__main__':
15 |
16 | n_a = 1
17 | n_b = 1
18 | n_k = 1
19 |
20 | num_iter = 20000
21 | test_freq = 100
22 | lr = 1e-3
23 |
24 | #model_name = 'ML_noise'
25 | model_name = 'NLS_nonoise'
26 | dataset_name = 'train_nonoise'
27 | # In[Load data]
28 | filename = os.path.join('data', 'dataset.h5')
29 | h5_data = h5py.File(filename, 'r')
30 | u = np.array(h5_data[dataset_name]['u'])
31 | y = np.array(h5_data[dataset_name]['y'])
32 |
33 | # y = (y - np.mean(y[[0], :, :], axis=-2))/(np.std(y[[0], :, :], axis=-2))
34 | batch_size = u.shape[0]
35 | seq_len = u.shape[1]
36 | n_u = u.shape[2]
37 | n_y = y.shape[2]
38 |
39 | # In[To tensors]
40 | u_torch = torch.tensor(u, dtype=torch.float32)
41 | y_torch = torch.tensor(y, dtype=torch.float32)
42 |
43 | # In[Deterministic model]
44 | G = SisoLinearDynamicalOperator(n_b, n_a, n_k=n_k)
45 | F = SisoStaticNonLinearity(n_hidden=10)
46 |
47 | # In[Log-likelihood]
48 |
49 |
50 | optimizer = torch.optim.Adam([
51 | {'params': G.parameters(), 'lr': lr},
52 | {'params': F.parameters(), 'lr': lr},
53 | ], lr=lr)
54 |
55 | # In[Train]
56 | LOSS = []
57 | start_time = time.time()
58 | for itr in range(0, num_iter):
59 |
60 | optimizer.zero_grad()
61 |
62 | # Simulate
63 | y_lin = G(u_torch)
64 | y_nl = F(y_lin)
65 | y_hat = y_nl
66 |
67 | # Compute fit loss
68 | err_fit = y_torch - y_hat
69 | loss_fit = torch.mean(err_fit**2)
70 | loss = loss_fit
71 |
72 | LOSS.append(loss.item())
73 | if itr % test_freq == 0:
74 | print(f'Iter {itr} | Fit Loss {loss:.4f}')
75 |
76 | # Optimize
77 | loss.backward()
78 | optimizer.step()
79 |
80 | train_time = time.time() - start_time
81 |
82 |
83 | # In[Save model]
84 | model_folder = os.path.join("models", model_name)
85 | if not os.path.exists(model_folder):
86 | os.makedirs(model_folder)
87 |
88 | torch.save(G.state_dict(), os.path.join(model_folder, "G.pkl"))
89 | torch.save(F.state_dict(), os.path.join(model_folder, "F.pkl"))
90 |
91 |
92 | # In[Detach]
93 | y_hat = y_hat.detach().numpy()
94 |
95 | # In[Predict]
96 | plt.plot(y_torch[0, :, 0], 'k')
97 | plt.plot(y_hat[0, :, 0], 'g')
98 | plt.plot(y_torch[0, :, 0]-y_hat[0, :, 0], 'r')
99 | plt.show()
100 |
101 | # In[Plot loss]
102 | plt.figure()
103 | plt.plot(LOSS)
104 | plt.show()
105 |
106 |
--------------------------------------------------------------------------------
/examples/Wiener/data/dataset.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/Wiener/data/dataset.h5
--------------------------------------------------------------------------------
/examples/Wiener/models/ML_noise/F.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/Wiener/models/ML_noise/F.pkl
--------------------------------------------------------------------------------
/examples/Wiener/models/ML_noise/G.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/Wiener/models/ML_noise/G.pkl
--------------------------------------------------------------------------------
/examples/Wiener/models/NLS_noise/F.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/Wiener/models/NLS_noise/F.pkl
--------------------------------------------------------------------------------
/examples/Wiener/models/NLS_noise/G.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/Wiener/models/NLS_noise/G.pkl
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/examples/__init__.py
--------------------------------------------------------------------------------
/examples/coupled_drive/README.md:
--------------------------------------------------------------------------------
1 | # Coupled Drive Example
2 |
3 | Two electric motors driving a pulley via a flexible belt. Steps to run:
4 |
5 | 1. Obtain the data, which includes a report, as directed in [data/README.txt](data/README.txt).
6 | 2. ```python drive_train_W.py```
7 | 3. ```python drive_plot.py```
8 |
9 |
10 |
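Both scripts read the input column `u1` and the output column `z1` from the PRBS dataset (see `drive_train_W.py`). As a quick sanity check that the data is in place (an illustrative snippet, not one of the scripts above):

```python
import pandas as pd

df = pd.read_csv("data/DATAPRBS.csv")
print(df[["u1", "z1"]].head())  # columns used by the training and plotting scripts
```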
--------------------------------------------------------------------------------
/examples/coupled_drive/data/README.txt:
--------------------------------------------------------------------------------
1 | Copy in this folder DATAPRBS.csv and DATAUNIF.csv contained in the .zip file:
2 |
3 | http://www.it.uu.se/research/publications/reports/2017-024/CoupledElectricDrivesDataSetAndReferenceModels.zip
4 |
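A minimal sketch to automate the download (assuming the URL above is still reachable; the CSV files may sit in a subfolder inside the archive):

    import io
    import urllib.request
    import zipfile

    URL = ("http://www.it.uu.se/research/publications/reports/2017-024/"
           "CoupledElectricDrivesDataSetAndReferenceModels.zip")
    with urllib.request.urlopen(URL) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))
    for member in archive.namelist():
        if member.endswith(("DATAPRBS.csv", "DATAUNIF.csv")):
            # extract each CSV into this folder, flattening any subfolder
            name = member.rsplit("/", 1)[-1]
            with archive.open(member) as src, open(name, "wb") as dst:
                dst.write(src.read())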
--------------------------------------------------------------------------------
/examples/coupled_drive/drive_plot.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | import matplotlib.pyplot as plt
6 |
7 | if __name__ == '__main__':
8 |
9 | # Set seed for reproducibility
10 | np.random.seed(0)
11 | torch.manual_seed(0)
12 |
13 | # Settings
14 | add_noise = True
15 | lr = 2e-4
16 | num_iter = 1
17 | test_freq = 100
18 | n_fit = 100000
19 | decimate = 1
20 | n_batch = 1
21 | n_b = 3
22 | n_f = 3
23 |
24 | # Column names in the dataset
25 | COL_U = ['u1']
26 | COL_Y = ['z1']
27 |
28 | # Load dataset
29 | df_X = pd.read_csv(os.path.join("data", "DATAPRBS.csv"))
30 |
31 | # Extract data
32 | y = np.array(df_X[COL_Y], dtype=np.float32)
33 | u = np.array(df_X[COL_U], dtype=np.float32)
34 | u = u-np.mean(u)
35 | fs = 10**7/2**14
36 | N = y.size
37 | ts = 1/fs
38 | t = np.arange(N)*ts
39 |
40 | # Fit data
41 | y_fit = y[:n_fit:decimate]
42 | u_fit = u[:n_fit:decimate]
43 | t_fit = t[0:n_fit:decimate]
44 |
45 | # In[Plot]
46 | fig, ax = plt.subplots(2, 1, sharex=True)
47 | ax[0].plot(t_fit, y_fit, 'k', label="$y$")
48 | ax[0].grid()
49 | ax[1].plot(t_fit, u_fit, 'k', label="$u$")
50 | ax[1].grid()
51 | plt.legend()
52 | plt.show()
53 |
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/examples/coupled_drive/drive_train_W.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | from dynonet.lti import SisoLinearDynamicalOperator
6 | from dynonet.static import SisoStaticNonLinearity
7 | import time
8 | import dynonet.metrics
9 | import matplotlib.pyplot as plt
10 |
11 | if __name__ == '__main__':
12 |
13 | # In[Set seed for reproducibility]
14 | np.random.seed(0)
15 | torch.manual_seed(0)
16 |
17 | # In[Settings]
18 | lr = 1e-3
19 | num_iter = 400000
20 | test_freq = 100
21 | n_fit = 500
22 | decimate = 1
23 | n_batch = 1
24 | n_b = 4
25 | n_a = 4
26 |
27 |
28 | # Column names in the dataset
29 | COL_U = ['u1']
30 | COL_Y = ['z1']
31 |
32 | # In[Load dataset]
33 | df_X = pd.read_csv(os.path.join("data", "DATAPRBS.csv"))
34 |
35 | # Extract data
36 | y = np.array(df_X[COL_Y], dtype=np.float32)
37 | u = np.array(df_X[COL_U], dtype=np.float32)
38 | N = y.size
39 | fs = 50 # Sampling frequency (Hz)
40 | ts = 1/fs
41 | t = np.arange(N)*ts
42 |
43 | # Fit data
44 | y_fit = y[:n_fit:decimate] - 1.5
45 | u_fit = u[:n_fit:decimate]
46 | t_fit = t[0:n_fit:decimate]
47 |
48 |
49 | # In[Prepare data]
50 | u_fit_torch = torch.tensor(u_fit[None, :, :], dtype=torch.float)
51 | y_fit_torch = torch.tensor(y_fit[None, :, :], dtype=torch.float)
52 |
53 |
54 | # In[Setup model]
55 | G1 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
56 | F = SisoStaticNonLinearity(n_hidden=16, activation='tanh')
57 | G2 = SisoLinearDynamicalOperator(n_b=n_b, n_a=n_a, n_k=1)
58 |
59 | # Setup optimizer
60 | optimizer = torch.optim.Adam([
61 | {'params': G1.parameters(), 'lr': lr},
62 | {'params': F.parameters(), 'lr': lr},
63 | {'params': G2.parameters(), 'lr': lr}], lr=lr)  # G2 is part of the simulated model, so it must be trained too
64 |
65 | # In[Train]
66 | LOSS = []
67 | start_time = time.time()
68 | for itr in range(0, num_iter):
69 |
70 | optimizer.zero_grad()
71 |
72 | # Simulate
73 | y_lin = G1(u_fit_torch)
74 | y_nl = F(y_lin)
75 | y_hat = G2(y_nl)
76 |
77 | # Compute fit loss
78 | err_fit = y_fit_torch - y_hat
79 | loss_fit = torch.mean(err_fit**2)
80 | loss = loss_fit
81 |
82 | LOSS.append(loss.item())
83 | if itr % test_freq == 0:
84 | with torch.no_grad():
85 | RMSE = torch.sqrt(loss)
86 | print(f'Iter {itr} | Fit Loss {loss_fit:.6f} | RMSE:{RMSE:.4f}')
87 |
88 | # Optimize
89 | loss.backward()
90 | optimizer.step()
91 |
92 | train_time = time.time() - start_time
93 | print(f"\nTrain time: {train_time:.2f}") # 182 seconds
94 |
95 | # In[To numpy]
96 |
97 | y_hat = y_hat.detach().numpy()[0, :, :]
98 | y_lin = y_lin.detach().numpy()[0, :, :]
99 |
100 |
101 | # In[Plot]
102 | plt.figure()
103 | plt.plot(t_fit, y_fit, 'k', label="$y$")
104 | plt.plot(t_fit, y_hat, 'b', label=r"$\hat y$")
105 | plt.legend()
106 | plt.show()
107 |
108 | plt.figure()
109 | plt.plot(LOSS)
110 | plt.grid(True)
111 | plt.show()
112 |
113 | # In[Plot static non-linearity]
114 |
115 | y1_lin_min = np.min(y_lin)
116 | y1_lin_max = np.max(y_lin)
117 |
118 | in_nl = np.arange(y1_lin_min, y1_lin_max, (y1_lin_max- y1_lin_min)/1000).astype(np.float32).reshape(-1, 1)
119 |
120 | with torch.no_grad():
121 | out_nl = F(torch.as_tensor(in_nl))
122 |
123 | plt.figure()
124 | plt.plot(in_nl, out_nl, 'b')
125 |
126 | #plt.plot(y1_lin, y1_nl, 'b*')
127 | plt.xlabel('Static non-linearity input (-)')
128 | plt.ylabel('Static non-linearity output (-)')
129 | plt.grid(True)
130 | plt.show()
131 |
132 | # In[Metrics]
133 | e_rms = dynonet.metrics.error_rmse(y_hat, y_fit)[0]
134 | print(f"RMSE: {e_rms:.2f}")
135 |
136 |
137 | # In[Analysis]
138 | import control
139 |
140 | #Gg_lin = control.TransferFunction(G.b_coeff.detach().numpy(), np.r_[1.0, G.a_coeff.detach().numpy()], ts)
141 | #mag, phase, omega = control.bode(Gg_lin)
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [wheel]
2 | universal = 1
3 |
4 | [metadata]
5 | name = dynonet
6 | version = 0.1.2
7 | author = Marco Forgione and Dario Piga
8 | author_email = marco.forgione1986@gmail.com
9 | description = dynoNet: A neural network architecture for learning dynamical systems
10 | long_description = file: README.md
11 | long_description_content_type = text/markdown
12 | license_file = LICENSE
13 | license = MIT
14 | url = https://github.com/forgi86/dynonet
15 | project_urls =
16 | Bug Tracker = https://github.com/forgi86/dynonet/issues
17 | classifiers =
18 | License :: OSI Approved :: MIT License
19 | Operating System :: OS Independent
20 | Programming Language :: Python :: 3.7
21 | Development Status :: 4 - Beta
22 | Environment :: Console
23 | Intended Audience :: Developers
24 | Intended Audience :: Education
25 | Intended Audience :: Science/Research
26 | Topic :: Scientific/Engineering
27 |
28 | [options]
29 | zip_safe = False
30 | include_package_data = True
31 | packages = find:
32 | package_dir =
33 | = src
34 | install_requires =
35 | numpy>=1.19.4
36 | scipy>=1.5.4
37 | matplotlib>=3.3.3
38 | pandas>=1.1.4
39 | torch>=1.4
40 |
41 | [options.packages.find]
42 | where = src
43 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | setuptools.setup()
3 |
4 |
--------------------------------------------------------------------------------
/sphinx/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = source
8 | BUILDDIR = build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/sphinx/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/sphinx/source/code.rst:
--------------------------------------------------------------------------------
1 | dynoNet API
2 | ==========================
3 | ---------------
4 | LTI blocks
5 | ---------------
6 |
7 | .. automodule:: dynonet.lti
8 | :members:
9 | :special-members:
10 | :member-order: bysource
11 | :exclude-members: __init__, forward
12 |
13 | ---------------
14 | Static blocks
15 | ---------------
16 |
17 | .. automodule:: dynonet.static
18 | :members:
19 | :special-members:
20 | :member-order: bysource
21 | :exclude-members: __init__
22 |
23 | ---------------
24 | Metrics
25 | ---------------
26 |
27 | .. automodule:: dynonet.metrics
28 | :members:
29 | :special-members:
30 | :member-order: bysource
31 | :exclude-members: __init__
32 |
33 | ---------------
34 | Filtering
35 | ---------------
36 |
37 | .. automodule:: dynonet.filtering
38 | :members:
39 | :special-members:
40 | :member-order: bysource
41 | :exclude-members: __init__
42 |
43 |
--------------------------------------------------------------------------------
/sphinx/source/index.rst:
--------------------------------------------------------------------------------
1 | .. dynoNet documentation master file, created by
2 | sphinx-quickstart on Fri Apr 10 01:50:34 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to the dynoNet documentation!
7 | =====================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | code
14 |
15 |
16 | Indices and tables
17 | ==================
18 |
19 | * :ref:`genindex`
20 | * :ref:`modindex`
21 | * :ref:`search`
22 |
--------------------------------------------------------------------------------
/src/dynonet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/dynonet/56699401b141065a45403a18977333e0ec8d7303/src/dynonet/__init__.py
--------------------------------------------------------------------------------
/src/dynonet/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | #np.array
3 |
4 |
5 | def r_squared(y_true, y_pred, time_axis=0):
6 | """ Computes the R-square index.
7 |
8 | The R-squared index is computed separately on each channel.
9 |
10 | Parameters
11 | ----------
12 | y_true : np.array
13 | Array of true values. It must be at least 2D.
14 | y_pred : np.array
15 | Array of predicted values. It must have the same shape as y_true.
16 | time_axis : int
17 | Time axis. All other axes define separate channels.
18 |
19 | Returns
20 | -------
21 | r_squared_val : np.array
22 | Array of R-squared values.
23 | """
24 |
25 | SSE = np.sum((y_pred - y_true)**2, axis=time_axis)
26 | y_mean = np.mean(y_true, axis=time_axis, keepdims=True)
27 | SST = np.sum((y_true - y_mean)**2, axis=time_axis)
28 |
29 | return 1.0 - SSE/SST
30 |
31 |
32 | def error_rmse(y_true, y_pred, time_axis=0):
33 | """ Computes the Root Mean Square Error (RMSE).
34 |
35 | The RMSE index is computed separately on each channel.
36 |
37 | Parameters
38 | ----------
39 | y_true : np.array
40 | Array of true values. It must be at least 2D.
41 | y_pred : np.array
42 | Array of predicted values. It must have the same shape as y_true.
43 | time_axis : int
44 | Time axis. All other axes define separate channels.
45 |
46 | Returns
47 | -------
48 | RMSE : np.array
49 | Array of RMSE values.
50 |
51 | """
52 |
53 | MSE = np.mean((y_pred - y_true)**2, axis=time_axis)
54 | RMSE = np.sqrt(MSE)
55 | return RMSE
56 |
57 |
58 | def error_mean(y_true, y_pred, time_axis=0):
59 | """ Computes the error mean value.
60 |
61 | The error mean is computed separately on each channel.
62 |
63 | Parameters
64 | ----------
65 | y_true : np.array
66 | Array of true values. It must be at least 2D.
67 | y_pred : np.array
68 | Array of predicted values. It must have the same shape as y_true.
69 | time_axis : int
70 | Time axis. All other axes define separate channels.
71 |
72 | Returns
73 | -------
74 | e_mean : np.array
75 | Array of error means.
76 | """
77 |
78 | e_mean = np.mean(y_true - y_pred, axis=time_axis)
79 | return e_mean
80 |
81 |
82 | def error_mae(y_true, y_pred, time_axis=0):
83 | """ Computes the error Mean Absolute Value (MAE)
84 |
85 | The MAE index is computed separately on each channel.
86 |
87 | Parameters
88 | ----------
89 | y_true : np.array
90 | Array of true values. It must be at least 2D.
91 | y_pred : np.array
92 | Array of predicted values. It must have the same shape as y_true.
93 | time_axis : int
94 | Time axis. All other axes define separate channels.
95 |
96 | Returns
97 | -------
98 | e_mean : np.array
99 | Array of error mean absolute values.
100 | """
101 |
102 | e_mean = np.mean(np.abs(y_true - y_pred), axis=time_axis)
103 | return e_mean
104 |
105 | def fit_index(y_true, y_pred, time_axis=0):
106 | """ Computes the per-channel fit index.
107 |
108 | The fit index is commonly used in System Identification. See the definition in the System Identification Toolbox
109 | or in the paper 'Nonlinear System Identification: A User-Oriented Road Map',
110 | https://arxiv.org/abs/1902.00683, page 31. In formulas: fit = 100*(1 - ||y_true - y_pred|| / ||y_true - mean(y_true)||).
111 | The fit index is computed separately on each channel.
112 |
113 | Parameters
114 | ----------
115 | y_true : np.array
116 | Array of true values. It must be at least 2D.
117 | y_pred : np.array
118 | Array of predicted values. It must have the same shape as y_true.
119 | time_axis : int
120 | Time axis. All other axes define separate channels.
121 |
122 | Returns
123 | -------
124 | fit_val : np.array
125 | Array of fit index values (in %).
126 |
127 | """
128 |
129 | err_norm = np.linalg.norm(y_true - y_pred, axis=time_axis, ord=2) # || y - y_pred ||
130 | y_mean = np.mean(y_true, axis=time_axis)
131 | err_mean_norm = np.linalg.norm(y_true - y_mean, axis=time_axis, ord=2)  # || y - y_mean ||
132 | fit_val = 100*(1 - err_norm/err_mean_norm)
133 |
134 | return fit_val
135 |
136 |
137 | if __name__ == '__main__':
138 | N = 20
139 | ny = 2
140 | SNR = 10
141 | y_true = SNR*np.random.randn(N, ny)
142 | y_pred = np.copy(y_true) + np.random.randn(N, ny)
143 | err_rmse_val = error_rmse(y_true, y_pred)
144 | r_squared_val = r_squared(y_true, y_pred)
145 | fit_val = fit_index(y_true, y_pred)
146 |
147 | print(f"RMSE: {err_rmse_val}")
148 | print(f"R-squared: {r_squared_val}")
149 | print(f"fit index: {fit_val}")
150 |
--------------------------------------------------------------------------------
/src/dynonet/static.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class MimoStaticNonLinearity(nn.Module):
6 | r"""Applies a Static MIMO non-linearity.
7 | The non-linearity is implemented as a feed-forward neural network.
8 |
9 | Args:
10 | in_channels (int): Number of input channels
11 | out_channels (int): Number of output channels
12 | n_hidden (int, optional): Number of nodes in the hidden layer. Default: 20
13 | activation (str): Activation function. Either 'tanh', 'relu', or 'sigmoid'. Default: 'tanh'
14 |
15 | Shape:
16 | - Input: (..., in_channels)
17 | - Output: (..., out_channels)
18 |
19 | Examples::
20 |
21 | >>> in_channels, out_channels = 2, 4
22 | >>> F = MimoStaticNonLinearity(in_channels, out_channels)
23 | >>> batch_size, seq_len = 32, 100
24 | >>> u_in = torch.ones((batch_size, seq_len, in_channels))
25 | >>> y_out = F(u_in)  # shape: (batch_size, seq_len, out_channels)
26 | """
27 |
28 | def __init__(self, in_channels, out_channels, n_hidden=20, activation='tanh'):
29 | super(MimoStaticNonLinearity, self).__init__()
30 |
31 | activation_dict = {'tanh': nn.Tanh, 'relu': nn.ReLU, 'sigmoid': nn.Sigmoid}
32 |
33 | self.net = nn.Sequential(
34 | nn.Linear(in_channels, n_hidden),
35 | activation_dict[activation](), #nn.Tanh(),
36 | nn.Linear(n_hidden, out_channels)
37 | )
38 |
39 | def forward(self, u_lin):
40 | y_nl = self.net(u_lin)
41 | return y_nl
42 |
43 |
44 | class SisoStaticNonLinearity(MimoStaticNonLinearity):
45 | r"""Applies a Static SISO non-linearity.
46 | The non-linearity is implemented as a feed-forward neural network.
47 |
48 | Args:
49 | n_hidden (int, optional): Number of nodes in the hidden layer. Default: 20
50 | activation (str): Activation function. Either 'tanh', 'relu', or 'sigmoid'. Default: 'tanh'
51 |
52 | Shape:
53 | - Input: (..., 1)
54 | - Output: (..., 1)
55 |
56 | Examples::
57 |
58 | >>> F = SisoStaticNonLinearity(n_hidden=20)
59 | >>> batch_size, seq_len = 32, 100
60 | >>> u_in = torch.ones((batch_size, seq_len, 1))
61 | >>> y_out = F(u_in)  # shape: (batch_size, seq_len, 1)
62 | """
63 | def __init__(self, n_hidden=20, activation='tanh'):
64 | super(SisoStaticNonLinearity, self).__init__(in_channels=1, out_channels=1, n_hidden=n_hidden, activation=activation)
65 |
66 |
67 | class MimoChannelWiseNonLinearity(nn.Module):
68 | r"""Applies a Channel-wise non-linearity.
69 | The non-linearity is implemented as a set of feed-forward neural networks (each one operating on a different channel).
70 |
71 | Args:
72 | channels (int): Number of both input and output channels
73 | n_hidden (int, optional): Number of nodes in the hidden layer of each network. Default: 10
74 |
75 | Shape:
76 | - Input: (..., channels)
77 | - Output: (..., channels)
78 |
79 | Examples::
80 |
81 | >>> channels = 4
82 | >>> F = MimoChannelWiseNonLinearity(channels)
83 | >>> batch_size, seq_len = 32, 100
84 | >>> u_in = torch.ones((batch_size, seq_len, channels))
85 | >>> y_out = F(u_in)  # shape: (batch_size, seq_len, channels)
86 |
87 | """
88 |
89 | def __init__(self, channels, n_hidden=10):
90 | super(MimoChannelWiseNonLinearity, self).__init__()
91 |
92 | self.net = nn.ModuleList()
93 | for channel_idx in range(channels):
94 | channel_net = nn.Sequential(
95 | nn.Linear(1, n_hidden),  # scalar input: one network per channel
96 | nn.ReLU(),
97 | nn.Linear(n_hidden, 1)
98 | )
99 | self.net.append(channel_net)
100 |
101 | def forward(self, u_lin):
102 |
103 | y_nl = []
104 | for channel_idx, u_channel in enumerate(u_lin.split(1, dim=-1)): # split over the last dimension (input channel)
105 | y_nl_channel = self.net[channel_idx](u_channel) # Process blocks individually
106 | y_nl.append(y_nl_channel)
107 |
108 | y_nl = torch.cat(y_nl, -1) # concatenate all output channels
109 | return y_nl
110 |
111 |
112 | if __name__ == '__main__':
113 |
114 | channels = 4
115 | nn1 = MimoChannelWiseNonLinearity(channels)
116 | in_data = torch.randn(100, 10, channels)
117 | net_out = nn1(in_data)
--------------------------------------------------------------------------------
/test_code/.gitignore:
--------------------------------------------------------------------------------
1 | stable_2ndorder.pdf
2 |
--------------------------------------------------------------------------------
/test_code/correlate_example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy
3 |
4 | from scipy import signal
5 |
6 | SIG = np.zeros((3, 8*128))
7 |
8 | sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
9 | sig_noise = sig + 0.1*np.random.randn(len(sig))
10 | corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
11 |
12 | SIG[0, :] = np.copy(sig)
13 | SIG[1, :] = np.copy(sig_noise)
14 | SIG[2, :] = np.copy(sig_noise)
15 |
16 | CORR = signal.correlate(SIG, np.ones((1, 128)), mode='same') / 128
17 |
18 | import matplotlib.pyplot as plt
19 | clock = np.arange(64, len(sig), 128)
20 | fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
21 | ax_orig.plot(sig)
22 | ax_orig.plot(clock, sig[clock], 'ro')
23 | ax_orig.set_title('Original signal')
24 | ax_noise.plot(sig_noise)
25 | ax_noise.set_title('Signal with noise')
26 | ax_corr.plot(corr)
27 | ax_corr.plot(clock, corr[clock], 'ro')
28 | ax_corr.axhline(0.5, ls=':')
29 | ax_corr.set_title('Cross-correlated with rectangular pulse')
30 | ax_orig.margins(0, 0.1)
31 | fig.tight_layout()
32 | fig.show()
33 |
34 |
35 | fig,ax = plt.subplots()
36 | plt.plot(CORR.T)
--------------------------------------------------------------------------------
/test_code/einsum_example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.signal
3 | import matplotlib.pyplot as plt
4 |
5 | allData = np.random.rand(20, 80, 200)
6 |
7 | # allData is a 20x80x200 array
8 |
9 |
10 | N = 100
11 | n_batch = 2
12 | n_b = 3
13 | grad_out = np.random.rand(N + n_b - 1, n_batch)
14 | sens_b = np.random.rand(N, 1)
15 | z = scipy.signal.correlate(grad_out, sens_b, 'valid')
16 |
--------------------------------------------------------------------------------
/test_code/filter_example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy as sp
3 | import scipy.signal
4 | import matplotlib.pyplot as plt
5 | import numba as nb
6 | import control
7 |
8 | if __name__ == '__main__':
9 |
10 | in_channels = 3
11 | out_channels = 4
12 | n_a = 0
13 | n_b = 10
14 | ts = 1.0
15 |
16 | a_coeff = np.random.randn(out_channels, in_channels, n_a)
17 | b_coeff = np.random.randn(out_channels, in_channels, n_b)
18 |
19 | a_poly = np.zeros_like(a_coeff, shape=(out_channels, in_channels, n_a+1))
20 | a_poly[:, :, 0] = 1.0
21 | b_poly = np.array(b_coeff)
22 |
23 | M = n_b # numerator coefficients
24 | N = n_a + 1 # denominator coefficients
25 | if M > N:
26 | num = b_poly
27 | den = np.c_[a_poly, np.zeros((out_channels, in_channels, M-N))]
28 | elif N > M:
29 |         num = np.c_[b_poly, np.zeros((out_channels, in_channels, N-M))]
30 | den = a_poly
31 | else:
32 | num = b_poly
33 | den = a_poly
34 |
35 | G = scipy.signal.TransferFunction(num[0, 0, :], den[0, 0, :], dt=ts)
36 | Gg = control.TransferFunction(num[0, 0, :], den[0, 0, :], ts)
37 |
38 | G_MIMO = control.TransferFunction(num, den, ts)
39 |
40 | len_imp = n_b
41 | t, y_imp = control.impulse_response(Gg, np.arange(len_imp)*ts)
42 |
43 |
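44 |     # Added sketch: with n_a = 0 the system is FIR, so the impulse response
45 |     # of each SISO entry should reproduce its numerator taps b_0 ... b_{n_b-1}.
46 |     plt.figure()
47 |     plt.stem(np.arange(len_imp), np.squeeze(y_imp)[:len_imp])
48 |     plt.title('Impulse response of G[0, 0]')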
--------------------------------------------------------------------------------
/test_code/filter_initial_cond_ab.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy
3 | import scipy.signal
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | def lfilter_ic(b_poly, a_poly, u_in, y_0=None, u_0=None):
8 |
9 | if y_0 is None and u_0 is None:
10 | z_init = None
11 | else:
12 | z_init = scipy.signal.lfiltic(b_poly, a_poly, y_0, u_0)
13 | if z_init is not None:
14 | y, z_end = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
15 | else:
16 | y = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
17 | z_init = None
18 | z_end = None
19 | return y, z_init, z_end
20 |
21 | if __name__ == '__main__':
22 |
23 | # Coefficients of the polynomials
24 | b_coeff = np.array([0.02, 0.03, 0.04]) # b_0, b_1, b_2
25 | a_coeff = np.array([-1.87212998940304, 0.942776404097492]) # a_1, a_2
26 |
27 | # Polynomials
28 | a_poly = np.r_[1.0, a_coeff]
29 | b_poly = np.array(b_coeff)
30 |
31 | eps = 1e-6 # numerical perturbation
32 |
33 | # In[Filter with initial condition]
34 | y_0 = np.array([1.0, 2.0]) # y_-1, y_-2
35 | u_0 = np.array([3.0, 4.0]) # u_-1, u_-2
36 | u_in = 0*np.random.randn(150)
37 | y, _, _ = lfilter_ic(b_poly, a_poly, u_in, y_0, u_0)
38 |
39 | # In[Analytical sensitivities b]
40 |
41 | sens_b0_an, _, _ = lfilter_ic([1.0], a_poly, u_in, [0.0], u_0) # this is correct!
42 | sens_b1_an, _, _ = lfilter_ic([0.0, 1.0], a_poly, u_in, [0.0], u_0) # this is correct!
43 | sens_b2_an, _, _ = lfilter_ic([0.0, 0.0, 1.0], a_poly, u_in, [0.0], u_0) # this is correct!
44 |
45 | # In[Analytical sensitivities a]
46 |
47 | sens_a1_an, _, _ = lfilter_ic([0.0, 1.0], a_poly, -y, [0.0], -y_0) # this is correct!
48 | sens_a2_an, _, _ = lfilter_ic([0.0, 0.0, 1.0], a_poly, -y, [0.0], -y_0) # this is correct!
49 |
50 | # In[Perturbation on coefficients b]
51 | # b0
52 | b_coeff_eps = np.array(b_coeff)
53 | b_coeff_eps[0] += eps
54 | b_poly_eps = np.array(b_coeff_eps)
55 | y_eps, _ ,_ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
56 | sens_b0_num = (y_eps - y) / eps
57 |
58 | # b1
59 | b_coeff_eps = np.array(b_coeff)
60 | b_coeff_eps[1] += eps
61 | b_poly_eps = np.array(b_coeff_eps)
62 | y_eps, _, _ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
63 | sens_b1_num = (y_eps - y) / eps
64 |
65 | # b2
66 | b_coeff_eps = np.array(b_coeff)
67 | b_coeff_eps[2] += eps
68 | b_poly_eps = np.array(b_coeff_eps)
69 |
70 | y_eps, _, _ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
71 | sens_b2_num = (y_eps - y) / eps
72 |
73 | # In[Perturbation on coefficients a]
74 | # a1
75 | a_coeff_eps = np.array(a_coeff)
76 | a_coeff_eps[0] += eps
77 | a_poly_eps = np.r_[1.0, a_coeff_eps]
78 | y_eps, _, _ = lfilter_ic(b_poly, a_poly_eps, u_in, y_0, u_0)
79 | sens_a1_num = (y_eps - y) / eps
80 |
81 | # a2
82 | a_coeff_eps = np.array(a_coeff)
83 | a_coeff_eps[1] += eps
84 | a_poly_eps = np.r_[1.0, a_coeff_eps]
85 | y_eps, _, _ = lfilter_ic(b_poly, a_poly_eps, u_in, y_0, u_0)
86 | sens_a2_num = (y_eps - y) / eps
87 |
88 |
89 | # In[Output]
90 | plt.figure()
91 | plt.plot(y, '*')
92 |
93 | # In[b sensitivity]
94 | fig, ax = plt.subplots(3, 1)
95 | ax[0].plot(sens_b0_num, 'b', label='$b_0$ num')
96 | ax[0].plot(sens_b0_an, 'r', label='$b_0$ an')
97 | ax[0].legend()
98 | ax[0].grid()
99 |
100 | ax[1].plot(sens_b1_num, 'b', label='$b_1$ num')
101 | ax[1].plot(sens_b1_an, 'r', label='$b_1$ an')
102 | ax[1].legend()
103 | ax[1].grid()
104 |
105 | ax[2].plot(sens_b2_num, 'b', label='$b_2$ num')
106 | ax[2].plot(sens_b2_an, 'r', label='$b_2$ an')
107 | ax[2].legend()
108 | ax[2].grid()
109 |
110 | # In[2]
111 | plt.figure()
112 | plt.plot(sens_b0_num[0:-2], label='$b_0$')
113 | plt.plot(sens_b1_num[1:-1], label='$b_1 q^1$')
114 | plt.plot(sens_b2_num[2:], label='$b_2 q^2$')
115 | plt.grid()
116 | plt.legend()
117 |
118 | # In[2]
119 | fig, ax = plt.subplots(2, 1)
120 | ax[0].plot(sens_a1_num, 'b', label='$a_1$ num')
121 | ax[0].plot(sens_a1_an, 'r', label='$a_1$ an')
122 | ax[0].legend()
123 | ax[0].grid()
124 |
125 | ax[1].plot(sens_a2_num, 'b', label='$a_2$ num')
126 | ax[1].plot(sens_a2_an, 'r', label='$a_2$ an')
127 | ax[1].legend()
128 | ax[1].grid()
129 |
130 |
131 | # In[2]
132 | plt.figure()
133 | plt.plot(sens_a1_num[0:-1], label='$a_1$')
134 | plt.plot(sens_a2_num[1:], label='$a_2q^1$')
135 | plt.legend()
136 | plt.grid()
137 |
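138 |
139 |     # Added check: analytical and finite-difference sensitivities computed
140 |     # above should agree up to the perturbation error.
141 |     print('max |b0| mismatch:', np.max(np.abs(sens_b0_an - sens_b0_num)))
142 |     print('max |a1| mismatch:', np.max(np.abs(sens_a1_an - sens_a1_num)))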
--------------------------------------------------------------------------------
/test_code/filter_mimo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy as sp
3 | import scipy.signal
4 | import matplotlib.pyplot as plt
5 | import numba as nb
6 |
7 | def lfilter_ic(b_poly, a_poly, u_in, y_0=None, u_0=None):
8 |
9 | if y_0 is None and u_0 is None:
10 | z_init = None
11 | else:
12 | z_init = scipy.signal.lfiltic(b_poly, a_poly, y_0, u_0)
13 | if z_init is not None:
14 | y, z_end = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
15 | else:
16 | y = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
17 | z_init = None
18 | z_end = None
19 | return y, z_init, z_end
20 |
21 |
22 | def lfilter_mimo(b, a, u_in):
23 | batch_size, in_ch, seq_len = u_in.shape
24 | out_ch, _, _ = a.shape
25 | y_out = np.zeros_like(u_in, shape=(batch_size, out_ch, seq_len))
26 | for out_idx in range(out_ch):
27 | for in_idx in range(in_ch):
28 | y_out[:, out_idx, :] += scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :], u_in[:, in_idx, :], axis=-1)
29 | return y_out # [B, O, T]
30 |
31 |
32 | def lfilter_mimo_components(b, a, u_in):
33 | batch_size, in_ch, seq_len = u_in.shape
34 | out_ch, _, _ = a.shape
35 | y_comp_out = np.zeros_like(u_in, shape=(batch_size, out_ch, in_ch, seq_len))
36 | for out_idx in range(out_ch):
37 | for in_idx in range(in_ch):
38 | y_comp_out[:, out_idx, in_idx, :] += scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :], u_in[:, in_idx, :], axis=-1)
39 | return y_comp_out # [B, O, I, T]
40 |
41 |
42 |
43 | @nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:])], '(n),(n)->()')
44 | def fast_dist(X1, X2, res):
45 | res[0] = 0.0
46 | n = X1.shape[0]
47 | for i in range(n):
48 | res[0] += (X1[i] - X2[i])**2
49 |
50 |
51 | @nb.guvectorize([(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(n),(m),(N)->(N)', forceobj=True)
52 | def lfilter_nb(b, a, u_in, y_comp):
53 |     y_comp[:] = scipy.signal.lfilter(b, a, u_in, axis=-1)  # object mode is required for the SciPy call; assign in place, since rebinding y_comp would discard the result
54 |
55 |
56 | if __name__ == '__main__':
57 |
58 | in_ch = 3
59 | out_ch = 4
60 | n_b = 2
61 | n_a = 1
62 |
63 | batch_size = 16
64 | seq_len = 32
65 |
66 | # Coefficients of the polynomials
67 | b_coeff = np.random.randn(*(out_ch, in_ch, n_b))
68 | a_coeff = np.random.rand(*(out_ch, in_ch, n_a))
69 |
70 | # Polynomials
71 | a_poly = np.empty_like(a_coeff, shape=(out_ch, in_ch, n_a + 1))
72 | a_poly[:, :, 0] = 1
73 | a_poly[:, :, 1:] = a_coeff[:, :, :]
74 | b_poly = np.array(b_coeff)
75 |
76 | eps = 1e-6 # numerical perturbation
77 |
78 | # In[Filter with initial condition]
79 | y_0 = np.random.randn(*(out_ch, in_ch, n_a))
80 | u_0 = np.random.randn(*(out_ch, in_ch, n_b))
81 | u_in = 1*np.random.randn(*(batch_size, in_ch, seq_len))
82 | #y, _, _ = lfilter_ic(b_poly, a_poly, u_in, y_0, u_0)
83 |
84 |
85 |
86 |
87 |
88 | y_out = lfilter_mimo(b_poly, a_poly, u_in)
89 | y_out_comp = lfilter_mimo_components(b_poly, a_poly, u_in)
90 | y_out_2 = np.sum(y_out_comp, axis=2)
91 |
92 | assert (np.allclose(y_out, y_out_2))
93 |
94 | # In[Sensitivity]
95 | d0_np = np.array([1.0])
96 | sens_b = np.zeros_like(u_in, shape=(batch_size, out_ch, in_ch, n_b, seq_len))
97 | for out_idx in range(out_ch):
98 | for in_idx in range(in_ch):
99 |             sens_b[:, out_idx, in_idx, 0, :] = sp.signal.lfilter(d0_np, a_poly[out_idx, in_idx, :], u_in[:, in_idx])  # dy/db_0 = u filtered by 1/A(q)
100 | for idx_coeff in range(1, n_b):
101 | sens_b[:, :, :, idx_coeff, idx_coeff:] = sens_b[:, :, :, 0, :-idx_coeff]
102 | #sens_b = torch.as_tensor(sens_b) # B, O, I, D, T
103 |
104 | grad_out = np.random.randn(*(batch_size, out_ch, seq_len))
105 | grad_b = np.einsum('boidt,bot->oid', sens_b, grad_out)
106 | grad_bb = np.einsum('boidt,bqt->oid', sens_b, grad_out)
107 |
108 | #grad_bb = np.einsum('b...t,b...t', sens_b, grad_out)
109 |
110 |
111 |
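112 |     # Added check: the einsum contraction for grad_b matches an explicit
113 |     # loop over batch and time.
114 |     grad_b_loop = np.zeros((out_ch, in_ch, n_b))
115 |     for b_idx in range(batch_size):
116 |         for t_idx in range(seq_len):
117 |             grad_b_loop += sens_b[b_idx, :, :, :, t_idx] * grad_out[b_idx, :, None, None, t_idx]
118 |     assert np.allclose(grad_b, grad_b_loop)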
--------------------------------------------------------------------------------
/test_code/filter_mimo_channels_last.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy as sp
3 | import scipy.signal
4 | import matplotlib.pyplot as plt
5 | import numba as nb
6 |
7 | def lfilter_ic(b_poly, a_poly, u_in, y_0=None, u_0=None):
8 |
9 | if y_0 is None and u_0 is None:
10 | z_init = None
11 | else:
12 | z_init = scipy.signal.lfiltic(b_poly, a_poly, y_0, u_0)
13 | if z_init is not None:
14 | y, z_end = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
15 | else:
16 | y = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
17 | z_init = None
18 | z_end = None
19 | return y, z_init, z_end
20 |
21 |
22 | def lfilter_mimo_channels_last(b, a, u_in):
23 | batch_size, seq_len, in_ch = u_in.shape
24 | out_ch, _, _ = a.shape
25 | y_out = np.zeros_like(u_in, shape=(batch_size, seq_len, out_ch))
26 | for out_idx in range(out_ch):
27 | for in_idx in range(in_ch):
28 | y_out[:, :, out_idx] += scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :],
29 | u_in[:, :, in_idx], axis=-1)
30 | return y_out # [B, T, O]
31 |
32 |
33 | def lfilter_mimo_components_channels_last(b, a, u_in):
34 | batch_size, seq_len, in_ch = u_in.shape
35 | out_ch, _, _ = a.shape
36 | y_comp_out = np.zeros_like(u_in, shape=(batch_size, seq_len, out_ch, in_ch))
37 | for out_idx in range(out_ch):
38 | for in_idx in range(in_ch):
39 | y_comp_out[:, :, out_idx, in_idx] = scipy.signal.lfilter(b[out_idx, in_idx, :], a[out_idx, in_idx, :], u_in[:, :, in_idx], axis=-1)
40 | return y_comp_out # [B, T, O, I]
41 |
42 |
43 |
44 | if __name__ == '__main__':
45 |
46 | in_ch = 3
47 | out_ch = 4
48 | n_b = 2
49 | n_a = 1
50 |
51 | batch_size = 16
52 | seq_len = 32
53 |
54 | # Coefficients of the polynomials
55 | b_coeff = np.random.randn(*(out_ch, in_ch, n_b))
56 | a_coeff = np.random.rand(*(out_ch, in_ch, n_a))
57 |
58 | # Polynomials
59 | a_poly = np.empty_like(a_coeff, shape=(out_ch, in_ch, n_a + 1))
60 | a_poly[:, :, 0] = 1
61 | a_poly[:, :, 1:] = a_coeff[:, :, :]
62 | b_poly = np.array(b_coeff)
63 |
64 | eps = 1e-6 # numerical perturbation
65 |
66 | # In[Filter with initial condition]
67 | y_0 = np.random.randn(*(out_ch, in_ch, n_a))
68 | u_0 = np.random.randn(*(out_ch, in_ch, n_b))
69 | u_in = 1*np.random.randn(*(batch_size, seq_len, in_ch))
70 | #y, _, _ = lfilter_ic(b_poly, a_poly, u_in, y_0, u_0)
71 |
72 |
73 |
74 | y_out = lfilter_mimo_channels_last(b_poly, a_poly, u_in)
75 | y_out_comp = lfilter_mimo_components_channels_last(b_poly, a_poly, u_in)
76 | y_out_2 = np.sum(y_out_comp, axis=-1)
77 |
78 | assert (np.allclose(y_out, y_out_2))
79 |
80 | # In[Sensitivity]
81 | d0_np = np.array([1.0])
82 | sens_b = np.zeros_like(u_in, shape=(batch_size, out_ch, in_ch, n_b, seq_len))
83 | for out_idx in range(out_ch):
84 | for in_idx in range(in_ch):
85 |             sens_b[:, out_idx, in_idx, 0, :] = sp.signal.lfilter(d0_np, a_poly[out_idx, in_idx, :], u_in[:, :, in_idx])  # dy/db_0 = u filtered by 1/A(q); input is channels-last
86 | for idx_coeff in range(1, n_b):
87 | sens_b[:, :, :, idx_coeff, idx_coeff:] = sens_b[:, :, :, 0, :-idx_coeff]
88 | #sens_b = torch.as_tensor(sens_b) # B, O, I, D, T
89 |
90 | grad_out = np.random.randn(*(batch_size, out_ch, seq_len))
91 | grad_b = np.einsum('boidt,bot->oid', sens_b, grad_out)
92 | grad_bb = np.einsum('boidt,bqt->oid', sens_b, grad_out)
93 |
94 | #grad_bb = np.einsum('b...t,b...t', sens_b, grad_out)
95 |
96 |
97 |
--------------------------------------------------------------------------------
/test_code/find_initial_cond_ab.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy
3 | import scipy.signal
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | def lfilter_ic(b_poly, a_poly, u_in, y_0=None, u_0=None):
8 |
9 | if y_0 is None and u_0 is None:
10 | z_init = None
11 | else:
12 | z_init = scipy.signal.lfiltic(b_poly, a_poly, y_0, u_0)
13 | if z_init is not None:
14 | y, z_end = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
15 | else:
16 | y = scipy.signal.lfilter(b_poly, a_poly, u_in, zi=z_init)
17 | z_init = None
18 | z_end = None
19 | return y, z_init, z_end
20 |
21 | if __name__ == '__main__':
22 |
23 | # Coefficients of the polynomials
24 | b_coeff = np.array([0.02, 0.03, 0.04]) # b_0, b_1, b_2
25 | a_coeff = np.array([-1.87212998940304, 0.942776404097492]) # a_1, a_2
26 |
27 | # Polynomials
28 | a_poly = np.r_[1.0, a_coeff]
29 | b_poly = np.array(b_coeff)
30 |
31 | eps = 1e-6 # numerical perturbation
32 |
33 | # In[Filter with initial condition]
34 | y_0 = np.array([1.0, 2.0]) # y_-1, y_-2
35 | u_0 = np.array([3.0, 4.0]) # u_-1, u_-2
36 | u_in = 0*np.random.randn(150)
37 | y, _, _ = lfilter_ic(b_poly, a_poly, u_in, y_0, u_0)
38 |
39 |
40 | # Equivalent initial condition, with no input
41 | y_0_bar = np.zeros_like(y_0)
42 |     y_m1_bar = y_0[0] - b_poly[2]/a_poly[2]*u_0[0]  # same expression as y_0_bar[0] below
43 |     y_m2_bar = y_0[1] + (a_poly[1]*b_poly[2]/(a_poly[2] ** 2) - b_poly[1] / a_poly[2]) * u_0[0] - b_poly[2] / a_poly[2] * u_0[1]  # same expression as y_0_bar[1] below
44 |
45 | y_0_bar[0] = y_0[0] - b_poly[2]/a_poly[2]*u_0[0]
46 | #y_0_bar[1] = y_0[1] -b_poly[1]/a_poly[2]*u_0[0] - b_poly[2]/a_poly[2]*u_0[1] + a_poly[1]*b_poly[2]/(a_poly[2]**2)*u_0[0]
47 | y_0_bar[1] = y_0[1] + (a_poly[1]*b_poly[2]/(a_poly[2] ** 2) - b_poly[1] / a_poly[2]) * u_0[0] - b_poly[2] / a_poly[2] * u_0[1]
48 |
49 | # Verify equivalent initial condition
50 | zi = scipy.signal.lfiltic(b_poly, a_poly, y_0, u_0)
51 | zi_bar = scipy.signal.lfiltic(b_poly, a_poly, y_0_bar, 0*u_0)
52 |
53 | # In[Free response]
54 | delta = a_poly[1]**2 -4*a_poly[2]
55 |     if delta < 0:  # complex-conjugate poles
56 |         r = -a_poly[1]  # note: unfinished scratch, r is not used below
57 |
58 | # In[Analytical sensitivities b]
59 |
60 | sens_b0_an, _, _ = lfilter_ic([1.0], a_poly, u_in, [0.0], u_0) # this is correct!
61 | sens_b1_an, _, _ = lfilter_ic([0.0, 1.0], a_poly, u_in, [0.0], u_0) # this is correct!
62 | sens_b2_an, _, _ = lfilter_ic([0.0, 0.0, 1.0], a_poly, u_in, [0.0], u_0) # this is correct!
63 |
64 | # In[Analytical sensitivities a]
65 |
66 | sens_a1_an, _, _ = lfilter_ic([0.0, 1.0], a_poly, -y, [0.0], -y_0) # this is correct!
67 | sens_a2_an, _, _ = lfilter_ic([0.0, 0.0, 1.0], a_poly, -y, [0.0], -y_0) # this is correct!
68 |
69 | # In[Perturbation on coefficients b]
70 | # b0
71 | b_coeff_eps = np.array(b_coeff)
72 | b_coeff_eps[0] += eps
73 | b_poly_eps = np.array(b_coeff_eps)
74 | y_eps, _ ,_ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
75 | sens_b0_num = (y_eps - y) / eps
76 |
77 | # b1
78 | b_coeff_eps = np.array(b_coeff)
79 | b_coeff_eps[1] += eps
80 | b_poly_eps = np.array(b_coeff_eps)
81 | y_eps, _, _ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
82 | sens_b1_num = (y_eps - y) / eps
83 |
84 | # b2
85 | b_coeff_eps = np.array(b_coeff)
86 | b_coeff_eps[2] += eps
87 | b_poly_eps = np.array(b_coeff_eps)
88 |
89 | y_eps, _, _ = lfilter_ic(b_poly_eps, a_poly, u_in, y_0, u_0)
90 | sens_b2_num = (y_eps - y) / eps
91 |
92 | # In[Perturbation on coefficients a]
93 | # a1
94 | a_coeff_eps = np.array(a_coeff)
95 | a_coeff_eps[0] += eps
96 | a_poly_eps = np.r_[1.0, a_coeff_eps]
97 | y_eps, _, _ = lfilter_ic(b_poly, a_poly_eps, u_in, y_0, u_0)
98 | sens_a1_num = (y_eps - y) / eps
99 |
100 | # a2
101 | a_coeff_eps = np.array(a_coeff)
102 | a_coeff_eps[1] += eps
103 | a_poly_eps = np.r_[1.0, a_coeff_eps]
104 | y_eps, _, _ = lfilter_ic(b_poly, a_poly_eps, u_in, y_0, u_0)
105 | sens_a2_num = (y_eps - y) / eps
106 |
107 |
108 | # In[Output]
109 | plt.figure()
110 | plt.plot(y, '*')
111 |
112 | # In[b sensitivity]
113 | fig, ax = plt.subplots(3, 1)
114 | ax[0].plot(sens_b0_num, 'b', label='$b_0$ num')
115 | ax[0].plot(sens_b0_an, 'r', label='$b_0$ an')
116 | ax[0].legend()
117 | ax[0].grid()
118 |
119 | ax[1].plot(sens_b1_num, 'b', label='$b_1$ num')
120 | ax[1].plot(sens_b1_an, 'r', label='$b_1$ an')
121 | ax[1].legend()
122 | ax[1].grid()
123 |
124 | ax[2].plot(sens_b2_num, 'b', label='$b_2$ num')
125 | ax[2].plot(sens_b2_an, 'r', label='$b_2$ an')
126 | ax[2].legend()
127 | ax[2].grid()
128 |
129 | # In[2]
130 | plt.figure()
131 | plt.plot(sens_b0_num[0:-2], label='$b_0$')
132 | plt.plot(sens_b1_num[1:-1], label='$b_1 q^1$')
133 | plt.plot(sens_b2_num[2:], label='$b_2 q^2$')
134 | plt.grid()
135 | plt.legend()
136 |
137 | # In[2]
138 | fig, ax = plt.subplots(2, 1)
139 | ax[0].plot(sens_a1_num, 'b', label='$a_1$ num')
140 | ax[0].plot(sens_a1_an, 'r', label='$a_1$ an')
141 | ax[0].legend()
142 | ax[0].grid()
143 |
144 | ax[1].plot(sens_a2_num, 'b', label='$a_2$ num')
145 | ax[1].plot(sens_a2_an, 'r', label='$a_2$ an')
146 | ax[1].legend()
147 | ax[1].grid()
148 |
149 |
150 | # In[2]
151 | plt.figure()
152 | plt.plot(sens_a1_num[0:-1], label='$a_1$')
153 | plt.plot(sens_a2_num[1:], label='$a_2q^1$')
154 | plt.legend()
155 | plt.grid()
156 |
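157 |
158 |     # Added check: the equivalent initial condition with zero past input
159 |     # yields the same lfilter state as the original (y_0, u_0) pair.
160 |     assert np.allclose(zi, zi_bar)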
--------------------------------------------------------------------------------
/test_code/install_pypy.txt:
--------------------------------------------------------------------------------
1 | python -m build
2 | python -m twine upload --repository testpypi dist/*
3 | python -m pip install --index-url https://test.pypi.org/simple/ --no-deps dynonet
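4 | # prerequisite (assumed fresh environment): python -m pip install --upgrade build twine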
--------------------------------------------------------------------------------
/test_code/linearsiso_fun_example.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.old.linearsiso import LinearDynamicalSystemFunction
3 | import matplotlib.pyplot as plt
4 | from torch.autograd import gradcheck
5 | from torch.autograd.gradcheck import get_numerical_jacobian, get_analytical_jacobian
6 | import time
7 |
8 | # copied from torch.autograd.gradcheck
9 | def istuple(obj):
10 | # Usually instances of PyStructSequence is also an instance of tuple
11 | # but in some py2 environment it is not, so we have to manually check
12 | # the name of the type to determine if it is a namedtupled returned
13 | # by a pytorch operator.
14 | t = type(obj)
15 | return isinstance(obj, tuple) or t.__module__ == 'torch.return_types'
16 |
17 | # copied from torch.autograd.gradcheck
18 | def _as_tuple(x):
19 | if istuple(x):
20 | return x
21 | elif isinstance(x, list):
22 | return tuple(x)
23 | else:
24 | return x,
25 |
26 |
27 | if __name__ == '__main__':
28 |
29 | G = LinearDynamicalSystemFunction.apply
30 |
31 | # In[Setup problem]
32 | n_batch = 1
33 | n_b = 2
34 | n_f = 2
35 | N = 100
36 | u_in = torch.ones((N, n_batch), dtype=torch.double, requires_grad=False)
37 | y_0 = torch.zeros((n_batch, n_f), dtype=torch.double)
38 | u_0 = torch.zeros((n_batch, n_b), dtype=torch.double)
39 |
40 | # coefficients of a 2nd order oscillator
41 | b_coeff = torch.tensor([0.0706464146944544, 0], dtype=torch.double, requires_grad=True) # b_1, b_2
42 | f_coeff = torch.tensor([-1.87212998940304, 0.942776404097492], dtype=torch.double, requires_grad=True) # f_1, f_2
43 | inputs = (b_coeff, f_coeff, u_in, y_0, u_0)
44 |
45 | # In[Forward pass]
46 | y_out = G(*inputs)
47 |
48 | # In[Finite difference derivatives computation]
49 | def G_fun(input):
50 | return _as_tuple(G(*input))[0]
51 | numerical = get_numerical_jacobian(G_fun, inputs)
52 |
53 | # In[Autodiff derivatives computation]
54 | analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(inputs, y_out)
55 |     print('max |numerical - analytical| (b):', torch.max(numerical[0] - analytical[0]))
56 |
57 |
58 | # In[Plot output]
59 | plt.figure()
60 |     plt.plot(y_out.detach().numpy(), label=r'$\hat{y}$')
61 | plt.grid(True)
62 |
63 | # In[Plot derivatives]
64 |
65 | fig, ax = plt.subplots(2, 1)
66 | ax[0].plot(numerical[0][0, :], 'b', label='$\\tilde{b}_1$')
67 | #ax[0].plot(numerical[0][1, :], 'k', label='$\\tilde{b}_2$')
68 | ax[0].grid(True)
69 | ax[0].legend()
70 |
71 | ax[1].plot(numerical[1][0, :], 'b', label='$\\tilde{f}_1$')
72 | ax[1].plot(numerical[1][1, :], 'k', label='$\\tilde{f}_2$')
73 | ax[1].grid(True)
74 | ax[1].legend()
75 |
76 | fig, ax = plt.subplots(2, 1)
77 | ax[0].plot(numerical[0][0, :], 'b', label='$\\tilde{b}_1$')
78 | ax[0].plot(analytical[0][0, :], 'b*', label='$\\tilde{b}_1$')
79 |
80 | #ax[0].plot(numerical[0][1, :], 'k', label='$\\tilde{b}_2$')
81 | #ax[0].plot(analytical[0][1, :], 'k*', label='$\\tilde{b}_2$')
82 |
83 | ax[0].grid(True)
84 | ax[0].legend()
85 |
86 | ax[1].plot(numerical[1][0, :], 'b', label='$\\tilde{f}_1$')
87 | ax[1].plot(analytical[1][0, :], 'b*', label='$\\tilde{f}_1$')
88 |
89 | ax[1].plot(numerical[1][1, :], 'k', label='$\\tilde{f}_2$')
90 | ax[1].plot(analytical[1][1, :], 'k*', label='$\\tilde{f}_2$')
91 |
92 | ax[1].grid(True)
93 | ax[1].legend()
94 |
95 | # In[Plot derivatives delayed]
96 |
97 | # delayed sensitivities match!
98 |
99 | fig, ax = plt.subplots(2, 1)
100 | ax[0].plot(numerical[0][0, 0:-1], 'b', label='$\\tilde{b}_1$')
101 | ax[0].plot(numerical[0][1, 1:], 'k', label='$\\tilde{b}_2$')
102 | ax[0].grid(True)
103 | ax[0].legend()
104 |
105 | ax[1].plot(numerical[1][0, 0:-1], 'b', label='$\\tilde{f}_1$')
106 | ax[1].plot(numerical[1][1, 1:], 'k', label='$\\tilde{f}_2$')
107 | ax[1].grid(True)
108 | ax[1].legend()
109 |
110 |
111 | # In[builtin gradient check]
112 | test = gradcheck(G, inputs, eps=1e-6, atol=1e-4, raise_exception=True)
113 |
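114 |     # Added: report the built-in gradcheck outcome when run as a script.
115 |     print(f"gradcheck passed: {test}")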
--------------------------------------------------------------------------------
/test_code/linearsiso_module_example.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.old.linearsiso import LinearDynamicalSystem
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | # copied from torch.autograd.gradcheck
8 | def istuple(obj):
9 | # Usually instances of PyStructSequence is also an instance of tuple
10 | # but in some py2 environment it is not, so we have to manually check
11 | # the name of the type to determine if it is a namedtupled returned
12 | # by a pytorch operator.
13 | t = type(obj)
14 | return isinstance(obj, tuple) or t.__module__ == 'torch.return_types'
15 |
16 | # copied from torch.autograd.gradcheck
17 | def _as_tuple(x):
18 | if istuple(x):
19 | return x
20 | elif isinstance(x, list):
21 | return tuple(x)
22 | else:
23 | return x,
24 |
25 |
26 | if __name__ == '__main__':
27 |
28 |
29 | # In[Setup problem]
30 | n_batch = 1
31 | n_b = 2
32 | n_f = 2
33 | N = 100
34 | u_in = torch.ones((N, n_batch))
35 | y_0 = torch.zeros((n_batch, n_f))
36 | u_0 = torch.zeros((n_batch, n_b))
37 |
38 | # coefficients of a 2nd order oscillator
39 |     b_coeff = np.array([0.0706464146944544])  # b_1
40 | f_coeff = np.array([-1.87212998940304, 0.942776404097492]) # f_1, f_2
41 |
42 | # In[Forward pass]
43 |
44 | G = LinearDynamicalSystem(b_coeff, f_coeff)
45 | y_out = G(u_in, y_0, u_0)
46 |
47 | # In[Plot output]
48 | plt.figure()
49 |     plt.plot(y_out.detach().numpy(), label=r'$\hat{y}$')
50 | plt.grid(True)
--------------------------------------------------------------------------------
/test_code/linearsiso_module_time.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.old.linearsiso_TB import LinearDynamicalSystem
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import time
6 |
7 | # copied from torch.autograd.gradcheck
8 | def istuple(obj):
9 | # Usually instances of PyStructSequence is also an instance of tuple
10 | # but in some py2 environment it is not, so we have to manually check
11 | # the name of the type to determine if it is a namedtupled returned
12 | # by a pytorch operator.
13 | t = type(obj)
14 | return isinstance(obj, tuple) or t.__module__ == 'torch.return_types'
15 |
16 | # copied from torch.autograd.gradcheck
17 | def _as_tuple(x):
18 | if istuple(x):
19 | return x
20 | elif isinstance(x, list):
21 | return tuple(x)
22 | else:
23 | return x,
24 |
25 |
26 | if __name__ == '__main__':
27 |
28 |
29 | # In[Setup problem]
30 | n_batch = 1
31 | n_b = 2
32 | n_f = 2
33 | N = 10000000
34 | u_in = torch.rand((N, n_batch), requires_grad=True)
35 | y_0 = torch.zeros((n_batch, n_f), requires_grad=False)
36 | u_0 = torch.zeros((n_batch, n_b), requires_grad=True)
37 |
38 | # coefficients of a 2nd order oscillator
39 |     b_coeff = np.array([0.0706464146944544])  # b_1
40 | f_coeff = np.array([-1.87212998940304, 0.942776404097492]) # f_1, f_2
41 |
42 | # In[Trace]
43 | time_start = time.time()
44 | G = LinearDynamicalSystem(b_coeff, f_coeff)
45 | G_traced = torch.jit.trace(G, (u_in, y_0, u_0))
46 | # In[Forward pass]
47 |
48 | time_start = time.time()
49 | y_out = G_traced(u_in, y_0, u_0)
50 | z = y_out
51 | L = torch.sum(z)
52 | L.backward()
53 | time_full = time.time() - time_start
54 |
55 | print(f"Time forward + backward: {time_full:.2f}")
56 |
57 | # In[Plot output]
58 | plt.figure()
59 |     plt.plot(y_out.detach().numpy(), label=r'$\hat{y}$')
60 | plt.grid(True)
61 |
62 | # In[Test]
63 | y_out_np = y_out.detach().numpy()
64 | grad_out = np.ones_like(y_out_np)
65 | f_np = np.concatenate(([1.0], f_coeff))
66 | b_np = b_coeff
67 | import scipy.signal
68 | grad_u = scipy.signal.lfilter(b_np, f_np, grad_out, axis=0)
69 | grad_u = grad_u[::-1, :]
70 |
71 |
72 |
73 |
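74 |     # Added check (sketch): with grad_out identically one, the flipped
75 |     # filtered sequence above should match autograd's input gradient.
76 |     print('max |grad_u - u_in.grad|:', np.max(np.abs(grad_u - u_in.grad.numpy())))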
--------------------------------------------------------------------------------
/test_code/mimo_module_example.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.module.lti import MimoLinearDynamicalOperator
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | if __name__ == '__main__':
8 |
9 |
10 | # In[Setup problem]
11 | n_b = 3
12 | n_a = 2
13 | n_k = 20
14 | in_channels = 4
15 | out_channels = 5
16 | batch_size = 32
17 | seq_len = 1024
18 | G = MimoLinearDynamicalOperator(in_channels, out_channels, n_b, n_a, n_k=n_k)
19 |
20 | # build first-order stable systems
21 | with torch.no_grad():
22 | G.a_coeff[:, :, :] = 0.0
23 | G.b_coeff[:, :, :] = 0.0
24 | G.a_coeff[:, :, 0] = -0.99
25 | G.b_coeff[:, :, 0] = 0.01
26 |
27 | y_0 = torch.tensor(0*np.random.randn(*(out_channels, in_channels, n_a)))
28 | u_0 = torch.tensor(0*np.random.randn(*(out_channels, in_channels, n_b)))
29 | u_in = torch.tensor(1*np.random.randn(*(batch_size, seq_len, in_channels)), requires_grad=True)
30 |
31 | # In[Forward pass]
32 | y_out = G(u_in, y_0, u_0)
33 | y_out_np = y_out.detach().numpy()
34 | #y_out = y_out.detach().numpy(),
35 | # In[Plot output]
36 |
37 |
38 | #plt.figure()
39 | plt.plot(y_out_np[0, :, 0], label='y')
40 | #plt.grid(True)
41 |
42 | # In[Test doc]
43 | in_channels, out_channels = 2, 4
44 | n_b, n_a, n_k = 2, 2, 1
45 | G = MimoLinearDynamicalOperator(in_channels, out_channels, n_b, n_a, n_k)
46 | batch_size, seq_len = 32, 100
47 | u_in = torch.ones((batch_size, seq_len, in_channels))
48 |     y_out = G(u_in)  # shape: (batch_size, seq_len, out_channels)
49 |
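50 |     # Added check: the docstring-style example returns channels-last output.
51 |     assert y_out.shape == (batch_size, seq_len, out_channels)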
--------------------------------------------------------------------------------
/test_code/mimo_secondorder_module_example.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.module.lti import StableSecondOrderMimoLinearDynamicalOperator
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | if __name__ == '__main__':
8 |
9 |
10 | # In[Setup problem]
11 | n_b = 3
12 | n_a = 2
13 | in_channels = 4
14 | out_channels = 5
15 | batch_size = 32
16 | seq_len = 1024
17 | G = StableSecondOrderMimoLinearDynamicalOperator(in_channels, out_channels)
18 |
19 |
20 | y_0 = torch.tensor(0*np.random.randn(*(out_channels, in_channels, n_a)))
21 | u_0 = torch.tensor(0*np.random.randn(*(out_channels, in_channels, n_b)))
22 | u_in = torch.tensor(1*np.random.randn(*(batch_size, seq_len, in_channels)), requires_grad=True)
23 |
24 | # In[Forward pass]
25 | y_out = G(u_in, y_0, u_0)
26 | y_out_np = y_out.detach().numpy()
27 | #y_out = y_out.detach().numpy(),
28 | # In[Plot output]
29 |
30 |
31 | #plt.figure()
32 | plt.plot(y_out_np[0, :, 0], label='y')
33 | #plt.grid(True)
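34 |
35 |     # Added check (sketch): the operator maps in_channels to out_channels
36 |     # and preserves the batch and sequence dimensions.
37 |     assert y_out_np.shape == (batch_size, seq_len, out_channels)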
--------------------------------------------------------------------------------
/test_code/run_tests.bat:
--------------------------------------------------------------------------------
1 | python correlate_example.py
2 | python einsum_example.py
3 | python filter_example.py
4 | python filter_initial_cond_ab.py
5 | rem python filter_mimo.py
6 | rem python filter_mimo_channels_last.py
7 | python find_initial_cond_ab.py
8 | rem python linearsiso_fun_example.py
9 | rem python linearsiso_module_example.py
10 | rem python linearsiso_module_time.py
11 | python mimo_module_example.py
12 | python mimo_secondorder_module_example.py
13 | rem python secondorder_module_example.py
14 | python stability_second_order.py
15 | rem python stable_ocs_param.py
16 | rem python stable_param.py
17 | python torch_convolutional.py
18 | python torch_convolutional_FIR.py
19 |
--------------------------------------------------------------------------------
/test_code/secondorder_module_example.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchid.old.linearsiso_TB import SecondOrderOscillator
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | # copied from torch.autograd.gradcheck
8 | def istuple(obj):
9 | # Usually instances of PyStructSequence is also an instance of tuple
10 | # but in some py2 environment it is not, so we have to manually check
11 | # the name of the type to determine if it is a namedtupled returned
12 | # by a pytorch operator.
13 | t = type(obj)
14 | return isinstance(obj, tuple) or t.__module__ == 'torch.return_types'
15 |
16 | # copied from torch.autograd.gradcheck
17 | def _as_tuple(x):
18 | if istuple(x):
19 | return x
20 | elif isinstance(x, list):
21 | return tuple(x)
22 | else:
23 | return x,
24 |
25 |
26 | if __name__ == '__main__':
27 |
28 |
29 | # In[Setup problem]
30 | n_batch = 1
31 | n_b = 2
32 | n_f = 2
33 | N = 10000000
34 | u_in = torch.rand((N, n_batch), requires_grad=True)
35 | y_0 = torch.zeros((n_batch, n_f), requires_grad=False)
36 | u_0 = torch.zeros((n_batch, n_b), requires_grad=True)
37 |
38 | # coefficients of a 2nd order oscillator
39 |     b_coeff = np.array([0.0706464146944544])  # b_1
40 | rho = np.array(1.0)
41 | psi = np.array(0.0)
42 |
43 | G = SecondOrderOscillator(b_coeff, rho, psi)
44 |
45 | # In[Forward pass]
46 | y_out = G(u_in, y_0, u_0)
47 |
48 | # In[Plot output]
49 | plt.figure()
50 |     plt.plot(y_out.detach().numpy(), label=r'$\hat{y}$')
51 | plt.grid(True)
--------------------------------------------------------------------------------
/test_code/stability_second_order.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib
3 | import matplotlib.pyplot as plt
4 |
5 |
6 | if __name__ == '__main__':
7 |
8 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica'], 'size': 12})
9 | matplotlib.rc('text', usetex=True)
10 |
11 | # Plotting the different regions for a system with denominator z**2 + a1*z + a2
12 |
13 | a1 = np.linspace(-2, 2, 1000) # range for parameter a1
14 |     a2_complex = a1**2/4  # complex-conjugate poles for a2 > a2_complex
15 | a2_stab_min = np.abs(a1) - 1
16 | a2_stab_max = np.ones_like(a1)
17 |
18 | fig, ax = plt.subplots(1, 1, figsize=(5, 4))
19 | ax.plot(a1, a2_stab_min, 'k')
20 | ax.plot(a1, a2_stab_max, 'k')
21 | ax.fill_between(a1, a2_complex, a2_stab_max, facecolor='b', alpha=0.2, label='stable complex conjugate poles')
22 | ax.fill_between(a1, a2_stab_min, a2_complex, label='stable real poles')
23 | ax.plot(a1, a2_complex, 'k--', label='stable real coincident poles')
24 | ax.set_xlim([-2.5, 2.5])
25 | ax.set_ylim([-2.0, 1.5])
26 | ax.set_xlabel('$a_1$', fontsize=16)
27 | ax.set_ylabel('$a_2$', fontsize=16)
28 | plt.grid()
29 | plt.tight_layout()
30 | plt.legend(loc='lower right')
31 | plt.savefig('stable_2ndorder.pdf')
32 |
33 |
34 |
35 |
36 |
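37 |     # Added spot check: a coefficient pair from inside the triangle gives a
38 |     # stable polynomial z**2 + a1*z + a2 (both roots inside the unit circle).
39 |     roots = np.roots([1.0, 0.5, 0.5])
40 |     assert np.all(np.abs(roots) < 1.0)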
--------------------------------------------------------------------------------
/test_code/stable_ocs_param.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 |
5 | def sigmoid(val):
6 | return 1/(1 + np.exp(-val))
7 |
8 | def stable_coeff(rho, psi):
9 | r = sigmoid(rho)
10 | theta = np.pi * sigmoid(psi)
11 |
12 | a_1 = -2*r*np.cos(theta)
13 | a_2 = r**2
14 | return a_1, a_2
15 |
16 | def roots_polynomial(a_1, a_2):
17 | delta = a_1**2 - 4 * a_2
18 |     delta = delta.astype(complex)  # np.complex was removed in NumPy >= 1.24
19 | root_1 = (-a_1 + np.sqrt(delta))/2
20 | root_2 = (-a_1 - np.sqrt(delta))/2
21 | idx_real = delta > 0
22 | return root_1, root_2, idx_real
23 |
24 |
25 | if __name__ == '__main__':
26 |
27 | N = 100000
28 | rho = np.random.randn(N)*1
29 | psi = np.random.randn(N)*1
30 |
31 | a_1, a_2 = stable_coeff(rho, psi)
32 | r_1, r_2, idx_real = roots_polynomial(a_1, a_2)
33 |
34 |
35 | fig, ax = plt.subplots()
36 | ax.plot(a_1, a_2, '*')
37 | ax.plot(a_1[idx_real], a_2[idx_real], 'k*')
38 | ax.set_xlabel('a_1')
39 | ax.set_ylabel('a_2')
40 | ax.set_xlim([-2, 2])
41 | ax.set_ylim([-2, 2])
42 |
43 |
44 | fig, ax = plt.subplots()
45 | ax.plot(np.real(r_1), np.imag(r_1), 'r*')
46 | ax.plot(np.real(r_2), np.imag(r_2), 'r*')
47 | ax.plot(np.real(r_1)[idx_real], np.imag(r_1)[idx_real], 'k*')
48 | ax.plot(np.real(r_2)[idx_real], np.imag(r_2)[idx_real], 'k*')
49 | ax.set_xlim([-1.2, 1.2])
50 | ax.set_ylim([-1.2, 1.2])
51 |
52 |     perc_real = np.sum(idx_real) / N * 100
53 |     print(f"Real poles in {perc_real:.1f}% of cases")
54 |
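55 |     # Added check: this parametrization gives a_2 = r**2 < 1 and a
56 |     # non-positive discriminant, i.e. pole pairs on a circle of radius r < 1.
57 |     assert np.all(a_2 < 1.0) and np.all(a_1**2 <= 4*a_2 + 1e-12)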
--------------------------------------------------------------------------------
/test_code/stable_param.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 |
5 | def sigmoid(val):
6 | return 1/(1 + np.exp(-val))
7 |
8 | def stable_coeff(alpha_1, alpha_2):
9 | a_1 = 2*np.tanh(alpha_1)
10 | a_2 = np.abs(a_1) + (2 - np.abs(a_1))*sigmoid(alpha_2) - 1
11 | return a_1, a_2
12 |
13 |
14 | def roots_polynomial(a_1, a_2):
15 | delta = a_1**2 - 4 * a_2
16 |     delta = delta.astype(complex)  # np.complex was removed in NumPy >= 1.24
17 | root_1 = (-a_1 + np.sqrt(delta))/2
18 | root_2 = (-a_1 - np.sqrt(delta))/2
19 | idx_real = delta > 0
20 | return root_1, root_2, idx_real
21 |
22 |
23 | if __name__ == '__main__':
24 |
25 | N = 10000
26 | alpha_1 = np.random.randn(N)*1
27 | alpha_2 = np.random.randn(N)*1
28 |
29 | a_1, a_2 = stable_coeff(alpha_1, alpha_2)
30 | r_1, r_2, idx_real = roots_polynomial(a_1, a_2)
31 |
32 |
33 | fig, ax = plt.subplots()
34 | ax.plot(a_1, a_2, '*')
35 | ax.plot(a_1[idx_real], a_2[idx_real], 'k*')
36 | ax.set_xlabel('a_1')
37 | ax.set_ylabel('a_2')
38 | ax.set_xlim([-2, 2])
39 | ax.set_ylim([-2, 2])
40 |
41 |
42 | fig, ax = plt.subplots()
43 | ax.plot(np.real(r_1), np.imag(r_1), 'r*')
44 | ax.plot(np.real(r_2), np.imag(r_2), 'r*')
45 | ax.plot(np.real(r_1)[idx_real], np.imag(r_1)[idx_real], 'k*')
46 | ax.plot(np.real(r_2)[idx_real], np.imag(r_2)[idx_real], 'k*')
47 | ax.set_xlim([-1.2, 1.2])
48 | ax.set_ylim([-1.2, 1.2])
49 |
50 |     perc_real = np.sum(idx_real) / N * 100
51 |     print(f"Real poles in {perc_real:.1f}% of cases")
52 |
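53 |     # Added check: every sampled pair lies inside the stability triangle
54 |     # a_2 < 1 and a_2 > |a_1| - 1.
55 |     assert np.all(a_2 < 1.0) and np.all(a_2 > np.abs(a_1) - 1.0)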
--------------------------------------------------------------------------------
/test_code/tmp/backproptest1.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | # Creating the graph
4 | x = torch.tensor(1.0, requires_grad = True)
5 | y = torch.tensor(2.0)
6 | z = x * y
7 |
8 | # Displaying
9 | for i, name in zip([x, y, z], "xyz"):
10 | print(f"{name}\ndata: {i.data}\nrequires_grad: {i.requires_grad}\n\
11 | grad: {i.grad}\ngrad_fn: {i.grad_fn}\nis_leaf: {i.is_leaf}\n")
12 |
13 |
--------------------------------------------------------------------------------
/test_code/tmp/backproptest2.py:
--------------------------------------------------------------------------------
1 | import torch
2 | # Creating the graph
3 | x = torch.tensor(1.0, requires_grad = True)
4 | z = x ** 3
5 | z.backward() #Computes the gradient
6 | print(x.grad.data) #Prints '3' which is dz/dx
--------------------------------------------------------------------------------
/test_code/tmp/new-relu.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import torch
3 |
4 |
5 | class MyReLU(torch.autograd.Function):
6 | """
7 | We can implement our own custom autograd Functions by subclassing
8 | torch.autograd.Function and implementing the forward and backward passes
9 | which operate on Tensors.
10 | """
11 |
12 | @staticmethod
13 | def forward(ctx, input):
14 | """
15 | In the forward pass we receive a Tensor containing the input and return
16 | a Tensor containing the output. ctx is a context object that can be used
17 | to stash information for backward computation. You can cache arbitrary
18 | objects for use in the backward pass using the ctx.save_for_backward method.
19 | """
20 | ctx.save_for_backward(input)
21 | return input.clamp(min=0)
22 |
23 | @staticmethod
24 | def backward(ctx, grad_output):
25 | """
26 | In the backward pass we receive a Tensor containing the gradient of the loss
27 | with respect to the output, and we need to compute the gradient of the loss
28 | with respect to the input.
29 | """
30 | input, = ctx.saved_tensors
31 | grad_input = grad_output.clone()
32 | grad_input[input < 0] = 0
33 | return grad_input
34 |
35 |
36 | dtype = torch.float
37 | device = torch.device("cpu")
38 | # device = torch.device("cuda:0") # Uncomment this to run on GPU
39 |
40 | # N is batch size; D_in is input dimension;
41 | # H is hidden dimension; D_out is output dimension.
42 | N, D_in, H, D_out = 64, 1000, 100, 10
43 |
44 | # Create random Tensors to hold input and outputs.
45 | x = torch.randn(N, D_in, device=device, dtype=dtype)
46 | y = torch.randn(N, D_out, device=device, dtype=dtype)
47 |
48 | # Create random Tensors for weights.
49 | w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
50 | w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)
51 |
52 | learning_rate = 1e-6
53 | for t in range(500):
54 | # To apply our Function, we use Function.apply method. We alias this as 'relu'.
55 | relu = MyReLU.apply
56 |
57 | # Forward pass: compute predicted y using operations; we compute
58 | # ReLU using our custom autograd operation.
59 | y_pred = relu(x.mm(w1)).mm(w2)
60 |
61 | # Compute and print loss
62 | loss = (y_pred - y).pow(2).sum()
63 | if t % 100 == 99:
64 | print(t, loss.item())
65 |
66 | # Use autograd to compute the backward pass.
67 | loss.backward()
68 |
69 | # Update weights using gradient descent
70 | with torch.no_grad():
71 | w1 -= learning_rate * w1.grad
72 | w2 -= learning_rate * w2.grad
73 |
74 | # Manually zero the gradients after updating weights
75 | w1.grad.zero_()
76 | w2.grad.zero_()
--------------------------------------------------------------------------------
/test_code/tmp/new_convolution.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from numpy import flip
3 | import numpy as np
4 | from scipy.signal import convolve2d, correlate2d
5 | from torch.autograd import Function
6 | from torch.nn.modules.module import Module
7 | from torch.nn.parameter import Parameter
8 |
9 |
10 | class ScipyConv2dFunction(Function):
11 | @staticmethod
12 | def forward(ctx, input, filter, bias):
13 | # detach so we can cast to NumPy
14 | input, filter, bias = input.detach(), filter.detach(), bias.detach()
15 | result = correlate2d(input.numpy(), filter.numpy(), mode='valid')
16 | result += bias.numpy()
17 | ctx.save_for_backward(input, filter, bias)
18 | return torch.as_tensor(result, dtype=input.dtype)
19 |
20 | @staticmethod
21 | def backward(ctx, grad_output):
22 | grad_output = grad_output.detach()
23 | input, filter, bias = ctx.saved_tensors
24 | grad_output = grad_output.numpy()
25 | grad_bias = np.sum(grad_output, keepdims=True)
26 | grad_input = convolve2d(grad_output, filter.numpy(), mode='full')
27 | # the previous line can be expressed equivalently as:
28 | # grad_input = correlate2d(grad_output, flip(flip(filter.numpy(), axis=0), axis=1), mode='full')
29 | grad_filter = correlate2d(input.numpy(), grad_output, mode='valid')
30 | return torch.from_numpy(grad_input), torch.from_numpy(grad_filter).to(torch.float), torch.from_numpy(grad_bias).to(torch.float)
31 |
32 | class ScipyConv2d(Module):
33 | def __init__(self, filter_width, filter_height):
34 | super(ScipyConv2d, self).__init__()
35 | self.filter = Parameter(torch.randn(filter_width, filter_height))
36 | self.bias = Parameter(torch.randn(1, 1))
37 |
38 | def forward(self, input):
39 | return ScipyConv2dFunction.apply(input, self.filter, self.bias)
40 |
41 | if __name__ == '__main__':
42 | module = ScipyConv2d(3, 3)
43 | print("Filter and bias: ", list(module.parameters()))
44 | input = torch.randn(10, 10, requires_grad=True)
45 | output = module(input)
46 | print("Output from the convolution: ", output)
47 | output.backward(torch.randn(8, 8))
48 | print("Gradient for the input map: ", input.grad)
49 |
50 |
51 | from torch.autograd.gradcheck import gradcheck
52 | moduleConv = ScipyConv2d(3, 3)
53 | input = [torch.randn(20, 20, dtype=torch.double, requires_grad=True)]
54 | test = gradcheck(moduleConv, input, eps=1e-6, atol=1e-4)
55 | print("Are the gradients correct: ", test)
--------------------------------------------------------------------------------
/test_code/tmp/new_optional.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class MulConstant(torch.autograd.Function):
4 | @staticmethod
5 | def forward(ctx, tensor, constant):
6 | # ctx is a context object that can be used to stash information
7 | # for backward computation
8 | ctx.constant = constant
9 | return tensor * constant
10 |
11 | @staticmethod
12 | def backward(ctx, grad_output):
13 | # We return as many input gradients as there were arguments.
14 | # Gradients of non-Tensor arguments to forward must be None.
15 | return grad_output * ctx.constant, None
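16 |
17 |
18 | if __name__ == '__main__':
19 |     # Minimal usage sketch: the gradient w.r.t. the tensor input is scaled
20 |     # by the constant; the non-Tensor constant receives no gradient.
21 |     x = torch.tensor([1.0, 2.0], requires_grad=True)
22 |     y = MulConstant.apply(x, 3.0)
23 |     y.sum().backward()
24 |     assert torch.allclose(x.grad, torch.full_like(x, 3.0))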
--------------------------------------------------------------------------------
/test_code/tmp/new_staticlinear.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 |
5 | # Inherit from Function
6 | class LinearFunction(torch.autograd.Function):
7 |
8 | # Note that both forward and backward are @staticmethods
9 | @staticmethod
10 | # bias is an optional argument
11 | def forward(ctx, input, weight, bias=None):
12 | ctx.save_for_backward(input, weight, bias)
13 | output = input.mm(weight.t())
14 | if bias is not None:
15 | output += bias.unsqueeze(0).expand_as(output)
16 | return output
17 |
18 | # This function has only a single output, so it gets only one gradient
19 | @staticmethod
20 | def backward(ctx, grad_output):
21 | # This is a pattern that is very convenient - at the top of backward
22 | # unpack saved_tensors and initialize all gradients w.r.t. inputs to
23 | # None. Thanks to the fact that additional trailing Nones are
24 | # ignored, the return statement is simple even when the function has
25 | # optional inputs.
26 | input, weight, bias = ctx.saved_tensors
27 | grad_input = grad_weight = grad_bias = None
28 |
29 |         # These needs_input_grad checks are optional and are only there to
30 | # improve efficiency. If you want to make your code simpler, you can
31 | # skip them. Returning gradients for inputs that don't require it is
32 | # not an error.
33 | if ctx.needs_input_grad[0]:
34 | grad_input = grad_output.mm(weight)
35 | if ctx.needs_input_grad[1]:
36 | grad_weight = grad_output.t().mm(input)
37 | if bias is not None and ctx.needs_input_grad[2]:
38 | grad_bias = grad_output.sum(0)
39 |
40 | return grad_input, grad_weight, grad_bias
41 |
42 |
43 | if __name__ == '__main__':
44 |
45 | from torch.autograd import gradcheck
46 |
47 | linear = LinearFunction.apply
48 | # gradcheck takes a tuple of tensors as input, check if your gradient
49 | # evaluated with these tensors are close enough to numerical
50 | # approximations and returns True if they all verify this condition.
51 |
52 | batch_size = 128
53 | in_feat = 20
54 | out_feat = 30
55 | X = torch.randn(batch_size, in_feat, requires_grad=True)
56 | W = torch.randn(out_feat, in_feat, requires_grad=True)
57 | Y = linear(X, W) # shape: (batch_size, out_feat)
58 | Y_sum = torch.sum(Y, dim=1)
59 | #Y.backward(torch.ones(batch_size, out_feat))
60 | Y_sum.backward(torch.ones(batch_size))
61 | #input = (X, W)
62 | #test = gradcheck(linear, input, eps=1e-6, atol=1e-4)
63 | #print(test)
--------------------------------------------------------------------------------
/test_code/torch_convolutional.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | if __name__ == '__main__':
5 |
6 | # 1D Convolutional layer convention: B, F, T
7 | in_channels = 16
8 | out_channels = 33
9 | m = torch.nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=2)
10 |
11 | batch_size = 20
12 | seq_len = 50
13 | input = torch.randn(batch_size, in_channels, seq_len)
14 | output = m(input)
15 |
16 |
17 | # RNN layer convention: B, T, F
18 | input_size = 10
19 | hidden_size = 20
20 | num_layers = 2
21 | rnn = torch.nn.GRU(input_size, hidden_size, num_layers)
22 |
23 | seq_len = 5
24 | batch_size = 3
25 | input_size = 10
26 | input = torch.randn(seq_len, batch_size, input_size)
27 | h0 = torch.randn(num_layers, batch_size, hidden_size)
28 | output, hn = rnn(input, h0) # seq_len, batch_size, hidden_size
29 |
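30 |     # Added check: the GRU output follows the (T, B, F) convention noted above.
31 |     assert output.shape == (seq_len, batch_size, hidden_size)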
--------------------------------------------------------------------------------
/test_code/torch_convolutional_FIR.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | if __name__ == '__main__':
5 |
6 | # 1D Convolutional layer convention: B, F, T
7 | in_channels = 1
8 | out_channels = 1
9 | kernel_size = 128
10 |     m = torch.nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, bias=False, padding=kernel_size-1)
11 |
12 | batch_size = 1
13 | seq_len = 5000
14 | u = torch.randn(batch_size, in_channels, seq_len)
15 | y = m(u)
16 |
17 | y1 = y[..., 0:-kernel_size+1]
18 | y2 = y1.transpose(-2, -1)
19 |
20 |
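21 |     # Added check (sketch): with padding kernel_size-1 and the tail trimmed,
22 |     # Conv1d acts as a causal FIR filter; Conv1d computes a cross-correlation,
23 |     # so the scipy-equivalent taps are the flipped kernel.
24 |     import numpy as np
25 |     import scipy.signal
26 |     b_taps = m.weight.detach().numpy()[0, 0, ::-1].copy()
27 |     y_ref = scipy.signal.lfilter(b_taps, [1.0], u.numpy()[0, 0])
28 |     assert np.allclose(y1.detach().numpy()[0, 0], y_ref, atol=1e-4)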
--------------------------------------------------------------------------------