├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── .name
│   ├── dictionaries
│   │   └── marco.xml
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── other.xml
│   ├── sysid-neural-structures-fitting.iml
│   └── vcs.xml
├── LICENSE.md
├── README.md
├── common
│   ├── __init__.py
│   └── metrics.py
├── doc
│   ├── paper
│   │   ├── fig
│   │   │   ├── RLC.pdf
│   │   │   ├── RLC.svg
│   │   │   ├── RLC_SS_val_1step_noise.pdf
│   │   │   ├── RLC_SS_val_64step_noise.pdf
│   │   │   ├── RLC_characteristics.pdf
│   │   │   ├── cart_pole.pdf
│   │   │   └── sym
│   │   │       ├── F.svg
│   │   │       ├── R.png
│   │   │       ├── elle.svg
│   │   │       ├── elleM.svg
│   │   │       ├── m.svg
│   │   │       ├── p.svg
│   │   │       └── preview.svg
│   │   ├── ms.bib
│   │   ├── ms.pdf
│   │   └── ms.tex
│   └── presentation
│       ├── biblio.bib
│       ├── img
│       │   ├── CTS
│       │   │   ├── CTS.jpg
│       │   │   ├── CTS_SS_id_model_SS_1024step.pdf
│       │   │   ├── CTS_SS_id_model_SS_256step.pdf
│       │   │   ├── CTS_SS_val_model_SS_1024step.pdf
│       │   │   ├── CTS_SS_val_model_SS_256step.pdf
│       │   │   ├── CTS_scheme.pdf
│       │   │   └── CTS_scheme.svg
│       │   ├── RLC
│       │   │   ├── RLC.pdf
│       │   │   ├── RLC.svg
│       │   │   ├── RLC_SS_val_1step_noise.pdf
│       │   │   ├── RLC_SS_val_64step_noise.pdf
│       │   │   └── RLC_characteristics.pdf
│       │   └── scheme
│       │       ├── J_fit.svg
│       │       ├── J_fit_theta_x.svg
│       │       ├── J_reg_theta_X.svg
│       │       ├── hatx.svg
│       │       ├── hatxinit.svg
│       │       ├── scheme_multistep_plain.pdf
│       │       ├── scheme_multistep_plain.svg
│       │       ├── scheme_multistep_plain.svg.2020_09_28_17_59_54.0.svg
│       │       ├── scheme_multistep_state_est.pdf
│       │       ├── scheme_multistep_state_est.svg
│       │       ├── scheme_multistep_with_reg.pdf
│       │       ├── scheme_multistep_with_reg.svg
│       │       ├── sysid.svg
│       │       ├── sysid_full.svg
│       │       ├── sysid_rot.svg
│       │       └── x0.svg
│       ├── preamble.tex
│       ├── presentation_main.pdf
│       └── presentation_main.tex
├── environment.txt
├── examples
│   ├── CSTR_example
│   │   ├── CSTR_IO_ident_minibatch.py
│   │   ├── CSTR_SS_eval_sim.py
│   │   ├── CSTR_SS_fit_1step.py
│   │   ├── CSTR_SS_fit_multistep.py
│   │   ├── CSTR_computational_time.py
│   │   ├── CSTR_plot.py
│   │   ├── CSTR_scale.py
│   │   ├── README_CSTR.md
│   │   ├── data
│   │   │   ├── cstr.dat
│   │   │   └── cstr.txt
│   │   └── old
│   │       ├── CSTR_SS_ident_minibatch_1step.py
│   │       └── CSTR_ident_eval_pred.py
│   ├── CTS_example
│   │   ├── CTS_SS_fit_multistep.py
│   │   ├── CTS_SS_fit_simerr.py
│   │   ├── CTS_eval_sim.py
│   │   └── data
│   │       └── dataBenchmark.csv
│   ├── RLC_example
│   │   ├── RLC_IO_eval_sim.py
│   │   ├── RLC_IO_fit_1step.py
│   │   ├── RLC_IO_fit_multistep.py
│   │   ├── RLC_OE_comparison.m
│   │   ├── RLC_SS_eval_sim.py
│   │   ├── RLC_SS_fit_1step.py
│   │   ├── RLC_SS_fit_multistep.py
│   │   ├── RLC_SS_fit_simerror.py
│   │   ├── RLC_generate_id.py
│   │   ├── RLC_generate_val.py
│   │   ├── RLC_subspace_comparison.m
│   │   ├── data
│   │   │   ├── RLC_data_id.csv
│   │   │   └── RLC_data_val.csv
│   │   ├── old
│   │   │   ├── RLC_IO_ident_1step_I.py
│   │   │   ├── RLC_IO_ident_minibatch.py
│   │   │   ├── RLC_IO_ident_minibatch_exp.py
│   │   │   ├── RLC_IO_ident_minibatch_hidden_I.py
│   │   │   ├── RLC_SS_ident_minibatch.py
│   │   │   ├── RLC_SS_ident_minibatch_bkp.py
│   │   │   ├── RLC_SS_ident_minibatch_hidden_consistency_test.py
│   │   │   ├── RLC_ident_minibatch_hidden.py
│   │   │   ├── RLC_ident_minibatch_hidden_consistency.py
│   │   │   ├── RLC_ident_minibatch_transposed.py
│   │   │   ├── RLC_ident_sat_fit_ARX_lin.py
│   │   │   ├── RLC_ident_sat_fit_hidden_ARX.py
│   │   │   ├── RLC_ident_sat_fit_minibatch_OE_test.py
│   │   │   ├── RLC_ident_sat_generate_FE.py
│   │   │   ├── RLC_ident_sat_generate_FE_RK_comp.py
│   │   │   ├── RLC_ident_sat_generate_FE_val.py
│   │   │   ├── RLC_ident_sat_refine_ARX.py
│   │   │   ├── RLC_kalman_filter.py
│   │   │   ├── RLC_use_model.py
│   │   │   ├── time_linearization.ipynb
│   │   │   └── time_linearization_nn.ipynb
│   │   ├── symbolic_RLC.py
│   │   └── test
│   │       ├── RLC_IO_I_eval_sim.py
│   │       ├── RLC_IO_I_fit_multistep.py
│   │       ├── RLC_IO_fit_multistep.py
│   │       ├── RLC_SS_computational_time.py
│   │       ├── RLC_SS_computational_time_transposed.py
│   │       ├── RLC_SS_eval_pred.py
│   │       ├── RLC_SS_fit_1step_tf.py
│   │       ├── RLC_SS_fit_multistep_different_lr.py
│   │       ├── RLC_SS_ident_sim_jit.py
│   │       └── RLC_SS_ident_simerror_tf.py
│   └── cartpole_example
│       ├── cartpole_SS_eval_pred.py
│       ├── cartpole_SS_ident_1step.py
│       ├── cartpole_SS_ident_multistep.py
│       ├── cartpole_dynamics.py
│       ├── cartpole_generate_id.py
│       ├── kalman.py
│       ├── ltisim.py
│       ├── old
│       │   ├── cartpole_SS_ident_minibatch.py
│       │   ├── cartpole_SS_ident_minibatch_y_full_noise.py
│       │   ├── cartpole_SS_ident_minibatch_y_noise.py
│       │   ├── cartpole_SS_ident_minibatch_y_noise_100.py
│       │   ├── cartpole_SS_ident_minibatch_y_noise_32.py
│       │   ├── cartpole_SS_ident_minibatch_y_noise_64.py
│       │   ├── cartpole_SS_ident_minibatch_y_noise_64_start0.py
│       │   ├── cartpole_SS_ident_minibatch_y_nonoise.py
│       │   ├── cartpole_SS_minibatch.py
│       │   ├── fit_cartpole_OE.py
│       │   ├── fit_cartpole_ref_ARX.py
│       │   ├── fit_cartpole_residual_ARX.py
│       │   ├── ode_pendulum.py
│       │   ├── ode_pendulum_forward_eul.py
│       │   ├── ode_pendulum_forward_eul_explicit.py
│       │   └── ode_pendulum_forward_eul_loop.py
│       └── test
│           ├── cartpole_MPC_sim.py
│           ├── cartpole_MPC_sim_reference_id.py
│           ├── cartpole_MPC_sim_reference_val.py
│           ├── cartpole_PID_MPC_sim.py
│           ├── cartpole_PID_clean.py
│           ├── cartpole_PID_clean_NN_model.py
│           ├── cartpole_PID_position_clean.py
│           ├── cartpole_PID_sim.py
│           ├── cartpole_kalman_filter.py
│           ├── cartpole_plot_model_NN_cloop.py
│           ├── cartpole_ref_kalman_filter.py
│           ├── cartpole_use_model.py
│           ├── differentiator_example.py
│           └── differentiator_example_2.py
├── scheme_full.png
└── torchid
    ├── __init__.py
    ├── iofitter.py
    ├── iomodels.py
    ├── ssfitter.py
    ├── ssmodels.py
    ├── tmp
    │   ├── lstmfitter.py
    │   ├── lstmfitter_transposed.py
    │   └── ssfitter_jit.py
    └── util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | /examples/RLC_example/models/
2 | /examples/RLC_example/fig/
3 | /examples/CSTR_example/models/
4 | /examples/CSTR_example/fig/
5 | /examples/cartpole_example/fig/
6 | /examples/cartpole_example/models/
7 | /doc/paper/ms.synctex.gz
8 | /doc/paper/ms.blg
9 | /doc/paper/ms.log
10 | /doc/paper/ms.aux
11 | /doc/paper/ms.bbl
12 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /workspace.xml
--------------------------------------------------------------------------------
/.idea/.name:
--------------------------------------------------------------------------------
1 | sysid-neural-structures-fitting
--------------------------------------------------------------------------------
/.idea/dictionaries/marco.xml:
--------------------------------------------------------------------------------
1 | <component name="ProjectDictionaryState">
2 |   <dictionary name="marco">
3 |     <words>
4 |       <w>autoregressive</w>
5 |       <w>nonoise</w>
6 |       <w>optim</w>
7 |     </words>
8 |   </dictionary>
9 | </component>
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/sysid-neural-structures-fitting.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Marco Forgione
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Model structures and fitting criteria for system identification with neural networks
2 |
3 | This repository contains the Python code to reproduce the results of the paper
4 | [Model structures and fitting criteria for system identification with neural networks](https://arxiv.org/pdf/1911.13034.pdf) by Marco Forgione and Dario Piga.
5 |
6 | The following fitting methods for State-Space (SS) and Input-Output (IO) neural dynamical models are implemented:
7 |
8 | 1. One-step prediction error minimization
9 | 2. Open-loop simulation error minimization
10 | 3. Multi-step simulation error minimization
11 |
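Schematically, for a state-space model whose state mapping is a neural network, the three criteria differ in how the model state is propagated when forming the error. The toy snippet below illustrates the distinction on synthetic data (the names are illustrative only, not the API of this repository):

```
import torch

f = torch.nn.Linear(3, 2)     # toy state mapping (x, u) -> x_next, with n_x=2, n_u=1
x_meas = torch.randn(100, 2)  # measured state sequence (synthetic)
u = torch.randn(100, 1)       # measured input sequence (synthetic)

def f_step(x, u_k):
    return f(torch.cat((x, u_k), dim=-1))

# 1. one-step prediction error: each prediction starts from the *measured* state
x_pred = f_step(x_meas[:-1], u[:-1])
loss_1step = torch.mean((x_pred - x_meas[1:])**2)

# 2. open-loop simulation error: the state is propagated from x[0] only
x_sim = [x_meas[0]]
for k in range(len(u) - 1):
    x_sim.append(f_step(x_sim[-1], u[k]))
x_sim = torch.stack(x_sim)
loss_sim = torch.mean((x_sim - x_meas)**2)

# 3. multi-step simulation error: as in 2., but over short subsequences starting
#    from estimated initial states (see the block diagram and sketch below)
```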
12 | # Block Diagram
13 |
14 | The block diagram below illustrates the proposed multi-step simulation error minimization approach applied to a
15 | state-space model. Quantities in red are tunable optimization variables (as are the parameters of the state and output
16 | neural network mappings).
17 |
18 | At each iteration of the gradient-based optimization loop:
19 |
20 | 1. A batch consisting of q length-m subsequences of measured input, measured output, and hidden state is extracted from the training
21 | dataset (and from the tunable hidden state sequence)
22 | 1. The system's simulated state and output subsequences are obtained by applying m-step-ahead simulation
23 | to the input subsequences. The initial condition is taken as the first element of the hidden state sequence
24 | 1. The fit loss is computed as the discrepancy between measured and simulated output; the consistency
25 | loss is computed as the discrepancy between hidden and simulated state; the total loss is defined as a weighted
26 | sum of the fit and consistency loss
27 | 1. Derivatives of the total loss w.r.t. the hidden state and the neural network parameters are computed via
28 | back-propagation
29 | 1. Using the derivatives computed at the previous step, a gradient-based optimization step is performed. The hidden state and neural network parameters are updated
30 | in the negative gradient direction, aiming to minimize the total loss (a minimal code sketch of one such iteration is given below the diagram)
31 |
32 |
33 | ![Multi-step fitting scheme](scheme_full.png)
34 |
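The fitting scripts in this repository implement the loop above through the classes in ``torchid``. A condensed, self-contained sketch of one such iteration is given below; synthetic data stands in for a real training set, the output is taken as the first state variable (as in the examples), and ``f_sim_multistep(batch_x0, batch_u)`` is assumed to return the batch of simulated state sequences, as it is used in the example scripts:

```
import torch
from torchid.ssfitter import NeuralStateSpaceSimulator
from torchid.ssmodels import NeuralStateSpaceModel

N, n_x, n_u = 1000, 2, 1             # dataset length, state and input dimension
q, m = 16, 64                        # batch size q, subsequence length m
u = torch.randn(N, n_u)              # measured input (synthetic here)
y = torch.randn(N, 1)                # measured output (synthetic here)

ss_model = NeuralStateSpaceModel(n_x=n_x, n_u=n_u, n_feat=64)
simulator = NeuralStateSpaceSimulator(ss_model)
x_hidden = torch.zeros(N, n_x, requires_grad=True)  # tunable hidden state sequence
optimizer = torch.optim.Adam(list(ss_model.parameters()) + [x_hidden], lr=1e-3)
alpha = 1.0  # weight of the consistency loss

for itr in range(1000):
    optimizer.zero_grad()
    # 1. extract a batch of q length-m subsequences
    batch_start = torch.randint(0, N - m + 1, (q,))
    batch_idx = batch_start[:, None] + torch.arange(m)  # (q, m)
    batch_u = u[batch_idx]                              # (q, m, n_u)
    batch_y = y[batch_idx]                              # (q, m, 1)
    batch_x_hidden = x_hidden[batch_idx]                # (q, m, n_x)
    # 2. m-step-ahead simulation from the first hidden state of each subsequence
    batch_x_sim = simulator.f_sim_multistep(x_hidden[batch_start], batch_u)
    batch_y_sim = batch_x_sim[..., [0]]  # output taken as the first state variable
    # 3. fit loss, consistency loss, and their weighted sum
    loss_fit = torch.mean((batch_y_sim - batch_y)**2)
    loss_consistency = torch.mean((batch_x_sim - batch_x_hidden)**2)
    loss = loss_fit + alpha*loss_consistency
    # 4.-5. back-propagation and gradient step, updating both the neural network
    # parameters and the tunable hidden state sequence
    loss.backward()
    optimizer.step()
```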
35 | # Folders:
36 | * torchid: pytorch implementation of the fitting methods 1,2,3
37 | * examples: examples of neural dynamical models fitting
38 | * common: definition of performance index R-squared, etc.
39 |
40 | The [examples](examples) are:
41 |
42 | * ``RLC_example``: nonlinear RLC circuit thoroughly discussed in the paper
43 | * ``CSTR_example``: CSTR system from the [DaISy](https://homes.esat.kuleuven.be/~tokka/daisydata.html) dataset
44 | * ``cartpole_example``: cart-pole mechanical system. The equations are the same as those used [here](https://github.com/forgi86/pyMPC/blob/master/examples/example_inverted_pendulum.ipynb)
45 | * ``CTS_example``: Cascaded Tanks System from the [Nonlinear System Identification Benchmark](http://www.nonlinearbenchmark.org/) website
46 |
47 | For the [RLC example](examples/RLC_example), the main scripts are:
48 |
49 | * ``symbolic_RLC.py``: symbolic manipulation of the RLC model, definition of constants
50 | * ``RLC_generate_id.py``: generate the identification dataset
51 | * ``RLC_generate_val.py``: generate the validation dataset
52 | * ``RLC_SS_fit_1step.py``: SS model, one-step prediction error minimization
53 | * ``RLC_SS_fit_simerror.py``: SS model, open-loop simulation error minimization
54 | * ``RLC_SS_fit_multistep.py``: SS model, multistep simulation error minimization
55 | * ``RLC_SS_eval_sim.py``: SS model, evaluate the simulation performance of the identified models, produce relevant plots and model statistics
56 | * ``RLC_IO_fit_1step.py``: IO model, one-step prediction error minimization
57 | * ``RLC_IO_fit_multistep.py``: IO model, multistep simulation error minimization
58 | * ``RLC_IO_eval_sim.py``: IO model, evaluate the simulation performance of the identified models, produce relevant plots and model statistics
59 | * ``RLC_OE_comparison.m``: Linear Output Error (OE) model fit in Matlab
60 |
61 |
62 | # Software requirements:
63 | Simulations were performed in a Python 3.7 conda environment with
64 |
65 | * numpy
66 | * scipy
67 | * matplotlib
68 | * pandas
69 | * sympy
70 | * numba
71 | * pytorch (version 1.3)
72 |
73 | These dependencies may be installed through the commands:
74 |
75 | ```
76 | conda install numpy numba scipy sympy pandas matplotlib ipython
77 | conda install pytorch torchvision cpuonly -c pytorch
78 | ```
79 |
80 | ## Citing
81 |
82 | If you find this project useful, we encourage you to
83 |
84 | * Star this repository :star:
85 | * Cite the [paper](https://arxiv.org/pdf/1911.13034.pdf)
86 | ```
87 | @inproceedings{forgione2020model,
88 | title={Model structures and fitting criteria for system identification
89 | with neural networks},
90 | author={Forgione, Marco and Piga, Dario},
91 | booktitle={Proc. of the 14th IEEE International Conference on Application
92 | of Information and Communication Technologies, Tashkent, Uzbekistan},
93 | year={2020}
94 | }
95 | ```
96 |
--------------------------------------------------------------------------------
/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/common/__init__.py
--------------------------------------------------------------------------------
/common/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def r_square(y_pred, y_true, time_axis=0):
5 |     """ Compute the R-square fit criterion per time signal """
6 |     SSE = np.sum((y_pred - y_true)**2, axis=time_axis)
7 |     y_mean = np.mean(y_true, axis=time_axis)
8 |     SST = np.sum((y_true - y_mean)**2, axis=time_axis)
9 |
10 |     return 1.0 - SSE/SST
11 |
12 |
13 | def error_rmse(y_pred, y_true, time_axis=0):
14 |     """ Compute the Root Mean Square Error (RMSE) per time signal """
15 |
16 |     MSE = np.mean((y_pred - y_true)**2, axis=time_axis)
17 |     RMSE = np.sqrt(MSE)
18 |     return RMSE
19 |
20 |
21 | if __name__ == '__main__':
22 |     N = 20
23 |     ny = 2
24 |     SNR = 10
25 |     y_true = SNR*np.random.randn(N, ny)
26 |     y_pred = np.copy(y_true) + np.random.randn(N, ny)
27 |     print(r_square(y_pred, y_true))
28 |
--------------------------------------------------------------------------------
/doc/paper/fig/RLC.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/RLC.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/RLC_SS_val_1step_noise.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/RLC_SS_val_1step_noise.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/RLC_SS_val_64step_noise.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/RLC_SS_val_64step_noise.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/RLC_characteristics.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/RLC_characteristics.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/cart_pole.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/cart_pole.pdf
--------------------------------------------------------------------------------
/doc/paper/fig/sym/F.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/fig/sym/R.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/fig/sym/R.png
--------------------------------------------------------------------------------
/doc/paper/fig/sym/elle.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/fig/sym/elleM.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/fig/sym/m.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/fig/sym/p.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/fig/sym/preview.svg:
--------------------------------------------------------------------------------
1 |
2 |
16 |
--------------------------------------------------------------------------------
/doc/paper/ms.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/paper/ms.pdf
--------------------------------------------------------------------------------
/doc/presentation/biblio.bib:
--------------------------------------------------------------------------------
1 | @article{bombois2006least,
2 | title={Least costly identification experiment for control},
3 | author={Bombois, X. and Scorletti, G. and Gevers, M. and Van den Hof, PMJ and Hildebrand, R.},
4 | journal={Automatica},
5 | volume={42},
6 | number={10},
7 | pages={1651--1662},
8 | year={2006},
9 | publisher={Elsevier}
10 | }
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS.jpg
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS_SS_id_model_SS_1024step.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS_SS_id_model_SS_1024step.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS_SS_id_model_SS_256step.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS_SS_id_model_SS_256step.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS_SS_val_model_SS_1024step.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS_SS_val_model_SS_1024step.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS_SS_val_model_SS_256step.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS_SS_val_model_SS_256step.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/CTS/CTS_scheme.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/CTS/CTS_scheme.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/RLC/RLC.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/RLC/RLC.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/RLC/RLC_SS_val_1step_noise.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/RLC/RLC_SS_val_1step_noise.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/RLC/RLC_SS_val_64step_noise.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/RLC/RLC_SS_val_64step_noise.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/RLC/RLC_characteristics.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/RLC/RLC_characteristics.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/J_fit.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/hatx.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/hatxinit.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/scheme_multistep_plain.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/scheme/scheme_multistep_plain.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/scheme_multistep_state_est.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/scheme/scheme_multistep_state_est.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/scheme_multistep_with_reg.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/img/scheme/scheme_multistep_with_reg.pdf
--------------------------------------------------------------------------------
/doc/presentation/img/scheme/x0.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
17 |
--------------------------------------------------------------------------------
/doc/presentation/preamble.tex:
--------------------------------------------------------------------------------
1 | % If the fonts do not look nice, try deleting the line with the fontenc.
2 | \usepackage[english]{babel}
3 | \usepackage{amsmath}
4 | \usepackage[latin1]{inputenc}
5 | \usepackage{units}
6 | \usepackage{colortbl}
7 | \usepackage{multimedia}
8 | \usepackage{bm}
9 | \usepackage{subcaption}
10 | \usepackage{algorithm2e}
11 | \usepackage{algorithmic}
12 |
13 | \mode<presentation>
14 | {
15 | \usetheme{Boadilla}
16 | \useoutertheme{infolines}
17 | \setbeamercovered{transparent}
18 | }
19 |
20 |
21 | \title[Neural System Identification]{\textsc{Model Structures and Fitting Criteria for System Identification with Neural Networks}}
22 |
23 |
24 | \author[]{Marco Forgione, Dario Piga}
25 |
26 | \institute[IDSIA]{IDSIA Dalle Molle Institute for Artificial Intelligence SUPSI-USI, Lugano, Switzerland}
27 |
28 |
29 | \date[AICT 2020]{14th IEEE International Conference on Application of Information and Communication Technologies}
30 |
31 |
32 | \subject{System Identification, Deep Learning, Machine Learning, Regularization}
33 |
34 |
35 | %% MATH DEFINITIONS %%
36 | \newcommand{\So}{S_o} % true system
37 | \newcommand{\hidden}[1]{\overline{#1}}
38 | \newcommand{\nsamp}{N}
39 | \newcommand{\Yid}{Y}
40 | \newcommand{\Uid}{U}
41 | \newcommand{\Did}{{\mathcal{D}}}
42 | \newcommand{\tens}[1]{\bm{#1}}
43 |
44 | \newcommand{\batchsize}{q}
45 | \newcommand{\seqlen}{m}
46 | \newcommand{\nin}{n_u}
47 | \newcommand{\ny}{n_y}
48 | \newcommand{\nx}{n_x}
49 |
50 | \newcommand{\NN}{\mathcal{N}} % a feedforward neural network
51 |
52 | \newcommand{\norm}[1]{\left \lVert #1 \right \rVert}
53 | \DeclareMathOperator*\argmin{arg \, min}
54 |
--------------------------------------------------------------------------------
/doc/presentation/presentation_main.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/doc/presentation/presentation_main.pdf
--------------------------------------------------------------------------------
/environment.txt:
--------------------------------------------------------------------------------
1 | conda create --name DL_ID python=3.7
2 | conda activate DL_ID
3 | conda install numpy numba scipy sympy pandas matplotlib ipython
4 | conda install pytorch torchvision cpuonly -c pytorch
5 | pip install control
6 |
--------------------------------------------------------------------------------
/examples/CSTR_example/CSTR_SS_eval_sim.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 | import pickle
8 | sys.path.append(os.path.join("..", ".."))  # make the torchid package importable
9 | from torchid.ssfitter import NeuralStateSpaceSimulator
10 | from torchid.ssmodels import NeuralStateSpaceModel
11 |
12 | if __name__ == '__main__':
13 |
14 |     dataset_type = 'val'
15 |     model_type = '1step_nonoise'
16 |
17 |     COL_T = ['time']
18 |     COL_Y = ['Ca']
19 |     COL_X = ['Ca', 'T']
20 |     COL_U = ['q']
21 |
22 |     dataset_filename = f"CSTR_data_{dataset_type}.csv"
23 |     df_X = pd.read_csv(os.path.join("data", dataset_filename))
24 |
25 |     with open(os.path.join("data", "fit_scaler.pkl"), 'rb') as fp:
26 |         scaler = pickle.load(fp)
27 |
28 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
29 |     x = np.array(df_X[COL_X], dtype=np.float32)
30 |     u = np.array(df_X[COL_U], dtype=np.float32)
31 |     y_var_idx = 0  # 0: concentration Ca, 1: temperature T
32 |
33 |     y = np.copy(x[:, [y_var_idx]])
34 |
35 |     N = np.shape(y)[0]
36 |     Ts = time_data[1] - time_data[0]
37 |
38 |     x_noise = np.copy(x)  # no noise added here
39 |     x_noise = x_noise.astype(np.float32)
40 |
41 |     # Setup neural model structure and load fitted model parameters
42 |     ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
43 |     nn_solution = NeuralStateSpaceSimulator(ss_model)
44 |
45 |     model_filename = f"model_SS_{model_type}.pkl"
46 |     nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
47 |
48 |     # In[Validate model]
49 |     t_val_start = 0
50 |     t_val_end = time_data[-1]
51 |     idx_val_start = int(t_val_start//Ts)
52 |     idx_val_end = int(t_val_end//Ts)
53 |
54 |     # Build validation data
55 |     u_val = u[idx_val_start:idx_val_end]
56 |     x_meas_val = x_noise[idx_val_start:idx_val_end]
57 |     x_true_val = x[idx_val_start:idx_val_end]
58 |     y_val = y[idx_val_start:idx_val_end]
59 |     time_val = time_data[idx_val_start:idx_val_end]
60 |
61 |     x_0 = x_meas_val[0, :]
62 |
63 |     with torch.no_grad():
64 |         x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_val))
65 |         loss = torch.mean(torch.abs(x_sim_torch - torch.tensor(x_true_val)))
66 |
67 |     # In[Plot]
68 |     x_sim = np.array(x_sim_torch)
69 |     fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5))
70 |
71 |     if dataset_type == 'id':
72 |         t_plot_start = 0.0
73 |         t_plot_end = 500
74 |     else:
75 |         t_plot_start = 0.0
76 |         t_plot_end = 100
77 |
78 |     idx_plot_start = int(t_plot_start//Ts)
79 |     idx_plot_end = int(t_plot_end//Ts)
80 |
81 |     # Undo scaling, back to physical units
82 |     x_true_val_unsc = np.copy(x_true_val)
83 |     x_true_val_unsc[:, 0] = x_true_val[:, 0]*scaler.scale_[0] + scaler.mean_[0]
84 |     x_true_val_unsc[:, 1] = x_true_val[:, 1]*scaler.scale_[1] + scaler.mean_[1]
85 |
86 |     x_sim_unsc = np.copy(x_sim)
87 |     x_sim_unsc[:, 0] = x_sim_unsc[:, 0]*scaler.scale_[0] + scaler.mean_[0]
88 |     x_sim_unsc[:, 1] = x_sim_unsc[:, 1]*scaler.scale_[1] + scaler.mean_[1]
89 |
90 |     ax[0].plot(time_val[idx_plot_start:idx_plot_end], x_true_val_unsc[idx_plot_start:idx_plot_end, 0], 'k', label='True')
91 |     ax[0].plot(time_val[idx_plot_start:idx_plot_end], x_sim_unsc[idx_plot_start:idx_plot_end, 0], 'r--', label='Model simulation')
92 |     ax[0].legend(loc='upper right')
93 |     ax[0].grid(True)
94 |     ax[0].set_xlabel("Time (min)")
95 |     ax[0].set_ylabel("Concentration Ca (mol/L)")
96 |
97 |     ax[1].plot(time_val[idx_plot_start:idx_plot_end], x_true_val_unsc[idx_plot_start:idx_plot_end, 1], 'k', label='True')
98 |     ax[1].plot(time_val[idx_plot_start:idx_plot_end], x_sim_unsc[idx_plot_start:idx_plot_end, 1], 'r--', label='Model simulation')
99 |     ax[1].legend(loc='upper right')
100 |     ax[1].grid(True)
101 |     ax[1].set_xlabel("Time (min)")
102 |     ax[1].set_ylabel("Reactor temperature T (K)")
103 |
104 |     if not os.path.exists("fig"):
105 |         os.makedirs("fig")
106 |     fig_name = f"CSTR_SS_{dataset_type}_{model_type}.pdf"
107 |     fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
108 |
--------------------------------------------------------------------------------
/examples/CSTR_example/CSTR_SS_fit_1step.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import torch.optim as optim
5 | import time
6 | import matplotlib.pyplot as plt
7 | import os
8 | import sys
9 | sys.path.append(os.path.join("..", ".."))
10 | from torchid.ssfitter import NeuralStateSpaceSimulator
11 | from torchid.ssmodels import NeuralStateSpaceModel
12 |
13 | if __name__ == '__main__':
14 |
15 |     # Set seed for reproducibility
16 |     np.random.seed(0)
17 |     torch.manual_seed(0)
18 |
19 |     # Overall parameters
20 |     num_iter = 10000
21 |     lr = 1e-4
22 |     test_freq = 100
23 |     add_noise = False
24 |
25 |     # Column names
26 |     COL_T = ['time']
27 |     COL_Y = ['Ca']
28 |     COL_X = ['Ca', 'T']
29 |     COL_U = ['q']
30 |
31 |     # Load dataset
32 |     df_X = pd.read_csv(os.path.join("data", "CSTR_data_id.csv"))
33 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
34 |     y = np.array(df_X[COL_Y], dtype=np.float32)
35 |     x = np.array(df_X[COL_X], dtype=np.float32)
36 |     u = np.array(df_X[COL_U], dtype=np.float32)
37 |     x0_torch = torch.from_numpy(x[0, :])
38 |     x_noise = np.copy(x)  # no noise added here
39 |     x_noise = x_noise.astype(np.float32)
40 |     Ts = time_data[1] - time_data[0]
41 |
42 |     # Get fit data
43 |     t_fit = time_data[-1]  # use all data
44 |     n_fit = int(t_fit//Ts)
45 |     input_data = u[0:n_fit]
46 |     state_data = x_noise[0:n_fit]
47 |     u_torch = torch.from_numpy(input_data)
48 |     x_true_torch = torch.from_numpy(state_data)
49 |
50 |     # Setup neural model structure
51 |     ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64, init_small=False)
52 |     nn_solution = NeuralStateSpaceSimulator(ss_model)
53 |
54 |     # Setup optimizer
55 |     optimizer = optim.Adam(nn_solution.ss_model.parameters(), lr=lr)
56 |
57 |     # Scale loss with respect to the initial one
58 |     with torch.no_grad():
59 |         x_est_torch = nn_solution.f_onestep(x_true_torch, u_torch)
60 |         err_init = x_est_torch - x_true_torch
61 |         scale_error = torch.sqrt(torch.mean(err_init**2, dim=0))
62 |
63 |     # Training loop
64 |     LOSS = []
65 |     start_time = time.time()
66 |     for itr in range(0, num_iter):
67 |
68 |         optimizer.zero_grad()
69 |         x_est_torch = nn_solution.f_onestep(x_true_torch, u_torch)
70 |         err = x_est_torch - x_true_torch
71 |         err_scaled = err / scale_error
72 |         loss_sc = torch.mean(err_scaled**2)
73 |
74 |         if itr % test_freq == 0:
75 |             with torch.no_grad():
76 |                 loss_unsc = torch.mean(err**2)
77 |                 print('Iter {:04d} | Loss {:.6f}, Scaled Loss {:.6f}'.format(itr, loss_unsc.item(), loss_sc.item()))
78 |
79 |         LOSS.append(loss_sc.item())
80 |         loss_sc.backward()
81 |         optimizer.step()
82 |
83 |     train_time = time.time() - start_time
84 |     print(f"\nTrain time: {train_time:.2f}")
85 |
86 |     if not os.path.exists("models"):
87 |         os.makedirs("models")
88 |
89 |     torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", "model_SS_1step_nonoise.pkl"))
90 |
91 |     # In[Plot]
92 |     x_0 = state_data[0, :]
93 |     time_start = time.time()
94 |     with torch.no_grad():
95 |         x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
96 |         loss_sim = torch.mean(torch.abs(x_sim - x_true_torch))
97 |     time_arr = time.time() - time_start
98 |
99 |     x_sim = np.array(x_sim)
100 |     fig, ax = plt.subplots(3, 1, sharex=True)
101 |     ax[0].plot(np.array(x_true_torch[:, 0]), 'k', label='True')
102 |     ax[0].plot(x_sim[:, 0], 'r', label='Sim')
103 |     ax[0].legend()
104 |     ax[0].grid(True)
105 |     ax[1].plot(np.array(x_true_torch[:, 1]), 'k', label='True')
106 |     ax[1].plot(x_sim[:, 1], 'r', label='Sim')
107 |     ax[1].legend()
108 |     ax[1].grid(True)
109 |
110 |     ax[2].plot(np.array(u_torch), 'b', label='Input')
111 |     ax[2].legend()
112 |     ax[2].grid(True)
113 |
114 |     if not os.path.exists("fig"):
115 |         os.makedirs("fig")
116 |
117 |     fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
118 |     ax.plot(LOSS)
119 |     ax.grid(True)
120 |     ax.set_ylabel("Loss (-)")
121 |     ax.set_xlabel("Iteration (-)")
122 |
123 |     if add_noise:
124 |         fig_name = "CSTR_SS_loss_1step_noise.pdf"
125 |     else:
126 |         fig_name = "CSTR_SS_loss_1step_nonoise.pdf"
127 |
128 |     fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
129 |
--------------------------------------------------------------------------------
/examples/CSTR_example/CSTR_computational_time.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 | sys.path.append(os.path.join("..", ".."))  # repo root is two levels up
8 | from torchid.ssfitter import NeuralStateSpaceSimulator
9 | from torchid.ssmodels import NeuralStateSpaceModel
10 | from torchid.util import get_random_batch_idx
11 | import time
12 |
13 | if __name__ == '__main__':
14 |     COL_T = ['time']
15 |     COL_Y = ['Ca']
16 |     COL_X = ['Ca', 'T']
17 |     COL_U = ['q']
18 |
19 |     df_X = pd.read_csv(os.path.join("data", "cstr.dat"), header=None, sep="\t")
20 |     df_X.columns = ['time', 'q', 'Ca', 'T', 'None']
21 |
22 |     df_X['q'] = df_X['q'] / 100
23 |     df_X['Ca'] = df_X['Ca'] * 10
24 |     df_X['T'] = df_X['T'] / 400
25 |
26 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
27 |     y = np.array(df_X[COL_Y], dtype=np.float32)
28 |     x = np.array(df_X[COL_X], dtype=np.float32)
29 |     u = np.array(df_X[COL_U], dtype=np.float32)
30 |     x0_torch = torch.from_numpy(x[0, :])
31 |
32 |     N = np.shape(y)[0]
33 |     Ts = time_data[1] - time_data[0]
34 |
35 |     # Measurement noise standard deviations (zero here)
36 |     std_noise_Ca = 0.0 * 5.0
37 |     std_noise_T = 0.0 * 0.5
38 |     std_noise = np.array([std_noise_Ca, std_noise_T])
39 |
40 |     x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
41 |     x_noise = x_noise.astype(np.float32)
42 |     y_noise = np.copy(y)
43 |
44 |     # Initialize optimization
45 |     ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
46 |     nn_solution = NeuralStateSpaceSimulator(ss_model)
47 |     nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_SS_1step_nonoise.pkl")))
48 |
49 |     # In[Validate model]
50 |     t_val_start = 0
51 |     t_val_end = time_data[-1]
52 |     idx_val_start = int(t_val_start//Ts)
53 |     idx_val_end = int(t_val_end//Ts)
54 |
55 |     # Build validation data
56 |     u_val = u[idx_val_start:idx_val_end]
57 |     x_val = x_noise[idx_val_start:idx_val_end]
58 |     y_val = y[idx_val_start:idx_val_end]
59 |     time_val = time_data[idx_val_start:idx_val_end]
60 |
61 |     # Time batch predictions for decreasing sequence lengths
62 |     SEQ_LEN = np.arange(2, 1024, 8)
63 |     SEQ_LEN = np.flip(SEQ_LEN)
64 |     SEQ_LEN[-1] = 7000
65 |     TIME_CALC_NOGRAD = []
66 |     TIME_CALC_GRAD = []
67 |
68 |     num_samples = y_val.shape[0]
69 |
70 |     loss_fn = torch.nn.MSELoss()
71 |
72 |     for idx in range(len(SEQ_LEN)):
73 |         seq_len = SEQ_LEN[idx]
74 |         batch_size = num_samples // seq_len
75 |         batch_start, batch_idx = get_random_batch_idx(num_samples, batch_size, seq_len)
76 |         batch_time = torch.tensor(time_val[batch_idx])
77 |         batch_x0 = torch.tensor(x_val[batch_start])  # (M, D)
78 |         batch_u = torch.tensor(u_val[batch_idx])
79 |         batch_x = torch.tensor(x_val[batch_idx])
80 |
81 |         # Timing of the forward pass alone (currently disabled)
82 |         """
83 |         time_start = time.perf_counter()
84 |         with torch.no_grad():
85 |             batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
86 |             err = batch_x - batch_x_pred
87 |             loss = torch.mean(err**2)
88 |         TIME_CALC_NOGRAD.append(time.perf_counter() - time_start)
89 |         """
90 |
91 |         # Timing of forward pass plus gradient computation
92 |         time_start = time.perf_counter()
93 |         batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
94 |         err = batch_x - batch_x_pred
95 |         loss = torch.mean(err**2)
96 |         loss.backward()
97 |         TIME_CALC_GRAD.append(time.perf_counter() - time_start)
98 |
99 |         for par in nn_solution.ss_model.parameters():
100 |             if par.grad is not None:
101 |                 par.grad.zero_()
102 |
103 |     # In[Plot]
104 |     # plt.plot(SEQ_LEN, np.array(TIME_CALC_NOGRAD)*1e3, '*b')
105 |     plt.plot(SEQ_LEN, np.array(TIME_CALC_GRAD)*1e3, '*r')
106 |     plt.grid(True)
107 |
--------------------------------------------------------------------------------
/examples/CSTR_example/CSTR_plot.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 |
9 | if __name__ == '__main__':
10 |
11 |     # Column names in dataset
12 |     COL_T = ['time']
13 |     COL_Y = ['Ca']
14 |     COL_X = ['Ca', 'T']
15 |     COL_U = ['q']
16 |
17 |     # Load dataset
18 |     df_X = pd.read_csv(os.path.join("data", "cstr.dat"), header=None, sep="\t")
19 |     df_X.columns = ['time', 'q', 'Ca', 'T', 'None']
20 |     df_X['q'] = df_X['q']/100
21 |     df_X['Ca'] = df_X['Ca']*10
22 |     df_X['T'] = df_X['T']/400
23 |
24 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
25 |     y = np.array(df_X[COL_Y], dtype=np.float32)
26 |     x = np.array(df_X[COL_X], dtype=np.float32)
27 |     u = np.array(df_X[COL_U], dtype=np.float32)
28 |     x0_torch = torch.from_numpy(x[0, :])
29 |
30 |     # No noise added
31 |     x_noise = np.copy(x)
32 |     x_noise = x_noise.astype(np.float32)
33 |
34 |     # Plot
35 |     fig, ax = plt.subplots(3, 1, sharex=True)
36 |     ax[0].plot(np.array(x_noise[:, 0]), 'k', label='True')
37 |     ax[0].legend()
38 |     ax[0].grid(True)
39 |
40 |     ax[1].plot(np.array(x_noise[:, 1]), 'k', label='True')
41 |     ax[1].legend()
42 |     ax[1].grid(True)
43 |
44 |     ax[2].plot(u, 'k', label='Input')
45 |     ax[2].legend()
46 |     ax[2].grid(True)
47 |
--------------------------------------------------------------------------------
/examples/CSTR_example/CSTR_scale.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | from sklearn.preprocessing import StandardScaler
7 | import pickle
8 |
9 | if __name__ == '__main__':
10 |
11 |     # Column names
12 |     COL_T = ['time']
13 |     COL_Y = ['Ca']
14 |     COL_X = ['Ca', 'T']
15 |     COL_U = ['q']
16 |
17 |     # Load dataset
18 |     df_X = pd.read_csv(os.path.join("data", "cstr.dat"), header=None, sep="\t")
19 |     df_X.columns = ['time', 'q', 'Ca', 'T', 'None']
20 |
21 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
22 |     y = np.array(df_X[COL_Y], dtype=np.float32)
23 |     x = np.array(df_X[COL_X], dtype=np.float32)
24 |     u = np.array(df_X[COL_U], dtype=np.float32)
25 |
26 |     # Plot data in original scale
27 |     fig, ax = plt.subplots(3, 1, sharex=True)
28 |     ax[0].plot(time_data, np.array(x[:, 0]), 'k', label='$C_a$')
29 |     ax[0].grid(True)
30 |     ax[0].set_ylabel("Concentration (mol/l)")
31 |     ax[0].legend(loc='upper right')
32 |
33 |     ax[1].plot(time_data, np.array(x[:, 1]), 'k', label='$T$')
34 |     ax[1].grid(True)
35 |     ax[1].set_ylabel("Temperature (K)")
36 |     ax[1].legend(loc='upper right')
37 |
38 |     ax[2].plot(time_data, u, 'k', label='$q$')
39 |     ax[2].grid(True)
40 |     ax[2].set_xlabel("Time (min)")
41 |     ax[2].set_ylabel("Flow (l/min)")
42 |     ax[2].legend(loc='upper right')
43 |
44 |     # Rescale data
45 |     Ts = time_data[1] - time_data[0]
46 |     t_fit = 500  # fit on the first 500 minutes
47 |     n_fit = int(t_fit//Ts)
48 |     df_id = df_X.iloc[0:n_fit].copy()
49 |     df_val = df_X.iloc[n_fit:].copy()
50 |
51 |     COLUMNS_SCALE = ['Ca', 'T', 'q']  # columns to be scaled
52 |     scaler = StandardScaler().fit(df_id[COLUMNS_SCALE])
53 |
54 |     # Scale identification dataset
55 |     df_id[COLUMNS_SCALE] = scaler.transform(df_id[COLUMNS_SCALE])
56 |     df_id[COL_T] = df_id[COL_T] - df_id[COL_T].iloc[0]
57 |
58 |     # Scale validation dataset
59 |     df_val[COLUMNS_SCALE] = scaler.transform(df_val[COLUMNS_SCALE])
60 |     df_val[COL_T] = df_val[COL_T] - df_val[COL_T].iloc[0]
61 |
62 |     # Save datasets to csv
63 |     df_id.to_csv(os.path.join("data", "CSTR_data_id.csv"), sep=",", index=False)
64 |     df_val.to_csv(os.path.join("data", "CSTR_data_val.csv"), sep=",", index=False)
65 |
66 |     # Save scaler object
67 |     with open(os.path.join("data", "fit_scaler.pkl"), 'wb') as fp:
68 |         pickle.dump(scaler, fp)
69 |
--------------------------------------------------------------------------------
/examples/CSTR_example/README_CSTR.md:
--------------------------------------------------------------------------------
1 | Neural dynamical model identification of the CSTR example from the [DaISy dataset](https://homes.esat.kuleuven.be/~smc/daisy/daisydata.html)
2 |
3 | The main scripts are:
4 |
5 | * ``CSTR_SS_fit_1step.py``: SS model, one-step prediction error minimization
6 | * ``CSTR_SS_fit_multistep.py``: SS model, multistep simulation error minimization
7 | * ``CSTR_SS_eval_sim.py``: SS model, evaluate the simulation performance of the identified models, produce relevant plots and model statistics
8 |
--------------------------------------------------------------------------------
/examples/CSTR_example/data/cstr.txt:
--------------------------------------------------------------------------------
1 | Contributed by:
2 | Jairo ESPINOSA
3 | ESAT-SISTA KULEUVEN
4 | Kardinaal Mercierlaan 94
5 | B-3001 Heverlee Belgium
6 | espinosa@esat.kuleuven.ac.be
7 | Description:
8 | The Process is a model of a Continuous
9 | Stirring Tank Reactor, where the reaction
10 | is exothermic and the concentration is
11 | controlled by regulating the coolant
12 | flow.
13 | Sampling:
14 | 0.1 min
15 | Number:
16 | 7500
17 | Inputs:
18 | q: Coolant Flow l/min
19 | Outputs:
20 | Ca: Concentration mol/l
21 | T: Temperature Kelvin degrees
22 | References:
23 | J.D. Morningred, B.E.Paden, D.E. Seborg and D.A. Mellichamp "An adaptive nonlinear predictive controller" in. Proc. of the A.C.C. vol.2 1990 pp.1614-1619
24 | G.Lightbody and G.W.Irwin. Nonlinear Control Structures Based on Embedded Neural System Models, IEEE Tran. on Neural Networks Vol.8 No.3 pp.553-567
25 | J.Espinosa and J. Vandewalle, Predictive Control Using Fuzzy Models, Submitted to the 3rd. On-Line World Conference on Soft Computing in Engineering Design and Manufacturing.
26 | Properties:
27 | Columns:
28 | Column 1: time-steps
29 | Column 2: input q
30 | Column 3: output Ca
31 | Column 4: output T
32 | Category:
33 | Process Industry Systems
34 | Where:
35 | ftp://ftp.esat.kuleuven.ac.be/pub/SISTA/espinosa/datasets/cstr.dat
36 |
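37 | Loading (a note added for this repository, not part of the original
38 | DaISy description): the scripts in this folder read the file as
39 | tab-separated values without a header, along the lines of:
40 |
41 |     import pandas as pd
42 |     df = pd.read_csv("cstr.dat", header=None, sep="\t")
43 |     df.columns = ['time', 'q', 'Ca', 'T', 'None']  # trailing column is empty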
--------------------------------------------------------------------------------
/examples/CSTR_example/old/CSTR_ident_eval_pred.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join(".."))
9 | from torchid.ssfitter import NeuralStateSpaceSimulator
10 | from torchid.ssmodels import NeuralStateSpaceModel
11 | from torchid.util import get_sequential_batch_idx
12 |
13 | if __name__ == '__main__':
14 |
15 |     seq_len = 256  # prediction sequence length
16 |
17 |     COL_T = ['time']
18 |     COL_Y = ['Ca']
19 |     COL_X = ['Ca', 'T']
20 |     COL_U = ['q']
21 |
22 |     # df_X = pd.read_csv(os.path.join("data", "cstr.dat"), header=None, sep="\t")
23 |     # df_X.columns = ['time', 'q', 'Ca', 'T', 'None']
24 |     # df_X['q'] = 1.0*df_X['q']/100.0
25 |     # df_X['Ca'] = 1.0*df_X['Ca']*10.0
26 |     # df_X['T'] = 1.0*df_X['T']/400.0
27 |
28 |     df_X = pd.read_csv(os.path.join("data", "cstr_val.dat"), sep="\t")
29 |
30 |     time_data = np.array(df_X[COL_T], dtype=np.float32)
31 |     y = np.array(df_X[COL_Y], dtype=np.float32)
32 |     x = np.array(df_X[COL_X], dtype=np.float32)
33 |     u = np.array(df_X[COL_U], dtype=np.float32)
34 |     x0_torch = torch.from_numpy(x[0, :])
35 |
36 |     N = np.shape(y)[0]
37 |     Ts = time_data[1] - time_data[0]
38 |
39 |     std_noise_V = 0.0 * 5.0
40 |     std_noise_I = 0.0 * 0.5
41 |     std_noise = np.array([std_noise_V, std_noise_I])
42 |
43 |     x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
44 |     x_noise = x_noise.astype(np.float32)
45 |     y_noise = np.copy(y)
46 |
47 |     # Initialize optimization
48 |     ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
49 |     nn_solution = NeuralStateSpaceSimulator(ss_model)
50 |     nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_SS_1step.pkl")))
51 |     # nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_ss_16step_from1.pkl")))
52 |     # nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_ss_128step_from16.pkl")))
53 |
54 |     # In[Validate model]
55 |     t_val_start = 0
56 |     t_val_end = time_data[-1]
57 |     idx_val_start = int(t_val_start//Ts)
58 |     idx_val_end = int(t_val_end//Ts)
59 |
60 |     # Build validation data
61 |     u_val = u[idx_val_start:idx_val_end]
62 |     x_val = x_noise[idx_val_start:idx_val_end]
63 |     y_val = y[idx_val_start:idx_val_end]
64 |     time_val = time_data[idx_val_start:idx_val_end]
65 |
66 |     # Predict batch data
67 |     batch_start, batch_idx = get_sequential_batch_idx(y_val.shape[0], seq_len)
68 |     batch_time = torch.tensor(time_val[batch_idx])
69 |     batch_x0 = torch.tensor(x_val[batch_start])  # (M, D)
70 |     batch_u = torch.tensor(u_val[batch_idx])
71 |     batch_x = torch.tensor(x_val[batch_idx])
72 |     batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
73 |
74 |     # Plot data
75 |     batch_x_pred_np = np.array(batch_x_pred.detach())
76 |     batch_time_np = np.array(batch_time.detach()).squeeze()
77 |
78 |     fig, ax = plt.subplots(3, 1, sharex=True)
79 |     ax[0].plot(time_val, x_val[:, 0], 'b')
80 |     ax[0].plot(batch_time_np.T, batch_x_pred_np[:, :, 0].T, 'r')
81 |     ax[0].grid(True)
82 |
83 |     ax[1].plot(time_val, x_val[:, 1], 'b')
84 |     ax[1].plot(batch_time_np.T, batch_x_pred_np[:, :, 1].T, 'r')
85 |     ax[1].grid(True)
86 |
87 |     ax[2].plot(time_val, u_val, label='Input')
88 |     ax[2].grid(True)
89 |
--------------------------------------------------------------------------------
/examples/CTS_example/CTS_eval_sim.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join("..", ".."))
9 | from torchid.ssfitter import NeuralStateSpaceSimulator
10 | from torchid.ssmodels import CTSNeuralStateSpaceModel
11 | from common import metrics
12 |
13 | if __name__ == '__main__':
14 |
15 |     plot_input = False
16 |
17 |     dataset_type = 'val'
18 |     model_name = 'model_SS_256step'
19 |     hidden_name = 'hidden_SS_256step'
20 |     # model_name = 'model_SS_1024step'
21 |     # hidden_name = 'hidden_SS_1024step'
22 |
23 |     # Load dataset
24 |     df_data = pd.read_csv(os.path.join("data", "dataBenchmark.csv"))
25 |     if dataset_type == 'id':
26 |         u = np.array(df_data[['uEst']]).astype(np.float32)
27 |         y = np.array(df_data[['yEst']]).astype(np.float32)
28 |     else:
29 |         u = np.array(df_data[['uVal']]).astype(np.float32)
30 |         y = np.array(df_data[['yVal']]).astype(np.float32)
31 |
32 |     ts = df_data['Ts'][0].astype(np.float32)
33 |     time_exp = np.arange(y.size).astype(np.float32) * ts
34 |
35 |     # Build validation data
36 |     t_val_start = 0
37 |     t_val_end = time_exp[-1]
38 |     idx_val_start = int(t_val_start//ts)
39 |     idx_val_end = int(t_val_end//ts)
40 |
41 |     y_meas_val = y[idx_val_start:idx_val_end]
42 |     u_val = u[idx_val_start:idx_val_end]
43 |     time_val = time_exp[idx_val_start:idx_val_end]
44 |
45 |     # Setup neural model structure
46 |     ss_model = CTSNeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64, ts=ts)
47 |     nn_solution = NeuralStateSpaceSimulator(ss_model)
48 |     nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_name + ".pkl")))
49 |     x_hidden_fit = torch.load(os.path.join("models", hidden_name + ".pkl"))
50 |
51 |     # Evaluate the model in open-loop simulation against validation data
52 |     x_0 = x_hidden_fit[0, :].detach().numpy()  # initial state had to be estimated, according to the dataset description
53 |     with torch.no_grad():
54 |         x_sim_val_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_val))
55 |
56 |     # Transform to numpy arrays
57 |     x_sim_val = x_sim_val_torch.detach().numpy()
58 |     y_sim_val = x_sim_val[:, [0]]
59 |
60 |     # Plot results
61 |     fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
62 |     idx_plot_start = 0
63 |     idx_plot_end = time_val.size
64 |
65 |     ax[0].plot(time_val[idx_plot_start:idx_plot_end], y_meas_val[idx_plot_start:idx_plot_end, 0], 'k', label='$y$')
66 |     ax[0].plot(time_val[idx_plot_start:idx_plot_end], y_sim_val[idx_plot_start:idx_plot_end, 0], 'r--', label=r'$\hat{y}^{\mathrm{sim}}$')
67 |     ax[0].legend(loc='upper right')
68 |     ax[0].set_xlabel("Time (s)")
69 |     ax[0].set_ylabel("Voltage (V)")
70 |     # ax[0].set_ylim([-1.5, 1.5])
71 |     ax[0].grid(True)
72 |
73 |     ax[1].plot(time_val[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end, 0], 'k', label='$u$')
74 |     ax[1].legend(loc='upper right')
75 |     ax[1].set_xlabel("Time (s)")
76 |     ax[1].set_ylabel("Voltage (V)")
77 |     # ax[1].set_ylim([-5, 5])
78 |     ax[1].grid(True)
79 |
80 |     # Save figure
81 |     if not os.path.exists("fig"):
82 |         os.makedirs("fig")
83 |
84 |     fig_name = f"CTS_SS_{dataset_type}_{model_name}.pdf"
85 |     fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
86 |
87 |     # Simulation performance metrics
88 |     R_sq = metrics.r_square(y_sim_val, y_meas_val)
89 |     rmse_sim = metrics.error_rmse(y_sim_val, y_meas_val)
90 |
91 |     print(f"R-squared metric: {R_sq}")
92 |     print(f"RMSE metric: {rmse_sim}")
93 |
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_IO_eval_sim.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join("..", ".."))
9 | from torchid.iofitter import NeuralIOSimulator
10 | from torchid.iomodels import NeuralIOModel
11 | from common import metrics
12 |
13 | if __name__ == '__main__':
14 |
15 | n_a = 2 # autoregressive coefficients for y
16 | n_b = 2 # autoregressive coefficients for u
17 | plot_input = False
18 |
19 | #dataset_type = 'id'
20 | dataset_type = 'val'
21 | # model_type = '16step_noise'
22 | model_type = '32step_noise'
23 | # model_type = '64step_noise
24 | # model_type = '1step_nonoise'
25 | # model_type = '1step_noise'
26 |
27 | # Create fig folder if it does not exist
28 | if not os.path.exists("fig"):
29 | os.makedirs("fig")
30 |
31 | # Column names in the dataset
32 | COL_T = ['time']
33 | COL_X = ['V_C', 'I_L']
34 | COL_U = ['V_IN']
35 | COL_Y = ['V_C']
36 |
37 | # Load dataset
38 | dataset_filename = f"RLC_data_{dataset_type}.csv"
39 | df_X = pd.read_csv(os.path.join("data", dataset_filename))
40 | time_data = np.array(df_X[COL_T], dtype=np.float32)
41 | x = np.array(df_X[COL_X], dtype=np.float32)
42 | u = np.array(df_X[COL_U], dtype=np.float32)
43 | y_var_idx = 0 # 0: voltage 1: current
44 | y = np.copy(x[:, [y_var_idx]])
45 | N = np.shape(y)[0]
46 | Ts = time_data[1] - time_data[0]
47 |
48 | # Add measurement noise
49 | std_noise_V = 10.0
50 | std_noise_I = 1.0
51 | std_noise = np.array([std_noise_V, std_noise_I])
52 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
53 | x_noise = x_noise.astype(np.float32)
54 | y_noise = x_noise[:, [y_var_idx]]
55 |
56 | # Build validation data
57 | t_val_start = 0
58 | t_val_end = time_data[-1]
59 | idx_val_start = int(t_val_start//Ts)#x.shape[0]
60 | idx_val_end = int(t_val_end//Ts)#x.shape[0]
61 | n_val = idx_val_end - idx_val_start
62 | u_val = np.copy(u[idx_val_start:idx_val_end])
63 | y_val = np.copy(y[idx_val_start:idx_val_end])
64 | y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
65 | time_val = time_data[idx_val_start:idx_val_end]
66 |
67 | # Setup neural model structure and load fitted model parameters
68 | io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
69 | io_solution = NeuralIOSimulator(io_model)
70 | model_filename = f"model_IO_{model_type}.pkl"
71 | io_solution.io_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
72 |
73 | # Evaluate the model in open-loop simulation against validation data
74 | y_seq = np.zeros(n_a, dtype=np.float32)
75 | u_seq = np.zeros(n_b, dtype=np.float32 )
76 | y_meas_val_torch = torch.tensor(y_meas_val)
77 | with torch.no_grad():
78 | y_seq_torch = torch.tensor(y_seq)
79 | u_seq_torch = torch.tensor(u_seq)
80 |
81 | u_torch = torch.tensor(u_val)
82 | y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)
83 |
84 | err_val = y_val_sim_torch - y_meas_val_torch
85 | loss_val = torch.mean(err_val**2)
86 |
87 | # Plot results
88 | if dataset_type == 'id':
89 | t_plot_start = 0.2e-3
90 | else:
91 | t_plot_start = 1.0e-3
92 | t_plot_end = t_plot_start + 0.3e-3
93 |
94 | idx_plot_start = int(t_plot_start // Ts)
95 | idx_plot_end = int(t_plot_end // Ts)
96 |
97 | y_val_sim = np.array(y_val_sim_torch)
98 | time_val_us = time_val*1e6
99 |
100 | if plot_input:
101 | fig, ax = plt.subplots(2, 1, sharex=True)
102 | else:
103 | fig, ax = plt.subplots(1, 1, sharex=True)
104 | ax = [ax]
105 |
106 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val[idx_plot_start:idx_plot_end], 'k', label='True')
107 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val_sim[idx_plot_start:idx_plot_end], 'r--', label='Model simulation')
108 | ax[0].legend(loc='upper right')
109 | ax[0].grid(True)
110 | ax[0].set_xlabel(r"Time ($\mu$s)")
111 | ax[0].set_ylabel("Capacitor voltage $v_C$ (V)")
112 | ax[0].set_ylim([-400, 400])
113 |
114 | if plot_input:
115 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k', label='Input')
116 | #ax[1].legend()
117 | ax[1].grid(True)
118 | ax[1].set_xlabel(r"Time ($\mu$s)")
119 | ax[1].set_ylabel("Input voltage $v_{in}$ (V)")
120 |
121 | fig_name = f"RLC_IO_{dataset_type}_{model_type}.pdf"
122 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
123 |
124 | # R-squared metrics
125 | R_sq = metrics.r_square(y_val, y_val_sim)
126 | print(f"R-squared metrics: {R_sq}")
127 |
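The open-loop evaluation above seeds the simulator with zero initial regressors y_seq and u_seq. A hedged sketch of what an IO simulation loop like `NeuralIOSimulator.f_sim` amounts to (names and the exact regressor convention are assumptions, not the library's actual signature):

import torch

def io_open_loop_sim(model, y_seq, u_seq, u):
    # model maps the regressor [y_{k-1..k-n_a}, u_{k-1..k-n_b}] to y_k.
    # Feeding y_k back into y_seq is what makes this a simulation
    # (output-error) evaluation rather than one-step-ahead prediction.
    y_sim = []
    for u_k in u:
        phi = torch.cat((y_seq, u_seq))
        y_k = model(phi)
        y_sim.append(y_k.reshape(1))
        y_seq = torch.cat((y_k.reshape(1), y_seq[:-1]))  # shift in new output
        u_seq = torch.cat((u_k.reshape(1), u_seq[:-1]))  # shift in new input
    return torch.cat(y_sim)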
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_OE_comparison.m:
--------------------------------------------------------------------------------
1 | clear;
2 | clc;
3 |
4 | %% Read data table
5 |
6 | data_path = fullfile("data", "RLC_data_id.csv");
7 | data_table = readtable(data_path);
8 |
9 |
10 | vin = data_table.V_IN;
11 | vC = data_table.V_C;
12 | iL = data_table.I_L;
13 | y = [vC iL];
14 | t = data_table.time;
15 | Ts = t(2) - t(1);
16 |
17 | %% Add noise %%
18 | add_noise = 0;
19 | STD_V = add_noise*10;
20 | STD_I = add_noise*1;
21 | vC_meas = vC + randn(size(vC))*STD_V;
22 | iL_meas = iL + randn(size(iL))*STD_I;
23 | y_meas = [vC_meas iL_meas];
24 |
25 | %% Identification data %%
26 | data_id = iddata(y_meas,vin,Ts);
27 | model_oe = oe(data_id, 'nb', [2; 2], 'nf', [2; 2]);
28 |
29 | y_sim_id = sim(model_oe, data_id);
30 | y_sim_id = y_sim_id.OutputData;
31 |
32 |
33 | %% Plot data %%
34 |
35 | figure()
36 | plot(t, vC, 'k');
37 | hold on;
38 | plot(t, y_sim_id(:,1), 'b');
39 | legend('True', 'Model');
40 |
41 | figure()
42 | plot(t, iL, 'k');
43 | hold on;
44 | plot(t, y_sim_id(:,2), 'b');
45 | legend('True', 'Model');
46 |
47 | %%
48 | SSE_v = sum((vC - y_sim_id(:,1)).^2);
49 | y_mean_v = mean(vC);
50 | SST_v = sum((vC - y_mean_v).^2);
51 | R_sq_v = 1 - SSE_v/SST_v;
52 |
53 | SSE_i = sum((iL - y_sim_id(:,2)).^2);
54 | y_mean_i = mean(iL);
55 | SST_i = sum((iL - y_mean_i).^2);
56 | R_sq_i = 1 - SSE_i/SST_i;
57 |
58 | fprintf("OE fitting performance\n");
59 | fprintf("Identification dataset:\nR-squared vC: %.3f\nR-squared iL: %.3f\n", R_sq_v, R_sq_i)
60 |
61 | %% Read data table val
62 |
63 | data_path = fullfile("data", "RLC_data_val.csv");
64 | data_table_val = readtable(data_path);
65 |
66 |
67 | vin = data_table_val.V_IN;
68 | vC = data_table_val.V_C;
69 | iL = data_table_val.I_L;
70 | y = [vC iL];
71 | t = data_table_val.time;
72 | Ts = t(2) - t(1);
73 |
74 | %% Validation data %%
75 | data_val = iddata(y, vin, Ts); % validation outputs, not the identification ones
76 |
77 | y_sim_val = sim(model_oe, data_val);
78 | y_sim_val = y_sim_val.OutputData;
79 |
80 | loss = mean((vC - y_sim_val(:,1)).^2);
81 |
82 | %%
83 | SSE_v = sum((vC - y_sim_val(:,1)).^2);
84 | y_mean_v = mean(vC);
85 | SST_v = sum((vC - y_mean_v).^2);
86 | R_sq_v = 1 - SSE_v/SST_v;
87 |
88 | SSE_i = sum((iL - y_sim_val(:,2)).^2);
89 | y_mean_i = mean(iL);
90 | SST_i = sum((iL - y_mean_i).^2);
91 | R_sq_i = 1 - SSE_i/SST_i;
92 |
93 | fprintf("Validation dataset:\nR-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
94 |
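For context, MATLAB's `oe` estimates an output-error model

    y(t) = \frac{B(q)}{F(q)} u(t) + e(t)

where the noise enters directly at the output, so its predictor is a pure simulation driven by the input. With 'nb' and 'nf' both [2; 2], each of the two outputs gets two numerator and two denominator coefficients, matching the n_a = n_b = 2 orders of the neural IO models; this makes it a natural linear baseline for the simulation-error-trained networks.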
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_SS_eval_sim.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib
5 | import matplotlib.pyplot as plt
6 | import os
7 | import sys
8 |
9 | sys.path.append(os.path.join("..", ".."))
10 | from torchid.ssfitter import NeuralStateSpaceSimulator
11 | from torchid.ssmodels import NeuralStateSpaceModel
12 | from common import metrics
13 |
14 | if __name__ == '__main__':
15 |
16 | matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica'], 'size': 16})
17 | matplotlib.rc('text', usetex=True)
18 |
19 | plot_input = False
20 |
21 | #dataset_type = 'id'
22 | dataset_type = 'val'
23 | #model_type = '1step_nonoise'
24 | model_type = '1step_noise'
25 | #model_type = '64step_noise'
26 | #model_type = 'simerr_noise'
27 |
28 | # Column names in the dataset
29 | COL_T = ['time']
30 | COL_X = ['V_C', 'I_L']
31 | COL_U = ['V_IN']
32 | COL_Y = ['V_C']
33 |
34 | # Load dataset
35 | dataset_filename = f"RLC_data_{dataset_type}.csv"
36 | df_X = pd.read_csv(os.path.join("data", dataset_filename))
37 | time_data = np.array(df_X[COL_T], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 | y_var_idx = 0 # 0: voltage 1: current
41 | y = np.copy(x[:, [y_var_idx]])
42 | N = np.shape(y)[0]
43 | Ts = time_data[1] - time_data[0]
44 |
45 | # Add measurement noise
46 | std_noise_V = 0.0 * 5.0
47 | std_noise_I = 0.0 * 0.5
48 | std_noise = np.array([std_noise_V, std_noise_I])
49 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
50 | x_noise = x_noise.astype(np.float32)
51 | y_noise = x_noise[:, [y_var_idx]]
52 |
53 | # Build validation data
54 | t_val_start = 0
55 | t_val_end = time_data[-1]
56 | idx_val_start = int(t_val_start//Ts)
57 | idx_val_end = int(t_val_end//Ts)
58 | u_val = u[idx_val_start:idx_val_end]
59 | x_meas_val = x_noise[idx_val_start:idx_val_end]
60 | x_true_val = x[idx_val_start:idx_val_end]
61 | y_val = y[idx_val_start:idx_val_end]
62 | time_val = time_data[idx_val_start:idx_val_end]
63 |
64 | # Setup neural model structure and load fitted model parameters
65 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
66 | nn_solution = NeuralStateSpaceSimulator(ss_model)
67 | model_filename = f"model_SS_{model_type}.pkl"
68 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
69 |
70 | # Evaluate the model in open-loop simulation against validation data
71 | x_0 = x_meas_val[0, :]
72 | with torch.no_grad():
73 | x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_val))
74 | loss = torch.mean(torch.abs(x_sim_torch - torch.tensor(x_true_val)))
75 |
76 | # Plot results
77 | x_sim = np.array(x_sim_torch)
78 | if not plot_input:
79 | fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 5.5))
80 | else:
81 | fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6, 7.5))
82 | time_val_us = time_val*1e6
83 |
84 | if dataset_type == 'id':
85 | t_plot_start = 0.2e-3
86 | else:
87 | t_plot_start = 1.9e-3
88 | t_plot_end = t_plot_start + 0.32e-3
89 |
90 | idx_plot_start = int(t_plot_start//Ts)
91 | idx_plot_end = int(t_plot_end//Ts)
92 |
93 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], x_true_val[idx_plot_start:idx_plot_end,0], 'k', label='$v_C$')
94 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], x_sim[idx_plot_start:idx_plot_end, 0], 'r--', label=r'$\hat{v}^{\mathrm{sim}}_C$')
95 | ax[0].legend(loc='upper right')
96 | ax[0].grid(True)
97 | ax[0].set_xlabel(r"Time ($\mu$s)")
98 | ax[0].set_ylabel("Voltage (V)")
99 | ax[0].set_ylim([-300, 300])
100 |
101 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], x_true_val[idx_plot_start:idx_plot_end, 1], 'k', label='$i_L$')
102 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], x_sim[idx_plot_start:idx_plot_end, 1], 'r--', label=r'$\hat i_L^{\mathrm{sim}}$')
103 | ax[1].legend(loc='upper right')
104 | ax[1].grid(True)
105 | ax[1].set_xlabel(r"Time ($\mu$s)")
106 | ax[1].set_ylabel("Current (A)")
107 | ax[1].set_ylim([-25, 25])
108 |
109 | if plot_input:
110 | ax[2].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k')
111 | #ax[2].legend(loc='upper right')
112 | ax[2].grid(True)
113 | ax[2].set_xlabel(r"Time ($\mu$s)")
114 | ax[2].set_ylabel("Input voltage $v_{in}$ (V)")
115 | ax[2].set_ylim([-400, 400])
116 | os.makedirs("fig", exist_ok=True)  # ensure the output folder exists before saving
117 | plt.tight_layout()
118 | fig_name = f"RLC_SS_{dataset_type}_{model_type}.pdf"
119 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
120 |
121 | # R-squared metrics
122 | R_sq = metrics.r_square(x_true_val, x_sim)
123 | print(f"R-squared metrics: {R_sq}")
124 |
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_SS_fit_1step.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import torch.optim as optim
5 | import time
6 | import matplotlib.pyplot as plt
7 | import os
8 | import sys
9 |
10 | sys.path.append(os.path.join("..", '..'))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator
12 | from torchid.ssmodels import NeuralStateSpaceModel
13 |
14 |
15 | if __name__ == '__main__':
16 |
17 | # Set seed for reproducibility
18 | np.random.seed(0)
19 | torch.manual_seed(0)
20 |
21 | # Overall parameters
22 | t_fit = 2e-3  # fit on the first 2 ms of data
23 | lr = 1e-4 # learning rate
24 | num_iter = 40000 # gradient-based optimization steps
25 | test_freq = 500 # print message every test_freq iterations
26 | add_noise = True
27 |
28 | # Column names in the dataset
29 | COL_T = ['time']
30 | COL_X = ['V_C', 'I_L']
31 | COL_U = ['V_IN']
32 | COL_Y = ['V_C']
33 |
34 | # Load dataset
35 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
36 | time_data = np.array(df_X[COL_T], dtype=np.float32)
37 | y = np.array(df_X[COL_Y], dtype=np.float32)
38 | x = np.array(df_X[COL_X], dtype=np.float32)
39 | u = np.array(df_X[COL_U], dtype=np.float32)
40 |
41 | # Add measurement noise
42 | std_noise_V = add_noise * 10.0
43 | std_noise_I = add_noise * 1.0
44 | std_noise = np.array([std_noise_V, std_noise_I])
45 | x_noise = np.copy(x) + np.random.randn(*x.shape)*std_noise
46 | x_noise = x_noise.astype(np.float32)
47 |
48 | # Compute SNR
49 | P_x = np.mean(x ** 2, axis=0)
50 | P_n = std_noise**2
51 | SNR = P_x/(P_n+1e-10)
52 | SNR_db = 10*np.log10(SNR)
53 |
54 | Ts = time_data[1] - time_data[0]
55 | n_fit = int(t_fit // Ts)
56 |
57 | # Convert fit data to PyTorch tensors
58 | input_data = u[0:n_fit]
59 | state_data = x_noise[0:n_fit]
60 | u_torch = torch.from_numpy(input_data)
61 | x_fit_torch = torch.from_numpy(state_data)
62 |
63 | # Setup neural model structure
64 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
65 | nn_solution = NeuralStateSpaceSimulator(ss_model)
66 |
67 | # Setup optimizer
68 | optimizer = optim.Adam(nn_solution.ss_model.parameters(), lr=lr)
69 |
70 | # Scale loss with respect to the initial one
71 | with torch.no_grad():
72 | x_est_torch = nn_solution.f_onestep(x_fit_torch, u_torch)
73 | err_init = x_est_torch - x_fit_torch
74 | scale_error = torch.sqrt(torch.mean(err_init**2, dim=0))
75 |
76 | LOSS = []
77 | start_time = time.time()
78 | # Training loop
79 | for itr in range(0, num_iter):
80 | optimizer.zero_grad()
81 |
82 | # Perform one-step ahead prediction
83 | x_est_torch = nn_solution.f_onestep(x_fit_torch, u_torch)
84 | err = x_est_torch - x_fit_torch
85 | err_scaled = err / scale_error
86 |
87 | # Compute fit loss
88 | loss = torch.mean(err_scaled**2)
89 |
90 | # Statistics
91 | LOSS.append(loss.item())
92 | if itr % test_freq == 0:
93 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
94 |
95 | # Optimize
96 | loss.backward()
97 | optimizer.step()
98 |
99 | train_time = time.time() - start_time # 114 seconds
100 | print(f"\nTrain time: {train_time:.2f} s")
101 |
102 | # Save model
103 | if not os.path.exists("models"):
104 | os.makedirs("models")
105 | if add_noise:
106 | model_filename = "model_SS_1step_noise.pkl"
107 | else:
108 | model_filename = "model_SS_1step_nonoise.pkl"
109 |
110 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", model_filename))
111 |
112 | # Simulate model
113 | x_0 = state_data[0, :]
114 | time_start = time.time()
115 | with torch.no_grad():
116 | x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
117 | loss = torch.mean(torch.abs(x_sim - x_fit_torch))
118 |
119 | # Plot
120 | if not os.path.exists("fig"):
121 | os.makedirs("fig")
122 |
123 | x_sim = np.array(x_sim)
124 | fig, ax = plt.subplots(2,1,sharex=True)
125 | ax[0].plot(np.array(x_fit_torch[:, 0]), 'k+', label='True')
126 | ax[0].plot(np.array(x_est_torch[:, 0].detach()), 'b', label='Pred')
127 | ax[0].plot(x_sim[:, 0], 'r', label='Sim')
128 | ax[0].legend()
129 | ax[1].plot(np.array(x_fit_torch[:, 1]), 'k+', label='True')
130 | ax[1].plot(np.array(x_est_torch[:, 1].detach()), 'b', label='Pred')
131 | ax[1].plot(x_sim[:, 1], 'r', label='Sim')
132 | ax[1].legend()
133 | ax[0].grid(True)
134 | ax[1].grid(True)
135 |
136 | fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
137 | ax.plot(LOSS)
138 | ax.grid(True)
139 | ax.set_ylabel("Loss (-)")
140 | ax.set_xlabel("Iteration (-)")
141 |
142 | if add_noise:
143 | fig_name = "RLC_SS_loss_1step_noise.pdf"
144 | else:
145 | fig_name = "RLC_SS_loss_1step_nonoise.pdf"
146 |
147 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
148 |
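This script minimizes the one-step-ahead prediction error, which is cheap (note the ~114 s training time recorded in the comment) because every prediction restarts from a measured state and the whole dataset is evaluated in one parallel forward pass. A hedged sketch of the two objectives, assuming the residual update convention x_{k+1} = x_k + f(x_k, u_k) (the actual `f_onestep`/`f_sim` conventions may differ):

import torch

def one_step_predictions(f, x_meas, u):
    # Each prediction restarts from the measured state: no recursion,
    # so gradients never propagate across time steps.
    return x_meas[:-1] + f(x_meas[:-1], u[:-1])

def open_loop_simulation(f, x0, u):
    # The predicted state is fed back: errors accumulate and gradients
    # flow through the whole rollout, which is far more expensive.
    x_sim = [x0]
    for u_k in u[:-1]:
        x_sim.append(x_sim[-1] + f(x_sim[-1], u_k))
    return torch.stack(x_sim)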
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_SS_fit_simerror.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import torch.optim as optim
5 | import time
6 | import matplotlib.pyplot as plt
7 | import os
8 | import sys
9 |
10 | sys.path.append(os.path.join("..", '..'))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator
12 | from torchid.ssmodels import NeuralStateSpaceModel
13 |
14 | if __name__ == '__main__':
15 |
16 | # Set seed for reproducibility
17 | np.random.seed(0)
18 | torch.manual_seed(0)
19 |
20 | # Overall parameters
21 | t_fit = 2e-3  # fit on the first 2 ms of data
22 | num_iter = 10000
23 | test_freq = 10
24 | add_noise = True
25 |
26 | # Column names in the dataset
27 | COL_T = ['time']
28 | COL_X = ['V_C', 'I_L']
29 | COL_U = ['V_IN']
30 | COL_Y = ['V_C']
31 |
32 | # Load dataset
33 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
34 | time_data = np.array(df_X[COL_T], dtype=np.float32)
35 | y = np.array(df_X[COL_Y],dtype=np.float32)
36 | x = np.array(df_X[COL_X],dtype=np.float32)
37 | u = np.array(df_X[COL_U],dtype=np.float32)
38 | x0_torch = torch.from_numpy(x[0,:])
39 |
40 | # Add measurement noise
41 | std_noise_V = add_noise * 10.0
42 | std_noise_I = add_noise * 1.0
43 | std_noise = np.array([std_noise_V, std_noise_I])
44 | x_noise = np.copy(x) + np.random.randn(*x.shape)*std_noise
45 | x_noise = x_noise.astype(np.float32)
46 |
47 | Ts = time_data[1] - time_data[0]
48 | n_fit = int(t_fit // Ts)
49 |
50 | # Convert fit data to PyTorch tensors
51 | input_data = u[0:n_fit]
52 | state_data = x_noise[0:n_fit]
53 | u_torch = torch.from_numpy(input_data)
54 | x_true_torch = torch.from_numpy(state_data)
55 |
56 | # Setup neural model structure
57 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
58 | nn_solution = NeuralStateSpaceSimulator(ss_model)
59 |
60 | # Setup optimizer
61 | params = list(nn_solution.ss_model.parameters())
62 | optimizer = optim.Adam(params, lr=1e-3)
63 |
64 | # Scale loss with respect to the initial one
65 | with torch.no_grad():
66 | x_est_torch = nn_solution.f_sim(x0_torch, u_torch)
67 | err_init = x_est_torch - x_true_torch
68 | scale_error = torch.sqrt(torch.mean(err_init**2, dim=0))
69 |
70 | start_time = time.time()
71 | LOSS = []
72 | # Training loop
73 | for itr in range(1, num_iter + 1):
74 | optimizer.zero_grad()
75 |
76 | # Perform open-loop simulation
77 | x_est_torch = nn_solution.f_sim(x0_torch, u_torch)
78 |
79 | # Compute fit loss
80 | err = x_est_torch - x_true_torch
81 | err_scaled = err/scale_error
82 | loss = torch.mean(err_scaled ** 2)
83 |
84 | # Statistics
85 | LOSS.append(loss.item())
86 | if itr % test_freq == 0:
87 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
88 |
89 | # Optimize
90 | loss.backward()
91 | optimizer.step()
92 |
93 | train_time = time.time() - start_time
94 | print(f"\nTrain time: {train_time:.2f} s") # 7230.39 seconds!
95 |
96 | if not os.path.exists("models"):
97 | os.makedirs("models")
98 |
99 | if add_noise:
100 | model_filename = "model_SS_simerr_noise.pkl"
101 | else:
102 | model_filename = "model_SS_simerr_nonoise.pkl"
103 |
104 | # Save model (the models folder was already created above)
107 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", model_filename))
108 |
109 | t_val = 5e-3
110 | n_val = int(t_val // Ts)
111 |
112 | input_data_val = u[0:n_val]
113 | state_data_val = x[0:n_val]
114 | output_data_val = y[0:n_val]
115 |
116 | x0_val = np.zeros(2,dtype=np.float32)
117 | x0_torch_val = torch.from_numpy(x0_val)
118 | u_torch_val = torch.tensor(input_data_val)
119 | x_true_torch_val = torch.from_numpy(state_data_val)
120 |
121 | with torch.no_grad():
122 | x_pred_torch_val = nn_solution.f_sim(x0_torch_val, u_torch_val)
123 |
124 | # In[1]
125 |
126 | fig,ax = plt.subplots(3,1, sharex=True)
127 | ax[0].plot(np.array(x_true_torch_val[:,0]), label='True')
128 | ax[0].plot(np.array(x_pred_torch_val[:,0]), label='Fit')
129 | ax[0].legend()
130 | ax[0].grid(True)
131 |
132 | ax[1].plot(np.array(x_true_torch_val[:,1]), label='True')
133 | ax[1].plot(np.array(x_pred_torch_val[:,1]), label='Fit')
134 | ax[1].legend()
135 | ax[1].grid(True)
136 |
137 | ax[2].plot(np.array(u_torch_val), label='Input')
138 | ax[2].grid(True)
139 |
140 | if not os.path.exists("fig"):
141 | os.makedirs("fig")
142 |
143 | fig,ax = plt.subplots(1,1, figsize=(7.5,6))
144 | ax.plot(LOSS)
145 | ax.grid(True)
146 | ax.set_ylabel("Loss (-)")
147 | ax.set_xlabel("Iteration (-)")
148 |
149 | if add_noise:
150 | fig_name = "RLC_SS_loss_simerr_noise.pdf"
151 | else:
152 | fig_name = "RLC_SS_loss_simerr_nonoise.pdf"
153 |
154 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
155 |
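Pure simulation-error minimization is expensive (compare the ~7230 s recorded in the comment above with ~114 s for one-step fitting): every iteration unrolls, and backpropagates through, the full rollout, which is inherently sequential. The multi-step scripts in this repository trade this off by splitting the record into short subsequences, each simulated from its own initial state; a minimal sketch of that batching, mirroring the reshaping used in old/RLC_ident_sat_fit_minibatch_OE_test.py below:

import torch

def multistep_batches(x_meas, u, seq_len):
    # Reshape the record into subsequences of length seq_len; the first
    # sample of each subsequence doubles as its initial condition.
    N = (x_meas.shape[0] // seq_len) * seq_len
    batch_x = x_meas[:N].view(-1, seq_len, x_meas.shape[1])
    batch_u = u[:N].view(-1, seq_len, u.shape[1])
    return batch_x[:, 0, :], batch_u, batch_x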
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_generate_id.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.RLC_example.symbolic_RLC import fxu_ODE, fxu_ODE_mod
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 150e3
21 | std_input = 80
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip initial samples so the filtered noise reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u, _ = control.forced_response(Hu, te, e)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")  # zero-order hold on the input samples
39 |
40 |
41 | def f_ODE(t,x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 | def f_ODE_mod(t,x):
46 | u = u_func(t).ravel()
47 | return fxu_ODE_mod(t, x, u)
48 |
49 |
50 | x0 = np.zeros(2)
51 | f_ODE(0.0, x0)  # sanity-check call (result discarded)
52 | t_span = (t_sim[0],t_sim[-1])
53 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
54 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
55 |
56 | x1 = y1.y.T
57 | x2 = y2.y.T
58 |
59 | # In[plot]
60 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
61 | ax[0].plot(t_sim, x1[:,0],'b')
62 | ax[0].plot(t_sim, x2[:,0],'r')
63 | ax[0].set_xlabel('time (s)')
64 | ax[0].set_ylabel('Capacitor voltage (V)')
65 |
66 | ax[1].plot(t_sim, x1[:,1],'b')
67 | ax[1].plot(t_sim, x2[:,1],'r')
68 | ax[1].set_xlabel('time (s)')
69 | ax[1].set_ylabel('Inductor current (A)')
70 |
71 | ax[2].plot(t_sim, u,'b')
72 | ax[2].set_xlabel('time (s)')
73 | ax[2].set_ylabel('Input voltage (V)')
74 |
75 | ax[0].grid(True)
76 | ax[1].grid(True)
77 | ax[2].grid(True)
78 |
79 | # In[Save]
80 | if not os.path.exists("data"):
81 | os.makedirs("data")
82 |
83 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
84 | COL_T = ['time']
85 | COL_X = ['V_C', 'I_L']
86 | COL_U = ['V_IN']
87 | COL_Y = ['V_C']
88 | COL = COL_T + COL_X + COL_U + COL_Y
89 | df_X = pd.DataFrame(X, columns=COL)
90 | # df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
91 |
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
101 |
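The excitation here is band-limited noise: unit-variance white noise is shaped by two identical first-order low-pass filters and rescaled to the target amplitude,

    H_u(s) = \frac{1}{\left(s/\omega_u + 1\right)^2}, \qquad u = \frac{\tilde u}{\mathrm{std}(\tilde u)}\,\sigma_u

with \omega_u = 150e3 rad/s and \sigma_u = 80 V for the identification set (200e3 rad/s and 60 V in RLC_generate_val.py below). Note that the discretized filter Hud is computed but never used: forced_response is applied to the continuous-time Hu directly.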
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_generate_val.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from examples.RLC_example.symbolic_RLC import fxu_ODE, fxu_ODE_mod
10 |
11 | if __name__ == '__main__':
12 |
13 | # Set seed for reproducibility
14 | np.random.seed(42)
15 |
16 | # Input characteristics #
17 | len_sim = 5e-3
18 | Ts = 5e-7
19 |
20 | omega_input = 200e3
21 | std_input = 60
22 |
23 | tau_input = 1/omega_input
24 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
25 | Hu = Hu * Hu
26 | Hud = control.matlab.c2d(Hu, Ts)
27 |
28 | N_sim = int(len_sim//Ts)
29 | N_skip = int(20 * tau_input // Ts)  # skip initial samples so the filtered noise reaches steady state
30 | N_sim_u = N_sim + N_skip
31 | e = np.random.randn(N_sim_u)
32 | te = np.arange(N_sim_u) * Ts
33 | _, u, _ = control.forced_response(Hu, te, e)
34 | u = u[N_skip:]
35 | u = u /np.std(u) * std_input
36 |
37 | t_sim = np.arange(N_sim) * Ts
38 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")  # zero-order hold on the input samples
39 |
40 |
41 | def f_ODE(t,x):
42 | u = u_func(t).ravel()
43 | return fxu_ODE(t, x, u)
44 |
45 | def f_ODE_mod(t,x):
46 | u = u_func(t).ravel()
47 | return fxu_ODE_mod(t, x, u)
48 |
49 |
50 | x0 = np.zeros(2)
51 | f_ODE(0.0, x0)  # sanity-check call (result discarded)
52 | t_span = (t_sim[0],t_sim[-1])
53 | y1 = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
54 | y2 = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
55 |
56 | x1 = y1.y.T
57 | x2 = y2.y.T
58 |
59 | # In[plot]
60 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
61 | ax[0].plot(t_sim, x1[:,0],'b')
62 | ax[0].plot(t_sim, x2[:,0],'r')
63 | ax[0].set_xlabel('time (s)')
64 | ax[0].set_ylabel('Capacitor voltage (V)')
65 |
66 | ax[1].plot(t_sim, x1[:,1],'b')
67 | ax[1].plot(t_sim, x2[:,1],'r')
68 | ax[1].set_xlabel('time (s)')
69 | ax[1].set_ylabel('Inductor current (A)')
70 |
71 | ax[2].plot(t_sim, u,'b')
72 | ax[2].set_xlabel('time (s)')
73 | ax[2].set_ylabel('Input voltage (V)')
74 |
75 | ax[0].grid(True)
76 | ax[1].grid(True)
77 | ax[2].grid(True)
78 |
79 | # In[Save]
80 | if not os.path.exists("data"):
81 | os.makedirs("data")
82 |
83 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
84 | COL_T = ['time']
85 | COL_X = ['V_C', 'I_L']
86 | COL_U = ['V_IN']
87 | COL_Y = ['V_C']
88 | COL = COL_T + COL_X + COL_U + COL_Y
89 | df_X = pd.DataFrame(X, columns=COL)
90 | # df_X.to_csv(os.path.join("data", "RLC_data_id.csv"), index=False)
91 |
92 |
93 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
94 | COL_T = ['time']
95 | COL_X = ['V_C', 'I_L']
96 | COL_U = ['V_IN']
97 | COL_Y = ['V_C']
98 | COL = COL_T + COL_X + COL_U + COL_Y
99 | df_X = pd.DataFrame(X, columns=COL)
100 | df_X.to_csv(os.path.join("data", "RLC_data_val.csv"), index=False)
101 |
--------------------------------------------------------------------------------
/examples/RLC_example/RLC_subspace_comparison.m:
--------------------------------------------------------------------------------
1 | clear;
2 | clc;
3 | close all;
4 |
5 | %% Read data table
6 |
7 | data_path = fullfile("data", "RLC_data_id.csv");
8 | data_table = readtable(data_path);
9 |
10 |
11 | vin = data_table.V_IN;
12 | vC = data_table.V_C;
13 | iL = data_table.I_L;
14 | y = [vC iL];
15 | t = data_table.time;
16 | Ts = t(2) - t(1);
17 |
18 | %% Add noise %%
19 | add_noise = 0;
20 | STD_V = add_noise*10;
21 | STD_I = add_noise*1;
22 | vC_meas = vC + randn(size(vC))*STD_V;
23 | iL_meas = iL + randn(size(iL))*STD_I;
24 | y_meas = [vC_meas iL_meas];
25 |
26 | %% Identification data %%
27 | data_id = iddata(y_meas,vin,Ts);
28 | model_subs = n4sid(data_id);
29 |
30 | y_sim_id = sim(model_subs, data_id);
31 | y_sim_id = y_sim_id.OutputData;
32 |
33 | loss = mean((vC - y_sim_id(:,1)).^2);
34 |
35 | %% Plot data %%
36 |
37 | figure()
38 | plot(t, vC, 'k');
39 | hold on;
40 | plot(t, y_sim_id(:,1), 'b');
41 | legend('True', 'Model');
42 |
43 | figure()
44 | plot(t, iL, 'k');
45 | hold on;
46 | plot(t, y_sim_id(:,2), 'b');
47 | legend('True', 'Model');
48 |
49 | %%
50 | SSE_v = sum((vC - y_sim_id(:,1)).^2);
51 | y_mean_v = mean(vC);
52 | SST_v = sum((vC - y_mean_v).^2);
53 | R_sq_v = 1 - SSE_v/SST_v;
54 |
55 | SSE_i = sum((iL - y_sim_id(:,2)).^2);
56 | y_mean_i = mean(iL);
57 | SST_i = sum((iL - y_mean_i).^2);
58 | R_sq_i = 1 - SSE_i/SST_i;
59 |
60 | fprintf("Subspace fitting performance\n");
61 | fprintf("Identification dataset:\nR-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
62 |
63 |
64 | %% Read data table val
65 |
66 | data_path = fullfile("data", "RLC_data_val.csv");
67 | data_table_val = readtable(data_path);
68 |
69 |
70 | vin = data_table_val.V_IN;
71 | vC = data_table_val.V_C;
72 | iL = data_table_val.I_L;
73 | y = [vC iL];
74 | t = data_table_val.time;
75 | Ts = t(2) - t(1);
76 |
77 | %% Validation data %%
78 | data_val = iddata(y, vin, Ts); % validation outputs, not the identification ones
79 |
80 | y_sim_val = sim(model_subs, data_val);
81 | y_sim_val = y_sim_val.OutputData;
82 |
83 | loss = mean((vC - y_sim_val(:,1)).^2);
84 |
85 | %%
86 | SSE_v = sum((vC - y_sim_val(:,1)).^2);
87 | y_mean_v = mean(vC);
88 | SST_v = sum((vC - y_mean_v).^2);
89 | R_sq_v = 1 - SSE_v/SST_v;
90 |
91 | SSE_i = sum((iL - y_sim_val(:,2)).^2);
92 | y_mean_i = mean(iL);
93 | SST_i = sum((iL - y_mean_i).^2);
94 | R_sq_i = 1 - SSE_i/SST_i;
95 |
96 | fprintf("Validation dataset:\nR-squared vC: %.2f\nR-squared iL: %.2f\n", R_sq_v, R_sq_i)
97 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_fit_ARX_lin.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from scipy.integrate import odeint, solve_ivp
3 | from scipy.interpolate import interp1d
4 | import os
5 | import numpy as np
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | import time
10 | import matplotlib.pyplot as plt
11 |
12 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
13 | from neuralode import NeuralODE, RunningAverageMeter
14 | from ssmodels import NeuralStateSpaceModelLin
15 | from symbolic_RLC import A_nominal, B_nominal
16 |
17 | if __name__ == '__main__':
18 |
19 | COL_T = ['time']
20 | COL_X = ['V_C', 'I_L']
21 | COL_U = ['V_IN']
22 | COL_Y = ['V_C']
23 |
24 |
25 | df_X = pd.read_csv(os.path.join("data", "RLC_data_sat_FE.csv"))
26 |
27 | time_data = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y],dtype=np.float32)
29 | x = np.array(df_X[COL_X],dtype=np.float32)
30 | u = np.array(df_X[COL_U],dtype=np.float32)
31 | x0_torch = torch.from_numpy(x[0,:])
32 |
33 |
34 | std_noise_V = 0.0*10
35 | std_noise_I = 0.0*1
36 | std_noise = np.array([std_noise_V, std_noise_I])
37 |
38 | x_noise = np.copy(x) + np.random.randn(*x.shape)*std_noise
39 | x_noise = x_noise.astype(np.float32)
40 |
41 |
42 | Ts = time_data[1] - time_data[0]
43 | t_fit = 2e-3
44 | n_fit = int(t_fit // Ts)
45 | num_iter = 20000
46 | test_freq = 100
47 |
48 | input_data = u[0:n_fit]
49 | state_data = x_noise[0:n_fit]
50 | u_torch = torch.from_numpy(input_data)
51 | x_true_torch = torch.from_numpy(state_data)
52 |
53 | ss_model = NeuralStateSpaceModelLin(A_nominal*Ts, B_nominal*Ts)
54 | nn_solution = NeuralODE(ss_model)
55 | #nn_solution.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat.pkl")))
56 |
57 | optimizer = optim.Adam(nn_solution.ss_model.parameters(), lr=1e-4)
58 | end = time.time()
59 | time_meter = RunningAverageMeter(0.97)
60 | loss_meter = RunningAverageMeter(0.97)
61 |
62 |
63 | ii = 0
64 | for itr in range(1, num_iter + 1):
65 | optimizer.zero_grad()
66 | x_pred_torch = nn_solution.f_onestep(x_true_torch, u_torch)
67 | #loss = torch.mean(torch.abs(x_pred_torch - x_true_torch))
68 | loss = torch.mean((x_pred_torch - x_true_torch) ** 2)
69 | loss.backward()
70 | optimizer.step()
71 |
72 | time_meter.update(time.time() - end)
73 | loss_meter.update(loss.item())
74 |
75 | if itr % test_freq == 0:
76 | with torch.no_grad():
77 | x_pred_torch = nn_solution.f_onestep(x_true_torch, u_torch) #func(x_true_torch, u_torch)
78 | loss = torch.mean((x_pred_torch - x_true_torch) ** 2)
79 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
80 | ii += 1
81 | end = time.time()
82 |
83 | if not os.path.exists("models"):
84 | os.makedirs("models")
85 |
86 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", "model_ARX_FE_sat_v1.pkl"))
87 |
88 | x_0 = state_data[0,:]
89 |
90 | with torch.no_grad():
91 | x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
92 | loss = torch.mean(torch.abs(x_sim - x_true_torch))
93 |
94 |
95 | x_sim = np.array(x_sim)
96 | fig,ax = plt.subplots(2,1,sharex=True)
97 | ax[0].plot(np.array(x_true_torch[:,0]), 'k+', label='True')
98 | ax[0].plot(np.array(x_pred_torch[:,0]), 'b', label='Pred')
99 | ax[0].plot(x_sim[:,0],'r', label='Sim')
100 | ax[0].legend()
101 | ax[1].plot(np.array(x_true_torch[:,1]), 'k+', label='True')
102 | ax[1].plot(np.array(x_pred_torch[:,1]), 'b', label='Pred')
103 | ax[1].plot(x_sim[:,1],'r', label='Sim')
104 | ax[1].legend()
105 | ax[0].grid(True)
106 | ax[1].grid(True)
107 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_fit_hidden_ARX.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from scipy.integrate import odeint, solve_ivp
3 | from scipy.interpolate import interp1d
4 |
5 | import numpy as np
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | import time
10 | import matplotlib.pyplot as plt
11 | import os
12 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
13 | from neuralode import NeuralODE, RunningAverageMeter
14 | from ssmodels import NeuralStateSpaceModelLin
15 | from symbolic_RLC import A_nominal, B_nominal
16 |
17 |
18 | if __name__ == '__main__':
19 |
20 | COL_T = ['time']
21 | COL_X = ['V_C', 'I_L']
22 | COL_U = ['V_IN']
23 | COL_Y = ['V_C']
24 |
25 | df_X = pd.read_csv("data/RLC_data_sat_FE.csv")
26 |
27 | time_data = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y],dtype=np.float32)
29 | x = np.array(df_X[COL_X],dtype=np.float32)
30 | u = np.array(df_X[COL_U],dtype=np.float32)
31 | x0_torch = torch.from_numpy(x[0,:])
32 |
33 | Ts = time_data[1] - time_data[0]
34 | t_fit = 2e-3
35 | n_fit = int(t_fit // Ts)
36 | num_iter = 20000
37 | test_freq = 10
38 |
39 | std_noise_V = 1.0*10
40 | std_noise_I = 1.0*1
41 | std_noise = np.array([std_noise_V, std_noise_I])
42 |
43 | input_data = u[0:n_fit]
44 | state_data = x[0:n_fit]
45 | output_data = y[0:n_fit]
46 | u_torch = torch.from_numpy(input_data)
47 |
48 | y_torch = torch.tensor(output_data)
49 |
50 | state_meas = np.copy(state_data)
51 | state_meas += np.random.randn(*state_meas.shape) * std_noise
52 | state_hidden_init = np.copy(state_meas) + np.random.randn(*state_meas.shape) * [1e-4, 1e-4]
53 | state_hidden_init = state_hidden_init.astype(np.float32)
54 |
55 | x_hidden_torch = torch.tensor(state_hidden_init, requires_grad=True)
56 | x_meas_torch = torch.tensor(state_meas, requires_grad=False)
57 |
58 |
59 | ss_model = NeuralStateSpaceModelLin(A_nominal*Ts, B_nominal*Ts)
60 | nn_solution = NeuralODE(ss_model)
61 | #nn_solution.load_state_dict(torch.load('model_ARX_FE_sat.pkl'))
62 |
63 | params = list(nn_solution.ss_model.parameters())
64 | params += [x_hidden_torch]
65 | optimizer = optim.Adam(params, lr=1e-4)
66 | end = time.time()
67 | time_meter = RunningAverageMeter(0.97)
68 | loss_meter = RunningAverageMeter(0.97)
69 |
70 |
71 | scale_error = torch.tensor((std_noise).astype(np.float32))
72 | ii = 0
73 | for itr in range(1, num_iter + 1):
74 | optimizer.zero_grad()
75 | #x_pred_torch = nn_solution.f_ARX(x_hidden_torch, u_torch)
76 | #loss = torch.mean(torch.abs(x_pred_torch - x_true_torch))
77 | loss = 10000*nn_solution.f_ARX_consistency_loss(x_hidden_torch, u_torch) # consistency equation
78 | err_fit = x_meas_torch-x_hidden_torch
79 | err_fit_scaled = err_fit/scale_error
80 | loss += torch.mean((err_fit_scaled) **2) # fit equation
81 | loss.backward()
82 | optimizer.step()
83 |
84 | time_meter.update(time.time() - end)
85 | loss_meter.update(loss.item())
86 |
87 | if itr % test_freq == 0:
88 | with torch.no_grad():
89 | x_pred_torch = nn_solution.f_onestep(x_hidden_torch, u_torch) #func(x_true_torch, u_torch)
90 | #loss = torch.mean(torch.abs(x_pred_torch - x_hidden_torch))
91 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
92 | ii += 1
93 | end = time.time()
94 |
95 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", "model_ARX_FE_sat.pkl"))
96 |
97 | x_0 = state_data[0,:]
98 |
99 | with torch.no_grad():
100 | x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
101 |
102 |
103 | # In[Plot]
104 | x_hidden_torch = x_hidden_torch.detach()
105 | x_true_torch = torch.tensor(state_data)
106 | x_sim = np.array(x_sim)
107 | fig,ax = plt.subplots(2,1,sharex=True)
108 | ax[0].plot(np.array(x_true_torch[:,0]), 'k', label='True')
109 | ax[0].plot(np.array(x_meas_torch[:,0]), 'k+', label='Meas')
110 | ax[0].plot(np.array(x_hidden_torch[:,0]), 'g', label='Hidden')
111 | ax[0].plot(x_sim[:,0],'r', label='Sim')
112 | ax[0].legend()
113 | ax[1].plot(np.array(x_true_torch[:,1]), 'k', label='True')
114 | ax[1].plot(np.array(x_meas_torch[:,1]), 'k+', label='Meas')
115 | ax[1].plot(np.array(x_hidden_torch[:,1]), 'g', label='Hidden')
116 | ax[1].plot(x_sim[:,1],'r', label='Sim')
117 | ax[1].legend()
118 | ax[0].grid(True)
119 | ax[1].grid(True)
120 |
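This legacy script treats the hidden state sequence itself as an optimization variable: the state is fitted to the noisy measurements while a heavily weighted consistency term keeps it compatible with the model dynamics. A hedged sketch of what a consistency loss like `f_ARX_consistency_loss` plausibly computes, assuming the residual update x_{k+1} ≈ x_k + f(x_k, u_k) (the actual implementation is not shown in this snapshot):

import torch

def consistency_loss(f, x_hidden, u):
    # Penalize violations of the state-update equation by the free
    # hidden-state variables x_hidden.
    x_next_pred = x_hidden[:-1] + f(x_hidden[:-1], u[:-1])
    return torch.mean((x_hidden[1:] - x_next_pred) ** 2)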
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_fit_minibatch_OE_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | from scipy.integrate import odeint
4 | from scipy.interpolate import interp1d
5 |
6 | import numpy as np
7 | import torch
8 | import torch.nn as nn
9 | import torch.optim as optim
10 | import time
11 | import matplotlib.pyplot as plt
12 |
13 |
14 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
15 | from neuralode import NeuralODE, RunningAverageMeter
16 |
17 |
18 | if __name__ == '__main__':
19 |
20 | COL_T = ['time']
21 | COL_X = ['V_C', 'I_L']
22 | COL_U = ['V_IN']
23 | COL_Y = ['V_C']
24 |
25 | df_X = pd.read_csv(os.path.join("data", "RLC_data_sat_FE.csv"))
26 | #df_X = pd.read_csv("RLC_data.csv")
27 | t = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y],dtype=np.float32)
29 | x = np.array(df_X[COL_X],dtype=np.float32)
30 | u = np.array(df_X[COL_U],dtype=np.float32)
31 | x0_torch = torch.from_numpy(x[0,:])
32 |
33 | Ts = t[1] - t[0]
34 | t_fit = 5e-3
35 | n_fit = int(t_fit / Ts)
36 | num_iter = 20000
37 | seq_len = 200
38 | batch_size = 1000
39 | test_freq = 10
40 |
41 | # Get fit data #
42 | u_fit = u[0:n_fit]
43 | x_fit = x[0:n_fit]
44 | y_fit = y[0:n_fit]
45 | time_fit = t[0:n_fit]
46 |
47 | # Fit data to pytorch tensors #
48 | u_torch_fit = torch.from_numpy(u_fit)
49 | y_true_torch_fit = torch.from_numpy(y_fit)
50 | x_true_torch_fit = torch.from_numpy(x_fit)
51 | time_torch_fit = torch.from_numpy(time_fit)
52 |
53 | def get_batch(batch_size, seq_len):
54 | num_train_samples = x_true_torch_fit.shape[0]
55 | s = torch.from_numpy(np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64), batch_size, replace=False))
56 | batch_x0 = x_true_torch_fit[s, :] # (M, D)
57 | batch_t = torch.stack([time_torch_fit[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
58 | batch_x = torch.stack([x_true_torch_fit[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
59 | batch_u = torch.stack([u_torch_fit[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
60 |
61 | return batch_t, batch_x0, batch_u, batch_x
62 |
63 |
64 | nn_solution = NeuralODE()
65 | # nn_solution.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat.pkl")))
66 |
67 | optimizer = optim.Adam(nn_solution.parameters(), lr=1e-4)
68 | end = time.time()
69 | time_meter = RunningAverageMeter(0.97)
70 | loss_meter = RunningAverageMeter(0.97)
71 |
72 |
73 |
74 | ii = 0
75 | x_pred_torch = nn_solution.f_sim(x0_torch, u_torch_fit)
76 | loss = torch.mean((x_pred_torch - x_true_torch_fit)**2)
77 | for itr in range(0, num_iter):
78 |
79 |
80 | if itr % test_freq == 0:
81 | with torch.no_grad():
82 | #x_pred_torch = nn_solution.f_OE(x0_torch, u_torch_fit)
83 | #loss = torch.mean(torch.abs(x_pred_torch - x_true_torch_fit))
84 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
85 | ii += 1
86 |
87 | optimizer.zero_grad()
88 | #batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)
89 | N = x_true_torch_fit.shape[0]
90 | N = int(N // seq_len) * seq_len
91 | batch_size = int(N // seq_len)
92 | batch_x = x_true_torch_fit[0:N].view(batch_size, seq_len, -1)
93 | batch_u = u_torch_fit[0:N].view(batch_size, seq_len, -1)
94 | batch_x0 = batch_x[:, 0, :]
95 |
96 | batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
97 | # err = torch.abs(batch_x[:, 1:, :] - batch_x_pred[:, 1:, :])
98 | # err[:,:,1] = err[:,:,1]*100.0
99 | # loss = torch.mean(err)
100 | loss = torch.mean((batch_x[:, 1:, :] - batch_x_pred[:, 1:, :]) ** 2)
101 | loss.backward()
102 | optimizer.step()
103 |
104 | time_meter.update(time.time() - end)
105 | loss_meter.update(loss.item())
106 |
107 |
108 | end = time.time()
109 |
110 | #torch.save(nn_solution.state_dict(), 'model.pkl')
111 |
112 | t_val = 5e-3
113 | n_val = int(t_val // Ts)
114 |
115 | input_data_val = u[0:n_val]
116 | state_data_val = x[0:n_val]
117 | output_data_val = y[0:n_val]
118 |
119 | x0_val = np.zeros(2,dtype=np.float32)
120 | x0_torch_val = torch.from_numpy(x0_val)
121 | u_torch_val = torch.tensor(input_data_val)
122 | x_true_torch_val = torch.from_numpy(state_data_val)
123 |
124 | with torch.no_grad():
125 | x_pred_torch_val = nn_solution.f_sim(x0_torch_val, u_torch_val)
126 |
127 | # In[1]
128 |
129 | fig,ax = plt.subplots(3,1, sharex=True)
130 | ax[0].plot(np.array(x_true_torch_val[:,0]), label='True')
131 | ax[0].plot(np.array(x_pred_torch_val[:,0]), label='Fit')
132 | ax[0].legend()
133 | ax[0].grid(True)
134 |
135 | ax[1].plot(np.array(x_true_torch_val[:,1]), label='True')
136 | ax[1].plot(np.array(x_pred_torch_val[:,1]), label='Fit')
137 | ax[1].legend()
138 | ax[1].grid(True)
139 |
140 | ax[2].plot(np.array(u_torch_val), label='Input')
141 | ax[2].grid(True)
142 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_generate_FE.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control.matlab
6 | import pandas as pd
7 | import os
8 |
9 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
10 |
11 | if __name__ == '__main__':
12 |
13 | np.random.seed(42)
14 | # Input characteristics #
15 | len_sim = 5e-3
16 | Ts = 2e-7
17 |
18 | omega_input = 150e3 # input power spectrum - cutoff frequency
19 | std_input = 80 # input power spectrum - amplitude
20 |
21 | std_noise_V = 10
22 | std_noise_I = 1
23 | std_noise = np.array([std_noise_V, std_noise_I])
24 |
25 | tau_input = 1/omega_input
26 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
27 | Hu = Hu * Hu
28 | Hud = control.matlab.c2d(Hu, Ts)
29 |
30 | N_sim = int(len_sim//Ts)
31 | N_skip = int(20 * tau_input // Ts)  # skip initial samples so the filtered noise reaches steady state
32 | N_sim_u = N_sim + N_skip
33 | e = np.random.randn(N_sim_u)
34 | te = np.arange(N_sim_u) * Ts
35 | _, u, _ = control.forced_response(Hu, te, e)
36 | u = u[N_skip:]
37 | u = u /np.std(u) * std_input
38 |
39 | t_sim = np.arange(N_sim) * Ts
40 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
41 |
42 |
43 | def f_ODE(t,x):
44 | u = u_func(t).ravel()
45 | return fxu_ODE(t, x, u)
46 |
47 | def f_ODE_mod(t,x):
48 | u = u_func(t).ravel()
49 | return fxu_ODE_mod(t, x, u)
50 |
51 | # In[Integrate]
52 | x0 = np.zeros(2)
53 | t_span = (t_sim[0],t_sim[-1])
54 |
55 | x1 = np.empty((len(t_sim), x0.shape[0]))
56 | x2 = np.empty((len(t_sim), x0.shape[0]))
57 |
58 | x1step = np.copy(x0)
59 | x2step = np.copy(x0)
60 | for idx in range(len(t_sim)):
61 | time = t_sim[idx]
62 | x1[idx,:] = x1step
63 | x2[idx,:] = x2step
64 | x1step += f_ODE(time, x1step)*Ts
65 | x2step += f_ODE_mod(time, x2step)*Ts
66 |
67 |
68 | # In[Add noise]
69 | x1_noise = np.copy(x1) + np.random.randn(*x1.shape)*std_noise
70 | x2_noise = np.copy(x2) + np.random.randn(*x2.shape)*std_noise
71 |
72 | # In[plot]
73 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
74 | ax[0].plot(t_sim, x2[:,0],'b')
75 | ax[0].plot(t_sim, x2_noise[:,0],'r')
76 |
77 | ax[0].set_xlabel('time (s)')
78 | ax[0].set_ylabel('Capacitor voltage (V)')
79 |
80 | # ax[1].plot(t_sim, x1[:,1],'b')
81 | ax[1].plot(t_sim, x2[:,1],'b')
82 | ax[1].plot(t_sim, x2_noise[:,1],'r')
83 | ax[1].set_xlabel('time (s)')
84 | ax[1].set_ylabel('Inductor current (A)')
85 |
86 | ax[2].plot(t_sim, u,'b')
87 | ax[2].set_xlabel('time (s)')
88 | ax[2].set_ylabel('Input voltage (V)')
89 |
90 | ax[0].grid(True)
91 | ax[1].grid(True)
92 | ax[2].grid(True)
93 |
94 | # In[Save]
95 | if not os.path.exists("data"):
96 | os.makedirs("data")
97 |
98 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
99 | COL_T = ['time']
100 | COL_X = ['V_C', 'I_L']
101 | COL_U = ['V_IN']
102 | COL_Y = ['V_C']
103 | COL = COL_T + COL_X + COL_U + COL_Y
104 | df_X = pd.DataFrame(X, columns=COL)
105 | df_X.to_csv(os.path.join("data", "RLC_data_FE.csv"), index=False)
106 |
107 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
108 | COL_T = ['time']
109 | COL_X = ['V_C', 'I_L']
110 | COL_U = ['V_IN']
111 | COL_Y = ['V_C']
112 | COL = COL_T + COL_X + COL_U + COL_Y
113 | df_X = pd.DataFrame(X, columns=COL)
114 | df_X.to_csv(os.path.join("data", "RLC_data_sat_FE.csv"), index=False)
115 |
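The integration loop above is plain forward Euler (the _FE suffix in these file names):

    x_{k+1} = x_k + T_s \, f(t_k, x_k, u_k)

RLC_ident_sat_generate_FE_RK_comp.py below compares this scheme against the adaptive Runge-Kutta integration performed by scipy's solve_ivp on the same input.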
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_generate_FE_RK_comp.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control
6 | import control.matlab
7 | import pandas as pd
8 | import os
9 |
10 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
11 |
12 | if __name__ == '__main__':
13 |
14 | np.random.seed(42)
15 | # Input characteristics #
16 | len_sim = 5e-3
17 | Ts = 2e-7
18 |
19 | omega_input = 150e3 # input power spectrum - cutoff frequency
20 | std_input = 80 # input power spectrum - amplitude
21 |
22 | tau_input = 1/omega_input
23 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
24 | Hu = Hu * Hu
25 | Hud = control.matlab.c2d(Hu, Ts)
26 |
27 | N_sim = int(len_sim//Ts)
28 | N_skip = int(20 * tau_input // Ts)  # skip initial samples so the filtered noise reaches steady state
29 | N_sim_u = N_sim + N_skip
30 | e = np.random.randn(N_sim_u)
31 | te = np.arange(N_sim_u) * Ts
32 | _, u, _ = control.forced_response(Hu, te, e)
33 | u = u[N_skip:]
34 | u = u /np.std(u) * std_input
35 |
36 | t_sim = np.arange(N_sim) * Ts
37 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
38 |
39 |
40 | def f_ODE(t,x):
41 | u = u_func(t).ravel()
42 | return fxu_ODE(t, x, u)
43 |
44 | def f_ODE_mod(t,x):
45 | u = u_func(t).ravel()
46 | return fxu_ODE_mod(t, x, u)
47 |
48 |
49 | x0 = np.zeros(2)
50 | t_span = (t_sim[0],t_sim[-1])
51 |
52 | x1 = np.empty((len(t_sim), x0.shape[0]))
53 | x2 = np.empty((len(t_sim), x0.shape[0]))
54 |
55 | x1step = np.copy(x0)
56 | x2step = np.copy(x0)
57 | for idx in range(len(t_sim)):
58 | time = t_sim[idx]
59 | x1[idx,:] = x1step
60 | x2[idx,:] = x2step
61 | x1step += f_ODE(time, x1step)*Ts
62 | x2step += f_ODE_mod(time, x2step)*Ts
63 |
64 | y1_RK = solve_ivp(f_ODE, t_span, x0, t_eval = t_sim)
65 | y2_RK = solve_ivp(f_ODE_mod, t_span, x0, t_eval = t_sim)
66 |
67 | x1_RK = y1_RK.y.T
68 | x2_RK = y2_RK.y.T
69 |
70 | # In[plot]
71 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
72 | ax[0].plot(t_sim, x1[:,0],'b')
73 | ax[0].plot(t_sim, x1_RK[:,0],'b*')
74 | ax[0].plot(t_sim, x2[:,0],'r')
75 | ax[0].plot(t_sim, x2_RK[:,0],'r*')
76 | ax[0].set_xlabel('time (s)')
77 | ax[0].set_ylabel('Capacitor voltage (V)')
78 |
79 | ax[1].plot(t_sim, x1[:,1],'b')
80 | ax[1].plot(t_sim, x1_RK[:,1],'b*')
81 | ax[1].plot(t_sim, x2[:,1],'r')
82 | ax[1].plot(t_sim, x2_RK[:,1],'r*')
83 | ax[1].set_xlabel('time (s)')
84 | ax[1].set_ylabel('Inductor current (A)')
85 |
86 | ax[2].plot(t_sim, u,'b')
87 | ax[2].set_xlabel('time (s)')
88 | ax[2].set_ylabel('Input voltage (V)')
89 |
90 | ax[0].grid(True)
91 | ax[1].grid(True)
92 | ax[2].grid(True)
93 |
94 |
95 | if not os.path.exists("data"):
96 | os.makedirs("data")
97 |
98 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
99 | COL_T = ['time']
100 | COL_X = ['V_C', 'I_L']
101 | COL_U = ['V_IN']
102 | COL_Y = ['V_C']
103 | COL = COL_T + COL_X + COL_U + COL_Y
104 | df_X = pd.DataFrame(X, columns=COL)
105 | df_X.to_csv(os.path.join("data", "RLC_data_FE.csv"), index=False)
106 |
107 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
108 | COL_T = ['time']
109 | COL_X = ['V_C', 'I_L']
110 | COL_U = ['V_IN']
111 | COL_Y = ['V_C']
112 | COL = COL_T + COL_X + COL_U + COL_Y
113 | df_X = pd.DataFrame(X, columns=COL)
114 |
115 | df_X.to_csv(os.path.join("data", "RLC_data_sat_FE.csv"), index=False)
116 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_generate_FE_val.py:
--------------------------------------------------------------------------------
1 | from scipy.integrate import solve_ivp
2 | from scipy.interpolate import interp1d
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 | import control
6 | import control.matlab
7 | import pandas as pd
8 | import os
9 |
10 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
11 |
12 | if __name__ == '__main__':
13 |
14 | np.random.seed(42)
15 | # Input characteristics #
16 | len_sim = 5e-3
17 | Ts = 2e-7
18 |
19 | omega_input = 50e3 # input power spectrum - cutoff frequency
20 | std_input = 120 # input power spectrum - amplitude
21 |
22 | std_noise_V = 10
23 | std_noise_I = 1
24 | std_noise = np.array([std_noise_V, std_noise_I])
25 |
26 | tau_input = 1/omega_input
27 | Hu = control.TransferFunction([1], [1 / omega_input, 1])
28 | Hu = Hu * Hu
29 | Hud = control.matlab.c2d(Hu, Ts)
30 |
31 | N_sim = int(len_sim//Ts)
32 | N_skip = int(20 * tau_input // Ts)  # skip initial samples so the filtered noise reaches steady state
33 | N_sim_u = N_sim + N_skip
34 | e = np.random.randn(N_sim_u)
35 | te = np.arange(N_sim_u) * Ts
36 | _, u, _ = control.forced_response(Hu, te, e)
37 | u = u[N_skip:]
38 | u = u /np.std(u) * std_input
39 |
40 | t_sim = np.arange(N_sim) * Ts
41 | u_func = interp1d(t_sim, u, kind='zero', fill_value="extrapolate")
42 |
43 |
44 | def f_ODE(t,x):
45 | u = u_func(t).ravel()
46 | return fxu_ODE(t, x, u)
47 |
48 | def f_ODE_mod(t,x):
49 | u = u_func(t).ravel()
50 | return fxu_ODE_mod(t, x, u)
51 |
52 | # In[Integrate]
53 | x0 = np.zeros(2)
54 | t_span = (t_sim[0],t_sim[-1])
55 |
56 | x1 = np.empty((len(t_sim), x0.shape[0]))
57 | x2 = np.empty((len(t_sim), x0.shape[0]))
58 |
59 | x1step = np.copy(x0)
60 | x2step = np.copy(x0)
61 | for idx in range(len(t_sim)):
62 | time = t_sim[idx]
63 | x1[idx,:] = x1step
64 | x2[idx,:] = x2step
65 | x1step += f_ODE(time, x1step)*Ts
66 | x2step += f_ODE_mod(time, x2step)*Ts
67 |
68 |
69 | # In[Add noise]
70 | x1_noise = np.copy(x1) + np.random.randn(*x1.shape)*std_noise
71 | x2_noise = np.copy(x2) + np.random.randn(*x2.shape)*std_noise
72 |
73 | # In[plot]
74 | fig, ax = plt.subplots(3,1, figsize=(10,10), sharex=True)
75 | ax[0].plot(t_sim, x2[:,0],'b')
76 | ax[0].plot(t_sim, x2_noise[:,0],'r')
77 |
78 | ax[0].set_xlabel('time (s)')
79 | ax[0].set_ylabel('Capacitor voltage (V)')
80 |
81 | # ax[1].plot(t_sim, x1[:,1],'b')
82 | ax[1].plot(t_sim, x2[:,1],'b')
83 | ax[1].plot(t_sim, x2_noise[:,1],'r')
84 | ax[1].set_xlabel('time (s)')
85 | ax[1].set_ylabel('Inductor current (A)')
86 |
87 | ax[2].plot(t_sim, u,'b')
88 | ax[2].set_xlabel('time (s)')
89 | ax[2].set_ylabel('Input voltage (V)')
90 |
91 | ax[0].grid(True)
92 | ax[1].grid(True)
93 | ax[2].grid(True)
94 |
95 | # In[Save]
96 | if not os.path.exists("data"):
97 | os.makedirs("data")
98 |
99 | X = np.hstack((t_sim.reshape(-1, 1), x1, u.reshape(-1, 1), x1[:, 0].reshape(-1, 1)))
100 | COL_T = ['time']
101 | COL_X = ['V_C', 'I_L']
102 | COL_U = ['V_IN']
103 | COL_Y = ['V_C']
104 | COL = COL_T + COL_X + COL_U + COL_Y
105 | df_X = pd.DataFrame(X, columns=COL)
106 | df_X.to_csv(os.path.join("data", "RLC_data_FE.csv"), index=False)
107 |
108 | X = np.hstack((t_sim.reshape(-1, 1), x2, u.reshape(-1, 1), x2[:, 0].reshape(-1, 1)))
109 | COL_T = ['time']
110 | COL_X = ['V_C', 'I_L']
111 | COL_U = ['V_IN']
112 | COL_Y = ['V_C']
113 | COL = COL_T + COL_X + COL_U + COL_Y
114 | df_X = pd.DataFrame(X, columns=COL)
115 |
116 | df_X.to_csv(os.path.join("data", "RLC_data_sat_FE_val.csv"), index=False)
117 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_ident_sat_refine_ARX.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from scipy.integrate import odeint, solve_ivp
3 | from scipy.interpolate import interp1d
4 | import os
5 | import numpy as np
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | import time
10 | import matplotlib.pyplot as plt
11 |
12 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod
13 | from neuralode import NeuralODE, RunningAverageMeter
14 |
15 |
16 | if __name__ == '__main__':
17 |
18 | COL_T = ['time']
19 | COL_X = ['V_C', 'I_L']
20 | COL_U = ['V_IN']
21 | COL_Y = ['V_C']
22 |
23 | df_X = pd.read_csv(os.path.join("data", "RLC_data_sat_FE.csv"))
24 |
25 | time_data = np.array(df_X[COL_T], dtype=np.float32)
26 | y = np.array(df_X[COL_Y],dtype=np.float32)
27 | x = np.array(df_X[COL_X],dtype=np.float32)
28 | u = np.array(df_X[COL_U],dtype=np.float32)
29 | x0_torch = torch.from_numpy(x[0,:])
30 |
31 | Ts = time_data[1] - time_data[0]
32 | t_fit = 2e-3
33 | n_fit = int(t_fit // Ts)
34 | num_iter = 20000
35 | test_freq = 100
36 |
37 | input_data = u[0:n_fit]
38 | state_data = x[0:n_fit]
39 | u_torch = torch.from_numpy(input_data)
40 | x_true_torch = torch.from_numpy(state_data)
41 | nn_solution = NeuralODE()
42 | nn_solution.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat.pkl")))
43 |
44 | optimizer = optim.Adam(nn_solution.parameters(), lr=1e-5)
45 | end = time.time()
46 | time_meter = RunningAverageMeter(0.97)
47 | loss_meter = RunningAverageMeter(0.97)
48 |
49 |
50 | ii = 0
51 | for itr in range(1, num_iter + 1):
52 | optimizer.zero_grad()
53 | x_pred_torch = nn_solution.f_onestep(x_true_torch, u_torch)
54 | #loss = torch.mean(torch.abs(x_pred_torch - x_true_torch))
55 | loss = torch.mean((x_pred_torch - x_true_torch) ** 2)
56 | loss.backward()
57 | optimizer.step()
58 |
59 | time_meter.update(time.time() - end)
60 | loss_meter.update(loss.item())
61 |
62 | if itr % test_freq == 0:
63 | with torch.no_grad():
64 | x_pred_torch = nn_solution.f_onestep(x_true_torch, u_torch) #func(x_true_torch, u_torch)
65 | loss = torch.mean(torch.abs(x_pred_torch - x_true_torch))
66 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
67 | ii += 1
68 | end = time.time()
69 |
70 | torch.save(nn_solution.state_dict(), os.path.join("models", "model_ARX_FE_sat_v1.pkl"))
71 |
72 | x_0 = state_data[0,:]
73 |
74 | with torch.no_grad():
75 | x_sim = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(input_data))
76 | loss = torch.mean(torch.abs(x_sim - x_true_torch))
77 |
78 |
79 | x_sim = np.array(x_sim)
80 | fig,ax = plt.subplots(2,1,sharex=True)
81 | ax[0].plot(np.array(x_true_torch[:,0]), 'k+', label='True')
82 | ax[0].plot(np.array(x_pred_torch[:,0]), 'b', label='Pred')
83 | ax[0].plot(x_sim[:,0],'r', label='Sim')
84 | ax[0].legend()
85 | ax[1].plot(np.array(x_true_torch[:,1]), 'k+', label='True')
86 | ax[1].plot(np.array(x_pred_torch[:,1]), 'b', label='Pred')
87 | ax[1].plot(x_sim[:,1],'r', label='Sim')
88 | ax[1].legend()
89 | ax[0].grid(True)
90 | ax[1].grid(True)
91 |
--------------------------------------------------------------------------------
/examples/RLC_example/old/RLC_use_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | from scipy.integrate import odeint
4 | from scipy.interpolate import interp1d
5 |
6 | import numpy as np
7 | import torch
8 | import torch.nn as nn
9 | import torch.optim as optim
10 | import time
11 | import matplotlib.pyplot as plt
12 |
13 | from symbolic_RLC import fxu_ODE, fxu_ODE_mod, A_nominal, B_nominal
14 | from torchid.ssfitter import NeuralStateSpaceSimulator, RunningAverageMeter
15 | from torchid.ssmodels import NeuralStateSpaceModelLin, NeuralStateSpaceModel
16 |
17 |
18 | if __name__ == '__main__':
19 |
20 | COL_T = ['time']
21 | COL_X = ['V_C', 'I_L']
22 | COL_U = ['V_IN']
23 | COL_Y = ['V_C']
24 | df_X = pd.read_csv(os.path.join("data", "RLC_data_sat_FE.csv"))
25 |
26 | time_data = np.array(df_X[COL_T], dtype=np.float32)
27 | y = np.array(df_X[COL_Y], dtype=np.float32)
28 | x = np.array(df_X[COL_X], dtype=np.float32)
29 | u = np.array(df_X[COL_U], dtype=np.float32)
30 | t = np.array(df_X[COL_T], dtype=np.float32)
31 | x0_torch = torch.from_numpy(x[0,:])
32 |
33 | Ts = time_data[1] - time_data[0]
34 |
35 | n_x = 2
36 | n_u = 1
37 | n_hidden = 64
38 | ss_model = NeuralStateSpaceModel(n_x, n_u, n_hidden)
39 | nn_solution = NeuralStateSpaceSimulator(ss_model)
40 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat.pkl")))
41 |
42 | x_torch = torch.tensor(x)
43 | x0_torch = torch.tensor(x[0,:])
44 | u_torch = torch.tensor(u)
45 | with torch.no_grad():
46 | x_sim_torch = nn_solution.f_sim(x0_torch, u_torch)
47 | loss = torch.mean(torch.abs(x_sim_torch - x_torch))
48 |
49 | x_sim = np.array(x_sim_torch)
50 |
51 | n_plot = t.size
52 | fig,ax = plt.subplots(3,1,sharex=True)
53 | ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')
54 | ax[0].plot(t[:n_plot], x_sim[:n_plot, 0], label='Simulated')
55 | ax[0].set_xlabel("Time (s)")
56 | ax[0].set_ylabel("Capacitor Voltage (V)")
57 | ax[0].legend()
58 | ax[0].grid()
59 |
60 | ax[1].plot(t[:n_plot], x[:n_plot, 1], label='True')
61 | ax[1].plot(t[:n_plot], x_sim[:n_plot, 1], label='Simulated')
62 | ax[1].set_xlabel("Time (s)")
63 | ax[1].set_ylabel("Inductor Current (A)")
64 | ax[1].legend()
65 | ax[1].grid()
66 |
67 | ax[2].plot(t[:n_plot], u[:n_plot, 0])
68 | ax[2].set_xlabel("Time (s)")
69 | ax[2].set_ylabel("Input Voltage (V)")
70 | #ax[2].legend()
71 | ax[2].grid()
72 | """
73 | VAR = []
74 | for idx_var in range(n_x):
75 | var = np.zeros((1,n_x)).astype(np.float32)
76 | var[0,idx_var] = 1.0 # differentiate w.r.t the nth variable
77 | VAR.append(torch.tensor(var))
78 |
79 | F_xu = ss_model(x_torch,u_torch)
80 | A = np.empty((n_x,n_x))
81 | B = np.empty((n_x,n_u))
82 |
83 | for idx_var in range(n_x):
84 | var = VAR[idx_var]
85 | F_xu.backward(var, retain_graph=True)
86 | A[idx_var,:] = np.array(x_torch.grad)
87 | B[idx_var,:] = np.array(u_torch.grad)
88 | x_torch.grad.data.zero_()
89 | u_torch.grad.data.zero_()
90 | """
91 |
--------------------------------------------------------------------------------
/examples/RLC_example/symbolic_RLC.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Tue Dec 25 12:27:55 2018
5 |
6 | @author: marco
7 | """
8 | import numba as nb
9 | from sympy import symbols, collect, cancel, init_printing, fraction
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | import os
13 | # In[Symbols of the RLC circuit]
14 |
15 | R = symbols('R')
16 | L = symbols('L')
17 | C = symbols('C')
18 | s = symbols('s')
19 |
20 | # In[Impedances]
21 |
22 | ZR = R
23 | ZL = s*L
24 | ZC = 1/(s*C)
25 |
26 | ZRL = ZR + ZL # series R and L
27 |
28 | G1 = 1/(ZRL)
29 |
30 |
31 | G2 = ZC/(ZRL + ZC)
32 | G2sym = 1/(L*C)/(s**2 + R/L*s + 1/(L*C))
33 |
34 |
35 | # In[Discretization (Tustin transform)]
36 | z = symbols('z')
37 | Td = symbols('Td')
38 |
39 | s_subs = 2/Td * (z-1)/(z+1) # Tustin transform of the Laplace variable s
40 |
41 | G2d = G2.subs(s,s_subs)
42 | G2d_simple = collect(cancel(G2d),z)
43 |
44 |
45 | # In[Substitution]
46 | R_val = 3
47 | L_val = 50e-6
48 | C_val = 270e-9
49 | Td_val = 1e-6
50 |
51 |
52 | @nb.jit(["float64(float64)", "float64[:](float64[:])"], nopython=True)
53 | def saturation_formula(current_abs):
54 | sat_ratio = (1/np.pi*np.arctan(-5*(current_abs-5))+0.5)*0.9 + 0.1
55 | return sat_ratio
56 |
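# Quick sanity check of the saturation curve above (hypothetical, not part of
# the original script): the ratio is close to 1 for small currents and decays
# smoothly towards 0.1 well above 5 A, modelling inductance saturation.
# >>> saturation_formula(np.array([0.0, 20.0]))  # approx. [0.99, 0.10]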
57 | @nb.jit("float64[:](float64,float64[:],float64[:])",nopython=True)
58 | def fxu_ODE(t,x,u):
59 | A = np.array([[0.0, 1.0/C_val],
60 | [-1/(L_val), -R_val/L_val]
61 | ])
62 | B = np.array([[0.0], [1.0/(L_val)]])
63 | dx = np.zeros(2, dtype=np.float64)
64 | dx[0] = A[0,0]*x[0] + A[0,1]*x[1] + B[0,0]*u[0]
65 | dx[1] = A[1,0]*x[0] + A[1,1]*x[1] + B[1,0]*u[0]
66 | return dx
67 |
68 | @nb.jit("float64[:](float64,float64[:],float64[:])", nopython=True)
69 | def fxu_ODE_mod(t,x,u):
70 |
71 | I_abs = np.abs(x[1])
72 | L_val_mod = L_val*saturation_formula(I_abs)
73 | R_val_mod = R_val
74 | C_val_mod = C_val
75 |
76 | A = np.array([[0.0, 1.0/C_val_mod],
77 | [-1/(L_val_mod), -R_val_mod/L_val_mod]
78 | ])
79 | B = np.array([[0.0], [1.0/(L_val_mod)]])
80 | dx = np.zeros(2, dtype=np.float64)
81 | dx[0] = A[0,0]*x[0] + A[0,1]*x[1] + B[0,0]*u[0]
82 | dx[1] = A[1,0]*x[0] + A[1,1]*x[1] + B[1,0]*u[0]
83 | #dx = A @ x + B @ u
84 | return dx
85 |
86 |
87 | A_nominal = np.array([[0.0, 1.0/C_val],
88 | [-1/(L_val), -R_val/L_val]
89 | ])
90 |
91 | B_nominal = np.array([[0.0], [1.0/(L_val)]])
92 |
93 | if __name__ == '__main__':
94 |
95 | init_printing(use_unicode=True)
96 |
97 | x = np.zeros(2)
98 | u = np.zeros(1)
99 | dx = fxu_ODE_mod(0.0, x, u)
100 |
101 | sym = [R, L, C, Td]
102 | vals = [R_val, L_val, C_val, Td_val]
103 |
104 | G2d_val = G2d_simple.subs(zip(sym, vals))
105 | G2d_num,G2d_den = fraction(G2d_val)
106 |
107 | # In[Get coefficients]
108 |
109 | num_coeff = G2d_num.collect(z).as_coefficients_dict()
110 | den_coeff = G2d_den.collect(z).as_coefficients_dict()
111 |
112 | G2d_num = G2d_num / den_coeff[z**2] # normalize numerator by the leading denominator coefficient
113 | G2d_den = G2d_den / den_coeff[z**2] # the denominator becomes monic
114 | G2d_monic = G2d_num/G2d_den # Monic transfer function
115 |
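# Hypothetical numerical cross-check (not in the original script): the same
# Tustin discretization can be obtained with scipy.signal.cont2discrete and
# compared against the symbolic monic coefficients computed above.
#
# from scipy import signal
# num_c = [1.0/(L_val*C_val)]                    # continuous-time numerator of G2
# den_c = [1.0, R_val/L_val, 1.0/(L_val*C_val)]  # continuous-time denominator of G2
# numd, dend, _ = signal.cont2discrete((num_c, den_c), Td_val, method='bilinear')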
116 |
117 | I = np.arange(0.,20.,0.1)
118 |
119 | fig, ax = plt.subplots(1, 1, sharex=True, figsize=(4, 3))
120 | ax.plot(I, L_val*1e6*saturation_formula(I), 'k')
121 | ax.grid(True)
122 | ax.set_xlabel('Inductor current $i_L$ (A)', fontsize=14)
123 | ax.set_ylabel(r'Inductance $L$ ($\mu$H)', fontsize=14)
124 | fig.savefig(os.path.join("fig", "RLC_characteristics.pdf"), bbox_inches='tight')
125 |
--------------------------------------------------------------------------------
/examples/RLC_example/test/RLC_IO_I_eval_sim.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join("..", ".."))
9 | from torchid.iofitter import NeuralIOSimulator
10 | from torchid.iomodels import NeuralIOModel
11 | from common import metrics
12 |
13 | if __name__ == '__main__':
14 |
15 | dataset_type = 'id'
16 | #dataset_type = 'val'
17 |
18 | #model_type = '32step_noise'
19 | model_type = '64step_noise'
20 | # model_type = '1step_nonoise'
21 | # model_type = '1step_noise'
22 |
23 | plot_input = False
24 |
25 | COL_T = ['time']
26 | COL_X = ['V_C', 'I_L']
27 | COL_U = ['V_IN']
28 | COL_Y = ['I_L']
29 |
30 | dataset_filename = f"RLC_data_{dataset_type}.csv"
31 | df_X = pd.read_csv(os.path.join("data", dataset_filename))
32 |
33 | time_data = np.array(df_X[COL_T], dtype=np.float32)
34 | # y = np.array(df_X[COL_Y], dtype=np.float32)
35 | x = np.array(df_X[COL_X], dtype=np.float32)
36 | u = np.array(df_X[COL_U], dtype=np.float32)
37 | y_var_idx = 1 # 0: voltage 1: current
38 |
39 | y = np.copy(x[:, [y_var_idx]])
40 |
41 | N = np.shape(y)[0]
42 | Ts = time_data[1] - time_data[0]
43 |
44 |
45 | n_a = 2 # autoregressive coefficients for y
46 | n_b = 2 # autoregressive coefficients for u
47 | n_max = np.max((n_a, n_b)) # delay
48 |
49 | std_noise_V = 1.0 * 10.0
50 | std_noise_I = 1.0 * 1.0
51 | std_noise = np.array([std_noise_V, std_noise_I])
52 |
53 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
54 | x_noise = x_noise.astype(np.float32)
55 | y_noise = x_noise[:, [y_var_idx]]
56 |
57 | # Initialize optimization
58 | io_model = NeuralIOModel(n_a=n_a, n_b=n_b, n_feat=64)
59 | io_solution = NeuralIOSimulator(io_model)
60 |
61 | model_filename = f"model_IO_I_{model_type}.pkl"
62 | io_solution.io_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
63 |
64 | # In[Validate model]
65 | t_val_start = 0
66 | t_val_end = time_data[-1]
67 | idx_val_start = int(t_val_start//Ts)#x.shape[0]
68 | idx_val_end = int(t_val_end//Ts)#x.shape[0]
69 |
70 | n_val = idx_val_end - idx_val_start
71 | u_val = np.copy(u[idx_val_start:idx_val_end])
72 | y_val = np.copy(y[idx_val_start:idx_val_end])
73 | y_meas_val = np.copy(y_noise[idx_val_start:idx_val_end])
74 | time_val = time_data[idx_val_start:idx_val_end]
75 |
76 | y_seq = np.zeros(n_a, dtype=np.float32) #np.array(np.flip(y_val[0:n_a].ravel()))
77 | u_seq = np.zeros(n_b, dtype=np.float32 ) #np.array(np.flip(u_val[0:n_b].ravel()))
78 |
79 | # Neglect initial values
80 | # y_val = y_val[n_max:, :]
81 | # y_meas_val = y_meas_val[n_max:, :]
82 | # u_val = u_val[n_max:, :]
83 | # time_val = time_val[n_max:, :]
84 |
85 | y_meas_val_torch = torch.tensor(y_meas_val)
86 |
87 | with torch.no_grad():
88 | y_seq_torch = torch.tensor(y_seq)
89 | u_seq_torch = torch.tensor(u_seq)
90 |
91 | u_torch = torch.tensor(u_val)
92 | y_val_sim_torch = io_solution.f_sim(y_seq_torch, u_seq_torch, u_torch)
93 |
94 | err_val = y_val_sim_torch - y_meas_val_torch
95 | loss_val = torch.mean((err_val)**2)
96 |
97 |
98 | if dataset_type == 'id':
99 | t_plot_start = 0.2e-3
100 | else:
101 | t_plot_start = 1.0e-3
102 | t_plot_end = t_plot_start + 0.3e-3
103 |
104 | idx_plot_start = int(t_plot_start//Ts)#x.shape[0]
105 | idx_plot_end = int(t_plot_end//Ts)#x.shape[0]
106 |
107 | # In[Plot]
108 | y_val_sim = np.array(y_val_sim_torch)
109 | time_val_us = time_val *1e6
110 |
111 | if plot_input:
112 | fig, ax = plt.subplots(2,1, sharex=True)
113 | else:
114 | fig, ax = plt.subplots(1, 1, sharex=True)
115 | ax = [ax]
116 |
117 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val[idx_plot_start:idx_plot_end], 'k', label='True')
118 | ax[0].plot(time_val_us[idx_plot_start:idx_plot_end], y_val_sim[idx_plot_start:idx_plot_end], 'r--', label='Model simulation')
119 | ax[0].legend(loc='upper right')
120 | ax[0].grid(True)
121 | ax[0].set_xlabel(r"Time ($\mu$s)")
122 | ax[0].set_ylabel("Inductor current $i_L$ (A)")
123 | ax[0].set_ylim([-20, 20])
124 |
125 | if plot_input:
126 | ax[1].plot(time_val_us[idx_plot_start:idx_plot_end], u_val[idx_plot_start:idx_plot_end], 'k', label='Input')
127 | #ax[1].legend()
128 | ax[1].grid(True)
129 | ax[1].set_xlabel(r"Time ($\mu$s)")
130 | ax[1].set_ylabel("Input voltage $v_{in}$ (V)")
131 |
132 | fig_name = f"RLC_IO_{dataset_type}_{model_type}.pdf"
133 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
134 |
135 |
136 | R_sq = metrics.r_square(y_val, y_val_sim)
137 | print(f"R-squared metrics: {R_sq}")
138 |
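# For reference, the R-squared index printed above is the usual coefficient of
# determination (a sketch; the actual implementation lives in common/metrics.py):
#
# def r_square_sketch(y_true, y_sim):
#     ss_res = np.sum((y_true - y_sim)**2)                    # residual sum of squares
#     ss_tot = np.sum((y_true - np.mean(y_true, axis=0))**2)  # total sum of squares
#     return 1.0 - ss_res/ss_tot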
--------------------------------------------------------------------------------
/examples/RLC_example/test/RLC_SS_eval_pred.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join("..", ".."))
9 | from torchid.ssfitter import NeuralStateSpaceSimulator
10 | from torchid.ssmodels import NeuralStateSpaceModel
11 | import scipy.linalg
12 | from torchid.util import get_sequential_batch_idx
13 |
14 | if __name__ == '__main__':
15 |
16 | COL_T = ['time']
17 | COL_X = ['V_C', 'I_L']
18 | COL_U = ['V_IN']
19 | COL_Y = ['V_C']
20 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
21 |
22 | time_data = np.array(df_X[COL_T], dtype=np.float32)
23 | # y = np.array(df_X[COL_Y], dtype=np.float32)
24 | x = np.array(df_X[COL_X], dtype=np.float32)
25 | u = np.array(df_X[COL_U], dtype=np.float32)
26 | y_var_idx = 0 # 0: voltage 1: current
27 |
28 | y = np.copy(x[:, [y_var_idx]])
29 |
30 | N = np.shape(y)[0]
31 | Ts = time_data[1] - time_data[0]
32 |
33 | n_a = 2 # autoregressive coefficients for y
34 | n_b = 2 # autoregressive coefficients for u
35 | n_max = np.max((n_a, n_b)) # delay
36 |
37 | std_noise_V = 0.0 * 5.0
38 | std_noise_I = 0.0 * 0.5
39 | std_noise = np.array([std_noise_V, std_noise_I])
40 |
41 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
42 | x_noise = x_noise.astype(np.float32)
43 | y_noise = x_noise[:, [y_var_idx]]
44 |
45 | # Initialize optimization
46 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64) #NeuralStateSpaceModelLin(A_nominal*Ts, B_nominal*Ts)
47 | nn_solution = NeuralStateSpaceSimulator(ss_model)
48 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_ARX_FE_sat_nonoise.pkl")))
49 |
50 |
51 | # In[Validate model]
52 | t_val_start = 0
53 | t_val_end = time_data[-1]
54 | idx_val_start = int(t_val_start//Ts)#x.shape[0]
55 | idx_val_end = int(t_val_end//Ts)#x.shape[0]
56 |
57 | # Build fit data
58 | u_val = u[idx_val_start:idx_val_end]
59 | x_val = x_noise[idx_val_start:idx_val_end]
60 | y_val = y[idx_val_start:idx_val_end]
61 | time_val = time_data[idx_val_start:idx_val_end]
62 |
63 |
64 | # Predict batch data
65 | seq_len = 128
66 | batch_start, batch_idx = get_sequential_batch_idx(y_val.shape[0], seq_len)
67 | batch_time = torch.tensor(time_val[batch_idx]) # torch.stack([time_torch_fit[batch_start[i]:batch_start[i] + seq_len] for i in range(batch_size)], dim=0)
68 | batch_x0 = torch.tensor(x_val[batch_start]) # x_meas_torch_fit[batch_start, :] # (M, D)
69 | batch_u = torch.tensor(u_val[batch_idx]) # torch.stack([u_torch_fit[batch_start[i]:batch_start[i] + seq_len] for i in range(batch_size)], dim=0)
70 | batch_x = torch.tensor(x_val[batch_idx]) # torch.stack([x_meas_torch_fit[batch_start[i]:batch_start[i] + seq_len] for i in range(batch_size)], dim=0)
71 | batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
72 |
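# Hypothetical re-implementation of get_sequential_batch_idx for reference (the
# real helper lives in torchid.util; this sketch assumes it tiles the signal
# into consecutive windows of length seq_len):
#
# def get_sequential_batch_idx_sketch(n_samples, seq_len):
#     batch_start = np.arange(0, n_samples - seq_len + 1, seq_len)
#     batch_idx = batch_start[:, None] + np.arange(seq_len)  # shape (n_batches, seq_len)
#     return batch_start, batch_idx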
73 | # Plot data
74 | batch_x_pred_np = np.array(batch_x_pred.detach())
75 | batch_time_np = np.array(batch_time.detach()).squeeze()
76 |
77 | fig, ax = plt.subplots(3,1, sharex=True)
78 | ax[0].plot(time_val, x_val[:,0], 'b')
79 | ax[0].plot(batch_time_np.T, batch_x_pred_np[:,:,0].T, 'r')
80 | ax[0].grid(True)
81 |
82 | ax[1].plot(time_val, x_val[:,1], 'b')
83 | ax[1].plot(batch_time_np.T, batch_x_pred_np[:,:,1].T, 'r')
84 | ax[1].grid(True)
85 |
86 | ax[2].plot(time_val, u_val, label='Input')
87 | ax[2].grid(True)
88 |
--------------------------------------------------------------------------------
/examples/RLC_example/test/RLC_SS_fit_1step_tf.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import time
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 | import tensorflow as tf
8 |
9 | sys.path.append(os.path.join(".."))
10 | from tfid.ssfitter import NeuralStateSpaceSimulator
11 | from tfid.ssmodels import NeuralStateSpaceModel
12 |
13 | if __name__ == '__main__':
14 |
15 | num_iter = 40000
16 | test_freq = 100
17 |
18 | add_noise = False
19 |
20 | COL_T = ['time']
21 | COL_X = ['V_C', 'I_L']
22 | COL_U = ['V_IN']
23 | COL_Y = ['V_C']
24 |
25 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
26 |
27 | time_data = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y], dtype=np.float32)
29 | x = np.array(df_X[COL_X], dtype=np.float32)
30 | u = np.array(df_X[COL_U], dtype=np.float32)
31 |
32 | std_noise_V = add_noise * 10.0
33 | std_noise_I = add_noise * 1.0
34 | std_noise = np.array([std_noise_V, std_noise_I])
35 |
36 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
37 | x_noise = x_noise.astype(np.float32)
38 |
39 | Ts = time_data[1] - time_data[0]
40 | t_fit = 2e-3
41 | n_fit = int(t_fit // Ts) # x.shape[0]
42 |
43 |
44 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
45 | nn_solution = NeuralStateSpaceSimulator(ss_model)
46 |
47 | u_fit = u[0:n_fit]
48 | x_targ_fit = x_noise[0:n_fit]
49 | x_est_init = nn_solution(x_targ_fit, u_fit)
50 |
51 |
52 | loss_object = tf.keras.losses.MeanSquaredError()
53 | optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
54 |
55 | x_est_init = nn_solution(x_targ_fit, u_fit)
56 | loss_init = loss_object(x_est_init, x_targ_fit)
57 |
58 |
59 | @tf.function
60 | def train_step():
61 | with tf.GradientTape() as tape:
62 | x_est = nn_solution(x_targ_fit, u_fit)
63 | loss_unscaled = loss_object(x_est, x_targ_fit)
64 | loss_scaled = loss_unscaled/loss_init
65 | gradients = tape.gradient(loss_scaled, nn_solution.trainable_variables)
66 | optimizer.apply_gradients(zip(gradients, nn_solution.trainable_variables))
67 | return loss_scaled
68 |
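# Note: the first call to train_step() triggers tf.function tracing and graph
# compilation, so the first iteration below is noticeably slower than the
# following ones (RLC_SS_ident_simerror_tf.py measures this compile time
# explicitly).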
69 | train_start = time.time()
70 | LOSS = []
71 | for itr in range(num_iter):
72 | loss = train_step()
73 | LOSS.append(np.float32(loss))
74 | if itr % test_freq == 0:
75 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, float(loss)))
76 |
77 | train_time = time.time() - train_start
78 | print(f"\nTrain time: {train_time:.2f}")
79 |
80 | @tf.function
81 | def sim_step(x0, u):
82 | x_sim = nn_solution.f_sim(x0,u)
83 | return x_sim
84 |
85 | #x_sim = sim_step(x[0, :], u_fit)
86 |
87 |
88 | x_sim = nn_solution.f_sim(x[0, :], u_fit)
89 |
90 |
91 | fig, ax = plt.subplots(2,1,sharex=True)
92 | ax[0].plot(x_targ_fit[:,0], 'k', label='True')
93 | ax[0].plot(x_sim[:,0],'r', label='Sim')
94 | ax[0].legend()
95 | ax[1].plot(x_targ_fit[:,1], 'k', label='True')
96 | ax[1].plot(x_sim[:,1],'r', label='Sim')
97 | ax[0].grid(True)
98 | ax[1].grid(True)
99 |
100 | fig, ax = plt.subplots(1,1,sharex=True)
101 | ax.plot(LOSS)
102 |
--------------------------------------------------------------------------------
/examples/RLC_example/test/RLC_SS_ident_sim_jit.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import torch.optim as optim
5 | import time
6 | import matplotlib.pyplot as plt
7 | import os
8 | import sys
9 |
10 | sys.path.append(os.path.join("..", ".."))
11 | from torchid.ssfitter_jit import NeuralStateSpaceSimulator
12 | from torchid.util import RunningAverageMeter
13 | from torchid.ssmodels import NeuralStateSpaceModel
14 |
15 | if __name__ == '__main__':
16 |
17 | COL_T = ['time']
18 | COL_X = ['V_C', 'I_L']
19 | COL_U = ['V_IN']
20 | COL_Y = ['V_C']
21 |
22 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
23 |
24 | time_data = np.array(df_X[COL_T], dtype=np.float32)
25 | y = np.array(df_X[COL_Y],dtype=np.float32)
26 | x = np.array(df_X[COL_X],dtype=np.float32)
27 | u = np.array(df_X[COL_U],dtype=np.float32)
28 | x0_torch = torch.from_numpy(x[0,:])
29 |
30 | std_noise_V = 0.0 * 10.0
31 | std_noise_I = 0.0 * 1.0
32 | std_noise = np.array([std_noise_V, std_noise_I])
33 |
34 | x_noise = np.copy(x) + np.random.randn(*x.shape)*std_noise
35 | x_noise = x_noise.astype(np.float32)
36 |
37 | Ts = time_data[1] - time_data[0]
38 | t_fit = 0.5e-3
39 | n_fit = int(t_fit//Ts)#x.shape[0]
40 | num_iter = 1000
41 | test_freq = 100
42 |
43 | input_data = u[0:n_fit]
44 | state_data = x_noise[0:n_fit]
45 | u_torch = torch.from_numpy(input_data)
46 | x_true_torch = torch.from_numpy(state_data)
47 |
48 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
49 | ss_model = torch.jit.script(ss_model)
50 | nn_solution = NeuralStateSpaceSimulator(ss_model)
51 |
52 | params = list(nn_solution.ss_model.parameters())
53 | optimizer = optim.Adam(params, lr=1e-4)
54 | end = time.time()
55 |
56 | #func = torch.jit.trace(nn_solution, (x0_torch, u_torch))
57 | func = torch.jit.script(nn_solution)
58 | #func = nn_solution
59 |
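# Scale the fit loss by the per-state RMS of the initial simulation error, so
# that the two state channels (capacitor voltage and inductor current, which
# have very different magnitudes) contribute comparably to the objective.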
60 | with torch.no_grad():
61 | x_est_torch = func(x0_torch, u_torch) #nn_solution.f_sim
62 | err_init = x_est_torch - x_true_torch
63 | scale_error = torch.sqrt(torch.mean((err_init)**2, dim=(0))) #torch.mean(torch.sq(batch_x[:,1:,:] - batch_x_pred[:,1:,:]))
64 |
65 | start_time = time.time()
66 | ii = 0
67 | for itr in range(1, num_iter + 1):
68 | optimizer.zero_grad()
69 | x_est_torch = func(x0_torch, u_torch)#nn_solution.f_sim(x0_torch, u_torch)
70 | err = x_est_torch - x_true_torch
71 | err_scaled = err/scale_error
72 | loss = torch.mean(err_scaled ** 2)
73 |
74 | if itr % test_freq == 0:
75 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
76 | ii += 1
77 |
78 | loss.backward()
79 | optimizer.step()
80 |
81 | train_time = time.time() - start_time
82 |
83 | # torch.save(nn_solution.state_dict(), 'model.pkl')
84 |
85 | t_val = 5e-3
86 | n_val = int(t_val//Ts)#x.shape[0]
87 |
88 | input_data_val = u[0:n_val]
89 | state_data_val = x[0:n_val]
90 | output_data_val = y[0:n_val]
91 |
92 | x0_val = np.zeros(2,dtype=np.float32)
93 | x0_torch_val = torch.from_numpy(x0_val)
94 | u_torch_val = torch.tensor(input_data_val)
95 | x_true_torch_val = torch.from_numpy(state_data_val)
96 |
97 | with torch.no_grad():
98 | time_sim_start = time.perf_counter()
99 | x_pred_torch_val = nn_solution(x0_torch_val, u_torch_val)
100 | time_sim = time.perf_counter() - time_sim_start
101 |
102 | # In[1]
103 |
104 | fig,ax = plt.subplots(3,1, sharex=True)
105 | ax[0].plot(np.array(x_true_torch_val[:,0]), label='True')
106 | ax[0].plot(np.array(x_pred_torch_val[:,0]), label='Fit')
107 | ax[0].legend()
108 | ax[0].grid(True)
109 |
110 | ax[1].plot(np.array(x_true_torch_val[:,1]), label='True')
111 | ax[1].plot(np.array(x_pred_torch_val[:,1]), label='Fit')
112 | ax[1].legend()
113 | ax[1].grid(True)
114 |
115 | ax[2].plot(np.array(u_torch_val), label='Input')
116 | ax[2].grid(True)
117 |
--------------------------------------------------------------------------------
/examples/RLC_example/test/RLC_SS_ident_simerror_tf.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import time
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 | import tensorflow as tf
8 |
9 | sys.path.append(os.path.join(".."))
10 | from tfid.ssfitter import NeuralStateSpaceSimulator
11 | from tfid.ssmodels import NeuralStateSpaceModel
12 |
13 | if __name__ == '__main__':
14 |
15 | num_iter = 40000
16 | test_freq = 10
17 |
18 | add_noise = False
19 |
20 | COL_T = ['time']
21 | COL_X = ['V_C', 'I_L']
22 | COL_U = ['V_IN']
23 | COL_Y = ['V_C']
24 |
25 | df_X = pd.read_csv(os.path.join("data", "RLC_data_id.csv"))
26 |
27 | time_data = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y], dtype=np.float32)
29 | x = np.array(df_X[COL_X], dtype=np.float32)
30 | u = np.array(df_X[COL_U], dtype=np.float32)
31 |
32 | std_noise_V = add_noise * 10.0
33 | std_noise_I = add_noise * 1.0
34 | std_noise = np.array([std_noise_V, std_noise_I])
35 |
36 | x_noise = np.copy(x) + np.random.randn(*x.shape) * std_noise
37 | x_noise = x_noise.astype(np.float32)
38 |
39 | Ts = time_data[1] - time_data[0]
40 | t_fit = 2e-3
41 | n_fit = int(t_fit // Ts) # x.shape[0]
42 |
43 | ss_model = NeuralStateSpaceModel(n_x=2, n_u=1, n_feat=64)
44 | nn_solution = NeuralStateSpaceSimulator(ss_model)
45 |
46 | u_fit = u[0:n_fit]
47 | x_targ_fit = x_noise[0:n_fit]
48 | x_est_init = nn_solution(x_targ_fit, u_fit)
49 |
50 |
51 | loss_object = tf.keras.losses.MeanSquaredError()
52 | optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
53 |
54 | compile_start = time.time()
55 | @tf.function
56 | def train_step():
57 | with tf.GradientTape() as tape:
58 | x_est = nn_solution.f_sim(x_noise[0,:], u_fit)
59 | loss = loss_object(x_est, x_targ_fit)
60 | gradients = tape.gradient(loss, nn_solution.trainable_variables)
61 | optimizer.apply_gradients(zip(gradients, nn_solution.trainable_variables))
62 | return loss
63 | loss = train_step()
64 | compile_time = time.time() - compile_start
65 | print(f"\nCompile time: {compile_time:.2f}")
66 |
67 | train_start = time.time()
68 | LOSS = []
69 | for itr in range(num_iter):
70 | loss = train_step()
71 | LOSS.append(np.float32(loss))
72 | if itr % test_freq == 0:
73 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, float(loss)))
74 |
75 | train_time = time.time() - train_start
76 | print(f"\nTrain time: {train_time:.2f}")
77 |
78 | x_sim = nn_solution.f_sim(x[0, :], u_fit)
79 |
80 | fig, ax = plt.subplots(2,1,sharex=True)
81 | ax[0].plot(x_targ_fit[:,0], 'k', label='True')
82 | ax[0].plot(x_sim[:,0],'r', label='Sim')
83 | ax[0].legend()
84 | ax[1].plot(x_targ_fit[:,1], 'k', label='True')
85 | ax[1].plot(x_sim[:,1],'r', label='Sim')
86 | ax[0].grid(True)
87 | ax[1].grid(True)
88 |
89 | fig, ax = plt.subplots(1,1,sharex=True)
90 | ax.plot(LOSS)
91 |
--------------------------------------------------------------------------------
/examples/cartpole_example/cartpole_SS_eval_pred.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import torch
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 |
8 | sys.path.append(os.path.join("..", ".."))
9 | from torchid.ssfitter import NeuralStateSpaceSimulator
10 | from torchid.ssmodels import CartPoleStateSpaceModel
11 | from torchid.util import get_sequential_batch_idx
12 |
13 | if __name__ == '__main__':
14 |
15 | seq_len = 512 # simulation sequence length - we evaluate performance in terms of seq_len-step simulation error
16 | dataset_filename = "pendulum_data_oloop_val.csv"
17 | model_filename = "model_SS_64step_noise.pkl"
18 |
19 | # Column names in the dataset
20 | COL_T = ['time']
21 | COL_Y = ['p_meas', 'theta_meas']
22 | COL_X = ['p', 'v', 'theta', 'omega']
23 | COL_U = ['u']
24 |
25 | # Load dataset
26 | df_X = pd.read_csv(os.path.join("data", dataset_filename), sep=",")
27 | time_data = np.array(df_X[COL_T], dtype=np.float32)
28 | y = np.array(df_X[COL_Y], dtype=np.float32)
29 | x = np.array(df_X[COL_X], dtype=np.float32)
30 | u = np.array(df_X[COL_U], dtype=np.float32)
31 | x0_torch = torch.from_numpy(x[0, :])
32 |
33 | N = np.shape(y)[0]
34 | Ts = time_data[1] - time_data[0]
35 |
36 | # Load model and parameters
37 | ss_model = CartPoleStateSpaceModel(Ts, init_small=True)
38 | nn_solution = NeuralStateSpaceSimulator(ss_model)
39 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
40 |
41 |
42 | # Build validation data
43 | t_val_start = 0
44 | t_val_end = time_data[-1]
45 | idx_val_start = int(t_val_start//Ts)
46 | idx_val_end = int(t_val_end//Ts)
47 | u_val = u[idx_val_start:idx_val_end]
48 | x_val = x[idx_val_start:idx_val_end]
49 | y_val = y[idx_val_start:idx_val_end]
50 | time_val = time_data[idx_val_start:idx_val_end]
51 |
52 | # Predict batch data
53 | batch_start, batch_idx = get_sequential_batch_idx(y_val.shape[0], seq_len)
54 | batch_time = torch.tensor(time_val[batch_idx])
55 | batch_x0 = torch.tensor(x_val[batch_start])
56 | batch_u = torch.tensor(u_val[batch_idx])
57 | batch_x = torch.tensor(x_val[batch_idx])
58 | batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
59 |
60 | # Plot data
61 | batch_x_pred_np = np.array(batch_x_pred.detach())
62 | batch_time_np = np.array(batch_time.detach()).squeeze()
63 |
64 | fig, ax = plt.subplots(3, 1, sharex=True)
65 | ax[0].plot(time_val, x_val[:, 0], 'b')
66 | ax[0].plot(batch_time_np.T, batch_x_pred_np[:, :, 0].T, 'r')
67 | ax[0].grid(True)
68 |
69 | ax[1].plot(time_val, x_val[:,2], 'b')
70 | ax[1].plot(batch_time_np.T, batch_x_pred_np[:, :, 2].T, 'r')
71 | ax[1].grid(True)
72 |
73 | ax[2].plot(time_val, u_val, label='Input')
74 | ax[2].grid(True)
75 |
--------------------------------------------------------------------------------
/examples/cartpole_example/cartpole_SS_ident_1step.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import sys
5 | import torch
6 | import torch.optim as optim
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 |
10 | sys.path.append(os.path.join("..", ".."))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator
12 | from torchid.ssmodels import CartPoleStateSpaceModel
13 |
14 |
15 | if __name__ == '__main__':
16 |
17 | # Set seed for reproducibility
18 | np.random.seed(0)
19 | torch.manual_seed(0)
20 |
21 | # Overall parameters
22 | num_iter = 30000 # gradient-based optimization steps
23 | test_freq = 100 # print message every test_freq iterations
24 | len_fit = 80 # number of seconds of the dataset used to fit
25 | lr = 5e-5 # learning rate
26 | add_noise = False
27 |
28 | # Column names in the dataset
29 | COL_T = ['time']
30 | COL_Y = ['p_meas', 'theta_meas']
31 | COL_X = ['p', 'v', 'theta', 'omega']
32 | COL_U = ['u']
33 | COL_R = ['r']
34 |
35 | # Load dataset
36 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_oloop_id.csv"))
37 | t = np.array(df_X[COL_T], dtype=np.float32)
38 | y = np.array(df_X[COL_Y], dtype=np.float32)
39 | x = np.array(df_X[COL_X], dtype=np.float32)
40 | u = np.array(df_X[COL_U], dtype=np.float32)
41 | Ts = t[1] - t[0]
42 |
43 | # Measurement noise is not added here (add_noise = False); the recorded states are used as-is
44 | x_noise = np.copy(x)
45 |
46 | # Setup neural model structure
47 | ss_model = CartPoleStateSpaceModel(Ts)
48 | nn_solution = NeuralStateSpaceSimulator(ss_model)
49 |
50 | # Fit data to pytorch tensors #
51 | n_fit = int(len_fit//Ts)
52 | u_fit = u[0:n_fit]
53 | x_fit = x_noise[0:n_fit]
54 | t_fit = t[0:n_fit]
55 | u_fit_torch = torch.from_numpy(u_fit)
56 | x_meas_fit_torch = torch.from_numpy(x_fit)
57 |
58 | # Setup optimizer
59 | params = list(nn_solution.ss_model.parameters())
60 | optimizer = optim.Adam(params, lr=lr)
61 | end = time.time()
62 |
63 | # Scale loss with respect to the initial one
64 | with torch.no_grad():
65 | x_est_torch = nn_solution.f_onestep(x_meas_fit_torch, u_fit_torch)
66 | err_init = x_est_torch - x_meas_fit_torch
67 | scale_error = torch.sqrt(torch.mean((err_init)**2, dim=0))
68 |
69 |
70 | LOSS = []
71 | start_time = time.time()
72 | # Training loop
73 | for itr in range(1, num_iter + 1):
74 | optimizer.zero_grad()
75 |
76 | # Perform one-step ahead prediction
77 | x_pred_torch = nn_solution.f_onestep(x_meas_fit_torch, u_fit_torch)
78 |
79 | # Compute fit loss
80 | err = x_pred_torch - x_meas_fit_torch
81 | err_scaled = err / scale_error
82 | loss_sc = torch.mean((err_scaled[:, [1, 3]]) ** 2)
83 |
84 | # Statistics
85 | LOSS.append(loss_sc.item())
86 | if itr % test_freq == 0:
87 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss_sc.item()))
88 |
89 | # Optimization step
90 | loss_sc.backward()
91 | optimizer.step()
92 |
93 | train_time = time.time() - start_time
94 | print(f"\nTrain time: {train_time:.2f}")
95 |
96 | if not os.path.exists("models"):
97 | os.makedirs("models")
98 |
99 | model_name = "model_SS_1step_nonoise.pkl"
100 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", model_name))
101 |
102 | # Use the model in simulation
103 | x_0 = x_fit[0, :]
104 | with torch.no_grad():
105 | x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_fit))
106 | loss_sc = torch.mean(torch.abs(x_sim_torch - x_meas_fit_torch))
107 | x_sim = np.array(x_sim_torch)
108 |
109 | n_plot = 4000
110 | fig, ax = plt.subplots(2, 1, sharex=True)
111 | ax[0].plot(t_fit[:n_plot], x_fit[:n_plot, 0], label='True')
112 | ax[0].plot(t_fit[:n_plot], x_sim[:n_plot, 0], label='Simulated')
113 | ax[0].set_xlabel("Time (s)")
114 | ax[0].set_ylabel("Position (m)")
115 | ax[0].legend()
116 | ax[0].grid()
117 | ax[1].plot(t_fit[:n_plot], x[:n_plot, 2], label='True')
118 | ax[1].plot(t_fit[:n_plot], x_sim[:n_plot, 2], label='Simulated')
119 | ax[1].set_xlabel("Time (s)")
120 | ax[1].set_ylabel("Angle (rad)")
121 | ax[1].legend()
122 | ax[1].grid()
123 |
124 | if not os.path.exists("fig"):
125 | os.makedirs("fig")
126 |
127 | fig, ax = plt.subplots(1, 1, figsize=(7.5, 6))
128 | ax.plot(LOSS)
129 | ax.grid(True)
130 | ax.set_ylabel("Loss (-)")
131 | ax.set_xlabel("Iteration (-)")
132 |
133 | if add_noise:
134 | fig_name = "cartpole_SS_loss_1step_noise.pdf"
135 | else:
136 | fig_name = "cartpole_SS_loss_1step_nonoise.pdf"
137 |
138 | fig.savefig(os.path.join("fig", fig_name), bbox_inches='tight')
139 |
--------------------------------------------------------------------------------
/examples/cartpole_example/cartpole_dynamics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | # Constants #
4 | M = 0.5
5 | m = 0.2
6 | b = 0.1
7 | ftheta = 0.1
8 | l = 0.3
9 | g = 9.81
10 | RAD_TO_DEG = 360.0/2.0/np.pi
11 | DEG_TO_RAD = 1.0/RAD_TO_DEG
12 |
13 | # Nonlinear dynamics ODE:
14 | # \dot x = f_ODE(x,u)
15 | from numba import jit
16 |
17 | @jit("float64[:](float64,float64[:],float64[:])", nopython=True, cache=True)
18 | def f_ODE_jit(t, x, u):
19 | F = u[0]
20 | v = x[1]
21 | theta = x[2]
22 | omega = x[3]
23 | der = np.zeros(x.shape)
24 | der[0] = v
25 | der[1] = (m * l * np.sin(theta) * omega ** 2 - m * g * np.sin(theta) * np.cos(theta) + m * ftheta * np.cos(
26 | theta) * omega + F - b * v) / (M + m * (1 - np.cos(theta) ** 2))
27 | der[2] = omega
28 | der[3] = ((M + m) * (g * np.sin(theta) - ftheta * omega) - m * l * omega ** 2 * np.sin(theta) * np.cos(
29 | theta) - (
30 | F - b * v) * np.cos(theta)) / (l * (M + m * (1 - np.cos(theta) ** 2)))
31 | return der
32 |
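# State and input convention assumed above: x = [p, v, theta, omega] (cart
# position, cart velocity, pole angle, pole angular velocity) and u[0] = F,
# the horizontal force on the cart; b and ftheta are viscous friction
# coefficients of the cart and of the pole hinge, respectively.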
33 |
34 | def f_ODE_wrapped(t,x,u):
35 | return f_ODE_jit(t,x,u)
36 |
37 | def f_ODE(t, x, u):
38 | F = u
39 | v = x[1]
40 | theta = x[2]
41 | omega = x[3]
42 | der = np.zeros(x.shape)
43 | der[0] = v
44 | der[1] = (m * l * np.sin(theta) * omega ** 2 - m * g * np.sin(theta) * np.cos(theta) + m * ftheta * np.cos(
45 | theta) * omega + F - b * v) / (M + m * (1 - np.cos(theta) ** 2))
46 | der[2] = omega
47 | der[3] = ((M + m) * (g * np.sin(theta) - ftheta * omega) - m * l * omega ** 2 * np.sin(theta) * np.cos(
48 | theta) - (
49 | F - b * v) * np.cos(theta)) / (l * (M + m * (1 - np.cos(theta) ** 2)))
50 | return der
51 |
52 |
53 | if __name__ == '__main__':
54 | f_ODE_jit(0.1, np.array([0.3, 0.2, 0.1, 0.4]), np.array([3.1]))  # u must be a float64 array to match the jit signature
55 |
--------------------------------------------------------------------------------
/examples/cartpole_example/ltisim.py:
--------------------------------------------------------------------------------
1 | import control
2 | import numpy as np
3 |
4 | class LinearStateSpaceSystem:
5 | def __init__(self, A, B, C, D=None, x0=None):
6 | if x0 is None:
7 | x0 = np.zeros(A.shape[0])
8 | self.x = np.copy(x0)
9 | self.A = np.array(A)
10 | self.B = np.array(B)
11 | self.C = np.array(C)
12 | self.D = np.array(D) if D is not None else None
13 |
14 | def output(self,u=None):
15 | self.y = self.C @ self.x
16 | if u is not None and self.D is not None:
17 | u = np.array(u).ravel()
18 | self.y += self.D @ u
19 | return self.y
20 |
21 | def update(self, u):
22 | u = np.array(u).ravel()
23 | self.x = self.A @ self.x + self.B @ u
24 |
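# Minimal usage sketch (hypothetical values): a discrete-time system is
# simulated by alternating output() and update() at every sampling instant:
#
# sys = LinearStateSpaceSystem(A=np.eye(2), B=np.ones((2, 1)), C=np.eye(2))
# for k in range(10):
#     y_k = sys.output(u=[1.0])
#     sys.update(u=[1.0])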
25 | if __name__ == '__main__':
26 |
27 | Ts = 0.1
28 | nx = 2
29 | nu = 1
30 | ny = 1
31 |
32 | Ac = np.eye(2)
33 | Bc = np.ones((2,1))
34 | Cc = np.eye(2)
35 | Dc = np.zeros((2,1))
36 |
37 | Ad = np.eye(nx) + Ac*Ts
38 | Bd = Bc*Ts
39 |
40 | # Default controller parameters -
41 | K_NUM = [-2100, -10001, -100]
42 | K_DEN = [1, 100, 0]
43 |
44 | Ts = 1e-3
45 | K = control.tf(K_NUM,K_DEN)
46 | Kd_tf = control.c2d(K, Ts)
47 | Kd_ss = control.ss(Kd_tf)
48 | Kd = LinearStateSpaceSystem(A=Kd_ss.A, B=Kd_ss.B, C=Kd_ss.C, D=Kd_ss.D)
49 |
50 |
51 | P = -100.01
52 | I = -1
53 | D = -20
54 | N = 100.0
55 |
56 | kP = control.tf(P,1, Ts)
57 | kI = I*Ts*control.tf([0, 1], [1,-1], Ts)
58 | kD = D*control.tf([N, -N], [1.0, Ts*N -1], Ts)
59 | kPID = kP + kD + kI
60 |
61 |
62 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/cartpole_SS_minibatch.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import sys
5 | import torch
6 | import torch.optim as optim
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 |
10 | sys.path.append(os.path.join(".."))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator
12 | from torchid.ssmodels import CartPoleStateSpaceModel
13 |
14 | # In[Load data]
15 | if __name__ == '__main__':
16 |
17 | COL_T = ['time']
18 | COL_Y = ['p_meas', 'theta_meas']
19 | COL_X = ['p', 'v', 'theta', 'omega']
20 | COL_U = ['u']
21 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref.csv"))
22 |
23 | t = np.array(df_X[COL_T], dtype=np.float32)
24 | y = np.array(df_X[COL_Y],dtype=np.float32)
25 | x = np.array(df_X[COL_X],dtype=np.float32)
26 | u = np.array(df_X[COL_U],dtype=np.float32)
27 | Ts = t[1] - t[0]
28 | x_noise = x
29 |
30 | n_x = x.shape[-1]
31 | ss_model = CartPoleStateSpaceModel(Ts)
32 | nn_solution = NeuralStateSpaceSimulator(ss_model)
33 | #model_name = "model_SS_1step_nonoise.pkl"
34 | model_name = "model_SS_150step_nonoise.pkl"
35 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_name )))
36 |
37 | len_fit = 40
38 | n_fit = int(len_fit//Ts)
39 | u_fit = u[0:n_fit]
40 | x_fit = x_noise[0:n_fit]
41 | t_fit = t[0:n_fit]
42 | u_fit_torch = torch.from_numpy(u_fit)
43 | x_meas_fit_torch = torch.from_numpy(x_fit)
44 | t_fit_torch = torch.from_numpy(t_fit)
45 |
46 | num_iter = 1000
47 | test_freq = 1
48 |
49 | params = list(nn_solution.ss_model.parameters())
50 | optimizer = optim.Adam(params, lr=1e-5)
51 | end = time.time()
52 |
53 |
54 | # In[Batch function]
55 | seq_len = 200
56 | batch_size = n_fit//seq_len
57 | test_freq = 10
58 | def get_batch(batch_size, seq_len):
59 | num_train_samples = x_meas_fit_torch.shape[0]
60 | s = torch.from_numpy(np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64), batch_size, replace=False))
61 | batch_x0 = x_meas_fit_torch[s, :] # (M, D)
62 | batch_t = torch.stack([t_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
63 | batch_x = torch.stack([x_meas_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
64 | batch_u = torch.stack([u_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
65 |
66 | return batch_t, batch_x0, batch_u, batch_x
67 |
68 | len_sim = x.shape[0]
69 | dist_sim = 1
70 |
71 | s = np.arange(0, len_sim - seq_len, dist_sim, dtype=np.int64)
72 | batch_size_scale = len(s)
73 | x_fit_torch = torch.tensor(x_fit)
74 | batch_x0 = x_fit_torch[s, :] # (M, D)
75 | batch_t = torch.stack([t_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size_scale)], dim=0)
76 | batch_x = torch.stack([x_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size_scale)], dim=0)
77 | batch_u = torch.stack([u_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size_scale)], dim=0)
78 |
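# Baseline-based scaling: zoh_error is the error of a trivial zero-order-hold
# predictor that keeps each window frozen at its initial state x0; its inverse
# RMS is used to weigh the state channels in the multistep loss below.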
79 | zoh_error = batch_x - batch_x0.view(batch_size_scale, 1, n_x)
80 | scale_error = 1e0/torch.sqrt(torch.mean(zoh_error**2, (0, 1)))
81 |
82 | # In[Fit model]
83 | ii = 0
84 | loss = None
85 | for itr in range(0, num_iter):
86 |
87 |
88 | if itr > 0 and itr % test_freq == 0:
89 | with torch.no_grad():
90 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
91 | ii += 1
92 | optimizer.zero_grad()
93 | batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)
94 |
95 | batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
96 | err = batch_x - batch_x_pred
97 | err_scaled = err * scale_error
98 | loss = torch.mean(err_scaled**2)
99 | loss.backward()
100 | optimizer.step()
101 |
102 | end = time.time()
103 |
104 | # In[Save model parameters]
105 | # model_name = "model_SS_150step_nonoise.pkl"
106 | model_name = "model_SS_200step_nonoise.pkl"
107 |
108 | if not os.path.exists("models"):
109 | os.makedirs("models")
110 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", model_name))
111 |
112 | # In[Simulate model]
113 | x_0 = x_fit[0, :]
114 | with torch.no_grad():
115 | x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_fit))
116 | loss = torch.mean(torch.abs(x_sim_torch - x_meas_fit_torch))
117 | x_sim = np.array(x_sim_torch)
118 | # In[1]
119 | n_plot = 200
120 |
121 | fig,ax = plt.subplots(2,1,sharex=True)
122 | ax[0].plot(t_fit[:n_plot], x_fit[:n_plot, 0], label='True')
123 | ax[0].plot(t_fit[:n_plot], x_sim[:n_plot,0], label='Simulated')
124 | ax[0].set_xlabel("Time (s)")
125 | ax[0].set_ylabel("Position (m)")
126 | ax[0].legend()
127 | ax[0].grid()
128 | ax[1].plot(t_fit[:n_plot], x[:n_plot,2], label='True')
129 | ax[1].plot(t_fit[:n_plot], x_sim[:n_plot, 2], label='Simulated')
130 | ax[1].set_xlabel("Time (s)")
131 | ax[1].set_ylabel("Angle (rad)")
132 | ax[1].legend()
133 | ax[1].grid()
134 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/fit_cartpole_OE.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import sys
5 | import torch
6 | import torch.optim as optim
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 |
10 | sys.path.append(os.path.join(".."))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator, RunningAverageMeter
12 | from torchid.ssmodels import CartPoleStateSpaceModel
13 |
14 | # In[Load data]
15 | if __name__ == '__main__':
16 |
17 | COL_T = ['time']
18 | COL_Y = ['p_meas', 'theta_meas']
19 | COL_X = ['p', 'v', 'theta', 'omega']
20 | COL_U = ['u']
21 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC.csv"))
22 |
23 | t = np.array(df_X[COL_T], dtype=np.float32)
24 | y = np.array(df_X[COL_Y],dtype=np.float32)
25 | x = np.array(df_X[COL_X],dtype=np.float32)
26 | u = np.array(df_X[COL_U],dtype=np.float32)
27 | Ts = t[1] - t[0]
28 | x_noise = x
29 |
30 | # In[Model]
31 | ss_model = CartPoleStateSpaceModel(Ts)
32 | nn_solution = NeuralStateSpaceSimulator(ss_model)
33 | model_name = "model_ARX_FE_sat_nonoise.pkl"
34 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_name)))
35 | # In[Setup optimization problem]
36 |
37 | len_fit = 40
38 | n_fit = int(len_fit//Ts)
39 | u_fit = u[0:n_fit]
40 | x_fit = x_noise[0:n_fit]
41 | t_fit = t[0:n_fit]
42 | u_fit_torch = torch.from_numpy(u_fit)
43 | x_meas_fit_torch = torch.from_numpy(x_fit)
44 | t_fit_torch = torch.from_numpy(t_fit)
45 |
46 | num_iter = 10000
47 | test_freq = 1
48 |
49 | params = nn_solution.ss_model.parameters()
50 | optimizer = optim.Adam(params, lr=1e-4)
51 | end = time.time()
52 | time_meter = RunningAverageMeter(0.97)
53 | loss_meter = RunningAverageMeter(0.97)
54 |
55 | #scale_error = 1./np.std(x_noise, axis=0)
56 | #scale_error = scale_error/np.sum(scale_error)
57 |
58 |
59 | # In[Batch function]
60 | seq_len = 100 #int(n_fit/10)
61 | batch_size = n_fit//seq_len
62 | test_freq = 10
63 | def get_batch(batch_size, seq_len):
64 | num_train_samples = x_meas_fit_torch.shape[0]
65 | s = torch.from_numpy(np.random.choice(np.arange(num_train_samples - seq_len, dtype=np.int64), batch_size, replace=False))
66 | batch_x0 = x_meas_fit_torch[s, :] # (M, D)
67 | batch_t = torch.stack([t_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
68 | batch_x = torch.stack([x_meas_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
69 | batch_u = torch.stack([u_fit_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)
70 |
71 | return batch_t, batch_x0, batch_u, batch_x
72 | # In[Scale]
73 | scale_error = 1e2*np.ones(4)/4
74 | scale_error = torch.tensor(scale_error.astype(np.float32))
75 | # In[Fit model]
76 | ii = 0
77 | loss = None
78 | for itr in range(0, num_iter):
79 |
80 |
81 | if itr > 0 and itr % test_freq == 0:
82 | with torch.no_grad():
83 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
84 | ii += 1
85 | optimizer.zero_grad()
86 | batch_t, batch_x0, batch_u, batch_x = get_batch(batch_size, seq_len)
87 | #batch_size = 256
88 | #N = x_true_torch_fit.shape[0]
89 | #N = int(N // batch_size) * batch_size
90 | #seq_len = int(N // batch_size)
91 | #batch_x = x_true_torch_fit[0:N].view(batch_size, seq_len, -1)
92 | #batch_u = u_torch_fit[0:N].view(batch_size, seq_len, -1)
93 | #batch_x0 = batch_x[:, 0, :]
94 |
95 | batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
96 | err = batch_x - batch_x_pred
97 | err_scaled = err * scale_error
98 | loss = torch.mean(err_scaled**2)
99 | loss.backward()
100 | optimizer.step()
101 |
102 | time_meter.update(time.time() - end)
103 | loss_meter.update(loss.item())
104 | end = time.time()
105 |
106 | # In[Save model parameters]
107 |
108 | # In[Simulate model]
109 | x_0 = x_fit[0, :]
110 | with torch.no_grad():
111 | x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_fit))
112 | loss = torch.mean(torch.abs(x_sim_torch - x_meas_fit_torch))
113 | x_sim = np.array(x_sim_torch)
114 | # In[1]
115 | n_plot = 4000
116 |
117 | fig,ax = plt.subplots(2,1,sharex=True)
118 | ax[0].plot(t_fit[:n_plot], x_fit[:n_plot, 0], label='True')
119 | ax[0].plot(t_fit[:n_plot], x_sim[:n_plot,0], label='Simulated')
120 | ax[0].set_xlabel("Time (s)")
121 | ax[0].set_ylabel("Position (m)")
122 | ax[0].legend()
123 | ax[0].grid()
124 | ax[1].plot(t_fit[:n_plot], x[:n_plot,2], label='True')
125 | ax[1].plot(t_fit[:n_plot], x_sim[:n_plot, 2], label='Simulated')
126 | ax[1].set_xlabel("Time (s)")
127 | ax[1].set_ylabel("Angle (rad)")
128 | ax[1].legend()
129 | ax[1].grid()
130 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/fit_cartpole_ref_ARX.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import sys
5 | import torch
6 | import torch.optim as optim
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 |
10 | sys.path.append(os.path.join(".."))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator, RunningAverageMeter
12 | from torchid.ssmodels import NeuralStateSpaceModel, CartPoleStateSpaceModel
13 |
14 | # In[Load data]
15 | if __name__ == '__main__':
16 |
17 | COL_T = ['time']
18 | COL_Y = ['p_meas', 'theta_meas']
19 | COL_X = ['p', 'v', 'theta', 'omega']
20 | COL_U = ['u']
21 | COL_R = ['r']
22 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref.csv"))
23 |
24 | t = np.array(df_X[COL_T], dtype=np.float32)
25 | y = np.array(df_X[COL_Y],dtype=np.float32)
26 | x = np.array(df_X[COL_X],dtype=np.float32)
27 | #u = np.array(df_X[COL_U],dtype=np.float32)
28 | u = np.array(df_X[COL_R],dtype=np.float32)
29 | Ts = t[1] - t[0]
30 | x_noise = x
31 |
32 | # In[Model]
33 | ss_model = CartPoleStateSpaceModel(Ts)#n_x=4, n_u=1, n_feat=64)
34 | nn_solution = NeuralStateSpaceSimulator(ss_model)
35 |
36 | # In[Setup optimization problem]
37 |
38 | len_fit = 40
39 | n_fit = int(len_fit//Ts)
40 | u_fit = u[0:n_fit]
41 | x_fit = x_noise[0:n_fit]
42 | t_fit = t[0:n_fit]
43 | u_fit_torch = torch.from_numpy(u_fit)
44 | x_meas_fit_torch = torch.from_numpy(x_fit)
45 | t_fit_torch = torch.from_numpy(t_fit)
46 |
47 | num_iter = 10000
48 | test_freq = 1
49 |
50 | params = list(nn_solution.ss_model.parameters())
51 | optimizer = optim.Adam(params, lr=1e-4)
52 | end = time.time()
53 | time_meter = RunningAverageMeter(0.97)
54 | loss_meter = RunningAverageMeter(0.97)
55 |
56 | #scale_error = 1./np.std(x_noise, axis=0)
57 | #scale_error = scale_error/np.sum(scale_error)
58 | #scale_error = 1e0*np.ones(4)/4
59 | #scale_error = 1./np.mean(np.abs(np.diff(x_fit, axis = 0)), axis=0)
60 | scale_error = 1./np.std(np.diff(x_fit, axis = 0), axis=0)
61 | scale_error = torch.tensor(scale_error.astype(np.float32))
62 |
63 | # In[Fit model]
64 | ii = 0
65 | for itr in range(1, num_iter + 1):
66 | optimizer.zero_grad()
67 | x_pred_torch = nn_solution.f_onestep(x_meas_fit_torch, u_fit_torch)
68 | err = x_pred_torch - x_meas_fit_torch
69 | err_scaled = err * scale_error
70 | loss = torch.mean((err_scaled)**2) #torch.mean(torch.sq(batch_x[:,1:,:] - batch_x_pred[:,1:,:]))
71 |
72 | loss.backward()
73 | optimizer.step()
74 |
75 | time_meter.update(time.time() - end)
76 | loss_meter.update(loss.item())
77 |
78 | if itr % test_freq == 0:
79 | with torch.no_grad():
80 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
81 | ii += 1
82 | end = time.time()
83 |
84 | # In[Save model]
85 | if not os.path.exists("models"):
86 | os.makedirs("models")
87 |
88 | model_name = "model_ARX_FE_ref_nonoise.pkl"
89 | torch.save(nn_solution.ss_model.state_dict(), os.path.join("models", model_name))
90 |
91 |
92 |
93 | # In[Simulate model]
94 | x_0 = x_fit[0, :]
95 | with torch.no_grad():
96 | x_sim_torch = nn_solution.f_sim(torch.tensor(x_0), torch.tensor(u_fit))
97 | loss = torch.mean(torch.abs(x_sim_torch - x_meas_fit_torch))
98 | x_sim = np.array(x_sim_torch)
99 | # In[1]
100 | n_plot = 4000
101 | fig,ax = plt.subplots(2,1,sharex=True)
102 | ax[0].plot(t_fit[:n_plot], x_fit[:n_plot, 0], label='True')
103 | ax[0].plot(t_fit[:n_plot], x_sim[:n_plot,0], label='Simulated')
104 | ax[0].set_xlabel("Time (s)")
105 | ax[0].set_ylabel("Position (m)")
106 | ax[0].legend()
107 | ax[0].grid()
108 | ax[1].plot(t_fit[:n_plot], x[:n_plot,2], label='True')
109 | ax[1].plot(t_fit[:n_plot], x_sim[:n_plot, 2], label='Simulated')
110 | ax[1].set_xlabel("Time (s)")
111 | ax[1].set_ylabel("Angle (rad)")
112 | ax[1].legend()
113 | ax[1].grid()
114 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/fit_cartpole_residual_ARX.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import sys
5 | import torch
6 | import torch.optim as optim
7 | import pandas as pd
8 | import matplotlib.pyplot as plt
9 |
10 | sys.path.append(os.path.join(".."))
11 | from torchid.ssfitter import NeuralStateSpaceSimulator, NeuralSumODE, RunningAverageMeter
12 | from torchid.ssmodels import CartPoleStateSpaceModel, NeuralStateSpaceModel
13 |
14 | # In[Load data]
15 | if __name__ == '__main__':
16 |
17 | COL_T = ['time']
18 | COL_Y = ['p_meas', 'theta_meas']
19 | COL_X = ['p', 'v', 'theta', 'omega']
20 | COL_U = ['u']
21 | COL_R = ['r']
22 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref.csv"))
23 |
24 | t = np.array(df_X[COL_T], dtype=np.float32)
25 | y = np.array(df_X[COL_Y],dtype=np.float32)
26 | x = np.array(df_X[COL_X],dtype=np.float32)
28 | u = np.array(df_X[COL_U],dtype=np.float32)
29 | Ts = t[1] - t[0]
30 | x_noise = x
31 |
32 | # In[Model]
33 | ss_model = CartPoleStateSpaceModel(Ts)
34 | model_name = "model_ARX_FE_nonoise.pkl"
35 | ss_model.load_state_dict(torch.load(os.path.join("models", model_name)))
36 |
37 | ss_model_residual = NeuralStateSpaceModel(n_x=4, n_u=1, n_feat=64)
38 | nn_solution = NeuralSumODE([ss_model,ss_model_residual])
39 |
40 | # In[Setup optimization problem]
41 |
42 | len_fit = 40
43 | n_fit = int(len_fit//Ts)
44 | u_fit = u[0:n_fit]
45 | x_fit = x_noise[0:n_fit]
46 | t_fit = t[0:n_fit]
47 | u_fit_torch = torch.from_numpy(u_fit)
48 | x_meas_fit_torch = torch.from_numpy(x_fit)
49 | t_fit_torch = torch.from_numpy(t_fit)
50 |
51 | num_iter = 20000
52 | test_freq = 1
53 |
54 | params = list(nn_solution.ss_model_list[1].parameters())
55 | optimizer = optim.Adam(params, lr=1e-5)
56 | end = time.time()
57 | time_meter = RunningAverageMeter(0.97)
58 | loss_meter = RunningAverageMeter(0.97)
59 |
60 | #scale_error = 1./np.std(x_noise, axis=0)
61 | #scale_error = scale_error/np.sum(scale_error)
62 | #scale_error = 1e0*np.ones(4)/4
63 | #scale_error = 1./np.mean(np.abs(np.diff(x_fit, axis = 0)), axis=0)
64 | scale_error = 1./np.std(np.diff(x_fit, axis = 0), axis=0)
65 | scale_error = torch.tensor(scale_error.astype(np.float32))
66 |
67 | # In[Fit model]
68 | ii = 0
69 | for itr in range(1, num_iter + 1):
70 | optimizer.zero_grad()
71 | x_pred_torch = nn_solution.f_ARX(x_meas_fit_torch, u_fit_torch)
72 | err = x_pred_torch - x_meas_fit_torch
73 | err_scaled = err * scale_error
74 | loss = 10e3*torch.mean((err_scaled)**2) #torch.mean(torch.sq(batch_x[:,1:,:] - batch_x_pred[:,1:,:]))
75 |
76 | loss.backward()
77 | optimizer.step()
78 |
79 | time_meter.update(time.time() - end)
80 | loss_meter.update(loss.item())
81 |
82 | if itr % test_freq == 0:
83 | with torch.no_grad():
84 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
85 | ii += 1
86 | end = time.time()
87 |
88 |
89 |
90 |
91 | # In[Simulate model]
92 | x_0 = x_fit[0, :]
93 | with torch.no_grad():
94 | x_sim_torch = nn_solution.f_OE(torch.tensor(x_0), torch.tensor(u_fit))
95 | loss = torch.mean(torch.abs(x_sim_torch - x_meas_fit_torch))
96 | x_sim = np.array(x_sim_torch)
97 | # In[1]
98 | n_plot = 200
99 | fig,ax = plt.subplots(2,1,sharex=True)
100 | ax[0].plot(t_fit[:n_plot], x_fit[:n_plot, 0], label='True')
101 | ax[0].plot(t_fit[:n_plot], x_sim[:n_plot,0], label='Simulated')
102 | ax[0].set_xlabel("Time (s)")
103 | ax[0].set_ylabel("Position (m)")
104 | ax[0].legend()
105 | ax[0].grid()
106 | ax[1].plot(t_fit[:n_plot], x[:n_plot,2], label='True')
107 | ax[1].plot(t_fit[:n_plot], x_sim[:n_plot, 2], label='Simulated')
108 | ax[1].set_xlabel("Time (s)")
109 | ax[1].set_ylabel("Angle (rad)")
110 | ax[1].legend()
111 | ax[1].grid()
112 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/ode_pendulum.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import time
4 | import numpy as np
5 | from scipy.interpolate import interp1d
6 |
7 | import torch
8 | import torch.nn as nn
9 | import torch.optim as optim
10 | import pandas as pd
11 | import matplotlib.pyplot as plt
12 |
13 | parser = argparse.ArgumentParser('ODE demo')
14 | parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
15 | parser.add_argument('--data_size', type=int, default=1000)
16 | parser.add_argument('--batch_time', type=int, default=10)
17 | parser.add_argument('--batch_size', type=int, default=20)
18 | parser.add_argument('--niters', type=int, default=2)
19 | parser.add_argument('--test_freq', type=int, default=1)
20 | parser.add_argument('--viz', action='store_true')
21 | parser.add_argument('--gpu', type=int, default=0)
22 | parser.add_argument('--adjoint', action='store_true')
23 | args = parser.parse_args()
24 |
25 | if args.adjoint:
26 | from torchdiffeq import odeint_adjoint as odeint
27 | else:
28 | from torchdiffeq import odeint
29 |
30 | device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
31 |
32 |
33 | class ODEFunc(nn.Module):
34 |
35 | def __init__(self,u_fun):
36 | super(ODEFunc, self).__init__()
37 |
38 | self.net = nn.Sequential(
39 | nn.Linear(5, 50), # 4 states input
40 | nn.Tanh(),
41 | nn.Linear(50, 2), # 2 state equations output (2 are trivial!)
42 | )
43 |
44 | self.AL = nn.Linear(4,4, bias=False)
45 | self.AL.weight = torch.nn.Parameter(torch.tensor([[0.,1.,0.,0.],[0.,0.,0.,0.],[0.,0.,0.,1.],[0.,0.,0.,0.]]), requires_grad=False)
46 | self.WL = nn.Linear(2,4, bias=False)
47 | self.WL.weight = torch.nn.Parameter(torch.tensor([[0.,0.],[1.,0.],[0.,0.],[0.,1.]]), requires_grad=False)
48 |
49 |
50 | for m in self.net.modules():
51 | if isinstance(m, nn.Linear):
52 | nn.init.normal_(m.weight, mean=0, std=0.1)
53 | nn.init.constant_(m.bias, val=0)
54 |
55 | self.u_fun = u_fun
56 |
57 | def forward(self, t, x):
58 | Ts = 10e-3
59 | idx = int(t//Ts)
60 | #print(idx)
61 | #if idx >= 4000:
62 | # idx = 3999
63 | #ui = self.u[idx]
64 |
65 | ui = torch.tensor(np.array(self.u_fun(t)).reshape(1))
66 | xu = torch.cat((x, ui), 0)
67 | fx_tmp = self.net(xu)
68 | dx = self.WL(fx_tmp) + self.AL(x)
69 | return dx
70 |
71 |
72 | class RunningAverageMeter(object):
73 | """Computes and stores the average and current value"""
74 |
75 | def __init__(self, momentum=0.99):
76 | self.momentum = momentum
77 | self.reset()
78 |
79 | def reset(self):
80 | self.val = None
81 | self.avg = 0
82 |
83 | def update(self, val):
84 | if self.val is None:
85 | self.avg = val
86 | else:
87 | self.avg = self.avg * self.momentum + val * (1 - self.momentum)
88 | self.val = val
89 |
90 |
91 | if __name__ == '__main__':
92 |
93 | COL_T = ['time']
94 | COL_Y = ['p_meas', 'theta_meas']
95 | COL_X = ['p', 'v', 'theta', 'omega']
96 | COL_U = ['u']
97 |
98 | df_X = pd.read_csv("pendulum_data.csv")
99 |
100 | time_data = np.array(df_X[COL_T], dtype=np.float32)
101 | y = np.array(df_X[COL_Y],dtype=np.float32)
102 | x = np.array(df_X[COL_X],dtype=np.float32)
103 | u = np.array(df_X[COL_U],dtype=np.float32)
104 |
105 | u_fun = interp1d(time_data.ravel(), u.ravel(), kind='linear')
106 |
107 | time_torch = torch.from_numpy(time_data.ravel())
108 | func = ODEFunc(u_fun)
109 | x0_torch = torch.from_numpy(x[0,:])
110 | y_torch = torch.from_numpy(y)
111 |
112 | C_matrix = torch.from_numpy(np.array([[1, 0, 0, 0], [0, 0, 1, 0]], dtype=np.float32))
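# C_matrix selects the measured outputs [p, theta] from the full state
# [p, v, theta, omega], i.e. y = C x.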
113 |
114 | optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
115 | end = time.time()
116 |
117 | time_meter = RunningAverageMeter(0.97)
118 | loss_meter = RunningAverageMeter(0.97)
119 |
120 | ii = 0
121 | for itr in range(1, args.niters + 1):
122 | optimizer.zero_grad()
123 | pred_x = odeint(func, x0_torch, time_torch)
124 | pred_y = torch.tensordot(pred_x, C_matrix, ((-1,), (1,)))
125 | loss = torch.mean(torch.abs(pred_y - y_torch))
126 | loss.backward()
127 | optimizer.step()
128 |
129 | time_meter.update(time.time() - end)
130 | loss_meter.update(loss.item())
131 |
132 | if itr % args.test_freq == 0:
133 | with torch.no_grad():
134 | pred_x = odeint(func, x0_torch, time_torch)
135 | pred_y = torch.tensordot(pred_x, C_matrix, ((-1,), (1,)))
136 | loss = torch.mean(torch.abs(pred_y - y_torch))
137 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
138 | ii += 1
139 |
140 | end = time.time()
141 |
142 |
--------------------------------------------------------------------------------
/examples/cartpole_example/old/ode_pendulum_forward_eul_loop.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import time
4 | import numpy as np
5 | from scipy.interpolate import interp1d
6 |
7 | import torch
8 | import torch.nn as nn
9 | import torch.optim as optim
10 | import pandas as pd
11 | import matplotlib.pyplot as plt
12 |
13 | parser = argparse.ArgumentParser('ODE demo')
14 | parser.add_argument('--niters', type=int, default=1000)
15 | parser.add_argument('--test_freq', type=int, default=10)
16 | parser.add_argument('--gpu', type=int, default=0)
17 | args = parser.parse_args()
18 |
19 |
20 | device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
21 |
22 |
23 | class ODEFunc(nn.Module):
24 |
25 | def __init__(self,u):
26 | super(ODEFunc, self).__init__()
27 |
28 | self.net = nn.Sequential(
29 |             nn.Linear(5, 50),  # input: 4 states + 1 input signal
30 |             nn.Tanh(),
31 |             nn.Linear(50, 2),  # output: the 2 non-trivial state derivatives
32 | )
33 |
34 | self.AL = nn.Linear(4,4, bias=False)
35 | self.AL.weight = torch.nn.Parameter(torch.tensor([[0.,1.,0.,0.],[0.,0.,0.,0.],[0.,0.,0.,1.],[0.,0.,0.,0.]]), requires_grad=False)
36 | self.WL = nn.Linear(2,4, bias=False)
37 | self.WL.weight = torch.nn.Parameter(torch.tensor([[0.,0.],[1.,0.],[0.,0.],[0.,1.]]), requires_grad=False)
38 |
39 |
40 | for m in self.net.modules():
41 | if isinstance(m, nn.Linear):
42 | nn.init.normal_(m.weight, mean=0, std=1e-3)
43 | nn.init.constant_(m.bias, val=0)
44 |
45 | self.u = torch.Tensor(u)
46 |
47 | def forward(self, x0):
48 | Ts = 5e-3
49 | N = np.shape(self.u)[0]
50 | nx = np.shape(x0)[0]
51 |
52 | X = torch.empty((N,nx))
53 | xstep = x0
54 | for i in range(0,N):
55 | X[i,:] = xstep
56 |
57 | #identity = xold
58 | ustep = self.u[i]
59 | #uold = torch.tensor([0.0])
60 | xu = torch.cat((xstep, ustep), 0)
61 | fx_tmp = self.net(xu)
62 | dx = Ts*(self.WL(fx_tmp) + self.AL(xstep))
63 | xstep = xstep + dx
64 |
65 | return X
66 |
67 | class RunningAverageMeter(object):
68 | """Computes and stores the average and current value"""
69 |
70 | def __init__(self, momentum=0.99):
71 | self.momentum = momentum
72 | self.reset()
73 |
74 | def reset(self):
75 | self.val = None
76 | self.avg = 0
77 |
78 | def update(self, val):
79 | if self.val is None:
80 | self.avg = val
81 | else:
82 | self.avg = self.avg * self.momentum + val * (1 - self.momentum)
83 | self.val = val
84 |
85 |
86 | if __name__ == '__main__':
87 | COL_T = ['time']
88 | COL_Y = ['p_meas', 'theta_meas']
89 | COL_X = ['p', 'v', 'theta', 'omega']
90 | COL_U = ['u']
91 | COL_D = ['d']
92 |
93 | df_X = pd.read_csv("pendulum_data_PID.csv")
94 |
95 | time_data = np.array(df_X[COL_T], dtype=np.float32)
96 | y = np.array(df_X[COL_Y],dtype=np.float32)
97 | x = np.array(df_X[COL_X],dtype=np.float32)
98 |
99 | d = np.array(df_X[COL_D],dtype=np.float32)
100 | d_fun = interp1d(time_data.ravel(), d.ravel(), kind='linear')
101 |
102 | time_torch = torch.from_numpy(time_data.ravel())
103 | func = ODEFunc(d)
104 | x0_torch = torch.from_numpy(x[0,:])
105 | y_true_torch = torch.from_numpy(y)
106 | C_matrix = torch.from_numpy(np.array([[1, 0, 0, 0], [0, 0, 1, 0]], dtype=np.float32))
107 |
108 |
109 | # In[0]
110 | optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
111 | end = time.time()
112 | time_meter = RunningAverageMeter(0.97)
113 | loss_meter = RunningAverageMeter(0.97)
114 |
115 | ii = 0
116 | for itr in range(1, args.niters + 1):
117 | optimizer.zero_grad()
118 | x_pred_torch = func(x0_torch)
119 | y_pred_torch = torch.tensordot(x_pred_torch, C_matrix, ((-1,), (1,)))
120 | loss = torch.mean(torch.abs(y_pred_torch - y_true_torch))
121 | loss.backward()
122 | optimizer.step()
123 |
124 | time_meter.update(time.time() - end)
125 | loss_meter.update(loss.item())
126 |
127 | if itr % args.test_freq == 0:
128 | with torch.no_grad():
129 | x_pred_torch = func(x0_torch)
130 | y_pred_torch = torch.tensordot(x_pred_torch, C_matrix, ((-1,), (1,)))
131 | loss = torch.mean(torch.abs(y_pred_torch - y_true_torch))
132 | print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
133 | ii += 1
134 |
135 | end = time.time()
136 |
137 | # In[1]
138 | plt.figure()
139 | plt.plot(np.array(y_true_torch[:,0]))
140 | plt.plot(np.array(y_pred_torch[:,0]))
141 |
142 | plt.figure()
143 | plt.plot(np.array(y_true_torch[:,1]))
144 | plt.plot(np.array(y_pred_torch[:,1]))
145 |
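The forward pass above unrolls an explicit forward-Euler discretization of the
same structured vector field, x_{k+1} = x_k + Ts * f(x_k, u_k), so gradients of
a simulation-error loss flow through every step. A minimal standalone sketch of
the recursion (euler_rollout is a hypothetical helper; f stands for the
structured derivative WL(net([x, u])) + AL(x)):

    import torch

    def euler_rollout(f, x0, u, Ts=5e-3):
        # Unrolls x_{k+1} = x_k + Ts * f(x_k, u_k); autograd tracks every step.
        X, x = [], x0
        for uk in u:
            X.append(x)
            x = x + Ts * f(x, uk)
        return torch.stack(X, 0)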
--------------------------------------------------------------------------------
/examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | from examples.cartpole_example.cartpole_dynamics import RAD_TO_DEG, DEG_TO_RAD
5 |
6 | if __name__ == '__main__':
7 |
8 | #df_model = pd.read_csv(os.path.join("data", "pendulum_data_PID.csv"))
9 | #df_nn = pd.read_csv(os.path.join("data", "pendulum_data_PID_NN_model.csv"))
10 | df_meas = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val.csv"))
11 | df_nn = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val_NN_model.csv"))
12 |
13 |
14 | fig,axes = plt.subplots(3,1, figsize=(10,10), sharex=True)
15 | axes[0].plot(df_meas['time'], df_meas['p'], "k", label='p system')
16 | axes[0].plot(df_nn['time'], df_nn['p'], "r", label='p NN')
17 | axes[0].set_title("Position (m)")
18 | axes[0].set_ylim(-10, 10.0)
19 |
20 |
21 | axes[1].plot(df_meas['time'], df_meas['theta'] * RAD_TO_DEG, "k", label='theta system')
22 | axes[1].plot(df_nn['time'], df_nn['theta']*RAD_TO_DEG, "r", label='theta NN')
23 |
24 |
25 |     axes[2].plot(df_meas['time'], df_meas['u'], label="u system")
26 |     axes[2].plot(df_nn['time'], df_nn['u'], label="u NN")
27 |
28 |
29 | for ax in axes:
30 | ax.grid(True)
31 | ax.legend()
32 |
--------------------------------------------------------------------------------
/examples/cartpole_example/test/cartpole_use_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import numpy as np
4 | import torch
5 | import matplotlib.pyplot as plt
6 | import sys
7 | sys.path.append(os.path.join(".."))
8 | from torchid.ssfitter import NeuralStateSpaceSimulator
9 | from torchid.ssmodels import CartPoleStateSpaceModel
10 |
11 |
12 | if __name__ == '__main__':
13 |
14 | COL_T = ['time']
15 | COL_Y = ['p_meas', 'theta_meas']
16 | COL_X = ['p', 'v', 'theta', 'omega']
17 | COL_U = ['u']
18 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC.csv"))
19 |
20 | t = np.array(df_X[COL_T], dtype=np.float32)
21 | y = np.array(df_X[COL_Y],dtype=np.float32)
22 | x = np.array(df_X[COL_X],dtype=np.float32)
23 | u = np.array(df_X[COL_U],dtype=np.float32)
24 | Ts = t[1] - t[0]
25 |
26 | x0_torch = torch.from_numpy(x[0,:])
27 |
28 | ss_model = CartPoleStateSpaceModel(Ts)
29 | nn_solution = NeuralStateSpaceSimulator(ss_model)
30 | nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", "model_OE_minibatch.pkl")))
31 |
32 | x_torch = torch.tensor(x)
33 | x0_torch = torch.tensor(x[0,:])
34 | u_torch = torch.tensor(u)
35 | with torch.no_grad():
36 | x_sim_torch = nn_solution.f_sim(x0_torch, u_torch)
37 | loss = torch.mean(torch.abs(x_sim_torch - x_torch))
38 |
39 | x_sim = np.array(x_sim_torch)
40 |
41 | n_plot = t.size
42 | fig,ax = plt.subplots(3,1,sharex=True)
43 | ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')
44 | ax[0].plot(t[:n_plot], x_sim[:n_plot, 0], label='Simulated')
45 | ax[0].set_xlabel("Time (s)")
46 | ax[0].set_ylabel("Cart position (m)")
47 | ax[0].legend()
48 | ax[0].grid()
49 |
50 | ax[1].plot(t[:n_plot], x[:n_plot, 2], label='True')
51 | ax[1].plot(t[:n_plot], x_sim[:n_plot, 2], label='Simulated')
52 | ax[1].set_xlabel("Time (s)")
53 | ax[1].set_ylabel("Pendulum angle (rad)")
54 | ax[1].legend()
55 | ax[1].grid()
56 |
57 | ax[2].plot(t[:n_plot], u[:n_plot, 0])
58 | ax[2].set_xlabel("Time (s)")
59 | ax[2].set_ylabel("Input Voltage (V)")
60 | #ax[2].legend()
61 | ax[2].grid()
62 |
--------------------------------------------------------------------------------
/examples/cartpole_example/test/differentiator_example.py:
--------------------------------------------------------------------------------
1 | import scipy.signal as signal
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import pandas as pd
5 | import os
6 |
7 | def plot_response(fs, w, h, title):
8 | "Utility function to plot response functions"
9 | fig = plt.figure()
10 | ax = fig.add_subplot(111)
11 | ax.semilogx(0.5*fs*w/np.pi, 20*np.log10(np.abs(h)))
12 | ax.set_ylim(-60, 40)
13 | ax.set_xlim(0.1, 0.5*fs)
14 | ax.grid(True)
15 | ax.set_xlabel('Frequency (Hz)')
16 | ax.set_ylabel('Gain (dB)')
17 | ax.set_title(title)
18 |
19 | """
20 | fs = 100.0 # Sample rate, Hz
21 | cutoff = 10.0 # Desired cutoff frequency, Hz
22 | trans_width = 20.0 # Width of transition from pass band to stop band, Hz
23 | numtaps = 50 # Size of the FIR filter.
24 | taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [1, 0], fs=fs)
25 | w, h = signal.freqz(taps, [1], worN=2000)
26 | plot_response(fs, w, h, "Low-pass Filter")
27 | """
28 |
29 | len_fit = 40
30 | seq_len = 50
31 | num_iter = 16000
32 | test_freq = 50
33 |
34 | add_noise = True
35 |
36 |
37 | COL_T = ['time']
38 | COL_Y = ['p_meas', 'theta_meas']
39 | COL_X = ['p', 'v', 'theta', 'omega']
40 | COL_U = ['u']
41 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref.csv"))
42 |
43 | std_noise_p = add_noise * 0.02
44 | std_noise_theta = add_noise * 0.004
45 | std_noise = np.array([std_noise_p, std_noise_theta])
46 |
47 | t = np.array(df_X[COL_T], dtype=np.float32)
48 | y = np.array(df_X[COL_Y], dtype=np.float32)
49 | x = np.array(df_X[COL_X], dtype=np.float32)
50 | u = np.array(df_X[COL_U], dtype=np.float32)
51 | y_meas = np.copy(y) + np.random.randn(*y.shape)*std_noise
52 |
53 |
54 |
55 | Ts = (t[1] - t[0]).item()  # .item() gives a builtin float; np.float was removed from NumPy
56 | fs = 1/Ts # Sample rate, Hz
57 | cutoff = 1.0 # Desired cutoff frequency, Hz
58 | trans_width = 3.0 # Width of transition from pass band to stop band, Hz
59 | numtaps = 40 # Size of the FIR filter.
60 | taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [2*np.pi*cutoff*10*1.5, 0], fs=fs, type='differentiator')
61 | w, h = signal.freqz(taps, [1], worN=2000)
62 | plot_response(fs, w[1:], h[1:], "Derivative Filter")
63 |
64 |
65 | plt.figure()
66 | plt.plot(taps)
67 |
68 | x_est = np.zeros((y_meas.shape[0], 4), dtype=np.float32)
69 | x_est[:, 0] = y_meas[:, 0]
70 | x_est[:, 2] = y_meas[:, 1]
71 | x_est[:, 1] = np.convolve(x_est[:, 0], taps, 'same') * 2 * np.pi  # FIR-differentiated position -> velocity estimate
72 | x_est[:, 3] = np.convolve(x_est[:, 2], taps, 'same') * 2 * np.pi  # FIR-differentiated angle -> angular velocity estimate
73 |
74 |
75 | fig,ax = plt.subplots(4,1,sharex=True)
76 |
77 | ax[0].plot(x[:,0], 'k')
78 | ax[0].plot(x_est[:,0], 'r')
79 |
80 | ax[1].plot(x[:,1], 'k')
81 | ax[1].plot(x_est[:,1], 'r')
82 |
83 | ax[2].plot(x[:,2], 'k')
84 | ax[2].plot(x_est[:,2], 'r')
85 |
86 | ax[3].plot(x[:,3], 'k')
87 | ax[3].plot(x_est[:,3], 'r')
88 |
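As a quick sanity check on the FIR differentiator above, a second-order
central-difference estimate can be compared against x_est[:, 1] and x_est[:, 3];
a sketch assuming the uniform sampling time Ts already computed:

    v_fd = np.gradient(y_meas[:, 0], Ts)      # central differences, O(Ts^2) accurate
    omega_fd = np.gradient(y_meas[:, 1], Ts)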
--------------------------------------------------------------------------------
/examples/cartpole_example/test/differentiator_example_2.py:
--------------------------------------------------------------------------------
1 | import scipy.signal as signal
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import pandas as pd
5 | import os
6 |
7 | def plot_response(fs, w, h, title):
8 | "Utility function to plot response functions"
9 | fig = plt.figure()
10 | ax = fig.add_subplot(111)
11 | ax.semilogx(0.5*fs*w/np.pi, 20*np.log10(np.abs(h)))
12 | ax.set_ylim(-60, 40)
13 | ax.set_xlim(0.1, 0.5*fs)
14 | ax.grid(True)
15 | ax.set_xlabel('Frequency (Hz)')
16 | ax.set_ylabel('Gain (dB)')
17 | ax.set_title(title)
18 |
19 | """
20 | fs = 100.0 # Sample rate, Hz
21 | cutoff = 10.0 # Desired cutoff frequency, Hz
22 | trans_width = 20.0 # Width of transition from pass band to stop band, Hz
23 | numtaps = 50 # Size of the FIR filter.
24 | taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [1, 0], fs=fs)
25 | w, h = signal.freqz(taps, [1], worN=2000)
26 | plot_response(fs, w, h, "Low-pass Filter")
27 | """
28 |
29 | add_noise = True
30 |
31 |
32 | COL_T = ['time']
33 | COL_Y = ['p_meas', 'theta_meas']
34 | COL_X = ['p', 'v', 'theta', 'omega']
35 | COL_U = ['u']
36 | df_X = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_id.csv"))
37 |
38 | std_noise_p = add_noise * 0.02
39 | std_noise_theta = add_noise * 0.004
40 | std_noise = np.array([std_noise_p, std_noise_theta])
41 |
42 | t = np.array(df_X[COL_T], dtype=np.float32)
43 | y = np.array(df_X[COL_Y], dtype=np.float32)
44 | x = np.array(df_X[COL_X], dtype=np.float32)
45 | u = np.array(df_X[COL_U], dtype=np.float32)
46 | y_meas = np.copy(y) + np.random.randn(*y.shape)*std_noise
47 |
48 |
49 |
50 | Ts = (t[1] - t[0]).item()  # .item() gives a builtin float; np.float was removed from NumPy
51 | fs = 1/Ts # Sample rate, Hz
52 | cutoff = 1.0 # Desired cutoff frequency, Hz
53 | trans_width = 5 # Width of transition from pass band to stop band, Hz
54 | numtaps = 128 # Size of the FIR filter.
55 | taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [2*np.pi*2*np.pi*10*1.5, 0], fs=fs, type='differentiator')
56 | w, h = signal.freqz(taps, [1], worN=2000)
57 | plot_response(fs, w[1:], h[1:], "Derivative Filter")
58 |
59 |
60 | plt.figure()
61 | plt.plot(taps)
62 |
63 | x_est = np.zeros((y_meas.shape[0], 4), dtype=np.float32)
64 | x_est[:, 0] = y_meas[:, 0]
65 | x_est[:, 2] = y_meas[:, 1]
66 | x_est[:, 1] = np.convolve(x_est[:, 0], taps, 'same')  # FIR-differentiated position -> velocity estimate
67 | x_est[:, 3] = np.convolve(x_est[:, 2], taps, 'same')  # FIR-differentiated angle -> angular velocity estimate
68 |
69 |
70 | fig,ax = plt.subplots(4,1,sharex=True)
71 |
72 | ax[0].plot(x[:,0], 'k')
73 | ax[0].plot(x_est[:,0], 'r')
74 |
75 | ax[1].plot(x[:,1], 'k')
76 | ax[1].plot(x_est[:,1], 'r')
77 |
78 | ax[2].plot(x[:,2], 'k')
79 | ax[2].plot(x_est[:,2], 'r')
80 |
81 | ax[3].plot(x[:,3], 'k')
82 | ax[3].plot(x_est[:,3], 'r')
83 |
--------------------------------------------------------------------------------
/scheme_full.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/scheme_full.png
--------------------------------------------------------------------------------
/torchid/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/forgi86/sysid-neural-structures-fitting/886c969c03e86422b8a81d9e826b5745a4934bb0/torchid/__init__.py
--------------------------------------------------------------------------------
/torchid/iofitter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | class NeuralIOSimulator:
7 | """ This class implements prediction/simulation methods for the IO model structure
8 |
9 | Attributes
10 | ----------
11 | io_model: nn.Module
12 | The neural IO model to be fitted
13 | """
14 |
15 | def __init__(self, io_model):
16 | self.io_model = io_model
17 |
18 | def f_onestep(self, PHI):
19 | """ Naive one-step prediction
20 |
21 | Parameters
22 | ----------
23 | PHI : Tensor. Size: (N, n_a + n_b)
24 | Measured IO regressor tensor
25 |
26 | Returns
27 | -------
28 | Tensor. Size: (N, n_y)
29 | One-step prediction of the output
30 |
31 | """
32 |
33 | Y_pred = self.io_model(PHI)
34 | return Y_pred
35 |
36 | def f_sim(self, y_seq, u_seq, U):
37 | """ Open-loop simulation
38 |
39 | Parameters
40 | ----------
41 | y_seq: Tensor. Size: (n_a)
42 | Initial regressor with past values of y
43 |
44 | u_seq: Tensor. Size: (n_b)
45 | Initial regressor with past values of u
46 |
47 | U : Tensor. Size: (N, n_u)
48 | Input sequence tensor
49 |
50 | Returns
51 | -------
52 | Tensor. Size: (N, n_y)
53 | Open-loop simulation of the output
54 |
55 | """
56 | N = np.shape(U)[0]
57 | Y_list = []
58 |
59 | for i in range(N):
60 | phi = torch.cat((y_seq, u_seq))
61 | yi = self.io_model(phi)
62 | Y_list += [yi]
63 |
64 | if i < N-1:
65 | # y shift
66 | y_seq[1:] = y_seq[0:-1]
67 | y_seq[0] = yi
68 |
69 | # u shift
70 | u_seq[1:] = u_seq[0:-1]
71 | u_seq[0] = U[i]
72 |
73 | Y = torch.stack(Y_list, 0)
74 | return Y
75 |
76 | def f_sim_multistep(self, batch_u, batch_y_seq, batch_u_seq):
77 | """ Multi-step simulation over (mini)batches
78 |
79 | Parameters
80 | ----------
81 | batch_u: Tensor. Size: (q, m, n_u)
82 | Input sequence for each subsequence in the minibatch
83 |
84 | batch_y_seq: Tensor. Size: (q, n_a)
85 | Initial regressor with past values of y for each subsequence in the minibatch
86 |
87 | batch_u_seq: Tensor. Size: (q, n_b)
88 | Initial regressor with past values of u for each subsequence in the minibatch
89 |
90 | Returns
91 | -------
92 | Tensor. Size: (q, m, n_y)
93 | Simulated output for all subsequences in the minibatch
94 |
95 | """
96 |
97 | batch_size = batch_u.shape[0] # number of training samples in the batch
98 | seq_len = batch_u.shape[1] # length of the training sequences
99 | n_a = batch_y_seq.shape[1] # number of autoregressive terms on y
100 | n_b = batch_u_seq.shape[1] # number of autoregressive terms on u
101 |
102 | Y_sim_list = []
103 | for i in range(seq_len):
104 | phi = torch.cat((batch_y_seq, batch_u_seq), -1)
105 | yi = self.io_model(phi)
106 | Y_sim_list += [yi]
107 |
108 | # y shift
109 | batch_y_seq[:, 1:] = batch_y_seq[:, 0:-1]
110 | batch_y_seq[:, [0]] = yi[:]
111 |
112 | # u shift
113 | batch_u_seq[:, 1:] = batch_u_seq[:, 0:-1]
114 | batch_u_seq[:, [0]] = batch_u[:, i]
115 |
116 | Y_sim = torch.stack(Y_sim_list, 1)
117 | return Y_sim
118 |
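A minimal usage sketch for the simulator above, assuming the NeuralIOModel
defined in torchid/iomodels.py and the shapes documented in the docstrings
(evaluation only, hence no_grad):

    import torch
    from torchid.iomodels import NeuralIOModel
    from torchid.iofitter import NeuralIOSimulator

    n_a, n_b, N = 2, 2, 100
    simulator = NeuralIOSimulator(NeuralIOModel(n_a=n_a, n_b=n_b))

    y_seq = torch.zeros(n_a)       # past outputs y[k-1], ..., y[k-n_a]
    u_seq = torch.zeros(n_b)       # past inputs  u[k-1], ..., u[k-n_b]
    U = torch.randn(N, 1)          # input sequence to simulate over
    with torch.no_grad():
        Y = simulator.f_sim(y_seq, u_seq, U)   # -> (N, 1)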
--------------------------------------------------------------------------------
/torchid/iomodels.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | class NeuralIOModel(nn.Module):
7 | """ This class implements an IO neural model
8 |
9 | Attributes
10 | ----------
11 | n_a : int.
12 | number of autoregressive lags in y
13 | n_b : int.
14 | number of autoregressive lags in u
15 | n_feat : int.
16 | number of units in the hidden layer
17 | """
18 | def __init__(self, n_a, n_b, n_feat=64, small_init=True):
19 | super(NeuralIOModel, self).__init__()
20 | self.n_a = n_a
21 | self.n_b = n_b
22 | self.n_feat = n_feat
23 |
24 | const_np = np.zeros((n_a + n_b, 1), dtype=np.float32)
25 | const_np[0, 0] = 1.0
26 | self.const = torch.tensor(const_np)
27 |
28 | self.net = nn.Sequential(
29 |             nn.Linear(n_a + n_b, n_feat),  # regressor: n_a past outputs and n_b past inputs
30 | nn.ReLU(),
31 | nn.Linear(n_feat, 1),
32 | )
33 |
34 | if small_init:
35 | for m in self.net.modules():
36 | if isinstance(m, nn.Linear):
37 | nn.init.normal_(m.weight, mean=0, std=1e-4)
38 | nn.init.constant_(m.bias, val=0)
39 |
40 | def forward(self, phi):
41 | Y = self.net(phi) + torch.matmul(phi, self.const)
42 | return Y
43 |
44 |
45 | class NeuralIOModelComplex(nn.Module):
46 | def __init__(self, n_a, n_b, n_feat=64, small_init=True):
47 | super(NeuralIOModelComplex, self).__init__()
48 | self.n_a = n_a
49 | self.n_b = n_b
50 |         self.n_feat = n_feat
51 |
52 | const_np = np.zeros((n_a + n_b, 1), dtype=np.float32)
53 | const_np[0, 0] = 1.0
54 | self.const = torch.tensor(const_np)
55 |
56 | self.net = nn.Sequential(
57 |             nn.Linear(n_a + n_b, n_feat),  # regressor: n_a past outputs and n_b past inputs
58 | nn.ELU(),
59 | nn.Linear(n_feat, n_feat),
60 | nn.ELU(),
61 | nn.Linear(n_feat, 1)
62 | )
63 |
64 | if small_init:
65 | for m in self.net.modules():
66 | if isinstance(m, nn.Linear):
67 | nn.init.normal_(m.weight, mean=0, std=1e-3)
68 | nn.init.constant_(m.bias, val=0)
69 |
70 | def forward(self, phi):
71 | Y = self.net(phi) + torch.matmul(phi, self.const)
72 | return Y
73 |
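Note: the fixed `const` vector selects the most recent past output from the
regressor, so both models are residual corrections around a one-step
persistence predictor:

    y_hat[k] = y[k-1] + net([y[k-1], ..., y[k-n_a], u[k-1], ..., u[k-n_b]])

With small_init the network weights start near zero, so the untrained model
already reduces to (approximately) y_hat[k] = y[k-1], a reasonable starting
point for fitting.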
--------------------------------------------------------------------------------
/torchid/ssfitter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | class NeuralStateSpaceSimulator:
7 | """ This class implements prediction/simulation methods for the SS model structure
8 |
9 | Attributes
10 | ----------
11 | ss_model: nn.Module
12 | The neural SS model to be fitted
13 | Ts: float
14 | model sampling time
15 |
16 | """
17 |
18 | def __init__(self, ss_model, Ts=1.0):
19 | self.ss_model = ss_model
20 | self.Ts = Ts
21 |
22 | def f_onestep(self, X, U):
23 | """ Naive one-step prediction
24 |
25 | Parameters
26 | ----------
27 | X : Tensor. Size: (N, n_x)
28 | State sequence tensor
29 |
30 | U : Tensor. Size: (N, n_u)
31 | Input sequence tensor
32 |
33 | Returns
34 | -------
35 | Tensor. Size: (N, n_x)
36 | One-step prediction over N steps
37 |
38 | """
39 |
40 | X_pred = torch.empty(X.shape)
41 | X_pred[0, :] = X[0, :]
42 | DX = self.ss_model(X[0:-1], U[0:-1])
43 | X_pred[1:,:] = X[0:-1, :] + DX
44 |
45 | return X_pred
46 |
47 | def f_sim(self, x0, u):
48 | """ Open-loop simulation
49 |
50 | Parameters
51 | ----------
52 | x0 : Tensor. Size: (n_x)
53 | Initial state
54 |
55 |         u : Tensor. Size: (N, n_u)
56 | Input sequence tensor
57 |
58 | Returns
59 | -------
60 | Tensor. Size: (N, n_x)
61 | Open-loop model simulation over N steps
62 |
63 | """
64 |
65 | N = np.shape(u)[0]
66 | nx = np.shape(x0)[0]
67 |
68 | X_list = []
69 | xstep = x0
70 | for i in range(N):
71 | X_list += [xstep]
72 | #X[i,:] = xstep
73 | ustep = u[i]
74 | dx = self.ss_model(xstep, ustep)
75 | xstep = xstep + dx
76 |
77 | X = torch.stack(X_list, 0)
78 |
79 | return X
80 |
81 | def f_sim_multistep(self, x0_batch, U_batch):
82 | """ Multi-step simulation over (mini)batches
83 |
84 | Parameters
85 | ----------
86 | x0_batch: Tensor. Size: (q, n_x)
87 | Initial state for each subsequence in the minibatch
88 |
89 | U_batch: Tensor. Size: (q, m, n_u)
90 | Input sequence for each subsequence in the minibatch
91 |
92 | Returns
93 | -------
94 | Tensor. Size: (q, m, n_x)
95 | Simulated state for all subsequences in the minibatch
96 |
97 | """
98 |
99 | batch_size = x0_batch.shape[0]
100 | n_x = x0_batch.shape[1]
101 | seq_len = U_batch.shape[1]
102 |
103 | X_sim_list = []
104 | xstep = x0_batch
105 | for i in range(seq_len):
106 | X_sim_list += [xstep] #X_sim[:, i, :] = xstep
107 | ustep = U_batch[:, i, :]
108 | dx = self.ss_model(xstep, ustep)
109 | xstep = xstep + dx
110 |
111 | X_sim = torch.stack(X_sim_list, 1)#.squeeze(2)
112 | return X_sim
113 |
114 | # def f_residual_fullyobserved(self, X_batch, U_batch):
115 | # X_increment = X_batch[:, -1, :] - X_batch[:, 0, :]
116 |
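A minimal multistep-training sketch built on the simulator above (hedged:
ss_model is any nn.Module with signature dx = ss_model(x, u), as in
torchid/ssmodels.py; batching follows torchid/util.py; training_step, x_data
and u_data are hypothetical names, and the MSE loss is one possible choice):

    import torch
    from torchid.util import get_random_batch_idx

    def training_step(simulator, optimizer, x_data, u_data, batch_size=32, seq_len=64):
        # x_data: (num_samples, n_x) float32 array; u_data: (num_samples, n_u)
        batch_start, batch_idx = get_random_batch_idx(x_data.shape[0], batch_size, seq_len)
        x0_batch = torch.tensor(x_data[batch_start])   # (q, n_x) initial states
        U_batch = torch.tensor(u_data[batch_idx])      # (q, m, n_u) input subsequences
        X_batch = torch.tensor(x_data[batch_idx])      # (q, m, n_x) target states

        optimizer.zero_grad()
        X_sim = simulator.f_sim_multistep(x0_batch, U_batch)
        loss = torch.mean((X_sim - X_batch) ** 2)      # simulation-error loss
        loss.backward()
        optimizer.step()
        return loss.item()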
--------------------------------------------------------------------------------
/torchid/tmp/lstmfitter.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class LSTMSimulator(nn.Module):
7 | def __init__(self, n_input = 1, n_hidden_1 = 64, n_hidden_2 = 32, n_output = 1):
8 |
9 | self.n_input = n_input
10 | self.n_hidden_1 = n_hidden_1
11 | self.n_hidden_2 = n_hidden_2
12 | self.n_output = n_output
13 |
14 | super(LSTMSimulator, self).__init__()
15 | self.lstm1 = nn.LSTMCell(self.n_input, self.n_hidden_1) # input size, hidden size
16 | self.lstm2 = nn.LSTMCell(self.n_hidden_1, self.n_hidden_2)
17 | self.linear = nn.Linear(self.n_hidden_2, self.n_output)
18 |
19 | def forward(self, input):
20 | batch_size = input.size(0)
21 | outputs = []
22 | h_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
23 | c_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
24 | h_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
25 | c_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
26 |
27 | seq_len = input.size(1)
28 |         for t in range(seq_len):
29 | input_t = input[:, t, :]
30 | h_t, c_t = self.lstm1(input_t, (h_t, c_t)) # input_t, (hidden_state_t, cell_state_t) -> hidden_state_{t+1}, cell_state_{t+1}
31 | h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
32 | output = self.linear(h_t2)
33 | outputs += [output]
34 | outputs = torch.stack(outputs, 1)#.squeeze(2)
35 | return outputs
36 |
37 |
38 | class LSTMAutoRegressive(nn.Module):
39 |
40 | def __init__(self, n_input = 1, n_hidden_1 = 64, n_hidden_2 = 32, n_output = 1):
41 | self.n_input = n_input
42 | self.n_hidden_1 = n_hidden_1
43 | self.n_hidden_2 = n_hidden_2
44 | self.n_output = n_output
45 |
46 | super(LSTMAutoRegressive, self).__init__()
47 | self.lstm1 = nn.LSTMCell(self.n_input + self.n_output, self.n_hidden_1) # input size, hidden size
48 | self.lstm2 = nn.LSTMCell(self.n_hidden_1, self.n_hidden_2)
49 | self.linear = nn.Linear(self.n_hidden_2, self.n_output)
50 |
51 | def forward(self, input, delayed_output): # future=... to predict in the future!
52 | batch_size = input.size(0)
53 | outputs = []
54 | h_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
55 | c_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
56 | h_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
57 | c_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
58 |
59 | seq_len = input.size(1)
60 |         for t in range(seq_len):
61 | input_t = input[:, t, :]
62 | delayed_output_t = delayed_output[:,t,:]
63 | feature_t = torch.stack((input_t, delayed_output_t), 1).squeeze(-1)
64 | h_t, c_t = self.lstm1(feature_t, (h_t, c_t)) # input_t, (hidden_state_t, cell_state_t) -> hidden_state_{t+1}, cell_state_{t+1}
65 | h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
66 | output = self.linear(h_t2)
67 | outputs += [output]
68 | outputs = torch.stack(outputs, 1)#.squeeze(2)
69 | return outputs
70 |
71 |
72 | def forward_sim(self, input, delayed_output_t=None):
73 | batch_size = input.size(0)
74 | outputs = []
75 | h_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
76 | c_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
77 | h_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
78 | c_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
79 |
80 | if delayed_output_t is None:
81 | delayed_output_t = torch.zeros(batch_size, self.n_output)
82 |
83 | seq_len = input.size(1)
84 |         for t in range(seq_len):
85 | input_t = input[:, t, :]
86 | #delayed_output_t = delayed_output[:,t,:]
87 | feature_t = torch.stack((input_t, delayed_output_t), 1).squeeze(-1)
88 | h_t, c_t = self.lstm1(feature_t, (h_t, c_t)) # input_t, (hidden_state_t, cell_state_t) -> hidden_state_{t+1}, cell_state_{t+1}
89 | h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
90 | output = self.linear(h_t2)
91 | delayed_output_t = output
92 | outputs += [output]
93 | outputs = torch.stack(outputs, 1)#.squeeze(2)
94 | return outputs
95 |
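Shape conventions for the batch-first modules above, as a usage sketch:

    import torch

    model = LSTMSimulator(n_input=1, n_output=1)
    u = torch.randn(16, 100, 1)   # (batch, seq_len, n_input)
    y = model(u)                  # -> (16, 100, 1): one output per time step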
--------------------------------------------------------------------------------
/torchid/tmp/lstmfitter_transposed.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class LSTMSimulator(nn.Module):
7 | def __init__(self):
8 |
9 | self.n_input = 1
10 | self.n_hidden_1 = 64
11 | self.n_hidden_2 = 32
12 | self.n_output = 1
13 |
14 | super(LSTMSimulator, self).__init__()
15 | self.lstm1 = nn.LSTMCell(self.n_input, self.n_hidden_1) # input size, hidden size
16 | self.lstm2 = nn.LSTMCell(self.n_hidden_1, self.n_hidden_2)
17 | self.linear = nn.Linear(self.n_hidden_2, self.n_output)
18 |
19 | def forward(self, input, future = 0):
20 | batch_size = input.size(1)
21 | outputs = []
22 | h_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
23 | c_t = torch.zeros(batch_size, self.n_hidden_1)#, dtype=torch.double)
24 | h_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
25 | c_t2 = torch.zeros(batch_size, self.n_hidden_2)#, dtype=torch.double)
26 |
27 | seq_len = input.size(0)
28 |         for t in range(seq_len):
29 | input_t = input[t]#[:, t, :]
30 | h_t, c_t = self.lstm1(input_t, (h_t, c_t)) # input_t, (hidden_state_t, cell_state_t) -> hidden_state_{t+1}, cell_state_{t+1}
31 | h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
32 | output = self.linear(h_t2)
33 | outputs += [output]
34 | outputs = torch.stack(outputs, 0)#.squeeze(2)
35 | return outputs
36 |
--------------------------------------------------------------------------------
/torchid/tmp/ssfitter_jit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from typing import List
5 |
6 | class NeuralStateSpaceSimulator(nn.Module):
7 | def __init__(self, ss_model):
8 | super(NeuralStateSpaceSimulator, self).__init__()
9 | self.ss_model = ss_model
10 |
11 | def forward(self, x0, u):
12 |
13 | X_list: List[torch.Tensor] = []
14 | #X_list = []
15 | xstep = x0
16 | for ustep in u:
17 | X_list.append(xstep)
18 | #X[i,:] = xstep
19 | #ustep = u[i]
20 | dx = self.ss_model(xstep, ustep)
21 | xstep = xstep + dx
22 |
23 | X = torch.stack(X_list, 0)#.squeeze(2)
24 | return X
25 |
26 | class NeuralSumODE():
27 | def __init__(self, ss_model_list):
28 | self.ss_model_list = ss_model_list
29 |
30 | def get_DX(self, X, U):
31 | DX = torch.zeros(X.shape)
32 | for model in self.ss_model_list:
33 | DX += model(X,U)
34 | return DX
35 |
36 | def f_ARX(self, X, U):
37 | X_pred = torch.empty(X.shape)
38 | X_pred[0,:] = X[0,:]
39 | DX = self.get_DX(X[0:-1], U[0:-1])
40 | X_pred[1:,:] = X[0:-1,:] + DX
41 | return X_pred
42 |
43 | def f_OE(self, x0, u):
44 | N = np.shape(u)[0]
45 | nx = np.shape(x0)[0]
46 |
47 | X = torch.empty((N,nx))
48 | xstep = x0
49 | for i in range(N):
50 | X[i,:] = xstep
51 | ustep = u[i]
52 | dx = self.get_DX(xstep, ustep)
53 | xstep = xstep + dx
54 | return X
55 |
56 | def f_OE_minibatch(self, x0_batch, U_batch):
57 | len_batch = x0_batch.shape[0]
58 | n_x = x0_batch.shape[1]
59 | T_batch = U_batch.shape[1]
60 |
61 | X_pred = torch.empty((len_batch, T_batch, n_x))
62 | xstep = x0_batch
63 | for i in range(T_batch):
64 | X_pred[:,i,:] = xstep
65 | ustep = U_batch[:,i,:]
66 | dx = self.get_DX(xstep, ustep)
67 | xstep = xstep + dx
68 | return X_pred
69 |
70 | def f_ODE(self,t,x,u):
71 | x = torch.tensor(x.reshape(1,-1).astype(np.float32))
72 | u = torch.tensor(u.reshape(1,-1).astype(np.float32))
73 |         return np.array(self.get_DX(x, u).detach()).ravel().astype(np.float64)  # sum-of-models derivative as a NumPy-facing ODE right-hand side
74 |
--------------------------------------------------------------------------------
/torchid/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | class RunningAverageMeter(object):
5 | """Computes and stores the average and current value"""
6 |
7 | def __init__(self, momentum=0.99):
8 | self.momentum = momentum
9 | self.reset()
10 |
11 | def reset(self):
12 | self.val = None
13 | self.avg = 0
14 |
15 | def update(self, val):
16 | if self.val is None:
17 | self.avg = val
18 | else:
19 | self.avg = self.avg * self.momentum + val * (1 - self.momentum)
20 | self.val = val
21 |
22 |
23 | def get_torch_regressor_mat(x, n_a):
24 | seq_len = x.shape[0]
25 | X = torch.empty((seq_len - n_a + 1, n_a))
26 | for idx in range(seq_len - n_a + 1):
27 | X[idx] = x[idx:idx + n_a].flip([0])
28 | return X
29 |
30 |
31 | def get_random_batch_idx(num_samples, batch_size, seq_len, batch_first=True):
32 | batch_start = np.random.choice(np.arange(num_samples - seq_len, dtype=np.int64), batch_size, replace=False) # batch start indices
33 | batch_idx = batch_start[:,np.newaxis] + np.arange(seq_len) # batch all indices
34 | if not batch_first:
35 | batch_idx = batch_idx.T
36 | return batch_start, batch_idx
37 |
38 |
39 | def get_sequential_batch_idx(num_samples, seq_len, batch_first=True):
40 | batch_size = num_samples // seq_len
41 | batch_start = np.arange(0, batch_size, dtype=np.int64) * seq_len
42 | batch_idx = batch_start[:,np.newaxis] + np.arange(seq_len) # batch all indices
43 | if not batch_first:
44 | batch_idx = batch_idx.T
45 | return batch_start, batch_idx
46 |
47 |
48 | if __name__ == '__main__':
49 |
50 | N = 10
51 | n_a = 3
52 | x_np = np.arange(N).reshape(-1, 1).astype(np.float32)
53 | x = torch.tensor(x_np)
54 |
55 | X = torch.empty((N - n_a + 1, n_a))
56 | for idx_1 in range(N - n_a + 1):
57 | X[idx_1] = x[idx_1:idx_1 + n_a].flip([0])
58 |
59 | idx_start = np.arange(3,10, dtype=int)
60 | idx_1 = idx_start[:, np.newaxis] - np.arange(3, dtype=int)
61 |     X_batch = x[[idx_1]]  # fancy indexing: one length-3 window of past values per start index
62 |
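Example of the batching helpers above (values chosen only for illustration):

    batch_start, batch_idx = get_sequential_batch_idx(num_samples=1000, seq_len=100)
    # batch_start: (10,)    start indices 0, 100, ..., 900
    # batch_idx:   (10, 100) row i holds batch_start[i], ..., batch_start[i] + 99
    # get_random_batch_idx instead draws batch_size random (possibly overlapping) starts.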
--------------------------------------------------------------------------------