├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── Bogacki_Shampine.ipynb ├── Cash_Karp.ipynb ├── Demo_BS5.ipynb ├── Demo_CFMR7osc.ipynb ├── Demo_CKdisc.ipynb ├── Demo_ESDIRK.ipynb ├── Demo_Nystrom.ipynb ├── Demo_SSV2stab.ipynb ├── Demo_own_RK.ipynb ├── Demo_sensitivity.ipynb ├── Demo_solve_ivp.ipynb ├── Prince.ipynb ├── SC stability │ ├── stab_dS_BS5_Gustafsson.png │ ├── stab_dS_BS5_Soederlind.png │ ├── stab_dS_BS5_Watts.png │ ├── stab_dS_BS5_standard.png │ ├── stab_dS_CFMR7osc_Gustafsson.png │ ├── stab_dS_CFMR7osc_Soederlind.png │ ├── stab_dS_CFMR7osc_Watts.png │ ├── stab_dS_CFMR7osc_standard.png │ ├── stab_dS_CK5_Gustafsson.png │ ├── stab_dS_CK5_Soederlind.png │ ├── stab_dS_CK5_Watts.png │ ├── stab_dS_CK5_standard.png │ ├── stab_dS_Me4_Gustafsson.png │ ├── stab_dS_Me4_Soederlind.png │ ├── stab_dS_Me4_Watts.png │ ├── stab_dS_Me4_standard.png │ ├── stab_dS_Pr7_Gustafsson.png │ ├── stab_dS_Pr7_Soederlind.png │ ├── stab_dS_Pr7_Watts.png │ ├── stab_dS_Pr7_standard.png │ ├── stab_dS_Pr8_Gustafsson.png │ ├── stab_dS_Pr8_Soederlind.png │ ├── stab_dS_Pr8_Watts.png │ ├── stab_dS_Pr8_standard.png │ ├── stab_dS_Pr9_Gustafsson.png │ ├── stab_dS_Pr9_Soederlind.png │ ├── stab_dS_Pr9_Watts.png │ ├── stab_dS_Pr9_standard.png │ ├── stab_dS_Ts5_Gustafsson.png │ ├── stab_dS_Ts5_Soederlind.png │ ├── stab_dS_Ts5_Watts.png │ └── stab_dS_Ts5_standard.png ├── Shampine_Gordon_Watts.ipynb ├── all_methods.ipynb ├── interpolants │ ├── interpolation_BS5_best.png │ ├── interpolation_BS5_free.png │ ├── interpolation_BS5_low.png │ ├── interpolation_CFMR7osc.png │ ├── interpolation_CK5.png │ ├── interpolation_Fi4N.png │ ├── interpolation_Fi5N_0.png │ ├── interpolation_Fi5N_1.png │ ├── interpolation_Fi5N_2_orig.png │ ├── interpolation_Fi5N_3.png │ ├── interpolation_KC3I.png │ ├── interpolation_KC4I.png │ ├── interpolation_KC4Ia.png │ ├── interpolation_Kv3I.png │ ├── interpolation_MR6NN.png │ ├── interpolation_Me4.png │ ├── interpolation_Mu5Nmb.png │ ├── interpolation_Mu5Nmb_better.png │ └── interpolation_Ts5.png └── stability regions │ ├── stabilityBS5.png │ ├── stabilityCFMR7osc.png │ ├── stabilityCK5.png │ ├── stabilityCKdisc.png │ ├── stabilityFi4N.png │ ├── stabilityFi5N.png │ ├── stabilityMe4.png │ ├── stabilityMu5Nmb.png │ ├── stabilityPr7.png │ ├── stabilityPr8.png │ ├── stabilityPr9.png │ └── stabilityTs5.png ├── extensisq ├── __init__.py ├── bogacki.py ├── calvo.py ├── cash.py ├── common.py ├── fine.py ├── hosea.py ├── kennedy.py ├── kvaerno.py ├── merson.py ├── mikkawy.py ├── murua.py ├── prince.py ├── sensitivity.py ├── shampine.py ├── sommeijer.py └── tsitouras.py ├── setup.py └── tests ├── order_conditions.py ├── test_DAE.py ├── test_ivp.py ├── test_rk.py ├── test_rkn.py └── test_sens.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # VS code 132 | .vscode/* 133 | .history/ 134 | tempCodeRunnerFile.py 135 | 136 | # simple local test 137 | test.py 138 | test_stiff.py 139 | test_dense_output.py 140 | test Nystrom methods.py 141 | docs/SC stability 142 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 W.R. Kampinga 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # extensisq 2 | This package extends scipy.integrate with various methods (OdeSolver classes) for the solve_ivp function. 3 | 4 | ![python:3](https://img.shields.io/pypi/pyversions/extensisq?style=flat-square) 5 | ![platform:noarch](https://img.shields.io/conda/pn/conda-forge/extensisq?style=flat-square) 6 | [![license:MIT](https://img.shields.io/github/license/WRKampi/extensisq?style=flat-square)](https://github.com/WRKampi/extensisq/blob/main/LICENSE) 7 | [![downloads pypi](https://img.shields.io/pypi/dm/extensisq?label=PyPI%20downloads&style=flat-square)](https://pypistats.org/packages/extensisq) 8 | [![downloads conda](https://img.shields.io/conda/dn/conda-forge/extensisq?label=conda%20downloads&style=flat-square)](https://anaconda.org/conda-forge/extensisq) 9 | [![release-date](https://img.shields.io/github/release-date/WRKampi/extensisq?style=flat-square)](https://github.com/WRKampi/extensisq/releases) 10 | 11 | 12 | ## Installation 13 | 14 | You can install extensisq from [PyPI](https://pypi.org/project/extensisq/): 15 | 16 | pip install extensisq 17 | 18 | Or, if you'd rather use [conda](https://anaconda.org/conda-forge/extensisq): 19 | 20 | conda install conda-forge::extensisq 21 | 22 | 23 | ## Example 24 | Borrowed from the the scipy documentation: 25 | 26 | from scipy.integrate import solve_ivp 27 | from extensisq import BS5 28 | 29 | def exponential_decay(t, y): return -0.5 * y 30 | sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8], method=BS5) 31 | 32 | print(sol.t) 33 | print(sol.y) 34 | 35 | Notice that the class `BS5` is passed to `solve_ivp`, not the string `"BS5"`. The other methods can be used similarly. 36 | 37 | More examples are available as notebooks: 38 | 1. [Integration with Scipy's `solve_ivp` function](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_solve_ivp.ipynb) 39 | 2. [Van der Pol's equation, Shampine Gordon Watts method `SWAG`](https://github.com/WRKampi/extensisq/blob/main/docs/Shampine_Gordon_Watts.ipynb) 40 | 3. [Implicit methods for stiff ODEs and DAEs](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_ESDIRK.ipynb) 41 | 4. [About `BS5` and its interpolants](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_BS5.ipynb) 42 | 5. [Higher order Prince methods `Pr7`, `Pr8` and `Pr9`](https://github.com/WRKampi/extensisq/blob/main/docs/Prince.ipynb) 43 | 6. [Special method `CKdisc` for non-smooth problems](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_CKdisc.ipynb) 44 | 7. [Special method `CFMR7osc` for oscillatory problems](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_CFMR7osc.ipynb) 45 | 8. [Special method `SSV2stab` for large, mildly stiff problems](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_SSV2stab.ipynb) 46 | 9. [Fifth order methods compared](https://github.com/WRKampi/extensisq/blob/main/docs/all_methods.ipynb) 47 | 10. [Runge Kutta Nyström methods for second order equations](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_Nystrom.ipynb) 48 | 11. [How to implement other explicit Runge Kutta methods](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_own_RK.ipynb) 49 | 12. 
[Sensitivity analysis](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_sensitivity.ipynb) 50 | 51 | 52 | ## Explicit Methods 53 | 54 | Currently, several explicit methods (for non-stiff problems) are provided. 55 | 56 | One multistep method is implemented: 57 | * `SWAG`: the variable order Adams-Bashforth-Moulton predictor-corrector method of Shampine, Gordon and Watts [5-7]. This is a translation of the Fortran code 'DDEABM' [C]. Matlab's method 'ode113' is related. 58 | 59 | Three explicit Runge Kutta methods of order 5 are implemented: 60 | * `BS5`: efficient fifth order method by Bogacki and Shampine [1,A]. Three interpolants are included: the original accurate fifth order interpolant, a lower cost fifth order one, and a 'free' fourth order one. 61 | * `CK5`: fifth order method with the coefficients from [2], for general use. 62 | * `Ts5`: relatively new solver (2011) by Tsitouras, optimized with fewer simplifying assumptions [3]. 63 | 64 | One fourth order method: 65 | * `Me4`: Merson's method, the first embedded RK method [14]. The embedded method for error estimation is 5th order for linear problems and 3rd order for general problems. A 3rd order interpolant is added. This method has a large stability region. It may be useful as alternative to 'RK23' for solving problems to lower accuracy. 66 | 67 | Three higher order explicit Runge Kutta methods by Prince [4] are implemented: 68 | * `Pr7`: a seventh order discrete method with fifth order error estimate, derived from a sixth order continuous method. 69 | * `Pr8`: an eighth order discrete method with sixth order error estimate, derived from a seventh order continuous method. 70 | * `Pr9`: a ninth order discrete method with seventh order error estimate, derived from an eighth order continuous method. 71 | 72 | The numbers in the names refer to the discrete methods, while the orders in [4] refer to the continuous methods. These methods are relatively efficient when dense output is needed, because the interpolants are free. (Other high-order methods typically need several additional function evaluations for dense output.) 73 | 74 | Three methods for specific types of problems are available: 75 | * `CKdisc`: variable order solver by Cash and Karp, tailored to solve non-smooth problems efficiently [2]. 76 | * `CFMR7osc`: explicit Runge Kutta method, with algebraic order 7, dispersion order 10 and dissipation order 9, to efficiently and accurately solve problems with oscillating solutions [12]. A free 5th order interpolant for dense output is added. 77 | * `SSV2stab`: second order stabilized Runge Kutta Chebyshev method [13,C], to explicity and efficiently solve large systems of mildly stiff ordinary differential equations up to low to moderate accuracy. Equations arising from semi-discretization of parabolic PDEs are a typical use case. 78 | 79 | Several Nyström methods are added. These are for second order initial value problems. Three methods are for general problems and one is for the strict problem in which the second derivative should not depend on the first derivative. The [demo](https://github.com/WRKampi/extensisq/blob/main/docs/Demo_Nystrom.ipynb) shows how to use these methods. 80 | * `Fi4N`: 4th order general Nyström method of Fine [16]. 81 | * `Fi5N`: 5th order general Nyström method of Fine [16, 17]. 82 | * `Mu5Nmb`: 5th order general Nyström method of Murua for integration of multibody equations. This is method "RKN5459" in the paper [18]. I added two interpolants. 
83 | * `MR6NN`: 6th order strict Nyström method of El-Mikkawy and Rahmo [19]. I couldn't find the interpolant that the paper refers to as future work. However, I created a free C2-continuous sixth order interpolant and added it to this method. 84 | 85 | ## Implicit methods 86 | 87 | Several ESDIRK methods are implemented. These are single step Runge Kutta methods. The implementation is inspired by the theory in a paper of Shampine [20]. 88 | 89 | Two methods of Hosea and Shampine [21] are available: 90 | * `HS2I` or `TRBDF2` (alias): A 2nd order L-stable method (main, not secondary) that splits the step into a trapezium substep followed by a BDF substep. A 3rd order embedded method is used for error detection and stepsize adaptation. This method has a piecewise cubic C1-continuous interpolant. The main method has a rich history and was created by Banks et al. Matlab's method 'ode23tb' is related. 91 | * `HS2Ia` or `TRX2` (alias): An alternative 2nd order method with a similar construction that uses two trapezium substeps. This method is A-stable (not L-stable) and may be useful if numerical damping is undesirable. Matlab's method 'ode23t' is related. 92 | 93 | Three methods of Kennedy and Carpenter are available. These have L-stable main and secondary methods. Each method includes two interpolants of the same order as the main method. One interpolant is C0-continuous (continuous solution, but derivatives jump between steps), the other is C1-continuous (solution and derivatives are both continuous). The C0-interpolant is selected by default. 94 | * `KC3I`: a 5-stage, 3rd order method with 2nd order secondary method called ESDIRK3(2)5L[2]SA by its authors[22]. The interpolant in the paper is not used, because it doesn't seem to be C0-continuous. 95 | * `KC4I`: a 6-stage, 4th order method with 3rd order secondary method called ESDIRK4(3)6L[2]SA by its authors[22]. The C0-continuous interpolant is as given in the paper. 96 | * `KC4Ia`: this 7-stage method, named ESDIRK4(3)7L[2]SA in [23], is similar to the previous method, but has an additional stage each step. This results in lower error constants and a lower value of the RK diagonal coefficient (1/8 vs. 1/4). 97 | 98 | One method of Kværnø is implemented: 99 | * `Kv3I`: a 4 stage 3rd order method with 2nd order secondary method [24]. Both are stiffly accurate. The main method is L-stable and the secondary method is only A-stable. This method also has two interpolants: C0 and C1-continuous. 100 | 101 | ### Index 1 DAEs 102 | 103 | The Implicit methods in Extensisq have limited options to solve DAEs. These can be specified in mass matrix form: 104 | 105 | $$ M \dot{y}=f(t, y) $$ 106 | 107 | This looks similar to an ODE, but if $M$ is singular, it becomes a DAE. Extensisq only supports IVPs with a constant mass matrix and the DAE index should not exceed 1. Example 3 above demonstrates how a DAE can be solved. 108 | 109 | 110 | ## Sensitivity analysis 111 | Three methods for sensitiviy analysis are available; see [15] and Example 12 above. These can be used with any of the solvers. 112 | * `sens_forward`: to calculate the sensitivity of all solution components to (a few) parameters. 113 | * `sens_adjoint_end`: to calculate the sensitivity of a scalar function of the solution to (many) parameters. 114 | * `sens_adjoint_int`: to calculate the sensitivity of a scalar integral of the solution to (many) parameters. 115 | 116 | ## Other features 117 | The initial step size, when not supplied by you, is estimated using the method of Watts [7,B]. 
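For example, you can rely on this estimate or supply `first_step` yourself (a minimal sketch; `fun`, `t_span` and `y0` are placeholders for your own problem):

    from scipy.integrate import solve_ivp
    from extensisq import BS5

    sol = solve_ivp(fun, t_span, y0, method=BS5)                   # initial step estimated (method of Watts)
    sol = solve_ivp(fun, t_span, y0, method=BS5, first_step=1e-4)  # user-supplied initial step
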
This method analyzes your problem with a few (3 to 4) evaluations and carefully estimates a safe stepsize to start the integration with. 118 | 119 | Most of extensisq's explicit Runge Kutta methods have stiffness detection. If many steps fail, or if the integration needs a lot of steps, the power iteration method of Shampine [8,A] is used to test your problem for stiffness. You will get a warning if your problem is diagnosed as stiff. The kind of roots (real, complex or nearly imaginary) is also reported, such that you can select a stiff solver that better suits your problem. 120 | 121 | Second order stepsize controllers [9-11] can be enabled for most of extensisq's Runge Kutta methods. You can set your own coefficients, or select one of the default values. These values are different for explicit methods than for implicit methods. 122 | 123 | ## References 124 | [1] P. Bogacki, L.F. Shampine, "An efficient Runge-Kutta (4,5) pair", Computers & Mathematics with Applications, Vol. 32, No. 6, 1996, pp. 15-28. https://doi.org/10.1016/0898-1221(96)00141-1 125 | 126 | [2] J. R. Cash, A. H. Karp, "A Variable Order Runge-Kutta Method for Initial Value Problems with Rapidly Varying Right-Hand Sides", ACM Trans. Math. Softw., Vol. 16, No. 3, 1990, pp. 201-222. https://doi.org/10.1145/79505.79507 127 | 128 | [3] Ch. Tsitouras, "Runge-Kutta pairs of order 5(4) satisfying only the first column simplifying assumption", Computers & Mathematics with Applications, Vol. 62, No. 2, 2011, pp. 770 - 775. https://doi.org/10.1016/j.camwa.2011.06.002 129 | 130 | [4] P.J. Prince, "Parallel Derivation of Efficient Continuous/Discrete Explicit Runge-Kutta Methods", Guisborough TS14 6NP U.K., September 6 2018. http://www.peteprince.co.uk/parallel.pdf 131 | 132 | [5] L.F. Shampine and M.K. Gordon, "Computer solution of ordinary differential equations: The initial value problem", San Francisco, W.H. Freeman, 1975. 133 | 134 | [6] H.A. Watts and L.F. Shampine, "Smoother Interpolants for Adams Codes", SIAM Journal on Scientific and Statistical Computing, Vol. 7, No. 1, 1986, pp. 334-345. https://doi.org/10.1137/0907022 135 | 136 | [7] H.A. Watts, "Starting step size for an ODE solver", Journal of Computational and Applied Mathematics, Vol. 9, No. 2, 1983, pp. 177-191. https://doi.org/10.1016/0377-0427(83)90040-7 137 | 138 | [8] L.F. Shampine, "Diagnosing Stiffness for Runge–Kutta Methods", SIAM Journal on Scientific and Statistical Computing, Vol. 12, No. 2, 1991, pp. 260-272. https://doi.org/10.1137/0912015 139 | 140 | [9] K. Gustafsson, "Control Theoretic Techniques for Stepsize Selection in Explicit Runge-Kutta Methods", ACM Trans. Math. Softw., Vol. 17, No. 4, 1991, pp. 533–554. https://doi.org/10.1145/210232.210242 141 | 142 | [10] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. https://doi.org/10.1023/A:1021160023092 143 | 144 | [11] G. Söderlind, "Digital Filters in Adaptive Time-Stepping", ACM Trans. Math. Softw., Vol. 29, No. 1, 2003, pp. 1–26. https://doi.org/10.1145/641876.641877 145 | 146 | [12] M. Calvo, J.M. Franco, J.I. Montijano, L. Rández, "Explicit Runge-Kutta methods for initial value problems with oscillating solutions", Journal of Computational and Applied Mathematics, Vol. 76, No. 1–2, 1996, pp. 195-212. https://doi.org/10.1016/S0377-0427(96)00103-3 147 | 148 | [13] B.P. Sommeijer, L.F. Shampine, J.G. Verwer, "RKC: An explicit solver for parabolic PDEs", Journal of Computational and Applied Mathematics, Vol. 88, No. 2, 1998, pp. 
315-326. https://doi.org/10.1016/S0377-0427(97)00219-7 149 | 150 | [14] E. Hairer, G. Wanner, S.P. Norsett, "Solving Ordinary Differential Equations I", Springer Berlin, Heidelberg, 1993, https://doi.org/10.1007/978-3-540-78862-1 151 | 152 | [15] R.Serban, A.C. Hindmarsh, "CVODES: The Sensitivity-Enabled ODE Solver in SUNDIALS", 5th International Conference on Multibody Systems Nonlinear Dynamics and Control, Vol. 6, 2005, https://doi.org/10.1115/DETC2005-85597 153 | 154 | [16] J.M. Fine, "Low order practical Runge-Kutta-Nyström methods", Computing, Vol. 38, 1987, pp. 281–297, https://doi.org/10.1007/BF02278707 155 | 156 | [17] J.M. Fine, "Interpolants for Runge-Kutta-Nyström methods", Computing, Vol. 39, 1987, pp. 27–42, https://doi.org/10.1007/BF02307711 157 | 158 | [18] A. Murua, "Runge-Kutta-Nyström methods for general second order ODEs with application to multi-body systems", Applied Numerical Mathematics, Vol. 28, Issues 2–4, 1998, pp. 387-399, https://doi.org/10.1016/S0168-9274(98)00055-5 159 | 160 | [19] M. El-Mikkawy, E.D. Rahmo, "A new optimized non-FSAL embedded Runge–Kutta–Nystrom algorithm of orders 6 and 4 in six stages", Applied Mathematics and Computation, Vol. 145, Issue 1, 2003, pp. 33-43, https://doi.org/10.1016/S0096-3003(02)00436-8 161 | 162 | [20] L. F. Shampine, "Implementation of Implicit Formulas for the Solution of ODEs", SIAM Journal on Scientific and Statistical Computing, Vol 1, No. 1, pp. 103-118, 1980, https://doi.org/10.1137/0901005. 163 | 164 | [21] M.E. Hosea, L.F. Shampine, "Analysis and implementation of TR-BDF2", Applied Numerical Mathematics, Vol. 20, No. 1-2, pp. 21-37, 1996, https://doi.org/10.1016/0168-9274(95)00115-8. 165 | 166 | [22] C. A. Kennedy, M.H. Carpenter, "Diagonally Implicit Runge-Kutta Methods for Ordinary Differential Equations. A Review", Langley Research Center, document ID 20160005923, https://ntrs.nasa.gov/citations/20160005923. 167 | 168 | [23] C. A. Kennedy, M.H. Carpenter, "Diagonally implicit Runge-Kutta methods for stiff ODEs", Applied Numerical Mathematics, Vol. 146, pp. 221-244, 2019, https://doi.org/10.1016/j.apnum.2019.07.008. 169 | 170 | [24] A. Kværnø, "Singly Diagonally Implicit Runge-Kutta Methods with an Explicit First Stage", BIT Numerical Mathematics, Vol. 44, pp. 489-502, 2004, https://doi.org/10.1023/B:BITN.0000046811.70614.38 171 | 172 | 173 | ## Original source codes (Fortran) 174 | 175 | [A] RKSuite, R.W. Brankin, I. Gladwell, L.F. Shampine. https://www.netlib.org/ode/rksuite/ 176 | 177 | [B] DDEABM, L.F. Shampine, H.A. Watts, M.K. Gordon. https://www.netlib.org/slatec/src/ 178 | 179 | [C] RKC, B.P. Sommeijer, L.F. Shampine, J.G. Verwer. 
https://www.netlib.org/ode/ 180 | -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_BS5_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_BS5_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_BS5_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_BS5_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_BS5_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_BS5_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_BS5_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_BS5_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CFMR7osc_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CFMR7osc_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CFMR7osc_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CFMR7osc_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CFMR7osc_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CFMR7osc_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CFMR7osc_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CFMR7osc_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CK5_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CK5_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CK5_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CK5_Soederlind.png 
-------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CK5_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CK5_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_CK5_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_CK5_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Me4_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Me4_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Me4_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Me4_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Me4_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Me4_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Me4_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Me4_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr7_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr7_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr7_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr7_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr7_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr7_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr7_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr7_standard.png -------------------------------------------------------------------------------- /docs/SC 
stability/stab_dS_Pr8_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr8_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr8_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr8_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr8_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr8_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr8_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr8_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr9_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr9_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr9_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr9_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr9_Watts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr9_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Pr9_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Pr9_standard.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Ts5_Gustafsson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Ts5_Gustafsson.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Ts5_Soederlind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Ts5_Soederlind.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Ts5_Watts.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Ts5_Watts.png -------------------------------------------------------------------------------- /docs/SC stability/stab_dS_Ts5_standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/SC stability/stab_dS_Ts5_standard.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_BS5_best.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_BS5_best.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_BS5_free.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_BS5_free.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_BS5_low.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_BS5_low.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_CFMR7osc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_CFMR7osc.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_CK5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_CK5.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Fi4N.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Fi4N.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Fi5N_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Fi5N_0.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Fi5N_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Fi5N_1.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Fi5N_2_orig.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Fi5N_2_orig.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Fi5N_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Fi5N_3.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_KC3I.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_KC3I.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_KC4I.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_KC4I.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_KC4Ia.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_KC4Ia.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Kv3I.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Kv3I.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_MR6NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_MR6NN.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Me4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Me4.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Mu5Nmb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Mu5Nmb.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Mu5Nmb_better.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Mu5Nmb_better.png -------------------------------------------------------------------------------- /docs/interpolants/interpolation_Ts5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/interpolants/interpolation_Ts5.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityBS5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityBS5.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityCFMR7osc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityCFMR7osc.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityCK5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityCK5.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityCKdisc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityCKdisc.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityFi4N.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityFi4N.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityFi5N.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityFi5N.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityMe4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityMe4.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityMu5Nmb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityMu5Nmb.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityPr7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityPr7.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityPr8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityPr8.png 
-------------------------------------------------------------------------------- /docs/stability regions/stabilityPr9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityPr9.png -------------------------------------------------------------------------------- /docs/stability regions/stabilityTs5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WRKampi/extensisq/0652335201571fb6e7cfef5ab686046d64f3a49f/docs/stability regions/stabilityTs5.png -------------------------------------------------------------------------------- /extensisq/__init__.py: -------------------------------------------------------------------------------- 1 | """This package extends scipy.integrate with various methods 2 | (OdeSolver classes) for the solve_ivp function. 3 | """ 4 | from extensisq.common import NFS, NFI, NLS 5 | from extensisq.tsitouras import Ts5 6 | from extensisq.bogacki import BS5 7 | from extensisq.cash import CK5, CKdisc 8 | from extensisq.prince import Pr7, Pr8, Pr9 9 | from extensisq.shampine import SWAG 10 | from extensisq.calvo import CFMR7osc 11 | from extensisq.sommeijer import SSV2stab 12 | from extensisq.merson import Me4 13 | from extensisq.fine import Fi4N, Fi5N 14 | from extensisq.murua import Mu5Nmb 15 | from extensisq.mikkawy import MR6NN 16 | from extensisq.hosea import HS2I, HS2Ia, TRBDF2, TRX2 17 | from extensisq.kennedy import KC3I, KC4I, KC4Ia 18 | from extensisq.kvaerno import Kv3I 19 | from extensisq.sensitivity import (sens_forward, sens_adjoint_end, 20 | sens_adjoint_int) 21 | 22 | __version__ = '0.6.0' 23 | __author__ = 'W.R. Kampinga' 24 | __copyright__ = 'Copyright 2025, W.R. Kampinga' 25 | __license__ = 'MIT' 26 | __credits__ = ( 27 | 'scipy', 'L.F Shampine', 'P. Bogacki', 'R.W. Brankin', 'I. Gladwell', 28 | 'J.R. Cash', 'A.H. Karp', 'Ch. Tsitouras', 'P.J. Prince', 'H.A. Watts', 29 | 'M.K. Gordon', 'G. Soederlind', 'K. Gustafsson', 'M. Calvo', 'J.M. Franco', 30 | 'J.I. Montijano', 'L. Randez', 'B.P. Sommeijer', 'J.G. Verwer', 31 | 'E. Hairer', 'A.C. Hindmarsh', 'R. Serban', 'R.H. Merson', 'J.M. Fine', 32 | 'A. Murua', 'M. El-Mikkawy', 'E.D. Rahmo', 'M.E. Hosea', 'C.A. Kennedy', 33 | 'M.H. Carpenter', 'A. Kvaerno') 34 | -------------------------------------------------------------------------------- /extensisq/bogacki.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import (norm, RungeKutta, HornerDenseOutput, NFS, 3 | calculate_scale, MAX_FACTOR) 4 | 5 | 6 | class BS5(RungeKutta): 7 | """Explicit Runge-Kutta method of order 5(4). 8 | 9 | This uses the Bogacki-Shampine pair of formulas [1]_. It is designed 10 | to be more efficient than the Dormand-Prince pair (RK45 in scipy). 11 | 12 | There are two independent fourth order estimates of the local error. 13 | The fifth order method is used to advance the solution (local 14 | extrapolation). Coefficients from [2]_ are used. 15 | 16 | The interpolator for dense output is of fifth order and needs three 17 | additional derivative function evaluations (when used). A free, fourth 18 | order interpolator is also available as method BS45_i. 19 | 20 | Can be applied in the complex domain. 21 | 22 | Parameters 23 | ---------- 24 | fun : callable 25 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 
26 | Here ``t`` is a scalar, and there are two options for the ndarray 27 | ``y``: It can either have shape (n,); then ``fun`` must return 28 | array_like with shape (n,). Alternatively it can have shape (n, k); 29 | then ``fun`` must return an array_like with shape (n, k), i.e., each 30 | column corresponds to a single column in ``y``. The choice between the 31 | two options is determined by `vectorized` argument (see below). 32 | t0 : float 33 | Initial time. 34 | y0 : array_like, shape (n,) 35 | Initial state. 36 | t_bound : float 37 | Boundary time - the integration won't continue beyond it. It also 38 | determines the direction of the integration. 39 | first_step : float or None, optional 40 | Initial step size. Default is ``None`` which means that the algorithm 41 | should choose. 42 | max_step : float, optional 43 | Maximum allowed step size. Default is np.inf, i.e., the step size is 44 | not bounded and determined solely by the solver. 45 | rtol, atol : float and array_like, optional 46 | Relative and absolute tolerances. The solver keeps the local error 47 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 48 | relative accuracy (number of correct digits). But if a component of `y` 49 | is approximately below `atol`, the error only needs to fall within 50 | the same `atol` threshold, and the number of correct digits is not 51 | guaranteed. If components of y have different scales, it might be 52 | beneficial to set different `atol` values for different components by 53 | passing array_like with shape (n,) for `atol`. Default values are 54 | 1e-3 for `rtol` and 1e-6 for `atol`. 55 | vectorized : bool, optional 56 | Whether `fun` is implemented in a vectorized fashion. A vectorized 57 | implementation offers no advantages for this solver. Default is False. 58 | nfev_stiff_detect : int, optional 59 | Number of function evaluations for stiffness detection. This number has 60 | multiple purposes. If it is set to 0, then stiffness detection is 61 | disabled. For other (positive) values it is used to represent a 62 | 'considerable' number of function evaluations (nfev). A stiffness test 63 | is done if many steps fail and each time nfev exceeds integer multiples 64 | of `nfev_stiff_detect`. For the assessment itself, the problem is 65 | assessed as non-stiff if the predicted nfev to complete the integration 66 | is lower than `nfev_stiff_detect`. The default value is 5000. 67 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 68 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 69 | size controller is, with k the exponent of the standard controller, 70 | _n for new and _o for old: 71 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 72 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 73 | Predefined parameters are [3]_: 74 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 75 | Soederlind "S" (0.6, -0.2, 0, 0.9), 76 | and "standard" (1, 0, 0, 0.9). 77 | The default for this method is "standard". 78 | interpolant : 'best', 'low' or 'free', optional 79 | Select the interpolant for dense output. The option 'best' is for the 80 | accurate fifth order interpolant described in [1], which needs 3 extra 81 | function evaluations per step. The option 'low' is for a less accurate 82 | fifth order interpolant which needs 'only' one extra function 83 | evaluation per step. 'free' is for a free fourth order interpolant. 84 | Recommendations: 'best' for events, 'free' for simple plotting or for 85 | long integrations for which the gobal error dominates. 
The accuracy of 86 | the method itself does not change and the extra function evaluations 87 | are only done at steps for which dense output is requested. The default 88 | is 'low': a safe option for which the method does not loose much of its 89 | performance when it is used with dense output. 90 | 91 | References 92 | ---------- 93 | .. [1] P. Bogacki, L.F. Shampine, "An efficient Runge-Kutta (4,5) pair", 94 | Computers & Mathematics with Applications, Vol. 32, No. 6, 1996, 95 | pp. 15-28. 96 | https://doi.org/10.1016/0898-1221(96)00141-1 97 | .. [2] RKSUITE: https://www.netlib.org/ode/rksuite/ 98 | .. [3] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 99 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 100 | https://doi.org/10.1023/A:1021160023092 101 | """ 102 | 103 | order = 5 104 | order_secondary = 4 105 | n_stages = 7 # the effective nr (total nr of stages is 8) 106 | n_extra_stages = 3 # for dense output 107 | tanang = 5.2 108 | stbrad = 3.9 109 | sc_params = "standard" 110 | 111 | # time step fractions 112 | C = np.array([0, 1/6, 2/9, 3/7, 2/3, 3/4, 1]) 113 | 114 | # coefficient matrix, including row of last stage 115 | A = np.array([ 116 | [0, 0, 0, 0, 0, 0, 0], 117 | [1/6, 0, 0, 0, 0, 0, 0], 118 | [2/27, 4/27, 0, 0, 0, 0, 0], 119 | [183/1372, -162/343, 1053/1372, 0, 0, 0, 0], 120 | [68/297, -4/11, 42/143, 1960/3861, 0, 0, 0], 121 | [597/22528, 81/352, 63099/585728, 58653/366080, 4617/20480, 0, 0], 122 | [174197/959244, -30942/79937, 8152137/19744439, 666106/1039181, 123 | -29421/29068, 482048/414219, 0] 124 | ]) 125 | 126 | # coefficients for propagating method 127 | B = np.array([587/8064, 0, 4440339/15491840, 24353/124800, 128 | 387/44800, 2152/5985, 7267/94080]) 129 | 130 | # coefficients for first error estimation method 131 | # this is actually the main error estimator 132 | E_pre = np.array([-3/1280, 0, 6561/632320, -343/20800, 243/12800, -1/95]) 133 | B_scale_pre = np.array([19/48, 0, -189/416, 343/780, 99/160, 0]) 134 | 135 | # coefficients for post error estimation method 136 | # this can account for sudden changes above c=3/4, which the main error 137 | # estimate cannot. 138 | E = np.array([2479/34992, 0, 123/416, 612941/3411720, 43/1440, 139 | 2272/6561, 79937/1113912, 3293/556956]) 140 | E[:-1] -= B # convert to error coefficients 141 | 142 | # extra time step fractions for dense output 143 | C_extra = np.array([1/2, 5/6, 1/9]) 144 | 145 | # coefficient matrix for dense output 146 | A_extra = np.array([ 147 | [455/6144, -837888343715/13176988637184, 98719073263/1551965184000], 148 | [0, 30409415/52955362, 1307/123552], 149 | [10256301/35409920, -48321525963/759168069632, 150 | 4632066559387/70181753241600], 151 | [2307361/17971200, 8530738453321/197654829557760, 152 | 7828594302389/382182512025600], 153 | [-387/102400, 1361640523001/1626788720640, 40763687/11070259200], 154 | [73/5130, -13143060689/38604458898, 34872732407/224610586200], 155 | [-7267/215040, 18700221969/379584034816, -2561897/30105600], 156 | [1/32, -5831595/847285792, 1/10], 157 | [0, -5183640/26477681, -1/10], 158 | [0, 0, -1403317093/11371610250]]).T 159 | 160 | # This is the original, very accurate, 5th order interpolant of [1, 2]. 161 | # It needs 3 extra function evaluations per step. 
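# (Pbest has 11 rows, one per stored stage: the 8 stages of the pair plus the
#  3 extra stages. Its 6 columns are assembled into Q in _dense_output_impl
#  using grouped summations; the first column simply selects K[7], the
#  derivative at the end of the step.)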
162 | Pbest = np.array([ 163 | [0, -11513270273/3502699200, -87098480009/5254048800, 164 | -2048058893/59875200, -1620741229/50038560, 165 | -12134338393/1050809760], 166 | [0, 0, 0, 0, 0, 0], 167 | [0, -29327744613/2436866432, -69509738227/1218433216, 168 | -39991188681/374902528, -539868024987/6092166080, 169 | -33197340367/1218433216], 170 | [0, -2382590741699/331755652800, -16209923456237/497633479200, 171 | -333945812879/5671036800, -7896875450471/165877826400, 172 | -284800997201/19905339168], 173 | [0, -36591193/86486400, -32406787/18532800, -633779/211200, 174 | -103626067/43243200, -540919/741312], 175 | [0, -611586736/89131185, -3357024032/1871754885, 183022264/5332635, 176 | 30405842464/623918295, 7157998304/374350977], 177 | [0, -65403/15680, -385151/15680, -1620541/31360, -719433/15680, 178 | -138073/9408], 179 | [1, 149/16, 2501/64, 4715/64, 3991/64, 1245/64], 180 | [0, 16, 199/3, 103, 71, 55/3], 181 | [0, -423642896/126351225, -11411880511/379053675, -26477681/359975, 182 | -1774004627/25270245, -1774004627/75810735], 183 | [0, 12, 59, 117, 105, 35]]) 184 | 185 | # This is an up to 12x less accurate, 5th order, low cost interpolant that 186 | # needs one extra function evaluation per step, instead of three. 187 | Plow = np.array([ 188 | [1, -155/36, 16441/2016, -56689/8064, 757/336], 189 | [0, 0, 0, 0, 0], 190 | [0, 6561/988, -14727987/774592, 60538347/3098368, -13321017/1936480], 191 | [0, 2401/702, -603337/56160, 2740913/224640, -24353/5200], 192 | [0, 0, -387/2240, 3483/8960, -1161/5600], 193 | [0, 1408/513, -45536/3591, 67960/3591, -17216/1995], 194 | [0, 0, -7267/4704, 21801/6272, -7267/3920], 195 | [0, -1/2, 4, -15/2, 4], 196 | [0, -8, 32, -40, 16]]) 197 | 198 | # This is a low accuracy, 4th order interpolant. It is free: it does not 199 | # need extra function evaluations. 
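# (P has 8 rows, one per stage of the pair, and 5 polynomial columns. It is
#  applied directly as Q = K.T @ P in _dense_output_impl when interpolant is
#  set to 'free'.)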
200 | P = np.array([ 201 | [1, -2773674729811/735370896960, 316222661411/52526492640, 202 | -1282818361681/294148358784, 6918746667/5836276960], 203 | [0, 0, 0, 0, 0], 204 | [0, 1594012432639617/282545840187520, -303081611134977/20181845727680, 205 | 1643668176796011/113018336075008, -14071997888919/2883120818240], 206 | [0, -47637453654133/20485332129600, 125365109861131/10242666064800, 207 | -135424370922463/8194132851840, 2582696138393/379358002400], 208 | [0, 1915795112337/817078774400, -557453242737/58362769600, 209 | 3958638678747/326831509760, -285784868817/58362769600], 210 | [0, -1490252641456/654939705105, 692325952352/93562815015, 211 | -808867306376/130987941021, 4887837472/3465289445], 212 | [0, 824349534931/571955142080, -895925604353/122561816160, 213 | 2443928282393/228782056832, -5528580993/1167255392], 214 | [0, -38480331/36476731, 226874786/36476731, 215 | -374785310/36476731, 186390855/36476731]]) 216 | 217 | def __init__(self, fun, t0, y0, t_bound, nfev_stiff_detect=5000, 218 | sc_params='standard', interpolant='low', **extraneous): 219 | super().__init__( 220 | fun, t0, y0, t_bound, nfev_stiff_detect=nfev_stiff_detect, 221 | sc_params=sc_params, **extraneous) 222 | # custom initialization to create extended storage for dense output 223 | if interpolant not in ('best', 'low', 'free'): 224 | raise ValueError( 225 | "interpolant should be one of: 'best', 'low', 'free'") 226 | self.interpolant = interpolant 227 | if self.interpolant == 'best': 228 | self.K_extended = np.zeros((self.n_stages+self.n_extra_stages + 1, 229 | self.n), dtype=self.y.dtype) 230 | self.K = self.K_extended[:self.n_stages+1] 231 | elif self.interpolant == 'low': 232 | self.K_extended = np.zeros((self.n_stages + 2, 233 | self.n), dtype=self.y.dtype) 234 | self.K = self.K_extended[:self.n_stages+1] 235 | else: 236 | self.K_extended = self.K 237 | 238 | def _step_impl(self): 239 | 240 | # mostly follows the scipy implementation of RungeKutta 241 | t = self.t 242 | y = self.y 243 | 244 | h_abs, min_step = self._reassess_stepsize(t, y) 245 | 246 | # loop until step accepted 247 | step_accepted = False 248 | step_rejected = False 249 | while not step_accepted: 250 | 251 | if h_abs < min_step: 252 | return False, self.TOO_SMALL_STEP 253 | 254 | h = h_abs * self.direction 255 | t_new = t + h 256 | 257 | # calculate stages, except last two 258 | self.K[0] = self.f 259 | for i in range(1, self.n_stages - 1): 260 | self._rk_stage(h, i) 261 | 262 | # calculate pre_error_norm 263 | error_norm_pre = self._estimate_error_norm_pre(y, h) 264 | 265 | # reject step if pre_error too large 266 | if error_norm_pre > 1: 267 | step_rejected = True 268 | h_abs *= max( 269 | self.min_factor, 270 | self.safety * error_norm_pre ** self.error_exponent) 271 | 272 | NFS[()] += 1 273 | if self.nfev_stiff_detect: 274 | self.jflstp += 1 # for stiffness detection 275 | continue 276 | 277 | # calculate next stage 278 | self._rk_stage(h, self.n_stages - 1) 279 | 280 | # calculate error_norm and solution 281 | y_new, error_norm = self._comp_sol_err(y, h) 282 | 283 | # and evaluate 284 | if error_norm < 1: 285 | step_accepted = True 286 | 287 | if error_norm < self.tiny_err: 288 | factor = self.max_factor 289 | self.standard_sc = True 290 | 291 | elif self.standard_sc: 292 | factor = self.safety * error_norm ** self.error_exponent 293 | self.standard_sc = False 294 | 295 | else: 296 | # use second order SC controller 297 | h_ratio = h / self.h_previous 298 | factor = self.safety_sc * ( 299 | error_norm ** self.minbeta1 * 300 | 
self.error_norm_old ** self.minbeta2 * 301 | h_ratio ** self.minalpha) 302 | factor = min(self.max_factor, max(self.min_factor, factor)) 303 | 304 | if step_rejected: 305 | factor = min(1, factor) 306 | 307 | h_abs *= factor 308 | 309 | if factor < MAX_FACTOR: 310 | # reduce max_factor when on scale. 311 | self.max_factor = MAX_FACTOR 312 | 313 | else: 314 | if np.isnan(error_norm) or np.isinf(error_norm): 315 | return False, "Overflow or underflow encountered." 316 | 317 | step_rejected = True 318 | h_abs *= max(self.min_factor, 319 | self.safety * error_norm ** self.error_exponent) 320 | 321 | NFS[()] += 1 322 | self.jflstp += 1 # for stiffness detection 323 | 324 | # store for next step and interpolation 325 | self.h_previous = h 326 | self.y_old = y 327 | self.h_abs = h_abs 328 | self.f = self.K[self.n_stages].copy() 329 | self.error_norm_old = error_norm 330 | 331 | # output 332 | self.t = t_new 333 | self.y = y_new 334 | 335 | # stiffness detection 336 | self._diagnose_stiffness() 337 | 338 | return True, None 339 | 340 | def _estimate_error_norm_pre(self, y, h): 341 | # first error estimate 342 | # y_new is not available yet for scale, so use y_pre instead 343 | y_pre = y + h * (self.K[:6].T @ self.B_scale_pre) 344 | scale = calculate_scale(self.atol, self.rtol, y, y_pre) 345 | err = h * (self.K[:6, :].T @ self.E_pre) 346 | return norm(err / scale) 347 | 348 | def _dense_output_impl(self): 349 | h = self.h_previous 350 | K = self.K_extended 351 | 352 | if self.interpolant == 'free': 353 | Q = K.T @ self.P 354 | return HornerDenseOutput(self.t_old, self.t, self.y_old, Q) 355 | 356 | elif self.interpolant == 'low': 357 | s = self.n_stages + 1 358 | dy = K[:s, :].T @ self.A_extra[0, :s] * h 359 | K[s] = self.fun(self.t_old + self.C_extra[0] * h, self.y_old + dy) 360 | Q = K.T @ self.Plow 361 | return HornerDenseOutput(self.t_old, self.t, self.y_old, Q) 362 | 363 | # else: the accurate interpolant 364 | # calculate the required extra stages 365 | for s, (a, c) in enumerate(zip(self.A_extra, self.C_extra), 366 | start=self.n_stages+1): 367 | dy = K[:s, :].T @ a[:s] * h 368 | K[s] = self.fun(self.t_old + c * h, self.y_old + dy) 369 | 370 | # form Q. Usually: Q = K.T @ self.P as for the other interpolants, 371 | # but RKSuite groups summations to mitigate round-off: 372 | Q = np.empty((K.shape[1], self.Pbest.shape[1]), dtype=K.dtype) 373 | Q[:, 0] = self.K[7] 374 | KP = K * self.Pbest[:, 1, np.newaxis] # term for t**2 375 | Q[:, 1] = (KP[4] + ((KP[5] + KP[7]) + KP[0]) + ((KP[2] + KP[8]) + 376 | KP[9]) + ((KP[3] + KP[10]) + KP[6])) 377 | KP = K * self.Pbest[:, 2, np.newaxis] # term for t**3 378 | Q[:, 2] = (KP[4] + KP[5] + ((KP[2] + KP[8]) + (KP[9] + KP[7]) + 379 | KP[0]) + ((KP[3] + KP[10]) + KP[6])) 380 | KP = K * self.Pbest[:, 3, np.newaxis] # term for t**4 381 | Q[:, 3] = (((KP[3] + KP[7]) + (KP[6] + KP[5]) + KP[4]) + ((KP[9] + 382 | KP[8]) + (KP[2]+KP[10]) + KP[0])) 383 | KP = K * self.Pbest[:, 4, np.newaxis] # term for t**5 384 | Q[:, 4] = ((KP[9] + KP[8]) + ((KP[6] + KP[5]) + KP[4]) + ((KP[3] + 385 | KP[7]) + (KP[2] + KP[10]) + KP[0])) 386 | KP = K * self.Pbest[:, 5, np.newaxis] # term for t**6 387 | Q[:, 5] = (KP[4] + ((KP[9] + KP[7]) + (KP[6] + KP[5])) + ((KP[3] + 388 | KP[8]) + (KP[2] + KP[10]) + KP[0])) 389 | 390 | # RKSuite's, polynomial definition is different from scipy's: looking 391 | # back from the end of the step instead of forward from the start. 
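# (Note, under the assumed convention that HornerDenseOutput(ta, tb, ya, Q)
# evaluates ya + h * (Q @ [x, x**2, ...]) with h = tb - ta and x = (t - ta)/h:
# passing (t, t+h) with the new solution as base point below gives
# x = (t - t_new)/h, which runs from -1 to 0 over the accepted step, so the
# polynomial is indeed evaluated backward from the step end.)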
392 | # The call is modified to accommodate:
393 | return HornerDenseOutput(self.t, self.t+h, self.y, Q)
394 |
--------------------------------------------------------------------------------
/extensisq/calvo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from extensisq.common import (
3 | RungeKutta, NFS, norm, MAX_FACTOR, calculate_scale)
4 |
5 |
6 | class CFMR7osc(RungeKutta):
7 | """Explicit Runge-Kutta method of (algebraic) order 7, with an error
8 | estimate of order 5 and a free interpolant of (algebraic) order 5.
9 |
10 | This method by Calvo, Franco, Montijano and Randez is tuned to get a
11 | dispersion order of 10 and a dissipation order of 9. This is beneficial
12 | for problems with oscillating solutions (and linear problems in general).
13 | It can outperform methods of higher algebraic order, such as `DOP853`,
14 | for such problems.
15 |
16 | The interpolant that has been added is of dispersion order 6 and
17 | dissipation order 7.
18 |
19 | Can be applied in the complex domain.
20 |
21 | Parameters
22 | ----------
23 | fun : callable
24 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
25 | Here ``t`` is a scalar, and there are two options for the ndarray
26 | ``y``: It can either have shape (n,); then ``fun`` must return
27 | array_like with shape (n,). Alternatively it can have shape (n, k);
28 | then ``fun`` must return an array_like with shape (n, k), i.e., each
29 | column corresponds to a single column in ``y``. The choice between the
30 | two options is determined by `vectorized` argument (see below).
31 | t0 : float
32 | Initial time.
33 | y0 : array_like, shape (n,)
34 | Initial state.
35 | t_bound : float
36 | Boundary time - the integration won't continue beyond it. It also
37 | determines the direction of the integration.
38 | first_step : float or None, optional
39 | Initial step size. Default is ``None`` which means that the algorithm
40 | should choose.
41 | max_step : float, optional
42 | Maximum allowed step size. Default is np.inf, i.e., the step size is
43 | not bounded and determined solely by the solver.
44 | rtol, atol : float and array_like, optional
45 | Relative and absolute tolerances. The solver keeps the local error
46 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
47 | relative accuracy (number of correct digits). But if a component of `y`
48 | is approximately below `atol`, the error only needs to fall within
49 | the same `atol` threshold, and the number of correct digits is not
50 | guaranteed. If components of y have different scales, it might be
51 | beneficial to set different `atol` values for different components by
52 | passing array_like with shape (n,) for `atol`. Default values are
53 | 1e-3 for `rtol` and 1e-6 for `atol`.
54 | vectorized : bool, optional
55 | Whether `fun` is implemented in a vectorized fashion. A vectorized
56 | implementation offers no advantages for this solver. Default is False.
57 | nfev_stiff_detect : int, optional
58 | Number of function evaluations for stiffness detection. This number has
59 | multiple purposes. If it is set to 0, then stiffness detection is
60 | disabled. For other (positive) values it is used to represent a
61 | 'considerable' number of function evaluations (nfev). A stiffness test
62 | is done if many steps fail and each time nfev exceeds integer multiples
63 | of `nfev_stiff_detect`.
For the assessment itself, the problem is 64 | assessed as non-stiff if the predicted nfev to complete the integration 65 | is lower than `nfev_stiff_detect`. The default value is 5000. 66 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 67 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 68 | size controller is, with k the exponent of the standard controller, 69 | _n for new and _o for old: 70 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 71 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 72 | Predefined parameters are [3]_: 73 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 74 | Soederlind "S" (0.6, -0.2, 0, 0.9), 75 | and "standard" (1, 0, 0, 0.9). 76 | The default for this method is "G". 77 | 78 | References 79 | ---------- 80 | .. [1] M. Calvo, J.M. Franco, J.I. Montijano, L. Randez, "Explicit 81 | Runge-Kutta methods for initial value problems with oscillating 82 | solutions", Journal of Computational and Applied Mathematics, Vol. 83 | 76, No. 1–2, 1996, pp. 195-212. 84 | https://doi.org/10.1016/S0377-0427(96)00103-3 85 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 86 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 87 | https://doi.org/10.1023/A:1021160023092 88 | """ 89 | order = 7 90 | order_secondary = 5 91 | n_stages = 9 92 | tanang = 40 93 | stbrad = 4.7 94 | sc_params = "G" 95 | 96 | # time step fractions 97 | C = np.array([0, 4/63, 2/21, 1/7, 7/17, 13/24, 7/9, 91/100, 1]) 98 | 99 | # coefficient matrix 100 | A = np.array([ 101 | [0, 0, 0, 0, 0, 0, 0, 0, 0], 102 | [4/63, 0, 0, 0, 0, 0, 0, 0, 0], 103 | [1/42, 1/14, 0, 0, 0, 0, 0, 0, 0], 104 | [1/28, 0, 3/28, 0, 0, 0, 0, 0, 0], 105 | [12551/19652, 0, -48363/19652, 10976/4913, 0, 0, 0, 0, 0], 106 | [-36616931/27869184, 0, 2370277/442368, -255519173/63700992, 107 | 226798819/445906944, 0, 0, 0, 0], 108 | [-10401401/7164612, 0, 47383/8748, -4914455/1318761, -1498465/7302393, 109 | 2785280/3739203, 0, 0, 0], 110 | [181002080831/17500000000, 0, -14827049601/400000000, 111 | 23296401527134463/857600000000000, 2937811552328081/949760000000000, 112 | -243874470411/69355468750, 2857867601589/3200000000000, 0, 0], 113 | [-228380759/19257212, 0, 4828803/113948, -331062132205/10932626912, 114 | -12727101935/3720174304, 22627205314560/4940625496417, 115 | -268403949/461033608, 3600000000000/19176750553961, 0]]) 116 | 117 | # coefficients for propagating method 118 | B = np.array([ 119 | 95/2366, 0, 0, 3822231133/16579123200, 555164087/2298419200, 120 | 1279328256/9538891505, 5963949/25894400, 50000000000/599799373173, 121 | 28487/712800]) 122 | 123 | # coefficients for error estimation, note E[-2] = 0. 
124 | E = np.array([ 125 | 1689248233/50104356120, 0, 0, 1/4, 28320758959727/152103780259200, 126 | 66180849792/341834007515, 31163653341/152322513280, 127 | 36241511875000/394222326561063, 28487/712800, 0]) 128 | E[:-1] -= B 129 | 130 | # coefficients for interpolation (dense output) 131 | # free 5th order interpolant, Optimal T620 132 | P = np.array([ 133 | [1, -6248/1183, 12056/1183, -1345/182, 97/169, 160/169], 134 | [0, 0, 0, 0, 0, 0], 135 | [0, 0, 0, 0, 0, 0], 136 | [0, 661103345/110527488, -12471420661/828956160, 137 | 11535693337/1105274880, 2373818279/1381593600, -823543/287832], 138 | [0, 39338391/20894720, -975441759/114920960, 9504105153/459683840, 139 | -1803301911/82086400, 417605/51304], 140 | [0, -7599771648/1907778301, 41855533056/1907778301, 141 | -5870997504/146752177, 295068082176/9538891505, 142 | -1517322240/173434391], 143 | [0, 684531/517888, -11632653/1294720, 14012109/739840, 144 | -13714677/924800, 2187/578], 145 | [0, 16000000000/18175738581, -2410000000000/599799373173, 146 | 10000000000/2197067301, -2000000000/28561874913, 147 | -4000000000/3173541657], 148 | [0, 28487/23760, -199409/35640, 370331/47520, -199409/59400, 0], 149 | [0, -2, 10, -15, 7, 0]]) 150 | 151 | # redefine _step_impl() to save 1 evaluation for each rejected step 152 | def _step_impl(self): 153 | # mostly follows the scipy implementation of scipy's RungeKutta 154 | t = self.t 155 | y = self.y 156 | 157 | h_abs, min_step = self._reassess_stepsize(t, y) 158 | 159 | # loop until the step is accepted 160 | step_accepted = False 161 | step_rejected = False 162 | while not step_accepted: 163 | 164 | if h_abs < min_step: 165 | return False, self.TOO_SMALL_STEP 166 | h = h_abs * self.direction 167 | t_new = t + h 168 | 169 | # calculate stages needed for error evaluation 170 | self.K[0] = self.f 171 | for i in range(1, self.n_stages-1): 172 | self._rk_stage(h, i) 173 | 174 | # evaluate error with premature y_pre instead of y_new for weight 175 | error_norm_pre = self._estimate_error_norm_pre(y, h) 176 | 177 | # reject step if pre_error too large 178 | if error_norm_pre > 1: 179 | step_rejected = True 180 | h_abs *= max( 181 | self.min_factor, 182 | self.safety * error_norm_pre ** self.error_exponent) 183 | 184 | NFS[()] += 1 185 | if self.nfev_stiff_detect: 186 | self.jflstp += 1 # for stiffness detection 187 | continue 188 | 189 | # calculate last stage needed for output 190 | self._rk_stage(h, self.n_stages-1) 191 | 192 | # calculate error norm and solution (now with proper weight) 193 | y_new, error_norm = self._comp_sol_err(y, h) 194 | 195 | # evaluate error 196 | if error_norm < 1: 197 | step_accepted = True 198 | 199 | if error_norm < self.tiny_err: 200 | factor = self.max_factor 201 | self.standard_sc = True 202 | 203 | elif self.standard_sc: 204 | factor = self.safety * error_norm ** self.error_exponent 205 | self.standard_sc = False 206 | 207 | else: 208 | # use second order SC controller 209 | h_ratio = h / self.h_previous 210 | factor = self.safety_sc * ( 211 | error_norm ** self.minbeta1 * 212 | self.error_norm_old ** self.minbeta2 * 213 | h_ratio ** self.minalpha) 214 | factor = min(self.max_factor, max(self.min_factor, factor)) 215 | 216 | if step_rejected: 217 | factor = min(1, factor) 218 | 219 | h_abs *= factor 220 | 221 | if factor < MAX_FACTOR: 222 | # reduce max_factor when on scale. 
223 | self.max_factor = MAX_FACTOR 224 | 225 | else: 226 | step_rejected = True 227 | h_abs *= max(self.min_factor, 228 | self.safety * error_norm ** self.error_exponent) 229 | 230 | NFS[()] += 1 231 | self.jflstp += 1 # for stiffness detection 232 | 233 | if np.isnan(error_norm) or np.isinf(error_norm): 234 | return False, "Overflow or underflow encountered." 235 | 236 | # evaluate ouput point for interpolation and next step 237 | self.K[self.n_stages] = self.fun(t + h, y_new) 238 | 239 | # store for next step, interpolation and stepsize control 240 | self.h_previous = h 241 | self.y_old = y 242 | self.h_abs = h_abs 243 | self.f = self.K[self.n_stages].copy() 244 | self.error_norm_old = error_norm 245 | 246 | # output 247 | self.t = t_new 248 | self.y = y_new 249 | 250 | # stiffness detection 251 | self._diagnose_stiffness() 252 | 253 | return True, None 254 | 255 | def _estimate_error_norm_pre(self, y, h): 256 | # first error estimate 257 | # y_new is not available yet for scale, so use y_pre instead 258 | y_pre = y + h * (self.K[:8].T @ self.A[8, :8]) 259 | scale = calculate_scale(self.atol, self.rtol, y, y_pre) 260 | err = h * (self.K[:8, :].T @ self.E[:8]) 261 | return norm(err / scale) 262 | -------------------------------------------------------------------------------- /extensisq/cash.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import (RungeKutta, HornerDenseOutput, CubicDenseOutput, 3 | NFS, calculate_scale, norm) 4 | 5 | 6 | SAFETY = 0.9 7 | 8 | 9 | class CK5(RungeKutta): 10 | """A 5th order method with 4th order error estimator that uses the 11 | coefficients of Cash and Karp [1]_. 12 | 13 | This is not the variable order method described [1]_. That method is 14 | available as `CKdisc`. 15 | 16 | Can be applied in the complex domain. 17 | 18 | Parameters 19 | ---------- 20 | fun : callable 21 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 22 | Here ``t`` is a scalar, and there are two options for the ndarray 23 | ``y``: It can either have shape (n,); then ``fun`` must return 24 | array_like with shape (n,). Alternatively it can have shape (n, k); 25 | then ``fun`` must return an array_like with shape (n, k), i.e., each 26 | column corresponds to a single column in ``y``. The choice between the 27 | two options is determined by `vectorized` argument (see below). 28 | t0 : float 29 | Initial time. 30 | y0 : array_like, shape (n,) 31 | Initial state. 32 | t_bound : float 33 | Boundary time - the integration won't continue beyond it. It also 34 | determines the direction of the integration. 35 | first_step : float or None, optional 36 | Initial step size. Default is ``None`` which means that the algorithm 37 | should choose. 38 | max_step : float, optional 39 | Maximum allowed step size. Default is np.inf, i.e., the step size is 40 | not bounded and determined solely by the solver. 41 | rtol, atol : float and array_like, optional 42 | Relative and absolute tolerances. The solver keeps the local error 43 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 44 | relative accuracy (number of correct digits). But if a component of `y` 45 | is approximately below `atol`, the error only needs to fall within 46 | the same `atol` threshold, and the number of correct digits is not 47 | guaranteed. 
If components of y have different scales, it might be 48 | beneficial to set different `atol` values for different components by 49 | passing array_like with shape (n,) for `atol`. Default values are 50 | 1e-3 for `rtol` and 1e-6 for `atol`. 51 | vectorized : bool, optional 52 | Whether `fun` is implemented in a vectorized fashion. A vectorized 53 | implementation offers no advantages for this solver. Default is False. 54 | nfev_stiff_detect : int, optional 55 | Number of function evaluations for stiffness detection. This number has 56 | multiple purposes. If it is set to 0, then stiffness detection is 57 | disabled. For other (positive) values it is used to represent a 58 | 'considerable' number of function evaluations (nfev). A stiffness test 59 | is done if many steps fail and each time nfev exceeds integer multiples 60 | of `nfev_stiff_detect`. For the assessment itself, the problem is 61 | assessed as non-stiff if the predicted nfev to complete the integration 62 | is lower than `nfev_stiff_detect`. The default value is 5000. 63 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 64 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 65 | size controller is, with k the exponent of the standard controller, 66 | _n for new and _o for old: 67 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 68 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 69 | Predefined parameters are: 70 | Gustafsson "G" (0.7, -0.4, 0, 0.9), Watts "W" (2, -1, -1, 0.8), 71 | Soederlind "S" (0.6, -0.2, 0, 0.9), and "standard" (1, 0, 0, 0.9). 72 | The default for this method is "G". 73 | 74 | References 75 | ---------- 76 | .. [1] J. R. Cash, A. H. Karp, "A Variable Order Runge-Kutta Method for 77 | Initial Value Problems with Rapidly Varying Right-Hand Sides", 78 | ACM Trans. Math. Softw., Vol. 16, No. 3, 1990, pp. 201-222, ISSN 79 | 0098-3500. https://doi.org/10.1145/79505.79507 80 | """ 81 | 82 | n_stages = 6 83 | order = 5 84 | order_secondary = 4 85 | tanang = 2.4 86 | stbrad = 3.7 87 | sc_params = "G" 88 | 89 | A = np.array([ 90 | [0, 0, 0, 0, 0, 0], 91 | [1/5, 0, 0, 0, 0, 0], 92 | [3/40, 9/40, 0, 0, 0, 0], 93 | [3/10, -9/10, 6/5, 0, 0, 0], 94 | [-11/54, 5/2, -70/27, 35/27, 0, 0], 95 | [1631/55296, 175/512, 575/13824, 44275/110592, 253/4096, 0]]) 96 | 97 | B = np.array([37/378, 0, 250/621, 125/594, 0, 512/1771]) 98 | 99 | C = np.array([0, 1/5, 3/10, 3/5, 1, 7/8]) 100 | 101 | E = np.array( 102 | [277/64512, 0, -6925/370944, 6925/202752, 277/14336, -277/7084, 0]) 103 | 104 | # fourth order, maximum ||T5|| over step is 1.52e-3 105 | P = np.array([ 106 | [1, -10405/3843, 32357/11529, -855/854], 107 | [0, 0, 0, 0], 108 | [0, 308500/88389, -1424000/265167, 67250/29463], 109 | [0, 5875/24156, 12875/36234, -3125/8052], 110 | [0, 235/1708, -235/854, 235/1708], 111 | [0, -287744/108031, 700416/108031, -381440/108031], 112 | [0, 3/2, -4, 5/2]]) 113 | 114 | 115 | class CKdisc(RungeKutta): 116 | """Cash Karp variable order (5, 3, 2) Runge Kutta method with error 117 | estimators of order (4, 2, 1). This method is created to efficiently solve 118 | non-smooth problems [1]_; problems with discontinuous derivatives. 119 | Interpolants for dense output have been added. 120 | 121 | The method prefers order 5. Whether this high order can be successfully 122 | reached in the current step is predicted multiple times between the 123 | evaluations of the derivative function. After the first failed prediction, 124 | propagation with fallback solutions of reduced order and step size is 125 | assessed. 
These fallback solutions do not need extra derivative
126 | evaluations.
127 |
128 | Step size is expected to be irregular in this method. This can interfere
129 | with stiffness detection and non-standard stepsize control, which are
130 | therefore disabled.
131 |
132 | Can be applied in the complex domain.
133 |
134 | A fixed fifth order method with the Cash Karp parameters is available as
135 | CK5.
136 |
137 | Parameters
138 | ----------
139 | fun : callable
140 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
141 | Here ``t`` is a scalar, and there are two options for the ndarray
142 | ``y``: It can either have shape (n,); then ``fun`` must return
143 | array_like with shape (n,). Alternatively it can have shape (n, k);
144 | then ``fun`` must return an array_like with shape (n, k), i.e., each
145 | column corresponds to a single column in ``y``. The choice between the
146 | two options is determined by `vectorized` argument (see below).
147 | t0 : float
148 | Initial time.
149 | y0 : array_like, shape (n,)
150 | Initial state.
151 | t_bound : float
152 | Boundary time - the integration won't continue beyond it. It also
153 | determines the direction of the integration.
154 | first_step : float or None, optional
155 | Initial step size. Default is ``None`` which means that the algorithm
156 | should choose.
157 | max_step : float, optional
158 | Maximum allowed step size. Default is np.inf, i.e., the step size is
159 | not bounded and determined solely by the solver.
160 | rtol, atol : float and array_like, optional
161 | Relative and absolute tolerances. The solver keeps the local error
162 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
163 | relative accuracy (number of correct digits). But if a component of `y`
164 | is approximately below `atol`, the error only needs to fall within
165 | the same `atol` threshold, and the number of correct digits is not
166 | guaranteed. If components of y have different scales, it might be
167 | beneficial to set different `atol` values for different components by
168 | passing array_like with shape (n,) for `atol`. Default values are
169 | 1e-3 for `rtol` and 1e-6 for `atol`.
170 | vectorized : bool, optional
171 | Whether `fun` is implemented in a vectorized fashion. A vectorized
172 | implementation offers no advantages for this solver. Default is False.
173 |
174 | References
175 | ----------
176 | .. [1] J. R. Cash, A. H. Karp, "A Variable Order Runge-Kutta Method for
177 | Initial Value Problems with Rapidly Varying Right-Hand Sides",
178 | ACM Trans. Math. Softw., Vol. 16, No. 3, 1990, pp. 201-222, ISSN
179 | 0098-3500. https://doi.org/10.1145/79505.79507
180 | """
181 | # changes w.r.t. paper [1]_:
182 | # - loop reformulated to prevent code repetition.
183 | # - atol and rtol as in scipy, instead of only scalar atol.
184 | # - do not allow for an increase in step size directly after a failed step.
185 | # - include interpolants for dense output.
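# A minimal usage sketch (hypothetical problem; assumes the usual extensisq
# pattern of passing the solver class to scipy's solve_ivp):
#
#     from scipy.integrate import solve_ivp
#     from extensisq import CKdisc
#
#     def fun(t, y):
#         # forcing switches off at t = 1, so the right-hand side has a
#         # jump there (a non-smooth problem)
#         return [y[1], -y[0] + (1.0 if t < 1.0 else 0.0)]
#
#     sol = solve_ivp(fun, (0.0, 10.0), [0.0, 0.0], method=CKdisc,
#                     rtol=1e-6, atol=1e-9, dense_output=True)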
186 | 187 | n_stages = 6 # for main method 188 | order = 5 # for main method 189 | order_secondary = 4 # for main method 190 | max_factor = 5 191 | min_factor = 1/5 192 | 193 | # time step fractions 194 | C = np.array([0, 1/5, 3/10, 3/5, 1, 7/8]) 195 | 196 | # coefficient matrix 197 | A = np.array([ 198 | [0, 0, 0, 0, 0, 0], 199 | [1/5, 0, 0, 0, 0, 0], 200 | [3/40, 9/40, 0, 0, 0, 0], 201 | [3/10, -9/10, 6/5, 0, 0, 0], 202 | [-11/54, 5/2, -70/27, 35/27, 0, 0], 203 | [1631/55296, 175/512, 575/13824, 44275/110592, 253/4096, 0]]) 204 | 205 | # all embedded orders: 206 | B_all = np.array([ 207 | [0, 0, 0, 0, 0, 0], # order 0 208 | [1, 0, 0, 0, 0, 0], # order 1 209 | [-3/2, 5/2, 0, 0, 0, 0], # order 2 210 | [19/54, 0, -10/27, 55/54, 0, 0], # order 3 211 | [2825/27648, 0, 18575/48384, 13525/55296, 277/14336, 212 | 1/4], # order 4 213 | [37/378, 0, 250/621, 125/594, 0, 512/1771]]) # order 5 214 | 215 | # coefficients for main propagating method 216 | B = B_all[5, :] # order 5 217 | E = np.zeros(7) 218 | E[:-1] = B_all[5, :] - B_all[4, :] # order 4(5) 219 | 220 | # coefficients for convergence assessment 221 | B_assess = B_all[[2, 3], :] 222 | E_assess = np.array([ 223 | B_all[2, :] - B_all[1, :], # order 1(2) 224 | B_all[3, :] - B_all[2, :]]) # order 2(3) 225 | 226 | # coefficients for fallback methods 227 | C_fallback = C[[1, 3]] 228 | B_fallback = np.array([ 229 | [1/10, 1/10, 0, 0, 0, 0], # order 2 230 | [1/10, 0, 2/5, 1/10, 0, 0]]) # order 3 231 | E_fallback = np.array([ 232 | [-1/10, 1/10, 0, 0, 0, 0], # order 1(2) 233 | [1/10, 0, -2/10, 1/10, 0, 0]]) # order 2(3) 234 | 235 | # fourth order interpolator for fifth order solution 236 | # maximum ||T5|| in [0,1] is 1.52e-3 237 | P = np.array([ 238 | [1, -10405/3843, 32357/11529, -855/854], 239 | [0, 0, 0, 0], 240 | [0, 308500/88389, -1424000/265167, 67250/29463], 241 | [0, 5875/24156, 12875/36234, -3125/8052], 242 | [0, 235/1708, -235/854, 235/1708], 243 | [0, -287744/108031, 700416/108031, -381440/108031], 244 | [0, 3/2, -4, 5/2]]) 245 | 246 | def __init__(self, fun, t0, y0, t_bound, **extraneous): 247 | super().__init__( 248 | fun, t0, y0, t_bound, nfev_stiff_detect=0, **extraneous) 249 | # adaptive weighing factors: 250 | self.twiddle = [1.5, 1.1] # starting values 251 | self.quit = [100., 100.] 
# starting values 252 | 253 | def _step_impl(self): 254 | t = self.t 255 | y = self.y 256 | twiddle = self.twiddle 257 | quit = self.quit 258 | 259 | h_abs, min_step = self._reassess_stepsize(t, y) 260 | 261 | order_accepted = 0 262 | step_rejected = False 263 | while not order_accepted: 264 | 265 | if h_abs < min_step: 266 | return False, self.TOO_SMALL_STEP 267 | h = h_abs * self.direction 268 | t_new = t + h 269 | 270 | # start the integration, two stages at a time 271 | self.K[0] = self.f # stage 0 272 | self._rk_stage(h, 1) # stage 1 273 | 274 | # first order error, second order solution, for assessment 275 | y_assess, err_assess, tol = self._comp_sol_err_tol( 276 | h, self.B_assess[0], self.E_assess[0], 2) 277 | E1 = norm(err_assess/tol) ** (1/2) 278 | esttol = E1 / self.quit[0] 279 | 280 | # assess if full step completion is likely 281 | if E1 < twiddle[0] * quit[0]: 282 | # green light -> continue with next two stages 283 | self._rk_stage(h, 2) # stage 2 284 | self._rk_stage(h, 3) # stage 3 285 | 286 | # second order error, third order solution, for assessment 287 | y_assess, err_assess, tol = self._comp_sol_err_tol( 288 | h, self.B_assess[1], self.E_assess[1], 4) 289 | E2 = norm(err_assess/tol) ** (1/3) 290 | esttol = E2 / self.quit[1] 291 | 292 | # assess if full step completion is likely 293 | if E2 < twiddle[1]*quit[1]: 294 | # green light -> continue with last two stages 295 | self._rk_stage(h, 4) # stage 4 296 | self._rk_stage(h, 5) # stage 5 297 | 298 | # second fourth error, fifth order solution, for output 299 | y_new, err, tol = self._comp_sol_err_tol(h, self.B, self.E) 300 | E4 = norm(err/tol) ** (1/5) 301 | E4 = E4 or 1e-160 # no div 0 302 | esttol = E4 303 | 304 | # assess final error 305 | if E4 < 1: 306 | # accept fifth order solution 307 | order_accepted = 4 # error order 308 | 309 | # update h for next step 310 | factor = min(self.max_factor, SAFETY/E4) 311 | if step_rejected: # not in paper 312 | factor = min(1.0, factor) 313 | h_abs *= factor 314 | 315 | # update quit factors 316 | q = [E1/E4, E2/E4] 317 | for j in (0, 1): 318 | if q[j] > quit[j]: 319 | q[j] = min(q[j], 10 * quit[j]) 320 | else: 321 | q[j] = max(q[j], 2/3 * quit[j]) 322 | quit[j] = max(1., min(10000., q[j])) 323 | 324 | break 325 | 326 | # fifth order solution NOT accepted 327 | if np.isnan(E4) or np.isinf(E4): 328 | return False, "Overflow or underflow encountered." 
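# Note: E1, E2 and E4 are scaled error norms raised to 1/(error order + 1);
# the twiddle and quit factors updated below adapt the acceptance thresholds
# E1 < twiddle[0]*quit[0] and E2 < twiddle[1]*quit[1] that were used earlier
# in this loop, following the strategy of Cash and Karp [1]_.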
329 | 330 | # update twiddle factors 331 | e = [E1, E2] 332 | for i in (0, 1): 333 | EQ = e[i] / quit[i] 334 | if EQ < twiddle[i]: 335 | twiddle[i] = max(1.1, EQ) 336 | 337 | # assess propagation with third order fallback solution 338 | if E2 < 1: 339 | y_new, err, tol = self._comp_sol_err_tol( 340 | h, self.B_fallback[1], self.E_fallback[1], 4) 341 | 342 | # assess second order error 343 | if norm(err/tol) < 1: 344 | # accept third order fallback solution 345 | order_accepted = 2 # error order 346 | h_abs *= self.C_fallback[1] # reduce next step 347 | h = h_abs * self.direction # and THIS step 348 | break 349 | 350 | # assess propagation with second order fallback solution 351 | if E1 < 1: 352 | y_new, err, tol = self._comp_sol_err_tol( 353 | h, self.B_fallback[0], self.E_fallback[0], 2) 354 | 355 | # assess first order error 356 | if norm(err/tol) < 1: 357 | # accept second order fallback solution 358 | order_accepted = 1 359 | h_abs *= self.C_fallback[0] # reduce next step 360 | h = h_abs * self.direction # and THIS step 361 | break 362 | 363 | else: 364 | # non-smooth behavior detected retry step with h/5 365 | step_rejected = True 366 | h_abs *= self.C_fallback[0] 367 | NFS[()] += 1 368 | continue 369 | 370 | # just not accurate enough, retry with usual estimate for h 371 | step_rejected = True 372 | h_abs *= max(self.min_factor, SAFETY/esttol) 373 | NFS[()] += 1 374 | continue 375 | 376 | # end of main while loop 377 | 378 | # calculate the derivative of the accepted solution 379 | # for first stage of next step and for interpolation 380 | t_new = t + h 381 | f_new = self.fun(t_new, y_new) 382 | self.K[-1, :] = f_new 383 | 384 | # store for next step and interpolation 385 | self.order_accepted = order_accepted # error order 386 | self.h_previous = h 387 | self.y_old = y 388 | self.h_abs = h_abs 389 | self.f = f_new 390 | 391 | # output 392 | self.t = t_new 393 | self.y = y_new 394 | return True, None 395 | 396 | def _compute_error(self, h, E, i): 397 | return h * (self.K[:i, :].T @ E[:i]) 398 | 399 | def _compute_solution(self, h, B, i): 400 | return h * (self.K[:i, :].T @ B[:i]) + self.y 401 | 402 | def _comp_sol_err_tol(self, h, B, E, i=6): 403 | sol = self._compute_solution(h, B, i) 404 | err = self._compute_error(h, E, i) 405 | tol = calculate_scale(self.atol, self.rtol, self.y, sol) 406 | return sol, err, tol 407 | 408 | def _dense_output_impl(self): 409 | # select interpolator based on order of the accepted error (solution) 410 | if self.order_accepted == 4: 411 | # 4th order error estimate accepted (5th order solution) 412 | Q = self.K.T @ self.P 413 | return HornerDenseOutput(self.t_old, self.t, self.y_old, Q) 414 | # low order solution 415 | return CubicDenseOutput(self.t_old, self.t, self.y_old, self.y, 416 | self.K[0, :], self.K[-1, :]) 417 | -------------------------------------------------------------------------------- /extensisq/fine.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import ( 3 | RungeKuttaNystrom, QuinticHermiteDenseOutput, HornerDenseOutputNystrom) 4 | 5 | 6 | class Fi4N(RungeKuttaNystrom): 7 | """Explicit Runge-Kutta Nystrom (general) method by Fine [1]_ of order 4. 8 | This method is applicable to second order initial value problems only. 9 | 10 | The second order problem should be recast in first order form as 11 | u = [x, v], du = [v, a], with x, v, a variables like, position, 12 | velocity, acceleration. 
The derivative function du = f(t, u) should 13 | calculate only a and pass through v. (The order in u and du matters.) 14 | 15 | Can be applied in the complex domain. 16 | 17 | Parameters 18 | ---------- 19 | fun : callable 20 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 21 | Here ``t`` is a scalar, and there are two options for the ndarray 22 | ``y``: It can either have shape (n,); then ``fun`` must return 23 | array_like with shape (n,). Alternatively it can have shape (n, k); 24 | then ``fun`` must return an array_like with shape (n, k), i.e., each 25 | column corresponds to a single column in ``y``. The choice between the 26 | two options is determined by `vectorized` argument (see below). For 27 | this second order problem, y should contain all solution components 28 | first followed by an equal number of first derivative components of the 29 | solution. Likewise, the returned array should contain the first 30 | derivatives first followed by the second derivatives. (The first 31 | derivatives are identical those in the input and the second derivatives 32 | are calculated.) 33 | t0 : float 34 | Initial time. 35 | y0 : array_like, shape (n,) 36 | Initial state. 37 | t_bound : float 38 | Boundary time - the integration won't continue beyond it. It also 39 | determines the direction of the integration. 40 | first_step : float or None, optional 41 | Initial step size. Default is ``None`` which means that the algorithm 42 | should choose. 43 | max_step : float, optional 44 | Maximum allowed step size. Default is np.inf, i.e., the step size is 45 | not bounded and determined solely by the solver. 46 | rtol, atol : float and array_like, optional 47 | Relative and absolute tolerances. The solver keeps the local error 48 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 49 | relative accuracy (number of correct digits). But if a component of `y` 50 | is approximately below `atol`, the error only needs to fall within 51 | the same `atol` threshold, and the number of correct digits is not 52 | guaranteed. If components of y have different scales, it might be 53 | beneficial to set different `atol` values for different components by 54 | passing array_like with shape (n,) for `atol`. Default values are 55 | 1e-3 for `rtol` and 1e-6 for `atol`. 56 | vectorized : bool, optional 57 | Whether `fun` is implemented in a vectorized fashion. A vectorized 58 | implementation offers no advantages for this solver. Default is False. 59 | nfev_stiff_detect : int, optional 60 | Number of function evaluations for stiffness detection. This number has 61 | multiple purposes. If it is set to 0, then stiffness detection is 62 | disabled. For other (positive) values it is used to represent a 63 | 'considerable' number of function evaluations (nfev). A stiffness test 64 | is done if many steps fail and each time nfev exceeds integer multiples 65 | of `nfev_stiff_detect`. For the assessment itself, the problem is 66 | assessed as non-stiff if the predicted nfev to complete the integration 67 | is lower than `nfev_stiff_detect`. The default value is 5000. 68 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 69 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). 
The step 70 | size controller is, with k the exponent of the standard controller, 71 | _n for new and _o for old: 72 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 73 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 74 | Predefined parameters are [2]_: 75 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 76 | Soederlind "S" (0.6, -0.2, 0, 0.9), 77 | and "standard" (1, 0, 0, 0.9). 78 | The default for this method is "G". 79 | 80 | References 81 | ---------- 82 | .. [1] J.M. Fine, "Low order practical Runge-Kutta-Nyström methods", 83 | Computing, Vol. 38, 1987, pp. 281-297. 84 | https://doi.org/10.1007/BF02278707 85 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 86 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 87 | https://doi.org/10.1023/A:1021160023092 88 | """ 89 | n_stages = 5 90 | order = 4 91 | order_secondary = 3 92 | sc_params = "G" 93 | 94 | tanang = 40. 95 | stbre = 1.5 96 | stbim = 4. 97 | 98 | C = np.array([0, 2/9, 1/3, 3/4, 1]) 99 | A = np.array([[0, 0, 0, 0, 0], 100 | [2/81, 0, 0, 0, 0], 101 | [1/36, 1/36, 0, 0, 0], 102 | [9/128, 0, 27/128, 0, 0], 103 | [11/60, -3/20, 9/25, 8/75, 0]]) 104 | Ap = np.array([[0, 0, 0, 0, 0], 105 | [2/9, 0, 0, 0, 0], 106 | [1/12, 1/4, 0, 0, 0], 107 | [69/128, -243/128, 135/64, 0, 0], 108 | [-17/12, 27/4, -27/5, 16/15, 0]]) 109 | B = np.array([19/180, 0, 63/200, 16/225, 1/120]) 110 | Bp = np.array([1/9, 0, 9/20, 16/45, 1/12]) 111 | E = np.array([25/1116, 0, -63/1240, 64/1395, -13/744, 0]) 112 | Ep = np.array([2/125, 0, -27/625, 32/625, -3/125, 0]) 113 | 114 | 115 | class Fi5N(RungeKuttaNystrom): 116 | """Explicit Runge-Kutta Nystrom (general) method by Fine [1]_ of order 5. 117 | This method is applicable to second order initial value problems only. 118 | 119 | The second order problem should be recast in first order form as 120 | u = [x, v], du = [v, a], with x, v, a variables like, position, 121 | velocity, acceleration. The derivative function du = f(t, u) should 122 | calculate only a and pass through v. (The order in u and du matters.) 123 | 124 | This method can use one of 4 interpolants; see [2]_ for two options. 125 | 126 | Can be applied in the complex domain. 127 | 128 | Parameters 129 | ---------- 130 | fun : callable 131 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 132 | Here ``t`` is a scalar, and there are two options for the ndarray 133 | ``y``: It can either have shape (n,); then ``fun`` must return 134 | array_like with shape (n,). Alternatively it can have shape (n, k); 135 | then ``fun`` must return an array_like with shape (n, k), i.e., each 136 | column corresponds to a single column in ``y``. The choice between the 137 | two options is determined by `vectorized` argument (see below). For 138 | this second order problem, y should contain all solution components 139 | first followed by an equal number of first derivative components of the 140 | solution. Likewise, the returned array should contain the first 141 | derivatives first followed by the second derivatives. (The first 142 | derivatives are identical those in the input and the second derivatives 143 | are calculated.) 144 | t0 : float 145 | Initial time. 146 | y0 : array_like, shape (n,) 147 | Initial state. 148 | t_bound : float 149 | Boundary time - the integration won't continue beyond it. It also 150 | determines the direction of the integration. 151 | first_step : float or None, optional 152 | Initial step size. Default is ``None`` which means that the algorithm 153 | should choose. 
154 | max_step : float, optional
155 | Maximum allowed step size. Default is np.inf, i.e., the step size is
156 | not bounded and determined solely by the solver.
157 | rtol, atol : float and array_like, optional
158 | Relative and absolute tolerances. The solver keeps the local error
159 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
160 | relative accuracy (number of correct digits). But if a component of `y`
161 | is approximately below `atol`, the error only needs to fall within
162 | the same `atol` threshold, and the number of correct digits is not
163 | guaranteed. If components of y have different scales, it might be
164 | beneficial to set different `atol` values for different components by
165 | passing array_like with shape (n,) for `atol`. Default values are
166 | 1e-3 for `rtol` and 1e-6 for `atol`.
167 | vectorized : bool, optional
168 | Whether `fun` is implemented in a vectorized fashion. A vectorized
169 | implementation offers no advantages for this solver. Default is False.
170 | nfev_stiff_detect : int, optional
171 | Number of function evaluations for stiffness detection. This number has
172 | multiple purposes. If it is set to 0, then stiffness detection is
173 | disabled. For other (positive) values it is used to represent a
174 | 'considerable' number of function evaluations (nfev). A stiffness test
175 | is done if many steps fail and each time nfev exceeds integer multiples
176 | of `nfev_stiff_detect`. For the assessment itself, the problem is
177 | assessed as non-stiff if the predicted nfev to complete the integration
178 | is lower than `nfev_stiff_detect`. The default value is 5000.
179 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional
180 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step
181 | size controller is, with k the exponent of the standard controller,
182 | _n for new and _o for old:
183 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2
184 | * (err/tol)**-b1 * (err_o/tol_o)**-b2
185 | Predefined parameters are [3]_:
186 | Gustafsson "G" (0.7, -0.4, 0, 0.9),
187 | Soederlind "S" (0.6, -0.2, 0, 0.9),
188 | and "standard" (1, 0, 0, 0.9).
189 | The default for this method is "G".
190 | interpolant : int: 0, 1, 2 or 3
191 | Select the interpolant for dense output.
192 | Option 0 is for the 4th order accurate interpolant that is the 5th
193 | order Hermite polynomial that satisfies C2 continuity at the solution
194 | points. This free interpolant satisfies the RKN order conditions up to
195 | order 4.
196 | Option 1 is for the 5th order interpolant in [2]_ that needs two extra
197 | function evaluations (at c = 2/5 and 1/5). This interpolant changes the
198 | solution at the output point, which is uncommon practice.
199 | Option 2 is for the other 5th order interpolant in [2]_ that also needs
200 | two extra function evaluations (both at c = 1/2). This interpolant
201 | improves the interpolated velocity (1st derivative). The interpolated
202 | displacement (solution) is the same as for interpolant 0 as proposed in
203 | the paper. However, a modified interpolant is implemented in extensisq,
204 | which uses the extra evaluations to slightly improve the position
205 | interpolant.
206 | Option 3 is a 4th order accurate interpolant that needs 1 extra
207 | evaluation and is slightly more accurate than interpolant 0.
208 | Default: 0.
209 |
210 | References
211 | ----------
212 | .. [1] J.M. Fine, "Low order practical Runge-Kutta-Nyström methods",
213 | Computing, Vol. 38, 1987, pp. 281-297.
214 | https://doi.org/10.1007/BF02278707 215 | .. [2] J.M. Fine, "Interpolants for Runge-Kutta-Nyström Methods", 216 | Computing, Vol. 39, 1987, pp. 27-42. 217 | https://doi.org/10.1007/BF02307711 218 | .. [3] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 219 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 220 | https://doi.org/10.1023/A:1021160023092 221 | """ 222 | n_stages = 6 223 | order = 5 224 | order_secondary = 4 225 | sc_params = "G" 226 | 227 | tanang = 15. 228 | stbre = 2.0 229 | stbim = 4.0 230 | 231 | C = np.array([0, 8/39, 4/13, 5/6, 43/47, 1]) 232 | A = np.array([[0, 0, 0, 0, 0, 0], 233 | [32/1521, 0, 0, 0, 0, 0], 234 | [4/169, 4/169, 0, 0, 0, 0], 235 | [175/5184, 0, 1625/5184, 0, 0, 0], 236 | [-342497279/5618900760, 6827067/46824173, 237 | 35048741/102161832, -2201514/234120865, 0, 0], 238 | [-7079/52152, 767/2173, 14027/52152, 30/2173, 0, 0]]) 239 | Ap = np.array([[0, 0, 0, 0, 0, 0], 240 | [8/39, 0, 0, 0, 0, 0], 241 | [1/13, 3/13, 0, 0, 0, 0], 242 | [7385/6912, -9425/2304, 13325/3456, 0, 0, 0], 243 | [223324757/91364240, -174255393/18272848, 244 | 382840094/46824173, -39627252/234120865, 0, 0], 245 | [108475/36464, -9633/848, 7624604/806183, 246 | 8100/49979, -4568212/19446707, 0]]) 247 | B = np.array([4817/51600, 0, 388869/1216880, 248 | 3276/23575, -1142053/22015140, 0]) 249 | Bp = np.array([4817/51600, 0, 1685099/3650640, 250 | 19656/23575, -53676491/88060560, 53/240]) 251 | E = np.array([8151/2633750, 0, -1377519/186334750, 252 | 586872/28879375, -36011118/2247378875, 0, 0]) 253 | Ep = np.array([8151/2633750, 0, -5969249/559004250, 254 | 3521232/28879375, -846261273/4494757750, 4187/36750, -1/25]) 255 | 256 | # first higher order interpolant of fine that replaces endpoint, 257 | # needs two extra evaluations 258 | C_extra1 = (2/5, 1/5) 259 | A_extra1 = np.array([ 260 | [3166724675977/89400687626250, 12182/175275, 261 | -2308196389073/59333908961250, 113223739712/2656609580625, 262 | -4985173058548/281912811350625, 0, -108322/9884875, 0, 0], 263 | [13703589067379/1021293449694000, -16588/178955775, 264 | 393366167467741/44058124399590000, -129051960428/18967820851875, 265 | 286445641484101/88563993965842500, 0, 185739/141153250, 0, 0]]) 266 | Ap_extra1 = np.array([ 267 | [-153602563/3630543750, 4/5, -15809974379/36693821250, 268 | 25076304/131584375, -20756397983/250144464375, 0, -251893/7317375, 269 | 0, 0], 270 | [549292232911/4942380225000, 0, 38673447228481/349667653965000, 271 | -14447155986/237691990625, 239970566676929/8434666091985000, 0, 272 | 48691361/4597563000, 0, 0]]) 273 | P1 = np.array([ 274 | [1/2, -65425102193/34422618000, 246336178993/68845236000, 275 | -17271401477/5737103000, 2646330829/2868551500], 276 | [0, 0, 0, 0, 0], 277 | [0, -103408733716249/21918241774800, 36216248499769/2087451597600, 278 | -34348334365943/1826520147900, 11946681497647/1826520147900], 279 | [0, 10008729576/15727000375, -45252884088/15727000375, 280 | 67501517184/15727000375, -28901603736/15727000375], 281 | [0, -42869319978551/58745639878800, 26279956317109/8392234268400, 282 | -5270387298308/1223867497475, 103642379853661/58745639878800], 283 | [0, 10265285443/27377989200, -14253109853/9125996400, 284 | 1166320544/570374775, -2474620297/3041998800], 285 | [0, -102497539/608399760, 451530737/608399760, -659633561/608399760, 286 | 899768737/1825199280], 287 | [0, 39325/31704, -96525/21136, 53625/10568, -7150/3963], 288 | [0, 25525/4848, -25525/1616, 25525/1616, -25525/4848]]) 289 | Pp1 = P1 * np.arange(2, 7) # derivative of P1 290 | Bi = np.array([ 
291 | 20342293/227212000, 0, 159248338847/434024589600, 33225336/155712875, 292 | -27213980937/193879999600, 12057023/271069200, -19822/1129455, 293 | -3575/63408, 0]) 294 | 295 | # second higher order interpolant of fine where the velocity intepolant is 296 | # not the derivative of the displacement interpolant, needs two extra 297 | # evaluations. I modified the displacement interpolant to slightly increase 298 | # accuracy, making use of the extra evaluations. 299 | C_extra2 = (1/2, 1/2) 300 | A_extra2 = np.array([ 301 | [43312501780291/1275109236086400, 544043/4374864, 302 | -39526111133/929968205760, -72268815551/18945421466400, 303 | 15249672887173/919058778293760, 0, -936943/260282880, 0, 0], 304 | [78787/1651200, 0, 10240217/116820480, -5733/94300, 65097021/939312640, 305 | -53/1536, 1/64, 0, 0]]) 306 | Ap_extra2 = np.array([ 307 | [-10074474119/31414368960, 2, -2024048255/1461229504, 308 | 262779483/503599720, -27782604453665/107223524134272, 0, 309 | -16606877/292226688, 0, 0], 310 | [70201889/791750400, 0, 86794851169/168046260480, 26319519/45216850, 311 | -895699605317/1351201232640, 867730151/2728776960, -192743/1624272, 312 | -429/1918, 0]]) 313 | P2 = np.array([ 314 | [1/2, -2121/1720, 4213/2580, -3783/3440, 1261/4300], 315 | [0, 0, 0, 0, 0], 316 | [0, 388869/121688, -648115/91266, 7388511/1216880, -1685099/912660], 317 | [0, 6552/4715, -29484/4715, 39312/4715, -78624/23575], 318 | [0, -1142053/2201514, 67381127/17612112, -170165897/29353520, 319 | 53676491/22015140], 320 | [0, 0, -53/48, 159/80, -53/60], 321 | [0, -1/6, 1, -3/2, 2/3], 322 | [0, 0, 0, 0, 0], 323 | [0, -8/3, 8, -8, 8/3]]) 324 | Pp2 = np.array([ 325 | [1, -6478933/1649480, 4322879/618555, -18893117/3298960, 3783/2150], 326 | [0, 0, 0, 0, 0], 327 | [0, 12073105993/1050289128, -4230283954/131286141, 328 | 67778480393/2100578256, -1685099/152110], 329 | [0, 68501376/4521685, -212403168/4521685, 238152312/4521685, 330 | -471744/23575], 331 | [0, -10466915745/703750642, 177078743809/4222503852, 332 | -238162590567/5630005136, 53676491/3669190], 333 | [0, 28284245/4263714, -150799787/8527428, 565239223/34109712, -53/10], 334 | [0, -294260/101517, 893071/101517, -1004879/101517, 4], 335 | [0, -3432/959, 6864/959, -3432/959, 0], 336 | [0, -8, 32, -40, 16]]) 337 | 338 | # more accurate interpolant, not higher order, one extra evaluation 339 | C_extra3 = (7/13, ) 340 | A_extra3 = np.array([ 341 | [0.0514642952839635, 0, 0.103871371189972, -0.0689278735806428, 342 | 0.0829881092428669, -0.0427358611434873, 0.0183103732085115]]) 343 | Ap_extra3 = np.array([ 344 | [0.0990632691053544, 0, 0.421051250650231, -0.0409728359730535, 345 | 0.114675787267570, -0.0729606750492478, 0.0176047424606845]]) 346 | P3 = np.array([ 347 | [0.5, -1.17031483236872, 1.34773382152111, -0.621021772676567, 348 | -0.0597823106655287, 0.0967378073680061], 349 | [0, 0, 0, 0, 0, 0], 350 | [0, 2.65626120756873, -4.91705525506241, 2.75486717403821, 351 | 0.391732827495055, -0.566243630720938], 352 | [0, 1.36554439688042, -4.36630995367658, 2.82125202693705, 353 | 2.13320898315464, -1.81473468977485], 354 | [0, -0.514916544259984, 2.22831627015447, -1.0275974440311, 355 | -2.32367769527755, 1.58599961339654], 356 | [0, -0.00551650449428157, -0.398188141421234, -0.0973365487706088, 357 | 1.19047020644871, -0.689429011762588], 358 | [0, -0.1230728332372, 0.484182482444333, -0.214110447909797, 359 | -0.532035218564604, 0.385036017267268], 360 | [0, -2.20798489008896, 5.62132077604031, -3.61605298758719, 361 | -0.799916792590728, 1.00263389422656]]) 362 | 
Pp3 = P3 * np.arange(2, 8) # derivative of P3
363 |
364 | def __init__(self, fun, t0, y0, t_bound,
365 | sc_params=None, interpolant=0, **extraneous):
366 | super().__init__(fun, t0, y0, t_bound, **extraneous)
367 | # custom initialization to create extended storage for dense output
368 | if interpolant not in range(4):
369 | raise ValueError(
370 | "interpolant should be one of: 0, 1, 2, 3")
371 | self.interpolant = interpolant
372 | if self.interpolant == 3:
373 | self.K_extended = np.zeros((self.n_stages + 2,
374 | self.n), dtype=self.y.dtype)
375 | self.K = self.K_extended[:self.n_stages+1]
376 | elif self.interpolant != 0:
377 | self.K_extended = np.zeros((self.n_stages + 3,
378 | self.n), dtype=self.y.dtype)
379 | self.K = self.K_extended[:self.n_stages+1]
380 |
381 | def _dense_output_impl(self):
382 | if self.interpolant == 0:
383 | return QuinticHermiteDenseOutput(
384 | self.t_old, self.t, self.y_old, self.y, self.f_old, self.f)
385 |
386 | h = self.h_previous
387 | K = self.K_extended
388 | if self.interpolant == 1:
389 | C_extra = self.C_extra1
390 | A_extra, Ap_extra = self.A_extra1, self.Ap_extra1
391 | P, Pp = self.P1, self.Pp1
392 | elif self.interpolant == 2:
393 | C_extra = self.C_extra2
394 | A_extra, Ap_extra = self.A_extra2, self.Ap_extra2
395 | P, Pp = self.P2, self.Pp2
396 | else:
397 | C_extra = self.C_extra3
398 | A_extra, Ap_extra = self.A_extra3, self.Ap_extra3
399 | P, Pp = self.P3, self.Pp3
400 | for s, (a, ap, c) in enumerate(zip(A_extra, Ap_extra, C_extra),
401 | start=self.n_stages+1):
402 | dt = c * h
403 | du = (K[:s, :].T @ a[:s]) * h**2 + dt * self.y_old[self.n:]
404 | dv = (K[:s, :].T @ ap[:s]) * h
405 | dy = np.concatenate((du, dv))
406 | K[s] = self.fun(self.t_old + dt, self.y_old + dy)
407 | Q = K.T @ P
408 | Qp = K.T @ Pp
409 | if self.interpolant == 1:
410 | # replace position at end of step (not the velocity)
411 | # (The derivative function is not evaluated at this updated point)
412 | du = (K.T @ self.Bi) * h**2 + h * self.y_old[self.n:]
413 | self.y[:self.n] = self.y_old[:self.n] + du
414 | return HornerDenseOutputNystrom(self.t_old, self.t, self.y_old, Q, Qp)
415 |
--------------------------------------------------------------------------------
/extensisq/kvaerno.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from extensisq.kennedy import KC # use 2 interpolants like KC methods
3 | # from extensisq.common import ESDIRK
4 | # from scipy.special import roots_laguerre
5 |
6 |
7 | class Kv3I(KC):
8 | """ESDIRK method of Kvaerno [1]_, ESDIRK3/2a with 4 stages. Main method of
9 | order 3 and embedded method of order 2 are both stiffly accurate.
10 | Only the main method is L-stable.
11 |
12 | The method includes two interpolants. The default is C0 continuous and the
13 | alternative is C1 continuous. The error of the two interpolants is similar.
14 |
15 | The implementation is similar to that of the extensisq methods of
16 | Hosea. It adopts details from the BDF method of scipy and the paper of
17 | Shampine [2]_ and cubic Hermite spline extrapolation for prediction of some
18 | of the stages.
19 |
20 | Can be applied in the complex domain.
21 |
22 | Parameters
23 | ----------
24 | fun : callable
25 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
26 | Here ``t`` is a scalar, and there are two options for the ndarray
27 | ``y``: It can either have shape (n,); then ``fun`` must return
28 | array_like with shape (n,).
Alternatively it can have shape (n, k);
29 | then ``fun`` must return an array_like with shape (n, k), i.e., each
30 | column corresponds to a single column in ``y``. The choice between the
31 | two options is determined by `vectorized` argument (see below).
32 | jac : {None, array_like, sparse_matrix, callable}, optional
33 | Jacobian matrix of the right-hand side of the system with respect to y,
34 | required by this method. The Jacobian matrix has shape (n, n) and its
35 | element (i, j) is equal to ``d f_i / d y_j``.
36 | There are three ways to define the Jacobian:
37 |
38 | * If array_like or sparse_matrix, the Jacobian is assumed to
39 | be constant. Furthermore, the ODE is assumed to be linear!
40 | If the supplied Jacobian is a constant approximation, but the ODE
41 | is not linear, then use a callable; see next.
42 | * If callable, the Jacobian is assumed to depend on both
43 | t and y; it will be called as ``jac(t, y)`` as necessary.
44 | The return value might be a sparse matrix.
45 | * If None (default), the Jacobian will be approximated by
46 | finite differences.
47 |
48 | It is generally recommended to provide the Jacobian (options 1 or 2)
49 | rather than relying on a finite-difference (option 3) approximation.
50 | The linear ODE assumption entails that only one iteration is done per
51 | stage, but also that the LU decomposition is done after each change in
52 | step size. If this is undesirable, then supply the jacobian as a
53 | callable (option 2).
54 | jac_sparsity : {None, array_like, sparse matrix}, optional
55 | Defines a sparsity structure of the Jacobian matrix for a
56 | finite-difference approximation. Its shape must be (n, n). This
57 | argument is ignored if `jac` is not `None`. If the Jacobian has only
58 | few non-zero elements in *each* row, providing the sparsity structure
59 | will greatly speed up the computations. A zero entry means that a
60 | corresponding element in the Jacobian is always zero. If None
61 | (default), the Jacobian is assumed to be dense.
62 | M : {None, array_like, sparse}, optional
63 | The method can solve more general problems (index 1 DAEs) of the form:
64 | M y' = f(t, y).
65 | In this case, `M` is a constant matrix of shape (n, n). The user
66 | supplied M can be a 2D matrix (dense or sparse) or, if M is diagonal,
67 | a 1D array of the diagonal. Default: None, which implies `M` is the
68 | identity matrix.
69 | jac_each_step : bool, optional
70 | If True, the jacobian is updated each step. Default: False.
71 | interpolant : {'C0', 'C1'}, optional
72 | Interpolant to use: C0 or C1 continuous. Default is 'C0'.
73 | t0 : float
74 | Initial time.
75 | y0 : array_like, shape (n,)
76 | Initial state.
77 | t_bound : float
78 | Boundary time - the integration won't continue beyond it. It also
79 | determines the direction of the integration.
80 | first_step : float or None, optional
81 | Initial step size. Default is ``None`` which means that the algorithm
82 | should choose.
83 | max_step : float, optional
84 | Maximum allowed step size. Default is np.inf, i.e., the step size is
85 | not bounded and determined solely by the solver.
86 | rtol, atol : float and array_like, optional
87 | Relative and absolute tolerances. The solver keeps the local error
88 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
89 | relative accuracy (number of correct digits).
But if a component of `y`
90 | is approximately below `atol`, the error only needs to fall within
91 | the same `atol` threshold, and the number of correct digits is not
92 | guaranteed. If components of y have different scales, it might be
93 | beneficial to set different `atol` values for different components by
94 | passing array_like with shape (n,) for `atol`. Default values are
95 | 1e-3 for `rtol` and 1e-6 for `atol`.
96 | vectorized : bool, optional
97 | Whether `fun` can be called in a vectorized fashion. Default is False.
98 |
99 | If ``vectorized`` is False, `fun` will always be called with ``y`` of
100 | shape ``(n,)``, where ``n = len(y0)``.
101 |
102 | If ``vectorized`` is True, `fun` may be called with ``y`` of shape
103 | ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
104 | such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
105 | the returned array is the time derivative of the state corresponding
106 | with a column of ``y``).
107 |
108 | Setting ``vectorized=True`` allows for faster finite difference
109 | approximation of the Jacobian by this method, but may result in slower
110 | execution overall in some circumstances (e.g. small ``len(y0)``).
117 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional
118 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step
119 | size controller is, with k the exponent of the standard controller,
120 | _n for new and _o for old:
121 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2
122 | * (err/tol)**-b1 * (err_o/tol_o)**-b2
123 | Predefined parameters are [3]_:
124 | Gustafsson "G" (2, -1, -1, 0.8),
125 | Soederlind "S" (0.6, -0.2, 0, 0.8),
126 | and "standard" (1, 0, 0, 0.8).
127 | These coefficients are different from those in explicit methods. The default
128 | for this method is "G".
129 |
130 | References
131 | ----------
132 | .. [1] A. Kvaerno, "Singly Diagonally Implicit Runge-Kutta Methods with an
133 | Explicit First Stage", BIT Numerical Mathematics, Vol. 44, pp.
134 | 489-502, 2004, https://doi.org/10.1023/B:BITN.0000046811.70614.38
135 | .. [2] L. F. Shampine, "Implementation of Implicit Formulas for the
136 | Solution of ODEs", SIAM Journal on Scientific and Statistical
137 | Computing, Vol. 1, No. 1, pp. 103-118, 1980,
138 | https://doi.org/10.1137/0901005.
139 | .. [3] G.Söderlind, "Automatic Control and Adaptive Time-Stepping",
140 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310.
141 | https://doi.org/10.1023/A:1021160023092 142 | """ 143 | # Kvaerno ESDIRK32a 144 | n_stages = 4 145 | order = 3 146 | order_secondary = 2 147 | kappa = 1/15 # E has large coefficients 148 | filter_error = False # embedded methods has bounded Rh(inf) = 0.9569 149 | # coefficients 150 | d = 0.435866521508459 # = 1./roots_laguerre(3)[0][1] 151 | C = np.array([0, 2*d, 1, 1]) 152 | A = np.array([ 153 | [0, 0, 0, 0], 154 | [d, d, 0, 0], 155 | [(6*d - 4*d*d - 1)/(4*d), (1 - 2*d)/(4*d), d, 0], 156 | [(6*d - 1)/(12*d), -1/(24*d - 12)/d, (6*d - 6*d*d - 1)/(6*d - 3), d]]) 157 | B = A[-1, :] # R(inf) = 0 158 | Bh = A[-2, :] # R_h(inf) = 0.9569 159 | E = Bh - B 160 | Az = np.array([ 161 | [0, 0, 0, 0], 162 | [1, 0, 0, 0], 163 | [1 - 1/(2*d), 1/(2*d), 0, 0], 164 | [0, 0, 1, 0]]) 165 | # C0 interpolant 166 | P0 = np.array([ 167 | [1, -1.07357009006976, 0.382380060046507], 168 | [0, 4.47169016526534, -2.98112677684356], 169 | [1.0452602553351, -5.79624015039116, 3.51574001514907], 170 | [-1.0452602553351, 2.39812007519558, -0.91699329835202]]) 171 | # C1 interpolant, similar error as C0 172 | P1 = np.array([ 173 | [1, -1.07357009006976, 0.382380060046507, 0], 174 | [0, 4.47169016526534, -2.98112677684356, 0], 175 | [0, 0.252689145887228, -5.44633781140245, 3.95840878560824], 176 | [0, -3.65080922108287, 8.04508452819957, -3.95840878560825]]) 177 | P = P0 # default 178 | -------------------------------------------------------------------------------- /extensisq/merson.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import RungeKutta 3 | 4 | 5 | class Me4(RungeKutta): 6 | """Merson's explicit Runge-Kutta method [1]_ of order 4, with an embedded 7 | method for error estimation of order 3 (or 5 for linear time invariant 8 | problems) and a free interpolant of order 3 (4th order polynomial). 9 | 10 | This is the oldest embedded Runge-Kutta method. It has a large stability 11 | domain for a 4th order method, especially on the imaginary axis. 12 | 13 | Can be applied in the complex domain. 14 | 15 | Parameters 16 | ---------- 17 | fun : callable 18 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 19 | Here ``t`` is a scalar, and there are two options for the ndarray 20 | ``y``: It can either have shape (n,); then ``fun`` must return 21 | array_like with shape (n,). Alternatively it can have shape (n, k); 22 | then ``fun`` must return an array_like with shape (n, k), i.e., each 23 | column corresponds to a single column in ``y``. The choice between the 24 | two options is determined by `vectorized` argument (see below). 25 | t0 : float 26 | Initial time. 27 | y0 : array_like, shape (n,) 28 | Initial state. 29 | t_bound : float 30 | Boundary time - the integration won't continue beyond it. It also 31 | determines the direction of the integration. 32 | first_step : float or None, optional 33 | Initial step size. Default is ``None`` which means that the algorithm 34 | should choose. 35 | max_step : float, optional 36 | Maximum allowed step size. Default is np.inf, i.e., the step size is 37 | not bounded and determined solely by the solver. 38 | rtol, atol : float and array_like, optional 39 | Relative and absolute tolerances. The solver keeps the local error 40 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 41 | relative accuracy (number of correct digits). 
But if a component of `y` 42 | is approximately below `atol`, the error only needs to fall within 43 | the same `atol` threshold, and the number of correct digits is not 44 | guaranteed. If components of y have different scales, it might be 45 | beneficial to set different `atol` values for different components by 46 | passing array_like with shape (n,) for `atol`. Default values are 47 | 1e-3 for `rtol` and 1e-6 for `atol`. 48 | vectorized : bool, optional 49 | Whether `fun` is implemented in a vectorized fashion. A vectorized 50 | implementation offers no advantages for this solver. Default is False. 51 | nfev_stiff_detect : int, optional 52 | Number of function evaluations for stiffness detection. This number has 53 | multiple purposes. If it is set to 0, then stiffness detection is 54 | disabled. For other (positive) values it is used to represent a 55 | 'considerable' number of function evaluations (nfev). A stiffness test 56 | is done if many steps fail and each time nfev exceeds integer multiples 57 | of `nfev_stiff_detect`. For the assessment itself, the problem is 58 | assessed as non-stiff if the predicted nfev to complete the integration 59 | is lower than `nfev_stiff_detect`. The default value is 5000. 60 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 61 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 62 | size controller is, with k the exponent of the standard controller, 63 | _n for new and _o for old: 64 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 65 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 66 | Predefined parameters are [2]_: 67 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 68 | Soederlind "S" (0.6, -0.2, 0, 0.9), 69 | and "standard" (1, 0, 0, 0.9). 70 | The default for this method is "G". 71 | 72 | References 73 | ---------- 74 | .. [1] E. Hairer, G. Wanner, S.P. Norsett, "Solving Ordinary Differential 75 | Equations I", Springer Berlin, Heidelberg, 1993, 76 | https://doi.org/10.1007/978-3-540-78862-1 77 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 78 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 79 | https://doi.org/10.1023/A:1021160023092 80 | """ 81 | 82 | # effective number of stages 83 | n_stages = 5 84 | 85 | # order of the main method 86 | order = 4 87 | 88 | # order of the secondary embedded method 89 | order_secondary = 3 90 | 91 | # time fraction coefficients (nodes) 92 | C = np.array([0, 1/3, 1/3, 1/2, 1]) 93 | 94 | # runge kutta coefficient matrix 95 | A = np.array([ 96 | [0, 0, 0, 0, 0], 97 | [1/3, 0, 0, 0, 0], 98 | [1/6, 1/6, 0, 0, 0], 99 | [1/8, 0, 3/8, 0, 0], 100 | [1/2, 0, -3/2, 2, 0]]) 101 | 102 | # output coefficients (weights) 103 | B = np.array([1/6, 0, 0, 2/3, 1/6]) 104 | 105 | # error coefficients (weights Bh - B) 106 | E = np.array([1/10, 0, 3/10, 2/5, 1/5, 0]) # B_hat 107 | E[:-1] -= B 108 | 109 | P = np.array([ 110 | [1, - 3, 11/3, -3/2], 111 | [0, 0, 0, 0], 112 | [0, 27/4, -27/2, 27/4], 113 | [0, -4, 32/3, -6], 114 | [0, -13/10, 49/15, -9/5], 115 | [0, 31/20, -41/10, 51/20]]) 116 | 117 | # Parameters for stiffness detection, optional 118 | stbrad = 3.4 119 | tanang = 20. 
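
# ----------------------------------------------------------------------------
# Usage sketch (not part of merson.py): passing the Me4 class documented
# above to scipy.integrate.solve_ivp. The test problem (an undamped
# oscillator), the time span and the tolerances are illustrative
# assumptions, not taken from the package.
from scipy.integrate import solve_ivp
from extensisq import Me4


def f(t, y):
    # y = [position, velocity] of an undamped oscillator: y'' = -y
    return [y[1], -y[0]]


sol = solve_ivp(f, (0.0, 10.0), [1.0, 0.0], method=Me4,
                rtol=1e-6, atol=1e-9, dense_output=True)
print(sol.t.size, sol.y[:, -1])   # number of accepted steps, final state
print(sol.sol(5.0))               # evaluate the free interpolant
# ----------------------------------------------------------------------------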
120 | 
121 | # Parameters for stepsize control
122 | sc_params = "G"
123 | 
--------------------------------------------------------------------------------
/extensisq/mikkawy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from extensisq.common import RungeKuttaNystrom
3 | 
4 | 
5 | class MR6NN(RungeKuttaNystrom):
6 | """Explicit Runge-Kutta Nystrom method by El-Mikkawy and Rahmo [1]_ of
7 | order 6. The embedded method has order 4. This method is applicable to
8 | second order initial value problems only. Moreover, these problems must be
9 | independent of the first derivative (velocity). Undamped mechanics is an
10 | example of such a problem.
11 | 
12 | The second order problem should be recast in first order form as
13 | u = [x, v], du = [v, a], with x, v, a variables like position,
14 | velocity and acceleration. The derivative function du = f(t, u) should
15 | calculate only a and pass through v. (The order in u and du matters.) This
16 | is the same form as for general RKN methods in extensisq. So, although
17 | the input of f() contains v, it must not be used in it.
18 | 
19 | This method includes a free C2-continuous sixth order interpolant.
20 | 
21 | Can be applied in the complex domain.
22 | 
23 | Parameters
24 | ----------
25 | fun : callable
26 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
27 | Here ``t`` is a scalar, and there are two options for the ndarray
28 | ``y``: It can either have shape (n,); then ``fun`` must return
29 | array_like with shape (n,). Alternatively it can have shape (n, k);
30 | then ``fun`` must return an array_like with shape (n, k), i.e., each
31 | column corresponds to a single column in ``y``. The choice between the
32 | two options is determined by `vectorized` argument (see below). For
33 | this second order problem, y should contain all solution components
34 | first followed by an equal number of first derivative components of the
35 | solution. Likewise, the returned array should contain the first
36 | derivatives first followed by the second derivatives. (The first
37 | derivatives are identical to those in the input and the second
38 | derivatives are calculated.)
39 | t0 : float
40 | Initial time.
41 | y0 : array_like, shape (n,)
42 | Initial state.
43 | t_bound : float
44 | Boundary time - the integration won't continue beyond it. It also
45 | determines the direction of the integration.
46 | first_step : float or None, optional
47 | Initial step size. Default is ``None`` which means that the algorithm
48 | should choose.
49 | max_step : float, optional
50 | Maximum allowed step size. Default is np.inf, i.e., the step size is
51 | not bounded and determined solely by the solver.
52 | rtol, atol : float and array_like, optional
53 | Relative and absolute tolerances. The solver keeps the local error
54 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
55 | relative accuracy (number of correct digits). But if a component of `y`
56 | is approximately below `atol`, the error only needs to fall within
57 | the same `atol` threshold, and the number of correct digits is not
58 | guaranteed. If components of y have different scales, it might be
59 | beneficial to set different `atol` values for different components by
60 | passing array_like with shape (n,) for `atol`. Default values are
61 | 1e-3 for `rtol` and 1e-6 for `atol`.
62 | vectorized : bool, optional
63 | Whether `fun` is implemented in a vectorized fashion.
A vectorized 64 | implementation offers no advantages for this solver. Default is False. 65 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 66 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 67 | size controller is, with k the exponent of the standard controller, 68 | _n for new and _o for old: 69 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 70 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 71 | Predefined parameters are [2]_: 72 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 73 | Soederlind "S" (0.6, -0.2, 0, 0.9), 74 | and "standard" (1, 0, 0, 0.9). 75 | The default for this method is "G". 76 | 77 | References 78 | ---------- 79 | .. [1] M. El-Mikkawy, E.D. Rahmo, "A new optimized non-FSAL embedded 80 | Runge-Kutta-Nystrom algorithm of orders 6 and 4 in six stages", 81 | Applied Mathematics and Computation, Vol. 145, Issue 1, 2003, 82 | pp. 33-43, https://doi.org/10.1016/S0096-3003(02)00436-8 83 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 84 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 85 | https://doi.org/10.1023/A:1021160023092 86 | """ 87 | n_stages = 6 88 | order = 6 89 | order_secondary = 4 90 | sc_params = "G" 91 | 92 | C = np.array([0, 1/77, 1/3, 2/3, 13/15, 1]) 93 | 94 | A = np.array([ 95 | [0, 0, 0, 0, 0, 0], 96 | [1/11858, 0, 0, 0, 0, 0], 97 | [-7189/17118, 4070/8559, 0, 0, 0, 0], 98 | [4007/2403, -589655/355644, 25217/118548, 0, 0, 0], 99 | [-4477057/843750, 13331783894/2357015625, -281996/5203125, 100 | 563992/7078125, 0, 0], 101 | [17265/2002, -1886451746/212088107, 22401/31339, 2964/127897, 102 | 178125/5428423, 0]]) 103 | # no Ap 104 | 105 | B = np.array([-341/780, 386683451/661053840, 2853/11840, 267/3020, 106 | 9375/410176, 0]) 107 | 108 | Bp = np.array([-341/780, 29774625727/50240091840, 8559/23680, 801/3020, 109 | 140625/820352, 847/18240]) 110 | 111 | E = np.array([-95/39, 89332243/33052692, 317/3552, 623/5436, 54125/1845792, 112 | 0, 0]) 113 | E[:-1] -= B 114 | 115 | Ep = np.array([-95/39, 362030669/132210768, 317/2368, 623/1812, 116 | 270625/1230528, 0, 0]) 117 | Ep[:-1] -= Bp 118 | 119 | P = np.array([ 120 | [1/2, -445/39, 2095/78, -1231/52, 1421/195], 121 | [0, 56490936887/5024009184, -280556420221/10048018368, 122 | 419129707843/16746697280, -195064224509/25120045920], 123 | [0, 951/2368, 2853/4736, -31383/23680, 6657/11840], 124 | [0, -89/151, 267/151, -4539/3020, 623/1510], 125 | [0, 228125/410176, -1790625/820352, 2184375/820352, -415625/410176], 126 | [0, 847/1824, -5929/3648, 11011/6080, -5929/9120], 127 | [0, -2/3, 5/2, -3, 7/6]]) 128 | Pp = P * np.arange(2, 7) # derivative of P 129 | -------------------------------------------------------------------------------- /extensisq/murua.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import ( 3 | RungeKuttaNystrom, QuinticHermiteDenseOutput, HornerDenseOutputNystrom) 4 | 5 | 6 | class Mu5Nmb(RungeKuttaNystrom): 7 | """Explicit Runge-Kutta Nystrom (general) method by Murua [1]_ of order 5. 8 | This method is applicable to second order initial value problems only. The 9 | idea is to repeat the postion across several stages (but vary velocity). 10 | This allows for efficient integration of multibody equations. 11 | 12 | The second order problem should be recast in first order form as 13 | u = [x, v], du = [v, a], with x, v, a variables like, position, 14 | velocity, acceleration. The derivative function du = f(t, u) should 15 | calculate only a and pass through v. 
(The order in u and du matters.)
16 | 
17 | This method can use a free interpolant or a better, more accurate one;
18 | see the `interpolant` parameter below.
19 | Can be applied in the complex domain.
20 | 
21 | Parameters
22 | ----------
23 | fun : callable
24 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
25 | Here ``t`` is a scalar, and there are two options for the ndarray
26 | ``y``: It can either have shape (n,); then ``fun`` must return
27 | array_like with shape (n,). Alternatively it can have shape (n, k);
28 | then ``fun`` must return an array_like with shape (n, k), i.e., each
29 | column corresponds to a single column in ``y``. The choice between the
30 | two options is determined by `vectorized` argument (see below). For
31 | this second order problem, y should contain all solution components
32 | first followed by an equal number of first derivative components of the
33 | solution. Likewise, the returned array should contain the first
34 | derivatives first followed by the second derivatives. (The first
35 | derivatives are identical to those in the input and the second
36 | derivatives are calculated.)
37 | t0 : float
38 | Initial time.
39 | y0 : array_like, shape (n,)
40 | Initial state.
41 | t_bound : float
42 | Boundary time - the integration won't continue beyond it. It also
43 | determines the direction of the integration.
44 | first_step : float or None, optional
45 | Initial step size. Default is ``None`` which means that the algorithm
46 | should choose.
47 | max_step : float, optional
48 | Maximum allowed step size. Default is np.inf, i.e., the step size is
49 | not bounded and determined solely by the solver.
50 | rtol, atol : float and array_like, optional
51 | Relative and absolute tolerances. The solver keeps the local error
52 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
53 | relative accuracy (number of correct digits). But if a component of `y`
54 | is approximately below `atol`, the error only needs to fall within
55 | the same `atol` threshold, and the number of correct digits is not
56 | guaranteed. If components of y have different scales, it might be
57 | beneficial to set different `atol` values for different components by
58 | passing array_like with shape (n,) for `atol`. Default values are
59 | 1e-3 for `rtol` and 1e-6 for `atol`.
60 | vectorized : bool, optional
61 | Whether `fun` is implemented in a vectorized fashion. A vectorized
62 | implementation offers no advantages for this solver. Default is False.
63 | nfev_stiff_detect : int, optional
64 | Number of function evaluations for stiffness detection. This number has
65 | multiple purposes. If it is set to 0, then stiffness detection is
66 | disabled. For other (positive) values it is used to represent a
67 | 'considerable' number of function evaluations (nfev). A stiffness test
68 | is done if many steps fail and each time nfev exceeds integer multiples
69 | of `nfev_stiff_detect`. For the assessment itself, the problem is
70 | assessed as non-stiff if the predicted nfev to complete the integration
71 | is lower than `nfev_stiff_detect`. The default value is 5000.
72 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional
73 | Parameters for the stepsize controller (k*b1, k*b2, a2, g).
The step 74 | size controller is, with k the exponent of the standard controller, 75 | _n for new and _o for old: 76 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 77 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 78 | Predefined parameters are [2]_: 79 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 80 | Soederlind "S" (0.6, -0.2, 0, 0.9), 81 | and "standard" (1, 0, 0, 0.9). 82 | The default for this method is "G". 83 | interpolant : "free" or "better" 84 | Select the interpolant for dense output. 85 | Option "free" is for the 5th order hermite polynomial that satisfies C2 86 | continuity at the solution points. This free interpolant satisfies the 87 | RKN order conditions up to order 4 and requires no extra function 88 | evaluations. The "better" interpolant has teh same RKN order as the 89 | free interpolant, but is much more accurate. The better interpolant 90 | needs one extra function evaluation. Default: "better". 91 | scale_embedded : bool 92 | reduce the sensitivity of error estimate from the embedded method. 93 | Default: True 94 | 95 | References 96 | ---------- 97 | .. [1] A. Murua, "Runge-Kutta-Nyström methods for general second order ODEs 98 | with application to multi-body systems", Applied Numerical 99 | Mathematics, Vol. 28, 1998, pp. 387-399. 100 | https://doi.org/10.1016/S0168-9274(98)00055-5 101 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 102 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 103 | https://doi.org/10.1023/A:1021160023092 104 | """ 105 | n_stages = 9 106 | order = 5 107 | order_secondary = 4 108 | sc_params = "G" 109 | 110 | tanang = 100. 111 | stbre = 2.9 112 | stbim = 4.75 113 | 114 | C = np.array([0, 771/3847, 771/3847, 3051/6788, 4331/6516, 4331/6516, 115 | 10463/11400, 10463/11400, 1]) 116 | Ap = np.array([ 117 | [0, 0, 0, 0, 0, 0, 0, 0, 0], 118 | [771/3847, 0, 0, 0, 0, 0, 0, 0, 0], 119 | [771/7694, 771/7694, 0, 0, 0, 0, 0, 0, 0], 120 | [-264272222/4845505509, -9458865980/12714902623, 121 | 17133165419/13729279360, 0, 0, 0, 0, 0, 0], 122 | [1943604853/18116134489, -2470367896/7636570485, 1733951147/3918733571, 123 | 4613437932/10523350595, 0, 0, 0, 0, 0], 124 | [369952551/2046485744, 281630106828/143708239525, 125 | -9868262031/5606899429, 208606720/5597531799, 792516107/3209667255, 126 | 0, 0, 0, 0], 127 | [-2089737154/15083636501, -39924138556/8175090533, 128 | 72922890855/14010113917, 9484193351/15493195043, 129 | -17895265139/12412283353, 278232/177835, 0, 0, 0], 130 | [-1762013041/13188190032, -22636373880/4795132451, 131 | 30527401913/6048941340, 11564353310/19632283007, 132 | -50677425731/36595197965, 12408/8167, 10722067/5782709432, 0, 0], 133 | [8034174097/12261534992, 72032427203/6782716235, 134 | -90566218637/8185393121, 18770105843/41171085325, 135 | 28010344030/6199889941, -21917292279/4540377286, 136 | -236637914115/8183370127, 71217630373/2409299224, 0]]) 137 | A = np.array([ 138 | [0, 0, 0, 0, 0, 0, 0, 0, 0], 139 | [594441/29598818, 0, 0, 0, 0, 0, 0, 0, 0], 140 | [594441/29598818, 0, 0, 0, 0, 0, 0, 0, 0], 141 | [-311625081/28869248936, 128/8219, 1015645542/10554116159, 142 | 0, 0, 0, 0, 0, 0], 143 | [1852480471/26299626569, -247/14069, 648800762/5897141541, 144 | 519849979/8963946221, 0, 0, 0, 0, 0], 145 | [1852480471/26299626569, -247/14069, 648800762/5897141541, 146 | 519849979/8963946221, 0, 0, 0, 0, 0], 147 | [229929851/7158517178, 113395809/8665398238, 4865737279/19748497543, 148 | 340133672/10137556453, 738/11587, 509108839/15737542787, 0, 0, 0], 149 | [229929851/7158517178, 113395809/8665398238, 
4865737279/19748497543, 150 | 340133672/10137556453, 738/11587, 509108839/15737542787, 0, 0, 0], 151 | [164505448/2653157365, 0, 9357192/40412735, 736403089/7677655029, 152 | 960089/17896194, 482653907/11393392643, -47281957/150822000, 153 | 6715245221/20471724521, 0]]) 154 | Bp = np.array( 155 | [164505448/2653157365, 0, 3042/10505, 1586146904/9104113535, 156 | 4394/27465, 2081836558/16479128289, -50461/13230, 157 | 13928550541/3490062596, 91464477/8242174145]) 158 | B = np.array( 159 | [164505448/2653157365, 0, 9357192/40412735, 736403089/7677655029, 160 | 960089/17896194, 482653907/11393392643, -47281957/150822000, 161 | 6715245221/20471724521, 0]) 162 | Ep = np.array( 163 | [53757362/127184461, 0, -138687950/204047369, 161961633/188152853, 164 | 36242723/103243418, 1/2, 1147554103/9981952, -2395015001/20532034, 165 | 1, 23/100]) 166 | Ep[:-1] -= Bp 167 | E = np.array( 168 | [53757362/127184461, 0, -426604134200/784970228543, 169 | 605250622521/1277181566164, 79190349755/672734111688, 2185/13032, 170 | 1075258194511/113794252800, -2244129055937/234065187600, 0, 0]) 171 | E[:-1] -= B 172 | 173 | # better interpolant 174 | C_extra = 1/2 175 | A_extra = np.array([ 176 | 6272277221/169802071360, 0, 45601101/646603760, 177 | 105407530693976029/5083508586660343092, 261443/143169552, 178 | 4331771911506493999/3004050864175132445232, 264970711/603288000, 179 | -1050721229560409919849/2286323200843459728512, -91464477/52749914528, 180 | 1/64]) 181 | Ap_extra = np.array([ 182 | 4914093243/84901035680, 0, 9036261/29391080, 183 | 1755813615360948893/16945028622201143640, 7300631/238615920, 184 | 18144300078070688533/751012716043783111308, 86944303/80438400, 185 | -1292891362501846999547/1143161600421729864256, 186 | -640251339/131874786320, 1/32]) 187 | P_better = np.array([ 188 | [1/2, -4924143773/3183788838, 2398376727/1061262946, 189 | -1666124677/1061262946, 3332249354/7959472095], 190 | [0, 0, 0, 0, 0], 191 | [0, 18714384/8082547, -39774150/8082547, 161466318/40412735, 192 | -12168/10505], 193 | [0, 7364030890/7677655029, -978508507380924517/423625715555028591, 194 | 4540205538386050898/2118128577775142955, -6344587616/9104113535], 195 | [0, 4800445/8948097, -9572329/5965398, 26273923/14913495, 196 | -17576/27465], 197 | [0, 4826539070/11393392643, 198 | -237901641408829340815/187753179010945777827, 199 | 87064975276817078628/62584393003648592609, -8327346232/16479128289], 200 | [0, -47281957/15082200, 79677919/3351600, -910165057/25137000, 201 | 100922/6615], 202 | [0, 67152452210/20471724521, 203 | -1777256640792585385045/71447600026358116516, 204 | 2706892803882276765045/71447600026358116516, -13928550541/872515649], 205 | [0, 0, -91464477/1648434829, 823180293/8242174145, 206 | -365857908/8242174145], 207 | [0, -1/6, 1, -3/2, 2/3], 208 | [0, -8/3, 8, -8, 8/3]]) 209 | Pp_better = P_better * np.arange(2, 7) # derivative of P_better 210 | 211 | def __init__(self, fun, t0, y0, t_bound, 212 | interpolant='better', scale_embedded=True, **extraneous): 213 | super().__init__(fun, t0, y0, t_bound, **extraneous) 214 | # custom initialization to create extended storage for dense output 215 | if interpolant not in ('better', 'free'): 216 | raise ValueError( 217 | "interpolant should be one of: 'free', 'better'") 218 | self.interpolant = interpolant 219 | if interpolant == 'better': 220 | self.K_extended = np.zeros(( 221 | self.n_stages + 2, self.n), dtype=self.y.dtype) 222 | self.K = self.K_extended[:self.n_stages+1] 223 | if scale_embedded: 224 | factor = 0.75 225 | self.E *= factor 226 | self.Ep *= 
factor
227 | 
228 | def _dense_output_impl(self):
229 | if self.interpolant == 'free':
230 | return QuinticHermiteDenseOutput(
231 | self.t_old, self.t, self.y_old, self.y, self.f_old, self.f)
232 | # else:
233 | h = self.h_previous
234 | K = self.K_extended
235 | 
236 | # extra stage
237 | s = self.n_stages + 1
238 | dt = self.C_extra * h
239 | du = (self.K.T @ self.A_extra) * h**2 + dt * self.y_old[self.n:]
240 | dv = (self.K.T @ self.Ap_extra) * h
241 | dy = np.concatenate((du, dv))
242 | K[s] = self.fun(self.t_old + dt, self.y_old + dy)
243 | 
244 | Q = K.T @ self.P_better
245 | Qp = K.T @ self.Pp_better
246 | return HornerDenseOutputNystrom(self.t_old, self.t, self.y_old, Q, Qp)
247 | 
--------------------------------------------------------------------------------
/extensisq/sommeijer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from math import sqrt, sinh, cosh, log
3 | from warnings import warn
4 | from scipy.integrate._ivp.common import (
5 | validate_max_step, validate_first_step, warn_extraneous)
6 | from scipy.integrate._ivp.base import OdeSolver
7 | from extensisq.common import (validate_tol, CubicDenseOutput, NFS, norm,
8 | calculate_scale)
9 | 
10 | 
11 | # global counters, several were removed
12 | nrejct = NFS # nr of rejected steps
13 | nfesig = np.array(0) # nr of fun evals for rho estimation
14 | maxm = np.array(0) # max nr of stages used
15 | 
16 | 
17 | class SSV2stab(OdeSolver):
18 | """Stabilized Runge Kutta Chebyshev method of Sommeijer, Shampine and
19 | Verwer [1].
20 | 
21 | This is a translation of the Fortran code rkc.f [2]. It is a variable step
22 | size, variable formula code to explicitly and efficiently solve a class of
23 | large systems of mildly stiff ordinary differential equations. The number
24 | of stages in this method is adapted in each step to stretch the stability
25 | region along the real axis as much as necessary.
26 | 
27 | This method is particularly suited for initial value problems arising from
28 | semi-discretization of diffusion-dominated parabolic partial differential
29 | equations. The accuracy of such problems is limited by the spatial
30 | discretization. Therefore the low (second) order temporal convergence of
31 | this method is appropriate. Since this is an explicit method, no
32 | costly solves of large systems of linear equations are needed.
33 | 
34 | Parameters
35 | ----------
36 | fun : callable
37 | Right-hand side of the system. The calling signature is ``fun(t, y)``.
38 | Here ``t`` is a scalar, and there are two options for the ndarray
39 | ``y``: It can either have shape (n,); then ``fun`` must return
40 | array_like with shape (n,). Alternatively it can have shape (n, k);
41 | then ``fun`` must return an array_like with shape (n, k), i.e., each
42 | column corresponds to a single column in ``y``. The choice between the
43 | two options is determined by `vectorized` argument (see below).
44 | t0 : float
45 | Initial time.
46 | y0 : array_like, shape (n,)
47 | Initial state.
48 | t_bound : float
49 | Boundary time - the integration won't continue beyond it. It also
50 | determines the direction of the integration.
51 | first_step : float or None, optional
52 | Initial step size. Default is ``None`` which means that the algorithm
53 | should choose.
54 | max_step : float, optional
55 | Maximum allowed step size. Default is np.inf, i.e., the step size is
56 | not bounded and determined solely by the solver.
57 | rtol, atol : float and array_like, optional 58 | Relative and absolute tolerances. The solver keeps the local error 59 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 60 | relative accuracy (number of correct digits). But if a component of `y` 61 | is approximately below `atol`, the error only needs to fall within 62 | the same `atol` threshold, and the number of correct digits is not 63 | guaranteed. If components of y have different scales, it might be 64 | beneficial to set different `atol` values for different components by 65 | passing array_like with shape (n,) for `atol`. Default values are 66 | 1e-3 for `rtol` and 1e-6 for `atol`. 67 | vectorized : bool, optional 68 | Whether `fun` is implemented in a vectorized fashion. A vectorized 69 | implementation offers no advantages for this solver. Default is False. 70 | const_jac : bool, optional 71 | If your problem has a constant Jacobian, then the spectral radius needs 72 | to be estimated only once. Setting const_jac=True will inform the 73 | method, resulting in a slight efficiency increase. Default: False. 74 | rho_jac : None or callable, optional 75 | If the upper bound of the spectral radius of your problem can be given 76 | in a simple, fast to evaluate expression, then you can inform the 77 | method using a function with signature: sprad = rho_jac(t, y). (`*args` 78 | is not passed to `rho_jac`.) This is more efficient than using power 79 | iterations to find a spectral radius estimate, as is done by default: 80 | rho_jac=None. `rho_jac` is called every step if const_jac==False. 81 | 82 | References 83 | ---------- 84 | .. [1] B.P. Sommeijer, L.F. Shampine, J.G. Verwer, "RKC: An explicit solver 85 | for parabolic PDEs", Journal of Computational and Applied 86 | Mathematics, Vol. 88, No. 2, 1998, pp. 315-326. 87 | https://doi.org/10.1016/S0377-0427(97)00219-7 88 | .. [2] Fortran code rkc.f. 89 | http://www.netlib.no/netlib/ode/ 90 | """ 91 | # the main modifications are marked with "# mod" 92 | 93 | def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, rtol=1e-3, 94 | atol=1e-6, vectorized=False, first_step=None, 95 | const_jac=False, rho_jac=None, **extraneous): 96 | warn_extraneous(extraneous) 97 | super().__init__( 98 | fun, t0, y0, t_bound, vectorized, support_complex=False) 99 | if first_step is None: 100 | self.absh = None 101 | else: 102 | self.absh = validate_first_step(first_step, t0, t_bound) 103 | self.hold = None 104 | if not isinstance(const_jac, bool): 105 | raise TypeError('`const_jac` should be True or False') 106 | if rho_jac is not None: 107 | if not callable(rho_jac): 108 | raise TypeError( 109 | '`rho_jac` should be None or a function: ' 110 | '`sprad = rho_jac(t, y)`') 111 | elif not isinstance(rho_jac(self.t, self.y), float): 112 | raise TypeError('`rho_jac` should return a float') 113 | elif rho_jac(self.t, self.y) <= 0: 114 | raise ValueError('`rho_jac` should return a positive float') 115 | self.const_jac = const_jac 116 | self.rho_jac = rho_jac 117 | self.max_step = validate_max_step(max_step) 118 | self.rtol, self.atol = validate_tol(rtol, atol, self.y) 119 | self.uround = np.nextafter(np.finfo(self.y.dtype).epsneg, 1) 120 | self.sqrtu = sqrt(self.uround) 121 | self.sqrtmin = sqrt(np.finfo(self.y.dtype).tiny) 122 | self.W = np.empty((4, self.n), self.y.dtype) 123 | self.V = None # eigenvector for spectral radius estimation. It 124 | # was WORK(5) in Fortran. Init in self._rho(). 
125 | 126 | # reset counters 127 | nrejct[()] = 0 128 | nfesig[()] = 0 129 | maxm[()] = 0 130 | self.nstsig = 0 131 | self.mlim = 0 # added, for stiffness warning 132 | 133 | # Initialize on the first call. 134 | mmax = int(round(sqrt(self.rtol/(10.0 * self.uround)))) 135 | self.mmax = max(mmax, 2) 136 | self.newspc = True 137 | self.jacatt = False 138 | self.W[0] = self.y 139 | self.W[1] = self.fun(self.t, self.y) # evaluate 140 | max_step = min(self.max_step, abs(self.t_bound - self.t)) 141 | self.max_step = min(max_step, sqrt(np.finfo(self.y.dtype).max)) 142 | hmin = abs(self.t) 143 | if self.t_bound != np.inf: 144 | hmin = max(hmin, abs(self.max_step)) 145 | self.hmin = max(self.sqrtmin, 10.0 * self.uround * hmin) 146 | 147 | def _init_step_size(self, t, yn, fn, vtemp1, vtemp2): 148 | absh = self.max_step 149 | if self.sprad * absh > 1.0: 150 | absh = 1.0 / self.sprad 151 | absh = max(absh, self.hmin) 152 | vtemp1[:] = yn + absh * fn 153 | vtemp2[:] = self.fun(t + absh, vtemp1) # evaluate 154 | wt = self.atol + self.rtol * np.abs(yn) 155 | est = absh * norm((vtemp2 - fn) / wt) 156 | if 0.1 * absh < self.max_step * sqrt(est): 157 | absh = max(0.1 * absh/sqrt(est), self.hmin) 158 | else: 159 | absh = self.max_step 160 | return absh 161 | 162 | def _step_impl(self): 163 | """original: subroutine RKCLOW in rkc.f""" 164 | t = self.t 165 | absh = self.absh 166 | y = self.y.copy() 167 | yn, fn, vtemp1, vtemp2 = self.W 168 | one3rd = 1/3 169 | two3rd = 2/3 170 | 171 | # Start of loop for taking one step. 172 | while True: 173 | # Estimate the spectral radius of the Jacobian when newspc=True. 174 | if self.newspc: 175 | if self.rho_jac is not None: 176 | self.sprad = self.rho_jac(t, yn) 177 | else: 178 | self.sprad = self._rho(t, yn, fn, vtemp1, vtemp2) 179 | if self.sprad is None: 180 | return False, ( 181 | "The method to estimate the spectral radius " 182 | "of the Jacobian did not converge") 183 | self.jacatt = True 184 | 185 | # Compute an initial step size. 186 | if absh is None: 187 | absh = self._init_step_size(t, yn, fn, vtemp1, vtemp2) 188 | 189 | # Adjust the step size and determine the number of stages m. 190 | if 1.1 * absh >= abs(self.t_bound - t): 191 | absh = abs(self.t_bound - t) 192 | m = 1 + int(sqrt(1.54 * absh * self.sprad + 1.0)) 193 | 194 | # Limit m to mmax to control the growth of roundoff error. 195 | if m > self.mmax: 196 | m = self.mmax 197 | absh = (m**2 - 1) / (1.54 * self.sprad) 198 | # added stiffness warning: 199 | self.mlim += 1 200 | if self.mlim == 15: 201 | warn('Your problem is too stiff for this method.') 202 | else: 203 | self.mlim = 0 204 | maxm[()] = max(m, maxm[()]) 205 | 206 | # A tentative solution at t+h is returned in y and its slope is 207 | # evaluated in vtemp1(*). Mod: a factor 4/3*(m**2-1) and a lower 208 | # bound are added to the calculation of hmin. 209 | h = self.direction * absh 210 | # hmin = 10.0 * self.uround * max(abs(t) + abs(t+h)) # original 211 | hmin = max(self.sqrtmin, 212 | 13.3*self.uround*(abs(t) + absh)*(m**2 - 1)) # mod 213 | self._stages(t, yn, fn, h, m, y, vtemp1, vtemp2) # stages 214 | vtemp1[:] = self.fun(t + h, y) # evaluate 215 | 216 | # Estimate the local error and compute its weighted RMS norm. 217 | # original: 218 | wt = calculate_scale(self.atol, self.rtol, y, yn) 219 | est = 0.8 * (yn - y) + 0.4 * h * (fn + vtemp1) 220 | err = norm(est / wt) 221 | 222 | if err < 1.0: 223 | # Step is accepted. 224 | break 225 | else: 226 | # Step is rejected. 
227 | if np.isnan(err) or np.isinf(err): 228 | return False, "Overflow or underflow encountered." 229 | nrejct[()] += 1 230 | absh = 0.8 * absh / err**one3rd 231 | if absh < hmin: 232 | return False, self.TOO_SMALL_STEP 233 | else: 234 | self.newspc = not self.jacatt 235 | self.absh = absh 236 | 237 | # Step is accepted. 238 | t += h 239 | self.jacatt = self.const_jac 240 | self.nstsig = (self.nstsig + 1) % 25 241 | self.newspc = False 242 | if self.rho_jac is not None or self.nstsig == 0: 243 | self.newspc = not self.jacatt 244 | 245 | # Update the data for interpolation stored in W(*). 246 | ylast = yn.copy() 247 | yplast = fn.copy() 248 | yn[:] = y 249 | fn[:] = vtemp1 250 | vtemp1[:] = ylast 251 | vtemp2[:] = yplast 252 | fac = 10.0 253 | if self.hold is None: 254 | temp2 = err**one3rd 255 | if 0.8 < fac * temp2: 256 | fac = 0.8 / temp2 257 | else: 258 | # H220 dead-beat control (Soederlind's label) 259 | temp1 = 0.8 * absh * self.errold**one3rd 260 | temp2 = abs(self.hold) * err**two3rd 261 | if temp1 < fac * temp2: 262 | fac = temp1 / temp2 263 | absh = max(0.1, fac) * absh 264 | self.absh = max(hmin, min(self.max_step, absh)) 265 | self.errold = err 266 | self.hold = h 267 | 268 | # output 269 | self.y = y 270 | self.t = t 271 | return True, None 272 | 273 | def _stages(self, t, yn, fn, h, m, y, yjm1, yjm2): 274 | """Take a step of size h from t to t+h to get y(*). 275 | 276 | original: subroutine STEP in rkc.f""" 277 | 278 | w0 = 1.0 + 2.0 / (13.0 * m**2) 279 | temp1 = w0**2 - 1.0 280 | temp2 = sqrt(temp1) 281 | arg = m * log(w0 + temp2) 282 | w1 = sinh(arg) * temp1 / (cosh(arg) * m * temp2 - w0 * sinh(arg)) 283 | bjm1 = 1.0 / (2.0 * w0)**2 284 | bjm2 = bjm1 285 | 286 | # Evaluate the first stage. 287 | yjm2[:] = yn 288 | mus = w1 * bjm1 289 | yjm1[:] = yn + h * mus * fn 290 | thjm2 = 0.0 291 | thjm1 = mus 292 | zjm1 = w0 293 | zjm2 = 1.0 294 | dzjm1 = 1.0 295 | dzjm2 = 0.0 296 | d2zjm1 = 0.0 297 | d2zjm2 = 0.0 298 | 299 | # Evaluate stages j = 2,...,m. 300 | for j in range(2, m + 1): 301 | zj = 2.0 * w0 * zjm1 - zjm2 302 | dzj = 2.0 * w0 * dzjm1 - dzjm2 + 2.0 * zjm1 303 | d2zj = 2.0 * w0 * d2zjm1 - d2zjm2 + 4.0 * dzjm1 304 | bj = d2zj / dzj**2 305 | ajm1 = 1.0 - zjm1 * bjm1 306 | mu = 2.0 * w0 * bj / bjm1 307 | nu = -bj / bjm2 308 | mus = mu * w1/w0 309 | 310 | # Use the y array for temporary storage here. 311 | y[:] = self.fun(t + h * thjm1, yjm1) # evaluate 312 | y[:] = (mu * yjm1 + nu * yjm2 + (1.0 - mu - nu) * yn + 313 | h * mus * (y - ajm1 * fn)) 314 | thj = mu * thjm1 + nu * thjm2 + mus * (1.0 - ajm1) 315 | 316 | # Shift the data for the next stage. 317 | if j < m: 318 | yjm2[:] = yjm1 319 | yjm1[:] = y 320 | thjm2 = thjm1 321 | thjm1 = thj 322 | bjm2 = bjm1 323 | bjm1 = bj 324 | zjm2 = zjm1 325 | zjm1 = zj 326 | dzjm2 = dzjm1 327 | dzjm1 = dzj 328 | d2zjm2 = d2zjm1 329 | d2zjm1 = d2zj 330 | 331 | def _rho(self, t, yn, fn, v, fv): 332 | """_rho() attempts to compute a close upper bound, SPRAD, on the 333 | spectral radius of the Jacobian matrix using a nonlinear power method. 334 | A convergence failure is reported returning None. 335 | 336 | original: subroutine RKCRHO in rkc.f 337 | """ 338 | 339 | # sprad smaller than small = 1/hmax are not interesting because 340 | # they do not constrain the step size. 341 | small = 1.0 / self.max_step 342 | 343 | # The initial slope is used as first guess and thereafter the last 344 | # computed eigenvector. Some care is needed to deal with special 345 | # cases. 
Approximations to the eigenvector are normalized so that their 346 | # Euclidean norm has the constant value dynrm. 347 | if self.V is None: 348 | self.V = fn.copy() 349 | v[:] = self.V 350 | ynrm = np.linalg.norm(yn) 351 | vnrm = np.linalg.norm(v) 352 | if ynrm != 0.0 and vnrm != 0.0: 353 | dynrm = ynrm * self.sqrtu 354 | v[:] = yn + v * (dynrm/vnrm) 355 | elif ynrm != 0.0: 356 | dynrm = ynrm * self.sqrtu 357 | v[:] *= 1.0 + self.sqrtu 358 | elif vnrm != 0.0: 359 | dynrm = self.uround 360 | v[:] *= dynrm/vnrm 361 | else: 362 | dynrm = self.uround 363 | v[:] = dynrm 364 | 365 | # Now iterate with a nonlinear power method. 366 | sigma = 0.0 367 | itmax = 50 368 | for iter in range(itmax): 369 | # evaluation with fun_single does not increment the nfev counter of 370 | # scipy. This is a convention for Jacobian estimation, which is not 371 | # unlike the spectral radius estimation we are doing here. 372 | fv[:] = self.fun_single(t, v) # evaluate 373 | nfesig[()] += 1 374 | dfnrm = np.linalg.norm(fv - fn) 375 | sigmal = sigma 376 | sigma = dfnrm / dynrm 377 | 378 | # sprad is a little bigger than the estimate sigma of the 379 | # spectral radius, so is more likely to be an upper bound. 380 | sprad = 1.2 * sigma 381 | if iter and abs(sigma - sigmal) <= max(sigma, small) * 0.01: 382 | # converged 383 | self.V[:] = v - yn 384 | return sprad 385 | 386 | # The next v(*) is the change in f 387 | # scaled so that norm(v - yn) = dynrm. 388 | if dfnrm != 0.0: 389 | v[:] = yn + (fv - fn) * (dynrm/dfnrm) 390 | else: 391 | # The new v(*) degenerated to yn(*)--"randomly" perturb 392 | # current approximation to the eigenvector by changing 393 | # the sign of one component. 394 | index = iter % self.n 395 | v[index] = -v[index] 396 | 397 | # return None to report a convergence failure. 398 | return None 399 | 400 | def _dense_output_impl(self): 401 | """Cubic Hermite spline for C1 continuous dense output. 402 | 403 | instead of: subroutine RKCINT in rkc.f 404 | """ 405 | y, f, y_old, f_old = self.W[:4].copy() 406 | return CubicDenseOutput(self.t_old, self.t, y_old, y, f_old, f) 407 | -------------------------------------------------------------------------------- /extensisq/tsitouras.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from extensisq.common import RungeKutta 3 | 4 | 5 | class Ts5(RungeKutta): 6 | """Explicit Runge-Kutta method of order 5, with an error estimate of order 7 | 4 and a free interpolant of order 4. 8 | 9 | This method mainly differs from RK45 (scipy default) by the values of its 10 | coefficients. These coefficients have been derived with fewer simplifying 11 | assumptions [1]_. This results in an increased efficiency in most cases. 12 | 13 | Can be applied in the complex domain. 14 | 15 | Parameters 16 | ---------- 17 | fun : callable 18 | Right-hand side of the system. The calling signature is ``fun(t, y)``. 19 | Here ``t`` is a scalar, and there are two options for the ndarray 20 | ``y``: It can either have shape (n,); then ``fun`` must return 21 | array_like with shape (n,). Alternatively it can have shape (n, k); 22 | then ``fun`` must return an array_like with shape (n, k), i.e., each 23 | column corresponds to a single column in ``y``. The choice between the 24 | two options is determined by `vectorized` argument (see below). 25 | t0 : float 26 | Initial time. 27 | y0 : array_like, shape (n,) 28 | Initial state. 29 | t_bound : float 30 | Boundary time - the integration won't continue beyond it. 
It also 31 | determines the direction of the integration. 32 | first_step : float or None, optional 33 | Initial step size. Default is ``None`` which means that the algorithm 34 | should choose. 35 | max_step : float, optional 36 | Maximum allowed step size. Default is np.inf, i.e., the step size is 37 | not bounded and determined solely by the solver. 38 | rtol, atol : float and array_like, optional 39 | Relative and absolute tolerances. The solver keeps the local error 40 | estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a 41 | relative accuracy (number of correct digits). But if a component of `y` 42 | is approximately below `atol`, the error only needs to fall within 43 | the same `atol` threshold, and the number of correct digits is not 44 | guaranteed. If components of y have different scales, it might be 45 | beneficial to set different `atol` values for different components by 46 | passing array_like with shape (n,) for `atol`. Default values are 47 | 1e-3 for `rtol` and 1e-6 for `atol`. 48 | vectorized : bool, optional 49 | Whether `fun` is implemented in a vectorized fashion. A vectorized 50 | implementation offers no advantages for this solver. Default is False. 51 | nfev_stiff_detect : int, optional 52 | Number of function evaluations for stiffness detection. This number has 53 | multiple purposes. If it is set to 0, then stiffness detection is 54 | disabled. For other (positive) values it is used to represent a 55 | 'considerable' number of function evaluations (nfev). A stiffness test 56 | is done if many steps fail and each time nfev exceeds integer multiples 57 | of `nfev_stiff_detect`. For the assessment itself, the problem is 58 | assessed as non-stiff if the predicted nfev to complete the integration 59 | is lower than `nfev_stiff_detect`. The default value is 5000. 60 | sc_params : tuple of size 4, "standard", "G", "H" or "W", optional 61 | Parameters for the stepsize controller (k*b1, k*b2, a2, g). The step 62 | size controller is, with k the exponent of the standard controller, 63 | _n for new and _o for old: 64 | h_n = h * g**(k*b1 + k*b2) * (h/h_o)**-a2 65 | * (err/tol)**-b1 * (err_o/tol_o)**-b2 66 | Predefined parameters are [2]_: 67 | Gustafsson "G" (0.7, -0.4, 0, 0.9), 68 | Soederlind "S" (0.6, -0.2, 0, 0.9), 69 | and "standard" (1, 0, 0, 0.9). 70 | The default for this method is "G". 71 | 72 | References 73 | ---------- 74 | .. [1] Ch. Tsitouras, "Runge-Kutta pairs of order 5(4) satisfying only the 75 | first column simplifying assumption", Computers & Mathematics with 76 | Applications, Vol. 62, No. 2, pp. 770 - 775, 2011. 77 | https://doi.org/10.1016/j.camwa.2011.06.002 78 | .. [2] G.Söderlind, "Automatic Control and Adaptive Time-Stepping", 79 | Numerical Algorithms, Vol. 31, No. 1, 2002, pp. 281-310. 
80 | https://doi.org/10.1023/A:1021160023092 81 | """ 82 | 83 | order = 5 84 | order_secondary = 4 85 | n_stages = 6 # effective nr 86 | tanang = 3.0 87 | stbrad = 3.5 88 | sc_params = "G" 89 | 90 | C = np.array([0, 0.161, 0.327, 0.9, 0.9800255409045097, 1]) 91 | A = np.array([ 92 | [0, 0, 0, 0, 0, 0], 93 | [0, 0, 0, 0, 0, 0], 94 | [0, 0.3354806554923570, 0, 0, 0, 0], 95 | [0, -6.359448489975075, 4.362295432869581, 0, 0, 0], 96 | [0, -11.74888356406283, 7.495539342889836, -0.09249506636175525, 97 | 0, 0], 98 | [0, -12.92096931784711, 8.159367898576159, -0.07158497328140100, 99 | -0.02826905039406838, 0.0]]) 100 | A[:, 0] = C - A.sum(axis=1) 101 | B = np.array([ 102 | 0.09646076681806523, 0.01, 0.4798896504144996, 1.379008574103742, 103 | -3.290069515436081, 2.324710524099774]) 104 | E = np.array([ 105 | 0.001780011052226, 0.000816434459657, -0.007880878010262, 106 | 0.144711007173263, -0.582357165452555, 0.458082105929187, 107 | -1/66]) # last term corrected with a minus sign 108 | P = np.array([ 109 | [1, -2.763706197274826, 2.9132554618219126, -1.0530884977290216], 110 | [0, 0.13169999999999998, -0.2234, 0.1017], 111 | [0, 3.930296236894751, -5.941033872131505, 2.490627285651253], 112 | [0, -12.411077166933676, 30.338188630282318, -16.548102889244902], 113 | [0, 37.50931341651104, -88.1789048947664, 47.37952196281928], 114 | [0, -27.896526289197286, 65.09189467479368, -34.87065786149661], 115 | [0, 1.5, -4.0, 2.5]]) 116 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="extensisq", 8 | version="0.6.0", 9 | author="W.R. 
Kampinga", 10 | author_email='wrkampi@tuta.io', 11 | description="Extend scipy.integrate with various methods for solve_ivp", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/WRKampi/extensisq", 15 | packages=setuptools.find_packages(), 16 | install_requires=[ 17 | "numpy>=2.2.0", 18 | "scipy>=1.15.0", 19 | ], 20 | keywords=[ 21 | 'ode', 'ode-solver', 'ivp', 'ivp-methods', 'scipy', 'scipy-integrate', 22 | 'runge-kutta', 'runge-kutta-nystrom', 'differential-equations', 23 | 'cash-karp', 'prince', 'bogacki-shampine', 'adams', 'shampine-gordon', 24 | 'adams-bashforth-moulton', 'ode113', 'predictor-corrector', 'solver', 25 | 'sensitivity', 'sensitivity-analysis', 'trbdf2', 'trx2', 'esdirk', 26 | 'dae' 27 | ], 28 | classifiers=[ 29 | "Development Status :: 3 - Alpha", 30 | "Programming Language :: Python :: 3", 31 | "License :: OSI Approved :: MIT License", 32 | "Operating System :: OS Independent", 33 | "Intended Audience :: Science/Research", 34 | "Intended Audience :: Developers", 35 | "Topic :: Scientific/Engineering", 36 | "Topic :: Scientific/Engineering :: Mathematics", 37 | "Topic :: Scientific/Engineering :: Physics" 38 | ], 39 | python_requires='>=3.10', 40 | tests_require=['pytest'] 41 | ) 42 | -------------------------------------------------------------------------------- /tests/order_conditions.py: -------------------------------------------------------------------------------- 1 | # import sympy 2 | from math import factorial 3 | import numpy as np 4 | 5 | 6 | def calc_egps(order, c, A, Ap): 7 | """return w, w1, eta, gamma, phik 8 | Ap = A 9 | A = alpha 10 | """ 11 | # c = sympy.Matrix(c) 12 | # A = sympy.Matrix(A) 13 | # Ap = sympy.Matrix(Ap) 14 | c = np.atleast_2d(c).T 15 | C = np.diag(c[:, 0]) 16 | e = np.ones([len(c), 1]) 17 | dat = { 18 | 0 : None, 19 | 1 : ((1, 2, 1, 1, e),), 20 | 2 : ((2, 3, 1, 2, c),), 21 | 3 : ((7, 6, 1, 3, C@c), 22 | (2, 5, 1, 6, Ap@c)), 23 | 4 : ((38, 20, 1, 4, C@C@c), 24 | (4, 6, 3, 8, C@Ap@c), 25 | (2, 0, 1, 24, A@c), 26 | (7, 12, 1, 12, Ap@C@c), 27 | (2, 8, 1, 24, Ap@Ap@c)), 28 | 5 : ((295, 70, 1, 5, C@C@C@c), 29 | (14, 18, 6, 10, C@C@Ap@c), 30 | (4, 0, 4, 30, C@A@c), 31 | (14, 12, 4, 15, C@Ap@C@c), 32 | (4, 10, 4, 30, C@Ap@Ap@c), 33 | (7, 0, 1, 60, A@C@c), 34 | (2, 0, 1, 120, A@Ap@c), 35 | (6, 15, 3, 20, np.diag((Ap@c)[:, 0])@(Ap@c)), 36 | (38, 40, 1, 20, Ap@C@C@c), 37 | (4, 12, 3, 40, Ap@C@Ap@c), 38 | (2, 0, 1, 120, Ap@A@c), 39 | (7, 18, 1, 60, Ap@Ap@C@c), 40 | (2, 13, 1, 120, Ap@Ap@Ap@c)), 41 | 6 : ((2702, 251, 1, 6, C@C@C@C@c), 42 | (76, 60, 10, 12, C@C@C@Ap@c), 43 | (14, 0, 10, 36, C@C@A@c), 44 | (49, 36, 10, 18, C@C@Ap@C@c), 45 | (14, 30, 10, 36, C@C@Ap@Ap@c), 46 | (14, 0, 5, 72, C@A@C@c), 47 | (4, 0, 5, 144, C@A@Ap@c), 48 | (12, 30, 15, 24, C@np.diag((Ap@c)[:, 0])@(Ap@c)), 49 | (4, 0, 10, 72, np.diag((A@c)[:, 0])@(Ap@c)), 50 | (76, 40, 5, 24, C@Ap@C@C@c), 51 | (8, 12, 15, 48, C@Ap@C@Ap@c), 52 | (4, 0, 5, 144, C@Ap@A@c), 53 | (14, 24, 5, 72, C@Ap@Ap@C@c), 54 | (4, 16, 5, 144, C@Ap@Ap@Ap@c), 55 | (38, 0, 1, 120, A@C@C@c), 56 | (4, 0, 3, 240, A@C@Ap@c), 57 | (2, 0, 1, 720, A@A@c), 58 | (7, 0, 1, 360, A@Ap@C@c), 59 | (2, 0, 1, 720, A@Ap@Ap@c), 60 | (14, 18, 10, 36, np.diag((Ap@C@c)[:, 0])@(Ap@c)), 61 | (4, 15, 10, 72, np.diag((Ap@c)[:, 0])@(Ap@Ap@c)), 62 | (295, 140, 1, 30, Ap@C@C@C@c), # **4 missing in paper Fine 63 | (14, 36, 6, 60, Ap@C@C@Ap@c), 64 | (4, 0, 4, 180, Ap@C@A@c), 65 | (14, 24, 8, 90, Ap@C@Ap@C@c), 66 | (4, 20, 4, 180, Ap@C@Ap@Ap@c), 67 | (7, 0, 1, 360, 
Ap@A@C@c), 68 | (2, 0, 1, 720, Ap@A@Ap@c), 69 | (6, 30, 3, 120, Ap@np.diag((Ap@c)[:, 0])@(Ap@c)), 70 | (38, 60, 1, 120, Ap@Ap@C@C@c), 71 | (4, 18, 3, 240, Ap@Ap@C@Ap@c), 72 | (2, 0, 1, 720, Ap@Ap@A@c), 73 | (7, 30, 1, 360, Ap@Ap@Ap@C@c), 74 | (2, 31, 1, 720, Ap@Ap@Ap@Ap@c)), 75 | 7 : (# From Fehlberg 76 | #1: 77 | (None, None, 1, 7, C@C@C@C@C@c), 78 | (None, None, 15, 14, C@C@C@C@Ap@c), 79 | (None, 0, 20, 42, C@C@C@A@c), 80 | (None, None, 20, 21, C@C@C@Ap@C@c), 81 | (None, 0, 15, 84, C@C@A@C@c), 82 | (None, None, 15, 28, C@C@Ap@C@C@c), 83 | (None, None, 45, 28, C@C@np.diag((Ap@c)[:, 0])@(Ap@c)), 84 | #8: 85 | (None, 0, 6, 140, C@A@C@C@c), 86 | (None, 0, 60, 84, C@np.diag((A@c)[:, 0])@(Ap@c)), 87 | (None, None, 6, 35, C@Ap@C@C@C@c), 88 | (None, None, 60, 42, C@np.diag((Ap@C@c)[:, 0])@(Ap@c)), 89 | (None, 0, 1, 210, A@C@C@C@c), 90 | (None, 0, 10, 252, np.diag((A@c)[:, 0])@(A@c)), 91 | (None, 0, 15, 168, np.diag((A@C@c)[:, 0])@(Ap@c)), 92 | (None, 0, 20, 126, np.diag((A@c)[:, 0])@(Ap@C@c)), 93 | (None, None, 1, 42, Ap@C@C@C@C@c), 94 | (None, None, 15, 56, np.diag((Ap@C@C@c)[:, 0])@(Ap@c)), 95 | (None, None, 10, 63, np.diag((Ap@C@c)[:, 0])@(Ap@C@c)), 96 | (None, None, 15, 56, np.diag((Ap@c)[:, 0])@np.diag((Ap@c)[:, 0])@(Ap@c)), 97 | (None, None, 20, 42, C@C@C@Ap@Ap@c), 98 | (None, 0, 15, 168, C@C@A@Ap@c), 99 | #22: 100 | (None, None, 45, 56, C@C@Ap@C@Ap@c), 101 | (None, 0, 15, 168, C@C@Ap@A@c), 102 | (None, None, 15, 84, C@C@Ap@Ap@C@c), 103 | (None, 0, 18, 280, C@A@C@Ap@c), 104 | (None, 0, 6, 840, C@A@A@c), 105 | (None, 0, 6, 420, C@A@Ap@C@c), 106 | (None, None, 36, 70, C@Ap@C@C@Ap@c), 107 | (None, 0, 24, 210, C@Ap@C@A@c), 108 | (None, None, 24, 105, C@Ap@C@Ap@C@c), 109 | (None, 0, 6, 420, C@Ap@A@C@c), 110 | (None, None, 6, 140, C@Ap@Ap@C@C@c), 111 | (None, None, 18, 140, C@Ap@np.diag((Ap@c)[:, 0])@(Ap@c)), 112 | (None, None, 60, 84, C@np.diag((Ap@c)[:, 0])@(Ap@Ap@c)), 113 | (None, 0, 6, 420, A@C@C@Ap@c), 114 | #36: 115 | (None, 0, 4, 1260, A@C@A@c), 116 | (None, 0, 4, 630, A@C@Ap@C@c), 117 | (None, 0, 1, 2520, A@A@C@c), 118 | (None, 0, 1, 840, A@Ap@C@C@c), 119 | (None, 0, 3, 840, A@np.diag((Ap@c)[:, 0])@(Ap@c)), 120 | (None, None, 10, 84, Ap@C@C@C@Ap@c), 121 | (None, 0, 10, 252, Ap@C@C@A@c), 122 | (None, None, 10, 126, Ap@C@C@Ap@C@c), 123 | (None, 0, 5, 504, Ap@C@A@C@c), 124 | (None, None, 5, 168, Ap@C@Ap@C@C@c), 125 | (None, None, 15, 168, Ap@C@np.diag((Ap@c)[:, 0])@(Ap@c)), 126 | (None, 0, 1, 840, Ap@A@C@C@c), 127 | (None, 0, 10, 504, Ap@np.diag((A@c)[:, 0])@(Ap@c)), 128 | (None, None, 1, 210, Ap@Ap@C@C@C@c), 129 | (None, None, 10, 252, Ap@np.diag((Ap@C@c)[:, 0])@(Ap@c)), 130 | #51: 131 | (None, None, 10, 252, np.diag((Ap@Ap@c)[:, 0])@(Ap@Ap@c)), 132 | (None, 0, 20, 252, np.diag((A@c)[:, 0])@(Ap@Ap@c)), 133 | (None, None, 20, 126, np.diag((Ap@C@c)[:, 0])@(Ap@Ap@c)), 134 | (None, 0, 15, 336, np.diag((Ap@c)[:, 0])@(A@Ap@c)), 135 | (None, None, 45, 112, np.diag((Ap@c)[:, 0])@(Ap@C@Ap@c)), 136 | (None, 0, 15, 336, np.diag((Ap@c)[:, 0])@(Ap@A@c)), 137 | (None, None, 15, 168, np.diag((Ap@c)[:, 0])@(Ap@Ap@C@c)), 138 | (None, None, 15, 168, C@C@Ap@Ap@Ap@c), 139 | (None, 0, 6, 840, C@A@Ap@Ap@c), 140 | (None, None, 24, 210, C@Ap@C@Ap@Ap@c), 141 | (None, 0, 6, 840, C@Ap@A@Ap@c), 142 | (None, None, 18, 280, C@Ap@Ap@C@Ap@c), 143 | (None, 0, 6, 840, C@Ap@Ap@A@c), 144 | (None, None, 6, 420, C@Ap@Ap@Ap@C@c), 145 | (None, 0, 4, 1260, A@C@Ap@Ap@c), 146 | #66: 147 | (None, 0, 1, 5040, A@A@Ap@c), 148 | (None, 0, 3, 1680, A@Ap@C@Ap@c), 149 | (None, 0, 1, 5040, A@Ap@A@c), 150 | (None, 0, 1, 2520, A@Ap@Ap@C@c), 
151 | (None, None, 10, 252, Ap@C@C@Ap@Ap@c), 152 | (None, 0, 5, 1008, Ap@C@A@Ap@c), 153 | (None, None, 15, 336, Ap@C@Ap@C@Ap@c), 154 | (None, 0, 5, 1008, Ap@C@Ap@A@c), 155 | (None, None, 5, 504, Ap@C@Ap@Ap@C@c), 156 | (None, 0, 3, 1680, Ap@A@C@Ap@c), 157 | (None, 0, 1, 5040, Ap@A@A@c), 158 | (None, 0, 1, 2520, Ap@A@Ap@C@c), 159 | (None, None, 6, 420, Ap@Ap@C@C@Ap@c), 160 | (None, 0, 4, 1260, Ap@Ap@C@A@c), 161 | (None, None, 4, 630, Ap@Ap@C@Ap@C@c), 162 | #81: 163 | (None, 0, 1, 2520, Ap@Ap@A@C@c), 164 | (None, None, 1, 840, Ap@Ap@Ap@C@C@c), 165 | (None, None, 3, 840, Ap@Ap@np.diag((Ap@c)[:, 0])@(Ap@c)), 166 | (None, None, 10, 504, Ap@np.diag((Ap@c)[:, 0])@(Ap@Ap@c)), 167 | (None, None, 15, 336, np.diag((Ap@c)[:, 0])@(Ap@Ap@Ap@c)), 168 | (None, None, 6, 840, C@Ap@Ap@Ap@Ap@c), 169 | (None, 0, 1, 5040, A@Ap@Ap@Ap@c), 170 | (None, None, 5, 1008, Ap@C@Ap@Ap@Ap@c), 171 | (None, 0, 1, 5040, Ap@A@Ap@Ap@c), 172 | (None, None, 4, 1260, Ap@Ap@C@Ap@Ap@c), 173 | (None, 0, 1, 5040, Ap@Ap@A@Ap@c), 174 | (None, None, 3, 1680, Ap@Ap@Ap@C@Ap@c), 175 | (None, 0, 1, 5040, Ap@Ap@Ap@A@c), 176 | (None, None, 1, 2520, Ap@Ap@Ap@Ap@C@c), 177 | (None, None, 1, 5040, Ap@Ap@Ap@Ap@Ap@c)) 178 | } 179 | return dat[order] 180 | # return [sympy.simplify(T) for T in dat[order]] 181 | 182 | 183 | def calc_Ts_norm(order, b, c, A, beta=None, alpha=None, t=1): 184 | """alpha and beta are the extra matrices for RKN methods""" 185 | if beta is None: 186 | assert alpha is None, "need neither or both beta and alpha" 187 | if alpha is None: 188 | assert beta is None, "need neither or both beta and alpha" 189 | # RK method, not RKN 190 | Tp = calc_Ts(order, b, c, A, beta, alpha, t) 191 | Tp_norm = np.sqrt(np.sum(np.asarray(Tp)**2)) 192 | return Tp_norm 193 | 194 | # General RKN method, need to add strict RKN method later.................. 195 | T, Tp = calc_Ts(order, b, c, A, beta=beta, alpha=alpha, t=t) 196 | T_norm = np.sqrt(np.sum(np.asarray(T)**2)) 197 | Tp_norm = np.sqrt(np.sum(np.asarray(Tp)**2)) 198 | return T_norm, Tp_norm 199 | 200 | 201 | def calc_Ts(order, b, c, A, beta=None, alpha=None, t=1): 202 | b = np.atleast_2d(b).T 203 | if beta is None: 204 | assert alpha is None, "need neither or both beta and alpha" 205 | if alpha is None: 206 | assert beta is None, "need neither or both beta and alpha" 207 | 208 | # RK method, not RKN 209 | egps = calc_egps(order, c, A, A) 210 | Tp = [] 211 | for w, w1, eta, gamma, phik in egps: 212 | if w1 == 0: 213 | continue 214 | phip = (b.T@phik)[0, 0] 215 | Tp.append(eta/factorial(order) * (gamma*phip - t**order)) 216 | return Tp 217 | 218 | # General RKN method, need to add strict RKN method later.................. 
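# Added note (not in the original file): in the general RKN branch below,
# T collects the order-condition residuals of the position weights (`beta`,
# scaled with eta/(order+1)!) and Tp those of the velocity weights (`b`),
# analogous to the RK-only branch above. Both sets should vanish, up to
# round-off, for a method of the claimed order.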
219 | beta = np.atleast_2d(beta).T 220 | egps = calc_egps(order, c, alpha, A) 221 | T = [] 222 | Tp = [] 223 | for w, w1, eta, gamma, phik in egps: 224 | phi = (beta.T@phik)[0, 0] 225 | phip = (b.T@phik)[0, 0] 226 | T.append(eta/factorial(order + 1) * ((order + 1)*gamma*phi - t**(order + 1))) 227 | Tp.append(eta/factorial(order) * (gamma*phip - t**order)) 228 | return T, Tp 229 | 230 | 231 | 232 | if __name__ == "__main__": 233 | from extensisq import Ts5, Fi5N 234 | from math import isclose 235 | 236 | # print(calc_Ts_norm(5, Ts5.B, Ts5.C, Ts5.A)) 237 | # print(calc_Ts(5, Ts5.B, Ts5.C, Ts5.A)) 238 | 239 | # print(calc_Ts_norm(4, Fi5N.Bp, Fi5N.C, Fi5N.Ap, alpha=Fi5N.A, beta=Fi5N.B)) 240 | # print(calc_Ts(1, Fi5N.Bp, Fi5N.C, Fi5N.Ap, alpha=Fi5N.A, beta=Fi5N.B)) 241 | 242 | # A = np.random.randn(7, 7) 243 | # alpha = np.random.rand(7, 7) 244 | # B = np.random.randn(7) 245 | # beta = np.random.rand(7) 246 | # C = np.random.randn(7) 247 | 248 | # T = calc_Ts(7, B, C, A) 249 | # print(len(T)) 250 | # T, Tp = calc_Ts(7, B, C, A, alpha=alpha, beta=beta) 251 | # print(len(T)) 252 | 253 | # for i in range(len(Tp)): 254 | # for j in range(i): 255 | # if isclose(Tp[i], Tp[j]): 256 | # print(i, j, Tp[i], Tp[j]) -------------------------------------------------------------------------------- /tests/test_DAE.py: -------------------------------------------------------------------------------- 1 | """Use Kaps problem to test DAE solver. 2 | 3 | # vary formats of J and M 4 | # include numerical J 5 | # also check with inconsistent y0 6 | also use a scrambled system with full M 7 | # test yp0 8 | # test if constraint is satisfied 9 | # test if solution is accurate 10 | # test if dense solution is accurate 11 | # test with eps=1e-3 12 | """ 13 | import pytest 14 | import numpy as np 15 | from numpy.testing import assert_, assert_allclose 16 | from itertools import product 17 | from scipy.integrate import solve_ivp 18 | from scipy.sparse import csr_matrix 19 | from extensisq import TRBDF2, TRX2, KC3I, KC4I, KC4Ia, Kv3I 20 | 21 | 22 | methods = [TRBDF2, TRX2, KC3I, KC4I, KC4Ia, Kv3I] 23 | 24 | # Kaps DAE 25 | 26 | 27 | def fun(t, y, eps=0.): 28 | return np.array( 29 | [-(1 + 2*eps)*y[0] + y[1]**2, 30 | y[0] - y[1] - y[1]**2]) 31 | 32 | 33 | def jac(t, y, eps=0.): 34 | return np.array([ 35 | [-(1 + 2*eps), 2*y[1]], 36 | [1, -1 - 2*y[1]]]) 37 | 38 | 39 | def jac_sparse(t, y, eps=0.): 40 | return csr_matrix(jac(t, y, eps)) 41 | 42 | 43 | def ref(t): 44 | return np.stack([np.exp(-t)**2, np.exp(-t)]) 45 | 46 | 47 | M_dense = np.array([[0, 0], [0, 1]]) 48 | M_sparse = csr_matrix(M_dense) 49 | M_diag = np.array([0, 1.]) 50 | 51 | y0_consistent = [1., 1.] 52 | y0_inconsistent = [2., 1.] 53 | yp0 = [-2., -1.] 54 | t_span = (0, 1.) 
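

# --- Added illustration (not one of the tests) -------------------------------
# The objects above combine into a single DAE solve in the same way that the
# parametrized tests below call solve_ivp. TRBDF2 is just one of the methods
# imported at the top; any entry of `methods` works here.
def _example_kaps_solve(method=TRBDF2):
    """Solve the Kaps DAE defined above once and return the solution object;
    compare e.g. `_example_kaps_solve().y[:, -1]` with `ref(1.)`."""
    return solve_ivp(fun, t_span, y0_consistent, method=method,
                     jac=jac, M=M_diag, dense_output=True)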
55 | 56 | 57 | @pytest.mark.parametrize("method", methods) 58 | def test_DAE(method): 59 | interpolant = {} 60 | if method in [KC3I, KC4I, KC4Ia, Kv3I]: 61 | interpolant = {'interpolant': 'C1'} 62 | for M, J, y0 in product([M_diag, M_sparse, M_dense], 63 | [jac, None, jac_sparse], 64 | [y0_consistent, y0_inconsistent]): 65 | sol = solve_ivp(fun, t_span, y0, method=method, jac=J, M=M, 66 | dense_output=True, **interpolant) 67 | 68 | # initial values 69 | assert_allclose(sol.y[:, 0], y0) 70 | assert_allclose(sol.sol(sol.t[0]), y0_consistent) 71 | h = (sol.t[1] - sol.t[0])/10 72 | yp_numerical = (sol.sol(sol.t[0]+h) - sol.sol(sol.t[0]))/h 73 | assert_allclose(yp_numerical, yp0, atol=1e-5, rtol=1e-2) 74 | # final values 75 | t_final = t_span[1] 76 | y_final = ref(t_final) 77 | assert_allclose(sol.y[:, -1], y_final, atol=1e-5, rtol=1e-2) 78 | assert_allclose(sol.sol(t_final), y_final, atol=1e-5, rtol=1e-2) 79 | # dense output 80 | assert_allclose(sol.sol(sol.t)[:, 1:], sol.y[:, 1:]) 81 | # solution 82 | assert_allclose(sol.y[:, 1:], ref(sol.t[1:]), atol=1e-5, rtol=1e-2) 83 | # constraint 84 | y_0, y_1 = sol.y[:, 1:] 85 | assert_allclose(y_1**2, y_0, atol=1e-6, rtol=1e-3) 86 | 87 | 88 | eps = 1e-3 89 | args = (eps, ) 90 | Mp_dense = np.array([[eps, 0], [0, 1]]) 91 | Mp_sparse = csr_matrix(Mp_dense) 92 | Mp_diag = np.array([eps, 1.]) 93 | 94 | 95 | @pytest.mark.parametrize("method", methods) 96 | def test_SPP(method): 97 | interpolant = {} 98 | if method in [KC3I, KC4I, KC4Ia, Kv3I]: 99 | interpolant = {'interpolant': 'C1'} 100 | for M, J, y0 in product([Mp_diag, Mp_sparse, Mp_dense], 101 | [jac, None, jac_sparse], 102 | [y0_consistent, y0_inconsistent]): 103 | sol = solve_ivp(fun, t_span, y0, method=method, jac=J, M=M, 104 | dense_output=True, args=args, **interpolant) 105 | 106 | # initial values 107 | assert_allclose(sol.y[:, 0], y0) 108 | if y0 == y0_consistent: 109 | assert_allclose(sol.sol(sol.t[0]), y0, atol=1e-5, rtol=1e-2) 110 | h = (sol.t[1] - sol.t[0])/10 111 | yp_numerical = (sol.sol(sol.t[0]+h) - sol.sol(sol.t[0]))/h 112 | assert_allclose(yp_numerical, yp0, atol=1e-5, rtol=1e-2) 113 | # final values 114 | t_final = t_span[1] 115 | y_final = ref(t_final) 116 | assert_allclose(sol.y[:, -1], y_final, atol=1e-5, rtol=1e-2) 117 | assert_allclose(sol.sol(t_final), y_final, atol=1e-5, rtol=1e-2) 118 | # dense output 119 | assert_allclose(sol.sol(sol.t)[:, 1:], sol.y[:, 1:]) 120 | 121 | 122 | np.random.seed(1) 123 | A = np.random.rand(2, 2) # transform equations 124 | B = np.random.rand(2, 2) # transform y and yp 125 | Binv = np.linalg.inv(B) 126 | M_hidden = A @ M_dense @ Binv # full matrix (not full rank) 127 | 128 | 129 | def fun_hidden(t, y, eps=0, A=A): 130 | return A @ fun(t, Binv @ y, eps) 131 | 132 | 133 | def jac_hidden(t, y, eps=0, A=A): 134 | return A @ jac(t, Binv @ y, eps) @ Binv 135 | 136 | 137 | @pytest.mark.parametrize("method", methods) 138 | def test_DAE_hidden(method): 139 | """Test if the methods can untangle the constraint from a mass matrix""" 140 | for y0 in [y0_consistent, y0_inconsistent]: 141 | sol = solve_ivp(fun, t_span, y0, method=method, jac=jac, M=M_dense, 142 | dense_output=True, args=args) 143 | sol_hidden = solve_ivp(fun_hidden, t_span, B @ y0, 144 | method=method, jac=jac_hidden, M=M_hidden) 145 | assert_(sol_hidden.success) 146 | # nr of steps, fun calls, jac calls 147 | # print(sol_hidden.t.size, sol.t.size) 148 | assert_(abs(sol_hidden.t.size - sol.t.size) < 3) 149 | print(sol_hidden.nfev, sol.nfev) 150 | assert_(abs(sol_hidden.nfev - sol.nfev) < 25) # 
still quite dissimilar 151 | print(sol_hidden.njev, sol.njev) 152 | assert_(abs(sol_hidden.njev - sol.njev) < 2) 153 | 154 | # solutions similar 155 | assert_allclose(Binv @ sol_hidden.y[:, 0], sol.y[:, 0]) 156 | if y0 == y0_consistent: 157 | assert_allclose(Binv @ sol_hidden.y, sol.sol(sol_hidden.t), 158 | atol=1e-5, rtol=1e-2) 159 | else: 160 | assert_allclose(Binv @ sol_hidden.y[:, 1:], 161 | sol.sol(sol_hidden.t[1:]), 162 | atol=1e-5, rtol=1e-2) 163 | 164 | 165 | def fun_e(t, y, eps, M=Mp_dense): 166 | return np.linalg.solve(M, fun(t, y, eps)) 167 | 168 | 169 | def jac_e(t, y, eps, M=Mp_dense): 170 | return np.linalg.solve(M, jac(t, y, eps)) 171 | 172 | 173 | def jac_e_sparse(t, y, eps=0.): 174 | return csr_matrix(jac_e(t, y, eps)) 175 | 176 | 177 | @pytest.mark.parametrize("method", methods) 178 | def test_Mass(method): 179 | interpolant = {} 180 | if method in [KC3I, KC4I, KC4Ia, Kv3I]: 181 | interpolant = {'interpolant': 'C1'} 182 | for y0 in [y0_consistent, y0_inconsistent]: 183 | sol_m = solve_ivp(fun, t_span, y0, method=method, jac=jac, M=Mp_diag, 184 | args=args) 185 | for J in [jac_e, jac_e_sparse]: 186 | sol = solve_ivp(fun_e, t_span, y0, method=method, jac=J, 187 | dense_output=True, args=args, **interpolant) 188 | 189 | # nr of steps, fun calls, jac calls 190 | # print(sol_m.t.size, sol.t.size) 191 | assert_(abs(sol_m.t.size - sol.t.size) < 3) 192 | # print(sol_m.nfev, sol.nfev) 193 | assert_(abs(sol_m.nfev - sol.nfev) < 20) 194 | # print(sol_m.njev, sol.njev) 195 | assert_(abs(sol_m.njev - sol.njev) < 2) 196 | # solutions similar 197 | assert_allclose(sol_m.y, sol.sol(sol_m.t), atol=1e-5, rtol=1e-2) 198 | -------------------------------------------------------------------------------- /tests/test_rk.py: -------------------------------------------------------------------------------- 1 | """Test from scipy to conform to scipy. modified""" 2 | import pytest 3 | from numpy.testing import assert_allclose, assert_ 4 | import numpy as np 5 | from order_conditions import calc_Ts_norm 6 | from extensisq import (BS5, Ts5, CK5, CKdisc, Pr7, Pr8, Pr9, CFMR7osc, Me4, 7 | TRX2, TRBDF2, KC3I, KC4I, KC4Ia, Kv3I) 8 | 9 | 10 | METHODS = [BS5, Ts5, CK5, CKdisc, Pr7, Pr8, Pr9, CFMR7osc, Me4, 11 | TRX2, TRBDF2, KC3I, KC4I, KC4Ia, Kv3I] 12 | 13 | 14 | @pytest.mark.parametrize("solver", METHODS) 15 | def test_orders(solver): 16 | # main method 17 | for i in range(solver.order): 18 | if i+1 > 7: # skip higher order tests, not implemented yet 19 | return 20 | _norm = calc_Ts_norm(i+1, solver.B, solver.C, solver.A) 21 | assert_(_norm < solver.n_stages*1e-14) 22 | # secondary method 23 | for i in range(solver.order_secondary): 24 | if i+1 > 7: # skip higher order tests, not implemented yet 25 | return 26 | E = solver.E 27 | B = solver.B 28 | if E.size == B.size: 29 | Bh = E + B 30 | A = solver.A 31 | C = solver.C 32 | else: 33 | A = np.zeros([E.size, E.size]) 34 | A[:B.size, :B.size] = solver.A 35 | A[-1, :-1] = B 36 | Bh = E.copy() 37 | Bh[:-1] += B 38 | C = np.ones(E.size) 39 | C[:-1] = solver.C 40 | _norm = calc_Ts_norm(i+1, Bh, C, A) 41 | print(_norm) 42 | assert_(_norm < solver.n_stages*1e-14) 43 | 44 | 45 | @pytest.mark.parametrize("solver", METHODS) 46 | def test_coefficient_properties(solver): 47 | assert_allclose(np.sum(solver.B), 1, rtol=1e-15) 48 | assert_allclose(np.sum(solver.E), 0, atol=1e-15) # added 49 | assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-13) 50 | # added tests for runge kutta interpolants. 
(C1 continuity) 51 | if isinstance(solver.P, np.ndarray): 52 | # C0 end (C0 start automatically satisfied) 53 | Ps = np.sum(solver.P, axis=1) 54 | Ps[:solver.B.size] -= solver.B 55 | assert_allclose(Ps, 0, atol=1e-12) 56 | if solver in [KC3I, KC4I, KC4Ia]: 57 | P = solver.P1 58 | # C0 end (again, for P1 now) 59 | Ps = np.sum(solver.P, axis=1) 60 | Ps[:solver.B.size] -= solver.B 61 | assert_allclose(Ps, 0, atol=1e-12) 62 | else: 63 | P = solver.P 64 | # C1 start 65 | Ps = np.sum(P, axis=0) 66 | Ps[0] -= 1 67 | assert_allclose(Ps, 0, atol=1e-12) 68 | # C1 end 69 | dP = P * (np.arange(P.shape[1]) + 1) 70 | dPs = dP.sum(axis=1) 71 | dPs[-1] -= 1 72 | assert_allclose(dPs, 0, atol=2e-12) 73 | 74 | 75 | @pytest.mark.parametrize("solver_class", METHODS) 76 | def test_error_estimation(solver_class): 77 | if solver_class in [Me4, TRX2, TRBDF2, KC4Ia]: 78 | # Me4 does not pass this test: fifth order error estimate 79 | # similar reasoning for TRX2 and TRBDF2 80 | # KC4Ia does not pass this test, but I'm not sure why 81 | return 82 | else: 83 | step = 0.2 84 | solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step) 85 | solver.step() 86 | error_estimate = solver._estimate_error(solver.K, step) 87 | error = solver.y - np.exp([step]) 88 | # print(np.abs(error), np.abs(error_estimate)) 89 | assert_(np.abs(error) < np.abs(error_estimate)) 90 | 91 | 92 | @pytest.mark.parametrize("solver_class", METHODS) 93 | def test_error_estimation_complex(solver_class): 94 | h = 0.2 95 | solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h) 96 | solver.step() 97 | err_norm = solver._estimate_error_norm(solver.K, h, scale=[1]) 98 | assert np.isrealobj(err_norm) 99 | -------------------------------------------------------------------------------- /tests/test_rkn.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from numpy.testing import assert_, assert_allclose, assert_equal 3 | from scipy.integrate import solve_ivp 4 | import numpy as np 5 | from extensisq import Fi4N, Fi5N, Mu5Nmb, MR6NN 6 | from extensisq.common import norm 7 | from itertools import product 8 | from order_conditions import calc_Ts_norm 9 | 10 | 11 | METHODS = [Fi4N, Fi5N, Mu5Nmb, MR6NN] 12 | 13 | 14 | def fun_linear(t, y): 15 | return np.array([y[1], -y[0]]) 16 | 17 | 18 | def fun_linear_vectorized(t, y): 19 | return np.vstack((y[1] * np.ones_like(t), 20 | -y[0] * np.ones_like(t))) 21 | 22 | 23 | def sol_linear(t): 24 | return np.vstack((np.sin(t), 25 | np.cos(t))) 26 | 27 | 28 | def compute_error(y, y_true, rtol, atol): 29 | e = (y - y_true) / (atol + rtol * np.abs(y_true)) 30 | return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0]) 31 | 32 | 33 | y0 = [0, 1] 34 | 35 | @pytest.mark.parametrize("solver", METHODS) 36 | def test_orders(solver): 37 | # main method 38 | if solver is MR6NN: 39 | # order conditions of the strict Nystrom method are not implemented 40 | return 41 | # secondary method 42 | for i in range(solver.order): 43 | if i+1 > 7: # skip higher order tests, not implemented yet 44 | return 45 | _norm, _normp = calc_Ts_norm(i+1, solver.Bp, solver.C, solver.Ap, 46 | alpha=solver.A, beta=solver.B) 47 | print(_norm, _normp) 48 | assert_(_normp < solver.n_stages*1e-14) 49 | if i+1 < solver.order: 50 | assert_(_norm < solver.n_stages*1e-14) 51 | for i in range(solver.order_secondary): 52 | if i+1 > 7: # skip higher order tests, not implemented yet 53 | return 54 | E = solver.Ep 55 | B = solver.Bp 56 | epsilon = solver.E 57 | beta = solver.B 58 | if E.size == B.size: 
59 | Bh = E + B 60 | betah = epsilon + beta 61 | A = solver.Ap 62 | alpha = solver.A 63 | C = solver.C 64 | else: 65 | A = np.zeros([E.size, E.size]) 66 | A[:B.size, :B.size] = solver.Ap 67 | A[-1, :-1] = B 68 | alpha = np.zeros([E.size, E.size]) 69 | alpha[:B.size, :B.size] = solver.A 70 | alpha[-1, :-1] = beta 71 | Bh = E.copy() 72 | Bh[:-1] += B 73 | betah = epsilon.copy() 74 | betah[:-1] += beta 75 | C = np.ones(E.size) 76 | C[:-1] = solver.C 77 | _norm, _normp = calc_Ts_norm(i+1, Bh, C, A, alpha=alpha, beta=betah) 78 | print(_norm, _normp) 79 | assert_(_normp < solver.n_stages*1e-14) 80 | if i+1 < solver.order_secondary: 81 | assert_(_norm < solver.n_stages*1e-14) 82 | 83 | 84 | @pytest.mark.parametrize("solver", METHODS) 85 | def test_coefficient_properties(solver): 86 | assert_allclose(np.sum(solver.B), 0.5, rtol=1e-13) 87 | assert_allclose(np.sum(solver.Bp), 1, rtol=1e-13) 88 | assert_allclose(np.sum(solver.E), 0, atol=1e-13) 89 | assert_allclose(np.sum(solver.Ep), 0, atol=1e-13) 90 | if solver.Ap is not NotImplemented: 91 | assert_allclose(np.sum(solver.Ap, axis=1), solver.C, rtol=1e-13) 92 | assert_allclose(np.sum(solver.A, axis=1), 0.5*solver.C**2, rtol=1e-13) 93 | 94 | 95 | @pytest.mark.parametrize("solver_class", METHODS) 96 | def test_error_estimation(solver_class): 97 | step = 0.2 98 | solver = solver_class(lambda t, y: [y[1], -y[0]], 0, [1, 0], 1, first_step=step) 99 | solver.step() 100 | error_estimate = solver._estimate_error(solver.K, step) 101 | error = solver.y - np.array([np.cos(step), -np.sin(step)]) 102 | # print(np.abs(error), np.abs(error_estimate)) 103 | assert_(norm(error) < norm(error_estimate)) 104 | 105 | 106 | @pytest.mark.parametrize("solver_class", METHODS) 107 | def test_error_estimation_complex(solver_class): 108 | h = 0.2 109 | solver = solver_class(lambda t, y: [y[1], -1j*y[0]], 0, [1j, 1], 1, first_step=h) 110 | solver.step() 111 | err_norm = solver._estimate_error_norm(solver.K, h, scale=[1]) 112 | assert np.isrealobj(err_norm) 113 | 114 | 115 | @pytest.mark.parametrize('method', METHODS) 116 | def test_integration(method): 117 | rtol = 1e-3 118 | atol = 1e-6 119 | 120 | for vectorized, t_span in product( 121 | [False, True], 122 | [[0, 2*np.pi], [2*np.pi, 0]] 123 | ): 124 | 125 | if vectorized: 126 | fun = fun_linear_vectorized 127 | else: 128 | fun = fun_linear 129 | 130 | res = solve_ivp(fun, t_span, y0, rtol=rtol, atol=atol, method=method, 131 | dense_output=True, vectorized=vectorized) 132 | assert_equal(res.t[0], t_span[0]) 133 | assert_(res.t_events is None) 134 | assert_(res.y_events is None) 135 | assert_(res.success) 136 | assert_equal(res.status, 0) 137 | 138 | if method in (Mu5Nmb, MR6NN): 139 | # These have relatively low errors, with relatively many evals 140 | assert_(res.nfev < 130) 141 | else: 142 | assert_(res.nfev < 60) 143 | 144 | assert_equal(res.njev, 0) 145 | assert_equal(res.nlu, 0) 146 | 147 | y_true = sol_linear(res.t) 148 | e = compute_error(res.y, y_true, rtol, atol) 149 | assert_(np.median(e) < 1) 150 | 151 | tc = np.linspace(*t_span) 152 | yc_true = sol_linear(tc) 153 | yc = res.sol(tc) 154 | 155 | e = compute_error(yc, yc_true, rtol, atol) 156 | assert_(np.median(e) < 1) 157 | 158 | tc = (5*t_span[0] + 3*t_span[-1])/8 159 | yc_true = sol_linear(tc).T 160 | yc = res.sol(tc) 161 | 162 | e = compute_error(yc, yc_true, rtol, atol) 163 | assert_(np.all(e < 5)) 164 | 165 | assert_allclose(res.sol(res.t), res.y, 166 | rtol=1e-11, atol=1e-12) # relaxed tol 167 | 168 | 169 | @pytest.mark.parametrize('cls', METHODS) 170 | def 
test_classes(cls): 171 | y0 = [0, 1] 172 | solver = cls(fun_linear, 0, y0, np.inf) 173 | # fun, t0, y0, t_bound 174 | assert_equal(solver.n, 1) 175 | assert_equal(solver.status, 'running') 176 | assert_equal(solver.t_bound, np.inf) 177 | assert_equal(solver.direction, 1) 178 | assert_equal(solver.t, 0) 179 | assert_equal(solver.y, y0) 180 | assert_(solver.step_size is None) 181 | assert_(solver.nfev > 0) 182 | assert_(solver.njev >= 0) 183 | assert_equal(solver.nlu, 0) 184 | with pytest.raises(RuntimeError): 185 | solver.dense_output() 186 | message = solver.step() 187 | assert_equal(solver.status, 'running') 188 | assert_equal(message, None) 189 | assert_equal(solver.n, 1) 190 | assert_equal(solver.t_bound, np.inf) 191 | assert_equal(solver.direction, 1) 192 | assert_(solver.t > 0) 193 | assert_(not np.all(np.equal(solver.y, y0))) 194 | assert_(solver.step_size > 0) 195 | assert_(solver.nfev > 0) 196 | assert_(solver.njev >= 0) 197 | assert_(solver.nlu >= 0) 198 | sol = solver.dense_output() 199 | assert_allclose(sol(0), y0, rtol=1e-14, atol=0) 200 | 201 | 202 | @pytest.mark.parametrize('method', METHODS) 203 | def test_wrong_problem(method): 204 | # odd nr of components 205 | fun = lambda t, y: -y 206 | with pytest.raises(AssertionError): 207 | method(fun, 0, [1], 1) 208 | # dx != v 209 | fun = lambda t, y: [-y[1], y[0]] 210 | with pytest.raises(AssertionError): 211 | method(fun, 0, [0, 1], 1) 212 | with pytest.raises(AssertionError): 213 | method(fun, 0, [1, 1], 1) 214 | with pytest.raises(AssertionError): 215 | method(fun, 0, [0, 0], 1) 216 | -------------------------------------------------------------------------------- /tests/test_sens.py: -------------------------------------------------------------------------------- 1 | """Test sensitivity methods""" 2 | import pytest 3 | from numpy.testing import assert_allclose 4 | import numpy as np 5 | from extensisq import sens_forward, sens_adjoint_int, sens_adjoint_end 6 | 7 | 8 | METHODS = ["LSODA", "BDF", "Radau"] 9 | 10 | 11 | def fun(t, y, *p): 12 | y1, y2, y3 = y 13 | p1, p2, p3 = p 14 | return np.array([-p1*y1 + p2*y2*y3, 15 | p1*y1 - p2*y2*y3 - p3*y2**2, 16 | p3*y2**2]) 17 | 18 | 19 | def jac(t, y, *p): 20 | y1, y2, y3 = y 21 | p1, p2, p3 = p 22 | return np.array([[-p1, p2*y3, p2*y2], 23 | [p1, -p2*y3 - 2*p3*y2, -p2*y2], 24 | [0., 2*p3*y2, 0.]]) 25 | 26 | 27 | def dfdp(t, y, *p): 28 | y1, y2, y3 = y 29 | p1, p2, p3 = p 30 | return np.array([[-y1, y2*y3, 0.], 31 | [y1, -y2*y3, -y2**2], 32 | [0., 0., y2**2]]) 33 | 34 | 35 | def g(t, y, *p): 36 | y1, y2, y3 = y 37 | p1, p2, p3 = p 38 | return [y1 + p2*y2*y3] 39 | 40 | 41 | def dgdy(t, y, *p): 42 | y1, y2, y3 = y 43 | p1, p2, p3 = p 44 | return np.array([1., p2*y3, p2*y2]) 45 | 46 | 47 | def dgdp(t, y, *p): 48 | y1, y2, y3 = y 49 | p1, p2, p3 = p 50 | return np.array([0., y2*y3, 0.]) 51 | 52 | 53 | y0 = np.array([1., 0., 0.]) 54 | p = (0.04, 1e4, 3e7) 55 | dy0dp = np.zeros([3, 3]) 56 | rtol = 1e-4 57 | atol = np.array([1e-8, 1e-14, 1e-6]) 58 | atol_adj = 1e-5 59 | atol_quad = 1e-6 60 | 61 | result_forward = { 62 | 'yf': [9.8517e-01, 3.3864e-05, 1.4794e-02], 63 | 'sens': [[-3.5595e-01, 9.5428e-08, -1.5832e-11], 64 | [3.9026e-04, -2.1310e-10, -5.2900e-13], 65 | [3.5556e-01, -9.5215e-08, 1.6361e-11]]} 66 | result_adjoint_int = { 67 | 'yf': [5.2016e-05, 2.0808e-10, 9.9995e-01], 68 | 'sens': [-7.8383e+05, 3.1991, -5.3301e-04], 69 | 'G': 1.8219e+04, 70 | 'lambda0': [3.4249e+04, 3.4206e+04, 3.4139e+04]} 71 | 72 | 73 | @pytest.mark.parametrize('method', METHODS) 74 | def 
test_sens_forward(method): 75 | t_span = (0., 0.4) 76 | use_approx_jac = method == "LSODA" 77 | 78 | sens, yf, _ = sens_forward( 79 | fun, t_span, y0, jac, dfdp, dy0dp, p=p, method=method, 80 | rtol=rtol, atol=atol, use_approx_jac=use_approx_jac) 81 | 82 | assert_allclose(yf, result_forward['yf'], rtol=1e-3) 83 | assert_allclose(sens, result_forward['sens'], rtol=1e-3) 84 | 85 | 86 | @pytest.mark.parametrize('method', METHODS) 87 | def test_sens_adjoint_int(method): 88 | t_span = (0., 4e7) 89 | 90 | sens, G, sol_y, sol_bw = sens_adjoint_int( 91 | fun, t_span, y0, jac, dfdp, dy0dp, p, g, dgdp, dgdy, method=method, 92 | atol=atol, rtol=rtol, atol_quad=atol_quad, atol_adj=atol_adj) 93 | yf = sol_y.y[:, -1] 94 | lambda0 = sol_bw.y[:3, -1] 95 | 96 | assert_allclose(yf, result_adjoint_int['yf'], rtol=1e-2) 97 | assert_allclose(sens, result_adjoint_int['sens'], rtol=1e-2) 98 | assert_allclose([G], [result_adjoint_int['G']], rtol=1e-2) 99 | assert_allclose(lambda0, result_adjoint_int['lambda0'], rtol=1e-2) 100 | 101 | 102 | @pytest.mark.parametrize('method', METHODS) 103 | def test_sens_adjoint_end(method): 104 | t_span = (0., 0.4) 105 | sol_y = None 106 | 107 | for i in range(3): 108 | def g(t, y, *p, i=i): 109 | return [y[i]] 110 | 111 | def dgdy(t, y, *p, i=i): 112 | a = np.zeros(3) 113 | a[i] = 1. 114 | return a 115 | 116 | def dgdp(t, y, *p): 117 | return np.zeros(3) 118 | 119 | sens, gf, sol_y, _ = sens_adjoint_end( 120 | fun, t_span, y0, jac, dfdp, dy0dp, p, g, dgdp, dgdy, method=method, 121 | atol=atol, rtol=rtol, atol_quad=atol_quad/10, atol_adj=atol_adj/10, 122 | sol_y=sol_y) 123 | 124 | assert_allclose(gf, [result_forward['yf'][i]], rtol=1e-3) 125 | assert_allclose(sens, result_forward['sens'][i], rtol=1e-2) 126 | --------------------------------------------------------------------------------