├── .github
│   └── workflows
│       └── build_wheels.yml
├── .gitignore
├── LICENSE.txt
├── MANIFEST.in
├── README.md
├── cyroot
│   ├── __init__.pxd
│   ├── __init__.py
│   ├── _check_args.cpp
│   ├── _check_args.pxd
│   ├── _check_args.pyx
│   ├── _defaults.py
│   ├── _types.py
│   ├── _version.py
│   ├── fptr.cpp
│   ├── fptr.pxd
│   ├── fptr.pyx
│   ├── ops
│   │   ├── __init__.pxd
│   │   ├── __init__.py
│   │   ├── matrix_ops.cpp
│   │   ├── matrix_ops.pxd
│   │   ├── matrix_ops.pyx
│   │   ├── scalar_ops.cpp
│   │   ├── scalar_ops.pxd
│   │   ├── scalar_ops.pyx
│   │   ├── vector_ops.cpp
│   │   ├── vector_ops.pxd
│   │   └── vector_ops.pyx
│   ├── return_types.cpp
│   ├── return_types.pxd
│   ├── return_types.pyx
│   ├── scalar_bracketing.cpp
│   ├── scalar_bracketing.pyx
│   ├── scalar_derivative_approximation.cpp
│   ├── scalar_derivative_approximation.pxd
│   ├── scalar_derivative_approximation.pyx
│   ├── scalar_newton.cpp
│   ├── scalar_newton.pyx
│   ├── scalar_quasi_newton.cpp
│   ├── scalar_quasi_newton.pyx
│   ├── scalar_root.py
│   ├── utils
│   │   ├── __init__.pxd
│   │   ├── __init__.py
│   │   ├── _function_registering.py
│   │   ├── _function_registering.pyi
│   │   ├── _warnings.py
│   │   ├── itertools.cpp
│   │   ├── itertools.pxd
│   │   └── itertools.pyx
│   ├── vector_bracketing.cpp
│   ├── vector_bracketing.pyx
│   ├── vector_derivative_approximation.cpp
│   ├── vector_derivative_approximation.pxd
│   ├── vector_derivative_approximation.pyx
│   ├── vector_newton.cpp
│   ├── vector_newton.pyx
│   ├── vector_quasi_newton.cpp
│   ├── vector_quasi_newton.pyx
│   └── vector_root.py
├── examples
│   ├── scalar_root.py
│   ├── utils
│   │   ├── __init__.py
│   │   └── time_meter.py
│   └── vector_root.py
├── pyproject.toml
├── requirements.txt
├── resources
│   └── logo.png
├── setup.cfg
└── setup.py
/.github/workflows/build_wheels.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 |   push:
5 |     tags:
6 |       - 'v*'
7 |
8 | jobs:
9 |   build_sdist:
10 |     name: Build source distribution
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |
15 |       - name: Build sdist
16 |         run: pipx run build --sdist
17 |
18 |       - name: Upload sdist
19 |         uses: actions/upload-artifact@v3
20 |         with:
21 |           path: dist/*.tar.gz
22 |
23 |   build_wheels:
24 |     name: Build wheels on ${{ matrix.os }}
25 |     runs-on: ${{ matrix.os }}
26 |     strategy:
27 |       matrix:
28 |         os: [ ubuntu-latest, windows-latest, macos-latest ]
29 |
30 |     steps:
31 |       - uses: actions/checkout@v3
32 |
33 |       - name: Build wheels
34 |         uses: pypa/cibuildwheel@v2.12.1
35 |
36 |       - name: Upload wheels
37 |         uses: actions/upload-artifact@v3
38 |         with:
39 |           path: ./wheelhouse/*.whl
40 |
41 |   publish_on_pypi:
42 |     # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
43 |     # if: github.event_name == 'release' && github.event.action == 'published'
44 |     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
45 |     needs: [ build_sdist, build_wheels ]
46 |     runs-on: ubuntu-latest
47 |     steps:
48 |       - name: Download wheels
49 |         uses: actions/download-artifact@v3
50 |         with:
51 |           name: artifact
52 |           path: dist
53 |
54 |       - name: Publish wheels to PyPI
55 |         uses: pypa/gh-action-pypi-publish@v1.5.0
56 |         with:
57 |           user: __token__
58 |           password: ${{ secrets.pypi_password }}
59 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Build results
17 | [Dd]ebug/
18 | [Dd]ebugPublic/
19 | [Rr]elease/
20 | [Rr]eleases/
21 | x64/
22 | x86/
23 | [Aa][Rr][Mm]/
24 | [Aa][Rr][Mm]64/
25 | bld/
26 | [Bb]in/
27 | [Oo]bj/
28 | [Ll]og/
29 |
30 | # Visual Studio 2015/2017 cache/options directory
31 | .vs/
32 | # Uncomment if you have tasks that create the project's static files in wwwroot
33 | #wwwroot/
34 |
35 | # Visual Studio 2017 auto generated files
36 | Generated\ Files/
37 |
38 | # MSTest test Results
39 | [Tt]est[Rr]esult*/
40 | [Bb]uild[Ll]og.*
41 |
42 | # NUNIT
43 | *.VisualState.xml
44 | TestResult.xml
45 |
46 | # Build Results of an ATL Project
47 | [Dd]ebugPS/
48 | [Rr]eleasePS/
49 | dlldata.c
50 |
51 | # Benchmark Results
52 | BenchmarkDotNet.Artifacts/
53 |
54 | # .NET Core
55 | project.lock.json
56 | project.fragment.lock.json
57 | artifacts/
58 |
59 | # StyleCop
60 | StyleCopReport.xml
61 |
62 | # Files built by Visual Studio
63 | *_i.c
64 | *_p.c
65 | *_h.h
66 | *.ilk
67 | *.meta
68 | *.obj
69 | *.iobj
70 | *.pch
71 | *.pdb
72 | *.ipdb
73 | *.pgc
74 | *.pgd
75 | *.rsp
76 | *.sbr
77 | *.tlb
78 | *.tli
79 | *.tlh
80 | *.tmp
81 | *.tmp_proj
82 | *_wpftmp.csproj
83 | *.log
84 | *.vspscc
85 | *.vssscc
86 | .builds
87 | *.pidb
88 | *.svclog
89 | *.scc
90 |
91 | # Chutzpah Test files
92 | _Chutzpah*
93 |
94 | # Visual C++ cache files
95 | ipch/
96 | *.aps
97 | *.ncb
98 | *.opendb
99 | *.opensdf
100 | *.sdf
101 | *.cachefile
102 | *.VC.db
103 | *.VC.VC.opendb
104 |
105 | # Visual Studio profiler
106 | *.psess
107 | *.vsp
108 | *.vspx
109 | *.sap
110 |
111 | # Visual Studio Trace Files
112 | *.e2e
113 |
114 | # TFS 2012 Local Workspace
115 | $tf/
116 |
117 | # Guidance Automation Toolkit
118 | *.gpState
119 |
120 | # ReSharper is a .NET coding add-in
121 | _ReSharper*/
122 | *.[Rr]e[Ss]harper
123 | *.DotSettings.user
124 |
125 | # JustCode is a .NET coding add-in
126 | .JustCode
127 |
128 | # TeamCity is a build add-in
129 | _TeamCity*
130 |
131 | # DotCover is a Code Coverage Tool
132 | *.dotCover
133 |
134 | # AxoCover is a Code Coverage Tool
135 | .axoCover/*
136 | !.axoCover/settings.json
137 |
138 | # Visual Studio code coverage results
139 | *.coverage
140 | *.coveragexml
141 |
142 | # NCrunch
143 | _NCrunch_*
144 | .*crunch*.local.xml
145 | nCrunchTemp_*
146 |
147 | # MightyMoose
148 | *.mm.*
149 | AutoTest.Net/
150 |
151 | # Web workbench (sass)
152 | .sass-cache/
153 |
154 | # Installshield output folder
155 | [Ee]xpress/
156 |
157 | # DocProject is a documentation generator add-in
158 | DocProject/buildhelp/
159 | DocProject/Help/*.HxT
160 | DocProject/Help/*.HxC
161 | DocProject/Help/*.hhc
162 | DocProject/Help/*.hhk
163 | DocProject/Help/*.hhp
164 | DocProject/Help/Html2
165 | DocProject/Help/html
166 |
167 | # Click-Once directory
168 | publish/
169 |
170 | # Publish Web Output
171 | *.[Pp]ublish.xml
172 | *.azurePubxml
173 | # Note: Comment the next line if you want to checkin your web deploy settings,
174 | # but database connection strings (with potential passwords) will be unencrypted
175 | *.pubxml
176 | *.publishproj
177 |
178 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
179 | # checkin your Azure Web App publish settings, but sensitive information contained
180 | # in these scripts will be unencrypted
181 | PublishScripts/
182 |
183 | # NuGet Packages
184 | *.nupkg
185 | # The packages folder can be ignored because of Package Restore
186 | **/[Pp]ackages/*
187 | # except build/, which is used as an MSBuild target.
188 | !**/[Pp]ackages/build/
189 | # Uncomment if necessary however generally it will be regenerated when needed
190 | #!**/[Pp]ackages/repositories.config
191 | # NuGet v3's project.json files produces more ignorable files
192 | *.nuget.props
193 | *.nuget.targets
194 |
195 | # Microsoft Azure Build Output
196 | csx/
197 | *.build.csdef
198 |
199 | # Microsoft Azure Emulator
200 | ecf/
201 | rcf/
202 |
203 | # Windows Store app package directories and files
204 | AppPackages/
205 | BundleArtifacts/
206 | Package.StoreAssociation.xml
207 | _pkginfo.txt
208 | *.appx
209 |
210 | # Visual Studio cache files
211 | # files ending in .cache can be ignored
212 | *.[Cc]ache
213 | # but keep track of directories ending in .cache
214 | !?*.[Cc]ache/
215 |
216 | # Others
217 | ClientBin/
218 | ~$*
219 | *~
220 | *.dbmdl
221 | *.dbproj.schemaview
222 | *.jfm
223 | *.pfx
224 | *.publishsettings
225 | orleans.codegen.cs
226 |
227 | # Including strong name files can present a security risk
228 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
229 | #*.snk
230 |
231 | # Since there are multiple workflows, uncomment next line to ignore bower_components
232 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
233 | #bower_components/
234 |
235 | # RIA/Silverlight projects
236 | Generated_Code/
237 |
238 | # Backup & report files from converting an old project file
239 | # to a newer Visual Studio version. Backup files are not needed,
240 | # because we have git ;-)
241 | _UpgradeReport_Files/
242 | Backup*/
243 | UpgradeLog*.XML
244 | UpgradeLog*.htm
245 | ServiceFabricBackup/
246 | *.rptproj.bak
247 |
248 | # SQL Server files
249 | *.mdf
250 | *.ldf
251 | *.ndf
252 |
253 | # Business Intelligence projects
254 | *.rdl.data
255 | *.bim.layout
256 | *.bim_*.settings
257 | *.rptproj.rsuser
258 | *- Backup*.rdl
259 |
260 | # Microsoft Fakes
261 | FakesAssemblies/
262 |
263 | # GhostDoc plugin setting file
264 | *.GhostDoc.xml
265 |
266 | # Node.js Tools for Visual Studio
267 | .ntvs_analysis.dat
268 | node_modules/
269 |
270 | # Visual Studio 6 build log
271 | *.plg
272 |
273 | # Visual Studio 6 workspace options file
274 | *.opt
275 |
276 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
277 | *.vbw
278 |
279 | # Visual Studio LightSwitch build output
280 | **/*.HTMLClient/GeneratedArtifacts
281 | **/*.DesktopClient/GeneratedArtifacts
282 | **/*.DesktopClient/ModelManifest.xml
283 | **/*.Server/GeneratedArtifacts
284 | **/*.Server/ModelManifest.xml
285 | _Pvt_Extensions
286 |
287 | # Paket dependency manager
288 | .paket/paket.exe
289 | paket-files/
290 |
291 | # FAKE - F# Make
292 | .fake/
293 |
294 | # JetBrains Rider
295 | .idea/
296 | *.sln.iml
297 |
298 | # CodeRush personal settings
299 | .cr/personal
300 |
301 | # Python Tools for Visual Studio (PTVS)
302 | __pycache__/
303 | *.pyc
304 | *.pyd
305 |
306 | # Cake - Uncomment if you are using it
307 | # tools/**
308 | # !tools/packages.config
309 |
310 | # Tabs Studio
311 | *.tss
312 |
313 | # Telerik's JustMock configuration file
314 | *.jmconfig
315 |
316 | # BizTalk build output
317 | *.btp.cs
318 | *.btm.cs
319 | *.odx.cs
320 | *.xsd.cs
321 |
322 | # OpenCover UI analysis results
323 | OpenCover/
324 |
325 | # Azure Stream Analytics local run output
326 | ASALocalRun/
327 |
328 | # MSBuild Binary and Structured Log
329 | *.binlog
330 |
331 | # NVidia Nsight GPU debugger configuration file
332 | *.nvuser
333 |
334 | # MFractors (Xamarin productivity tool) working folder
335 | .mfractor/
336 |
337 | # Local History for Visual Studio
338 | .localhistory/
339 |
340 | # BeatPulse healthcheck temp database
341 | healthchecksdb
342 |
343 | # build directories
344 | build/
345 | dist/
346 | wheelhouse/
347 | *.egg-info/
348 |
349 | # secret
350 | develop/
351 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Hoang-Nhat Tran
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include cyroot *.cpp
2 | recursive-include cyroot *.pxd
3 | recursive-include cyroot *.pyx
4 | include README.md
5 | include LICENSE.txt
6 | include requirements*.txt
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | cy-root [build](https://github.com/inspiros/cy-root/actions) [pypi](https://pypi.org/project/cy-root) [downloads](https://pepy.tech/project/cy-root) [license](https://github.com/inspiros/cy-root/blob/master/LICENSE.txt)
2 | ========
3 |
4 |
5 |
6 |
7 | (Not this root)
8 |
9 |
10 |
11 | A simple root-finding package written in Cython.
12 | Many of the implemented methods can't be found in common Python libraries.
13 |
14 | ## News:
15 |
16 | - **v1.0.3:** All methods now return a _partially_ typed `namedtuple`-like Cython extension object instead of a
17 | `namedtuple`.
18 | - **v1.0.2:** Vector root-finding methods can now _(try to)_ solve systems of equations whose number of inputs differs
19 | from the number of outputs.
20 |
21 | ## Requirements
22 |
23 | - Python 3.6+
24 | - dynamic-default-args
25 | - numpy
26 | - scipy
27 | - sympy
28 |
29 | #### For compilation:
30 |
31 | - Cython (if you want to build from `.pyx` files)
32 | - A C/C++ compiler
33 |
34 | ## Installation
35 |
36 | [cy-root](https://pypi.org/project/cy-root/) is now available on PyPI.
37 |
38 | ```bash
39 | pip install cy-root
40 | ```
41 |
42 | Alternatively, you can build from source.
43 | Make sure you have all the dependencies installed, then clone this repo and run:
44 |
45 | ```bash
46 | git clone git://github.com/inspiros/cy-root.git
47 | cd cy-root
48 | pip install .
49 | ```
50 |
51 | ## Supported algorithms
52 |
53 | **Note:**
54 | For more information about the listed algorithms, please use Google until I update the references.
55 |
56 | ### Scalar root:
57 |
58 | - **Bracketing methods:** (methods that require lower and upper bounds)
59 | - ✅ Bisect
60 | - ✅ [Hybisect](https://dl.acm.org/doi/10.1145/3437120.3437324) _(bisection with interval analysis)_
61 | - ✅ Regula Falsi
62 | - ✅ Illinois
63 | - ✅ Pegasus
64 | - ✅ Anderson–Björck
65 | - ✅ Dekker
66 | - ✅ Brent _(with Inverse Quadratic Interpolation and Hyperbolic Interpolation)_
67 | - ✅ [Chandrupatla](https://dl.acm.org/doi/10.1016/S0965-9978%2896%2900051-8)
68 | - ✅ [Ridders](https://doi.org/10.1109/TCS.1979.1084580)
69 | - ✅ [TOMS748](https://dl.acm.org/doi/10.1145/210089.210111)
70 | - ✅ [Wu](https://doi.org/10.1016/j.amc.2004.04.120)
71 | - ✅ [ITP](https://dl.acm.org/doi/10.1145/3423597)
72 | - **Newton-like methods:** (methods that require derivative and/or higher order derivatives)
73 | - ✅ Newton
74 | - ✅ Chebyshev
75 | - ✅ Halley
76 | - ✅ Super-Halley
77 | - ✅ Tangent Hyperbolas _(similar to Halley)_
78 | - ✅ Householder
79 | - **Quasi-Newton methods:** (methods that approximate derivative, use interpolation, or successive iteration)
80 | - ✅ Secant
81 | - ✅ Sidi
82 | - ✅ Steffensen
83 | - ✅ Inverse Quadratic Interpolation
84 | - ✅ Hyperbolic Interpolation
85 | - ✅ [Muller](https://www.ams.org/journals/mcom/1956-10-056/S0025-5718-1956-0083822-0/) _(for complex root)_
86 |
87 | ### Vector root:
88 |
89 | - **Bracketing methods:** (methods that require n-dimensional bracket)
90 | - ✅ [Vrahatis](https://doi.org/10.1007/BF01389620) _(generalized bisection using n-polygon)_
91 |
92 | [//]: # ( - ⬜ [Eiger-Sikorski-Stenger](https://dl.acm.org/doi/10.1145/2701.2705) _(planned)_)
93 | - **Newton-like methods:** (methods that require Jacobian and/or Hessian)
94 | - ✅ Generalized Newton
95 | - ✅ Generalized Chebyshev
96 | - ✅ Generalized Halley
97 | - ✅ Generalized Super-Halley
98 | - ✅ Generalized Tangent Hyperbolas _(similar to Generalized Halley)_
99 | - **Quasi-Newton methods:** (methods that approximate Jacobian, use interpolation, or successive iteration)
100 | - ✅ [Wolfe-Bittner](https://doi.org/10.1145/368518.368542)
101 | - ✅ [Robinson](https://epubs.siam.org/doi/abs/10.1137/0703057)
102 | - ✅ [Barnes](https://academic.oup.com/comjnl/article/8/1/66/489886)
103 | - ✅ Traub-Steffensen
104 | - ✅ [Broyden](https://doi.org/10.2307/2003941) _(Good and Bad)_
105 | - ✅ [Klement](https://jatm.com.br/jatm/article/view/373)
106 |
107 | #### Derivative Approximation:
108 |
109 | Methods that can be combined with any Newton-like root-finding method to eliminate the need for analytical derivatives.
110 |
111 | - ✅ Finite Difference _(for both scalar and vector functions, up to arbitrary order)_
112 |
113 | ## Usage
114 |
115 | ### Examples:
116 |
117 | #### Example 1:
118 |
119 | Use `find_scalar_root` or `find_vector_root` and pass the method name as the first argument.
120 | This example shows the use of the `find_scalar_root` function with the `itp` method.
121 |
122 | ```python
123 | from cyroot import find_scalar_root
124 |
125 | f = lambda x: x ** 2 - 612
126 | result = find_scalar_root(method='itp', f=f, a=-10, b=50)
127 | print(result)
128 | ```
129 |
130 | Output:
131 |
132 | ```
133 | RootResults(root=24.73863375370596, f_root=-1.1368683772161603e-13, iters=8, f_calls=10, bracket=(24.73863369031373, 24.738633753846678), f_bracket=(-3.1364744472739403e-06, 6.962181942071766e-09), precision=6.353294779160024e-08, error=1.1368683772161603e-13, converged=True, optimal=True)
134 | ```
135 |
136 | The names and pointers to all implemented methods are stored in two dictionaries `SCALAR_ROOT_FINDING_METHODS` and
137 | `VECTOR_ROOT_FINDING_METHODS`.
138 |
139 | ```python
140 | from cyroot import SCALAR_ROOT_FINDING_METHODS, VECTOR_ROOT_FINDING_METHODS
141 |
142 | print('scalar root methods:', SCALAR_ROOT_FINDING_METHODS.keys())
143 | print('vector root methods:', VECTOR_ROOT_FINDING_METHODS.keys())
144 | ```
145 |
146 | #### Example 2:
147 |
148 | Alternatively, import the function directly.
149 | You can also see the full list of input arguments of each method by using `help()` on it.
150 |
151 | This example shows the use of the `muller` method for finding a complex root:
152 |
153 | ```python
154 | from cyroot import muller
155 |
156 | # This function has no real root
157 | f = lambda x: x ** 4 + 4 * x ** 2 + 5
158 | # But Muller's method can be used to find a complex root
159 | result = muller(f, x0=0, x1=10, x2=20)
160 | print(result)
161 | ```
162 |
163 | Output:
164 |
165 | ```
166 | RootResults(root=(0.34356074972251255+1.4553466902253551j), f_root=(-8.881784197001252e-16-1.7763568394002505e-15j), iters=43, f_calls=43, precision=3.177770418807502e-08, error=1.9860273225978185e-15, converged=True, optimal=True)
167 | ```
168 |
169 | #### Example 3:
170 |
171 | Consider the parabola $f(x)=x^2-612$ from **Example 1** with initial bounds $(a,b)$ where $a=-b$: many bracketing
172 | methods will fail to find a root because the function values at the two initial endpoints are identical (for instance, $f(-50)=f(50)=1888$, so there is no sign change).
173 |
174 | In this example, we use the `hybisect` method, which repeatedly bisects the search regions until the Bolzano criterion
175 | holds, and can therefore find multiple roots:
176 |
177 | ```python
178 | import math
179 |
180 | from cyroot import hybisect
181 |
182 | f = lambda x: x ** 2 - 612
183 | # interval arithmetic function of f
184 | interval_f = lambda x_l, x_h: ((min(abs(x_l), abs(x_h))
185 |                                 if math.copysign(1, x_l) * math.copysign(1, x_h) > 0
186 |                                 else 0) ** 2 - 612,
187 |                                max(abs(x_l), abs(x_h)) ** 2 - 612)
188 |
189 | result = hybisect(f, interval_f, -50, 50)
190 | print(result)
191 | ```
192 |
193 | Output:
194 |
195 | ```
196 | RootResults(root=[-24.738633753707973, 24.738633753707973], f_root=[9.936229616869241e-11, 9.936229616869241e-11], split_iters=1, iters=[43, 43], f_calls=(92, 3), bracket=[(-24.738633753710815, -24.73863375370513), (24.73863375370513, 24.738633753710815)], f_bracket=[(nan, nan), (nan, nan)], precision=[5.6843418860808015e-12, 5.6843418860808015e-12], error=[9.936229616869241e-11, 9.936229616869241e-11], converged=[True, True], optimal=[True, True])
197 | ```
198 |
199 | #### Example 4:
200 |
201 | This example shows the use of the `halley` method with functions returning first and second order derivatives of `f`:
202 |
203 | ```python
204 | from cyroot import halley
205 |
206 | f = lambda x: x ** 3 - 5 * x ** 2 + 2 * x - 1
207 | # first order derivative
208 | df = lambda x: 3 * x ** 2 - 10 * x + 2
209 | # second order derivative
210 | d2f = lambda x: 6 * x - 10
211 |
212 | result = halley(f, df, d2f, x0=1.5)
213 | print(result)
214 | ```
215 |
216 | Output:
217 |
218 | ```
219 | RootResults(root=4.613470267581537, f_root=-3.623767952376511e-13, df_root=(19.7176210537612, 17.68082160548922), iters=11, f_calls=(12, 12, 12), precision=4.9625634836147965e-05, error=3.623767952376511e-13, converged=True, optimal=True)
220 | ```
221 |
222 | The `householder` method supports an arbitrary number of higher order derivatives:
223 |
224 | ```python
225 | from cyroot import householder
226 |
227 | f = lambda x: x ** 3 - 5 * x ** 2 + 2 * x - 1
228 | df = lambda x: 3 * x ** 2 - 10 * x + 2
229 | d2f = lambda x: 6 * x - 10
230 | d3f = lambda x: 6
231 |
232 | result = householder(f, dfs=[df, d2f, d3f], x0=1.5)
233 | print(result) # same result
234 | ```
235 |
236 | #### Example 5:
237 |
238 | Similarly, to find roots of systems of equations with Newton-like methods, you have to define functions returning the
239 | **Jacobian** (and **Hessian**) of `F`.
240 |
241 | This example shows the use of `generalized_super_halley` method:
242 |
243 | ```python
244 | import numpy as np
245 |
246 | from cyroot import generalized_super_halley
247 |
248 | # all functions for vector root methods must take a numpy array
249 | # as argument, and return an array-like object
250 | F = lambda x: np.array([x[0] ** 2 + 2 * x[0] * np.sin(x[1]) - x[1],
251 | 4 * x[0] * x[1] ** 2 - x[1] ** 3 - 1])
252 | # Jacobian
253 | J = lambda x: np.array([
254 | [2 * x[0] + 2 * np.sin(x[1]), 2 * x[0] * np.cos(x[1]) - 1],
255 | [4 * x[1] ** 2, 8 * x[0] * x[1] - 3 * x[1] ** 2]
256 | ])
257 | # Hessian
258 | H = lambda x: np.array([
259 | [[2, 2 * np.cos(x[1])],
260 | [2 * np.cos(x[1]), -2 * x[0] * np.sin(x[1])]],
261 | [[0, 8 * x[1]],
262 | [8 * x[1], 8 * x[0] - 6 * x[1]]]
263 | ])
264 |
265 | result = generalized_super_halley(F, J, H, x0=np.array([2., 2.]))
266 | print(result)
267 | ```
268 |
269 | Output: _(a bit messy)_
270 |
271 | ```
272 | RootResults(root=array([0.48298601, 1.08951589]), f_root=array([-4.35123049e-11, -6.55444587e-11]), df_root=(array([[ 2.73877785, -0.55283751],
273 | [ 4.74817951, 0.6486328 ]]), array([[[ 2. , 0.92582907],
274 | [ 0.92582907, -0.85624041]],
275 |
276 | [[ 0. , 8.71612713],
277 | [ 8.71612713, -2.6732073 ]]])), iters=3, f_calls=(4, 4, 4), precision=0.0005808146393164461, error=6.554445874940029e-11, converged=True, optimal=True)
278 | ```
279 |
280 | #### Example 6:
281 |
282 | For vector bracketing root methods or vector root methods with multiple initial guesses, the input should be a 2D
283 | `np.ndarray`.
284 |
285 | This example shows the use of the `vrahatis` method (a generalized bisection) with the example function from the original
286 | paper:
287 |
288 | ```python
289 | import numpy as np
290 |
291 | from cyroot import vrahatis
292 |
293 | F = lambda x: np.array([x[0] ** 2 - 4 * x[1],
294 | -2 * x[0] + x[1] ** 2 + 4 * x[1]])
295 |
296 | # If the initial points do not form an admissible n-polygon,
297 | # an exception will be raised.
298 | x0s = np.array([[-2., -0.25],
299 | [0.5, 0.25],
300 | [2, -0.25],
301 | [0.6, 0.25]])
302 |
303 | result = vrahatis(F, x0s=x0s)
304 | print(result)
305 | ```
306 |
307 | Output:
308 |
309 | ```
310 | RootResults(root=array([4.80212874e-11, 0.00000000e+00]), f_root=array([ 2.30604404e-21, -9.60425747e-11]), iters=34, f_calls=140, bracket=array([[ 2.29193750e-10, 2.91038305e-11],
311 | [-6.54727619e-12, 2.91038305e-11],
312 | [ 4.80212874e-11, 0.00000000e+00],
313 | [-6.98492260e-11, 0.00000000e+00]]), f_bracket=array([[-1.16415322e-10, -3.41972179e-10],
314 | [-1.16415322e-10, 1.29509874e-10],
315 | [ 2.30604404e-21, -9.60425747e-11],
316 | [ 4.87891437e-21, 1.39698452e-10]]), precision=2.9904297647806717e-10, error=9.604257471622717e-11, converged=True, optimal=True)
317 | ```
318 |
319 | #### Example 7:
320 |
321 | This example shows the use of `finite_difference` to approximate derivatives when analytical ones are not
322 | available:
323 |
324 | ```python
325 | import math
326 |
327 | from cyroot import finite_difference
328 |
329 | f = lambda x: (math.sin(x) + 1) ** x
330 | x = 3 * math.pi / 2
331 | d3f_x = finite_difference(f, x,
332 | h=1e-4, # step
333 | order=1, # order
334 | kind='forward') # type: forward, backward, or central
335 | # 7.611804179666343e-36
336 | ```
337 |
338 | Similarly, `generalized_finite_difference` can compute vector derivatives of arbitrary order
339 | (`order=1` for the **Jacobian**, `order=2` for the **Hessian**), and `h` can be a number or an `np.ndarray` containing different
340 | step sizes for each dimension (a per-dimension sketch follows the snippet below):
341 |
342 | ```python
343 | import numpy as np
344 |
345 | from cyroot import generalized_finite_difference
346 |
347 | F = lambda x: np.array([x[0] ** 3 - 3 * x[0] * x[1] + 5 * x[1] - 7,
348 | x[0] ** 2 + x[0] * x[1] ** 2 - 4 * x[1] ** 2 + 3.5])
349 | x = np.array([2., 3.])
350 |
351 | # Derivatives of F will have shape (m, *([n] * order))
352 | # where n is number of inputs, m is number of outputs
353 | J_x = generalized_finite_difference(F, x, h=1e-4, order=1) # Jacobian
354 | # array([[ 2.99985, -1.00015],
355 | # [ 13.0003 , -11.9997 ]])
356 | H_x = generalized_finite_difference(F, x, h=1e-3, order=2) # Hessian
357 | # array([[[12. , -3. ],
358 | # [-3. , 0. ]],
359 | # [[ 2. , 6.001],
360 | # [ 6.001, -3.998]]])
361 | K_x = generalized_finite_difference(F, x, h=1e-2, order=3) # Kardashian, maybe
362 | # array([[[[ 6.00000000e+00, 2.32830644e-10],
363 | # [ 2.32830644e-10, 2.32830644e-10]],
364 | # [[ 2.32830644e-10, 2.32830644e-10],
365 | # [ 2.32830644e-10, 1.11758709e-08]]],
366 | # [[[ 0.00000000e+00, -3.72529030e-09],
367 | # [-3.72529030e-09, 1.99999999e+00]],
368 | # [[-3.72529030e-09, 1.99999999e+00],
369 | # [ 1.99999999e+00, -1.67638063e-08]]]])
370 | ```
371 |
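As a minimal sketch of the per-dimension step sizes mentioned above (the step array here is a hypothetical choice, assumed to pair one step size with each input dimension):

```python
import numpy as np

from cyroot import generalized_finite_difference

F = lambda x: np.array([x[0] ** 3 - 3 * x[0] * x[1] + 5 * x[1] - 7,
                        x[0] ** 2 + x[0] * x[1] ** 2 - 4 * x[1] ** 2 + 3.5])
x = np.array([2., 3.])

# a finer step for the second variable than for the first
h = np.array([1e-4, 1e-6])
J_x = generalized_finite_difference(F, x, h=h, order=1)  # Jacobian with per-dimension steps
```
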
372 | Conveniently, you can use the `FiniteDifference` and `GeneralizedFiniteDifference` classes to wrap your function and
373 | pass the wrappers to any Newton-like method.
374 |
375 | This is actually the default behavior of all Newton-like methods when derivative functions are not provided, and of
376 | some vector quasi-Newton methods when the initial Jacobian guess is not provided.
377 |
378 | ```python
379 | from cyroot import GeneralizedFiniteDifference, generalized_halley
380 |
381 | J = GeneralizedFiniteDifference(F, h=1e-4, order=1)
382 | H = GeneralizedFiniteDifference(F, h=1e-3, order=2)
383 |
384 | result = generalized_halley(F, J=J, H=H, x0=x)
385 | print(result)
386 | ```
387 |
388 | Output:
389 |
390 | ```
391 | RootResults(root=array([2.16665878, 2.11415683]), f_root=array([-5.47455414e-11, 1.05089271e-11]), df_root=(array([[ 7.74141032, -1.49997634],
392 | [ 8.80307666, -7.75212506]]), array([[[ 1.30059527e+01, -3.00000000e+00],
393 | [-3.00000000e+00, -4.54747351e-13]],
394 |
395 | [[ 2.00000000e+00, 4.22931366e+00],
396 | [ 4.22931366e+00, -3.66668244e+00]]])), iters=4, f_calls=(5, 211, 211), precision=1.0327220168881081e-07, error=5.474554143347632e-11, converged=True, optimal=True)
397 | ```
398 |
399 | #### Output format:
400 |
401 | The returned `result` is a namedtuple-like object whose elements depend on the type of the method (a short snippet illustrating these fields follows the list):
402 |
403 | - Common:
404 | - `root`: the solved root.
405 | - `f_root`: value evaluated at root.
406 | - `iters`: number of iterations.
407 | - `f_calls`: number of function calls.
408 | - `precision`: width of the final bracket (for bracketing methods), the absolute difference between the root and the last
409 | estimation, or the span of the set of final estimations.
410 | - `error`: absolute value of `f_root`.
411 | - `converged`: `True` if the stopping criterion is met, `False` if the procedure terminated prematurely.
412 | - `optimal`: `True` only if the error tolerance is satisfied, i.e. `abs(f_root) <= etol`.
413 | - Exclusive to bracketing methods:
414 | - `bracket`: final bracket.
415 | - `f_bracket`: value evaluated at final bracket.
416 | - Exclusive to Newton-like methods:
417 | - `df_root`: derivative or tuple of derivatives (of increasing orders) evaluated at root.
418 |
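For instance, a minimal sketch of reading these fields from a bracketing method's result (assuming `'bisect'` is the registered method name and that the fields are accessible as attributes, consistent with the namedtuple-like interface described above):

```python
from cyroot import find_scalar_root

f = lambda x: x ** 2 - 612
result = find_scalar_root(method='bisect', f=f, a=0, b=50)

# common fields
print(result.root, result.error, result.iters, result.f_calls)
# bracketing methods also expose the final bracket
print(result.bracket, result.f_bracket)
# distinguish how the routine stopped
if result.converged and not result.optimal:
    print('stopped on the precision tolerance, not the error tolerance')
```
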
419 | **Notes**:
420 |
421 | - `converged` can be `True` even if the solution is not optimal, which means the routine stopped because the
422 | precision tolerance was satisfied rather than the error tolerance.
423 | - For `scipy.optimize.root` users, the stopping condition arguments `etol`, `ertol`, `ptol`, `prtol` are equivalent to
424 | `f_tol`, `f_rtol`, `x_tol`, `x_rtol`, respectively (but not identical); see the sketch below.
425 |
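To make the last note concrete, here is a rough sketch of how these arguments map onto their `scipy` counterparts, under the assumption that the stop condition arguments can also be passed per call (their defaults and meanings are described in the Configurations section below):

```python
from cyroot import find_scalar_root

f = lambda x: x ** 2 - 612

# etol / ertol bound abs(f(root))            (analogous to scipy's f_tol / f_rtol)
# ptol / prtol bound the bracket width/step  (analogous to scipy's x_tol / x_rtol)
result = find_scalar_root(method='bisect', f=f, a=0, b=50,
                          etol=1e-10, ertol=0,  # setting a tolerance to 0 disables it
                          ptol=1e-12, prtol=0)
print(result.converged, result.optimal)
```
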
426 | #### Configurations:
427 |
428 | The default values for stop condition arguments (i.e. `etol`, `ertol`, `ptol`, `prtol`, `max_iter`) are globally set to
429 | the values defined in [`_defaults.py`](cyroot/_defaults.py), and can be modified dynamically as follows:
430 |
431 | ```python
432 | import cyroot
433 |
434 | cyroot.set_default_stop_condition_args(
435 | etol=1e-7,
436 | ptol=0, # disable precision tolerance
437 | max_iter=100)
438 |
439 | help(cyroot.illinois) # run to check the updated docstring
440 | ```
441 |
442 | For more examples, please check the [`examples`](examples) folder.
443 |
444 | ## Contributing
445 |
446 | If you want to contribute, please contact me. \
447 | If you want an algorithm to be implemented, also drop me the paper (I will read if I have time).
448 |
449 | ## License
450 |
451 | The code is released under the MIT license. See [`LICENSE.txt`](LICENSE.txt) for details.
452 |
--------------------------------------------------------------------------------
/cyroot/__init__.pxd:
--------------------------------------------------------------------------------
1 | """
2 | A simple root-finding package written in Cython.
3 | """
4 |
--------------------------------------------------------------------------------
/cyroot/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | A simple root-finding package written in Cython.
3 | """
4 |
5 | from ._defaults import set_default_stop_condition_args
6 | from ._version import __version__
7 | from .scalar_bracketing import *
8 | from .scalar_derivative_approximation import *
9 | from .scalar_newton import *
10 | from .scalar_quasi_newton import *
11 | from .scalar_root import *
12 | from .utils._warnings import set_value_warning_filter
13 | from .vector_bracketing import *
14 | from .vector_derivative_approximation import *
15 | from .vector_newton import *
16 | from .vector_quasi_newton import *
17 | from .vector_root import *
18 |
--------------------------------------------------------------------------------
/cyroot/_check_args.pxd:
--------------------------------------------------------------------------------
1 | cimport numpy as np
2 |
3 | # --------------------------------
4 | # Bracketing methods
5 | # --------------------------------
6 | cdef bint _check_stop_cond_scalar_bracket(
7 | double a,
8 | double b,
9 | double f_a,
10 | double f_b,
11 | double etol,
12 | double ertol,
13 | double ptol,
14 | double prtol,
15 | double* r,
16 | double* f_r,
17 | double* precision,
18 | double* error,
19 | bint* converged,
20 | bint* optimal)
21 |
22 | cdef bint _check_stop_cond_vector_bracket(
23 | double[:, :] bs,
24 | double[:, :] F_bs,
25 | double etol,
26 | double ertol,
27 | double ptol,
28 | double prtol,
29 | double[:] r,
30 | double[:] F_r,
31 | double* precision,
32 | double* error,
33 | bint* converged,
34 | bint* optimal)
35 |
36 | ################################################################################
37 | # Guessing methods
38 | ################################################################################
39 | # --------------------------------
40 | # Double
41 | # --------------------------------
42 | ctypedef double (*precision_func_type_scalar)(double[:])
43 |
44 | cdef bint _check_stop_cond_scalar_initial_guess(
45 | double x0,
46 | double f_x0,
47 | double etol,
48 | double ertol,
49 | double ptol,
50 | double prtol,
51 | double* precision,
52 | double* error,
53 | bint* converged,
54 | bint* optimal)
55 |
56 | cdef bint _check_stop_cond_vector_initial_guess(
57 | double[:] x0,
58 | double[:] F_x0,
59 | double etol,
60 | double ertol,
61 | double ptol,
62 | double prtol,
63 | double* precision,
64 | double* error,
65 | bint* converged,
66 | bint* optimal)
67 |
68 | cdef bint _check_stop_cond_scalar_initial_guesses(
69 | double[:] x0s,
70 | double[:] f_x0s,
71 | double etol,
72 | double ertol,
73 | double ptol,
74 | double prtol,
75 | double* r,
76 | double* f_r,
77 | double* precision,
78 | double* error,
79 | bint* converged,
80 | bint* optimal)
81 |
82 | cdef bint _check_stop_cond_vector_initial_guesses(
83 | double[:, :] x0s,
84 | double[:, :] F_x0s,
85 | double etol,
86 | double ertol,
87 | double ptol,
88 | double prtol,
89 | double[:] r,
90 | double[:] F_r,
91 | double* precision,
92 | double* error,
93 | bint* converged,
94 | bint* optimal)
95 |
96 | # --------------------------------
97 | # Double Complex
98 | # --------------------------------
99 | ctypedef double (*precision_func_type_complex_scalar)(double complex[:])
100 |
101 | cdef bint _check_stop_cond_complex_scalar_initial_guess(
102 | double complex x0,
103 | double complex f_x0,
104 | double etol,
105 | double ertol,
106 | double ptol,
107 | double prtol,
108 | double* precision,
109 | double* error,
110 | bint* converged,
111 | bint* optimal)
112 |
113 | cdef bint _check_stop_cond_complex_vector_initial_guess(
114 | double complex[:] x0,
115 | double complex[:] F_x0,
116 | double etol,
117 | double ertol,
118 | double ptol,
119 | double prtol,
120 | double* precision,
121 | double* error,
122 | bint* converged,
123 | bint* optimal)
124 |
125 | cdef bint _check_stop_cond_complex_scalar_initial_guesses(
126 | double complex[:] x0s,
127 | double complex[:] f_x0s,
128 | double etol,
129 | double ertol,
130 | double ptol,
131 | double prtol,
132 | double complex* r,
133 | double complex* f_r,
134 | double* precision,
135 | double* error,
136 | bint* converged,
137 | bint* optimal)
138 |
139 | cdef bint _check_stop_cond_complex_vector_initial_guesses(
140 | double complex[:, :] x0s,
141 | double complex[:, :] F_x0s,
142 | double etol,
143 | double ertol,
144 | double ptol,
145 | double prtol,
146 | double complex[:] r,
147 | double complex[:] F_r,
148 | double* precision,
149 | double* error,
150 | bint* converged,
151 | bint* optimal)
152 |
--------------------------------------------------------------------------------
/cyroot/_check_args.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from typing import Sequence, Union
8 |
9 | import math
10 | import numpy as np
11 | from libc cimport math
12 |
13 | from .ops cimport scalar_ops as sops, vector_ops as vops
14 |
15 | __all__ = [
16 | '_check_stop_cond_args',
17 | '_check_initial_guesses_uniqueness',
18 | '_check_initial_vals_uniqueness',
19 | ]
20 |
21 | ################################################################################
22 | # Python-side checks
23 | ################################################################################
24 | # noinspection DuplicatedCode
25 | def _check_stop_cond_arg(tol: float, arg_name='tol'):
26 | if tol is None:
27 | return 0
28 | elif math.isnan(tol) or not math.isfinite(tol) or tol < 0:
29 | raise ValueError(f'{arg_name} must be non-negative finite number. Got {tol}.')
30 | return tol
31 |
32 | # noinspection DuplicatedCode
33 | def _check_stop_cond_args(etol: float,
34 | ertol: float,
35 | ptol: float,
36 | prtol: float,
37 | max_iter: int):
38 | """Check tolerances and max_iter."""
39 | etol = _check_stop_cond_arg(etol, 'etol')
40 | ertol = _check_stop_cond_arg(ertol, 'ertol')
41 | ptol = _check_stop_cond_arg(ptol, 'ptol')
42 | prtol = _check_stop_cond_arg(prtol, 'prtol')
43 |
44 | if max_iter is None or max_iter < 0 or math.isinf(max_iter) or math.isnan(max_iter):
45 | max_iter = 0
46 | elif not isinstance(max_iter, int):
47 | raise ValueError(f'max_iter must be an integer. Got {max_iter}.')
48 |
49 | if etol == ertol == ptol == prtol == max_iter == 0:
50 | raise ValueError(f'Disabling both tolerances and max_iter will '
51 | f'likely cause the algorithm to run indefinitely.')
52 | return etol, ertol, ptol, prtol, max_iter
53 |
54 | # noinspection DuplicatedCode
55 | def _check_initial_guesses_uniqueness(x0s: Union[Sequence[Union[float, complex, np.ndarray]], np.ndarray]):
56 | if not len(x0s):
57 | raise ValueError('Empty.')
58 | elif isinstance(x0s[0], np.ndarray):
59 | if np.unique(x0s if isinstance(x0s, np.ndarray) else
60 | np.stack(x0s), axis=0).shape[0] < len(x0s):
61 | raise ValueError(f'Initial guesses must be unique. Got:\n' +
62 | '\n'.join(repr(_) for _ in x0s))
63 | elif len(set(x0s)) < len(x0s):
64 | raise ValueError(f'Initial guesses must be unique. Got {x0s}.')
65 |
66 | # noinspection DuplicatedCode
67 | def _check_initial_vals_uniqueness(f_x0s: Union[Sequence[Union[float, complex, np.ndarray]], np.ndarray]):
68 | if not len(f_x0s):
69 | raise ValueError('Empty.')
70 | elif isinstance(f_x0s[0], np.ndarray):
71 | if np.unique(f_x0s if isinstance(f_x0s, np.ndarray) else
72 | np.stack(f_x0s), axis=0).shape[0] < len(f_x0s):
73 | raise ValueError('Initial guesses\' values must be unique. '
74 | 'Got:\n' + '\n'.join(repr(_) for _ in f_x0s))
75 | elif len(set(f_x0s)) < len(f_x0s):
76 | raise ValueError('Initial guesses\' values must be unique. '
77 | f'Got {f_x0s}.')
78 |
79 | ################################################################################
80 | # Bracketing methods
81 | ################################################################################
82 | # noinspection DuplicatedCode
83 | cdef inline bint _check_stop_cond_scalar_bracket(
84 | double a,
85 | double b,
86 | double f_a,
87 | double f_b,
88 | double etol,
89 | double ertol,
90 | double ptol,
91 | double prtol,
92 | double* r,
93 | double* f_r,
94 | double* precision,
95 | double* error,
96 | bint* converged,
97 | bint* optimal):
98 | """Check if stop condition is already met."""
99 | precision[0] = math.fabs(b - a)
100 | cdef double error_a = math.fabs(f_a), error_b = math.fabs(f_b)
101 | error[0] = math.fmin(error_a, error_b)
102 | r[0], f_r[0] = (a, f_a) if error_a < error_b else (b, f_b)
103 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
104 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
105 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
106 |
107 | # noinspection DuplicatedCode
108 | cdef inline bint _check_stop_cond_vector_bracket(
109 | double[:, :] bs,
110 | double[:, :] F_bs,
111 | double etol,
112 | double ertol,
113 | double ptol,
114 | double prtol,
115 | double[:] r,
116 | double[:] F_r,
117 | double* precision,
118 | double* error,
119 | bint* converged,
120 | bint* optimal):
121 | """Check if stop condition is already met."""
122 | if bs.shape[0] == 0:
123 | raise ValueError('Empty sequence.')
124 | precision[0] = vops.max(np.max(bs, 0) - np.min(bs, 0))
125 | cdef double[:] errors = np.abs(F_bs).max(1)
126 | cdef unsigned long best_i = vops.argmin(errors)
127 | error[0] = errors[best_i]
128 | r[:], F_r[:] = bs[best_i], F_bs[best_i]
129 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
130 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
131 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
132 |
133 | ################################################################################
134 | # Quasi-Newton methods with multiple guesses
135 | ################################################################################
136 | # --------------------------------
137 | # Double
138 | # --------------------------------
139 | # noinspection DuplicatedCode
140 | cdef inline bint _check_stop_cond_scalar_initial_guess(
141 | double x0,
142 | double f_x0,
143 | double etol,
144 | double ertol,
145 | double ptol,
146 | double prtol,
147 | double* precision,
148 | double* error,
149 | bint* converged,
150 | bint* optimal):
151 | """Check if stop condition is already met."""
152 | precision[0] = math.INFINITY
153 | error[0] = math.fabs(f_x0)
154 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
155 | converged[0] = optimal[0]
156 | return optimal[0]
157 |
158 | # noinspection DuplicatedCode
159 | cdef inline bint _check_stop_cond_vector_initial_guess(
160 | double[:] x0,
161 | double[:] F_x0,
162 | double etol,
163 | double ertol,
164 | double ptol,
165 | double prtol,
166 | double* precision,
167 | double* error,
168 | bint* converged,
169 | bint* optimal):
170 | """Check if stop condition is already met."""
171 | precision[0] = math.INFINITY
172 | error[0] = vops.max(vops.fabs(F_x0))
173 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
174 | converged[0] = optimal[0]
175 | return optimal[0]
176 |
177 | # noinspection DuplicatedCode
178 | cdef inline bint _check_stop_cond_scalar_initial_guesses(
179 | double[:] x0s,
180 | double[:] f_x0s,
181 | double etol,
182 | double ertol,
183 | double ptol,
184 | double prtol,
185 | double* r,
186 | double* f_r,
187 | double* precision,
188 | double* error,
189 | bint* converged,
190 | bint* optimal):
191 | """Check if stop condition is already met."""
192 | if x0s.shape[0] == 0:
193 | raise ValueError('Empty sequence.')
194 | if x0s.shape[0] == 1:
195 | r[0], f_r[0] = x0s[0], f_x0s[0]
196 | precision[0] = math.INFINITY
197 | error[0] = math.fabs(f_x0s[0])
198 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
199 | converged[0] = optimal[0]
200 | return optimal[0]
201 | cdef double[:] errors = vops.fabs(f_x0s)
202 | cdef unsigned long best_i = vops.argmin(errors)
203 | r[0], f_r[0] = x0s[best_i], f_x0s[best_i]
204 | error[0] = errors[best_i]
205 | precision[0] = vops.max(x0s) - vops.min(x0s)
206 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
207 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
208 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
209 |
210 | # noinspection DuplicatedCode
211 | cdef inline bint _check_stop_cond_vector_initial_guesses(
212 | double[:, :] x0s,
213 | double[:, :] F_x0s,
214 | double etol,
215 | double ertol,
216 | double ptol,
217 | double prtol,
218 | double[:] r,
219 | double[:] F_r,
220 | double* precision,
221 | double* error,
222 | bint* converged,
223 | bint* optimal):
224 | """Check if stop condition is already met."""
225 | if x0s.shape[0] == 0:
226 | raise ValueError('Empty sequence.')
227 | if x0s.shape[0] == 1:
228 | r[:], F_r[:] = x0s[0], F_x0s[0]
229 | precision[0] = math.INFINITY
230 | error[0] = vops.max(vops.fabs(F_x0s[0]))
231 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
232 | converged[0] = optimal[0]
233 | return optimal[0]
234 | cdef double[:] errors = np.abs(F_x0s).max(1)
235 | cdef unsigned long best_i = vops.argmin(errors)
236 | r[:], F_r[:] = x0s[best_i], F_x0s[best_i]
237 | error[0] = errors[best_i]
238 | precision[0] = vops.max(np.max(x0s, 0) - np.min(x0s, 0))
239 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
240 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
241 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
242 |
243 | # --------------------------------
244 | # Double Complex
245 | # --------------------------------
246 | # noinspection DuplicatedCode
247 | cdef inline bint _check_stop_cond_complex_scalar_initial_guess(
248 | double complex x0,
249 | double complex f_x0,
250 | double etol,
251 | double ertol,
252 | double ptol,
253 | double prtol,
254 | double* precision,
255 | double* error,
256 | bint* converged,
257 | bint* optimal):
258 | """Check if stop condition is already met."""
259 | precision[0] = math.INFINITY
260 | error[0] = sops.cabs(f_x0)
261 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
262 | converged[0] = optimal[0]
263 | return optimal[0]
264 |
265 | # noinspection DuplicatedCode
266 | cdef inline bint _check_stop_cond_complex_vector_initial_guess(
267 | double complex[:] x0,
268 | double complex[:] F_x0,
269 | double etol,
270 | double ertol,
271 | double ptol,
272 | double prtol,
273 | double* precision,
274 | double* error,
275 | bint* converged,
276 | bint* optimal):
277 | """Check if stop condition is already met."""
278 | precision[0] = math.INFINITY
279 | error[0] = vops.max(vops.cabs(F_x0))
280 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
281 | converged[0] = optimal[0]
282 | return optimal[0]
283 |
284 | # noinspection DuplicatedCode
285 | cdef inline bint _check_stop_cond_complex_scalar_initial_guesses(
286 | double complex[:] x0s,
287 | double complex[:] f_x0s,
288 | double etol,
289 | double ertol,
290 | double ptol,
291 | double prtol,
292 | double complex* r,
293 | double complex* f_r,
294 | double* precision,
295 | double* error,
296 | bint* converged,
297 | bint* optimal):
298 | """Check if stop condition is already met."""
299 | if x0s.shape[0] == 0:
300 | raise ValueError('Empty sequence.')
301 | if x0s.shape[0] == 1:
302 | r[0], f_r[0] = x0s[0], f_x0s[0]
303 | precision[0] = math.INFINITY
304 | error[0] = sops.cabs(f_x0s[0])
305 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
306 | converged[0] = optimal[0]
307 | return optimal[0]
308 | cdef double[:] errors = vops.cabs(f_x0s)
309 | cdef unsigned long best_i = vops.argmin(errors)
310 | r[0], f_r[0] = x0s[best_i], f_x0s[best_i]
311 | error[0] = errors[best_i]
312 | cdef double[:] xs_abs = vops.cabs(x0s)
313 | precision[0] = vops.max(xs_abs) - vops.min(xs_abs)
314 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
315 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
316 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
317 |
318 | # noinspection DuplicatedCode
319 | cdef inline bint _check_stop_cond_complex_vector_initial_guesses(
320 | double complex[:, :] x0s,
321 | double complex[:, :] F_x0s,
322 | double etol,
323 | double ertol,
324 | double ptol,
325 | double prtol,
326 | double complex[:] r,
327 | double complex[:] F_r,
328 | double* precision,
329 | double* error,
330 | bint* converged,
331 | bint* optimal):
332 | """Check if stop condition is already met."""
333 | if x0s.shape[0] == 0:
334 | raise ValueError('Empty sequence.')
335 | if x0s.shape[0] == 1:
336 | r[:], F_r[:] = x0s[0], F_x0s[0]
337 | precision[0] = math.INFINITY
338 | error[0] = vops.max(vops.cabs(F_x0s[0]))
339 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
340 | converged[0] = optimal[0]
341 | return optimal[0]
342 | cdef double[:] errors = np.abs(F_x0s).max(1)
343 | cdef unsigned long best_i = vops.argmin(errors)
344 | r[:], F_r[:] = x0s[best_i], F_x0s[best_i]
345 | error[0] = errors[best_i]
346 | cdef double[:, :] xs_abs = np.abs(x0s)
347 | precision[0] = vops.max(np.max(xs_abs, 0) - np.min(xs_abs, 0))
348 | optimal[0] = sops.isclose(0, error[0], ertol, etol)
349 | converged[0] = sops.isclose(0, precision[0], prtol, ptol) or optimal[0]
350 | return optimal[0] or sops.isclose(0, precision[0], prtol, ptol)
351 |
--------------------------------------------------------------------------------
/cyroot/_defaults.py:
--------------------------------------------------------------------------------
1 | from dynamic_default_args import dynamic_default_args, named_default
2 | import numpy as np
3 |
4 | from ._check_args import _check_stop_cond_args
5 |
6 | __all__ = [
7 | 'set_default_stop_condition_args',
8 | ]
9 |
10 | # stop condition args
11 | ETOL = 1e-10
12 | ERTOL = 4 * np.finfo(np.float64).eps
13 | PTOL = 1e-12
14 | PRTOL = 4 * np.finfo(np.float64).eps
15 | MAX_ITER = 200
16 |
17 | # derivative approximation args
18 | FINITE_DIFF_STEP = 1e-3
19 |
20 |
21 | @dynamic_default_args()
22 | def set_default_stop_condition_args(etol=named_default(ETOL=ETOL),
23 | ertol=named_default(ERTOL=ERTOL),
24 | ptol=named_default(PTOL=PTOL),
25 | prtol=named_default(PRTOL=PRTOL),
26 | max_iter=named_default(MAX_ITER=MAX_ITER)):
27 | """
28 | Set the default values for etol, ertol, ptol, prtol, and max_iter.
29 | This function takes the default values to be modified as its own inputs,
30 | so a None value will be interpreted as disabling that stop condition (setting it to 0).
31 |
32 | Args:
33 | etol (float, optional): Error tolerance, indicating the
34 | desired precision of the root. Defaults to {etol}.
35 | ertol (float, optional): Relative error tolerance.
36 | Defaults to {ertol}.
37 | ptol (float, optional): Precision tolerance, indicating
38 | the minimum change of root approximations or width of
39 | brackets (in bracketing methods) after each iteration.
40 | Defaults to {ptol}.
41 | prtol (float, optional): Relative precision tolerance.
42 | Defaults to {prtol}.
43 | max_iter (int, optional): Maximum number of iterations.
44 | If set to 0, the procedure will run indefinitely until
45 | stopping condition is met. Defaults to {max_iter}.
46 | """
47 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol,
48 | ertol,
49 | ptol,
50 | prtol,
51 | max_iter)
52 | named_default('ETOL').value = etol
53 | named_default('ERTOL').value = ertol
54 | named_default('PTOL').value = ptol
55 | named_default('PRTOL').value = prtol
56 | named_default('MAX_ITER').value = max_iter
57 |
--------------------------------------------------------------------------------
/cyroot/_types.py:
--------------------------------------------------------------------------------
1 | from typing import Sequence, Union
2 |
3 | import numpy as np
4 |
5 | Real = Union[int, float]
6 | sr1 = Sequence[Real]
7 | sr2 = Sequence[sr1]
8 | sr3 = Sequence[sr2]
9 | sr4 = Sequence[sr3]
10 |
11 | SequenceNDReals = Union[sr1, sr2, sr3, sr4]
12 |
13 | VectorLike = Union[np.ndarray, sr1]
14 | Array2DLike = Union[np.ndarray, sr2]
15 | Array3DLike = Union[np.ndarray, sr3]
16 | Array4DLike = Union[np.ndarray, sr4]
17 | ArrayLike = Union[np.ndarray, SequenceNDReals]
18 |
--------------------------------------------------------------------------------
/cyroot/_version.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | 'VERSION_MAJOR',
3 | 'VERSION_MINOR',
4 | 'VERSION_PATCH',
5 | 'VERSION_STRING',
6 | '__version__',
7 | ]
8 |
9 | VERSION_MAJOR = 1
10 | VERSION_MINOR = 0
11 | VERSION_PATCH = 3
12 |
13 | VERSION_STRING = f'{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_PATCH}'
14 |
15 | __version__ = VERSION_STRING
16 |
--------------------------------------------------------------------------------
/cyroot/fptr.pxd:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 |
3 | cimport numpy as np
4 |
5 | cdef class TrackedFPtr:
6 | cdef public unsigned long n_f_calls
7 |
8 | # --------------------------------
9 | # Double Scalar
10 | # --------------------------------
11 | ctypedef double (*dsf_ptr)(double)
12 |
13 | cdef class DoubleScalarFPtr(TrackedFPtr):
14 | cdef double eval(self, double x) except *
15 |
16 | cdef class CyDoubleScalarFPtr(DoubleScalarFPtr):
17 | cdef dsf_ptr f
18 | @staticmethod
19 | cdef CyDoubleScalarFPtr from_f(dsf_ptr f)
20 | cpdef double eval(self, double x) except *
21 |
22 | cdef class PyDoubleScalarFPtr(DoubleScalarFPtr):
23 | cdef object f
24 | @staticmethod
25 | cdef DoubleScalarFPtr from_f(object f)
26 | cpdef double eval(self, double x) except *
27 |
28 | ctypedef fused double_scalar_func_type:
29 | dsf_ptr
30 | DoubleScalarFPtr
31 |
32 | # --------------------------------
33 | # Double Bi-Scalar
34 | # --------------------------------
35 | ctypedef (double, double) (*dbsf_ptr)(double, double)
36 |
37 | cdef class DoubleBiScalarFPtr(TrackedFPtr):
38 | cdef (double, double) eval(self, double a, double b) except *
39 |
40 | cdef class CyDoubleBiScalarFPtr(DoubleBiScalarFPtr):
41 | cdef dbsf_ptr f
42 | @staticmethod
43 | cdef CyDoubleBiScalarFPtr from_f(dbsf_ptr f)
44 | cpdef (double, double) eval(self, double a, double b) except *
45 |
46 | cdef class PyDoubleBiScalarFPtr(DoubleBiScalarFPtr):
47 | cdef object f
48 | @staticmethod
49 | cdef DoubleBiScalarFPtr from_f(object f)
50 | cpdef (double, double) eval(self, double a, double b) except *
51 |
52 | ctypedef fused double_bi_scalar_func_type:
53 | dbsf_ptr
54 | DoubleBiScalarFPtr
55 |
56 | # --------------------------------
57 | # Complex
58 | # --------------------------------
59 | ctypedef double complex (*csf_ptr)(double complex)
60 |
61 | cdef class ComplexScalarFPtr(TrackedFPtr):
62 | cdef double complex eval(self, double complex x) except *
63 |
64 | cdef class CyComplexScalarFPtr(ComplexScalarFPtr):
65 | cdef csf_ptr f
66 | @staticmethod
67 | cdef CyComplexScalarFPtr from_f(csf_ptr f)
68 | cpdef double complex eval(self, double complex x) except *
69 |
70 | cdef class PyComplexScalarFPtr(ComplexScalarFPtr):
71 | cdef object f
72 | @staticmethod
73 | cdef ComplexScalarFPtr from_f(object f)
74 | cpdef double complex eval(self, double complex x) except *
75 |
76 | ctypedef fused complex_scalar_func_type:
77 | csf_ptr
78 | ComplexScalarFPtr
79 |
80 | # --------------------------------
81 | # Complex Bi-Scalar
82 | # --------------------------------
83 | ctypedef (double complex, double complex) (*cbsf_ptr)(double complex, double complex)
84 |
85 | cdef class ComplexBiScalarFPtr(TrackedFPtr):
86 | cdef (double complex, double complex) eval(self, double complex a, double complex b) except *
87 |
88 | cdef class CyComplexBiScalarFPtr(ComplexBiScalarFPtr):
89 | cdef cbsf_ptr f
90 | @staticmethod
91 | cdef CyComplexBiScalarFPtr from_f(cbsf_ptr f)
92 | cpdef (double complex, double complex) eval(self, double complex a, double complex b) except *
93 |
94 | cdef class PyComplexBiScalarFPtr(ComplexBiScalarFPtr):
95 | cdef object f
96 | @staticmethod
97 | cdef ComplexBiScalarFPtr from_f(object f)
98 | cpdef (double complex, double complex) eval(self, double complex a, double complex b) except *
99 |
100 | ctypedef fused complex_bi_scalar_func_type:
101 | cbsf_ptr
102 | ComplexBiScalarFPtr
103 |
104 | # --------------------------------
105 | # Double MemoryView
106 | # --------------------------------
107 | ctypedef double[:] (*dvf_ptr)(double[:])
108 |
109 | cdef class DoubleVectorFPtr(TrackedFPtr):
110 | cdef double[:] eval(self, double[:] x) except *
111 |
112 | cdef class CyDoubleVectorFPtr(DoubleVectorFPtr):
113 | cdef dvf_ptr f
114 | @staticmethod
115 | cdef CyDoubleVectorFPtr from_f(dvf_ptr f)
116 | cpdef double[:] eval(self, double[:] x) except *
117 |
118 | cdef class PyDoubleVectorFPtr(DoubleVectorFPtr):
119 | cdef object f
120 | @staticmethod
121 | cdef DoubleVectorFPtr from_f(object f)
122 | cpdef double[:] eval(self, double[:] x) except *
123 |
124 | ctypedef fused double_vector_func_type:
125 | dvf_ptr
126 | DoubleVectorFPtr
127 |
128 | # --------------------------------
129 | # Complex MemoryView
130 | # --------------------------------
131 | ctypedef double complex[:] (*cvf_ptr)(double complex[:])
132 |
133 | cdef class ComplexVectorFPtr(TrackedFPtr):
134 | cdef double complex[:] eval(self, double complex[:] x) except *
135 |
136 | cdef class CyComplexVectorFPtr(ComplexVectorFPtr):
137 | cdef cvf_ptr f
138 | @staticmethod
139 | cdef CyComplexVectorFPtr from_f(cvf_ptr f)
140 | cpdef double complex[:] eval(self, double complex[:] x) except *
141 |
142 | cdef class PyComplexVectorFPtr(ComplexVectorFPtr):
143 | cdef object f
144 | @staticmethod
145 | cdef ComplexVectorFPtr from_f(object f)
146 | cpdef double complex[:] eval(self, double complex[:] x) except *
147 |
148 | ctypedef fused complex_vector_func_type:
149 | cvf_ptr
150 | ComplexVectorFPtr
151 |
152 | # --------------------------------
153 | # Numpy Array
154 | # --------------------------------
155 | ctypedef np.ndarray (*ndarray_f_ptr)(np.ndarray)
156 |
157 | cdef class NdArrayFPtr(TrackedFPtr):
158 | cdef np.ndarray eval(self, np.ndarray x)
159 |
160 | cdef class CyNdArrayFPtr(NdArrayFPtr):
161 | cdef ndarray_f_ptr f
162 | @staticmethod
163 | cdef CyNdArrayFPtr from_f(ndarray_f_ptr f)
164 | cpdef np.ndarray eval(self, np.ndarray x)
165 |
166 | cdef class PyNdArrayFPtr(NdArrayFPtr):
167 | cdef object f
168 | @staticmethod
169 | cdef NdArrayFPtr from_f(object f)
170 | cpdef np.ndarray eval(self, np.ndarray x)
171 |
172 | ctypedef fused ndarray_func_type:
173 | ndarray_f_ptr
174 | NdArrayFPtr
175 |
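176 | # Note: each `*_func_type` fused type above pairs a raw C function pointer with
177 | # the corresponding FPtr extension type, so a kernel declared with the fused
178 | # type can be specialized both for Cython-level function pointers and for
179 | # wrapped Python callables.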
--------------------------------------------------------------------------------
/cyroot/fptr.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 |
3 | import numpy as np
4 |
5 | __all__ = [
6 | 'DoubleScalarFPtr', 'CyDoubleScalarFPtr', 'PyDoubleScalarFPtr',
7 | 'DoubleBiScalarFPtr', 'CyDoubleBiScalarFPtr', 'PyDoubleBiScalarFPtr',
8 | 'ComplexScalarFPtr', 'CyComplexScalarFPtr', 'PyComplexScalarFPtr',
9 | 'ComplexBiScalarFPtr', 'CyComplexBiScalarFPtr', 'PyComplexBiScalarFPtr',
10 | 'DoubleVectorFPtr', 'CyDoubleVectorFPtr', 'PyDoubleVectorFPtr',
11 | 'ComplexVectorFPtr', 'CyComplexVectorFPtr', 'PyComplexVectorFPtr',
12 | 'NdArrayFPtr', 'CyNdArrayFPtr', 'PyNdArrayFPtr',
13 | ]
14 |
15 | cdef class TrackedFPtr:
16 | """abstract class"""
17 | def __cinit__(self):
18 | self.n_f_calls = 0
19 |
20 | # --------------------------------
21 | # Double Scalar
22 | # --------------------------------
23 | cdef class DoubleScalarFPtr(TrackedFPtr):
24 | def __call__(self, double x):
25 | return self.eval(x)
26 |
27 | cdef double eval(self, double x) except *:
28 | raise NotImplementedError
29 |
30 | cdef class CyDoubleScalarFPtr(DoubleScalarFPtr):
31 | def __init__(self):
32 | raise TypeError('This class cannot be instantiated directly.')
33 |
34 | @staticmethod
35 | cdef CyDoubleScalarFPtr from_f(dsf_ptr f):
36 | cdef CyDoubleScalarFPtr wrapper = CyDoubleScalarFPtr.__new__(CyDoubleScalarFPtr)
37 | wrapper.f = f
38 | return wrapper
39 |
40 | cpdef double eval(self, double x) except *:
41 | self.n_f_calls += 1
42 | return self.f(x)
43 |
44 | cdef class PyDoubleScalarFPtr(DoubleScalarFPtr):
45 | def __init__(self, f):
46 | self.f = f
47 |
48 | @staticmethod
49 | cdef DoubleScalarFPtr from_f(object f):
50 | if isinstance(f, DoubleScalarFPtr):
51 | return f
52 | cdef PyDoubleScalarFPtr wrapper = PyDoubleScalarFPtr.__new__(PyDoubleScalarFPtr)
53 | wrapper.f = f
54 | return wrapper
55 |
56 | cpdef double eval(self, double x) except *:
57 | self.n_f_calls += 1
58 | return self.f(x)
59 |
60 | # --------------------------------
61 | # Double Bi-Scalar
62 | # --------------------------------
63 | cdef class DoubleBiScalarFPtr(TrackedFPtr):
64 | def __call__(self, double a, double b):
65 | return self.eval(a, b)
66 |
67 | cdef (double, double) eval(self, double a, double b) except *:
68 | raise NotImplementedError
69 |
70 | cdef class CyDoubleBiScalarFPtr(DoubleBiScalarFPtr):
71 | def __init__(self):
72 | raise TypeError('This class cannot be instantiated directly.')
73 |
74 | @staticmethod
75 | cdef CyDoubleBiScalarFPtr from_f(dbsf_ptr f):
76 | cdef CyDoubleBiScalarFPtr wrapper = CyDoubleBiScalarFPtr.__new__(CyDoubleBiScalarFPtr)
77 | wrapper.f = f
78 | return wrapper
79 |
80 | cpdef (double, double) eval(self, double a, double b) except *:
81 | self.n_f_calls += 1
82 | return self.f(a, b)
83 |
84 | cdef class PyDoubleBiScalarFPtr(DoubleBiScalarFPtr):
85 | def __init__(self, f):
86 | self.f = f
87 |
88 | @staticmethod
89 | cdef DoubleBiScalarFPtr from_f(object f):
90 | if isinstance(f, DoubleBiScalarFPtr):
91 | return f
92 | cdef PyDoubleBiScalarFPtr wrapper = PyDoubleBiScalarFPtr.__new__(PyDoubleBiScalarFPtr)
93 | wrapper.f = f
94 | return wrapper
95 |
96 | cpdef (double, double) eval(self, double a, double b) except *:
97 | self.n_f_calls += 1
98 | return self.f(a, b)
99 |
100 | # --------------------------------
101 | # Complex
102 | # --------------------------------
103 | cdef class ComplexScalarFPtr(TrackedFPtr):
104 | def __call__(self, double complex x):
105 | return self.eval(x)
106 |
107 | cdef double complex eval(self, double complex x) except *:
108 | raise NotImplementedError
109 |
110 | cdef class CyComplexScalarFPtr(ComplexScalarFPtr):
111 | def __init__(self):
112 | raise TypeError('This class cannot be instantiated directly.')
113 |
114 | @staticmethod
115 | cdef CyComplexScalarFPtr from_f(csf_ptr f):
116 | cdef CyComplexScalarFPtr wrapper = CyComplexScalarFPtr.__new__(CyComplexScalarFPtr)
117 | wrapper.f = f
118 | return wrapper
119 |
120 | cpdef double complex eval(self, double complex x) except *:
121 | self.n_f_calls += 1
122 | return self.f(x)
123 |
124 | cdef class PyComplexScalarFPtr(ComplexScalarFPtr):
125 | def __init__(self, f):
126 | self.f = f
127 |
128 | @staticmethod
129 | cdef ComplexScalarFPtr from_f(object f):
130 | if isinstance(f, ComplexScalarFPtr):
131 | return f
132 | cdef PyComplexScalarFPtr wrapper = PyComplexScalarFPtr.__new__(PyComplexScalarFPtr)
133 | wrapper.f = f
134 | return wrapper
135 |
136 | cpdef double complex eval(self, double complex x) except *:
137 | self.n_f_calls += 1
138 | return self.f(x)
139 |
140 | # --------------------------------
141 | # Complex Bi-Scalar
142 | # --------------------------------
143 | cdef class ComplexBiScalarFPtr(TrackedFPtr):
144 | def __call__(self, double complex a, double complex b):
145 | return self.eval(a, b)
146 |
147 | cdef (double complex, double complex) eval(self, double complex a, double complex b) except *:
148 | raise NotImplementedError
149 |
150 | cdef class CyComplexBiScalarFPtr(ComplexBiScalarFPtr):
151 | def __init__(self):
152 | raise TypeError('This class cannot be instantiated directly.')
153 |
154 | @staticmethod
155 | cdef CyComplexBiScalarFPtr from_f(cbsf_ptr f):
156 | cdef CyComplexBiScalarFPtr wrapper = CyComplexBiScalarFPtr.__new__(CyComplexBiScalarFPtr)
157 | wrapper.f = f
158 | return wrapper
159 |
160 | cpdef (double complex, double complex) eval(self, double complex a, double complex b) except *:
161 | self.n_f_calls += 1
162 | return self.f(a, b)
163 |
164 | cdef class PyComplexBiScalarFPtr(ComplexBiScalarFPtr):
165 | def __init__(self, f):
166 | self.f = f
167 |
168 | @staticmethod
169 | cdef ComplexBiScalarFPtr from_f(object f):
170 | if isinstance(f, ComplexBiScalarFPtr):
171 | return f
172 | cdef PyComplexBiScalarFPtr wrapper = PyComplexBiScalarFPtr.__new__(PyComplexBiScalarFPtr)
173 | wrapper.f = f
174 | return wrapper
175 |
176 | cpdef (double complex, double complex) eval(self, double complex a, double complex b) except *:
177 | self.n_f_calls += 1
178 | return self.f(a, b)
179 |
180 | # --------------------------------
181 | # Double MemoryView
182 | # --------------------------------
183 | cdef class DoubleVectorFPtr(TrackedFPtr):
184 | def __call__(self, double[:] x):
185 | return self.eval(x)
186 |
187 | cdef double[:] eval(self, double[:] x) except *:
188 | raise NotImplementedError
189 |
190 | cdef class CyDoubleVectorFPtr(DoubleVectorFPtr):
191 | def __init__(self):
192 | raise TypeError('This class cannot be instantiated directly.')
193 |
194 | @staticmethod
195 | cdef CyDoubleVectorFPtr from_f(dvf_ptr f):
196 | cdef CyDoubleVectorFPtr wrapper = CyDoubleVectorFPtr.__new__(CyDoubleVectorFPtr)
197 | wrapper.f = f
198 | return wrapper
199 |
200 | cpdef double[:] eval(self, double[:] x) except *:
201 | self.n_f_calls += 1
202 | return self.f(x)
203 |
204 | cdef class PyDoubleVectorFPtr(DoubleVectorFPtr):
205 | def __init__(self, f):
206 | self.f = f
207 |
208 | @staticmethod
209 | cdef DoubleVectorFPtr from_f(object f):
210 | if isinstance(f, DoubleVectorFPtr):
211 | return f
212 | cdef PyDoubleVectorFPtr wrapper = PyDoubleVectorFPtr.__new__(PyDoubleVectorFPtr)
213 | wrapper.f = f
214 | return wrapper
215 |
216 | cpdef double[:] eval(self, double[:] x) except *:
217 | self.n_f_calls += 1
218 | return self.f(x)
219 |
220 | # --------------------------------
221 | # Complex MemoryView
222 | # --------------------------------
223 | cdef class ComplexVectorFPtr(TrackedFPtr):
224 | def __call__(self, double complex[:] x):
225 | return self.eval(x)
226 |
227 | cdef double complex[:] eval(self, double complex[:] x) except *:
228 | raise NotImplementedError
229 |
230 | cdef class CyComplexVectorFPtr(ComplexVectorFPtr):
231 | def __init__(self):
232 | raise TypeError('This class cannot be instantiated directly.')
233 |
234 | @staticmethod
235 | cdef CyComplexVectorFPtr from_f(cvf_ptr f):
236 | cdef CyComplexVectorFPtr wrapper = CyComplexVectorFPtr.__new__(CyComplexVectorFPtr)
237 | wrapper.f = f
238 | return wrapper
239 |
240 | cpdef double complex[:] eval(self, double complex[:] x) except *:
241 | self.n_f_calls += 1
242 | return self.f(x)
243 |
244 | cdef class PyComplexVectorFPtr(ComplexVectorFPtr):
245 | def __init__(self, f):
246 | self.f = f
247 |
248 | @staticmethod
249 | cdef ComplexVectorFPtr from_f(object f):
250 | if isinstance(f, ComplexVectorFPtr):
251 | return f
252 | cdef PyComplexVectorFPtr wrapper = PyComplexVectorFPtr.__new__(PyComplexVectorFPtr)
253 | wrapper.f = f
254 | return wrapper
255 |
256 | cpdef double complex[:] eval(self, double complex[:] x) except *:
257 | self.n_f_calls += 1
258 | return self.f(x)
259 |
260 | # --------------------------------
261 | # Numpy Array
262 | # --------------------------------
263 | cdef class NdArrayFPtr(TrackedFPtr):
264 | def __call__(self, np.ndarray x):
265 | return self.eval(x)
266 |
267 | cdef np.ndarray eval(self, np.ndarray x):
268 | raise NotImplementedError
269 |
270 | cdef class CyNdArrayFPtr(NdArrayFPtr):
271 | def __init__(self):
272 | raise TypeError('This class cannot be instantiated directly.')
273 |
274 | @staticmethod
275 | cdef CyNdArrayFPtr from_f(ndarray_f_ptr f):
276 | cdef CyNdArrayFPtr wrapper = CyNdArrayFPtr.__new__(CyNdArrayFPtr)
277 | wrapper.f = f
278 | return wrapper
279 |
280 | cpdef np.ndarray eval(self, np.ndarray x):
281 | self.n_f_calls += 1
282 | return self.f(x)
283 |
284 | cdef class PyNdArrayFPtr(NdArrayFPtr):
285 | def __init__(self, f):
286 | self.f = f
287 |
288 | @staticmethod
289 | cdef NdArrayFPtr from_f(object f):
290 | if isinstance(f, NdArrayFPtr):
291 | return f
292 | cdef PyNdArrayFPtr wrapper = PyNdArrayFPtr.__new__(PyNdArrayFPtr)
293 | wrapper.f = f
294 | return wrapper
295 |
296 | cpdef np.ndarray eval(self, np.ndarray x):
297 | self.n_f_calls += 1
298 | return np.asarray(self.f(x), dtype=np.float64)
299 |
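300 | # Usage sketch: the Py* wrappers adapt plain Python callables to the FPtr
301 | # interface used by the Cython kernels while counting evaluations (assuming
302 | # `n_f_calls` is exposed on TrackedFPtr as a readable attribute):
303 | #
304 | #   f = PyDoubleScalarFPtr(lambda x: x * x - 2.)
305 | #   f(1.5)        # -> 0.25
306 | #   f.n_f_calls   # -> 1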
--------------------------------------------------------------------------------
/cyroot/ops/__init__.pxd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspiros/cy-root/7150aad4e83ff45037694517641746600b508ea5/cyroot/ops/__init__.pxd
--------------------------------------------------------------------------------
/cyroot/ops/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspiros/cy-root/7150aad4e83ff45037694517641746600b508ea5/cyroot/ops/__init__.py
--------------------------------------------------------------------------------
/cyroot/ops/matrix_ops.pxd:
--------------------------------------------------------------------------------
1 | cimport numpy as np
2 |
3 | cdef np.ndarray inv(np.ndarray[np.float64_t, ndim=2] A, np.ndarray b=*, int method=*, bint force=*)
4 |
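5 | # `method` selects the solver: 0 = explicit inverse, 1 = solve, 2 = LU solve,
6 | # 3 = pseudo-inverse, 4 = least squares (see matrix_ops.pyx). When `force` is
7 | # False, non-square or ill-conditioned systems fall through to a more robust
8 | # method.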
--------------------------------------------------------------------------------
/cyroot/ops/matrix_ops.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | import numpy as np
8 | import scipy as sp
9 |
10 | cdef inline bint is_square(np.ndarray[np.float64_t, ndim=2] A):
11 | return A.shape[0] == A.shape[1]
12 |
13 | cdef inline bint is_singular(np.ndarray[np.float64_t, ndim=2] A):
14 | return np.linalg.cond(A) > 1 / np.finfo(A.dtype).eps
15 |
16 | cdef np.ndarray inv(np.ndarray[np.float64_t, ndim=2] A,
17 | np.ndarray b=None,
18 | int method=1,
19 | bint force=False):
20 | cdef unsigned long m = A.shape[0]
21 | cdef unsigned long n = A.shape[1]
22 | cdef np.ndarray A_inv
23 | if method == 0: # inv
24 | if (not is_square(A) or is_singular(A)) and not force:
25 | method += 1
26 | else:
27 | A_inv = sp.linalg.inv(A)
28 | if b is not None:
29 | return A_inv.dot(b)
30 | return A_inv
31 | if method == 1: # solve
32 | if (not is_square(A) or is_singular(A)) and not force:
33 | method += 2
34 | else:
35 | return sp.linalg.solve(A, b if b is not None else np.eye(m))
36 | if method == 2: # lu_solve
37 | if (not is_square(A) or is_singular(A)) and not force:
38 | method += 1
39 | else:
40 | return sp.linalg.lu_solve(sp.linalg.lu_factor(A), b if b is not None else np.eye(m))
41 | if method == 3: # pinv
42 | A_inv = sp.linalg.pinv(A)
43 | if b is not None:
44 | return A_inv.dot(b)
45 | return A_inv
46 | if method == 4: # lstsq
47 | return sp.linalg.lstsq(A, b if b is not None else np.eye(m))[0]
48 | raise NotImplementedError(f'No implementation for method={method}.')
49 |
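50 | # Fallback ladder when `force=False`: a non-square or near-singular A falls
51 | # through 0 (inv) -> 1 (solve) -> 3 (pinv), and 2 (lu_solve) -> 3 (pinv);
52 | # method 4 (lstsq) is always applied directly. A sketch, assuming the caller
53 | # reaches this cdef function from Cython:
54 | #
55 | #   A = np.array([[2., 0.], [0., 4.]])
56 | #   inv(A, method=1)               # solves A X = I  ->  [[0.5, 0.], [0., 0.25]]
57 | #   inv(A, b=np.array([1., 1.]))   # solves A x = b  ->  [0.5, 0.25]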
--------------------------------------------------------------------------------
/cyroot/ops/scalar_ops.pxd:
--------------------------------------------------------------------------------
1 | ctypedef fused real:
2 | int
3 | unsigned int
4 | long
5 | unsigned long
6 | float
7 | double
8 |
9 | ctypedef fused numeric:
10 | int
11 | unsigned int
12 | long
13 | unsigned long
14 | float
15 | double
16 | double complex
17 |
18 | cdef bint isclose(double a, double b, double rtol=*, double atol=*) nogil
19 | cdef bint cisclose(double complex a, double complex b, double rtol=*, double atol=*) nogil
20 | cdef real min(real a, real b) nogil
21 | cdef real max(real a, real b) nogil
22 |
23 | cdef extern from '<math.h>' nogil:
24 | double fabs(double)
25 | double sqrt(double)
26 |
27 | cdef extern from '<complex>' nogil:
28 | double cabs 'abs'(double complex)
29 | double complex csqrt 'sqrt'(double complex)
30 |
31 | cdef extern from * nogil:
32 | """
33 | #include <complex>
34 |
35 | template <typename T>
36 | int sign(T val) {
37 | return (T(0) < val) - (val < T(0));
38 | }
39 |
40 | template <typename T>
41 | std::complex<T> csign(std::complex<T> val) {
42 | return val / std::norm(val);
43 | }
44 |
45 | unsigned long factorial(unsigned int n) {
46 | unsigned long f = 1;
47 | for (unsigned int i = 1; i < n + 1; i++)
48 | f *= i;
49 | return f;
50 | }
51 |
52 | unsigned long binomial_coef(unsigned long n, unsigned long k) {
53 | unsigned long bin_coef = 1;
54 | unsigned int i;
55 | if (k <= n / 2) {
56 | for (i = 0; i < k; i++)
57 | bin_coef *= n - i;
58 | return bin_coef / factorial(k);
59 | }
60 | for (i = 0; i < n - k; i++)
61 | bin_coef *= n - i;
62 | return bin_coef / factorial(n - k);
63 | }
64 | """
65 | int sign(double)
66 | double complex csign(double complex)
67 | unsigned long factorial(unsigned int)
68 | unsigned long binomial_coef(unsigned long, unsigned long)
69 |
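70 | # The inlined C++ helpers follow the usual conventions: sign(x) is -1, 0 or +1,
71 | # factorial(n) = n!, and binomial_coef(n, k) = C(n, k); for example,
72 | # binomial_coef(6, 2) == 15 and factorial(4) == 24.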
--------------------------------------------------------------------------------
/cyroot/ops/scalar_ops.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from libc cimport math
8 |
9 | cdef inline bint isclose(double a, double b, double rtol=1e-5, double atol=1e-8) nogil:
10 | if math.isinf(b) or math.isinf(a):
11 | return math.isinf(a) and math.isinf(b)
12 | return math.fabs(a - b) <= atol + rtol * math.fabs(b)
13 |
14 | cdef inline bint cisclose(double complex a, double complex b, double rtol=1e-5, double atol=1e-8) nogil:
15 | return isclose(a.real, b.real, rtol, atol) and isclose(a.imag, b.imag, rtol, atol)
16 |
17 | cdef inline real min(real a, real b) nogil:
18 | return b if a > b else a
19 |
20 | cdef inline real max(real a, real b) nogil:
21 | return b if a < b else a
22 |
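23 | # isclose mirrors numpy.isclose's asymmetric test |a - b| <= atol + rtol * |b|;
24 | # e.g. isclose(1.0, 1.0 + 5e-9) is True with the default tolerances, while two
25 | # infinite operands always compare as close regardless of sign.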
--------------------------------------------------------------------------------
/cyroot/ops/vector_ops.pxd:
--------------------------------------------------------------------------------
1 | from .scalar_ops cimport real, numeric
2 |
3 | cdef bint equal(real[:] a, real[:] b) nogil
4 | cdef bint cequal(double complex[:] a, double complex[:] b) nogil
5 | cdef bint allclose(double[:] a, double[:] b, double rtol=*, double atol=*) nogil
6 | cdef bint callclose(double complex[:] a, double complex[:] b, double rtol=*, double atol=*) nogil
7 | cdef bint anyclose(double[:] a, double[:] b, double rtol=*, double atol=*) nogil
8 | cdef bint canyclose(double complex[:] a, double complex[:] b, double rtol=*, double atol=*) nogil
9 | cdef int[:] sign(double[:] xs) nogil
10 | cdef double complex[:] csign(double complex[:] xs) nogil
11 | cdef double[:] fabs(double[:] xs) nogil
12 | cdef double[:] cabs(double complex[:] xs) nogil
13 | cdef double width(double[:] xs) nogil
14 | cdef double cwidth(double complex[:] xs) nogil
15 | cdef double[:] sqrt(double[:] xs) nogil
16 | cdef double complex[:] csqrt(double complex[:] xs) nogil
17 | cdef double norm(double[:] xs, double order=*) nogil
18 | cdef double cnorm(double complex[:] xs, double order=*) nogil
19 | cdef double[:] permute(double[:] xs, unsigned long[:] inds) nogil
20 | cdef numeric sum(numeric[:] xs) nogil
21 | cdef numeric prod(numeric[:] xs) nogil
22 | cdef double complex cprod(double complex[:] xs) nogil
23 | cdef double mean(double[:] xs) nogil
24 | cdef double min(double[:] xs) nogil
25 | cdef double max(double[:] xs) nogil
26 | cdef unsigned long argmin(double[:] xs) nogil
27 | cdef unsigned long argmax(double[:] xs) nogil
28 | cdef (unsigned long, unsigned long) argminmax(double[:] xs) nogil
29 | cdef void sort(double[::1] xs) nogil
30 | cdef unsigned long[:] argsort(double[:] xs, bint reverse=*) nogil
31 |
--------------------------------------------------------------------------------
/cyroot/ops/vector_ops.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from cython cimport view
8 | from libcpp.algorithm cimport sort as cpp_sort
9 | from libcpp.vector cimport vector
10 | from libc cimport math
11 |
12 | from . cimport scalar_ops as sops
13 |
14 | cdef inline bint equal(real[:] a, real[:] b) nogil:
15 | if a.shape[0] != b.shape[0]:
16 | return False
17 | cdef unsigned long i
18 | for i in range(a.shape[0]):
19 | if a[i] != b[i]:
20 | return False
21 | return True
22 |
23 | cdef inline bint cequal(double complex[:] a, double complex[:] b) nogil:
24 | if a.shape[0] != b.shape[0]:
25 | return False
26 | cdef unsigned long i
27 | for i in range(a.shape[0]):
28 | if a[i].real != b[i].real or a[i].imag != b[i].imag:
29 | return False
30 | return True
31 |
32 | cdef inline bint allclose(double[:] a, double[:] b, double rtol=1e-5, double atol=1e-8) nogil:
33 | if a.shape[0] != b.shape[0]:
34 | return False
35 | cdef unsigned long i
36 | for i in range(a.shape[0]):
37 | if not sops.isclose(a[i], b[i], rtol, atol):
38 | return False
39 | return True
40 |
41 | cdef inline bint callclose(double complex[:] a, double complex[:] b, double rtol=1e-5, double atol=1e-8) nogil:
42 | if a.shape[0] != b.shape[0]:
43 | return False
44 | cdef unsigned long i
45 | for i in range(a.shape[0]):
46 | if not sops.cisclose(a[i], b[i], rtol, atol):
47 | return False
48 | return True
49 |
50 | cdef inline bint anyclose(double[:] a, double[:] b, double rtol=1e-5, double atol=1e-8) nogil:
51 | if a.shape[0] != b.shape[0]:
52 | return False
53 | cdef unsigned long i
54 | for i in range(a.shape[0]):
55 | if sops.isclose(a[i], b[i], rtol, atol):
56 | return True
57 | return False
58 |
59 | cdef inline bint canyclose(double complex[:] a, double complex[:] b, double rtol=1e-5, double atol=1e-8) nogil:
60 | if a.shape[0] != b.shape[0]:
61 | return False
62 | cdef unsigned long i
63 | for i in range(a.shape[0]):
64 | if sops.cisclose(a[i], b[i], rtol, atol):
65 | return True
66 | return False
67 |
68 | cdef inline int[:] sign(double[:] xs) nogil:
69 | cdef unsigned long i
70 | cdef int[:] res
71 | with gil:
72 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(int), format='i')
73 | for i in range(xs.shape[0]):
74 | res[i] = sops.sign(xs[i])
75 | return res
76 |
77 | cdef inline double complex[:] csign(double complex[:] xs) nogil:
78 | cdef unsigned long i
79 | cdef double complex[:] res
80 | with gil:
81 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double complex), format='c')
82 | for i in range(xs.shape[0]):
83 | res[i] = sops.csign(xs[i])
84 | return res
85 |
86 | cdef inline double[:] fabs(double[:] xs) nogil:
87 | cdef unsigned long i
88 | cdef double[:] res
89 | with gil:
90 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double), format='d')
91 | for i in range(xs.shape[0]):
92 | res[i] = math.fabs(xs[i])
93 | return res
94 |
95 | cdef inline double[:] cabs(double complex[:] xs) nogil:
96 | cdef unsigned long i
97 | cdef double[:] res
98 | with gil:
99 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double), format='d')
100 | for i in range(xs.shape[0]):
101 | res[i] = sops.cabs(xs[i])
102 | return res
103 |
104 | cdef inline double width(double[:] xs) nogil:
105 | cdef unsigned long argmin_i, argmax_i
106 | argmin_i, argmax_i = argminmax(xs)
107 | return xs[argmax_i] - xs[argmin_i]
108 |
109 | cdef inline double cwidth(double complex[:] xs) nogil:
110 | cdef unsigned long argmin_i, argmax_i
111 | cdef double[:] xs_abs = cabs(xs)
112 | argmin_i, argmax_i = argminmax(xs_abs)
113 | return xs_abs[argmax_i] - xs_abs[argmin_i]
114 |
115 | cdef inline double[:] sqrt(double[:] xs) nogil:
116 | cdef unsigned long i
117 | cdef double[:] res
118 | with gil:
119 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double), format='d')
120 | for i in range(xs.shape[0]):
121 | res[i] = sops.sqrt(xs[i])
122 | return res
123 |
124 | cdef inline double complex[:] csqrt(double complex[:] xs) nogil:
125 | cdef unsigned long i
126 | cdef double complex[:] res
127 | with gil:
128 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double complex), format='C')
129 | for i in range(xs.shape[0]):
130 | res[i] = sops.csqrt(xs[i])
131 | return res
132 |
133 | cdef inline double norm(double[:] xs, double order=2) nogil:
134 | if order == math.INFINITY:
135 | return max(fabs(xs))
136 | if order == -math.INFINITY:
137 | return min(fabs(xs))
138 | cdef unsigned long i
139 | cdef double res = 0
140 | if order == 0:
141 | for i in range(xs.shape[0]):
142 | res += xs[i] != 0
143 | return res
144 | for i in range(xs.shape[0]):
145 | res += sops.fabs(xs[i]) ** order
146 | return res ** (1 / order)
147 |
148 | cdef inline double cnorm(double complex[:] xs, double order=2) nogil:
149 | if order == math.INFINITY:
150 | return max(cabs(xs))
151 | if order == -math.INFINITY:
152 | return min(cabs(xs))
153 | cdef unsigned long i
154 | cdef double res = 0
155 | if order == 0:
156 | for i in range(xs.shape[0]):
157 | res += xs[i] != 0
158 | return res
159 | for i in range(xs.shape[0]):
160 | res += sops.cabs(xs[i]) ** order
161 | return res ** (1 / order)
162 |
163 | cdef inline double[:] permute(double[:] xs, unsigned long[:] inds) nogil:
164 | cdef unsigned long i
165 | cdef double[:] res
166 | with gil:
167 | res = view.array(shape=(xs.shape[0],), itemsize=sizeof(double), format='d')
168 | for i in range(xs.shape[0]):
169 | res[i] = xs[inds[i]]
170 | return res
171 |
172 | cdef inline numeric sum(numeric[:] xs) nogil:
173 | cdef unsigned long i
174 | cdef numeric res = 0
175 | for i in range(xs.shape[0]):
176 | res += xs[i]
177 | return res
178 |
179 | cdef inline numeric prod(numeric[:] xs) nogil:
180 | cdef numeric res = 1
181 | cdef unsigned long i
182 | for i in range(xs.shape[0]):
183 | res *= xs[i]
184 | return res
185 |
186 | cdef inline double complex cprod(double complex[:] xs) nogil:
187 | cdef double complex res = 1
188 | cdef unsigned long i
189 | for i in range(xs.shape[0]):
190 | res *= xs[i]
191 | return res
192 |
193 | cdef inline double mean(double[:] xs) nogil:
194 | return sum[double](xs) / xs.shape[0]
195 |
196 | cdef inline double min(double[:] xs) nogil:
197 | if xs.shape[0] == 0:
198 | raise ValueError('Empty sequence.')
199 | cdef unsigned long i
200 | cdef double minimum = xs[0]
201 | for i in range(1, xs.shape[0]):
202 | if xs[i] < minimum:
203 | minimum = xs[i]
204 | return minimum
205 |
206 | cdef inline double max(double[:] xs) nogil:
207 | if xs.shape[0] == 0:
208 | raise ValueError('Empty sequence.')
209 | cdef unsigned long i
210 | cdef double maximum = xs[0]
211 | for i in range(1, xs.shape[0]):
212 | if xs[i] > maximum:
213 | maximum = xs[i]
214 | return maximum
215 |
216 | cdef inline unsigned long argmin(double[:] xs) nogil:
217 | if xs.shape[0] == 0:
218 | raise ValueError('Empty sequence.')
219 | cdef unsigned long i, argmin_i = 0
220 | cdef double minimum = xs[0]
221 | for i in range(1, xs.shape[0]):
222 | if xs[i] < minimum:
223 | minimum = xs[i]
224 | argmin_i = i
225 | return argmin_i
226 |
227 | cdef inline unsigned long argmax(double[:] xs) nogil:
228 | if xs.shape[0] == 0:
229 | raise ValueError('Empty sequence.')
230 | cdef unsigned long i, argmax_i = 0
231 | cdef double maximum = xs[0]
232 | for i in range(1, xs.shape[0]):
233 | if xs[i] > maximum:
234 | maximum = xs[i]
235 | argmax_i = i
236 | return argmax_i
237 |
238 | cdef inline (unsigned long, unsigned long) argminmax(double[:] xs) nogil:
239 | if xs.shape[0] == 0:
240 | raise ValueError('Empty sequence.')
241 | cdef unsigned long i, argmin_i = 0, argmax_i = 0
242 | cdef double minimum = xs[0], maximum = xs[0]
243 | for i in range(1, xs.shape[0]):
244 | if xs[i] < minimum:
245 | minimum = xs[i]
246 | argmin_i = i
247 | if xs[i] > maximum:
248 | maximum = xs[i]
249 | argmax_i = i
250 | return argmin_i, argmax_i
251 |
252 | cdef inline void sort(double[::1] xs) nogil:
253 | cpp_sort(&xs[0], (&xs[0]) + xs.shape[0])
254 |
255 | cdef struct _IndexedDouble:
256 | unsigned long id
257 | double val
258 |
259 | cdef bint _ascending_cmp(_IndexedDouble &lhs, _IndexedDouble &rhs) nogil:
260 | return lhs.val < rhs.val
261 |
262 | cdef bint _descending_cmp(_IndexedDouble &lhs, _IndexedDouble &rhs) nogil:
263 | return lhs.val > rhs.val
264 |
265 | cdef inline unsigned long[:] argsort(double[:] xs, bint reverse=False) nogil:
266 | cdef unsigned long i
267 | cdef vector[_IndexedDouble] indexed_xs = vector[_IndexedDouble](xs.shape[0])
268 | for i in range(xs.shape[0]):
269 | indexed_xs[i].id = i
270 | indexed_xs[i].val = xs[i]
271 | if not reverse:
272 | cpp_sort(indexed_xs.begin(), indexed_xs.end(), &_ascending_cmp)
273 | else:
274 | cpp_sort(indexed_xs.begin(), indexed_xs.end(), &_descending_cmp)
275 |
276 | cdef unsigned long[:] inds
277 | with gil:
278 | inds = view.array(shape=(xs.shape[0],), itemsize=sizeof(long), format='L')
279 | for i in range(xs.shape[0]):
280 | inds[i] = indexed_xs[i].id
281 | return inds
282 |
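283 | # These helpers are cdef/nogil and only callable from Cython; they mirror the
284 | # corresponding numpy reductions on 1-D memoryviews. A sketch of the sorting
285 | # pair: for xs = [3., 1., 2.], argsort(xs) yields indices [1, 2, 0], and
286 | # permute(xs, argsort(xs)) returns the ascending arrangement [1., 2., 3.].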
--------------------------------------------------------------------------------
/cyroot/return_types.pxd:
--------------------------------------------------------------------------------
1 |
2 | cdef class RootReturnType:
3 | cdef readonly object root
4 | cdef readonly object f_root
5 | cdef readonly unsigned int iters
6 | cdef readonly object f_calls
7 | cdef readonly double precision
8 | cdef readonly double error
9 | cdef readonly bint converged
10 | cdef readonly bint optimal
11 |
12 | cdef class BracketingMethodsReturnType(RootReturnType):
13 | cdef readonly object bracket
14 | cdef readonly object f_bracket
15 |
16 | cdef class NewtonMethodsReturnType(RootReturnType):
17 | cdef readonly object df_root
18 |
19 | # --------------------------------
20 | # Splitting Methods
21 | # --------------------------------
22 | cdef class MultiRootsReturnType:
23 | cdef readonly list root
24 | cdef readonly list f_root
25 | cdef readonly unsigned int split_iters
26 | cdef readonly list iters
27 | cdef readonly object f_calls
28 | cdef readonly list precision
29 | cdef readonly list error
30 | cdef readonly list converged
31 | cdef readonly list optimal
32 |
33 | cdef class SplittingBracketingMethodsReturnType(MultiRootsReturnType):
34 | cdef readonly list bracket
35 | cdef readonly list f_bracket
36 |
37 | cdef class SplittingNewtonMethodsReturnType(MultiRootsReturnType):
38 | cdef readonly list df_root
39 |
--------------------------------------------------------------------------------
/cyroot/return_types.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 |
3 | from libc cimport math
4 |
5 | __all__ = [
6 | 'RootReturnType',
7 | 'BracketingMethodsReturnType',
8 | 'NewtonMethodsReturnType',
9 | 'MultiRootsReturnType',
10 | 'SplittingBracketingMethodsReturnType',
11 | 'SplittingNewtonMethodsReturnType',
12 | ]
13 |
14 | # noinspection DuplicatedCode
15 | cdef class RootReturnType:
16 | def __init__(self,
17 | root=None,
18 | f_root=None,
19 | iters=0,
20 | f_calls=0,
21 | precision=math.NAN,
22 | error=math.NAN,
23 | converged=False,
24 | optimal=False):
25 | self.root = root
26 | self.f_root = f_root
27 | self.iters = iters
28 | self.f_calls = f_calls
29 | self.precision = precision
30 | self.error = error
31 | self.converged = converged
32 | self.optimal = optimal
33 |
34 | def __getitem__(self, i: int):
35 | if i < 0: i += 8
36 | if i == 0: return self.root
37 | if i == 1: return self.f_root
38 | if i == 2: return self.iters
39 | if i == 3: return self.f_calls
40 | if i == 4: return self.precision
41 | if i == 5: return self.error
42 | if i == 6: return self.converged
43 | if i == 7: return self.optimal
44 | raise IndexError('Index out of range.')
45 |
46 | def __repr__(self):
47 | return f'RootResults(root={self.root}, f_root={self.f_root}, ' \
48 | f'iters={self.iters}, f_calls={self.f_calls}, ' \
49 | f'precision={self.precision}, error={self.error}, ' \
50 | f'converged={self.converged}, optimal={self.optimal})'
51 |
52 | # noinspection DuplicatedCode
53 | cdef class BracketingMethodsReturnType(RootReturnType):
54 | def __init__(self,
55 | root=None,
56 | f_root=None,
57 | iters=0,
58 | f_calls=0,
59 | bracket=(),
60 | f_bracket=(),
61 | precision=math.NAN,
62 | error=math.NAN,
63 | converged=False,
64 | optimal=False):
65 | super().__init__(root, f_root, iters, f_calls, precision, error, converged, optimal)
66 | self.bracket = bracket
67 | self.f_bracket = f_bracket
68 |
69 | def __getitem__(self, i: int):
70 | if i < 0: i += 10
71 | if i == 0: return self.root
72 | if i == 1: return self.f_root
73 | if i == 2: return self.iters
74 | if i == 3: return self.f_calls
75 | if i == 4: return self.bracket
76 | if i == 5: return self.f_bracket
77 | if i == 6: return self.precision
78 | if i == 7: return self.error
79 | if i == 8: return self.converged
80 | if i == 9: return self.optimal
81 | raise IndexError('Index out of range.')
82 |
83 | def __repr__(self):
84 | return f'RootResults(root={self.root}, f_root={self.f_root}, ' \
85 | f'iters={self.iters}, f_calls={self.f_calls}, ' \
86 | f'bracket={self.bracket}, f_bracket={self.f_bracket}, ' \
87 | f'precision={self.precision}, error={self.error}, ' \
88 | f'converged={self.converged}, optimal={self.optimal})'
89 |
90 | # noinspection DuplicatedCode
91 | cdef class NewtonMethodsReturnType(RootReturnType):
92 | def __init__(self,
93 | root=None,
94 | f_root=None,
95 | df_root=(),
96 | iters=0,
97 | f_calls=0,
98 | precision=math.NAN,
99 | error=math.NAN,
100 | converged=False,
101 | optimal=False):
102 | super().__init__(root, f_root, iters, f_calls, precision, error, converged, optimal)
103 | self.df_root = df_root
104 |
105 | def __getitem__(self, i: int):
106 | if i < 0: i += 9
107 | if i == 0: return self.root
108 | if i == 1: return self.f_root
109 | if i == 2: return self.df_root
110 | if i == 3: return self.iters
111 | if i == 4: return self.f_calls
112 | if i == 5: return self.precision
113 | if i == 6: return self.error
114 | if i == 7: return self.converged
115 | if i == 8: return self.optimal
116 | raise IndexError('Index out of range.')
117 |
118 | def __repr__(self):
119 | return f'RootResults(root={self.root}, f_root={self.f_root}, df_root={self.df_root}, ' \
120 | f'iters={self.iters}, f_calls={self.f_calls}, ' \
121 | f'precision={self.precision}, error={self.error}, ' \
122 | f'converged={self.converged}, optimal={self.optimal})'
123 |
124 | # --------------------------------
125 | # Splitting Methods
126 | # --------------------------------
127 | # noinspection DuplicatedCode
128 | cdef class MultiRootsReturnType:
129 | def __init__(self,
130 | root=(),
131 | f_root=(),
132 | split_iters=0,
133 | iters=(),
134 | f_calls=0,
135 | precision=(),
136 | error=(),
137 | converged=(),
138 | optimal=()):
139 | self.root = root
140 | self.f_root = f_root
141 | self.split_iters = split_iters
142 | self.iters = iters
143 | self.f_calls = f_calls
144 | self.precision = precision
145 | self.error = error
146 | self.converged = converged
147 | self.optimal = optimal
148 |
149 | def __getitem__(self, i: int):
150 | if i < 0: i += 9
151 | if i == 0: return self.root
152 | if i == 1: return self.f_root
153 | if i == 2: return self.split_iters
154 | if i == 3: return self.iters
155 | if i == 4: return self.f_calls
156 | if i == 5: return self.precision
157 | if i == 6: return self.error
158 | if i == 7: return self.converged
159 | if i == 8: return self.optimal
160 | raise IndexError('Index out of range.')
161 |
162 | def __repr__(self):
163 | return f'RootResults(root={self.root}, f_root={self.f_root}, ' \
164 | f'split_iters={self.split_iters}, iters={self.iters}, f_calls={self.f_calls}, ' \
165 | f'precision={self.precision}, error={self.error}, ' \
166 | f'converged={self.converged}, optimal={self.optimal})'
167 |
168 | # noinspection DuplicatedCode
169 | cdef class SplittingBracketingMethodsReturnType(MultiRootsReturnType):
170 | def __init__(self,
171 | root=(),
172 | f_root=(),
173 | split_iters=0,
174 | iters=(),
175 | f_calls=0,
176 | bracket=(),
177 | f_bracket=(),
178 | precision=(),
179 | error=(),
180 | converged=(),
181 | optimal=()):
182 | super().__init__(root, f_root, split_iters, iters, f_calls, precision, error, converged, optimal)
183 | self.bracket = bracket
184 | self.f_bracket = f_bracket
185 |
186 | def __getitem__(self, i: int):
187 | if i < 0: i += 11
188 | if i == 0: return self.root
189 | if i == 1: return self.f_root
190 | if i == 2: return self.split_iters
191 | if i == 3: return self.iters
192 | if i == 4: return self.f_calls
193 | if i == 5: return self.bracket
194 | if i == 6: return self.f_bracket
195 | if i == 7: return self.precision
196 | if i == 8: return self.error
197 | if i == 9: return self.converged
198 | if i == 10: return self.optimal
199 | raise IndexError('Index out of range.')
200 |
201 | def __repr__(self):
202 | return f'RootResults(root={self.root}, f_root={self.f_root}, ' \
203 | f'split_iters={self.split_iters}, iters={self.iters}, f_calls={self.f_calls}, ' \
204 | f'bracket={self.bracket}, f_bracket={self.f_bracket}, ' \
205 | f'precision={self.precision}, error={self.error}, ' \
206 | f'converged={self.converged}, optimal={self.optimal})'
207 |
208 | # noinspection DuplicatedCode
209 | cdef class SplittingNewtonMethodsReturnType(MultiRootsReturnType):
210 | def __init__(self,
211 | root=(),
212 | f_root=(),
213 | df_root=(),
214 | split_iters=0,
215 | iters=(),
216 | f_calls=0,
217 | precision=(),
218 | error=(),
219 | converged=(),
220 | optimal=()):
221 | super().__init__(root, f_root, split_iters, iters, f_calls, precision, error, converged, optimal)
222 | self.df_root = df_root
223 |
224 | def __getitem__(self, i: int):
225 | if i < 0: i += 10
226 | if i == 0: return self.root
227 | if i == 1: return self.f_root
228 | if i == 2: return self.df_root
229 | if i == 3: return self.split_iters
230 | if i == 4: return self.iters
231 | if i == 5: return self.f_calls
232 | if i == 6: return self.precision
233 | if i == 7: return self.error
234 | if i == 8: return self.converged
235 | if i == 9: return self.optimal
236 | raise IndexError('Index out of range.')
237 |
238 | def __repr__(self):
239 | return f'RootResults(root={self.root}, f_root={self.f_root}, df_root={self.df_root}, ' \
240 | f'split_iters={self.split_iters}, iters={self.iters}, f_calls={self.f_calls}, ' \
241 | f'precision={self.precision}, error={self.error}, ' \
242 | f'converged={self.converged}, optimal={self.optimal})'
243 |
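244 | # The return types behave like read-only records that also support positional
245 | # indexing, mirroring the field order shown in __repr__. For instance, for a
246 | # RootReturnType `res`: res[0] is res.root, res[3] is res.f_calls, and
247 | # res[-1] is res.optimal.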
--------------------------------------------------------------------------------
/cyroot/scalar_derivative_approximation.pxd:
--------------------------------------------------------------------------------
1 | from .fptr cimport DoubleScalarFPtr
2 |
3 | ################################################################################
4 | # Base Class
5 | ################################################################################
6 | cdef class DerivativeApproximation(DoubleScalarFPtr):
7 | cdef DoubleScalarFPtr f
8 | cpdef double eval(self, double x) except *
9 | cpdef double eval_with_f_val(self, double x, double f_x) except *
10 |
11 | ################################################################################
12 | # Finite Difference
13 | ################################################################################
14 | cdef double finite_difference_kernel(
15 | DoubleScalarFPtr f,
16 | double x,
17 | double f_x,
18 | double h=*,
19 | int order=*,
20 | int kind=*)
21 |
22 | cdef class FiniteDifference(DerivativeApproximation):
23 | cdef public int order, kind
24 | cdef public double h
25 | cpdef double eval(self, double x) except *
26 | cpdef double eval_with_f_val(self, double x, double f_x) except *
27 |
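28 | # The kernel estimates the order-n derivative from an n-th finite difference:
29 | #   forward  (kind=1):   f^(n)(x) ~ h**-n * sum_{i=0..n} (-1)**(n-i) * C(n,i) * f(x + i*h)
30 | #   backward (kind=-1):  f^(n)(x) ~ h**-n * sum_{i=0..n} (-1)**i     * C(n,i) * f(x - i*h)
31 | #   central  (kind=0):   f^(n)(x) ~ h**-n * sum_{i=0..n} (-1)**i     * C(n,i) * f(x + (n/2 - i)*h)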
--------------------------------------------------------------------------------
/cyroot/scalar_derivative_approximation.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from typing import Callable, Sequence, Optional, Union
8 |
9 | import cython
10 |
11 | from ._types import VectorLike
12 | from .fptr cimport DoubleScalarFPtr, PyDoubleScalarFPtr
13 | from .ops cimport scalar_ops as sops
14 | from .utils._function_registering import register
15 |
16 | __all__ = [
17 | 'DerivativeApproximation',
18 | 'finite_difference',
19 | 'FiniteDifference',
20 | ]
21 |
22 | ################################################################################
23 | # Base Class
24 | ################################################################################
25 | # noinspection DuplicatedCode
26 | cdef class DerivativeApproximation(DoubleScalarFPtr):
27 | def __init__(self, f: Union[DoubleScalarFPtr, Callable[[float], float]]):
28 | if isinstance(f, DoubleScalarFPtr):
29 | self.f = f
30 | else:
31 | self.f = PyDoubleScalarFPtr(f)
32 |
33 | cpdef double eval(self, double x) except *:
34 | raise NotImplementedError
35 |
36 | cpdef double eval_with_f_val(self, double x, double f_x) except *:
37 | raise NotImplementedError
38 |
39 | ################################################################################
40 | # Finite Difference
41 | ################################################################################
42 | # noinspection DuplicatedCode
43 | cdef double finite_difference_kernel(
44 | DoubleScalarFPtr f,
45 | double x,
46 | double f_x,
47 | double h=1.,
48 | int order=1,
49 | int kind=0):
50 | cdef unsigned int i
51 | cdef double f_i, diff = 0.
52 | cdef int bin_coef, sgn = (-1) ** order if kind == 1 else 1
53 | for i in range(order + 1):
54 | bin_coef = sops.binomial_coef(order, i)
55 | if kind == 1: # forward
56 | f_i = f.eval(x + i * h) if i > 0 else f_x
57 | diff += sgn * bin_coef * f_i
58 | elif kind == -1: # backward
59 | f_i = f.eval(x - i * h) if i > 0 else f_x
60 | diff += sgn * bin_coef * f_i
61 | else: # central
62 | f_i = f.eval(x + (order / 2 - i) * h) if 2 * i != order else f_x
63 | diff += sgn * bin_coef * f_i
64 | sgn = -sgn
65 | return diff / h ** order
66 |
67 | def _check_finite_difference_args(h: Union[float, VectorLike], order: int, kind: Union[int, str]):
68 | if (isinstance(h, float) and h == 0) or \
69 | (isinstance(h, Sequence) and any(h[i] == 0 for i in range(len(h)))):
70 | raise ValueError('h must be non-zero.')
71 | if order < 1:
72 | raise ValueError('order must be a positive number.')
73 | if kind == 'forward':
74 | kind = 1
75 | elif kind == 'backward':
76 | kind = -1
77 | elif kind == 'central':
78 | kind = 0
79 | elif kind not in [-1, 0, 1]:
80 | raise ValueError('kind must be either 1/\'forward\', -1/\'backward\', '
81 | f'or 0/\'central\'. Got {kind}.')
82 | return h, order, kind
83 |
84 | # noinspection DuplicatedCode
85 | cdef class FiniteDifference(DerivativeApproximation):
86 | """
87 | Wraps a function and approximates its derivative of a given order by finite differences.
88 | """
89 | def __init__(self,
90 | f: Union[DoubleScalarFPtr, Callable[[float], float]],
91 | h: float = 1.,
92 | order: int = 1,
93 | kind: Union[int, str] = 0):
94 | super().__init__(f)
95 | # check args
96 | h, order, kind = _check_finite_difference_args(h, order, kind)
97 | self.h = h
98 | self.order = order
99 | self.kind = kind
100 |
101 | cpdef double eval(self, double x) except *:
102 | self.n_f_calls += 1
103 | cdef double f_x = self.f.eval(x)
104 | return finite_difference_kernel(
105 | self.f, x, f_x, self.h, self.order, self.kind)
106 |
107 | cpdef double eval_with_f_val(self, double x, double f_x) except *:
108 | self.n_f_calls += 1
109 | return finite_difference_kernel(
110 | self.f, x, f_x, self.h, self.order, self.kind)
111 |
112 | # noinspection DuplicatedCode
113 | @register('cyroot.da.scalar')
114 | @cython.binding(True)
115 | def finite_difference(f: Callable[[float], float],
116 | x: float,
117 | f_x: Optional[float] = None,
118 | h: float = 1.,
119 | order: int = 1,
120 | kind: Union[int, str] = 0):
121 | """
122 | Finite difference method.
123 |
124 | Args:
125 | f (function): Function for which the derivative is sought.
126 | x (float): Point at which the derivative is sought.
127 | f_x (float, optional): Value evaluated at point ``x``.
128 | h (float): Finite difference step. Defaults to 1.
129 | order (int): Order of derivative to be estimated.
130 | Defaults to 1.
131 | kind (int, str): Type of finite difference, including ``1``
132 | for forward, ``-1`` for backward, and ``0`` for central.
133 | Defaults to 0.
134 |
135 | Returns:
136 | diff: Estimated derivative.
137 | """
138 | # check args
139 | h, order, kind = _check_finite_difference_args(h, order, kind)
140 |
141 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
142 | if f_x is None:
143 | f_x = f_wrapper.eval(x)
144 | return finite_difference_kernel(f_wrapper, x, f_x, h, order, kind)
145 |
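146 | # Usage sketch (assuming these names are importable from the built package):
147 | #
148 | #   from math import sin
149 | #   finite_difference(sin, 0.0, h=1e-3, order=1, kind=0)   # ~ cos(0) = 1
150 | #   d = FiniteDifference(sin, h=1e-3, order=2)              # reusable wrapper
151 | #   d.eval(0.0)                                             # ~ -sin(0) = 0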
--------------------------------------------------------------------------------
/cyroot/scalar_quasi_newton.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from typing import Callable, Optional
8 |
9 | import cython
10 | import numpy as np
11 | cimport numpy as np
12 | from cython cimport view
13 | from dynamic_default_args import dynamic_default_args, named_default
14 | from libc cimport math
15 |
16 | from ._check_args cimport (
17 | _check_stop_cond_scalar_initial_guess,
18 | _check_stop_cond_scalar_initial_guesses,
19 | _check_stop_cond_complex_scalar_initial_guesses,
20 | )
21 | from ._check_args import (
22 | _check_stop_cond_args,
23 | _check_initial_guesses_uniqueness,
24 | _check_initial_vals_uniqueness,
25 | )
26 | from ._defaults import ETOL, ERTOL, PTOL, PRTOL, MAX_ITER
27 | from ._types import VectorLike
28 | from .fptr cimport (
29 | DoubleScalarFPtr, PyDoubleScalarFPtr,
30 | ComplexScalarFPtr, PyComplexScalarFPtr
31 | )
32 | from .ops cimport scalar_ops as sops, vector_ops as vops
33 | from .return_types cimport RootReturnType
34 | from .utils._function_registering import register
35 |
36 | __all__ = [
37 | 'secant',
38 | 'sidi',
39 | 'steffensen',
40 | 'inverse_quadratic_interp',
41 | 'hyperbolic_interp',
42 | 'muller',
43 | ]
44 |
45 | ################################################################################
46 | # Secant
47 | ################################################################################
48 | # noinspection DuplicatedCode
49 | cdef RootReturnType secant_kernel(
50 | DoubleScalarFPtr f,
51 | double x0,
52 | double x1,
53 | double f_x0,
54 | double f_x1,
55 | double etol=ETOL,
56 | double ertol=ERTOL,
57 | double ptol=PTOL,
58 | double prtol=PRTOL,
59 | unsigned long max_iter=MAX_ITER):
60 | cdef unsigned long step = 0
61 | cdef double r, f_r, precision, error
62 | cdef bint converged, optimal
63 | cdef double[2] xs = [x0, x1], f_xs = [f_x0, f_x1]
64 | if _check_stop_cond_scalar_initial_guesses(xs, f_xs, etol, ertol, ptol, prtol,
65 | &r, &f_r, &precision, &error, &converged, &optimal):
66 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
67 |
68 | cdef double x2, df_01
69 | converged = True
70 | while not (sops.isclose(0, error, ertol, etol) or
71 | sops.isclose(0, precision, prtol, ptol)):
72 | if step >= max_iter > 0:
73 | converged = False
74 | break
75 | step += 1
76 | df_01 = f_x0 - f_x1
77 | if df_01 == 0:
78 | converged = False
79 | break
80 | x2 = x0 - f_x0 * (x0 - x1) / df_01
81 | x0, f_x0 = x1, f_x1
82 | x1, f_x1 = x2, f.eval(x2)
83 |
84 | precision = math.fabs(x1 - x0)
85 | error = math.fabs(f_x1)
86 |
87 | r, f_r = x1, f_x1
88 | optimal = sops.isclose(0, error, ertol, etol)
89 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
90 |
91 | # noinspection DuplicatedCode
92 | @register('cyroot.scalar.quasi_newton')
93 | @dynamic_default_args()
94 | @cython.binding(True)
95 | def secant(f: Callable[[float], float],
96 | x0: float,
97 | x1: float,
98 | f_x0: Optional[float] = None,
99 | f_x1: Optional[float] = None,
100 | etol: float = named_default(ETOL=ETOL),
101 | ertol: float = named_default(ERTOL=ERTOL),
102 | ptol: float = named_default(PTOL=PTOL),
103 | prtol: float = named_default(PRTOL=PRTOL),
104 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
105 | """
106 | Secant method for scalar root-finding.
107 |
108 | Args:
109 | f (function): Function for which the root is sought.
110 | x0 (float): First initial point.
111 | x1 (float): Second initial point.
112 | f_x0 (float, optional): Value evaluated at first initial point.
113 | f_x1 (float, optional): Value evaluated at second initial point.
114 | etol (float, optional): Error tolerance, indicating the
115 | desired precision of the root. Defaults to {etol}.
116 | ertol (float, optional): Relative error tolerance.
117 | Defaults to {ertol}.
118 | ptol (float, optional): Precision tolerance, indicating
119 | the minimum change of root approximations or width of
120 | brackets (in bracketing methods) after each iteration.
121 | Defaults to {ptol}.
122 | prtol (float, optional): Relative precision tolerance.
123 | Defaults to {prtol}.
124 | max_iter (int, optional): Maximum number of iterations.
125 | If set to 0, the procedure will run indefinitely until
126 | the stopping condition is met. Defaults to {max_iter}.
127 |
128 | Returns:
129 | solution: The solution represented as a ``RootResults`` object.
130 | """
131 | # check params
132 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
133 | _check_initial_guesses_uniqueness((x0, x1))
134 |
135 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
136 | if f_x0 is None:
137 | f_x0 = f_wrapper.eval(x0)
138 | if f_x1 is None:
139 | f_x1 = f_wrapper.eval(x1)
140 | _check_initial_vals_uniqueness((f_x0, f_x1))
141 |
142 | res = secant_kernel(f_wrapper, x0, x1, f_x0, f_x1,
143 | etol, ertol, ptol, prtol, max_iter)
144 | return res
145 |
146 | ################################################################################
147 | # Sidi
148 | ################################################################################
149 | # noinspection DuplicatedCode
150 | cdef RootReturnType sidi_kernel(
151 | DoubleScalarFPtr f,
152 | double[:] x0s,
153 | double[:] f_x0s,
154 | double etol=ETOL,
155 | double ertol=ERTOL,
156 | double ptol=PTOL,
157 | double prtol=PRTOL,
158 | unsigned long max_iter=MAX_ITER):
159 | cdef unsigned long step = 0
160 | cdef double r, f_r, precision, error
161 | cdef bint converged, optimal
162 | if _check_stop_cond_scalar_initial_guesses(x0s, f_x0s, etol, ertol, ptol, prtol,
163 | &r, &f_r, &precision, &error, &converged, &optimal):
164 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
165 |
166 | # sort by error of f
167 | cdef unsigned long[:] inds = vops.argsort(vops.fabs(f_x0s), reverse=True)
168 | cdef double[:] xs = vops.permute(x0s, inds)
169 | cdef double[:] f_xs = vops.permute(f_x0s, inds)
170 |
171 | cdef double xn, f_xn, dp_xn
172 | cdef double[:] dfs = view.array(shape=(1 + 1,), itemsize=sizeof(double), format='d')
173 | cdef NewtonPolynomial poly = NewtonPolynomial(x0s.shape[0])
174 | converged = True
175 | while not (sops.isclose(0, error, ertol, etol) or
176 | sops.isclose(0, precision, prtol, ptol)):
177 | if step >= max_iter > 0:
178 | converged = False
179 | break
180 | step += 1
181 | poly.update(xs, f_xs)
182 |
183 | dp_xn = poly.dnf(xs[-1], 1, dfs)
184 | if dp_xn == 0:
185 | converged = False
186 | break
187 | xn = xs[-1] - f_xs[-1] / dp_xn
188 | f_xn = f.eval(xn)
189 | # remove x0 and add xn
190 | xs[:-1] = xs[1:]
191 | xs[-1] = xn
192 | f_xs[:-1] = f_xs[1:]
193 | f_xs[-1] = f_xn
194 |
195 | precision = vops.width(xs)
196 | error = math.fabs(f_xn)
197 |
198 | r, f_r = xn, f_xn
199 | optimal = sops.isclose(0, error, ertol, etol)
200 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
201 |
202 | cdef class NewtonPolynomial:
203 | cdef unsigned int n
204 | cdef double[:] x, a
205 | cdef double[:, :] DD
206 |
207 | def __cinit__(self, unsigned int n):
208 | self.n = n
209 | self.x = view.array(shape=(self.n - 1,),
210 | itemsize=sizeof(double),
211 | format='d')
212 | self.a = view.array(shape=(self.n,),
213 | itemsize=sizeof(double),
214 | format='d')
215 |
216 | self.DD = view.array(shape=(self.n, self.n),
217 | itemsize=sizeof(double),
218 | format='d')
219 |
220 | cdef void update(self, double[:] xs, double[:] ys) nogil:
221 | self.x[:] = xs[:-1]
222 |
223 | cdef unsigned int i, j
224 | # Fill in divided differences
225 | self.DD[:, 0] = ys
226 | for j in range(1, self.n):
227 | self.DD[:j, j] = 0
228 | for i in range(j, self.n):
229 | self.DD[i, j] = (self.DD[i, j - 1] - self.DD[i - 1, j - 1]) / (xs[i] - xs[i - j])
230 | # Copy diagonal elements into array for returning
231 | for j in range(self.n):
232 | self.a[j] = self.DD[j, j]
233 |
234 | def __call__(self, double x):
235 | return self.f(x)
236 |
237 | cdef inline double f(self, double x) nogil:
238 | cdef double f_x = self.a[-1]
239 | cdef unsigned int k
240 | for k in range(self.n - 2, -1, -1):
241 | f_x = f_x * (x - self.x[k]) + self.a[k]
242 | return f_x
243 |
244 | cdef inline double df(self, double x, double[:] out) nogil:
245 | return self.dnf(x, 1, out)
246 |
247 | cdef inline double dnf(self, double x, int order, double[:] out) nogil:
248 | out[0] = self.a[-1]
249 | out[1:] = 0
250 | cdef unsigned int i, k
251 | cdef double v
252 | for k in range(self.n - 2, -1, -1):
253 | v = x - self.x[k]
254 | for i in range(order, 0, -1):
255 | out[i] = out[i] * v + out[i - 1]
256 | out[0] = out[0] * v + self.a[k]
257 | return out[-1]
258 |
259 | # noinspection DuplicatedCode
260 | @register('cyroot.scalar.quasi_newton')
261 | @dynamic_default_args()
262 | @cython.binding(True)
263 | def sidi(f: Callable[[float], float],
264 | x0s: VectorLike,
265 | f_x0s: Optional[VectorLike] = None,
266 | etol: float = named_default(ETOL=ETOL),
267 | ertol: float = named_default(ERTOL=ERTOL),
268 | ptol: float = named_default(PTOL=PTOL),
269 | prtol: float = named_default(PRTOL=PRTOL),
270 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
271 | """
272 | Sidi's Generalized Secant method for scalar root-finding.
273 |
274 | Args:
275 | f (function): Function for which the root is sought.
276 | x0s (tuple of float): Tuple of initial points.
277 | f_x0s (tuple of float, optional): Tuple of values evaluated at initial points.
278 | etol (float, optional): Error tolerance, indicating the
279 | desired precision of the root. Defaults to {etol}.
280 | ertol (float, optional): Relative error tolerance.
281 | Defaults to {ertol}.
282 | ptol (float, optional): Precision tolerance, indicating
283 | the minimum change of root approximations or width of
284 | brackets (in bracketing methods) after each iteration.
285 | Defaults to {ptol}.
286 | prtol (float, optional): Relative precision tolerance.
287 | Defaults to {prtol}.
288 | max_iter (int, optional): Maximum number of iterations.
289 | If set to 0, the procedure will run indefinitely until
290 | the stopping condition is met. Defaults to {max_iter}.
291 |
292 | Returns:
293 | solution: The solution represented as a ``RootResults`` object.
294 | """
295 | # check params
296 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
297 |
298 | x0s = np.asarray(x0s, dtype=np.float64)
299 | if x0s.shape[0] < 2:
300 | raise ValueError('Requires at least 2 initial guesses. '
301 | f'Got {x0s.shape[0]}.')
302 | _check_initial_guesses_uniqueness(x0s)
303 |
304 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
305 | if f_x0s is None:
306 | f_x0s = np.array([f_wrapper.eval(x) for x in x0s], dtype=np.float64)
307 | else:
308 | f_x0s = np.asarray(f_x0s, dtype=np.float64)
309 | if x0s.shape[0] != f_x0s.shape[0]:
310 | raise ValueError('xs and f_xs must have same size. '
311 | f'Got {x0s.shape[0]} and {f_x0s.shape[0]}.')
312 | _check_initial_guesses_uniqueness(f_x0s)
313 |
314 | res = sidi_kernel(f_wrapper, x0s, f_x0s, etol, ertol, ptol, prtol, max_iter)
315 | return res
316 |
317 | ################################################################################
318 | # Steffensen
319 | ################################################################################
320 | # noinspection DuplicatedCode
321 | cdef RootReturnType steffensen_kernel(
322 | DoubleScalarFPtr f,
323 | double x0,
324 | double f_x0,
325 | bint aitken=True,
326 | double etol=ETOL,
327 | double ertol=ERTOL,
328 | double ptol=PTOL,
329 | double prtol=PRTOL,
330 | unsigned long max_iter=MAX_ITER):
331 | cdef unsigned long step = 0
332 | cdef double precision, error
333 | cdef bint converged, optimal
334 | if _check_stop_cond_scalar_initial_guess(x0, f_x0, etol, ertol, ptol, prtol,
335 | &precision, &error, &converged, &optimal):
336 | return RootReturnType(x0, f_x0, step, f.n_f_calls, precision, error, converged, optimal)
337 |
338 | cdef double x1, x2, x3, denom
339 | converged = True
340 | while not (sops.isclose(0, error, ertol, etol) or
341 | sops.isclose(0, precision, prtol, ptol)):
342 | if step >= max_iter > 0:
343 | converged = False
344 | break
345 | step += 1
346 | x1 = x0 + f_x0
347 | x2 = x1 + f.eval(x1)
348 | denom = x2 - 2 * x1 + x0
349 | if denom == 0:
350 | converged = False
351 | break
352 | # Use Aitken's delta-squared method to find a better approximation
353 | if aitken:
354 | x3 = x0 - (x1 - x0) ** 2 / denom
355 | else:
356 | x3 = x2 - (x2 - x1) ** 2 / denom
357 | precision = math.fabs(x3 - x0)
358 | x0, f_x0 = x3, f.eval(x3)
359 | error = math.fabs(f_x0)
360 |
361 | optimal = sops.isclose(0, error, ertol, etol)
362 | return RootReturnType(x0, f_x0, step, f.n_f_calls, precision, error, converged, optimal)
363 |
364 | # noinspection DuplicatedCode
365 | @register('cyroot.scalar.quasi_newton')
366 | @dynamic_default_args()
367 | @cython.binding(True)
368 | def steffensen(f: Callable[[float], float],
369 | x0: float,
370 | f_x0: Optional[float] = None,
371 | aitken: bool = True,
372 | etol: float = named_default(ETOL=ETOL),
373 | ertol: float = named_default(ERTOL=ERTOL),
374 | ptol: float = named_default(PTOL=PTOL),
375 | prtol: float = named_default(PRTOL=PRTOL),
376 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
377 | """
378 | Steffensen's method for scalar root-finding.
379 |
380 | Args:
381 | f (function): Function for which the root is sought.
382 | x0 (float): First initial point.
383 | f_x0 (float, optional): Value evaluated at first initial point.
384 | aitken (bool, optional): Use Aitken's delta-squared process or not.
385 | Defaults to True.
386 | etol (float, optional): Error tolerance, indicating the
387 | desired precision of the root. Defaults to {etol}.
388 | ertol (float, optional): Relative error tolerance.
389 | Defaults to {ertol}.
390 | ptol (float, optional): Precision tolerance, indicating
391 | the minimum change of root approximations or width of
392 | brackets (in bracketing methods) after each iteration.
393 | Defaults to {ptol}.
394 | prtol (float, optional): Relative precision tolerance.
395 | Defaults to {prtol}.
396 | max_iter (int, optional): Maximum number of iterations.
397 | If set to 0, the procedure will run indefinitely until
398 | stopping condition is met. Defaults to {max_iter}.
399 |
400 | Returns:
401 | solution: The solution represented as a ``RootResults`` object.
402 | """
403 | # check params
404 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
405 |
406 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
407 | if f_x0 is None:
408 | f_x0 = f_wrapper.eval(x0)
409 |
410 | res = steffensen_kernel(f_wrapper, x0, f_x0, aitken,
411 | etol, ertol, ptol, prtol, max_iter)
412 | return res
413 |
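# Usage sketch for ``steffensen`` above; ``_steffensen_example`` is a hypothetical
# helper added for illustration only. The kernel iterates the auxiliary points
# x1 = x0 + f(x0) and x2 = x1 + f(x1); with ``aitken=True`` the update is the
# classical delta-squared form x3 = x0 - (x1 - x0)**2 / (x2 - 2*x1 + x0).
def _steffensen_example():
    res = steffensen(f=lambda x: x ** 2 - 2, x0=1.0)
    print(res.root, res.optimal)  # expected to approach sqrt(2) ~ 1.414214
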
414 | ################################################################################
415 | # Inverse Quadratic Interpolation
416 | ################################################################################
417 | # noinspection DuplicatedCode
418 | cdef RootReturnType inverse_quadratic_interp_kernel(
419 | DoubleScalarFPtr f,
420 | double x0,
421 | double x1,
422 | double x2,
423 | double f_x0,
424 | double f_x1,
425 | double f_x2,
426 | double etol=ETOL,
427 | double ertol=ERTOL,
428 | double ptol=PTOL,
429 | double prtol=PRTOL,
430 | unsigned long max_iter=MAX_ITER):
431 | cdef unsigned long step = 0
432 | cdef double r, f_r, precision, error
433 | cdef bint converged, optimal
434 | cdef double[3] x_arr = [x0, x1, x2], f_arr = [f_x0, f_x1, f_x2]
435 | cdef double[:] xs = x_arr, f_xs = f_arr
436 | if _check_stop_cond_scalar_initial_guesses(xs, f_xs, etol, ertol, ptol, prtol,
437 | &r, &f_r, &precision, &error, &converged, &optimal):
438 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
439 |
440 | cdef double x3, df_01, df_02, df_12
441 | converged = True
442 | while not (sops.isclose(0, error, ertol, etol) or
443 | sops.isclose(0, precision, prtol, ptol)):
444 | if step >= max_iter > 0:
445 | converged = False
446 | break
447 | step += 1
448 | df_01 = f_xs[0] - f_xs[1]
449 | df_02 = f_xs[0] - f_xs[2]
450 | df_12 = f_xs[1] - f_xs[2]
451 | if df_01 == 0 or df_02 == 0 or df_12 == 0:
452 | converged = False
453 | break
454 | x3 = (xs[0] * f_xs[1] * f_xs[2] / (df_01 * df_02)
455 | + xs[1] * f_xs[0] * f_xs[2] / (-df_01 * df_12)
456 | + xs[2] * f_xs[0] * f_xs[1] / (df_02 * df_12))
457 | xs[0], f_xs[0] = xs[1], f_xs[1]
458 | xs[1], f_xs[1] = xs[2], f_xs[2]
459 | xs[2], f_xs[2] = x3, f.eval(x3)
460 |
461 | precision = vops.width(xs)
462 | error = math.fabs(f_xs[2])
463 |
464 | r, f_r = xs[2], f_xs[2]
465 | optimal = sops.isclose(0, error, ertol, etol)
466 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
467 |
468 | # noinspection DuplicatedCode
469 | @register('cyroot.scalar.quasi_newton')
470 | @dynamic_default_args()
471 | @cython.binding(True)
472 | def inverse_quadratic_interp(
473 | f: Callable[[float], float],
474 | x0: float,
475 | x1: float,
476 | x2: float,
477 | f_x0: Optional[float] = None,
478 | f_x1: Optional[float] = None,
479 | f_x2: Optional[float] = None,
480 | etol: float = named_default(ETOL=ETOL),
481 | ertol: float = named_default(ERTOL=ERTOL),
482 | ptol: float = named_default(PTOL=PTOL),
483 | prtol: float = named_default(PRTOL=PRTOL),
484 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
485 | """
486 | Inverse Quadratic Interpolation method for scalar root-finding.
487 |
488 | Args:
489 | f (function): Function for which the root is sought.
490 | x0 (float): First initial point.
491 | x1 (float): Second initial point.
492 | x2 (float): Third initial point.
493 | f_x0 (float, optional): Value evaluated at first initial point.
494 | f_x1 (float, optional): Value evaluated at second initial point.
495 | f_x2 (float, optional): Value evaluated at third initial point.
496 | etol (float, optional): Error tolerance, indicating the
497 | desired precision of the root. Defaults to {etol}.
498 | ertol (float, optional): Relative error tolerance.
499 | Defaults to {ertol}.
500 | ptol (float, optional): Precision tolerance, indicating
501 | the minimum change of root approximations or width of
502 | brackets (in bracketing methods) after each iteration.
503 | Defaults to {ptol}.
504 | prtol (float, optional): Relative precision tolerance.
505 | Defaults to {prtol}.
506 | max_iter (int, optional): Maximum number of iterations.
507 | If set to 0, the procedure will run indefinitely until
508 | stopping condition is met. Defaults to {max_iter}.
509 |
510 | Returns:
511 | solution: The solution represented as a ``RootResults`` object.
512 | """
513 | # check params
514 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
515 | _check_initial_guesses_uniqueness((x0, x1, x2))
516 |
517 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
518 | if f_x0 is None:
519 | f_x0 = f_wrapper.eval(x0)
520 | if f_x1 is None:
521 | f_x1 = f_wrapper.eval(x1)
522 | if f_x2 is None:
523 | f_x2 = f_wrapper.eval(x2)
524 | _check_initial_vals_uniqueness((f_x0, f_x1, f_x2))
525 |
526 | res = inverse_quadratic_interp_kernel(f_wrapper, x0, x1, x2, f_x0, f_x1, f_x2,
527 | etol, ertol, ptol, prtol, max_iter)
528 | return res
529 |
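# Pure-Python rendition of a single update of the kernel above, for reference: the
# next iterate is the quadratic Lagrange interpolant of the points (f(x_i), x_i)
# (roles of x and y swapped) evaluated at y = 0. ``_iqi_step`` is a hypothetical
# helper added for illustration only.
def _iqi_step(x0, x1, x2, f0, f1, f2):
    return (x0 * f1 * f2 / ((f0 - f1) * (f0 - f2))
            + x1 * f0 * f2 / (-(f0 - f1) * (f1 - f2))
            + x2 * f0 * f1 / ((f0 - f2) * (f1 - f2)))
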
530 | ################################################################################
531 | # Hyperbolic Interpolation
532 | ################################################################################
533 | # noinspection DuplicatedCode
534 | cdef RootReturnType hyperbolic_interp_kernel(
535 | DoubleScalarFPtr f,
536 | double x0,
537 | double x1,
538 | double x2,
539 | double f_x0,
540 | double f_x1,
541 | double f_x2,
542 | double etol=ETOL,
543 | double ertol=ERTOL,
544 | double ptol=PTOL,
545 | double prtol=PRTOL,
546 | unsigned long max_iter=MAX_ITER):
547 | cdef unsigned long step = 0
548 | cdef double r, f_r, precision, error
549 | cdef bint converged, optimal
550 | cdef double[3] x_arr = [x0, x1, x2], f_arr = [f_x0, f_x1, f_x2]
551 | cdef double[:] xs = x_arr, f_xs = f_arr
552 | if _check_stop_cond_scalar_initial_guesses(xs, f_xs, etol, ertol, ptol, prtol,
553 | &r, &f_r, &precision, &error, &converged, &optimal):
554 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
555 |
556 |     cdef double x3, denom, d_01, d_12, df_01, df_02, df_12
557 | converged = True
558 | while not (sops.isclose(0, error, ertol, etol) or
559 | sops.isclose(0, precision, prtol, ptol)):
560 | if step >= max_iter > 0:
561 | converged = False
562 | break
563 | step += 1
564 |
565 | d_01 = xs[0] - xs[1]
566 | d_12 = xs[1] - xs[2]
567 | df_01 = f_xs[0] - f_xs[1]
568 | df_02 = f_xs[0] - f_xs[2]
569 | df_12 = f_xs[1] - f_xs[2]
570 | if d_01 == 0 or d_12 == 0:
571 | converged = False
572 | break
573 | denom = f_xs[0] * df_12 / d_12 - f_xs[2] * df_01 / d_01
574 | if denom == 0:
575 | converged = False
576 | break
577 | x3 = xs[1] - f_xs[1] * df_02 / denom
578 | xs[0], f_xs[0] = xs[1], f_xs[1]
579 | xs[1], f_xs[1] = xs[2], f_xs[2]
580 | xs[2], f_xs[2] = x3, f.eval(x3)
581 |
582 | precision = vops.width(xs)
583 | error = math.fabs(f_xs[2])
584 |
585 | r, f_r = xs[2], f_xs[2]
586 | optimal = sops.isclose(0, error, ertol, etol)
587 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
588 |
589 | # noinspection DuplicatedCode
590 | @register('cyroot.scalar.quasi_newton')
591 | @dynamic_default_args()
592 | @cython.binding(True)
593 | def hyperbolic_interp(
594 | f: Callable[[float], float],
595 | x0: float,
596 | x1: float,
597 | x2: float,
598 | f_x0: Optional[float] = None,
599 | f_x1: Optional[float] = None,
600 | f_x2: Optional[float] = None,
601 | etol: float = named_default(ETOL=ETOL),
602 | ertol: float = named_default(ERTOL=ERTOL),
603 | ptol: float = named_default(PTOL=PTOL),
604 | prtol: float = named_default(PRTOL=PRTOL),
605 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
606 | """
607 | Hyperbolic Interpolation method for scalar root-finding.
608 |
609 | Args:
610 | f (function): Function for which the root is sought.
611 | x0 (float): First initial point.
612 | x1 (float): Second initial point.
613 | x2 (float): Third initial point.
614 | f_x0 (float, optional): Value evaluated at first initial point.
615 | f_x1 (float, optional): Value evaluated at second initial point.
616 | f_x2 (float, optional): Value evaluated at third initial point.
617 | etol (float, optional): Error tolerance, indicating the
618 | desired precision of the root. Defaults to {etol}.
619 | ertol (float, optional): Relative error tolerance.
620 | Defaults to {ertol}.
621 | ptol (float, optional): Precision tolerance, indicating
622 | the minimum change of root approximations or width of
623 | brackets (in bracketing methods) after each iteration.
624 | Defaults to {ptol}.
625 | prtol (float, optional): Relative precision tolerance.
626 | Defaults to {prtol}.
627 | max_iter (int, optional): Maximum number of iterations.
628 | If set to 0, the procedure will run indefinitely until
629 | stopping condition is met. Defaults to {max_iter}.
630 |
631 | Returns:
632 | solution: The solution represented as a ``RootResults`` object.
633 | """
634 | # check params
635 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
636 | _check_initial_guesses_uniqueness((x0, x1, x2))
637 |
638 | f_wrapper = PyDoubleScalarFPtr.from_f(f)
639 | if f_x0 is None:
640 | f_x0 = f_wrapper.eval(x0)
641 | if f_x1 is None:
642 | f_x1 = f_wrapper.eval(x1)
643 | if f_x2 is None:
644 | f_x2 = f_wrapper.eval(x2)
645 | _check_initial_vals_uniqueness((f_x0, f_x1, f_x2))
646 |
647 | res = hyperbolic_interp_kernel(f_wrapper, x0, x1, x2, f_x0, f_x1, f_x2,
648 | etol, ertol, ptol, prtol, max_iter)
649 | return res
650 |
651 | ################################################################################
652 | # Muller
653 | ################################################################################
654 | # noinspection DuplicatedCode
655 | cdef RootReturnType muller_kernel(
656 | ComplexScalarFPtr f,
657 | double complex x0,
658 | double complex x1,
659 | double complex x2,
660 | double complex f_x0,
661 | double complex f_x1,
662 | double complex f_x2,
663 | double etol=ETOL,
664 | double ertol=ERTOL,
665 | double ptol=PTOL,
666 | double prtol=PRTOL,
667 | unsigned long max_iter=MAX_ITER):
668 | cdef unsigned long step = 0
669 | cdef double complex r, f_r
670 | cdef double precision, error
671 | cdef bint converged, optimal
672 | cdef double complex[3] x_arr = [x0, x1, x2], f_arr = [f_x0, f_x1, f_x2]
673 | cdef double complex[:] xs = x_arr, f_xs = f_arr
674 | if _check_stop_cond_complex_scalar_initial_guesses(xs, f_xs, etol, ertol, ptol, prtol,
675 | &r, &f_r, &precision, &error, &converged, &optimal):
676 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
677 |
678 | cdef double complex div_diff_01, div_diff_12, div_diff_02, a, b, s_delta, d1, d2, d, x3
679 | cdef double complex d_01, d_02, d_12
680 | converged = True
681 | while not (sops.isclose(0, error, ertol, etol) or
682 | sops.isclose(0, precision, prtol, ptol)):
683 | if step >= max_iter > 0:
684 | converged = False
685 | break
686 | step += 1
687 |
688 | d_01 = xs[0] - xs[1]
689 | d_02 = xs[0] - xs[2]
690 | d_12 = xs[1] - xs[2]
691 | if d_01 == 0 or d_02 == 0 or d_12 == 0:
692 | converged = False
693 | break
694 |
695 | div_diff_01 = (f_xs[0] - f_xs[1]) / d_01
696 | div_diff_02 = (f_xs[0] - f_xs[2]) / d_02
697 | div_diff_12 = (f_xs[1] - f_xs[2]) / d_12
698 |         b = div_diff_12 + div_diff_02 - div_diff_01  # slope of the interpolating parabola at xs[2]
699 | a = (div_diff_01 - div_diff_12) / d_02
700 | s_delta = sops.csqrt(b ** 2 - 4 * a * f_xs[2]) # \sqrt{b^2 - 4ac}
701 | d1, d2 = b + s_delta, b - s_delta
702 | # take the higher-magnitude denominator
703 | d = d1 if sops.cabs(d1) > sops.cabs(d2) else d2
704 |
705 | x3 = xs[2] - 2 * f_xs[2] / d
706 | xs[0], f_xs[0] = xs[1], f_xs[1]
707 | xs[1], f_xs[1] = xs[2], f_xs[2]
708 | xs[2], f_xs[2] = x3, f.eval(x3)
709 |
710 | precision = vops.cwidth(xs)
711 | error = sops.cabs(f_xs[2])
712 |
713 | r, f_r = xs[2], f_xs[2]
714 | optimal = sops.isclose(0, error, ertol, etol)
715 | return RootReturnType(r, f_r, step, f.n_f_calls, precision, error, converged, optimal)
716 |
717 | # noinspection DuplicatedCode
718 | @register('cyroot.scalar.quasi_newton')
719 | @dynamic_default_args()
720 | @cython.binding(True)
721 | def muller(f: Callable[[complex], complex],
722 | x0: complex,
723 | x1: complex,
724 | x2: complex,
725 | f_x0: Optional[complex] = None,
726 | f_x1: Optional[complex] = None,
727 | f_x2: Optional[complex] = None,
728 | etol: float = named_default(ETOL=ETOL),
729 | ertol: float = named_default(ERTOL=ERTOL),
730 | ptol: float = named_default(PTOL=PTOL),
731 | prtol: float = named_default(PRTOL=PRTOL),
732 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> RootReturnType:
733 | """
734 | Muller's method for scalar root-finding.
735 |
736 | References:
737 | https://www.ams.org/journals/mcom/1956-10-056/S0025-5718-1956-0083822-0/
738 |
739 | Args:
740 | f (function): Function for which the root is sought.
741 | x0 (complex): First initial point.
742 | x1 (complex): Second initial point.
743 | x2 (complex): Third initial point.
744 | f_x0 (complex, optional): Value evaluated at first
745 | initial point.
746 | f_x1 (complex, optional): Value evaluated at second
747 | initial point.
748 | f_x2 (complex, optional): Value evaluated at third
749 | initial point.
750 | etol (float, optional): Error tolerance, indicating the
751 | desired precision of the root. Defaults to {etol}.
752 | ertol (float, optional): Relative error tolerance.
753 | Defaults to {ertol}.
754 | ptol (float, optional): Precision tolerance, indicating
755 | the minimum change of root approximations or width of
756 | brackets (in bracketing methods) after each iteration.
757 | Defaults to {ptol}.
758 | prtol (float, optional): Relative precision tolerance.
759 | Defaults to {prtol}.
760 | max_iter (int, optional): Maximum number of iterations.
761 | If set to 0, the procedure will run indefinitely until
762 | stopping condition is met. Defaults to {max_iter}.
763 |
764 | Returns:
765 | solution: The solution represented as a ``RootResults`` object.
766 | """
767 | # check params
768 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
769 | _check_initial_guesses_uniqueness((x0, x1, x2))
770 |
771 | f_wrapper = PyComplexScalarFPtr.from_f(f)
772 | if f_x0 is None:
773 | f_x0 = f_wrapper.eval(x0)
774 | if f_x1 is None:
775 | f_x1 = f_wrapper.eval(x1)
776 | if f_x2 is None:
777 | f_x2 = f_wrapper.eval(x2)
778 | _check_initial_vals_uniqueness((f_x0, f_x1, f_x2))
779 |
780 | res = muller_kernel(f_wrapper, x0, x1, x2, f_x0, f_x1, f_x2,
781 | etol, ertol, ptol, prtol, max_iter)
782 | return res
783 |
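# Usage sketch for ``muller`` above; ``_muller_example`` is a hypothetical helper
# added for illustration only. Unlike the real-valued methods in this module,
# Muller's method iterates in the complex plane, so it can reach complex roots
# even of real polynomials.
def _muller_example():
    res = muller(f=lambda z: z ** 2 + 1, x0=0.5 + 0.5j, x1=1 + 1j, x2=2 + 2j)
    print(res.root, res.optimal)  # expected near 1j (or -1j), the roots of z^2 + 1
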
--------------------------------------------------------------------------------
/cyroot/scalar_root.py:
--------------------------------------------------------------------------------
1 | from inspect import getmembers
2 | from functools import partial
3 |
4 | from . import scalar_bracketing, scalar_quasi_newton, scalar_newton
5 | from .utils._function_registering import is_tagged_with_any_startswith
6 |
7 | __all__ = [
8 | 'SCALAR_ROOT_FINDING_METHODS',
9 | 'find_scalar_root',
10 | ]
11 |
12 | # noinspection DuplicatedCode
13 | SCALAR_ROOT_FINDING_METHODS = {}
14 | for module in [scalar_bracketing, scalar_quasi_newton, scalar_newton]:
15 | SCALAR_ROOT_FINDING_METHODS.update(
16 | getmembers(module, partial(is_tagged_with_any_startswith, start='cyroot.scalar')))
17 |
18 |
19 | # noinspection DuplicatedCode
20 | def find_scalar_root(method: str, *args, **kwargs):
21 | """
22 | Find the root of a scalar function.
23 |
24 | Args:
25 | method (str): Name of the method. A full list of supported
26 | methods is stored in ``SCALAR_ROOT_FINDING_METHODS``.
27 | *args: Extra arguments to be passed.
28 | **kwargs: Extra keyword arguments to be passed.
29 |
30 | Returns:
31 | solution: The solution represented as a ``RootResults`` object.
32 | """
33 |     if method in SCALAR_ROOT_FINDING_METHODS:
34 | return SCALAR_ROOT_FINDING_METHODS[method](*args, **kwargs)
35 | else:
36 |         raise ValueError(f'No implementation for {method} found. '
37 | f'Supported methods are: {", ".join(SCALAR_ROOT_FINDING_METHODS.keys())}')
38 |
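# Usage sketch; ``_find_scalar_root_example`` is a hypothetical helper added for
# illustration only. The registry keys are the plain function names collected
# above, so the dispatcher is driven by a string; the exact key set depends on
# which functions carry a 'cyroot.scalar' tag, and ``bisect`` (defined in
# ``scalar_bracketing``) is expected to be one of them.
def _find_scalar_root_example():
    print(sorted(SCALAR_ROOT_FINDING_METHODS))  # available method names
    res = find_scalar_root('bisect', lambda x: x ** 3 - x - 2, 1, 2)
    print(res.root)  # approximately 1.5214
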
--------------------------------------------------------------------------------
/cyroot/utils/__init__.pxd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspiros/cy-root/7150aad4e83ff45037694517641746600b508ea5/cyroot/utils/__init__.pxd
--------------------------------------------------------------------------------
/cyroot/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspiros/cy-root/7150aad4e83ff45037694517641746600b508ea5/cyroot/utils/__init__.py
--------------------------------------------------------------------------------
/cyroot/utils/_function_registering.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from typing import Callable, Any
3 |
4 | __all__ = [
5 | 'register',
6 | 'is_tagged',
7 | 'is_tagged_with_all',
8 | 'is_tagged_with_any_startswith',
9 | 'is_tagged_with_any_startswith_any',
10 | 'is_tagged_with_any_endswith',
11 | 'is_tagged_with_any_endswith_any',
12 | ]
13 |
14 |
15 | def register(*tags: str, wrap: bool = False):
16 |     """Decorator for adding tags to functions."""
17 |
18 | def decorator(f: Callable):
19 | if wrap:
20 | @wraps(f)
21 | def wrapper(*args, **kwargs):
22 | return f(*args, **kwargs)
23 | else:
24 | wrapper = f
25 | if not hasattr(wrapper, '__tags__'):
26 | wrapper.__tags__ = set()
27 | for tag in tags:
28 | wrapper.__tags__.add(tag)
29 |
30 | return wrapper
31 |
32 | return decorator
33 |
34 |
35 | def is_tagged(f: Any) -> bool:
36 | return hasattr(f, '__tags__')
37 |
38 |
39 | def is_tagged_with_all(f: Any, *tags: str) -> bool:
40 | return hasattr(f, '__tags__') and all(tag in f.__tags__ for tag in tags)
41 |
42 |
43 | def is_tagged_with_any(f: Any, *tags: str) -> bool:
44 | return hasattr(f, '__tags__') and any(tag in f.__tags__ for tag in tags)
45 |
46 |
47 | def is_tagged_with_any_startswith(f: Any, start: str) -> bool:
48 | return hasattr(f, '__tags__') and any(tag.startswith(start) for tag in f.__tags__)
49 |
50 |
51 | def is_tagged_with_any_startswith_any(f: Any, *starts: str) -> bool:
52 |     return hasattr(f, '__tags__') and any(any(tag.startswith(start) for start in starts)
53 |                                           for tag in f.__tags__)
54 |
55 |
56 | def is_tagged_with_any_endswith(f: Any, end: str) -> bool:
57 | return hasattr(f, '__tags__') and any(tag.endswith(end) for tag in f.__tags__)
58 |
59 |
60 | def is_tagged_with_any_endswith_any(f: Any, *ends: str) -> bool:
61 |     return hasattr(f, '__tags__') and any(any(tag.endswith(end) for end in ends)
62 |                                           for tag in f.__tags__)
63 |
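# Quick illustration of the helpers above; the tag string is made up for the
# example and carries no special meaning.
def _tagging_example():
    @register('cyroot.example.demo')
    def solver():
        pass

    assert is_tagged(solver)
    assert is_tagged_with_all(solver, 'cyroot.example.demo')
    assert is_tagged_with_any_startswith(solver, 'cyroot.example')
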
--------------------------------------------------------------------------------
/cyroot/utils/_function_registering.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 |
4 | def _empty_decorator(f):
5 | return f
6 |
7 |
8 | register = _empty_decorator
9 |
10 |
11 | def is_tagged(f: Any) -> bool:
12 | ...
13 |
14 |
15 | def is_tagged_with_all(f: Any, *tags: str) -> bool:
16 | ...
17 |
18 |
19 | def is_tagged_with_any(f: Any, *tags: str) -> bool:
20 | ...
21 |
22 |
23 | def is_tagged_with_any_startswith(f: Any, start: str) -> bool:
24 | ...
25 |
26 |
27 | def is_tagged_with_any_startswith_any(f: Any, *starts: str) -> bool:
28 | ...
29 |
30 |
31 | def is_tagged_with_any_endswith(f: Any, end: str) -> bool:
32 | ...
33 |
34 |
35 | def is_tagged_with_any_endswith_any(f: Any, *ends: str) -> bool:
36 | ...
37 |
--------------------------------------------------------------------------------
/cyroot/utils/_warnings.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | __all__ = [
4 | 'ValueWarning',
5 | 'warn_value',
6 | 'set_value_warning_filter',
7 | ]
8 |
9 |
10 | class ValueWarning(UserWarning):
11 | pass
12 |
13 |
14 | warnings.simplefilter('always', ValueWarning)
15 |
16 |
17 | def warn_value(message, filename=None, lineno=None):
18 | if filename is not None and lineno is not None:
19 | warnings.warn_explicit(message, ValueWarning, filename, lineno, module=None)
20 | else:
21 | warnings.warn(message, ValueWarning, stacklevel=2)
22 |
23 |
24 | def set_value_warning_filter(action: str = 'always',
25 | lineno: int = 0,
26 | append: bool = False):
27 | """
28 |     Add a warnings filter entry for ``ValueWarning``.
29 |
30 | Args:
31 | action (str): one of ``'error'``, ``'ignore'``, ``'always'``,
32 | ``'default'``, ``'module'``, or ``'once'``. Defaults to ``'always'``.
33 | lineno (int): an integer line number, 0 matches all warnings.
34 | Defaults to 0.
35 | append (bool): if True, append to the list of filters. Defaults to False.
36 |
37 | See Also:
38 | ``warnings.simplefilter``
39 | """
40 | warnings.simplefilter(action, ValueWarning, lineno, append)
41 |
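# Example: escalate ``ValueWarning`` to an exception while debugging, then restore
# the module default installed above. ``_warning_example`` is a hypothetical helper
# added for illustration only.
def _warning_example():
    set_value_warning_filter('error')
    try:
        warn_value('Input dimension is smaller than output dimension.')
    except ValueWarning as e:
        print('caught:', e)
    set_value_warning_filter('always')
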
--------------------------------------------------------------------------------
/cyroot/utils/itertools.pxd:
--------------------------------------------------------------------------------
1 | cpdef unsigned int[:, :] product(unsigned int[:] ns)
2 |
--------------------------------------------------------------------------------
/cyroot/utils/itertools.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from cython cimport view
8 | import numpy as np
9 | cimport numpy as np
10 |
11 | from ..ops cimport vector_ops
12 |
13 | __all__ = [
14 | 'product',
15 | ]
16 |
17 | cdef inline void product_kernel(
18 | unsigned int[:] ns,
19 | unsigned int[:, :] out,
20 | unsigned int d=0) nogil:
21 | if d >= ns.shape[0]:
22 | return
23 | cdef unsigned int n = ns[d]
24 | cdef unsigned int step = vector_ops.prod[np.uint32_t](ns[d + 1:])
25 | cdef unsigned int i
26 | for i in range(out.shape[0]):
27 | out[i, d] = (i / step) % n
28 | product_kernel(ns, out, d + 1)
29 |
30 | cpdef unsigned int[:, :] product(unsigned int[:] ns):
31 | cdef np.ndarray[np.uint32_t, ndim=2] empty = np.empty((0, 0), dtype=np.uint32)
32 | cdef unsigned int i
33 | for i in range(ns.shape[0]):
34 | if ns[i] == 0:
35 | return empty
36 |
37 | cdef unsigned int[:, :] out = view.array(
38 | shape=(vector_ops.prod[np.uint32_t](ns), ns.shape[0]),
39 | itemsize=sizeof(int),
40 | format='I')
41 | product_kernel(ns, out, 0)
42 | return out
43 |
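# Sketch of what ``product`` above computes: the Cartesian product of
# range(ns[0]) x ... x range(ns[-1]), returned as the rows of a typed memoryview,
# much like itertools.product but without Python-object overhead.
# ``_product_example`` is a hypothetical helper added for illustration only.
def _product_example():
    rows = np.asarray(product(np.array([2, 3], dtype=np.uint32)))
    print(rows)  # rows are [0,0], [0,1], [0,2], [1,0], [1,1], [1,2]
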
--------------------------------------------------------------------------------
/cyroot/vector_bracketing.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from typing import Callable, Optional, Union, Tuple
8 |
9 | import cython
10 | import numpy as np
11 | cimport numpy as np
12 | from cython cimport view
13 | from dynamic_default_args import dynamic_default_args, named_default
14 | from libc cimport math
15 |
16 | from .scalar_bracketing import bisect
17 | from ._check_args import (
18 | _check_stop_cond_args,
19 | )
20 | from ._check_args cimport _check_stop_cond_vector_bracket
21 | from ._defaults import ETOL, ERTOL, PTOL, PRTOL, MAX_ITER
22 | from ._types import VectorLike, Array2DLike
23 | from .fptr cimport NdArrayFPtr, PyNdArrayFPtr
24 | from .ops cimport scalar_ops as sops, vector_ops as vops
25 | from .return_types cimport BracketingMethodsReturnType
26 | from .utils._function_registering import register
27 | from .utils._warnings import warn_value
28 |
29 | __all__ = [
30 | 'vrahatis',
31 | ]
32 |
33 | ################################################################################
34 | # Bisection
35 | ################################################################################
36 | #------------------------
37 | # Vrahatis
38 | #------------------------
39 | # noinspection DuplicatedCode
40 | cdef BracketingMethodsReturnType vrahatis_kernel(
41 | NdArrayFPtr F,
42 | np.ndarray[np.float64_t, ndim=2] x0s,
43 | np.ndarray[np.float64_t, ndim=2] F_x0s,
44 | np.ndarray[np.float64_t, ndim=2] S_x0s,
45 | double etol=ETOL,
46 | double ertol=ERTOL,
47 | double ptol=PTOL,
48 | double prtol=PRTOL,
49 | unsigned long max_iter=MAX_ITER):
50 | cdef unsigned int d_in = x0s.shape[1], d_out = F_x0s.shape[1]
51 | cdef unsigned long step = 0
52 | cdef double precision, error
53 | cdef np.ndarray[np.float64_t, ndim=1] r = np.empty(d_in, dtype=np.float64)
54 | cdef np.ndarray[np.float64_t, ndim=1] F_r = np.empty(d_out, dtype=np.float64)
55 | cdef bint converged, optimal
56 | if _check_stop_cond_vector_bracket(x0s, F_x0s, etol, ertol, ptol, prtol,
57 | r, F_r, &precision, &error, &converged, &optimal):
58 | return BracketingMethodsReturnType(
59 | r, F_r, step, F.n_f_calls, x0s, F_x0s, precision, error, converged, optimal)
60 |
61 | cdef unsigned long[:, :] vertices = _get_1_simplexes(S_x0s)
62 | cdef np.ndarray[np.float64_t, ndim=1] S_r = np.empty(d_out, dtype=np.float64)
63 | cdef unsigned long i, p0, p1
64 | cdef long r_id = -1
65 | converged = True
66 | while not (sops.isclose(0, error, ertol, etol) or
67 | sops.isclose(0, precision, prtol, ptol)):
68 | if step >= max_iter > 0:
69 | converged = False
70 | break
71 | step += 1
72 | for i in range(vertices.shape[0]):
73 | if vertices[i, 2] == 1:
74 | p0 = vertices[i, 0]
75 | p1 = vertices[i, 1]
76 | r = (x0s[p0] + x0s[p1]) / 2
77 | F_r[:] = F.eval(r)
78 | S_r[:] = np.sign(F_r)
79 | if np.array_equal(S_r, S_x0s[p0]):
80 | x0s[p0] = r
81 | F_x0s[p0] = F_r
82 | r_id = p0
83 | else:
84 | x0s[p1] = r
85 | F_x0s[p1] = F_r
86 | r_id = p1
87 | error = vops.max(vops.fabs(F_r))
88 | precision = vops.max(x0s.max(0) - x0s.min(0))
89 | if not sops.isclose(0, error, ertol, etol):
90 | r_id = -1
91 | else:
92 | break
93 | if r_id >= 0:
94 | break
95 |
96 | if r_id >= 0:
97 | # return the vertex with small enough error
98 | optimal = sops.isclose(0, error, ertol, etol)
99 | return BracketingMethodsReturnType(
100 | x0s[r_id], F_x0s[r_id], step, F.n_f_calls, x0s, F_x0s, precision, error, converged, optimal)
101 | elif step == 0:
102 | # if the precision tol is satisfied without running into the loop,
103 | # just return the vertex with the smallest error
104 | optimal = sops.isclose(0, error, ertol, etol)
105 | return BracketingMethodsReturnType(
106 | r, F_r, step, F.n_f_calls, x0s, F_x0s, precision, error, converged, optimal)
107 | # otherwise, find the diagonal with the longest length
108 | cdef unsigned long best_i
109 | cdef double longest_len_squared = -math.INFINITY, diag_len_squared
110 | for i in range(vertices.shape[0]):
111 | if vertices[i, 2] == d_in - 1:
112 | diag_len_squared = np.power(x0s[vertices[i, 0]] - x0s[vertices[i, 1]], 2).sum()
113 | if diag_len_squared > longest_len_squared:
114 | best_i = i
115 | longest_len_squared = diag_len_squared
116 | r = (x0s[vertices[best_i, 0]] + x0s[vertices[best_i, 1]]) / 2
117 | F_r = F.eval(r)
118 |
119 | error = vops.max(vops.fabs(F_r))
120 | optimal = sops.isclose(0, error, ertol, etol)
121 | return BracketingMethodsReturnType(
122 | r, F_r, step, F.n_f_calls, x0s, F_x0s, precision, error, converged, optimal)
123 |
124 | # noinspection DuplicatedCode
125 | cpdef unsigned long[:, :] _get_1_simplexes(double[:, :] S):
126 | cdef unsigned long[:, :] vertices = view.array(
127 | shape=(S.shape[0] * (S.shape[0] - 1) // 2, 3), itemsize=sizeof(long), format='L')
128 | vertices[:, 2] = 0
129 | cdef unsigned int v_id = 0, i, j, k
130 | with nogil:
131 | for i in range(S.shape[0]):
132 | for j in range(i + 1, S.shape[0]):
133 | vertices[v_id, 0] = i
134 | vertices[v_id, 1] = j
135 | for k in range(S.shape[1]):
136 | vertices[v_id, 2] += S[i, k] == S[j, k]
137 | v_id += 1
138 | return vertices
139 |
140 | # noinspection DuplicatedCode
141 | cpdef np.ndarray[np.float64_t, ndim=2] get_M(unsigned int n,
142 | bint sign=False):
143 | cdef np.ndarray[np.float64_t, ndim=2] M = np.empty((2 ** n, n), dtype=np.float64)
144 | cdef unsigned int i, j, rate = 1
145 |     for j in range(M.shape[1] - 1, -1, -1):
146 | for i in range(M.shape[0]):
147 | M[i, j] = (i // rate) % 2
148 | rate *= 2
149 | if sign:
150 | M[M == 0] = -1
151 | return M
152 |
153 | # noinspection DuplicatedCode
154 | def compute_admissible_n_polygon(
155 | F: Callable[[VectorLike], VectorLike],
156 | x: VectorLike,
157 | h: Optional[Union[VectorLike, float]] = None,
158 |         eps: float = 1e-5) -> Tuple[np.ndarray, np.ndarray]:
159 | """
160 | Find an admissible n-polygon from an initial point.
161 |
162 | Args:
163 | F (function): Function for which the root is sought.
164 | x (np.ndarray): Initial point.
165 | h (np.ndarray, optional): Search direction.
166 |         eps (float): ``etol`` for the internal bisection.
167 |
168 | Returns:
169 | V (np.ndarray): Vertices of the n-polygon.
170 | S (np.ndarray): Signs of ``V``.
171 | """
172 | if eps <= 0:
173 | raise ValueError(f'eps must be positive. Got {eps}.')
174 | if isinstance(F, NdArrayFPtr):
175 | F = F.eval
176 |
177 | x = np.asarray(x, dtype=np.float64).reshape(-1)
178 | F_x = F(x)
179 | d_in, d_out = x.shape[0], F_x.shape[0]
180 | n = 2 ** d_out
181 |
182 | if h is None:
183 | h = np.full(d_in, np.abs(x).mean(), dtype=np.float64)
184 | elif isinstance(h, np.ndarray):
185 | h = h.astype(np.float64).reshape(-1)
186 | if h.shape[0] != d_in:
187 | raise ValueError('h must be of the same dimension as x. '
188 | f'Got d_in={d_in}, h.shape={h.shape[0]}.')
189 | else:
190 | h = np.full(d_in, h, dtype=np.float64)
191 |
192 | E = np.tile(x, [2 ** d_in, 1])
193 | cdef M = get_M(d_in)
194 | V = E + M.dot(np.diag(h))
195 | F_V = np.empty((V.shape[0], d_out), dtype=np.float64)
196 | F_V[0] = F_x
197 | for i in range(1, F_V.shape[0]):
198 | F_V[i] = F(V[i])
199 | S = np.sign(F_V)
200 |
201 | if np.unique(S, axis=0).shape[0] == n:
202 | if d_in == d_out:
203 | return V, S
204 | else:
205 | V_out = np.empty((n, d_in), dtype=np.float64)
206 | S_out = np.empty((n, d_out), dtype=np.float64)
207 | V_out[0] = V[0]
208 | S_out[0] = S[0]
209 | k = 1
210 | for i in range(V.shape[0]):
211 | if not np.equal(S[i], S_out[:k]).all(1).any():
212 | V_out[k] = V[i]
213 | S_out[k] = S[i]
214 | k += 1
215 | if k == n:
216 | return V_out, S_out
217 |
218 | vertices = _get_1_simplexes(M)
219 | mask1 = np.ones(V.shape[0], dtype=np.bool_)
220 | mask2 = np.ones(V.shape[0], dtype=np.bool_)
221 | for i in range(vertices.shape[0]):
222 | if vertices[i, 2] != 1:
223 | continue
224 | pair = vertices[i, :2]
225 | simplex = V[pair]
226 | signs = S[pair]
227 |
228 | mask1[:] = True
229 | mask2[:] = True
230 | mask1[pair[0]] = False
231 | mask2[pair[1]] = False
232 | if not (np.equal(signs[0], S[mask1]).all(1).any() and
233 | np.equal(signs[1], S[mask2]).all(1).any()):
234 | continue
235 |
236 | coef = simplex[1] - simplex[0]
237 | intercept = simplex[0]
238 | length = np.linalg.norm(coef)
239 | if length == 0:
240 | continue
241 |
242 | for j in range(d_out):
243 | res = bisect(lambda r: F(r * coef + intercept)[j], 0, 1,
244 | algo=2, etol=eps, ertol=0, ptol=0, prtol=0, max_iter=0)
245 | if res.optimal: # found an intersection between simplex and F_j
246 | step = 2 * eps / length
247 | new_simplex = np.empty_like(simplex)
248 | new_simplex[0] = max(0, res.root - step) * coef + intercept
249 | new_simplex[1] = min(1, res.root + step) * coef + intercept
250 | new_signs = np.empty_like(signs)
251 | new_signs[0] = np.sign(F(new_simplex[0]))
252 | new_signs[1] = np.sign(F(new_simplex[1]))
253 | # new_signs[1, j] = -new_signs[0, j]
254 |
255 | if np.array_equal(signs, new_signs):
256 | continue
257 | elif (np.equal(new_signs[0], S[mask1]).all(1).any() and
258 | np.equal(new_signs[1], S[mask2]).all(1).any()):
259 | continue
260 |
261 | V[pair] = new_simplex
262 | S[pair] = new_signs
263 | if np.unique(S, axis=0).shape[0] == n:
264 | if d_in == d_out:
265 | return V, S
266 | else:
267 | V_out = np.empty((n, d_in), dtype=np.float64)
268 | S_out = np.empty((n, d_out), dtype=np.float64)
269 | V_out[0] = V[0]
270 | S_out[0] = S[0]
271 | k = 1
272 | for i in range(V.shape[0]):
273 | if not np.equal(S[i], S_out[:k]).all(1).any():
274 | V_out[k] = V[i]
275 | S_out[k] = S[i]
276 | k += 1
277 | if k == n:
278 | return V_out, S_out
279 | raise ValueError('Unable to find an admissible n-polygon. '
280 | 'Try to modify initial point x or search direction h. '
281 | 'Best unique signs:\n' + repr(np.unique(S)))
282 |
283 | # noinspection DuplicatedCode
284 | def sorted_by_vertices(*mats, S):
285 | sorted_inds = np.lexsort(
286 | tuple(S[:, j] for j in range(S.shape[1] - 1, -1, -1)))
287 | return *(m[sorted_inds] for m in mats), S[sorted_inds]
288 |
289 | # noinspection DuplicatedCode
290 | @register('cyroot.vector.bracketing')
291 | @dynamic_default_args()
292 | @cython.binding(True)
293 | def vrahatis(F: Callable[[VectorLike], VectorLike],
294 | x0s: Union[Array2DLike, VectorLike],
295 | F_x0s: Optional[Array2DLike] = None,
296 | h: Optional[Union[VectorLike, float]] = None,
297 | etol: float = named_default(ETOL=ETOL),
298 | ertol: float = named_default(ERTOL=ERTOL),
299 | ptol: float = named_default(PTOL=PTOL),
300 | prtol: float = named_default(PRTOL=PRTOL),
301 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> BracketingMethodsReturnType:
302 | """
303 | Vrahatis's Generalized Bisection method for vector root-finding.
304 |     This method requires the initial brackets to form an admissible
305 |     n-polygon: an n-sided polygon on whose corners the sign vectors of
306 |     the function ``F`` are all distinct.
307 |
308 | The parameter ``h`` can be used to set a search direction to construct
309 | the admissible n-polygon from a single input guess following the
310 | algorithm proposed in the original paper. However, it is usually not
311 | very helpful.
312 |
313 |     Another shortcoming is that the initial search region should be
314 |     well divided into n parts by the sign of the function ``F``. In
315 |     practice, this means the initial bracket should be tight around the
316 |     root for complicated functions; otherwise, convergence is not guaranteed.
317 |
318 | References:
319 | https://doi.org/10.1007/BF01389620
320 |
321 | Args:
322 | F (function): Function for which the root is sought.
323 | x0s (np.ndarray): Initial bounds or a single initial point.
324 | F_x0s (np.ndarray, optional): Values evaluated at initial
325 | bounds.
326 | h (np.ndarray, optional): Search direction used to construct
327 | the admissible n-polygon from a single initial point.
328 | etol (float, optional): Error tolerance, indicating the
329 | desired precision of the root. Defaults to {etol}.
330 | ertol (float, optional): Relative error tolerance.
331 | Defaults to {ertol}.
332 | ptol (float, optional): Precision tolerance, indicating
333 | the minimum change of root approximations or width of
334 | brackets (in bracketing methods) after each iteration.
335 | Defaults to {ptol}.
336 | prtol (float, optional): Relative precision tolerance.
337 | Defaults to {prtol}.
338 | max_iter (int, optional): Maximum number of iterations.
339 | If set to 0, the procedure will run indefinitely until
340 | stopping condition is met. Defaults to {max_iter}.
341 |
342 | Returns:
343 | solution: The solution represented as a ``RootResults`` object.
344 | """
345 | # check params
346 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
347 |
348 | x0s = np.asarray(x0s, dtype=np.float64)
349 |
350 | F_wrapper = PyNdArrayFPtr.from_f(F)
351 | if x0s.ndim == 1 or x0s.shape[0] == 1:
352 | x0s, S_x0s = compute_admissible_n_polygon(F_wrapper, x0s.reshape(-1), h)
353 | elif x0s.ndim != 2:
354 | raise ValueError('Initial bounds must be 2D array. '
355 | f'Got {x0s.shape}.')
356 |
357 | if F_x0s is None:
358 | F_x0s = np.stack([F_wrapper.eval(x0s[i]) for i in range(x0s.shape[0])])
359 | else:
360 | F_x0s = np.asarray(F_x0s, dtype=np.float64)
361 | if F_x0s.ndim != 2 or F_x0s.shape[0] != x0s.shape[0]:
362 |             raise ValueError('x0s and F_x0s must have the same length.')
363 | if x0s.shape[0] != 2 ** F_x0s.shape[1]:
364 | raise ValueError('Initial bounds do not form an admissible n-polygon. '
365 | f'Expected {2 ** F_x0s.shape[1]} points, got {x0s.shape[0]}.')
366 |
367 | if x0s.shape[1] < F_x0s.shape[1]:
368 | warn_value('Input dimension is smaller than output dimension. '
369 | f'Got n={x0s.shape[1]}, m={F_x0s.shape[1]}.')
370 |
371 | S_x0s = np.sign(F_x0s)
372 | if np.unique(S_x0s, axis=0).shape[0] != x0s.shape[0]:
373 | raise ValueError('Initial bounds do not form an admissible n-polygon. '
374 | f'Got signs:\n' + repr(S_x0s))
375 |
376 | # sort by order of M
377 | x0s, F_x0s, S_x0s = sorted_by_vertices(x0s, F_x0s, S=S_x0s)
378 | res = vrahatis_kernel(
379 | F_wrapper, x0s, F_x0s, S_x0s, etol, ertol, ptol, prtol, max_iter)
380 | return res
381 |
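# Usage sketch for ``vrahatis`` above; ``_vrahatis_example`` is a hypothetical
# helper added for illustration only. For the affine map F(x, y) = (x - 0.3, y + 0.4),
# whose root is (0.3, -0.4), the four corners of a square centered at the origin
# already carry the 2**2 distinct sign vectors required of an admissible 4-polygon.
def _vrahatis_example():
    F = lambda v: np.array([v[0] - 0.3, v[1] + 0.4])
    x0s = np.array([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
    res = vrahatis(F, x0s)
    print(res.root)  # expected to approach [0.3, -0.4]
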
382 | #------------------------
383 | # Eiger-Sikorski-Stenger
384 | #------------------------
385 | # TODO: Add Eiger-Sikorski-Stenger' Bisection method using simplex
386 | # as presented in https://dl.acm.org/doi/10.1145/2701.2705.
387 |
--------------------------------------------------------------------------------
/cyroot/vector_derivative_approximation.pxd:
--------------------------------------------------------------------------------
1 | cimport numpy as np
2 |
3 | from .fptr cimport NdArrayFPtr
4 |
5 | ################################################################################
6 | # Base Class
7 | ################################################################################
8 | cdef class VectorDerivativeApproximation(NdArrayFPtr):
9 | cdef NdArrayFPtr F
10 | cpdef np.ndarray eval(self, np.ndarray x)
11 | cpdef np.ndarray eval_with_f_val(self, np.ndarray x, np.ndarray F_x)
12 |
13 | ################################################################################
14 | # Finite Difference
15 | ################################################################################
16 | cdef np.ndarray generalized_finite_difference_kernel(
17 | NdArrayFPtr F,
18 | np.ndarray[np.float64_t, ndim=1] x,
19 | np.ndarray[np.float64_t, ndim=1] F_x,
20 | np.ndarray[np.float64_t, ndim=1] h,
21 | int order=*,
22 | int kind=*)
23 |
24 | cdef class GeneralizedFiniteDifference(VectorDerivativeApproximation):
25 | cdef public int order, kind
26 | cdef public np.ndarray h
27 | cpdef np.ndarray eval(self, np.ndarray x)
28 | cpdef np.ndarray eval_with_f_val(self, np.ndarray x, np.ndarray F_x)
29 |
--------------------------------------------------------------------------------
/cyroot/vector_derivative_approximation.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | import array
8 | from typing import Callable, Optional, Union
9 |
10 | import numpy as np
11 | cimport numpy as np
12 | import cython
13 | from cpython cimport array
14 | from cython cimport view
15 | from libcpp.algorithm cimport sort
16 | from libcpp.vector cimport vector
17 |
18 | from ._types import VectorLike, ArrayLike
19 | from .fptr cimport NdArrayFPtr, PyNdArrayFPtr
20 | from .ops cimport scalar_ops as sops, vector_ops as vops
21 | from .scalar_derivative_approximation import _check_finite_difference_args
22 | from .utils._function_registering import register
23 | from .utils.itertools cimport product
24 |
25 | __all__ = [
26 | 'VectorDerivativeApproximation',
27 | 'generalized_finite_difference',
28 | 'GeneralizedFiniteDifference',
29 | ]
30 |
31 | ################################################################################
32 | # Base Class
33 | ################################################################################
34 | # noinspection DuplicatedCode
35 | cdef class VectorDerivativeApproximation(NdArrayFPtr):
36 | """
37 | A class to wrap a vector function for derivative approximation.
38 | """
39 | def __init__(self, F: Union[NdArrayFPtr, Callable[[VectorLike], VectorLike]]):
40 | if isinstance(F, NdArrayFPtr):
41 | self.F = F
42 | else:
43 | self.F = PyNdArrayFPtr(F)
44 |
45 | cpdef np.ndarray eval(self, np.ndarray x):
46 | raise NotImplementedError
47 |
48 | cpdef np.ndarray eval_with_f_val(self, np.ndarray x, np.ndarray F_x):
49 | raise NotImplementedError
50 |
51 | ################################################################################
52 | # Finite Difference
53 | ################################################################################
54 | # noinspection DuplicatedCode
55 | cdef unsigned int[:, :] _vector_derivative_indices(unsigned int[:] ns, bint[:] unique_mask):
56 | cdef unsigned int[:, :] inds = product(ns)
57 | unique_mask[:] = True
58 | cdef unsigned int i, j, k
59 | with nogil:
60 | for i in range(inds.shape[0]):
61 | for j in range(inds.shape[1] - 1):
62 | for k in range(j + 1, inds.shape[1]):
63 | if inds[i, j] > inds[i, k]:
64 | unique_mask[i] = False
65 | break
66 | if not unique_mask[i]:
67 | break
68 | return inds
69 |
70 | cdef np.ndarray[np.uint32_t, ndim=2] _vector_perturbation_steps(unsigned int dim, int order):
71 | cdef unsigned int[:] ns = view.array(shape=(dim,),
72 | itemsize=sizeof(int),
73 | format='I')
74 | ns[:] = order + 1
75 | cdef unsigned int[:, :] inds = product(ns)
76 | cdef array.array perturb_inds = array.array('I')
77 | for i in range(inds.shape[0]):
78 | steps = inds[i]
79 | if sum(steps) <= order:
80 | perturb_inds.append(i)
81 | return np.ascontiguousarray(np.asarray(inds, dtype=np.uint32)[perturb_inds])
82 |
83 | cdef int[:] _finite_difference_coefs(int order, int kind):
84 | cdef int[:] out = view.array(shape=(order + 1,),
85 | itemsize=sizeof(int),
86 | format='i')
87 | cdef long i
88 | if kind == 1:
89 | for i in range(order + 1):
90 | out[i] = (-1) ** (order - i) * sops.binomial_coef(order, i)
91 | else:
92 | for i in range(order + 1):
93 | out[i] = (-1) ** i * sops.binomial_coef(order, i)
94 | return out
95 |
96 | cdef unsigned int[:] _index_to_grad_comb(unsigned int[:] index, unsigned int dim):
97 | cdef unsigned int[:] comb = view.array(shape=(dim,),
98 | itemsize=sizeof(int),
99 | format='I')
100 | comb[:] = 0
101 | cdef unsigned int i
102 | for i in range(index.shape[0]):
103 | comb[index[i]] += 1
104 | return comb
105 |
106 | # noinspection DuplicatedCode
107 | cdef np.ndarray generalized_finite_difference_kernel(
108 | NdArrayFPtr F,
109 | np.ndarray[np.float64_t, ndim=1] x,
110 | np.ndarray[np.float64_t, ndim=1] F_x,
111 | np.ndarray[np.float64_t, ndim=1] h,
112 | int order=1,
113 | int kind=0):
114 | cdef unsigned int i, j, k, ii, eq_ii
115 | cdef unsigned int[:] dims = view.array(shape=(order + 1,),
116 | itemsize=sizeof(int),
117 | format='I')
118 | dims[0] = F_x.shape[0]
119 | dims[1:] = x.shape[0]
120 | cdef np.ndarray[np.float64_t, ndim=2] D = np.zeros(dims, dtype=np.float64).reshape(F_x.shape[0], -1)
121 |
122 | cdef unsigned int[:, :] indices
123 | cdef unsigned int[:] index
124 | cdef bint[:] unique_mask = view.array(shape=(vops.prod[np.uint32_t](dims[1:]),),
125 | itemsize=sizeof(int),
126 | format='i')
127 | indices = _vector_derivative_indices(dims[1:], unique_mask)
128 |
129 | cdef bint zero_step
130 | cdef np.ndarray[np.uint32_t, ndim=2] perturbation_steps = _vector_perturbation_steps(x.shape[0], order)
131 | cdef np.ndarray[np.float64_t, ndim=1] perturbation_step
132 | cdef np.ndarray[np.float64_t, ndim=2] F_perturbations = np.empty(
133 | (perturbation_steps.shape[0], F_x.shape[0]), dtype=np.float64)
134 | for i in range(perturbation_steps.shape[0]):
135 | if kind == 1:
136 | perturbation_step = perturbation_steps[i]
137 | elif kind == -1:
138 | perturbation_step = -perturbation_steps[i]
139 | else:
140 | perturbation_step = order / 2. - perturbation_steps[i]
141 |
142 | if kind != 0:
143 | if i == 0:
144 | F_perturbations[0] = F_x
145 | else:
146 | F_perturbations[i] = F.eval(x + perturbation_step * h)
147 | else:
148 | zero_step = True
149 | for j in range(perturbation_step.shape[0]):
150 | if perturbation_step[j] != 0:
151 | zero_step = False
152 | break
153 | if zero_step:
154 | F_perturbations[i] = F_x
155 | else:
156 | F_perturbations[i] = F.eval(x + perturbation_step * h)
157 |
158 | cdef double scale
159 | cdef int coef
160 | cdef int[:] coefs
161 | cdef vector[vector[int]] all_coefs = vector[vector[int]](x.shape[0])
162 | for j in range(all_coefs.size()):
163 | all_coefs[j].reserve(order + 1)
164 | cdef unsigned int[:] grad_comb, coefs_lens = view.array(shape=(x.shape[0],),
165 | itemsize=sizeof(int),
166 | format='I')
167 | cdef unsigned int[:, :] perturbs
168 | cdef unsigned int[:] perturb
169 | for ii in range(indices.shape[0]):
170 | if unique_mask[ii]:
171 | index = indices[ii]
172 | grad_comb = _index_to_grad_comb(index, x.shape[0])
173 | scale = 1
174 | for j in range(x.shape[0]):
175 | scale *= h[j] ** grad_comb[j]
176 | all_coefs[j].clear()
177 | coefs = _finite_difference_coefs(grad_comb[j], kind)
178 | for k in range(coefs.shape[0]):
179 | all_coefs[j].push_back(coefs[k])
180 | coefs_lens[j] = all_coefs[j].size()
181 | perturbs = product(coefs_lens)
182 | for j in range(perturbs.shape[0]):
183 | perturb = perturbs[j]
184 | coef = 1
185 | for k in range(all_coefs.size()):
186 | coef *= all_coefs[k][perturb[k]]
187 | for k in range(perturbation_steps.shape[0]):
188 | if vops.equal[np.uint32_t](perturbation_steps[k], perturb):
189 | break
190 | D[:, ii] += coef * F_perturbations[k] / scale
191 |
192 | cdef unsigned int[:] eq_index = view.array(shape=(indices.shape[1],),
193 | itemsize=sizeof(int),
194 | format='I')
195 | for ii in range(indices.shape[0]): # repeated indices
196 | if not unique_mask[ii]:
197 | eq_index[:] = indices[ii]
198 | sort(&eq_index[0], (&eq_index[0]) + eq_index.shape[0])
199 | eq_ii = 0
200 | for i in range(order - 1, -1, -1):
201 | eq_ii += eq_index[i] * x.shape[0] ** (order - 1 - i)
202 | D[:, ii] = D[:, eq_ii]
203 | return D.reshape(dims)
204 |
205 | # noinspection DuplicatedCode
206 | cdef class GeneralizedFiniteDifference(VectorDerivativeApproximation):
207 | def __init__(self,
208 | F: Union[NdArrayFPtr, Callable[[VectorLike], VectorLike]],
209 | h: Union[float, VectorLike] = 1.,
210 | order: int = 1,
211 | kind: Union[int, str] = 0):
212 | super().__init__(F)
213 | # check args
214 | h, order, kind = _check_finite_difference_args(h, order, kind)
215 | if isinstance(h, float):
216 | self.h = np.full(1, h)
217 | else:
218 | self.h = np.asarray(h, dtype=np.float64)
219 | self.order = order
220 | self.kind = kind
221 |
222 | cpdef np.ndarray eval(self, np.ndarray x):
223 | self.n_f_calls += 1
224 | cdef np.ndarray[np.float64_t, ndim=1] F_x = self.F.eval(x)
225 | cdef np.ndarray[np.float64_t, ndim=1] h
226 | if self.h.shape[0] == x.shape[0]:
227 | h = self.h
228 | elif self.h.shape[0] == 1:
229 | h = np.full(x.shape[0], self.h[0])
230 | else:
231 | raise ValueError(f'x.shape[0]={x.shape[0]} while h.shape[0]={self.h.shape[0]}.')
232 | return generalized_finite_difference_kernel(
233 | self.F, x, F_x, h, self.order, self.kind)
234 |
235 | cpdef np.ndarray eval_with_f_val(self, np.ndarray x, np.ndarray F_x):
236 | self.n_f_calls += 1
237 | cdef np.ndarray[np.float64_t, ndim=1] h
238 | if self.h.shape[0] == x.shape[0]:
239 | h = self.h
240 | elif self.h.shape[0] == 1:
241 | h = np.full(x.shape[0], self.h[0])
242 | else:
243 | raise ValueError(f'x.shape[0]={x.shape[0]} while h.shape[0]={self.h.shape[0]}.')
244 | return generalized_finite_difference_kernel(
245 | self.F, x, F_x, h, self.order, self.kind)
246 |
247 | # noinspection DuplicatedCode
248 | @register('cyroot.da.vector')
249 | @cython.binding(True)
250 | def generalized_finite_difference(F: Callable[[VectorLike], VectorLike],
251 | x: VectorLike,
252 | F_x: Optional[ArrayLike] = None,
253 | h: Union[float, VectorLike] = 1.,
254 | order: int = 1,
255 | kind: Union[int, str] = 0):
256 | """
257 | Generalized finite difference method.
258 |
259 | Args:
260 | F (function): Function for which the derivative is sought.
261 | x (np.ndarray): Point at which the derivative is sought.
262 | F_x (np.ndarray, optional): Value evaluated at point ``x``.
263 | h (float, np.ndarray): Finite difference step. Defaults to 1.
264 | order (int): Order of derivative to be estimated.
265 | Defaults to 1.
266 | kind (int, str): Type of finite difference, including ``1``
267 | for forward, ``-1`` for backward, and ``0`` for central.
268 | Defaults to 0.
269 |
270 | Returns:
271 | diff: Estimated derivative.
272 | """
273 | # check args
274 | h, order, kind = _check_finite_difference_args(h, order, kind)
275 |
276 | x = np.asarray(x, dtype=np.float64)
277 | if isinstance(h, float):
278 | h = np.full(x.shape[0], h)
279 | else:
280 | h = np.asarray(h, dtype=np.float64)
281 |
282 | F_wrapper = PyNdArrayFPtr.from_f(F)
283 | if F_x is None:
284 | F_x = F_wrapper.eval(x)
285 | else:
286 | F_x = np.asarray(F_x, dtype=np.float64)
287 | return generalized_finite_difference_kernel(F_wrapper, x, F_x, h, order, kind)
288 |
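# Usage sketch: with order=1 and kind=0 (central differences) the kernel above
# should reproduce the analytic Jacobian of a smooth map up to O(h**2).
# ``_finite_difference_example`` is a hypothetical helper added for illustration only.
def _finite_difference_example():
    F = lambda v: np.array([v[0] ** 2 + v[1], np.sin(v[1])])
    x = np.array([1.0, 2.0])
    J_approx = generalized_finite_difference(F, x, h=1e-4, order=1, kind=0)
    J_exact = np.array([[2.0, 1.0], [0.0, np.cos(2.0)]])
    print(np.allclose(J_approx, J_exact, atol=1e-5))  # expected True
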
--------------------------------------------------------------------------------
/cyroot/vector_newton.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language=c++
2 | # cython: cdivision = True
3 | # cython: initializedcheck = False
4 | # cython: boundscheck = False
5 | # cython: profile = False
6 |
7 | from typing import Callable, Optional, Union
8 |
9 | import numpy as np
10 | cimport numpy as np
11 | import cython
12 | from dynamic_default_args import dynamic_default_args, named_default
13 |
14 | from ._check_args import _check_stop_cond_args
15 | from ._check_args cimport _check_stop_cond_vector_initial_guess
16 | from ._defaults import ETOL, ERTOL, PTOL, PRTOL, MAX_ITER, FINITE_DIFF_STEP
17 | from ._types import VectorLike, Array2DLike, Array3DLike
18 | from .fptr cimport NdArrayFPtr, PyNdArrayFPtr
19 | from .ops cimport scalar_ops as sops, vector_ops as vops, matrix_ops as mops
20 | from .return_types cimport NewtonMethodsReturnType
21 | from .utils._function_registering import register
22 | from .utils._warnings import warn_value
23 | from .vector_derivative_approximation import GeneralizedFiniteDifference, VectorDerivativeApproximation
24 |
25 | __all__ = [
26 | 'generalized_newton',
27 | 'generalized_halley',
28 | 'generalized_super_halley',
29 | 'generalized_chebyshev',
30 | 'generalized_tangent_hyperbolas',
31 | ]
32 |
33 | ################################################################################
34 | # Generalized Newton
35 | ################################################################################
36 | # noinspection DuplicatedCode
37 | cdef NewtonMethodsReturnType generalized_newton_kernel(
38 | NdArrayFPtr F,
39 | NdArrayFPtr J,
40 | np.ndarray[np.float64_t, ndim=1] x0,
41 | np.ndarray[np.float64_t, ndim=1] F_x0,
42 | np.ndarray[np.float64_t, ndim=2] J_x0,
43 | double etol=ETOL,
44 | double ertol=ERTOL,
45 | double ptol=PTOL,
46 | double prtol=PRTOL,
47 | unsigned long max_iter=MAX_ITER):
48 | cdef unsigned long step = 0
49 | cdef double precision, error
50 | cdef bint converged, optimal
51 | if _check_stop_cond_vector_initial_guess(x0, F_x0, etol, ertol, ptol, prtol,
52 | &precision, &error, &converged, &optimal):
53 | return NewtonMethodsReturnType(
54 | x0, F_x0, J_x0, step, (F.n_f_calls, J.n_f_calls), precision, error, converged, optimal)
55 |
56 | cdef bint use_derivative_approximation = isinstance(J, VectorDerivativeApproximation)
57 | cdef np.ndarray[np.float64_t, ndim=1] d_x
58 | converged = True
59 | while not (sops.isclose(0, error, ertol, etol) or
60 | sops.isclose(0, precision, prtol, ptol)):
61 | if step >= max_iter > 0:
62 | converged = False
63 | break
64 | step += 1
65 |
66 | d_x = mops.inv(J_x0, -F_x0)
67 | x0 = x0 + d_x
68 | F_x0 = F.eval(x0)
69 | if use_derivative_approximation:
70 | J_x0 = J.eval_with_f_val(x0, F_x0)
71 | else:
72 | J_x0 = J.eval(x0)
73 | precision = vops.max(vops.fabs(d_x))
74 | error = vops.max(vops.fabs(F_x0))
75 |
76 | optimal = sops.isclose(0, error, ertol, etol)
77 | return NewtonMethodsReturnType(
78 | x0, F_x0, J_x0, step, (F.n_f_calls, J.n_f_calls), precision, error, converged, optimal)
79 |
80 | # noinspection DuplicatedCode
81 | @register('cyroot.vector.newton')
82 | @dynamic_default_args()
83 | @cython.binding(True)
84 | def generalized_newton(F: Callable[[VectorLike], VectorLike],
85 | J: Optional[Callable[[VectorLike], Array2DLike]],
86 | x0: VectorLike,
87 | F_x0: Optional[VectorLike] = None,
88 | J_x0: Optional[Array2DLike] = None,
89 | h: Optional[Union[float, VectorLike]] = named_default(
90 | FINITE_DIFF_STEP=FINITE_DIFF_STEP),
91 | etol: float = named_default(ETOL=ETOL),
92 | ertol: float = named_default(ERTOL=ERTOL),
93 | ptol: float = named_default(PTOL=PTOL),
94 | prtol: float = named_default(PRTOL=PRTOL),
95 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> NewtonMethodsReturnType:
96 | """
97 | Generalized Newton's method for vector root-finding.
98 |
99 | Args:
100 | F (function): Function for which the root is sought.
101 | J (function, optional): Function returning the Jacobian
102 | of ``F``.
103 | x0 (np.ndarray): First initial point.
104 | F_x0 (np.ndarray, optional): Value evaluated at initial
105 | point.
106 | J_x0 (np.ndarray, optional): Jacobian at initial point.
107 | h (float, np.ndarray, optional): Finite difference step size,
108 |             ignored when ``J`` is not None. Defaults to {h}.
109 | etol (float, optional): Error tolerance, indicating the
110 | desired precision of the root. Defaults to {etol}.
111 | ertol (float, optional): Relative error tolerance.
112 | Defaults to {ertol}.
113 | ptol (float, optional): Precision tolerance, indicating
114 | the minimum change of root approximations or width of
115 | brackets (in bracketing methods) after each iteration.
116 | Defaults to {ptol}.
117 | prtol (float, optional): Relative precision tolerance.
118 | Defaults to {prtol}.
119 | max_iter (int, optional): Maximum number of iterations.
120 | If set to 0, the procedure will run indefinitely until
121 | stopping condition is met. Defaults to {max_iter}.
122 |
123 | Returns:
124 | solution: The solution represented as a ``RootResults`` object.
125 | """
126 | # check params
127 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
128 |
129 | x0 = np.asarray(x0, dtype=np.float64)
130 |
131 | F_wrapper = PyNdArrayFPtr.from_f(F)
132 | if J is None:
133 | J_wrapper = GeneralizedFiniteDifference(F_wrapper, h=h, order=1)
134 | else:
135 | J_wrapper = PyNdArrayFPtr.from_f(J)
136 |
137 | if F_x0 is None:
138 | F_x0 = F_wrapper.eval(x0)
139 | else:
140 | F_x0 = np.asarray(F_x0, dtype=np.float64)
141 | if J_x0 is None:
142 | J_x0 = J_wrapper.eval(x0)
143 | else:
144 | J_x0 = np.asarray(J_x0, dtype=np.float64)
145 |
146 | if x0.shape[0] < F_x0.shape[0]:
147 | warn_value('Input dimension is smaller than output dimension. '
148 | f'Got d_in={x0.shape[0]}, d_out={F_x0.shape[0]}.')
149 |
150 | res = generalized_newton_kernel(
151 | F_wrapper, J_wrapper, x0, F_x0, J_x0, etol, ertol, ptol, prtol, max_iter)
152 | return res
153 |
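# Usage sketch for ``generalized_newton`` above; ``_newton_example`` is a
# hypothetical helper added for illustration only. With J=None the Jacobian is
# approximated by the GeneralizedFiniteDifference defined in
# vector_derivative_approximation.pyx.
def _newton_example():
    F = lambda v: np.array([v[0] ** 2 + v[1] ** 2 - 2, v[0] - v[1]])
    res = generalized_newton(F, J=None, x0=np.array([2.0, 0.5]))
    print(res.root)  # expected to approach [1, 1]
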
154 | ################################################################################
155 | # Generalized Halley
156 | ################################################################################
157 | # noinspection DuplicatedCode
158 | cdef NewtonMethodsReturnType generalized_halley_kernel(
159 | NdArrayFPtr F,
160 | NdArrayFPtr J,
161 | NdArrayFPtr H,
162 | np.ndarray[np.float64_t, ndim=1] x0,
163 | np.ndarray[np.float64_t, ndim=1] F_x0,
164 | np.ndarray[np.float64_t, ndim=2] J_x0,
165 | np.ndarray[np.float64_t, ndim=3] H_x0,
166 | double etol=ETOL,
167 | double ertol=ERTOL,
168 | double ptol=PTOL,
169 | double prtol=PRTOL,
170 | unsigned long max_iter=MAX_ITER):
171 | cdef unsigned long d_in = x0.shape[0], d_out = F_x0.shape[0]
172 | cdef unsigned long step = 0
173 | cdef double precision, error
174 | cdef bint converged, optimal
175 | if _check_stop_cond_vector_initial_guess(x0, F_x0, etol, ertol, ptol, prtol,
176 | &precision, &error, &converged, &optimal):
177 | return NewtonMethodsReturnType(
178 |             x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
179 | precision, error, converged, optimal)
180 |
181 | cdef unsigned long i
182 | cdef bint[2] use_derivative_approximation = [isinstance(J, VectorDerivativeApproximation),
183 | isinstance(H, VectorDerivativeApproximation)]
184 | cdef np.ndarray[np.float64_t, ndim=1] a, b, denom = np.zeros(d_in, dtype=np.float64)
185 | cdef np.ndarray[np.float64_t, ndim=1] d_x = np.zeros(d_in, dtype=np.float64)
186 | converged = True
187 | while not (sops.isclose(0, error, ertol, etol) or
188 | sops.isclose(0, precision, prtol, ptol)):
189 | if step >= max_iter > 0:
190 | converged = False
191 | break
192 | step += 1
193 |
194 | a = mops.inv(J_x0, -F_x0) # -J^-1.F
195 | b = mops.inv(J_x0, H_x0.dot(a).dot(a)) # J^-1.H.a^2
196 | denom[:d_out] = a[:d_out] + .5 * b[:d_out]
197 | for i in range(min(d_in, d_out)):
198 | if denom[i] == 0:
199 | converged = False
200 | if not converged:
201 | break
202 |
203 | d_x[:d_out] = np.power(a[:d_out], 2) / denom[:d_out] # a^2 / (a + .5 * b)
204 | x0 = x0 + d_x
205 | F_x0 = F.eval(x0)
206 | if use_derivative_approximation[0]:
207 | J_x0 = J.eval_with_f_val(x0, F_x0)
208 | else:
209 | J_x0 = J.eval(x0)
210 | if use_derivative_approximation[1]:
211 | H_x0 = H.eval_with_f_val(x0, F_x0)
212 | else:
213 | H_x0 = H.eval(x0)
214 | precision = vops.max(vops.fabs(d_x))
215 | error = vops.max(vops.fabs(F_x0))
216 |
217 | optimal = sops.isclose(0, error, ertol, etol)
218 | return NewtonMethodsReturnType(
219 | x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
220 | precision, error, converged, optimal)
221 |
222 | # noinspection DuplicatedCode
223 | cdef NewtonMethodsReturnType generalized_modified_halley_kernel(
224 | NdArrayFPtr F,
225 | NdArrayFPtr J,
226 | NdArrayFPtr H,
227 | np.ndarray[np.float64_t, ndim=1] x0,
228 | np.ndarray[np.float64_t, ndim=1] F_x0,
229 | np.ndarray[np.float64_t, ndim=2] J_x0,
230 | np.ndarray[np.float64_t, ndim=3] H_x0,
231 | double alpha=0.5,
232 | double etol=ETOL,
233 | double ertol=ERTOL,
234 | double ptol=PTOL,
235 | double prtol=PRTOL,
236 | unsigned long max_iter=MAX_ITER):
237 | cdef unsigned long step = 0
238 | cdef double precision, error
239 | cdef bint converged, optimal
240 | if _check_stop_cond_vector_initial_guess(x0, F_x0, etol, ertol, ptol, prtol,
241 | &precision, &error, &converged, &optimal):
242 | return NewtonMethodsReturnType(
243 |             x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
244 | precision, error, converged, optimal)
245 |
246 | cdef bint[2] use_derivative_approximation = [isinstance(J, VectorDerivativeApproximation),
247 | isinstance(H, VectorDerivativeApproximation)]
248 | cdef np.ndarray[np.float64_t, ndim=1] d_x, a
249 | cdef np.ndarray[np.float64_t, ndim=2] L_F, I = np.eye(x0.shape[0], dtype=np.float64)
250 | converged = True
251 | while not (sops.isclose(0, error, ertol, etol) or
252 | sops.isclose(0, precision, prtol, ptol)):
253 | if step >= max_iter > 0:
254 | converged = False
255 | break
256 | step += 1
257 |
258 | a = mops.inv(J_x0, F_x0) # J^-1.F
259 | L_F = mops.inv(J_x0, H_x0.dot(a)) # J^-1.H.J^-1.F
260 |         d_x = -(I + .5 * L_F.dot(mops.inv(I - alpha * L_F))).dot(a)  # -(I + .5 * L_F.(I - alpha * L_F)^-1).a
261 | x0 = x0 + d_x
262 | F_x0 = F.eval(x0)
263 | if use_derivative_approximation[0]:
264 | J_x0 = J.eval_with_f_val(x0, F_x0)
265 | else:
266 | J_x0 = J.eval(x0)
267 | if use_derivative_approximation[1]:
268 | H_x0 = H.eval_with_f_val(x0, F_x0)
269 | else:
270 | H_x0 = H.eval(x0)
271 | precision = vops.max(vops.fabs(d_x))
272 | error = vops.max(vops.fabs(F_x0))
273 |
274 | optimal = sops.isclose(0, error, ertol, etol)
275 | return NewtonMethodsReturnType(
276 | x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
277 | precision, error, converged, optimal)
278 |
279 | # noinspection DuplicatedCode
280 | @register('cyroot.vector.newton')
281 | @dynamic_default_args()
282 | @cython.binding(True)
283 | def generalized_halley(F: Callable[[VectorLike], VectorLike],
284 | J: Optional[Callable[[VectorLike], Array2DLike]],
285 | H: Optional[Callable[[VectorLike], Array3DLike]],
286 | x0: VectorLike,
287 | F_x0: Optional[VectorLike] = None,
288 | J_x0: Optional[Array2DLike] = None,
289 | H_x0: Optional[Array3DLike] = None,
290 | alpha: Optional[float] = None,
291 | h: Optional[Union[float, VectorLike]] = named_default(
292 | FINITE_DIFF_STEP=FINITE_DIFF_STEP),
293 | etol: float = named_default(ETOL=ETOL),
294 | ertol: float = named_default(ERTOL=ERTOL),
295 | ptol: float = named_default(PTOL=PTOL),
296 | prtol: float = named_default(PRTOL=PRTOL),
297 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> NewtonMethodsReturnType:
298 | """
299 | Generalized Halley's method for vector root-finding as presented in the paper
300 | "Abstract Padé-approximants for the solution of a system of nonlinear equations".
301 |
302 | References:
303 | https://www.sciencedirect.com/science/article/pii/0898122183901190
304 |
305 | Args:
306 | F (function): Function for which the root is sought.
307 | J (function, optional): Function returning the Jacobian
308 | of ``F``.
309 | H (function, optional): Function returning the Hessian
310 | of ``F``.
311 | x0 (np.ndarray): First initial point.
312 | F_x0 (np.ndarray, optional): Value evaluated at initial
313 | point.
314 | J_x0 (np.ndarray, optional): Jacobian at initial point.
315 | H_x0 (np.ndarray, optional): Hessian at initial point.
316 |             alpha (float, optional): If set, the modified Halley
317 |                 formula with parameter ``alpha`` will be used.
318 | h (float, np.ndarray, optional): Finite difference step size,
319 | ignored when ``J`` and ``H`` are not None. Defaults to {h}.
320 | etol (float, optional): Error tolerance, indicating the
321 | desired precision of the root. Defaults to {etol}.
322 | ertol (float, optional): Relative error tolerance.
323 | Defaults to {ertol}.
324 | ptol (float, optional): Precision tolerance, indicating
325 | the minimum change of root approximations or width of
326 | brackets (in bracketing methods) after each iteration.
327 | Defaults to {ptol}.
328 | prtol (float, optional): Relative precision tolerance.
329 | Defaults to {prtol}.
330 | max_iter (int, optional): Maximum number of iterations.
331 | If set to 0, the procedure will run indefinitely until
332 |                 a stopping condition is met. Defaults to {max_iter}.
333 |
334 | Returns:
335 | solution: The solution represented as a ``RootResults`` object.
336 | """
337 | # check params
338 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
339 |
340 | x0 = np.asarray(x0, dtype=np.float64)
341 |
342 | F_wrapper = PyNdArrayFPtr.from_f(F)
343 | if J is None:
344 | J_wrapper = GeneralizedFiniteDifference(F_wrapper, h=h, order=1)
345 | else:
346 | J_wrapper = PyNdArrayFPtr.from_f(J)
347 | if H is None:
348 | H_wrapper = GeneralizedFiniteDifference(F_wrapper, h=h, order=2)
349 | else:
350 | H_wrapper = PyNdArrayFPtr.from_f(H)
351 |
352 | if F_x0 is None:
353 | F_x0 = F_wrapper.eval(x0)
354 | else:
355 | F_x0 = np.asarray(F_x0, dtype=np.float64)
356 | if J_x0 is None:
357 | J_x0 = J_wrapper.eval(x0)
358 | else:
359 | J_x0 = np.asarray(J_x0, dtype=np.float64)
360 | if H_x0 is None:
361 | H_x0 = H_wrapper.eval(x0)
362 | else:
363 | H_x0 = np.asarray(H_x0, dtype=np.float64)
364 |
365 | if x0.shape[0] < F_x0.shape[0]:
366 | warn_value('Input dimension is smaller than output dimension. '
367 | f'Got d_in={x0.shape[0]}, d_out={F_x0.shape[0]}.')
368 |
369 | if alpha is None:
370 | res = generalized_halley_kernel(
371 | F_wrapper, J_wrapper, H_wrapper, x0, F_x0, J_x0, H_x0,
372 | etol, ertol, ptol, prtol, max_iter)
373 | else:
374 | res = generalized_modified_halley_kernel(
375 | F_wrapper, J_wrapper, H_wrapper, x0, F_x0, J_x0, H_x0,
376 | alpha, etol, ertol, ptol, prtol, max_iter)
377 | return res
378 |
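A usage sketch for the function above (illustrative system). With ``J=None`` and ``H=None`` the Jacobian and Hessian are approximated by ``GeneralizedFiniteDifference`` of order 1 and 2, and supplying ``alpha`` routes the call to the modified-Halley kernel:

    import numpy as np

    from cyroot import generalized_halley

    F = lambda x: np.array([x[0] ** 2 + x[1] ** 2 - 2,
                            x[0] - x[1]])
    x0 = np.array([2., 0.5])

    res_classic = generalized_halley(F, None, None, x0)                # alpha=None, classical kernel
    res_modified = generalized_halley(F, None, None, x0, alpha=0.25)   # modified kernel
    print(res_classic)
    print(res_modified)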
379 | #------------------------
380 | # Super-Halley
381 | #------------------------
382 | # noinspection DuplicatedCode
383 | @register('cyroot.vector.newton')
384 | @dynamic_default_args()
385 | @cython.binding(True)
386 | def generalized_super_halley(F: Callable[[VectorLike], VectorLike],
387 | J: Optional[Callable[[VectorLike], Array2DLike]],
388 | H: Optional[Callable[[VectorLike], Array3DLike]],
389 | x0: VectorLike,
390 | F_x0: Optional[VectorLike] = None,
391 | J_x0: Optional[Array2DLike] = None,
392 | H_x0: Optional[Array3DLike] = None,
393 | h: Optional[Union[float, VectorLike]] = named_default(
394 | FINITE_DIFF_STEP=FINITE_DIFF_STEP),
395 | etol: float = named_default(ETOL=ETOL),
396 | ertol: float = named_default(ERTOL=ERTOL),
397 | ptol: float = named_default(PTOL=PTOL),
398 | prtol: float = named_default(PRTOL=PRTOL),
399 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> NewtonMethodsReturnType:
400 | """
401 | Generalized Super-Halley's method for vector root-finding.
402 | This is equivalent to calling ``generalized_halley`` with ``alpha=1``.
403 |
404 | References:
405 | https://www.sciencedirect.com/science/article/abs/pii/S0096300399001757
406 |
407 | Args:
408 | F (function): Function for which the root is sought.
409 | J (function, optional): Function returning the Jacobian
410 | of ``F``.
411 | H (function, optional): Function returning the Hessian
412 | of ``F``.
413 | x0 (np.ndarray): First initial point.
414 | F_x0 (np.ndarray, optional): Value evaluated at initial
415 | point.
416 | J_x0 (np.ndarray, optional): Jacobian at initial point.
417 | H_x0 (np.ndarray, optional): Hessian at initial point.
418 | h (float, np.ndarray, optional): Finite difference step size,
419 | ignored when ``J`` and ``H`` are not None. Defaults to {h}.
420 | etol (float, optional): Error tolerance, indicating the
421 | desired precision of the root. Defaults to {etol}.
422 | ertol (float, optional): Relative error tolerance.
423 | Defaults to {ertol}.
424 | ptol (float, optional): Precision tolerance, indicating
425 | the minimum change of root approximations or width of
426 | brackets (in bracketing methods) after each iteration.
427 | Defaults to {ptol}.
428 | prtol (float, optional): Relative precision tolerance.
429 | Defaults to {prtol}.
430 | max_iter (int, optional): Maximum number of iterations.
431 | If set to 0, the procedure will run indefinitely until
432 |                 a stopping condition is met. Defaults to {max_iter}.
433 |
434 | Returns:
435 | solution: The solution represented as a ``RootResults`` object.
436 | """
437 | return generalized_halley(F, J, H, x0, F_x0, J_x0, H_x0, 1, h, etol, ertol, ptol, prtol, max_iter)
438 |
439 | #------------------------
440 | # Chebyshev
441 | #------------------------
442 | # noinspection DuplicatedCode
443 | @register('cyroot.vector.newton')
444 | @dynamic_default_args()
445 | @cython.binding(True)
446 | def generalized_chebyshev(F: Callable[[VectorLike], VectorLike],
447 | J: Optional[Callable[[VectorLike], Array2DLike]],
448 | H: Optional[Callable[[VectorLike], Array3DLike]],
449 | x0: VectorLike,
450 | F_x0: Optional[VectorLike] = None,
451 | J_x0: Optional[Array2DLike] = None,
452 | H_x0: Optional[Array3DLike] = None,
453 | h: Optional[Union[float, VectorLike]] = named_default(
454 | FINITE_DIFF_STEP=FINITE_DIFF_STEP),
455 | etol: float = named_default(ETOL=ETOL),
456 | ertol: float = named_default(ERTOL=ERTOL),
457 | ptol: float = named_default(PTOL=PTOL),
458 | prtol: float = named_default(PRTOL=PRTOL),
459 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> NewtonMethodsReturnType:
460 | """
461 | Generalized Chebyshev's method for vector root-finding.
462 | This is equivalent to calling ``generalized_halley`` with ``alpha=0``.
463 |
464 | References:
465 | https://www.sciencedirect.com/science/article/abs/pii/S0096300399001757
466 |
467 | Args:
468 | F (function): Function for which the root is sought.
469 | J (function, optional): Function returning the Jacobian
470 | of ``F``.
471 | H (function, optional): Function returning the Hessian
472 | of ``F``.
473 | x0 (np.ndarray): First initial point.
474 | F_x0 (np.ndarray, optional): Value evaluated at initial
475 | point.
476 | J_x0 (np.ndarray, optional): Jacobian at initial point.
477 | H_x0 (np.ndarray, optional): Hessian at initial point.
478 | h (float, np.ndarray, optional): Finite difference step size,
479 | ignored when ``J`` and ``H`` are not None. Defaults to {h}.
480 | etol (float, optional): Error tolerance, indicating the
481 | desired precision of the root. Defaults to {etol}.
482 | ertol (float, optional): Relative error tolerance.
483 | Defaults to {ertol}.
484 | ptol (float, optional): Precision tolerance, indicating
485 | the minimum change of root approximations or width of
486 | brackets (in bracketing methods) after each iteration.
487 | Defaults to {ptol}.
488 | prtol (float, optional): Relative precision tolerance.
489 | Defaults to {prtol}.
490 | max_iter (int, optional): Maximum number of iterations.
491 | If set to 0, the procedure will run indefinitely until
492 |                 a stopping condition is met. Defaults to {max_iter}.
493 |
494 | Returns:
495 | solution: The solution represented as a ``RootResults`` object.
496 | """
497 | return generalized_halley(F, J, H, x0, F_x0, J_x0, H_x0, 0, h, etol, ertol, ptol, prtol, max_iter)
498 |
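To summarize the two wrappers above in code (an illustrative sketch; the equivalences restate what the docstrings already say): ``generalized_super_halley`` forwards ``alpha=1`` and ``generalized_chebyshev`` forwards ``alpha=0``, so each pair of calls below is expected to produce the same iterates:

    import numpy as np

    from cyroot import (generalized_chebyshev, generalized_halley,
                        generalized_super_halley)

    F = lambda x: np.array([x[0] ** 2 + x[1] ** 2 - 2, x[0] - x[1]])
    x0 = np.array([2., 0.5])

    print(generalized_super_halley(F, None, None, x0))      # expected to match ...
    print(generalized_halley(F, None, None, x0, alpha=1))   # ... this call
    print(generalized_chebyshev(F, None, None, x0))         # expected to match ...
    print(generalized_halley(F, None, None, x0, alpha=0))   # ... this call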
499 | #------------------------
500 | # Tangent Hyperbolas
501 | #------------------------
502 | # noinspection DuplicatedCode
503 | cdef NewtonMethodsReturnType generalized_tangent_hyperbolas_kernel(
504 | NdArrayFPtr F,
505 | NdArrayFPtr J,
506 | NdArrayFPtr H,
507 | np.ndarray[np.float64_t, ndim=1] x0,
508 | np.ndarray[np.float64_t, ndim=1] F_x0,
509 | np.ndarray[np.float64_t, ndim=2] J_x0,
510 | np.ndarray[np.float64_t, ndim=3] H_x0,
511 | int formula=2,
512 | double etol=ETOL,
513 | double ertol=ERTOL,
514 | double ptol=PTOL,
515 | double prtol=PRTOL,
516 | unsigned long max_iter=MAX_ITER):
517 | cdef unsigned long step = 0
518 | cdef double precision, error
519 | cdef bint converged, optimal
520 | if _check_stop_cond_vector_initial_guess(x0, F_x0, etol, ertol, ptol, prtol,
521 | &precision, &error, &converged, &optimal):
522 | return NewtonMethodsReturnType(
523 |             x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
524 | precision, error, converged, optimal)
525 |
526 | cdef bint[2] use_derivative_approximation = [isinstance(J, VectorDerivativeApproximation),
527 | isinstance(H, VectorDerivativeApproximation)]
528 | cdef np.ndarray[np.float64_t, ndim=1] d_x, a
529 | cdef np.ndarray[np.float64_t, ndim=2] I, B
530 | if formula == 1:
531 | I = np.eye(F_x0.shape[0], dtype=np.float64)
532 | converged = True
533 | while not (sops.isclose(0, error, ertol, etol) or
534 | sops.isclose(0, precision, prtol, ptol)):
535 | if step >= max_iter > 0:
536 | converged = False
537 | break
538 | step += 1
539 |
540 | a = mops.inv(J_x0, -F_x0) # -J^-1.F
541 | if formula == 2: # more likely
542 | d_x = mops.inv(J_x0 + .5 * H_x0.dot(a), -F_x0) # (J + .5 * H.a)^-1.F
543 | else:
544 | B = mops.inv(J_x0, H_x0.dot(a)) # J^-1.H.a
545 | d_x = mops.inv(I + .5 * B, a) # (I + .5 * J^-1.H.a)^-1.a
546 | x0 = x0 + d_x
547 | F_x0 = F.eval(x0)
548 | if use_derivative_approximation[0]:
549 | J_x0 = J.eval_with_f_val(x0, F_x0)
550 | else:
551 | J_x0 = J.eval(x0)
552 | if use_derivative_approximation[1]:
553 | H_x0 = H.eval_with_f_val(x0, F_x0)
554 | else:
555 | H_x0 = H.eval(x0)
556 | precision = vops.max(vops.fabs(d_x))
557 | error = vops.max(vops.fabs(F_x0))
558 |
559 | optimal = sops.isclose(0, error, ertol, etol)
560 | return NewtonMethodsReturnType(
561 | x0, F_x0, (J_x0, H_x0), step, (F.n_f_calls, J.n_f_calls, H.n_f_calls),
562 | precision, error, converged, optimal)
563 |
564 | # noinspection DuplicatedCode
565 | @register('cyroot.vector.newton')
566 | @dynamic_default_args()
567 | @cython.binding(True)
568 | def generalized_tangent_hyperbolas(F: Callable[[VectorLike], VectorLike],
569 | J: Optional[Callable[[VectorLike], Array2DLike]],
570 | H: Optional[Callable[[VectorLike], Array3DLike]],
571 | x0: VectorLike,
572 | F_x0: Optional[VectorLike] = None,
573 | J_x0: Optional[Array2DLike] = None,
574 | H_x0: Optional[Array3DLike] = None,
575 | formula: int = 2,
576 | h: Optional[Union[float, VectorLike]] = named_default(
577 | FINITE_DIFF_STEP=FINITE_DIFF_STEP),
578 | etol: float = named_default(ETOL=ETOL),
579 | ertol: float = named_default(ERTOL=ERTOL),
580 | ptol: float = named_default(PTOL=PTOL),
581 | prtol: float = named_default(PRTOL=PRTOL),
582 | max_iter: int = named_default(MAX_ITER=MAX_ITER)) -> NewtonMethodsReturnType:
583 | """
584 | Generalized Tangent Hyperbolas method for vector root-finding.
585 |
586 | This method is the same as Generalized Halley's method.
587 |     There are two formulas, ``formula=1`` and ``formula=2``; the latter
588 |     (default) requires one less matrix inversion.
589 | 
590 |     Both formulas compute the same update, merely arranged differently.
591 |
592 | Args:
593 | F (function): Function for which the root is sought.
594 | J (function, optional): Function returning the Jacobian
595 | of ``F``.
596 | H (function, optional): Function returning the Hessian
597 | of ``F``.
598 | x0 (np.ndarray): First initial point.
599 | F_x0 (np.ndarray, optional): Value evaluated at initial
600 | point.
601 | J_x0 (np.ndarray, optional): Jacobian at initial point.
602 | H_x0 (np.ndarray, optional): Hessian at initial point.
603 | formula (int): Formula 1 or 2. Defaults to 2.
604 | h (float, np.ndarray, optional): Finite difference step size,
605 | ignored when ``J`` and ``H`` are not None. Defaults to {h}.
606 | etol (float, optional): Error tolerance, indicating the
607 | desired precision of the root. Defaults to {etol}.
608 | ertol (float, optional): Relative error tolerance.
609 | Defaults to {ertol}.
610 | ptol (float, optional): Precision tolerance, indicating
611 | the minimum change of root approximations or width of
612 | brackets (in bracketing methods) after each iteration.
613 | Defaults to {ptol}.
614 | prtol (float, optional): Relative precision tolerance.
615 | Defaults to {prtol}.
616 | max_iter (int, optional): Maximum number of iterations.
617 | If set to 0, the procedure will run indefinitely until
618 |                 a stopping condition is met. Defaults to {max_iter}.
619 |
620 | Returns:
621 | solution: The solution represented as a ``RootResults`` object.
622 | """
623 | # check params
624 | etol, ertol, ptol, prtol, max_iter = _check_stop_cond_args(etol, ertol, ptol, prtol, max_iter)
625 | if formula not in [1, 2]:
626 | raise ValueError(f'Unknown formula {formula}.')
627 |
628 | x0 = np.asarray(x0, dtype=np.float64)
629 |
630 | F_wrapper = PyNdArrayFPtr.from_f(F)
631 | if J is None:
632 | J_wrapper = GeneralizedFiniteDifference(F_wrapper, h=h, order=1)
633 | else:
634 | J_wrapper = PyNdArrayFPtr.from_f(J)
635 | if H is None:
636 | H_wrapper = GeneralizedFiniteDifference(F_wrapper, h=h, order=2)
637 | else:
638 | H_wrapper = PyNdArrayFPtr.from_f(H)
639 |
640 | if F_x0 is None:
641 | F_x0 = F_wrapper.eval(x0)
642 | else:
643 | F_x0 = np.asarray(F_x0, dtype=np.float64)
644 | if J_x0 is None:
645 | J_x0 = J_wrapper.eval(x0)
646 | else:
647 | J_x0 = np.asarray(J_x0, dtype=np.float64)
648 | if H_x0 is None:
649 | H_x0 = H_wrapper.eval(x0)
650 | else:
651 | H_x0 = np.asarray(H_x0, dtype=np.float64)
652 |
653 | if x0.shape[0] < F_x0.shape[0]:
654 | warn_value('Input dimension is smaller than output dimension. '
655 | f'Got d_in={x0.shape[0]}, d_out={F_x0.shape[0]}.')
656 |
657 | res = generalized_tangent_hyperbolas_kernel(
658 | F_wrapper, J_wrapper, H_wrapper, x0, F_x0, J_x0, H_x0, formula,
659 | etol, ertol, ptol, prtol, max_iter)
660 | return res
661 |
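A usage sketch for the function above (illustrative system). The two formulas are algebraic rearrangements of the same update, so both calls are expected to converge to the same root; ``formula=2`` (the default) saves one matrix inversion per step:

    import numpy as np

    from cyroot import generalized_tangent_hyperbolas

    F = lambda x: np.array([x[0] ** 2 + x[1] ** 2 - 2, x[0] - x[1]])
    x0 = np.array([2., 0.5])

    print(generalized_tangent_hyperbolas(F, None, None, x0, formula=2))
    print(generalized_tangent_hyperbolas(F, None, None, x0, formula=1))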
--------------------------------------------------------------------------------
/cyroot/vector_root.py:
--------------------------------------------------------------------------------
1 | from inspect import getmembers
2 | from functools import partial
3 | from . import vector_bracketing, vector_quasi_newton, vector_newton
4 | from .utils._function_registering import is_tagged_with_any_startswith
5 |
6 | __all__ = [
7 | 'VECTOR_ROOT_FINDING_METHODS',
8 | 'find_vector_root',
9 | ]
10 |
11 | # noinspection DuplicatedCode
12 | VECTOR_ROOT_FINDING_METHODS = {}
13 | for module in [vector_bracketing, vector_quasi_newton, vector_newton]:
14 | VECTOR_ROOT_FINDING_METHODS.update(
15 | getmembers(module, partial(is_tagged_with_any_startswith, start='cyroot.vector')))
16 |
17 |
18 | # noinspection DuplicatedCode
19 | def find_vector_root(method: str, *args, **kwargs):
20 | """
21 | Find the root of a vector function.
22 |
23 | Args:
24 | method (str): Name of the method. A full list of supported
25 | methods is stored in ``VECTOR_ROOT_FINDING_METHODS``.
26 | *args: Extra arguments to be passed.
27 | **kwargs: Extra keyword arguments to be passed.
28 |
29 | Returns:
30 | solution: The solution represented as a ``RootResults`` object.
31 | """
32 | if method in VECTOR_ROOT_FINDING_METHODS.keys():
33 | return VECTOR_ROOT_FINDING_METHODS[method](*args, **kwargs)
34 | elif 'generalized_' + method in VECTOR_ROOT_FINDING_METHODS.keys():
35 | return VECTOR_ROOT_FINDING_METHODS['generalized_' + method](*args, **kwargs)
36 | else:
37 | raise ValueError(f'No implementation for {method} found. '
38 | f'Supported methods are: {", ".join(VECTOR_ROOT_FINDING_METHODS.keys())}')
39 |
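A usage sketch of the dispatcher (illustrative system). Because of the ``'generalized_' + method`` fallback above, the short name ``'halley'`` resolves to ``generalized_halley``; all remaining arguments are passed through untouched:

    import numpy as np

    from cyroot.vector_root import find_vector_root

    F = lambda x: np.array([x[0] ** 2 + x[1] ** 2 - 2, x[0] - x[1]])
    res = find_vector_root('halley', F, None, None, np.array([2., 0.5]))
    print(res)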
--------------------------------------------------------------------------------
/examples/scalar_root.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | from cyroot import *
4 | from utils import timeit
5 |
6 | f = lambda x: x ** 3 + 2 * x ** 2 - 3 * x + 1
7 | df = lambda x: 3 * x ** 2 + 4 * x - 3
8 | d2f = lambda x: 6 * x + 4
9 | d3f = lambda x: 6
10 | interval_f = lambda x_l, x_h: (x_l ** 3 + 2 * (min(abs(x_l), abs(x_h))
11 | if math.copysign(1, x_l) * math.copysign(1, x_h) > 0
12 | else 0) ** 2 - 3 * x_h + 1,
13 | x_h ** 3 + 2 * max(abs(x_l), abs(x_h)) ** 2 - 3 * x_l + 1)
14 |
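A brief reading of ``interval_f`` above (stated tentatively, continuing the definitions in this file): it returns a ``(lower, upper)`` enclosure of ``f`` over ``[x_l, x_h]``, which is the form of interval function that ``hybisect`` consumes, e.g.:

    lo, hi = interval_f(-1.0, 2.0)
    # every value of f on [-1, 2] should lie inside the returned enclosure
    assert lo <= f(0.5) <= hi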
15 |
16 | def test_output(etol=1e-8, ptol=1e-10):
17 | # examples output
18 | print('Output Test')
19 |
20 | print(f'\n{"Bracketing":-^50}')
21 | print('[Bisect]', bisect(f, -10, 10, etol=etol, ptol=ptol))
22 | print('[Modified Bisect]', bisect(f, -10, 10, algo='modified', etol=etol, ptol=ptol))
23 | print('[HyBisect]', hybisect(f, interval_f, -10, 10, etol=etol, ptol=ptol))
24 | print('[Regula Falsi]', regula_falsi(f, -10, 10, etol=etol, ptol=ptol))
25 | print('[Illinois]', illinois(f, -10, 10, etol=etol, ptol=ptol))
26 | print('[Pegasus]', pegasus(f, -10, 10, etol=etol, ptol=ptol))
27 | print('[Anderson-Bjorck]', anderson_bjorck(f, -10, 10, etol=etol, ptol=ptol))
28 | print('[Dekker]', dekker(f, -10, 10, etol=etol, ptol=ptol))
29 | print('[Brent]', brent(f, -10, 10, etol=etol, ptol=ptol))
30 | print('[Chandrupatla]', chandrupatla(f, -10, 10, etol=etol, ptol=ptol))
31 | print('[Ridders]', ridders(f, -10, 10, etol=etol, ptol=ptol))
32 | print('[Toms748]', toms748(f, -10, 10, k=1, etol=etol, ptol=ptol))
33 | print('[Wu]', wu(f, -10, 10, etol=etol, ptol=ptol))
34 | print('[ITP]', itp(f, -10, 10, etol=etol, ptol=ptol))
35 |
36 | print(f'\n{"Quasi-Newton":-^50}')
37 | print('[Secant]', secant(f, -10, 10, etol=etol, ptol=ptol))
38 | print('[Sidi]', sidi(f, [-10, -5, 0, 5, 10], etol=etol, ptol=ptol))
39 | print('[Steffensen]', steffensen(f, -5, etol=etol, ptol=ptol))
40 | print('[Inverse Quadratic Interp]', inverse_quadratic_interp(f, -10, -5, 0, etol=etol, ptol=ptol))
41 | print('[Hyperbolic Interp]', hyperbolic_interp(f, -10, -5, 0, etol=etol, ptol=ptol))
42 | print('[Muller]', muller(f, -10, -5, 0, etol=etol, ptol=ptol))
43 |
44 | print(f'\n{"Newton":-^50}')
45 | print('[Newton]', newton(f, df, -5, etol=etol, ptol=ptol))
46 | print('[Halley]', halley(f, df, d2f, -5, etol=etol, ptol=ptol))
47 | print('[Super-Halley]', super_halley(f, df, d2f, -5, etol=etol, ptol=ptol))
48 | print('[Chebyshev]', chebyshev(f, df, d2f, -5, etol=etol, ptol=ptol))
49 | print('[Tangent Hyperbolas]', tangent_hyperbolas(f, df, d2f, -5, etol=etol, ptol=ptol))
50 | print('[Householder]', householder(f, [df, d2f, d3f], -5, etol=etol, ptol=ptol))
51 |
52 |
53 | def test_speed(etol=1e-8, ptol=1e-10, times=100):
54 | # examples speed
55 | print('Speed Test')
56 |
57 | print(f'\n{"Bracketing":-^50}')
58 | timeit(bisect, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
59 | name='Bisect', number=times)
60 | timeit(regula_falsi, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
61 | name='Regula Falsi', number=times)
62 | timeit(illinois, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
63 | name='Illinois', number=times)
64 | timeit(pegasus, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
65 | name='Pegasus', number=times)
66 | timeit(anderson_bjorck, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
67 |            name='Anderson-Bjorck', number=times)
68 | timeit(dekker, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
69 | name='Dekker', number=times)
70 | timeit(brent, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
71 | name='Brent', number=times)
72 | timeit(chandrupatla, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
73 | name='Chandrupatla', number=times)
74 | timeit(ridders, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
75 | name='Ridders', number=times)
76 | timeit(toms748, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
77 | name='Toms748', number=times)
78 | timeit(wu, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
79 | name='Wu', number=times)
80 | timeit(itp, args=(f, -10, 10), kwargs=dict(etol=etol, ptol=ptol),
81 | name='ITP', number=times)
82 |
83 | print(f'\n{"Quasi-Newton":-^50}')
84 | timeit(secant, args=(f, -10, -5), kwargs=dict(etol=etol, ptol=ptol),
85 | name='Secant', number=times)
86 | timeit(sidi, args=(f, [-10, -5, 0, 5, 10]), kwargs=dict(etol=etol, ptol=ptol),
87 | name='Sidi', number=times)
88 | timeit(steffensen, args=(f, -5), kwargs=dict(etol=etol, ptol=ptol),
89 | name='Steffensen', number=times)
90 | timeit(inverse_quadratic_interp, args=(f, -10, -5, 0), kwargs=dict(etol=etol, ptol=ptol),
91 | name='Inverse Quadratic Interp', number=times)
92 | timeit(hyperbolic_interp, args=(f, -10, -5, 0), kwargs=dict(etol=etol, ptol=ptol),
93 | name='Hyperbolic Interp', number=times)
94 | timeit(muller, args=(f, -10, -5, 0), kwargs=dict(etol=etol, ptol=ptol),
95 | name='Muller', number=times)
96 |
97 | print(f'\n{"Newton":-^50}')
98 | timeit(newton, args=(f, df, -5), kwargs=dict(etol=etol, ptol=ptol),
99 | name='Newton', number=times)
100 | timeit(halley, args=(f, df, d2f, -5), kwargs=dict(etol=etol, ptol=ptol),
101 | name='Halley', number=times)
102 | timeit(super_halley, args=(f, df, d2f, -5), kwargs=dict(etol=etol, ptol=ptol),
103 | name='Super-Halley', number=times)
104 | timeit(chebyshev, args=(f, df, d2f, -5), kwargs=dict(etol=etol, ptol=ptol),
105 | name='Chebyshev', number=times)
106 | timeit(tangent_hyperbolas, args=(f, df, d2f, -5), kwargs=dict(etol=etol, ptol=ptol),
107 | name='Tangent Hyperbolas', number=times)
108 | timeit(householder, args=(f, [df, d2f, d3f], -5), kwargs=dict(etol=etol, ptol=ptol, c_code=True),
109 | name='Householder', number=times, warmup=True)
110 |
111 |
112 | if __name__ == '__main__':
113 | test_output()
114 | # test_speed()
115 |
--------------------------------------------------------------------------------
/examples/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .time_meter import *
2 |
--------------------------------------------------------------------------------
/examples/utils/time_meter.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 |
4 | from tqdm import trange
5 |
6 | __all__ = [
7 | 'TimeMeter',
8 | 'timeit',
9 | ]
10 |
11 |
12 | class TimeMeter:
13 | def __init__(self):
14 | self.n = self.avg = self._start = self._end = self.last_elapsed_time = 0
15 |
16 | def reset(self):
17 | self.n = self.avg = self._start = self._end = self.last_elapsed_time = 0
18 |
19 | def __enter__(self):
20 | self._start = time.time()
21 |
22 | def __exit__(self, exc_type, exc_val, exc_tb):
23 | self._end = time.time()
24 | self.last_elapsed_time = self._end - self._start
25 | self.avg = (self.avg * self.n + self.last_elapsed_time) / (self.n + 1)
26 | self.n += 1
27 |
28 | @property
29 | def fps(self):
30 | return 1 / self.avg if self.avg else float('nan')
31 |
32 |
33 | def timeit(func, args=(), kwargs={}, name=None, number=100, warmup=False):
34 | name = name if name is not None else func.__name__
35 | time_meter = TimeMeter()
36 | pbar = trange(number, desc=f'[{name}]', file=sys.stdout)
37 | for _ in pbar:
38 | if warmup and _ == 1:
39 | time_meter.reset()
40 | with time_meter:
41 | func(*args, **kwargs)
42 | pbar.set_description(f'[{name}] fps={time_meter.fps:.03f}')
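A small usage sketch of ``timeit`` (illustrative): with ``warmup=True`` the measurement of the very first call is discarded, which is presumably intended for functions whose first invocation carries one-off overhead:

    import math

    from utils import timeit  # i.e. examples/utils, when run from the examples directory

    timeit(math.sqrt, args=(2.0,), name='sqrt', number=1000)
    timeit(math.sqrt, args=(2.0,), name='sqrt (warmed up)', number=1000, warmup=True)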
43 |
--------------------------------------------------------------------------------
/examples/vector_root.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from cyroot import *
4 |
5 | F = lambda x: np.array([x[0] ** 3 - 3 * x[0] * x[1] + 5 * x[1] - 7,
6 | x[0] ** 2 + x[0] * x[1] ** 2 - 4 * x[1] ** 2 + 3.5])
7 | J = lambda x: np.array([
8 | [3 * x[0] ** 2 - 3 * x[1], -3 * x[0] + 5],
9 | [2 * x[0] + x[1] ** 2, 2 * x[0] * x[1] - 8 * x[1]]
10 | ])
11 | H = lambda x: np.array([
12 | [[6 * x[0], -3],
13 | [-3, 0]],
14 | [[2, 2 * x[1]],
15 | [2 * x[1], 2 * x[0] - 8]],
16 | ])
17 |
18 |
19 | def test_output(etol=1e-8, ptol=1e-12):
20 | # examples output
21 | print('Output Test')
22 |
23 | print(f'\n{"Bracketing":-^50}')
24 | print('[Vrahatis]', vrahatis(F, np.array([[0., 0.],
25 | [3, -1.],
26 | [2.5, -5.],
27 | [1., -2.]]),
28 | etol=etol, ptol=ptol))
29 |
30 | print(f'\n{"Quasi-Newton":-^50}')
31 | print('[Wolfe-Bittner]', wolfe_bittner(F, np.array([[2., 2.],
32 | [4., 7.],
33 | [-1., 0.]]),
34 | etol=etol, ptol=ptol))
35 | print('[Robinson]', robinson(F, x0=[2., 2.], x1=[4., 7.], etol=etol, ptol=ptol))
36 | print('[Barnes]', barnes(F, [10., 10.], etol=etol, ptol=ptol))
37 | print('[Traub-Steffensen]', traub_steffensen(F, [4., -2.], etol=etol, ptol=ptol))
38 | print('[Broyden Good]', broyden(F, [10., 10.], algo='good', etol=etol, ptol=ptol))
39 | print('[Broyden Bad]', broyden(F, [10., 10.], algo='bad', etol=etol, ptol=ptol))
40 | print('[Klement]', klement(F, [10., 10.], etol=etol, ptol=ptol))
41 |
42 | print(f'\n{"Newton":-^50}')
43 | print('[Newton]', generalized_newton(F, J, [10., 10.], etol=etol, ptol=ptol))
44 | print('[Halley]', generalized_halley(F, J, H, [10., 10.], etol=etol, ptol=ptol))
45 | print('[Super-Halley]', generalized_super_halley(F, J, H, [10., 10.], etol=etol, ptol=ptol))
46 | print('[Chebyshev]', generalized_chebyshev(F, J, H, [10., 10.], etol=etol, ptol=ptol))
47 | print('[Tangent Hyperbolas]', generalized_tangent_hyperbolas(F, J, H, x0=[10., 10.], etol=etol, ptol=ptol))
48 |
49 |
50 | if __name__ == '__main__':
51 | test_output()
52 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel",
5 | "Cython",
6 | "dynamic-default-args",
7 | "numpy",
8 | "scipy",
9 | "sympy",
10 | ]
11 |
12 | [tool.cibuildwheel]
13 | build = ["cp36-*", "cp37-*", "cp38-*", "cp39-*", "cp310-*", "cp311-*"]
14 | archs = ["auto64"]
15 | skip = ["*-musllinux_*", "*-win32", "*-manylinux_i686"]
16 |
17 | [tool.cibuildwheel.linux]
18 | before-all = [
19 | "yum install -y lapack-devel",
20 | "yum install -y blas-devel",
21 | ]
22 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | dynamic-default-args
2 | numpy
3 | scipy
4 | sympy
5 |
--------------------------------------------------------------------------------
/resources/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspiros/cy-root/7150aad4e83ff45037694517641746600b508ea5/resources/logo.png
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = cy-root
3 | description = Cython implementations of multiple root-finding methods.
4 | long_description = file: README.md
5 | long_description_content_type = text/markdown
6 | license = MIT
7 | license_files = LICENSE.txt
8 | author = Hoang-Nhat Tran (inspiros)
9 | author_email = hnhat.tran@gmail.com
10 | url = https://github.com/inspiros/cy-root
11 | download_url = https://pypi.org/project/cy-root
12 | project_urls =
13 | Source = https://github.com/inspiros/cy-root
14 | classifiers =
15 | Development Status :: 3 - Alpha
16 | Intended Audience :: Developers
17 | Intended Audience :: Education
18 | Intended Audience :: Science/Research
19 | License :: OSI Approved :: MIT License
20 | Programming Language :: Python
21 | Programming Language :: Python :: 3
22 | Programming Language :: Python :: 3.6
23 | Programming Language :: Python :: 3.7
24 | Programming Language :: Python :: 3.8
25 | Programming Language :: Python :: 3.9
26 | Programming Language :: Python :: 3.10
27 | Programming Language :: Python :: 3.11
28 | Programming Language :: Cython
29 | Operating System :: OS Independent
30 | Topic :: Scientific/Engineering :: Mathematics
31 | keywords = root-finding
32 |
33 | [options]
34 | zip_safe = False
35 | include_package_data = True
36 | packages = find:
37 | python_requires = >=3.6
38 | setup_requires = dynamic-default-args; numpy; scipy; sympy
39 | install_requires = dynamic-default-args; numpy; scipy; sympy
40 |
41 | [options.extras_require]
42 | dev = cython
43 | examples = tqdm
44 |
45 | [options.packages.find]
46 | exclude =
47 | examples*
48 | tools*
49 | docs*
50 | resources*
51 | tests*
52 |
53 | [options.package_data]
54 | * = *.pyx, *.pxd, *.h, *.hpp, *.c, *.cpp, *.
55 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from setuptools import Extension, setup
4 |
5 | try:
6 | from Cython.Distutils import build_ext
7 | from Cython.Build import cythonize
8 | except ImportError:
9 | use_cython = False
10 | else:
11 | use_cython = True
12 |
13 | PACKAGE_ROOT = 'cyroot'
14 |
15 |
16 | def get_version(version_file='_version.py'):
17 | import importlib.util
18 | version_file_path = os.path.abspath(os.path.join(PACKAGE_ROOT, version_file))
19 | spec = importlib.util.spec_from_file_location('_version', version_file_path)
20 | version_module = importlib.util.module_from_spec(spec)
21 | spec.loader.exec_module(version_module)
22 | return str(version_module.__version__)
23 |
24 |
25 | def get_ext_modules():
26 | import numpy
27 | # Find all includes
28 | include_dirs = [
29 | PACKAGE_ROOT,
30 | numpy.get_include(),
31 | ]
32 |
33 | ext_modules = []
34 | if use_cython:
35 | is_ext_file = lambda f: f.endswith('.pyx')
36 | else:
37 | is_ext_file = lambda f: os.path.splitext(f)[1] in ['.c', '.cpp']
38 |
39 | for root, dirs, files in os.walk(PACKAGE_ROOT):
40 | for d in dirs:
41 | dir_path = os.path.join(root, d)
42 | if any(is_ext_file(f) for f in os.listdir(dir_path)):
43 | include_dirs.append(dir_path)
44 |
45 | for root, dirs, files in os.walk(PACKAGE_ROOT):
46 | for f in filter(lambda f: is_ext_file(f), files):
47 | f_path = os.path.join(root, f)
48 | ext_modules.append(
49 | Extension(name=os.path.splitext(f_path)[0].replace(os.sep, '.'),
50 | sources=[f_path],
51 | include_dirs=include_dirs)
52 | )
53 | if use_cython:
54 |         # Cythonize the collected .pyx sources into extension modules
55 | ext_modules = cythonize(ext_modules, language_level='3')
56 | return ext_modules
57 |
58 |
59 | def setup_package():
60 | setup(
61 | version=get_version(),
62 | ext_modules=get_ext_modules(),
63 | cmdclass={'build_ext': build_ext} if use_cython else {},
64 | )
65 |
66 |
67 | if __name__ == '__main__':
68 | setup_package()
69 |
--------------------------------------------------------------------------------