├── numethods
    ├── utils.py
    ├── exceptions.py
    ├── __init__.py
    ├── differentiation.py
    ├── quadrature.py
    ├── interpolation.py
    ├── orthogonal.py
    ├── roots.py
    ├── eigen.py
    ├── ode.py
    ├── solvers.py
    ├── linalg.py
    └── fitting.py
├── pyproject.toml
├── LICENSE
├── tests
    └── test_basic.py
├── tutorials
    ├── README.md
    ├── tutorial3_orthogonalization.ipynb
    ├── tutorial1_vectors_matrices.ipynb
    ├── tutorial4_root_finding.ipynb
    └── tutorial2_linear_systems.ipynb
├── README.md
└── examples
    └── demo.py

/numethods/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | def relative_tolerance_reached(delta: float, value: float, tol: float) -> bool: 3 | return abs(delta) <= tol * (1.0 + abs(value)) 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "numethods" 7 | version = "0.1.0" 8 | description = "Some numerical methods implemented in Python" 9 | authors = [ 10 | { name = "Deniz Donmez", email = "denzdonmez@gmail.com" } 11 | ] 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | license = { text = "MIT" } 15 | 16 | [tool.setuptools.packages.find] 17 | where = ["."] 18 | include = ["numethods"] -------------------------------------------------------------------------------- /numethods/exceptions.py: -------------------------------------------------------------------------------- 1 | class NumericalError(Exception): 2 | """Base class for numerical method errors.""" 3 | pass 4 | 5 | class NonSquareMatrixError(NumericalError): 6 | """Raised when a non-square matrix is provided where a square matrix is required.""" 7 | pass 8 | 9 | class SingularMatrixError(NumericalError): 10 | """Raised when a matrix is singular to working precision.""" 11 | pass 12 | 13 | class NotSymmetricError(NumericalError): 14 | """Raised when a matrix expected to be symmetric is not.""" 15 | pass 16 | 17 | class NotPositiveDefiniteError(NumericalError): 18 | """Raised when a matrix expected to be SPD is not.""" 19 | pass 20 | 21 | class ConvergenceError(NumericalError): 22 | """Raised when an iterative method fails to converge within limits.""" 23 | pass 24 | 25 | class DomainError(NumericalError): 26 | """Raised when inputs violate a method's domain assumptions.""" 27 | pass 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Deniz Dönmez 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /numethods/__init__.py: -------------------------------------------------------------------------------- 1 | from .linalg import Matrix, Vector 2 | from .orthogonal import ( 3 | QRGramSchmidt, 4 | QRModifiedGramSchmidt, 5 | QRHouseholder, 6 | QRSolver, 7 | LeastSquaresSolver, 8 | ) 9 | from .solvers import LUDecomposition, GaussJordan, Jacobi, GaussSeidel, Cholesky 10 | from .roots import Bisection, FixedPoint, Secant, NewtonRoot, print_trace 11 | from .interpolation import NewtonInterpolation, LagrangeInterpolation 12 | from .quadrature import Trapezoidal, Simpson, GaussLegendre 13 | from .eigen import ( 14 | PowerIteration, 15 | InversePowerIteration, 16 | RayleighQuotientIteration, 17 | QREigenvalues, 18 | SVD, 19 | ) 20 | from .ode import ( 21 | Euler, 22 | Heun, 23 | RK2, 24 | RK4, 25 | BackwardEuler, 26 | ODETrapezoidal, 27 | AdamsBashforth, 28 | AdamsMoulton, 29 | PredictorCorrector, 30 | RK45, 31 | ) 32 | from .differentiation import ( 33 | ForwardDiff, 34 | BackwardDiff, 35 | CentralDiff, 36 | CentralDiff4th, 37 | SecondDerivative, 38 | RichardsonExtrap, 39 | ) 40 | from .fitting import PolyFit, LinearFit, ExpFit, NonlinearFit, plot_fit, plot_residuals 41 | 42 | from .exceptions import * 43 | -------------------------------------------------------------------------------- /tests/test_basic.py: -------------------------------------------------------------------------------- 1 | import math 2 | from numethods import * 3 | 4 | def approx_equal(a, b, tol=1e-8): 5 | return abs(a-b) <= tol*(1+abs(b)) 6 | 7 | def test_lu_gauss_seidel(): 8 | A = Matrix([[10, -1, 2, 0], 9 | [-1, 11, -1, 3], 10 | [2, -1, 10, -1], 11 | [0, 3, -1, 8]]) 12 | b = Vector([6, 25, -11, 15]) 13 | x_lu = LUDecomposition(A).solve(b) 14 | x_gs = GaussSeidel(A, b, tol=1e-12).solve() 15 | for i in range(4): 16 | assert approx_equal(x_lu[i], x_gs[i]) 17 | 18 | def test_cholesky(): 19 | A = Matrix([[4, 1, 1], 20 | [1, 3, 0], 21 | [1, 0, 2]]) 22 | b = Vector([1, 2, 3]) 23 | x = Cholesky(A).solve(b) 24 | Ax = [sum(A.data[i][j]*x[j] for j in range(3)) for i in range(3)] 25 | for i in range(3): 26 | assert approx_equal(Ax[i], b[i]) 27 | 28 | def test_roots(): 29 | f = lambda x: x**3 - 2 30 | df = lambda x: 3*x**2 31 | r = NewtonRoot(f, df, 1.0).solve() 32 | assert approx_equal(r, 2**(1/3)) 33 | b = Bisection(f, 0, 2).solve() 34 | assert approx_equal(b, 2**(1/3)) 35 | 36 | def test_interp(): 37 | x = [0,1,2] 38 | y = [1,3,2] 39 | n = NewtonInterpolation(x,y) 40 | l = LagrangeInterpolation(x,y) 41 | for t in [0,0.5,1.7,2.0]: 42 | assert approx_equal(n.evaluate(t), l.evaluate(t), tol=1e-9) 43 | -------------------------------------------------------------------------------- /tutorials/README.md: -------------------------------------------------------------------------------- 1 | # Tutorial Series 2 | 3 | This package comes with a set of Jupyter notebooks designed as a structured tutorial series in **numerical methods**, both mathematically rigorous and 
hands-on with code. 4 | 5 | ## Core Tutorials 6 | 7 | 1. [Tutorial 1: Vectors and Matrices](./tutorial1_vectors_matrices.ipynb) 8 | 9 | - Definitions of vectors and matrices. 10 | - Vector operations: addition, scalar multiplication, dot product, norms. 11 | - Matrix operations: addition, multiplication, transpose, inverse. 12 | - Matrix and vector norms. 13 | - Examples with `numethods.linalg`. 14 | 15 | 2. [Tutorial 2: Linear Systems of Equations](./tutorial2_linear_systems.ipynb) 16 | 17 | - Gaussian elimination and Gauss–Jordan. 18 | - LU decomposition. 19 | - Cholesky decomposition. 20 | - Iterative methods: Jacobi and Gauss-Seidel. 21 | - Examples with `numethods.solvers`. 22 | 23 | 3. [Tutorial 3: Orthogonalization and QR Factorization](./tutorial3_orthogonalization.ipynb) 24 | 25 | - Inner products and orthogonality. 26 | - Gram–Schmidt process (classical and modified). 27 | - Householder reflections. 28 | - QR decomposition and applications. 29 | - Examples with `numethods.orthogonal`. 30 | 31 | 4. [Tutorial 4: Root-Finding Methods](./tutorial4_root_finding.ipynb) 32 | 33 | - Bisection method. 34 | - Fixed-point iteration. 35 | - Newton’s method. 36 | - Secant method. 37 | - Convergence analysis and error behavior. 38 | - Trace outputs for iteration history. 39 | - Examples with `numethods.roots`. 40 | 41 | --- 42 | -------------------------------------------------------------------------------- /numethods/differentiation.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable 3 | 4 | 5 | # ---------------------------- 6 | # First derivative approximations 7 | # ---------------------------- 8 | 9 | 10 | def ForwardDiff(f: Callable[[float], float], x: float, h: float = 1e-5) -> float: 11 | """Forward finite difference approximation of f'(x).""" 12 | return (f(x + h) - f(x)) / h 13 | 14 | 15 | def BackwardDiff(f: Callable[[float], float], x: float, h: float = 1e-5) -> float: 16 | """Backward finite difference approximation of f'(x).""" 17 | return (f(x) - f(x - h)) / h 18 | 19 | 20 | def CentralDiff(f: Callable[[float], float], x: float, h: float = 1e-5) -> float: 21 | """Central finite difference approximation of f'(x) (2nd-order accurate).""" 22 | return (f(x + h) - f(x - h)) / (2 * h) 23 | 24 | 25 | def CentralDiff4th(f: Callable[[float], float], x: float, h: float = 1e-5) -> float: 26 | """Fourth-order accurate central difference approximation of f'(x).""" 27 | return (-f(x + 2 * h) + 8 * f(x + h) - 8 * f(x - h) + f(x - 2 * h)) / (12 * h) 28 | 29 | 30 | # ---------------------------- 31 | # Second derivative 32 | # ---------------------------- 33 | 34 | 35 | def SecondDerivative(f: Callable[[float], float], x: float, h: float = 1e-5) -> float: 36 | """Central difference approximation of second derivative f''(x).""" 37 | return (f(x + h) - 2 * f(x) + f(x - h)) / (h**2) 38 | 39 | 40 | # ---------------------------- 41 | # Richardson Extrapolation 42 | # ---------------------------- 43 | 44 | 45 | def RichardsonExtrap(f: Callable[[float], float], x: float, h: float = 1e-2) -> float: 46 | """Richardson extrapolation to improve derivative accuracy. 47 | Combines estimates with step h and h/2. 
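Returns (4*D(h/2) - D(h)) / 3, which cancels the leading O(h^2) error term of the central difference and gives a 4th-order accurate estimate.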
48 | """ 49 | D_h = CentralDiff(f, x, h) 50 | D_h2 = CentralDiff(f, x, h / 2) 51 | return (4 * D_h2 - D_h) / 3 52 | -------------------------------------------------------------------------------- /numethods/quadrature.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable 3 | import math 4 | 5 | 6 | class Quadrature: 7 | """Base class for numerical integration on [a, b].""" 8 | 9 | def __init__(self, f: Callable[[float], float], a: float, b: float, n: int = 100): 10 | self.f = f 11 | self.a = a 12 | self.b = b 13 | self.n = n 14 | 15 | def integrate(self) -> float: 16 | raise NotImplementedError 17 | 18 | 19 | class Trapezoidal(Quadrature): 20 | """Composite trapezoidal rule.""" 21 | 22 | def integrate(self) -> float: 23 | h = (self.b - self.a) / self.n 24 | s = 0.5 * (self.f(self.a) + self.f(self.b)) 25 | for i in range(1, self.n): 26 | s += self.f(self.a + i * h) 27 | return h * s 28 | 29 | 30 | class Simpson(Quadrature): 31 | """Composite Simpson’s rule (n must be even).""" 32 | 33 | def integrate(self) -> float: 34 | if self.n % 2 != 0: 35 | raise ValueError("n must be even for Simpson's rule") 36 | h = (self.b - self.a) / self.n 37 | s = self.f(self.a) + self.f(self.b) 38 | for i in range(1, self.n): 39 | coef = 4 if i % 2 == 1 else 2 40 | s += coef * self.f(self.a + i * h) 41 | return h * s / 3.0 42 | 43 | 44 | class GaussLegendre(Quadrature): 45 | """Gauss–Legendre quadrature (supports 2- and 3-point).""" 46 | 47 | def __init__(self, f, a, b, n=2): 48 | super().__init__(f, a, b, n) 49 | 50 | def integrate(self) -> float: 51 | if self.n == 2: 52 | nodes = [-1 / math.sqrt(3), 1 / math.sqrt(3)] 53 | weights = [1, 1] 54 | elif self.n == 3: 55 | nodes = [-math.sqrt(3 / 5), 0.0, math.sqrt(3 / 5)] 56 | weights = [5 / 9, 8 / 9, 5 / 9] 57 | else: 58 | raise NotImplementedError("Only 2- and 3-point Gauss-Legendre supported") 59 | 60 | mid = 0.5 * (self.a + self.b) 61 | half = 0.5 * (self.b - self.a) 62 | return half * sum(w * self.f(mid + half * x) for x, w in zip(nodes, weights)) 63 | -------------------------------------------------------------------------------- /numethods/interpolation.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List 3 | 4 | 5 | class NewtonInterpolation: 6 | """Polynomial interpolation via divided differences.""" 7 | 8 | def __init__(self, x: List[float], y: List[float]): 9 | if len(x) != len(y): 10 | raise ValueError("x and y must have same length") 11 | if len(set(x)) != len(x): 12 | raise ValueError("x values must be distinct") 13 | self.x = [float(v) for v in x] 14 | self.coeffs = self._divided_differences(self.x, [float(v) for v in y]) 15 | 16 | def _divided_differences(self, x: List[float], y: List[float]) -> List[float]: 17 | n = len(x) 18 | coef = y[:] 19 | for j in range(1, n): 20 | for i in range(n - 1, j - 1, -1): 21 | denom = x[i] - x[i - j] 22 | if abs(denom) < 1e-20: 23 | raise ZeroDivisionError("Repeated x values in divided differences") 24 | coef[i] = (coef[i] - coef[i - 1]) / denom 25 | return coef 26 | 27 | def evaluate(self, t: float) -> float: 28 | n = len(self.x) 29 | result = 0.0 30 | for i in reversed(range(n)): 31 | result = result * (t - self.x[i]) + self.coeffs[i] 32 | return result 33 | 34 | 35 | class LagrangeInterpolation: 36 | """Lagrange-form polynomial interpolation.""" 37 | 38 | def __init__(self, x: List[float], y: List[float]): 39 | if 
len(x) != len(y): 40 | raise ValueError("x and y must have same length") 41 | if len(set(x)) != len(x): 42 | raise ValueError("x values must be distinct") 43 | self.x = [float(v) for v in x] 44 | self.y = [float(v) for v in y] 45 | 46 | def evaluate(self, t: float) -> float: 47 | x, y = self.x, self.y 48 | n = len(x) 49 | total = 0.0 50 | for i in range(n): 51 | L = 1.0 52 | for j in range(n): 53 | if i == j: 54 | continue 55 | denom = x[i] - x[j] 56 | if abs(denom) < 1e-20: 57 | raise ZeroDivisionError("Repeated x values in Lagrange basis") 58 | L *= (t - x[j]) / denom 59 | total += y[i] * L 60 | return total 61 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # numethods 2 | 3 | A lightweight, from-scratch, object-oriented Python package implementing classic numerical methods. 4 | **No NumPy / SciPy solvers are used**; algorithms are implemented transparently for learning and research. 5 | 6 | ## Why this might be useful 7 | 8 | - Great for teaching/learning numerical methods step by step. 9 | - Good reference for people writing their own solvers in C/Fortran/Julia. 10 | - Lightweight, no dependencies. 11 | - Consistent object-oriented API (e.g. `.solve()`). 12 | 13 | ## Tutorial Series 14 | 15 | This package comes with a set of Jupyter notebooks designed as a structured tutorial series in **numerical methods**, both mathematically rigorous and hands-on with code. See [Tutorials](./tutorials/README.md). 16 | 17 | ## Features 18 | 19 | ### Linear system solvers 20 | 21 | - **LU decomposition** (with partial pivoting): `LUDecomposition` 22 | - **Gauss-Jordan** elimination: `GaussJordan` 23 | - **Jacobi** iterative method: `Jacobi` 24 | - **Gauss-Seidel** iterative method: `GaussSeidel` 25 | - **Cholesky** factorization (SPD): `Cholesky` 26 | 27 | ### Root-finding 28 | 29 | - **Bisection**: `Bisection` 30 | - **Fixed-Point Iteration**: `FixedPoint` 31 | - **Secant**: `Secant` 32 | - **Newton's method** (for roots): `NewtonRoot` 33 | 34 | ### Interpolation 35 | 36 | - **Newton** (divided differences): `NewtonInterpolation` 37 | - **Lagrange** polynomials: `LagrangeInterpolation` 38 | 39 | ### Orthogonalization, QR, and Least Squares 40 | 41 | - **Classical Gram–Schmidt**: `QRGramSchmidt` 42 | - **Modified Gram–Schmidt**: `QRModifiedGramSchmidt` 43 | - **Householder QR** (numerically stable): `QRHouseholder` 44 | - **QR-based linear solver** (square systems): `QRSolver` 45 | - **Least Squares** for overdetermined systems (via QR): `LeastSquaresSolver` 46 | 47 | ### Eigenvalue methods 48 | 49 | - **Power Iteration** (dominant eigenvalue/vector): `PowerIteration` 50 | - **Inverse Power Iteration** (optionally shifted): `InversePowerIteration` 51 | - **Rayleigh Quotient Iteration**: `RayleighQuotientIteration` 52 | - **QR eigenvalue iteration** (unshifted, educational): `QREigenvalues` 53 | 54 | ### Singular Value Decomposition 55 | 56 | - **SVD** via eigen-decomposition of \(A^T A\): `SVD` 57 | 58 | ### ODE solvers 59 | 60 | **Initial value problem solvers** for \( y'(t) = f(t,y), \; y(t_0)=y_0 \) 61 | 62 | - **Euler's method** (explicit, first order): `Euler` 63 | - **Heun's method** / Improved Euler (2nd order): `Heun` 64 | - **Runge-Kutta 2** (midpoint, 2nd order): `RK2` 65 | - **Runge-Kutta 4** (classic, 4th order): `RK4` 66 | - **Backward Euler** (implicit, requires Newton iteration): `BackwardEuler` 67 | - **Trapezoidal rule** (implicit, 2nd order): `ODETrapezoidal` 
68 | - **Adams-Bashforth** (multistep explicit): `AdamsBashforth` 69 | - **Adams-Moulton** (multistep implicit): `AdamsMoulton` 70 | - **Predictor-Corrector** (AB predictor + AM corrector): `PredictorCorrector` 71 | - **Adaptive Runge–Kutta (RK45)** (Runge–Kutta–Fehlberg, adaptive step control): `RK45` 72 | 73 | ### Quadrature (Numerical Integration) 74 | 75 | - **Trapezoidal rule** (composite): `Trapezoidal` 76 | - **Simpson's rule** (composite, even n): `Simpson` 77 | - **Gauss-Legendre quadrature** (2- and 3-point): `GaussLegendre` 78 | 79 | ### Numerical Differentiation 80 | 81 | - **Forward difference**: `ForwardDiff` 82 | - **Backward difference**: `BackwardDiff` 83 | - **Central difference (2nd order)**: `CentralDiff` 84 | - **Central difference (4th order)**: `CentralDiff4th` 85 | - **Second derivative**: `SecondDerivative` 86 | - **Richardson extrapolation**: `RichardsonExtrap` 87 | 88 | ### Curve Fitting 89 | 90 | - **Polynomial least squares fit**: `PolyFit` 91 | - **Linear regression with custom basis functions**: `LinearFit` 92 | - **Exponential fit** (via log transform): `ExpFit` 93 | - **Nonlinear least squares (Gauss-Newton / Levenberg-Marquardt)**: `NonlinearFit` 94 | 95 | ### Matrix & Vector utilities 96 | 97 | - Minimal `Matrix` / `Vector` classes 98 | - `@` operator for **matrix multiplication** 99 | - `*` for **scalar**–matrix multiplication 100 | - `.T` for transpose 101 | - Forward / backward substitution helpers 102 | - Norms, dot products, row/column access 103 | 104 | --- 105 | 106 | ## Install (editable) 107 | 108 | ```bash 109 | pip install -e /numethods 110 | ``` 111 | 112 | or just add `/numethods` to `PYTHONPATH`. 113 | 114 | ## Examples 115 | 116 | ```bash 117 | python /numethods/examples/demo.py 118 | ``` 119 | 120 | ## Notes 121 | 122 | - All algorithms are implemented without relying on external linear algebra solvers. 123 | - Uses plain Python floats and list-of-lists for matrices/vectors. 124 | - Tolerances use a relative criterion `|Δ| ≤ tol (1 + |value|)`. 125 | - ODE implicit solvers use Newton's method with finite-difference Jacobian approximation. 126 | - Curve fitting supports polynomial, linear basis, exponential, and general nonlinear regression. 127 | - Visualization requires `matplotlib`. 
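## Quick example

A minimal usage sketch, mirroring `tests/test_basic.py` and `examples/demo.py`; it assumes the package has been installed as above so that `numethods` is importable:

```python
from numethods import Matrix, Vector, LUDecomposition, NewtonRoot

# Solve a small linear system Ax = b via LU decomposition with partial pivoting.
A = Matrix([[4, -1, 0], [-1, 4, -1], [0, -1, 3]])
b = Vector([15, 10, 10])
x = LUDecomposition(A).solve(b)
print("LU solution:", x)

# Find the cube root of 2 with Newton's method.
f = lambda x: x**3 - 2
df = lambda x: 3 * x**2
root = NewtonRoot(f, df, 1.0).solve()
print("Newton root:", root)
```

Every solver follows the same pattern: construct the object with the problem data, then call `.solve()` (and, where available, `.trace()` for the iteration history).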
128 | -------------------------------------------------------------------------------- /numethods/orthogonal.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List 3 | from .linalg import Matrix, Vector, backward_substitution 4 | from .exceptions import SingularMatrixError 5 | 6 | 7 | class QRGramSchmidt: 8 | """Classical Gram-Schmidt orthogonalization.""" 9 | 10 | def __init__(self, A: Matrix): 11 | self.m, self.n = A.shape() 12 | self.Q = Matrix.zeros(self.m, self.n) 13 | self.R = Matrix.zeros(self.n, self.n) 14 | self._decompose(A) 15 | 16 | def _decompose(self, A: Matrix) -> None: 17 | m, n = self.m, self.n 18 | Qcols: List[Vector] = [] 19 | for j in range(n): 20 | v = A.col(j) 21 | for k in range(j): 22 | qk = Qcols[k] 23 | r = qk.dot(v) 24 | self.R.data[k][j] = r 25 | v = Vector([vi - r * qi for vi, qi in zip(v.data, qk.data)]) 26 | norm = sum(vi * vi for vi in v.data) ** 0.5 27 | if abs(norm) < 1e-15: 28 | raise SingularMatrixError("Linearly dependent columns in Gram-Schmidt") 29 | self.R.data[j][j] = norm 30 | qj = Vector([vi / norm for vi in v.data]) 31 | Qcols.append(qj) 32 | for i in range(m): 33 | self.Q.data[i][j] = qj[i] 34 | 35 | 36 | class QRModifiedGramSchmidt: 37 | """Modified Gram-Schmidt orthogonalization.""" 38 | 39 | def __init__(self, A: Matrix): 40 | self.m, self.n = A.shape() 41 | self.Q = Matrix.zeros(self.m, self.n) 42 | self.R = Matrix.zeros(self.n, self.n) 43 | self._decompose(A) 44 | 45 | def _decompose(self, A: Matrix) -> None: 46 | m, n = self.m, self.n 47 | V = [A.col(j).data for j in range(n)] 48 | for i in range(n): 49 | vi = Vector(V[i]) 50 | norm = sum(v * v for v in vi.data) ** 0.5 51 | if abs(norm) < 1e-15: 52 | raise SingularMatrixError("Linearly dependent columns in MGS") 53 | self.R.data[i][i] = norm 54 | qi = Vector([v / norm for v in vi.data]) 55 | for r in range(m): 56 | self.Q.data[r][i] = qi[r] 57 | for j in range(i + 1, n): 58 | r = qi.dot(Vector(V[j])) 59 | self.R.data[i][j] = r 60 | V[j] = [vj - r * qi_k for vj, qi_k in zip(V[j], qi.data)] 61 | 62 | 63 | class QRHouseholder: 64 | """Stable QR decomposition using Householder reflectors.""" 65 | 66 | def __init__(self, A: Matrix): 67 | self.m, self.n = A.shape() 68 | self.R = A.copy() 69 | self.Q = Matrix.identity(self.m) 70 | self._decompose() 71 | 72 | def _decompose(self) -> None: 73 | m, n = self.m, self.n 74 | for k in range(min(m, n)): 75 | x = [self.R.data[i][k] for i in range(k, m)] 76 | normx = sum(xi * xi for xi in x) ** 0.5 77 | if normx < 1e-15: 78 | continue 79 | sign = 1.0 if x[0] >= 0 else -1.0 80 | u1 = x[0] + sign * normx 81 | v = [xi / u1 if i > 0 else 1.0 for i, xi in enumerate(x)] 82 | normv = Vector(v).norm2() 83 | v = [vi / normv for vi in v] 84 | for j in range(k, n): 85 | s = sum(v[i] * self.R.data[k + i][j] for i in range(len(v))) 86 | for i in range(len(v)): 87 | self.R.data[k + i][j] -= 2 * s * v[i] 88 | for j in range(m): 89 | s = sum(v[i] * self.Q.data[j][k + i] for i in range(len(v))) 90 | for i in range(len(v)): 91 | self.Q.data[j][k + i] -= 2 * s * v[i] 92 | # self.Q = self.Q.transpose() 93 | 94 | 95 | class QRSolver: 96 | """Solve Ax=b given QR (square A).""" 97 | 98 | def __init__(self, qr: QRHouseholder | QRGramSchmidt | QRModifiedGramSchmidt): 99 | self.Q, self.R = qr.Q, qr.R 100 | 101 | def solve(self, b: Vector) -> Vector: 102 | Qtb = Vector( 103 | [ 104 | sum(self.Q.data[i][j] * b[i] for i in range(self.Q.m)) 105 | for j in range(self.Q.n) 106 | ] 107 | ) 108 
| return backward_substitution(self.R, Qtb) 109 | 110 | 111 | class LeastSquaresSolver: 112 | """Solve overdetermined system Ax ≈ b in least squares sense using QR.""" 113 | 114 | def __init__(self, A: Matrix, b: Vector): 115 | self.A, self.b = A, b 116 | 117 | def solve(self) -> Vector: 118 | qr = QRHouseholder(self.A) 119 | Q, R = qr.Q, qr.R 120 | # Compute Q^T b (dimension m) 121 | Qtb_full = [ 122 | sum(Q.data[i][j] * self.b[i] for i in range(Q.m)) for j in range(Q.n) 123 | ] 124 | # Take only first n entries 125 | Qtb = Vector(Qtb_full[: self.A.n]) 126 | # Extract leading nxn block of R 127 | Rtop = Matrix([R.data[i][: self.A.n] for i in range(self.A.n)]) 128 | return backward_substitution(Rtop, Qtb) 129 | -------------------------------------------------------------------------------- /numethods/roots.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable, List, Dict, Any 3 | from .exceptions import ConvergenceError, DomainError 4 | 5 | 6 | class Bisection: 7 | def __init__( 8 | self, 9 | f: Callable[[float], float], 10 | a: float, 11 | b: float, 12 | tol: float = 1e-10, 13 | max_iter: int = 10_000, 14 | ): 15 | if a >= b: 16 | raise ValueError("Require a < b") 17 | fa, fb = f(a), f(b) 18 | if fa * fb > 0: 19 | raise DomainError("f(a) and f(b) must have opposite signs") 20 | self.f, self.a, self.b = f, a, b 21 | self.tol, self.max_iter = tol, max_iter 22 | 23 | def solve(self) -> float: 24 | a, b, f = self.a, self.b, self.f 25 | fa, fb = f(a), f(b) 26 | for _ in range(self.max_iter): 27 | c = 0.5 * (a + b) 28 | fc = f(c) 29 | if abs(fc) <= self.tol or 0.5 * (b - a) <= self.tol: 30 | return c 31 | if fa * fc < 0: 32 | b, fb = c, fc 33 | else: 34 | a, fa = c, fc 35 | raise ConvergenceError("Bisection did not converge") 36 | 37 | def trace(self) -> List[Dict[str, Any]]: 38 | steps = [] 39 | a, b, f = self.a, self.b, self.f 40 | fa, fb = f(a), f(b) 41 | for k in range(self.max_iter): 42 | c = 0.5 * (a + b) 43 | fc = f(c) 44 | steps.append( 45 | { 46 | "iter": k, 47 | "a": a, 48 | "b": b, 49 | "c": c, 50 | "f(a)": fa, 51 | "f(b)": fb, 52 | "f(c)": fc, 53 | "interval": b - a, 54 | } 55 | ) 56 | if abs(fc) <= self.tol or 0.5 * (b - a) <= self.tol: 57 | return steps 58 | if fa * fc < 0: 59 | b, fb = c, fc 60 | else: 61 | a, fa = c, fc 62 | raise ConvergenceError("Bisection did not converge") 63 | 64 | 65 | class FixedPoint: 66 | def __init__( 67 | self, 68 | g: Callable[[float], float], 69 | x0: float, 70 | tol: float = 1e-10, 71 | max_iter: int = 10_000, 72 | ): 73 | self.g, self.x0, self.tol, self.max_iter = g, x0, tol, max_iter 74 | 75 | def solve(self) -> float: 76 | x = self.x0 77 | for _ in range(self.max_iter): 78 | x_new = self.g(x) 79 | if abs(x_new - x) <= self.tol * (1.0 + abs(x_new)): 80 | return x_new 81 | x = x_new 82 | raise ConvergenceError("Fixed-point iteration did not converge") 83 | 84 | def trace(self) -> List[Dict[str, Any]]: 85 | steps = [] 86 | x = self.x0 87 | for k in range(self.max_iter): 88 | x_new = self.g(x) 89 | steps.append({"iter": k, "x": x, "x_new": x_new, "error": abs(x_new - x)}) 90 | if abs(x_new - x) <= self.tol * (1.0 + abs(x_new)): 91 | return steps 92 | x = x_new 93 | raise ConvergenceError("Fixed-point iteration did not converge") 94 | 95 | 96 | class Secant: 97 | def __init__( 98 | self, 99 | f: Callable[[float], float], 100 | x0: float, 101 | x1: float, 102 | tol: float = 1e-10, 103 | max_iter: int = 10_000, 104 | ): 105 | self.f, self.x0, self.x1, 
self.tol, self.max_iter = f, x0, x1, tol, max_iter 106 | 107 | def solve(self) -> float: 108 | x0, x1, f = self.x0, self.x1, self.f 109 | f0, f1 = f(x0), f(x1) 110 | for _ in range(self.max_iter): 111 | denom = f1 - f0 112 | if abs(denom) < 1e-20: 113 | raise ConvergenceError("Secant encountered nearly zero denominator") 114 | x2 = x1 - f1 * (x1 - x0) / denom 115 | if abs(x2 - x1) <= self.tol * (1.0 + abs(x2)): 116 | return x2 117 | x0, x1 = x1, x2 118 | f0, f1 = f1, f(x1) 119 | raise ConvergenceError("Secant did not converge") 120 | 121 | def trace(self) -> List[Dict[str, Any]]: 122 | steps = [] 123 | x0, x1, f = self.x0, self.x1, self.f 124 | f0, f1 = f(x0), f(x1) 125 | for k in range(self.max_iter): 126 | denom = f1 - f0 127 | if abs(denom) < 1e-20: 128 | raise ConvergenceError("Secant encountered nearly zero denominator") 129 | x2 = x1 - f1 * (x1 - x0) / denom 130 | steps.append( 131 | { 132 | "iter": k, 133 | "x0": x0, 134 | "x1": x1, 135 | "x2": x2, 136 | "f(x0)": f0, 137 | "f(x1)": f1, 138 | "error": abs(x2 - x1), 139 | } 140 | ) 141 | if abs(x2 - x1) <= self.tol * (1.0 + abs(x2)): 142 | return steps 143 | x0, x1 = x1, x2 144 | f0, f1 = f1, f(x1) 145 | raise ConvergenceError("Secant did not converge") 146 | 147 | 148 | class NewtonRoot: 149 | def __init__( 150 | self, 151 | f: Callable[[float], float], 152 | df: Callable[[float], float], 153 | x0: float, 154 | tol: float = 1e-10, 155 | max_iter: int = 10_000, 156 | ): 157 | self.f, self.df, self.x0, self.tol, self.max_iter = f, df, x0, tol, max_iter 158 | 159 | def solve(self) -> float: 160 | x = self.x0 161 | for _ in range(self.max_iter): 162 | dfx = self.df(x) 163 | if abs(dfx) < 1e-20: 164 | raise ConvergenceError("Derivative near zero in Newton method") 165 | x_new = x - self.f(x) / dfx 166 | if abs(x_new - x) <= self.tol * (1.0 + abs(x_new)): 167 | return x_new 168 | x = x_new 169 | raise ConvergenceError("Newton method did not converge") 170 | 171 | def trace(self) -> List[Dict[str, Any]]: 172 | steps = [] 173 | x = self.x0 174 | for k in range(self.max_iter): 175 | dfx = self.df(x) 176 | if abs(dfx) < 1e-20: 177 | raise ConvergenceError("Derivative near zero in Newton method") 178 | x_new = x - self.f(x) / dfx 179 | steps.append( 180 | { 181 | "iter": k, 182 | "x": x, 183 | "f(x)": self.f(x), 184 | "df(x)": dfx, 185 | "x_new": x_new, 186 | "error": abs(x_new - x), 187 | } 188 | ) 189 | if abs(x_new - x) <= self.tol * (1.0 + abs(x_new)): 190 | return steps 191 | x = x_new 192 | raise ConvergenceError("Newton method did not converge") 193 | 194 | 195 | def print_trace(steps: List[Dict[str, Any]]): 196 | if not steps: 197 | print("No steps recorded.") 198 | return 199 | # Get headers from dict keys 200 | headers = list(steps[0].keys()) 201 | # Print header 202 | print(" | ".join(f"{h:>10}" for h in headers)) 203 | print("-" * (13 * len(headers))) 204 | # Print rows 205 | for row in steps: 206 | print( 207 | " | ".join( 208 | f"{row[h]:>10.6g}" if isinstance(row[h], (int, float)) else str(row[h]) 209 | for h in headers 210 | ) 211 | ) 212 | -------------------------------------------------------------------------------- /examples/demo.py: -------------------------------------------------------------------------------- 1 | from numethods import Matrix, Vector 2 | from numethods.orthogonal import ( 3 | QRHouseholder, 4 | QRModifiedGramSchmidt, 5 | LeastSquaresSolver, 6 | ) 7 | from numethods.solvers import ( 8 | LUDecomposition, 9 | GaussJordan, 10 | Jacobi, 11 | GaussSeidel, 12 | Cholesky, 13 | ) 14 | from numethods.roots import 
Bisection, FixedPoint, Secant, NewtonRoot, print_trace 15 | from numethods.interpolation import NewtonInterpolation, LagrangeInterpolation 16 | from numethods.quadrature import Trapezoidal, Simpson, GaussLegendre 17 | from numethods.eigen import ( 18 | PowerIteration, 19 | InversePowerIteration, 20 | RayleighQuotientIteration, 21 | QREigenvalues, 22 | SVD, 23 | ) 24 | from numethods.ode import ( 25 | Euler, 26 | Heun, 27 | RK2, 28 | RK4, 29 | BackwardEuler, 30 | ODETrapezoidal, 31 | AdamsBashforth, 32 | AdamsMoulton, 33 | PredictorCorrector, 34 | RK45, 35 | ) 36 | from numethods.differentiation import ( 37 | ForwardDiff, 38 | BackwardDiff, 39 | CentralDiff, 40 | CentralDiff4th, 41 | SecondDerivative, 42 | RichardsonExtrap, 43 | ) 44 | from numethods.fitting import ( 45 | LinearFit, 46 | NonlinearFit, 47 | PolyFit, 48 | ExpFit, 49 | plot_fit, 50 | plot_residuals, 51 | ) 52 | import math 53 | 54 | 55 | def run_solver(Solver, name, **kwargs): 56 | # Test problem: y' = -2y + t, y(0) = 1 57 | f = lambda t, y: -2 * y + t 58 | t0, y0, h, t_end = 0.0, 1.0, 0.1, 2.0 59 | solver = Solver(f, t0, y0, h, **kwargs) 60 | ts, ys = solver.solve(t_end) 61 | print(f"{name:20s} final y({t_end}) ≈ {ys[-1]:.6f}") 62 | return ts, ys 63 | 64 | 65 | def demo_ode(): 66 | print("Solving y' = -2y + t, y(0)=1, over [0, 2]") 67 | print("=" * 60) 68 | 69 | run_solver(Euler, "Euler") 70 | run_solver(Heun, "Heun (Improved Euler)") 71 | run_solver(RK2, "RK2 (Midpoint)") 72 | run_solver(RK4, "RK4 (Classic)") 73 | run_solver(BackwardEuler, "Backward Euler") 74 | run_solver(ODETrapezoidal, "Trapezoidal Rule") 75 | run_solver(AdamsBashforth, "Adams-Bashforth (2-step)", order=2) 76 | run_solver(AdamsMoulton, "Adams-Moulton (2-step)") 77 | run_solver(PredictorCorrector, "Predictor-Corrector") 78 | run_solver(RK45, "RK45 (adaptive)", tol=1e-6) 79 | 80 | print("=" * 60) 81 | print("All solvers finished.") 82 | 83 | 84 | def demo_differentiation(): 85 | f = lambda x: x**3 # f'(x) = 3x^2, f''(x) = 6x 86 | x0 = 2.0 87 | 88 | print("Forward :", ForwardDiff(f, x0)) 89 | print("Backward :", BackwardDiff(f, x0)) 90 | print("Central :", CentralDiff(f, x0)) 91 | print("4th order:", CentralDiff4th(f, x0)) 92 | print("Richardson:", RichardsonExtrap(f, x0)) 93 | print("Second derivative:", SecondDerivative(f, x0)) 94 | 95 | 96 | def demo_qr(): 97 | A = Matrix([[2, -1], [1, 2], [1, 1]]) 98 | 99 | b = Vector([1, 2, 3]) 100 | 101 | # Factorization 102 | qr = QRHouseholder(A) 103 | Q, R = qr.Q, qr.R 104 | print("Q =", Q) 105 | print("R =", R) 106 | 107 | qrm = QRModifiedGramSchmidt(A) 108 | Qm, Rm = qrm.Q, qrm.R 109 | print("Qm =", Qm) 110 | print("Rm =", Rm) 111 | print("Q^T Q =", Q.T @ Q) 112 | print("Qm^T Qm =", Qm.T @ Qm) 113 | print("A=Qm Rm =", Qm @ Rm) 114 | print("A=Q R =", Q @ R) 115 | 116 | # Solve Ax = b (least squares, since A is tall) 117 | x_ls = LeastSquaresSolver(A, b).solve() 118 | print("Least squares solution:", x_ls) 119 | 120 | 121 | def demo_eigen(): 122 | A = Matrix([[4, 1, 1], [1, 3, 0], [1, 0, 2]]) 123 | print("\n=== Power Iteration ===") 124 | solver_pi = PowerIteration(A, tol=1e-12, max_iter=100) 125 | lam, x = solver_pi.solve() 126 | solver_pi.trace() 127 | print(f"Dominant eigenvalue ≈ {lam:.6f}, eigenvector ≈ {x}\n") 128 | 129 | print("\n=== Inverse Power Iteration (shift=0) ===") 130 | solver_ip = InversePowerIteration(A, shift=0.0, tol=1e-12, max_iter=100) 131 | mu, x = solver_ip.solve() 132 | solver_ip.trace() 133 | print(f"Smallest eigenvalue ≈ {mu:.6f}, eigenvector ≈ {x}\n") 134 | 135 | print("\n=== Rayleigh 
Quotient Iteration ===") 136 | solver_rqi = RayleighQuotientIteration(A, tol=1e-12, max_iter=20) 137 | mu, x = solver_rqi.solve() 138 | solver_rqi.trace() 139 | print(f"Eigenvalue ≈ {mu:.6f}, eigenvector ≈ {x}\n") 140 | 141 | M = Matrix([[3, 1, 1], [-1, 3, 1], [1, 1, 3], [0, 2, 1]]) 142 | U, S, V = SVD(M).solve() 143 | print("Singular values:", S) 144 | 145 | 146 | def demo_linear_solvers(): 147 | A = Matrix([[4, -1, 0], [-1, 4, -1], [0, -1, 3]]) 148 | b = Vector([15, 10, 10]) 149 | 150 | print("LU:", LUDecomposition(A).solve(b)) 151 | print("Gauss-Jordan:", GaussJordan(A).solve(b)) 152 | print("Cholesky:", Cholesky(A).solve(b)) 153 | print("Jacobi:", Jacobi(A, b, tol=1e-12).solve()) 154 | print("Gauss-Seidel:", GaussSeidel(A, b, tol=1e-12).solve()) 155 | 156 | 157 | def demo_roots(): 158 | f = lambda x: x**2 - 2 159 | df = lambda x: 2 * x 160 | 161 | # Newton 162 | steps = NewtonRoot(f, df, x0=1.0).trace() 163 | print("Newton Method Trace (x^2 - 2):") 164 | print_trace(steps) 165 | 166 | # Secant 167 | steps = Secant(f, 0, 2).trace() 168 | print("\nSecant Method Trace (x^2 - 2):") 169 | print_trace(steps) 170 | 171 | # Bisection 172 | steps = Bisection(f, 0, 2).trace() 173 | print("\nBisection Method Trace (x^2 - 2):") 174 | print_trace(steps) 175 | 176 | # Fixed-point: solve 177 | g = lambda x: 0.5 * (x + 2 / x) 178 | steps = FixedPoint(g, 1.0).trace() 179 | print("\nFixed-Point Iteration Trace (x^2 - 2):") 180 | print_trace(steps) 181 | 182 | 183 | def demo_interpolation(): 184 | x = [0, 1, 2, 3] 185 | y = [1, 2, 0, 5] 186 | newt = NewtonInterpolation(x, y) 187 | lagr = LagrangeInterpolation(x, y) 188 | t = 1.5 189 | print("Newton interpolation at", t, "=", newt.evaluate(t)) 190 | print("Lagrange interpolation at", t, "=", lagr.evaluate(t)) 191 | 192 | 193 | def demo_quadrature(): 194 | f = lambda x: x**2 195 | I1 = Trapezoidal(f, 0, 1, n=100).integrate() 196 | I2 = Simpson(f, 0, 1, n=100).integrate() 197 | I3 = GaussLegendre(f, 0, 1, n=2).integrate() 198 | 199 | print("Trapezoidal integral of x^2 over [0,1]:", I1) 200 | print("Simpson integral of x^2 over [0,1]:", I2) 201 | print("Gauss-Legendre integral of x^2 over [0,1]:", I3) 202 | 203 | 204 | def demo_fitting(): 205 | x = [0, 1, 2, 3, 4] 206 | y = [1, 2.7, 7.4, 20.1, 54.6] 207 | 208 | # Polynomial fit (degree 2) 209 | poly = PolyFit(x, y, degree=2) 210 | 211 | # Exponential fit 212 | expfit = ExpFit(x, y) 213 | 214 | # Linear fit with a chosen basis (example: [1, x]) 215 | basis = [lambda t: 1.0, lambda t: t] 216 | lin = LinearFit(x, y, basis) 217 | 218 | # Nonlinear exponential fit (Gauss–Newton / LM) 219 | def model(x, params): 220 | a, b = params 221 | return a * math.exp(b * x) 222 | 223 | nonlin = NonlinearFit( 224 | model, x, y, init_params=[1.0, 0.8], lam=1e-3, max_iter=50, verbose=True 225 | ) 226 | 227 | # Plot all fits 228 | plot_fit( 229 | x, 230 | y, 231 | [poly, lin, expfit, nonlin], 232 | labels=["Polynomial", "Linear (1,x)", "Exponential", "Nonlinear"], 233 | ) 234 | 235 | # Line plot (current style) 236 | plot_residuals( 237 | x, 238 | y, 239 | [poly, lin, expfit, nonlin], 240 | labels=["Polynomial", "Linear (1,x)", "Exponential", "Nonlinear"], 241 | mode="line", 242 | ) 243 | poly.summary() 244 | poly.trace() 245 | lin.summary() 246 | lin.trace() 247 | expfit.summary() 248 | expfit.trace() 249 | nonlin.summary() 250 | nonlin.trace() 251 | 252 | # Bar chart of absolute residuals 253 | # plot_residuals( 254 | # x, 255 | # y, 256 | # [poly, expfit, nonlin], 257 | # labels=["Polynomial", "Exponential", "Nonlinear"], 
258 | # mode="bar", 259 | # ) 260 | 261 | 262 | if __name__ == "__main__": 263 | demo_qr() 264 | demo_eigen() 265 | demo_linear_solvers() 266 | demo_roots() 267 | demo_interpolation() 268 | demo_quadrature() 269 | demo_ode() 270 | demo_differentiation() 271 | demo_fitting() 272 | -------------------------------------------------------------------------------- /numethods/eigen.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from .linalg import Matrix, Vector 3 | from .orthogonal import QRHouseholder 4 | from .solvers import LUDecomposition 5 | from .exceptions import NonSquareMatrixError, ConvergenceError 6 | import math 7 | 8 | 9 | def solve_linear(M: Matrix, b: Vector) -> Vector: 10 | """Solve Mx = b using LU decomposition.""" 11 | solver = LUDecomposition(M) 12 | return solver.solve(b) 13 | 14 | 15 | class PowerIteration: 16 | def __init__(self, A: Matrix, tol: float = 1e-10, max_iter: int = 5000): 17 | if not A.is_square(): 18 | raise NonSquareMatrixError("A must be square") 19 | self.A, self.tol, self.max_iter = A, tol, max_iter 20 | self.history = [] 21 | 22 | def solve(self, x0: Vector | None = None) -> tuple[float, Vector]: 23 | n = self.A.n 24 | x = Vector([1.0] * n) if x0 is None else x0.copy() 25 | lam_old = 0.0 26 | self.history.clear() 27 | 28 | for k in range(self.max_iter): 29 | y = self.A @ x 30 | nrm = y.norm2() 31 | if nrm == 0.0: 32 | raise ConvergenceError("Zero vector encountered") 33 | x = (1.0 / nrm) * y 34 | lam = (x.dot(self.A @ x)) / (x.dot(x)) 35 | err = abs(lam - lam_old) 36 | 37 | self.history.append({"iter": k, "lambda": lam, "error": err}) 38 | 39 | if err <= self.tol * (1.0 + abs(lam)): 40 | return lam, x 41 | lam_old = lam 42 | 43 | raise ConvergenceError("Power iteration did not converge") 44 | 45 | def trace(self): 46 | if not self.history: 47 | print("No iterations stored. Run .solve() first.") 48 | return 49 | print("Power Iteration Trace") 50 | print(f"{'iter':>6} | {'lambda':>12} | {'error':>12}") 51 | print("-" * 40) 52 | for row in self.history: 53 | print(f"{row['iter']:6d} | {row['lambda']:12.6e} | {row['error']:12.6e}") 54 | 55 | 56 | class InversePowerIteration: 57 | def __init__( 58 | self, A: Matrix, shift: float = 0.0, tol: float = 1e-10, max_iter: int = 5000 59 | ): 60 | if not A.is_square(): 61 | raise NonSquareMatrixError("A must be square") 62 | self.A, self.shift, self.tol, self.max_iter = A, shift, tol, max_iter 63 | self.history = [] 64 | 65 | def solve(self, x0: Vector | None = None) -> tuple[float, Vector]: 66 | n = self.A.n 67 | x = Vector([1.0] * n) if x0 is None else x0.copy() 68 | mu_old = None 69 | self.history.clear() 70 | 71 | for k in range(self.max_iter): 72 | M = Matrix( 73 | [ 74 | [ 75 | self.A.data[i][j] - (self.shift if i == j else 0.0) 76 | for j in range(n) 77 | ] 78 | for i in range(n) 79 | ] 80 | ) 81 | y = solve_linear(M, x) 82 | nrm = y.norm2() 83 | if nrm == 0.0: 84 | raise ConvergenceError("Zero vector") 85 | x = (1.0 / nrm) * y 86 | mu = (x.dot(self.A @ x)) / (x.dot(x)) 87 | err = abs(mu - mu_old) if mu_old is not None else float("inf") 88 | 89 | self.history.append({"iter": k, "mu": mu, "error": err}) 90 | 91 | if (mu_old is not None) and err <= self.tol * (1.0 + abs(mu)): 92 | return mu, x 93 | mu_old = mu 94 | 95 | raise ConvergenceError("Inverse/shifted power iteration did not converge") 96 | 97 | def trace(self): 98 | if not self.history: 99 | print("No iterations stored. 
Run .solve() first.") 100 | return 101 | print("Inverse/Shifted Power Iteration Trace") 102 | print(f"{'iter':>6} | {'mu':>12} | {'error':>12}") 103 | print("-" * 40) 104 | for row in self.history: 105 | print(f"{row['iter']:6d} | {row['mu']:12.6e} | {row['error']:12.6e}") 106 | 107 | 108 | class RayleighQuotientIteration: 109 | def __init__(self, A: Matrix, tol: float = 1e-12, max_iter: int = 1000): 110 | if not A.is_square(): 111 | raise NonSquareMatrixError("A must be square") 112 | self.A, self.tol, self.max_iter = A, tol, max_iter 113 | self.history = [] 114 | 115 | def solve(self, x0: Vector | None = None) -> tuple[float, Vector]: 116 | n = self.A.n 117 | x = Vector([1.0] * n) if x0 is None else x0.copy() 118 | x = (1.0 / x.norm2()) * x 119 | mu = (x.dot(self.A @ x)) / (x.dot(x)) 120 | self.history.clear() 121 | 122 | for k in range(self.max_iter): 123 | M = Matrix( 124 | [ 125 | [self.A.data[i][j] - (mu if i == j else 0.0) for j in range(n)] 126 | for i in range(n) 127 | ] 128 | ) 129 | y = solve_linear(M, x) 130 | x = (1.0 / y.norm2()) * y 131 | mu_new = (x.dot(self.A @ x)) / (x.dot(x)) 132 | err = abs(mu_new - mu) 133 | 134 | self.history.append({"iter": k, "mu": mu_new, "error": err}) 135 | 136 | if err <= self.tol * (1.0 + abs(mu_new)): 137 | return mu_new, x 138 | mu = mu_new 139 | 140 | raise ConvergenceError("Rayleigh quotient iteration did not converge") 141 | 142 | def trace(self): 143 | if not self.history: 144 | print("No iterations stored. Run .solve() first.") 145 | return 146 | print("Rayleigh Quotient Iteration Trace") 147 | print(f"{'iter':>6} | {'mu':>12} | {'error':>12}") 148 | print("-" * 40) 149 | for row in self.history: 150 | print(f"{row['iter']:6d} | {row['mu']:12.6e} | {row['error']:12.6e}") 151 | 152 | 153 | class QREigenvalues: 154 | def __init__(self, A: Matrix, tol: float = 1e-10, max_iter: int = 10000): 155 | if not A.is_square(): 156 | raise NonSquareMatrixError("A must be square") 157 | self.A0, self.tol, self.max_iter = A.copy(), tol, max_iter 158 | 159 | def solve(self) -> Matrix: 160 | A = self.A0.copy() 161 | n = A.n 162 | for _ in range(self.max_iter): 163 | qr = QRHouseholder(A) 164 | Q, R = qr.Q, qr.R 165 | A = R @ Q 166 | off = 0.0 167 | for i in range(1, n): 168 | off += sum(abs(A.data[i][j]) for j in range(0, i)) 169 | if off <= self.tol: 170 | return A 171 | raise ConvergenceError("QR did not converge") 172 | 173 | 174 | class SVD: 175 | def __init__(self, A: Matrix, tol: float = 1e-10, max_iter: int = 10000): 176 | self.A, A = A, A 177 | self.tol, self.max_iter = tol, max_iter 178 | 179 | def _eig_sym(self, S: Matrix): 180 | n = S.n 181 | V = Matrix.identity(n) 182 | A = S.copy() 183 | for _ in range(self.max_iter): 184 | qr = QRHouseholder(A) 185 | Q, R = qr.Q, qr.R 186 | A = R @ Q 187 | V = V @ Q 188 | off = 0.0 189 | for i in range(1, n): 190 | off += sum(abs(A.data[i][j]) for j in range(0, i)) 191 | if off <= self.tol: 192 | break 193 | return [A.data[i][i] for i in range(n)], V 194 | 195 | def solve(self) -> tuple[Matrix, Vector, Matrix]: 196 | At = self.A.transpose() 197 | S = At @ self.A 198 | eigvals, V = self._eig_sym(S) 199 | idx = sorted(range(len(eigvals)), key=lambda i: eigvals[i], reverse=True) 200 | eigvals = [eigvals[i] for i in idx] 201 | V = Matrix([[V.data[r][i] for i in idx] for r in range(V.m)]) 202 | sing = [math.sqrt(ev) if ev > 0 else 0.0 for ev in eigvals] 203 | Ucols = [] 204 | for j, sv in enumerate(sing): 205 | vj = V.col(j) 206 | Av = self.A @ vj 207 | if sv > 1e-14: 208 | uj = (1.0 / sv) * Av 209 | else: 210 | 
nrm = Av.norm2() 211 | uj = (1.0 / nrm) * Av if nrm > 0 else Vector([0.0] * self.A.m) 212 | nrm = uj.norm2() 213 | uj = (1.0 / nrm) * uj if nrm > 0 else uj 214 | Ucols.append(uj.data) 215 | U = Matrix([[Ucols[j][i] for j in range(len(Ucols))] for i in range(self.A.m)]) 216 | 217 | Sigma = Vector(sing) 218 | return U, Sigma, V 219 | -------------------------------------------------------------------------------- /numethods/ode.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable, List, Tuple 3 | 4 | 5 | class ODESolver: 6 | def __init__( 7 | self, f: Callable[[float, float], float], t0: float, y0: float, h: float 8 | ): 9 | self.f = f 10 | self.t = t0 11 | self.y = y0 12 | self.h = h 13 | 14 | def step(self) -> float: 15 | raise NotImplementedError 16 | 17 | def solve(self, t_end: float) -> Tuple[List[float], List[float]]: 18 | ts, ys = [self.t], [self.y] 19 | while self.t < t_end - 1e-14: 20 | h = min(self.h, t_end - self.t) 21 | self.h = h 22 | self.y = self.step() 23 | self.t += h 24 | ts.append(self.t) 25 | ys.append(self.y) 26 | return ts, ys 27 | 28 | 29 | # ------------------ Explicit Methods ------------------ 30 | 31 | 32 | class Euler(ODESolver): 33 | def step(self): 34 | return self.y + self.h * self.f(self.t, self.y) 35 | 36 | 37 | class Heun(ODESolver): 38 | def step(self): 39 | k1 = self.f(self.t, self.y) 40 | y_predict = self.y + self.h * k1 41 | k2 = self.f(self.t + self.h, y_predict) 42 | return self.y + 0.5 * self.h * (k1 + k2) 43 | 44 | 45 | class RK2(ODESolver): # midpoint 46 | def step(self): 47 | k1 = self.f(self.t, self.y) 48 | k2 = self.f(self.t + 0.5 * self.h, self.y + 0.5 * self.h * k1) 49 | return self.y + self.h * k2 50 | 51 | 52 | class RK4(ODESolver): 53 | def step(self): 54 | k1 = self.f(self.t, self.y) 55 | k2 = self.f(self.t + 0.5 * self.h, self.y + 0.5 * self.h * k1) 56 | k3 = self.f(self.t + 0.5 * self.h, self.y + 0.5 * self.h * k2) 57 | k4 = self.f(self.t + self.h, self.y + self.h * k3) 58 | return self.y + (self.h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4) 59 | 60 | 61 | # ------------------ Implicit Methods ------------------ 62 | 63 | 64 | class BackwardEuler(ODESolver): 65 | def step(self): 66 | # Newton iteration for implicit solve 67 | y_new = self.y # initial guess 68 | for _ in range(20): 69 | F = y_new - self.y - self.h * self.f(self.t + self.h, y_new) 70 | dF = 1.0 - self.h * self._dfdy(self.t + self.h, y_new) 71 | if abs(dF) < 1e-14: 72 | break 73 | y_next = y_new - F / dF 74 | if abs(y_next - y_new) < 1e-12: 75 | return y_next 76 | y_new = y_next 77 | return y_new 78 | 79 | def _dfdy(self, t, y): 80 | eps = 1e-8 81 | return (self.f(t, y + eps) - self.f(t, y - eps)) / (2 * eps) 82 | 83 | 84 | class ODETrapezoidal(ODESolver): 85 | def step(self): 86 | y_new = self.y # initial guess 87 | for _ in range(20): 88 | F = ( 89 | y_new 90 | - self.y 91 | - 0.5 92 | * self.h 93 | * (self.f(self.t, self.y) + self.f(self.t + self.h, y_new)) 94 | ) 95 | dF = 1.0 - 0.5 * self.h * self._dfdy(self.t + self.h, y_new) 96 | if abs(dF) < 1e-14: 97 | break 98 | y_next = y_new - F / dF 99 | if abs(y_next - y_new) < 1e-12: 100 | return y_next 101 | y_new = y_next 102 | return y_new 103 | 104 | def _dfdy(self, t, y): 105 | eps = 1e-8 106 | return (self.f(t, y + eps) - self.f(t, y - eps)) / (2 * eps) 107 | 108 | 109 | # ------------------ Multistep Methods ------------------ 110 | 111 | 112 | class AdamsBashforth(ODESolver): 113 | """k-step Adams–Bashforth. 
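The first order-1 starting values are bootstrapped with RK4 steps.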
Default: 2-step.""" 114 | 115 | def __init__(self, f, t0, y0, h, order=2): 116 | super().__init__(f, t0, y0, h) 117 | self.order = order 118 | # Bootstrap with RK4 119 | rk4 = RK4(f, t0, y0, h) 120 | self.ts, self.ys = [t0], [y0] 121 | for _ in range(order - 1): 122 | t1 = rk4.t + h 123 | y1 = rk4.step() 124 | rk4.t, rk4.y = t1, y1 125 | self.ts.append(t1) 126 | self.ys.append(y1) 127 | 128 | def solve(self, t_end): 129 | ts, ys = self.ts[:], self.ys[:] 130 | while ts[-1] < t_end - 1e-14: 131 | h = min(self.h, t_end - ts[-1]) 132 | f_vals = [self.f(ts[-i - 1], ys[-i - 1]) for i in range(self.order)] 133 | if self.order == 2: 134 | y_next = ys[-1] + h * (3 / 2 * f_vals[0] - 1 / 2 * f_vals[1]) 135 | elif self.order == 3: 136 | y_next = ys[-1] + h * ( 137 | 23 / 12 * f_vals[0] - 16 / 12 * f_vals[1] + 5 / 12 * f_vals[2] 138 | ) 139 | else: 140 | raise NotImplementedError("Only 2- and 3-step AB implemented") 141 | t_next = ts[-1] + h 142 | ts.append(t_next) 143 | ys.append(y_next) 144 | return ts, ys 145 | 146 | 147 | class AdamsMoulton(ODESolver): 148 | """2-step Adams–Moulton implicit method (trapezoidal).""" 149 | 150 | def solve(self, t_end): 151 | ts, ys = [self.t], [self.y] 152 | rk4 = RK4(self.f, self.t, self.y, self.h) 153 | t1, y1 = rk4.t + self.h, rk4.step() 154 | ts.append(t1) 155 | ys.append(y1) 156 | while ts[-1] < t_end - 1e-14: 157 | h = min(self.h, t_end - ts[-1]) 158 | f_prev = self.f(ts[-1], ys[-1]) 159 | y_guess = ys[-1] + h * f_prev # predictor 160 | for _ in range(10): 161 | F = ys[-1] + 0.5 * h * (f_prev + self.f(ts[-1] + h, y_guess)) - y_guess 162 | dF = -1 - 0.5 * h * self._dfdy(ts[-1] + h, y_guess) 163 | y_new = y_guess - F / dF 164 | if abs(y_new - y_guess) < 1e-12: 165 | break 166 | y_guess = y_new 167 | t_next = ts[-1] + h 168 | ts.append(t_next) 169 | ys.append(y_guess) 170 | return ts, ys 171 | 172 | def _dfdy(self, t, y): 173 | eps = 1e-8 174 | return (self.f(t, y + eps) - self.f(t, y - eps)) / (2 * eps) 175 | 176 | 177 | class PredictorCorrector(ODESolver): 178 | """AB2 predictor + AM2 corrector""" 179 | 180 | def solve(self, t_end): 181 | ts, ys = [self.t], [self.y] 182 | rk4 = RK4(self.f, self.t, self.y, self.h) 183 | t1, y1 = rk4.t + self.h, rk4.step() 184 | ts.append(t1) 185 | ys.append(y1) 186 | while ts[-1] < t_end - 1e-14: 187 | h = min(self.h, t_end - ts[-1]) 188 | f_n = self.f(ts[-1], ys[-1]) 189 | f_nm1 = self.f(ts[-2], ys[-2]) 190 | y_pred = ys[-1] + h * (3 / 2 * f_n - 1 / 2 * f_nm1) 191 | y_corr = ys[-1] + 0.5 * h * (f_n + self.f(ts[-1] + h, y_pred)) 192 | t_next = ts[-1] + h 193 | ts.append(t_next) 194 | ys.append(y_corr) 195 | return ts, ys 196 | 197 | 198 | # ------------------ Adaptive RK45 ------------------ 199 | 200 | 201 | class RK45(ODESolver): 202 | """Runge–Kutta–Fehlberg (4,5) adaptive step.""" 203 | 204 | def __init__(self, f, t0, y0, h, tol=1e-6): 205 | super().__init__(f, t0, y0, h) 206 | self.tol = tol 207 | 208 | def solve(self, t_end): 209 | ts, ys = [self.t], [self.y] 210 | while self.t < t_end - 1e-14: 211 | h = min(self.h, t_end - self.t) 212 | y, err = self._rkf_step(self.t, self.y, h) 213 | if err < self.tol: 214 | self.t += h 215 | self.y = y 216 | ts.append(self.t) 217 | ys.append(self.y) 218 | # adapt step 219 | s = 0.84 * (self.tol / (err + 1e-14)) ** 0.25 220 | self.h = min(h * max(0.1, s), 5 * h) 221 | else: 222 | self.h = 0.5 * h 223 | return ts, ys 224 | 225 | def _rkf_step(self, t, y, h): 226 | f = self.f 227 | k1 = h * f(t, y) 228 | k2 = h * f(t + 0.25 * h, y + 0.25 * k1) 229 | k3 = h * f(t + 3 / 8 * h, y + 3 / 32 * k1 
+ 9 / 32 * k2) 230 | k4 = h * f( 231 | t + 12 / 13 * h, y + 1932 / 2197 * k1 - 7200 / 2197 * k2 + 7296 / 2197 * k3 232 | ) 233 | k5 = h * f( 234 | t + h, y + 439 / 216 * k1 - 8 * k2 + 3680 / 513 * k3 - 845 / 4104 * k4 235 | ) 236 | k6 = h * f( 237 | t + 0.5 * h, 238 | y 239 | - 8 / 27 * k1 240 | + 2 * k2 241 | - 3544 / 2565 * k3 242 | + 1859 / 4104 * k4 243 | - 11 / 40 * k5, 244 | ) 245 | y4 = y + (25 / 216 * k1 + 1408 / 2565 * k3 + 2197 / 4104 * k4 - 1 / 5 * k5) 246 | y5 = y + ( 247 | 16 / 135 * k1 248 | + 6656 / 12825 * k3 249 | + 28561 / 56430 * k4 250 | - 9 / 50 * k5 251 | + 2 / 55 * k6 252 | ) 253 | err = abs(y5 - y4) 254 | return y5, err 255 | -------------------------------------------------------------------------------- /numethods/solvers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from .linalg import Matrix, Vector, forward_substitution, backward_substitution 3 | from .exceptions import ( 4 | NonSquareMatrixError, 5 | SingularMatrixError, 6 | NotSymmetricError, 7 | NotPositiveDefiniteError, 8 | ConvergenceError, 9 | ) 10 | 11 | 12 | class LUDecomposition: 13 | """LU decomposition with partial pivoting: PA = LU""" 14 | 15 | def __init__(self, A: Matrix): 16 | if not A.is_square(): 17 | raise NonSquareMatrixError("A must be square") 18 | self.n = A.n 19 | self.L = Matrix.identity(self.n) 20 | self.U = A.copy() 21 | self.P = Matrix.identity(self.n) 22 | self.steps: list[tuple[int, Matrix, Matrix, Matrix]] = [] # store pivot steps 23 | self._decompose() 24 | 25 | def _decompose(self) -> None: 26 | n = self.n 27 | for k in range(n): 28 | pivot_row = self.U.max_abs_in_col(k, k) 29 | if abs(self.U.data[pivot_row][k]) < 1e-15: 30 | raise SingularMatrixError("Matrix is singular to working precision") 31 | self.U.swap_rows(k, pivot_row) 32 | self.P.swap_rows(k, pivot_row) 33 | if k > 0: 34 | self.L.data[k][:k], self.L.data[pivot_row][:k] = ( 35 | self.L.data[pivot_row][:k], 36 | self.L.data[k][:k], 37 | ) 38 | for i in range(k + 1, n): 39 | m = self.U.data[i][k] / self.U.data[k][k] 40 | self.L.data[i][k] = m 41 | for j in range(k, n): 42 | self.U.data[i][j] -= m * self.U.data[k][j] 43 | # record step 44 | self.steps.append((k, self.L.copy(), self.U.copy(), self.P.copy())) 45 | 46 | def solve(self, b: Vector) -> Vector: 47 | Pb = Vector( 48 | [ 49 | sum(self.P.data[i][j] * b[j] for j in range(self.n)) 50 | for i in range(self.n) 51 | ] 52 | ) 53 | y = forward_substitution(self.L, Pb) 54 | x = backward_substitution(self.U, y) 55 | return x 56 | 57 | def trace(self): 58 | print("LU Decomposition Trace (steps of elimination)") 59 | for k, L, U, P in self.steps: 60 | print(f"\nStep {k}:") 61 | print(f"L = {L}") 62 | print(f"U = {U}") 63 | print(f"P = {P}") 64 | 65 | 66 | class GaussJordan: 67 | """Gauss-Jordan elimination.""" 68 | 69 | def __init__(self, A: Matrix): 70 | if not A.is_square(): 71 | raise NonSquareMatrixError("A must be square") 72 | self.n = A.n 73 | self.A = A.copy() 74 | self.steps: list[tuple[int, Matrix]] = [] 75 | 76 | def solve(self, b: Vector) -> Vector: 77 | n = self.n 78 | Ab = self.A.augment(b) 79 | for col in range(n): 80 | pivot = Ab.max_abs_in_col(col, col) 81 | if abs(Ab.data[pivot][col]) < 1e-15: 82 | raise SingularMatrixError("Matrix is singular or nearly singular") 83 | Ab.swap_rows(col, pivot) 84 | pv = Ab.data[col][col] 85 | Ab.data[col] = [v / pv for v in Ab.data[col]] 86 | for r in range(n): 87 | if r == col: 88 | continue 89 | factor = Ab.data[r][col] 90 | Ab.data[r] = [ 91 
| rv - factor * cv for rv, cv in zip(Ab.data[r], Ab.data[col]) 92 | ] 93 | # record step 94 | self.steps.append((col, Ab.copy())) 95 | return Vector(row[-1] for row in Ab.data) 96 | 97 | def trace(self): 98 | print("Gauss-Jordan Trace (row reduction steps)") 99 | for step, Ab in self.steps: 100 | print(f"\nColumn {step}:") 101 | print(f"Augmented matrix = {Ab}") 102 | 103 | 104 | class Jacobi: 105 | """Jacobi iterative method for Ax = b.""" 106 | 107 | def __init__( 108 | self, A: Matrix, b: Vector, tol: float = 1e-10, max_iter: int = 10_000 109 | ): 110 | if not A.is_square(): 111 | raise NonSquareMatrixError("A must be square") 112 | if A.n != len(b): 113 | raise ValueError("Dimension mismatch") 114 | self.A = A.copy() 115 | self.b = b.copy() 116 | self.tol = tol 117 | self.max_iter = max_iter 118 | self.history: list[float] = [] 119 | 120 | def solve(self, x0: Vector | None = None) -> Vector: 121 | n = self.A.n 122 | x = Vector([0.0] * n) if x0 is None else x0.copy() 123 | for _ in range(self.max_iter): 124 | x_new = [0.0] * n 125 | for i in range(n): 126 | diag = self.A.data[i][i] 127 | if abs(diag) < 1e-15: 128 | raise SingularMatrixError("Zero diagonal entry in Jacobi") 129 | s = sum(self.A.data[i][j] * x[j] for j in range(n) if j != i) 130 | x_new[i] = (self.b[i] - s) / diag 131 | x_new = Vector(x_new) 132 | r = (self.A @ x_new) - self.b 133 | res_norm = r.norm2() 134 | self.history.append(res_norm) 135 | if res_norm <= self.tol * (1.0 + x_new.norm2()): 136 | return x_new 137 | x = x_new 138 | raise ConvergenceError("Jacobi did not converge within max_iter") 139 | 140 | def trace(self): 141 | print("Jacobi Iteration Trace") 142 | print(f"{'iter':>6} | {'residual norm':>14}") 143 | print("-" * 26) 144 | for k, res in enumerate(self.history): 145 | print(f"{k:6d} | {res:14.6e}") 146 | 147 | 148 | class GaussSeidel: 149 | """Gauss-Seidel iterative method for Ax = b.""" 150 | 151 | def __init__( 152 | self, A: Matrix, b: Vector, tol: float = 1e-10, max_iter: int = 10_000 153 | ): 154 | if not A.is_square(): 155 | raise NonSquareMatrixError("A must be square") 156 | if A.n != len(b): 157 | raise ValueError("Dimension mismatch") 158 | self.A = A.copy() 159 | self.b = b.copy() 160 | self.tol = tol 161 | self.max_iter = max_iter 162 | self.history: list[float] = [] 163 | 164 | def solve(self, x0: Vector | None = None) -> Vector: 165 | n = self.A.n 166 | x = Vector([0.0] * n) if x0 is None else x0.copy() 167 | for _ in range(self.max_iter): 168 | x_old = x.copy() 169 | for i in range(n): 170 | diag = self.A.data[i][i] 171 | if abs(diag) < 1e-15: 172 | raise SingularMatrixError("Zero diagonal entry in Gauss-Seidel") 173 | s1 = sum(self.A.data[i][j] * x[j] for j in range(i)) 174 | s2 = sum(self.A.data[i][j] * x_old[j] for j in range(i + 1, n)) 175 | x[i] = (self.b[i] - s1 - s2) / diag 176 | r = (self.A @ x) - self.b 177 | res_norm = r.norm2() 178 | self.history.append(res_norm) 179 | if res_norm <= self.tol * (1.0 + x.norm2()): 180 | return x 181 | raise ConvergenceError("Gauss-Seidel did not converge within max_iter") 182 | 183 | def trace(self): 184 | print("Gauss-Seidel Iteration Trace") 185 | print(f"{'iter':>6} | {'residual norm':>14}") 186 | print("-" * 26) 187 | for k, res in enumerate(self.history): 188 | print(f"{k:6d} | {res:14.6e}") 189 | 190 | 191 | class Cholesky: 192 | """Cholesky factorization A = L L^T for SPD matrices.""" 193 | 194 | def __init__(self, A: Matrix): 195 | if not A.is_square(): 196 | raise NonSquareMatrixError("A must be square") 197 | n = A.n 198 | for i in 
range(n): 199 | for j in range(i + 1, n): 200 | if abs(A.data[i][j] - A.data[j][i]) > 1e-12: 201 | raise NotSymmetricError("Matrix is not symmetric") 202 | self.n = n 203 | self.L = Matrix.zeros(n, n) 204 | self.steps: list[tuple[int, Matrix]] = [] 205 | self._decompose(A.copy()) 206 | 207 | def _decompose(self, A: Matrix) -> None: 208 | n = self.n 209 | for i in range(n): 210 | for j in range(i + 1): 211 | s = sum(self.L.data[i][k] * self.L.data[j][k] for k in range(j)) 212 | if i == j: 213 | val = A.data[i][i] - s 214 | if val <= 0.0: 215 | raise NotPositiveDefiniteError( 216 | "Matrix is not positive definite" 217 | ) 218 | self.L.data[i][j] = val**0.5 219 | else: 220 | self.L.data[i][j] = (A.data[i][j] - s) / self.L.data[j][j] 221 | # record after each row i 222 | self.steps.append((i, self.L.copy())) 223 | 224 | def solve(self, b: Vector) -> Vector: 225 | y = forward_substitution(self.L, b) 226 | x = backward_substitution(self.L.transpose(), y) 227 | return x 228 | 229 | def trace(self): 230 | print("Cholesky Decomposition Trace") 231 | for i, L in self.steps: 232 | print(f"\nRow {i}:") 233 | print(f"L = {L}") 234 | -------------------------------------------------------------------------------- /tutorials/tutorial3_orthogonalization.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "b2277afb", 6 | "metadata": {}, 7 | "source": [ 8 | "# Tutorial 3 - Orthogonalization & QR Decomposition" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "1e4a68da", 14 | "metadata": {}, 15 | "source": [ 16 | "\n", 17 | "In this tutorial, we study **orthogonalization** methods and the **QR decomposition**, which are central to numerical linear algebra.\n", 18 | "\n", 19 | "We will cover:\n", 20 | "\n", 21 | "- Orthogonal and orthonormal vectors\n", 22 | "- QR decomposition\n", 23 | "- Classical Gram-Schmidt\n", 24 | "- Modified Gram-Schmidt\n", 25 | "- Householder transformations\n", 26 | "- Applications: least squares\n", 27 | "- Examples with the `numethods` package\n" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "id": "515dc61f", 33 | "metadata": {}, 34 | "source": [ 35 | "\n", 36 | "## 1. Why Orthogonalization?\n", 37 | "\n", 38 | "- Orthogonal vectors are easier to work with numerically.\n", 39 | "- Many algorithms are more **stable** when using orthogonal bases.\n", 40 | "- Key applications:\n", 41 | " - Solving **least squares problems**\n", 42 | " - Computing **eigenvalues**\n", 43 | " - Ensuring numerical stability in projections\n" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "id": "af61a18f", 49 | "metadata": {}, 50 | "source": [ 51 | "\n", 52 | "## 2. Definitions\n", 53 | "\n", 54 | "### Orthogonal and Orthonormal vectors\n", 55 | "\n", 56 | "Two vectors $u, v \\in \\mathbb{R}^n$ are **orthogonal** if\n", 57 | "\n", 58 | "$$ u \\cdot v = 0. $$\n", 59 | "\n", 60 | "A set of vectors $\\{q_1, \\dots, q_m\\}$ is **orthonormal** if\n", 61 | "\n", 62 | "$$ q_i \\cdot q_j = \\begin{cases} 1 & i = j, \\\\ 0 & i \\neq j. 
\\end{cases} $$\n" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "id": "4f2330f1", 68 | "metadata": {}, 69 | "source": [ 70 | "\n", 71 | "### QR Decomposition\n", 72 | "\n", 73 | "For any $A \\in \\mathbb{R}^{m \\times n}$ with linearly independent columns, we can write\n", 74 | "\n", 75 | "$$ A = QR, $$\n", 76 | "\n", 77 | "- $Q \\in \\mathbb{R}^{m \\times n}$ has orthonormal columns ($Q^T Q = I$)\n", 78 | "- $R \\in \\mathbb{R}^{n \\times n}$ is upper triangular\n" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "id": "66567b83", 84 | "metadata": {}, 85 | "source": [ 86 | "\n", 87 | "## 3. Gram-Schmidt Orthogonalization\n", 88 | "\n", 89 | "### Classical Gram-Schmidt (CGS)\n", 90 | "\n", 91 | "Given linearly independent vectors $a_1, \\dots, a_n$:\n", 92 | "\n", 93 | "$$\n", 94 | "q_1 = \\frac{a_1}{\\|a_1\\|}\n", 95 | "$$\n", 96 | "$$\n", 97 | "q_k = \\frac{a_k - \\sum_{j=1}^{k-1} (q_j \\cdot a_k) q_j}{\\left\\|a_k - \\sum_{j=1}^{k-1} (q_j \\cdot a_k) q_j\\right\\|}\n", 98 | "$$\n", 99 | "\n", 100 | "Matrix form:\n", 101 | "\n", 102 | "$$ A = QR, \\quad R_{jk} = q_j^T a_k. $$\n", 103 | "\n", 104 | "⚠️ CGS can lose orthogonality in finite precision arithmetic.\n" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 1, 110 | "id": "3fe97ce3", 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "name": "stdout", 115 | "output_type": "stream", 116 | "text": [ 117 | "Q (CGS): Matrix([[0.8164965809277261, -0.5520524474738834], [0.4082482904638631, 0.7590721152765896], [0.4082482904638631, 0.34503277967117707]])\n", 118 | "R (CGS): Matrix([[2.449489742783178, 0.4082482904638631], [0.0, 2.41522945769824]])\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "from numethods import Matrix, Vector\n", 124 | "from numethods import QRGramSchmidt, QRModifiedGramSchmidt, QRHouseholder, LeastSquaresSolver\n", 125 | "\n", 126 | "# Example matrix\n", 127 | "A = Matrix([[2, -1], [1, 2], [1, 1]])\n", 128 | "\n", 129 | "# Classical Gram-Schmidt\n", 130 | "qrg = QRGramSchmidt(A)\n", 131 | "print(\"Q (CGS):\", qrg.Q)\n", 132 | "print(\"R (CGS):\", qrg.R)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "id": "ba84b59a", 138 | "metadata": {}, 139 | "source": [ 140 | "\n", 141 | "### Modified Gram-Schmidt (MGS)\n", 142 | "\n", 143 | "Same idea, but orthogonalization is done step by step:\n", 144 | "\n", 145 | "```\n", 146 | "for k = 1..n:\n", 147 | " q_k = a_k\n", 148 | " for j = 1..k-1:\n", 149 | " r_jk = q_j^T q_k\n", 150 | " q_k = q_k - r_jk q_j\n", 151 | " r_kk = ||q_k||\n", 152 | " q_k = q_k / r_kk\n", 153 | "```\n", 154 | "MGS is more stable than CGS.\n" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 2, 160 | "id": "e01e25ff", 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "name": "stdout", 165 | "output_type": "stream", 166 | "text": [ 167 | "Q (MGS): Matrix([[0.8164965809277261, -0.5520524474738834], [0.4082482904638631, 0.7590721152765896], [0.4082482904638631, 0.34503277967117707]])\n", 168 | "R (MGS): Matrix([[2.449489742783178, 0.4082482904638631], [0.0, 2.41522945769824]])\n" 169 | ] 170 | } 171 | ], 172 | "source": [ 173 | "# Modified Gram-Schmidt\n", 174 | "qrm = QRModifiedGramSchmidt(A)\n", 175 | "print(\"Q (MGS):\", qrm.Q)\n", 176 | "print(\"R (MGS):\", qrm.R)\n" 177 | ] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "id": "d893d189", 182 | "metadata": {}, 183 | "source": [ 184 | "\n", 185 | "## 4. 
Householder Reflections\n", 186 | "\n", 187 | "A more stable method uses **Householder matrices**.\n", 188 | "\n", 189 | "For a vector $x \\in \\mathbb{R}^m$:\n", 190 | "\n", 191 | "$$\n", 192 | "v = x \\pm \\|x\\| e_1, \\quad H = I - 2 \\frac{vv^T}{v^T v}.\n", 193 | "$$\n", 194 | "\n", 195 | "- $H$ is orthogonal ($H^T H = I$).\n", 196 | "- Applying $H$ zeros out all but the first component of $x$.\n", 197 | "\n", 198 | "QR via Householder:\n", 199 | "\n", 200 | "$$\n", 201 | "R = H_n H_{n-1} \\cdots H_1 A, \\quad Q = H_1^T H_2^T \\cdots H_n^T.\n", 202 | "$$" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": 3, 208 | "id": "15dfc35c", 209 | "metadata": {}, 210 | "outputs": [ 211 | { 212 | "name": "stdout", 213 | "output_type": "stream", 214 | "text": [ 215 | "Q (Householder): Matrix([[-0.8164965809277258, 0.552052447473883, -0.16903085094570333], [-0.40824829046386296, -0.7590721152765892, -0.5070925528371099], [-0.40824829046386296, -0.34503277967117707, 0.8451542547285166]])\n", 216 | "R (Householder): Matrix([[-2.449489742783177, -0.408248290463863], [2.220446049250313e-16, -2.415229457698238], [2.220446049250313e-16, 2.220446049250313e-16]])\n" 217 | ] 218 | } 219 | ], 220 | "source": [ 221 | "# Householder QR\n", 222 | "qrh = QRHouseholder(A)\n", 223 | "print(\"Q (Householder):\", qrh.Q)\n", 224 | "print(\"R (Householder):\", qrh.R)\n" 225 | ] 226 | }, 227 | { 228 | "cell_type": "markdown", 229 | "id": "2b6c612f", 230 | "metadata": {}, 231 | "source": [ 232 | "\n", 233 | "## 5. Applications of QR\n", 234 | "\n", 235 | "### Least Squares\n", 236 | "\n", 237 | "We want to solve\n", 238 | "\n", 239 | "$$ \\min_x \\|Ax - b\\|_2. $$\n", 240 | "\n", 241 | "If $A = QR$, then\n", 242 | "\n", 243 | "$$ \\min_x \\|Ax - b\\|_2 = \\min_x \\|QRx - b\\|_2. $$\n", 244 | "\n", 245 | "Since $Q$ has orthonormal columns:\n", 246 | "\n", 247 | "$$ R x = Q^T b. $$\n", 248 | "\n", 249 | "So we can solve using back-substitution.\n" 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": 4, 255 | "id": "25b399b7", 256 | "metadata": {}, 257 | "outputs": [ 258 | { 259 | "name": "stdout", 260 | "output_type": "stream", 261 | "text": [ 262 | "Least squares solution: Vector([1.0285714285714287, 0.828571428571429])\n" 263 | ] 264 | } 265 | ], 266 | "source": [ 267 | "# Least squares example\n", 268 | "b = Vector([1, 2, 3])\n", 269 | "x_ls = LeastSquaresSolver(A, b).solve()\n", 270 | "print(\"Least squares solution:\", x_ls)" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "id": "a94c88c8", 276 | "metadata": {}, 277 | "source": [ 278 | "\n", 279 | "## 6. 
Key Takeaways\n", 280 | "\n", 281 | "- CGS is simple but numerically unstable.\n", 282 | "- MGS is more stable and preferred if using Gram-Schmidt.\n", 283 | "- Householder QR is the standard in practice (stable and efficient).\n", 284 | "- QR decomposition underlies least squares, eigenvalue methods, and more.\n" 285 | ] 286 | } 287 | ], 288 | "metadata": { 289 | "kernelspec": { 290 | "display_name": "Python 3", 291 | "language": "python", 292 | "name": "python3" 293 | }, 294 | "language_info": { 295 | "codemirror_mode": { 296 | "name": "ipython", 297 | "version": 3 298 | }, 299 | "file_extension": ".py", 300 | "mimetype": "text/x-python", 301 | "name": "python", 302 | "nbconvert_exporter": "python", 303 | "pygments_lexer": "ipython3", 304 | "version": "3.13.7" 305 | } 306 | }, 307 | "nbformat": 4, 308 | "nbformat_minor": 5 309 | } 310 | -------------------------------------------------------------------------------- /numethods/linalg.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Iterable, Tuple, List, Union 3 | from .exceptions import NonSquareMatrixError, SingularMatrixError 4 | import math 5 | 6 | Number = float # We'll use float throughout 7 | 8 | 9 | class Vector: 10 | def __init__(self, data: Iterable[Number]): 11 | self.data = [float(x) for x in data] 12 | 13 | def __len__(self) -> int: 14 | return len(self.data) 15 | 16 | def __getitem__(self, i: int) -> Number: 17 | return self.data[i] 18 | 19 | def __setitem__(self, i: int, value: Number) -> None: 20 | self.data[i] = float(value) 21 | 22 | def copy(self) -> "Vector": 23 | return Vector(self.data[:]) 24 | 25 | def norm(self) -> Number: 26 | return sum(abs(x) for x in self.data) 27 | 28 | def norm_inf(self) -> Number: 29 | return max(abs(x) for x in self.data) if self.data else 0.0 30 | 31 | def norm2(self) -> Number: 32 | return sum(x * x for x in self.data) ** 0.5 33 | 34 | def __add__(self, other: "Vector") -> "Vector": 35 | if len(self) != len(other): 36 | raise ValueError("Vector dimensions must match for addition") 37 | return Vector([a + b for a, b in zip(self.data, other.data)]) 38 | 39 | def __sub__(self, other: "Vector") -> "Vector": 40 | if len(self) != len(other): 41 | raise ValueError("Vector dimensions must match for subtraction") 42 | return Vector([a - b for a, b in zip(self.data, other.data)]) 43 | 44 | def __mul__(self, scalar: Number) -> "Vector": 45 | return Vector(scalar * x for x in self.data) 46 | 47 | __rmul__ = __mul__ 48 | 49 | def dot(self, other: "Vector") -> Number: 50 | assert len(self) == len(other) 51 | return sum(a * b for a, b in zip(self.data, other.data)) 52 | 53 | def __repr__(self): 54 | return f"Vector({self.data})" 55 | 56 | 57 | class Matrix: 58 | def __init__(self, rows: List[Iterable[Number]]): 59 | data = [list(map(float, row)) for row in rows] 60 | if not data: 61 | self.m, self.n = 0, 0 62 | else: 63 | n = len(data[0]) 64 | for r in data: 65 | if len(r) != n: 66 | raise ValueError("All rows must have the same length") 67 | self.m, self.n = len(data), n 68 | self.data = data 69 | 70 | @staticmethod 71 | def zeros(m: int, n: int) -> "Matrix": 72 | return Matrix([[0.0] * n for _ in range(m)]) 73 | 74 | @staticmethod 75 | def identity(n: int) -> "Matrix": 76 | A = Matrix.zeros(n, n) 77 | for i in range(n): 78 | A.data[i][i] = 1.0 79 | return A 80 | 81 | def copy(self) -> "Matrix": 82 | return Matrix([row[:] for row in self.data]) 83 | 84 | def shape(self) -> Tuple[int, int]: 85 | return 
self.m, self.n 86 | 87 | def __getitem__(self, idx): 88 | i, j = idx 89 | return self.data[i][j] 90 | 91 | def __setitem__(self, idx, value): 92 | i, j = idx 93 | self.data[i][j] = float(value) 94 | 95 | def row(self, i: int) -> Vector: 96 | return Vector(self.data[i][:]) 97 | 98 | def col(self, j: int) -> Vector: 99 | return Vector(self.data[i][j] for i in range(self.m)) 100 | 101 | def norm(self) -> float: 102 | """Matrix 1-norm: max column sum.""" 103 | return max( 104 | sum(abs(self.data[i][j]) for i in range(self.m)) for j in range(self.n) 105 | ) 106 | 107 | def norm_inf(self) -> float: 108 | """Matrix infinity norm: max row sum.""" 109 | return max(sum(abs(v) for v in row) for row in self.data) 110 | 111 | def norm2(self, tol: float = 1e-10, max_iter: int = 5000) -> float: 112 | """Spectral norm via power iteration on A^T A.""" 113 | # lazy import, avoids circular import 114 | from .eigen import PowerIteration 115 | 116 | if not self.is_square(): 117 | raise NonSquareMatrixError("Spectral norm requires square matrix") 118 | AtA = self.T @ self 119 | lam, _ = PowerIteration(AtA, tol=tol, max_iter=max_iter).solve() 120 | return math.sqrt(lam) 121 | 122 | def norm_fro(self) -> Number: 123 | return ( 124 | sum(self.data[i][j] ** 2 for i in range(self.m) for j in range(self.n)) 125 | ** 0.5 126 | ) 127 | 128 | def inverse(self) -> "Matrix": 129 | # lazy import, avoids circular import 130 | from .solvers import LUDecomposition 131 | 132 | """Compute A^{-1} using LU decomposition.""" 133 | if not self.is_square(): 134 | raise NonSquareMatrixError("Inverse requires square matrix") 135 | n = self.n 136 | solver = LUDecomposition(self) 137 | cols = [] 138 | for j in range(n): 139 | e = Vector([1.0 if i == j else 0.0 for i in range(n)]) 140 | x = solver.solve(e) 141 | cols.append([x[i] for i in range(n)]) 142 | return Matrix([[cols[j][i] for j in range(n)] for i in range(n)]) 143 | 144 | def condition_number(self, norm: str = "2") -> float: 145 | """ 146 | Compute condition number κ(A) = ||A|| * ||A^{-1}||. 
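        A large condition number signals an ill-conditioned matrix: as a rule of
        thumb, roughly log10(kappa) decimal digits can be lost when solving Ax = b.
        Illustrative usage (the 2x2 matrix below is an arbitrary example, not part
        of the library):

            >>> A = Matrix([[4.0, 1.0], [1.0, 3.0]])
            >>> kappa = A.condition_number(norm="inf")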
147 | norm: "1", "inf", or "2" 148 | """ 149 | if not self.is_square(): 150 | raise NonSquareMatrixError("Condition number requires square matrix") 151 | 152 | if norm == "1": 153 | normA = self.norm() 154 | normAinv = self.inverse().norm() 155 | elif norm == "inf": 156 | normA = self.norm_inf() 157 | normAinv = self.inverse().norm_inf() 158 | elif norm == "2": 159 | normA = self.norm2() 160 | normAinv = self.inverse().norm2() 161 | else: 162 | raise ValueError("norm must be '1', 'inf', or '2'") 163 | 164 | return normA * normAinv 165 | 166 | def __add__(self, other: "Matrix") -> "Matrix": 167 | if not isinstance(other, Matrix): 168 | raise TypeError("Can only add Matrix with Matrix") 169 | if self.m != other.m or self.n != other.n: 170 | raise ValueError("Matrix dimensions must match for addition") 171 | return Matrix( 172 | [ 173 | [self.data[i][j] + other.data[i][j] for j in range(self.n)] 174 | for i in range(self.m) 175 | ] 176 | ) 177 | 178 | def __sub__(self, other: "Matrix") -> "Matrix": 179 | if not isinstance(other, Matrix): 180 | raise TypeError("Can only subtract Matrix with Matrix") 181 | if self.m != other.m or self.n != other.n: 182 | raise ValueError("Matrix dimensions must match for subtraction") 183 | return Matrix( 184 | [ 185 | [self.data[i][j] - other.data[i][j] for j in range(self.n)] 186 | for i in range(self.m) 187 | ] 188 | ) 189 | 190 | def transpose(self) -> "Matrix": 191 | return Matrix([[self.data[i][j] for i in range(self.m)] for j in range(self.n)]) 192 | 193 | T = property(transpose) 194 | 195 | def __matmul__(self, other: Union["Matrix", "Vector"]): 196 | if isinstance(other, Matrix): 197 | if self.n != other.m: 198 | raise ValueError("dims") 199 | return Matrix( 200 | [ 201 | [ 202 | sum(self.data[i][k] * other.data[k][j] for k in range(self.n)) 203 | for j in range(other.n) 204 | ] 205 | for i in range(self.m) 206 | ] 207 | ) 208 | elif isinstance(other, Vector): 209 | if self.n != len(other): 210 | raise ValueError("dims") 211 | return Vector( 212 | [ 213 | sum(self.data[i][k] * other[k] for k in range(self.n)) 214 | for i in range(self.m) 215 | ] 216 | ) 217 | else: 218 | raise TypeError("Unsupported @") 219 | 220 | def __mul__(self, s): 221 | if isinstance(s, (int, float)): 222 | return Matrix([[v * s for v in row] for row in self.data]) 223 | raise TypeError("Use @ for matrix multiply; * is scalar") 224 | 225 | __rmul__ = __mul__ 226 | 227 | def is_square(self) -> bool: 228 | return self.m == self.n 229 | 230 | def augment(self, b: Vector) -> "Matrix": 231 | if self.m != len(b): 232 | raise ValueError("Dimension mismatch for augmentation") 233 | return Matrix([self.data[i] + [b[i]] for i in range(self.m)]) 234 | 235 | def max_abs_in_col(self, col: int, start_row: int = 0) -> int: 236 | max_i = start_row 237 | max_val = abs(self.data[start_row][col]) 238 | for i in range(start_row + 1, self.m): 239 | v = abs(self.data[i][col]) 240 | if v > max_val: 241 | max_val, max_i = v, i 242 | return max_i 243 | 244 | def swap_rows(self, i: int, j: int) -> None: 245 | if i != j: 246 | self.data[i], self.data[j] = self.data[j], self.data[i] 247 | 248 | def __repr__(self): 249 | return f"Matrix({self.data})" 250 | 251 | 252 | def forward_substitution(L: Matrix, b: Vector) -> Vector: 253 | """Solve Lx = b for x using forward substitution""" 254 | if not L.is_square(): 255 | raise NonSquareMatrixError("L must be square") 256 | n = L.n 257 | x = [0.0] * n 258 | for i in range(n): 259 | s = sum(L.data[i][j] * x[j] for j in range(i)) 260 | if abs(L.data[i][i]) < 1e-15: 
261 | raise SingularMatrixError("Zero pivot in forward substitution") 262 | x[i] = (b[i] - s) / L.data[i][i] 263 | return Vector(x) 264 | 265 | 266 | def backward_substitution(U: Matrix, b: Vector) -> Vector: 267 | """Solve Ux = b for x using backward substitution""" 268 | if not U.is_square(): 269 | raise NonSquareMatrixError("U must be square") 270 | n = U.n 271 | x = [0.0] * n 272 | for i in reversed(range(n)): 273 | s = sum(U.data[i][j] * x[j] for j in range(i + 1, n)) 274 | if abs(U.data[i][i]) < 1e-15: 275 | raise SingularMatrixError("Zero pivot in backward substitution") 276 | x[i] = (b[i] - s) / U.data[i][i] 277 | return Vector(x) 278 | -------------------------------------------------------------------------------- /tutorials/tutorial1_vectors_matrices.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "a9d0ab82", 6 | "metadata": {}, 7 | "source": [ 8 | "# 📘 Tutorial 1 - Vectors & Matrices in `numethods`\n", 9 | "\n", 10 | "---\n", 11 | "\n", 12 | "## 1. Introduction\n", 13 | "\n", 14 | "In numerical computing, **vectors** and **matrices** are the basic objects. \n", 15 | "Almost every algorithm (linear solvers, eigenvalue problems, optimization, curve fitting, etc.) is built upon them. \n", 16 | "\n", 17 | "In this tutorial, we will:\n", 18 | "- Define what vectors and matrices are.\n", 19 | "- Review their basic operations (addition, scalar multiplication, multiplication).\n", 20 | "- Define and compute vector and matrix **norms**.\n", 21 | "- Show how these operations work in the `numethods` package." 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "id": "b91edc5c", 27 | "metadata": {}, 28 | "source": [ 29 | "## 2. Importing our package" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 1, 35 | "id": "2107103a", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "from numethods.linalg import Vector, Matrix" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "id": "f64a4d9a", 45 | "metadata": {}, 46 | "source": [ 47 | "## 3. What is a vector?\n", 48 | "\n", 49 | "A **vector** is an ordered collection of numbers (scalars). \n", 50 | "We can think of a vector as a column:\n", 51 | "\n", 52 | "$$\n", 53 | "v =\n", 54 | "\\begin{bmatrix}\n", 55 | "v_1 \\\\\n", 56 | "v_2 \\\\\n", 57 | "\\vdots \\\\\n", 58 | "v_n\n", 59 | "\\end{bmatrix}, \\quad v \\in \\mathbb{R}^n\n", 60 | "$$\n", 61 | "\n", 62 | "or as a row:\n", 63 | "\n", 64 | "$$\n", 65 | "v^T = \\begin{bmatrix} v_1 & v_2 & \\cdots & v_n \\end{bmatrix}.\n", 66 | "$$\n", 67 | "\n", 68 | "### Example\n", 69 | "A vector in $\\mathbb{R}^3$:\n", 70 | "\n", 71 | "$$\n", 72 | "v = \\begin{bmatrix} 1 \\\\ 2 \\\\ 3 \\end{bmatrix}.\n", 73 | "$$" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 2, 79 | "id": "46c96e42", 80 | "metadata": {}, 81 | "outputs": [ 82 | { 83 | "name": "stdout", 84 | "output_type": "stream", 85 | "text": [ 86 | "v = Vector([1.0, 2.0, 3.0])\n" 87 | ] 88 | } 89 | ], 90 | "source": [ 91 | "v = Vector([1, 2, 3])\n", 92 | "print(\"v =\", v)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "id": "5e951a1b", 98 | "metadata": {}, 99 | "source": [ 100 | "## 4. 
What is a matrix?\n", 101 | "\n", 102 | "A **matrix** is a rectangular array of numbers with rows and columns:\n", 103 | "\n", 104 | "$$\n", 105 | "A =\n", 106 | "\\begin{bmatrix}\n", 107 | "a_{11} & a_{12} & \\cdots & a_{1n} \\\\\n", 108 | "a_{21} & a_{22} & \\cdots & a_{2n} \\\\\n", 109 | "\\vdots & \\vdots & \\ddots & \\vdots \\\\\n", 110 | "a_{m1} & a_{m2} & \\cdots & a_{mn}\n", 111 | "\\end{bmatrix}, \\quad A \\in \\mathbb{R}^{m \\times n}\n", 112 | "$$" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 3, 118 | "id": "a092da2f", 119 | "metadata": {}, 120 | "outputs": [ 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "A =\n", 126 | " Matrix([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "A = Matrix([[1, 2, 3],\n", 132 | " [4, 5, 6],\n", 133 | " [7, 8, 9]])\n", 134 | "print(\"A =\\n\", A)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "id": "4f0d9ec4", 140 | "metadata": {}, 141 | "source": [ 142 | "## 5. Basic operations\n", 143 | "\n", 144 | "### 5.1 Vector addition and subtraction\n", 145 | "For $ u, v \\in \\mathbb{R}^n $:\n", 146 | "\n", 147 | "$$\n", 148 | "u + v = \\begin{bmatrix} u_1 + v_1 \\\\ u_2 + v_2 \\\\ \\vdots \\\\ u_n + v_n \\end{bmatrix},\n", 149 | "\\quad\n", 150 | "u - v = \\begin{bmatrix} u_1 - v_1 \\\\ u_2 - v_2 \\\\ \\vdots \\\\ u_n - v_n \\end{bmatrix}.\n", 151 | "$$" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 4, 157 | "id": "b7571e71", 158 | "metadata": {}, 159 | "outputs": [ 160 | { 161 | "name": "stdout", 162 | "output_type": "stream", 163 | "text": [ 164 | "u + v = Vector([4.0, 4.0, 4.0])\n", 165 | "u - v = Vector([2.0, 0.0, -2.0])\n" 166 | ] 167 | } 168 | ], 169 | "source": [ 170 | "u = Vector([3, 2, 1])\n", 171 | "v = Vector([1, 2, 3])\n", 172 | "\n", 173 | "print(\"u + v =\", u + v)\n", 174 | "print(\"u - v =\", u - v)" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "id": "1aa8f396", 180 | "metadata": {}, 181 | "source": [ 182 | "### 5.2 Scalar multiplication\n", 183 | "For $ \\alpha \\in \\mathbb{R}, v \\in \\mathbb{R}^n $:\n", 184 | "\n", 185 | "$$\n", 186 | "\\alpha v = \\begin{bmatrix} \\alpha v_1 \\\\ \\alpha v_2 \\\\ \\vdots \\\\ \\alpha v_n \\end{bmatrix}.\n", 187 | "$$" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 5, 193 | "id": "b4168dfe", 194 | "metadata": {}, 195 | "outputs": [ 196 | { 197 | "name": "stdout", 198 | "output_type": "stream", 199 | "text": [ 200 | "2 * v = Vector([2.0, 4.0, 6.0])\n", 201 | "v * 2 = Vector([2.0, 4.0, 6.0])\n" 202 | ] 203 | } 204 | ], 205 | "source": [ 206 | "v = Vector([1, 2, 3])\n", 207 | "\n", 208 | "print(\"2 * v =\", 2 * v)\n", 209 | "print(\"v * 2 =\", v * 2)" 210 | ] 211 | }, 212 | { 213 | "cell_type": "markdown", 214 | "id": "e4919b59", 215 | "metadata": {}, 216 | "source": [ 217 | "### 5.3 Matrix addition and subtraction\n", 218 | "For $ A, B \\in \\mathbb{R}^{m \\times n} $:\n", 219 | "\n", 220 | "$$\n", 221 | "A + B = [ a_{ij} + b_{ij} ], \\quad\n", 222 | "A - B = [ a_{ij} - b_{ij} ].\n", 223 | "$$" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": 6, 229 | "id": "bd544880", 230 | "metadata": {}, 231 | "outputs": [ 232 | { 233 | "name": "stdout", 234 | "output_type": "stream", 235 | "text": [ 236 | "A + B =\n", 237 | " Matrix([[6.0, 8.0], [10.0, 12.0]])\n", 238 | "A - B =\n", 239 | " Matrix([[-4.0, -4.0], [-4.0, -4.0]])\n" 240 | ] 241 | } 242 | ], 243 | "source": [ 
244 | "A = Matrix([[1, 2], [3, 4]])\n", 245 | "B = Matrix([[5, 6], [7, 8]])\n", 246 | "\n", 247 | "print(\"A + B =\\n\", A + B)\n", 248 | "print(\"A - B =\\n\", A - B)" 249 | ] 250 | }, 251 | { 252 | "cell_type": "markdown", 253 | "id": "0a1fb7f1", 254 | "metadata": {}, 255 | "source": [ 256 | "### 5.4 Matrix-Vector multiplication\n", 257 | "For $ A \\in \\mathbb{R}^{m \\times n}, v \\in \\mathbb{R}^n $:\n", 258 | "\n", 259 | "$$\n", 260 | "(Av)_i = \\sum_{j=1}^n a_{ij} v_j.\n", 261 | "$$" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "execution_count": 7, 267 | "id": "d65fd20e", 268 | "metadata": {}, 269 | "outputs": [ 270 | { 271 | "name": "stdout", 272 | "output_type": "stream", 273 | "text": [ 274 | "A @ v = Vector([-2.0, -2.0, -2.0])\n" 275 | ] 276 | } 277 | ], 278 | "source": [ 279 | "A = Matrix([[1, 2, 3],\n", 280 | " [4, 5, 6],\n", 281 | " [7, 8, 9]])\n", 282 | "v = Vector([1, 0, -1])\n", 283 | "\n", 284 | "print(\"A @ v =\", A @ v)" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "id": "8c2d30ec", 290 | "metadata": {}, 291 | "source": [ 292 | "### 5.5 Matrix-Matrix multiplication\n", 293 | "For $ A \\in \\mathbb{R}^{m \\times n}, B \\in \\mathbb{R}^{n \\times p} $:\n", 294 | "\n", 295 | "$$\n", 296 | "(AB)_{ij} = \\sum_{k=1}^n a_{ik} b_{kj}.\n", 297 | "$$" 298 | ] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "execution_count": 8, 303 | "id": "f46a96ec", 304 | "metadata": {}, 305 | "outputs": [ 306 | { 307 | "name": "stdout", 308 | "output_type": "stream", 309 | "text": [ 310 | "A @ B =\n", 311 | " Matrix([[4.0, 4.0], [10.0, 8.0]])\n" 312 | ] 313 | } 314 | ], 315 | "source": [ 316 | "A = Matrix([[1, 2],\n", 317 | " [3, 4]])\n", 318 | "B = Matrix([[2, 0],\n", 319 | " [1, 2]])\n", 320 | "\n", 321 | "print(\"A @ B =\\n\", A @ B)" 322 | ] 323 | }, 324 | { 325 | "cell_type": "markdown", 326 | "id": "470e38cb", 327 | "metadata": {}, 328 | "source": [ 329 | "### 5.6 Transpose\n", 330 | "For $ A \\in \\mathbb{R}^{m \\times n} $:\n", 331 | "\n", 332 | "$$\n", 333 | "A^T_{ij} = A_{ji}.\n", 334 | "$$" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": 9, 340 | "id": "42168bd4", 341 | "metadata": {}, 342 | "outputs": [ 343 | { 344 | "name": "stdout", 345 | "output_type": "stream", 346 | "text": [ 347 | "A^T =\n", 348 | " Matrix([[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]])\n" 349 | ] 350 | } 351 | ], 352 | "source": [ 353 | "A = Matrix([[1, 2, 3],\n", 354 | " [4, 5, 6]])\n", 355 | "\n", 356 | "print(\"A^T =\\n\", A.T)" 357 | ] 358 | }, 359 | { 360 | "cell_type": "markdown", 361 | "id": "226efb8f", 362 | "metadata": {}, 363 | "source": [ 364 | "## 6. Norms\n", 365 | "\n", 366 | "Norms measure the **size** or **length** of vectors and matrices." 
367 | ] 368 | }, 369 | { 370 | "cell_type": "markdown", 371 | "id": "120cf212", 372 | "metadata": {}, 373 | "source": [ 374 | "### 6.1 Vector norms\n", 375 | "- **1-norm**: $\\|v\\|_1 = \\sum |v_i|$ \n", 376 | "- **2-norm (Euclidean)**: $\\|v\\|_2 = \\sqrt{\\sum v_i^2}$ \n", 377 | "- **∞-norm**: $\\|v\\|_\\infty = \\max |v_i|$" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": 10, 383 | "id": "3b187412", 384 | "metadata": {}, 385 | "outputs": [ 386 | { 387 | "name": "stdout", 388 | "output_type": "stream", 389 | "text": [ 390 | "‖v‖₁ = 12.0\n", 391 | "‖v‖₂ = 7.0710678118654755\n", 392 | "‖v‖∞ = 5.0\n" 393 | ] 394 | } 395 | ], 396 | "source": [ 397 | "v = Vector([3, -4, 5])\n", 398 | "\n", 399 | "print(\"‖v‖₁ =\", v.norm())\n", 400 | "print(\"‖v‖₂ =\", v.norm2())\n", 401 | "print(\"‖v‖∞ =\", v.norm_inf())" 402 | ] 403 | }, 404 | { 405 | "cell_type": "markdown", 406 | "id": "3ad8369e", 407 | "metadata": {}, 408 | "source": [ 409 | "### 6.2 Matrix norms\n", 410 | "- **Frobenius norm**: $\\|A\\|_F = \\sqrt{\\sum_{i,j} a_{ij}^2}$ \n", 411 | "- **1-norm** (maximum column sum): $\\|A\\|_1 = \\max_j \\sum_i |a_{ij}|$ \n", 412 | "- **∞-norm** (maximum row sum): $\\|A\\|_\\infty = \\max_i \\sum_j |a_{ij}|$" 413 | ] 414 | }, 415 | { 416 | "cell_type": "code", 417 | "execution_count": 11, 418 | "id": "72a8ae7c", 419 | "metadata": {}, 420 | "outputs": [ 421 | { 422 | "name": "stdout", 423 | "output_type": "stream", 424 | "text": [ 425 | "‖A‖_F = 5.477225575051661\n", 426 | "‖A‖₁ = 6.0\n", 427 | "‖A‖∞ = 7.0\n" 428 | ] 429 | } 430 | ], 431 | "source": [ 432 | "A = Matrix([[1, -2],\n", 433 | " [3, 4]])\n", 434 | "\n", 435 | "print(\"‖A‖_F =\", A.norm_fro())\n", 436 | "print(\"‖A‖₁ =\", A.norm())\n", 437 | "print(\"‖A‖∞ =\", A.norm_inf())" 438 | ] 439 | }, 440 | { 441 | "cell_type": "markdown", 442 | "id": "eab94be3", 443 | "metadata": {}, 444 | "source": [ 445 | "## 7. Summary\n", 446 | "- A **vector** is an element of $\\mathbb{R}^n$, a list of numbers. \n", 447 | "- A **matrix** is a rectangular array of numbers $\\mathbb{R}^{m \\times n}$. \n", 448 | "- We can add, subtract, and scale vectors/matrices. \n", 449 | "- Multiplication extends naturally: matrix-vector and matrix-matrix. \n", 450 | "- **Transpose** flips rows and columns. \n", 451 | "- **Norms** measure the size of vectors and matrices." 
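As a small bridge to the next tutorial on linear systems, the `Matrix` class in `numethods.linalg` also combines these norms into a condition number $\kappa(A) = \|A\|\,\|A^{-1}\|$ through its `condition_number(norm=...)` method. The sketch below is a minimal illustration; the nearly singular 2×2 matrix is an arbitrary example chosen to make $\kappa$ large, not a value taken from this tutorial.

```python
from numethods.linalg import Matrix

# Nearly singular, so kappa(A) is large (arbitrary illustrative matrix).
A = Matrix([[1.0, 0.99],
            [0.99, 0.98]])

kappa_1   = A.condition_number(norm="1")    # 1-norm of A times 1-norm of A^{-1}
kappa_inf = A.condition_number(norm="inf")  # same idea with the infinity norm

print("kappa (1-norm)  :", kappa_1)
print("kappa (inf-norm):", kappa_inf)
```

A large $\kappa(A)$ warns that the solution of $Ax = b$ can be very sensitive to small changes in $A$ or $b$, which is exactly the setting of Tutorial 2.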
452 | ] 453 | } 454 | ], 455 | "metadata": { 456 | "kernelspec": { 457 | "display_name": "Python 3 (ipykernel)", 458 | "language": "python", 459 | "name": "python3" 460 | }, 461 | "language_info": { 462 | "codemirror_mode": { 463 | "name": "ipython", 464 | "version": 3 465 | }, 466 | "file_extension": ".py", 467 | "mimetype": "text/x-python", 468 | "name": "python", 469 | "nbconvert_exporter": "python", 470 | "pygments_lexer": "ipython3", 471 | "version": "3.10.11" 472 | } 473 | }, 474 | "nbformat": 4, 475 | "nbformat_minor": 5 476 | } 477 | -------------------------------------------------------------------------------- /numethods/fitting.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import List, Callable 3 | import math 4 | from .linalg import Matrix, Vector 5 | from .orthogonal import LeastSquaresSolver 6 | from .differentiation import ForwardDiff, BackwardDiff, CentralDiff, CentralDiff4th 7 | 8 | 9 | class PolyFit: 10 | """Least squares polynomial fit of chosen degree.""" 11 | 12 | def __init__(self, x: List[float], y: List[float], degree: int): 13 | if len(x) != len(y): 14 | raise ValueError("x and y must have same length") 15 | if degree < 0: 16 | raise ValueError("degree must be non-negative") 17 | 18 | self.x = [float(v) for v in x] 19 | self.y = [float(v) for v in y] 20 | self.degree = degree 21 | self.coeffs = self._fit() 22 | 23 | def _fit(self): 24 | n = len(self.x) 25 | m = self.degree + 1 26 | A = Matrix([[self.x[i] ** j for j in range(m)] for i in range(n)]) 27 | b = Vector(self.y) 28 | return LeastSquaresSolver(A, b).solve() 29 | 30 | def evaluate(self, t: float) -> float: 31 | return sum(self.coeffs[j] * (t**j) for j in range(len(self.coeffs))) 32 | 33 | def summary(self): 34 | print("Polynomial Fit Coefficients") 35 | print("degree =", self.degree) 36 | print(" coeff | value") 37 | print("-------------------") 38 | for j, c in enumerate(self.coeffs): 39 | print(f" c{j:<3}| {c: .6f}") 40 | print() 41 | 42 | def trace(self): 43 | print("Polynomial Fit Trace (Vandermonde system)") 44 | print(" x | y | " + " | ".join([f"x^{j}" for j in range(self.degree + 1)])) 45 | print("-" * 40) 46 | for xi, yi in zip(self.x, self.y): 47 | row = " | ".join([f"{xi**j: .4f}" for j in range(self.degree + 1)]) 48 | print(f"{xi: .4f} | {yi: .4f} | {row}") 49 | print() 50 | 51 | 52 | class LinearFit: 53 | """Least squares fit with custom basis functions.""" 54 | 55 | def __init__( 56 | self, x: List[float], y: List[float], basis: List[Callable[[float], float]] 57 | ): 58 | if len(x) != len(y): 59 | raise ValueError("x and y must have same length") 60 | if not basis: 61 | raise ValueError("basis must contain at least one function") 62 | 63 | self.x = [float(v) for v in x] 64 | self.y = [float(v) for v in y] 65 | self.basis = basis 66 | self.coeffs = self._fit() 67 | 68 | def _fit(self): 69 | n = len(self.x) 70 | m = len(self.basis) 71 | A = Matrix([[phi(self.x[i]) for phi in self.basis] for i in range(n)]) 72 | b = Vector(self.y) 73 | return LeastSquaresSolver(A, b).solve() 74 | 75 | def evaluate(self, t: float) -> float: 76 | return sum(c * phi(t) for c, phi in zip(self.coeffs, self.basis)) 77 | 78 | def summary(self): 79 | print("Linear Fit Coefficients") 80 | print(" basis | value") 81 | print("-------------------") 82 | for j, c in enumerate(self.coeffs): 83 | print(f" φ{j:<3}| {c: .6f}") 84 | print() 85 | 86 | def trace(self): 87 | print("Linear Fit Trace (design matrix)") 88 | print(" x | y | " + " 
| ".join([f"φ{j}(x)" for j in range(len(self.basis))])) 89 | print("-" * 40) 90 | for xi, yi in zip(self.x, self.y): 91 | row = " | ".join([f"{phi(xi): .4f}" for phi in self.basis]) 92 | print(f"{xi: .4f} | {yi: .4f} | {row}") 93 | print() 94 | 95 | 96 | class ExpFit: 97 | """Fit y ≈ a * exp(bx) using log transform + linear least squares.""" 98 | 99 | def __init__(self, x: List[float], y: List[float]): 100 | if len(x) != len(y): 101 | raise ValueError("x and y must have same length") 102 | if any(val <= 0 for val in y): 103 | raise ValueError("y values must be positive for exponential fit") 104 | 105 | self.x = [float(v) for v in x] 106 | self.y = [float(v) for v in y] 107 | self.a, self.b = self._fit() 108 | 109 | def _fit(self): 110 | Y = [math.log(v) for v in self.y] 111 | A = Matrix([[1.0, self.x[i]] for i in range(len(self.x))]) 112 | b = Vector(Y) 113 | coeffs = LeastSquaresSolver(A, b).solve() 114 | a = math.exp(coeffs[0]) 115 | b = coeffs[1] 116 | return a, b 117 | 118 | def evaluate(self, t: float) -> float: 119 | return self.a * math.exp(self.b * t) 120 | 121 | def summary(self): 122 | print("Exponential Fit Parameters") 123 | print(" param | value") 124 | print("-------------------") 125 | print(f" a | {self.a: .6f}") 126 | print(f" b | {self.b: .6f}") 127 | print() 128 | 129 | def trace(self): 130 | print("Exponential Fit Trace (log transform)") 131 | print(" x | y | log(y)") 132 | print("-------------------") 133 | for xi, yi in zip(self.x, self.y): 134 | print(f"{xi: .4f} | {yi: .4f} | {math.log(yi): .4f}") 135 | print() 136 | 137 | 138 | class NonlinearFit: 139 | """Nonlinear least squares fitting using adaptive Levenberg–Marquardt.""" 140 | 141 | def __init__( 142 | self, 143 | model: Callable[[float, List[float]], float], 144 | x: List[float], 145 | y: List[float], 146 | init_params: List[float], 147 | max_iter: int = 100, 148 | tol: float = 1e-8, 149 | lam: float = 1e-3, 150 | derivative_method: str = "central", 151 | verbose: bool = True, 152 | ): 153 | if len(x) != len(y): 154 | raise ValueError("x and y must have same length") 155 | 156 | self.model = model 157 | self.x = [float(v) for v in x] 158 | self.y = [float(v) for v in y] 159 | self.params = [float(p) for p in init_params] 160 | self.max_iter = max_iter 161 | self.tol = tol 162 | self.lam = lam 163 | self.derivative_method = derivative_method 164 | self.verbose = verbose 165 | # history stores tuples: (iter, params, res_norm, λ, step, status) 166 | self.history: List[tuple] = [] 167 | self._fit() 168 | 169 | def _residuals(self, params): 170 | return Vector([self.model(xi, params) - yi for xi, yi in zip(self.x, self.y)]) 171 | 172 | def _jacobian(self, params): 173 | methods = { 174 | "forward": ForwardDiff, 175 | "backward": BackwardDiff, 176 | "central": CentralDiff, 177 | "central4th": CentralDiff4th, 178 | } 179 | diff_method = methods[self.derivative_method] 180 | 181 | m, n = len(self.x), len(params) 182 | J = [[0.0] * n for _ in range(m)] 183 | for j in range(n): 184 | for i, xi in enumerate(self.x): 185 | 186 | def func(pj): 187 | new_params = params[:] 188 | new_params[j] = pj 189 | return self.model(xi, new_params) 190 | 191 | J[i][j] = diff_method(func, params[j]) 192 | return Matrix(J) 193 | 194 | def _fit(self): 195 | params = self.params[:] 196 | prev_res_norm = float("inf") 197 | 198 | for k in range(self.max_iter): 199 | r = self._residuals(params) 200 | J = self._jacobian(params) 201 | JT = J.T 202 | A = JT @ J 203 | g = JT @ r 204 | 205 | for i in range(len(params)): 206 | A.data[i][i] += 
self.lam 207 | 208 | try: 209 | delta = LeastSquaresSolver(A, Vector([-gi for gi in g])).solve() 210 | new_params = [p + d for p, d in zip(params, delta)] 211 | new_r = self._residuals(new_params) 212 | res_norm = sum(abs(val) for val in new_r) 213 | status = "ok" 214 | except Exception: 215 | delta = [0.0] * len(params) 216 | new_params = params[:] 217 | res_norm = float("inf") 218 | status = "solver fail" 219 | self.lam *= 10 220 | self.history.append((k, params[:], res_norm, self.lam, delta, status)) 221 | continue 222 | 223 | # record this iteration 224 | self.history.append((k, params[:], res_norm, self.lam, delta, status)) 225 | 226 | # stopping conditions 227 | if res_norm < self.tol: 228 | params = new_params 229 | break 230 | if max(abs(d) for d in delta) < self.tol: 231 | params = new_params 232 | break 233 | if abs(prev_res_norm - res_norm) < 1e-12: 234 | params = new_params 235 | break 236 | 237 | if res_norm < prev_res_norm: 238 | params = new_params 239 | prev_res_norm = res_norm 240 | self.lam = max(self.lam / 10, 1e-12) 241 | else: 242 | self.lam *= 10 243 | if self.lam > 1e12: 244 | break 245 | 246 | self.params = params 247 | 248 | def evaluate(self, t: float) -> float: 249 | return self.model(t, self.params) 250 | 251 | def summary(self): 252 | print("Nonlinear Fit Final Parameters") 253 | for j, p in enumerate(self.params): 254 | print(f" param{j} = {p: .6f}") 255 | print() 256 | 257 | def trace(self): 258 | print("Nonlinear Fit Trace (Levenberg–Marquardt)") 259 | header = ( 260 | " iter | " 261 | + " | ".join([f"param{j}" for j in range(len(self.params))]) 262 | + " | res_norm | λ | step_norm | status" 263 | ) 264 | print(header) 265 | print("-" * len(header)) 266 | for k, params, res_norm, lam, delta, status in self.history: 267 | row = " | ".join([f"{p: .6f}" for p in params]) 268 | step_norm = max(abs(d) for d in delta) if delta else 0.0 269 | print( 270 | f"{k:5d} | {row} | {res_norm: .6e} | {lam: .1e} | {step_norm: .3e} | {status}" 271 | ) 272 | print("Final params:", self.params) 273 | print() 274 | 275 | 276 | # ---------------------------- 277 | # Plotting helper for curve fitting 278 | # ---------------------------- 279 | def plot_fit( 280 | x: List[float], 281 | y: List[float], 282 | fit_objects: List[object], 283 | labels: List[str] = None, 284 | true_func: Callable[[float], float] = None, 285 | num_points: int = 200, 286 | ): 287 | """ 288 | Plot data points and fitted curves. 289 | fit_objects must implement .evaluate(t). 290 | """ 291 | try: 292 | import matplotlib.pyplot as plt 293 | except ImportError: 294 | raise ImportError( 295 | "matplotlib is required for plotting. Install with `pip install matplotlib`." 
296 | ) 297 | 298 | def linspace(a: float, b: float, n: int) -> List[float]: 299 | if n == 1: 300 | return [a] 301 | step = (b - a) / (n - 1) 302 | return [a + i * step for i in range(n)] 303 | 304 | plt.scatter(x, y, color="black", label="data") 305 | t_vals = linspace(min(x), max(x), num_points) 306 | 307 | if true_func: 308 | plt.plot(t_vals, [true_func(t) for t in t_vals], "k--", label="true function") 309 | 310 | for i, fit in enumerate(fit_objects): 311 | lbl = labels[i] if labels else f"fit{i + 1}" 312 | plt.plot(t_vals, [fit.evaluate(t) for t in t_vals], label=lbl) 313 | 314 | plt.legend() 315 | plt.show() 316 | 317 | 318 | def plot_residuals( 319 | x: List[float], 320 | y: List[float], 321 | fit_objects: List[object], 322 | labels: List[str] = None, 323 | mode: str = "line", 324 | ): 325 | """ 326 | Plot residuals (y_i - fit.evaluate(x_i)) for each fit object. 327 | mode: "line" (default) or "bar" for absolute residual magnitudes. 328 | """ 329 | try: 330 | import matplotlib.pyplot as plt 331 | except ImportError: 332 | raise ImportError("matplotlib is required for plotting.") 333 | 334 | plt.axhline(0, color="black", linewidth=0.8) 335 | 336 | for i, fit in enumerate(fit_objects): 337 | residuals = [yi - fit.evaluate(xi) for xi, yi in zip(x, y)] 338 | lbl = labels[i] if labels else f"fit{i + 1}" 339 | if mode == "line": 340 | plt.plot(x, residuals, marker="o", linestyle="--", label=lbl) 341 | elif mode == "bar": 342 | plt.bar( 343 | [xi + i * 0.1 for xi in x], 344 | [abs(r) for r in residuals], 345 | width=0.1, 346 | label=lbl, 347 | ) 348 | 349 | plt.xlabel("x") 350 | plt.ylabel("Residuals") 351 | plt.title("Curve Fitting Residuals") 352 | plt.legend() 353 | plt.show() 354 | -------------------------------------------------------------------------------- /tutorials/tutorial4_root_finding.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "3c51341a", 6 | "metadata": {}, 7 | "source": [ 8 | "# Tutorial 4: Root-Finding Methods\n", 9 | "---\n", 10 | "In this tutorial, we study classical **root-finding algorithms** for nonlinear equations. We will:\n", 11 | "\n", 12 | "- Define the root-finding problem mathematically\n", 13 | "- Derive several algorithms (bisection, fixed-point, Newton, secant)\n", 14 | "- Discuss convergence conditions and error behavior\n", 15 | "- Compare methods with worked examples using the `numethods` package.\n" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "id": "2999aa4f", 21 | "metadata": {}, 22 | "source": [ 23 | "## 1. Problem Setup and Notation\n", 24 | "\n", 25 | "We seek to solve a nonlinear scalar equation:\n", 26 | "$$\n", 27 | "f(x) = 0, \\quad f: \\mathbb{R} \\to \\mathbb{R}.\n", 28 | "$$\n", 29 | "with a continuously differentiable function.\n", 30 | "\n", 31 | "\n", 32 | "### Root, residual, and error\n", 33 | "- A **root** $r$ satisfies $f(r)=0$.\n", 34 | "- **Absolute error:** $(e_k = |x_k - x^\\star|)$.\n", 35 | "- **Residual:** $(r_k = |f(x_k)|)$. 
\n", 36 | "Note that small residual does not always imply small error.\n", 37 | "\n", 38 | "### Multiplicity\n", 39 | "A root $r$ has **multiplicity** $m$ if\n", 40 | "$$\n", 41 | "f(r) = f'(r) = \\dots = f^{(m-1)}(r) = 0, \\quad f^{(m)}(r) \\neq 0.\n", 42 | "$$\n", 43 | "If $x^\\star$ satisfies $f(x^\\star)=0$ and $f'(x^\\star)\\ne 0$, we say the root is **simple** (multiplicity 1).\n", 44 | "\n", 45 | "If $f'(x^\\star)=\\cdots=f^{(m-1)}(x^\\star)=0$ and $f^{(m)}(x^\\star)\\ne 0$, we say the root has **multiplicity** (m).\n", 46 | "\n", 47 | "- **Simple roots** ($m=1$): most methods converge rapidly.\n", 48 | "- **Multiple roots** ($m>1$): convergence often slows.\n" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "id": "b610cea2", 54 | "metadata": {}, 55 | "source": [ 56 | "## 2. Bisection Method\n", 57 | "\n", 58 | "**Assumption (Intermediate Value Theorem):** If f is continuous on ([a,b]) and (f(a),f(b) < 0),\n", 59 | "then there exists $x^\\star$ in (a,b) with $f(x^\\star)=0$.\n", 60 | "\n", 61 | "- Assumes $f$ is continuous on $[a,b]$ with $f(a)f(b)<0$.\n", 62 | "- Repeatedly bisect interval and select subinterval containing the root.\n", 63 | "\n", 64 | "**Iteration:**\n", 65 | "$$\n", 66 | "c_k = \\frac{a_k+b_k}{2}, \\quad f(c_k).\n", 67 | "$$\n", 68 | "\n", 69 | "**Error bound:** interval length halves each step:\n", 70 | "$$\n", 71 | "|c_k-r| \\le \\frac{b-a}{2^k}.\n", 72 | "$$\n", 73 | "- Convergence: **linear**, guaranteed.\n" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "id": "d26f75fb", 80 | "metadata": {}, 81 | "outputs": [ 82 | { 83 | "name": "stdout", 84 | "output_type": "stream", 85 | "text": [ 86 | "Bisection root: 1.4142135605216026\n", 87 | "\n", 88 | "Bisection Method Trace (x^2 - 2):\n", 89 | " iter | a | b | c | f(a) | f(b) | f(c) | interval\n", 90 | "--------------------------------------------------------------------------------------------------------\n", 91 | " 0 | 0 | 2 | 1 | -2 | 2 | -1 | 2\n", 92 | " 1 | 1 | 2 | 1.5 | -1 | 2 | 0.25 | 1\n", 93 | " 2 | 1 | 1.5 | 1.25 | -1 | 0.25 | -0.4375 | 0.5\n", 94 | " 3 | 1.25 | 1.5 | 1.375 | -0.4375 | 0.25 | -0.109375 | 0.25\n", 95 | " 4 | 1.375 | 1.5 | 1.4375 | -0.109375 | 0.25 | 0.0664062 | 0.125\n", 96 | " 5 | 1.375 | 1.4375 | 1.40625 | -0.109375 | 0.0664062 | -0.0224609 | 0.0625\n", 97 | " 6 | 1.40625 | 1.4375 | 1.42188 | -0.0224609 | 0.0664062 | 0.0217285 | 0.03125\n", 98 | " 7 | 1.40625 | 1.42188 | 1.41406 | -0.0224609 | 0.0217285 | -0.000427246 | 0.015625\n", 99 | " 8 | 1.41406 | 1.42188 | 1.41797 | -0.000427246 | 0.0217285 | 0.0106354 | 0.0078125\n", 100 | " 9 | 1.41406 | 1.41797 | 1.41602 | -0.000427246 | 0.0106354 | 0.00510025 | 0.00390625\n", 101 | " 10 | 1.41406 | 1.41602 | 1.41504 | -0.000427246 | 0.00510025 | 0.00233555 | 0.00195312\n", 102 | " 11 | 1.41406 | 1.41504 | 1.41455 | -0.000427246 | 0.00233555 | 0.000953913 | 0.000976562\n", 103 | " 12 | 1.41406 | 1.41455 | 1.41431 | -0.000427246 | 0.000953913 | 0.000263274 | 0.000488281\n", 104 | " 13 | 1.41406 | 1.41431 | 1.41418 | -0.000427246 | 0.000263274 | -8.20011e-05 | 0.000244141\n", 105 | " 14 | 1.41418 | 1.41431 | 1.41425 | -8.20011e-05 | 0.000263274 | 9.06326e-05 | 0.00012207\n", 106 | " 15 | 1.41418 | 1.41425 | 1.41422 | -8.20011e-05 | 9.06326e-05 | 4.31482e-06 | 6.10352e-05\n", 107 | " 16 | 1.41418 | 1.41422 | 1.4142 | -8.20011e-05 | 4.31482e-06 | -3.88434e-05 | 3.05176e-05\n", 108 | " 17 | 1.4142 | 1.41422 | 1.41421 | -3.88434e-05 | 4.31482e-06 | -1.72643e-05 | 1.52588e-05\n", 109 | " 18 | 1.41421 | 
1.41422 | 1.41421 | -1.72643e-05 | 4.31482e-06 | -6.47477e-06 | 7.62939e-06\n", 110 | " 19 | 1.41421 | 1.41422 | 1.41421 | -6.47477e-06 | 4.31482e-06 | -1.07998e-06 | 3.8147e-06\n", 111 | " 20 | 1.41421 | 1.41422 | 1.41421 | -1.07998e-06 | 4.31482e-06 | 1.61742e-06 | 1.90735e-06\n", 112 | " 21 | 1.41421 | 1.41421 | 1.41421 | -1.07998e-06 | 1.61742e-06 | 2.68718e-07 | 9.53674e-07\n", 113 | " 22 | 1.41421 | 1.41421 | 1.41421 | -1.07998e-06 | 2.68718e-07 | -4.05632e-07 | 4.76837e-07\n", 114 | " 23 | 1.41421 | 1.41421 | 1.41421 | -4.05632e-07 | 2.68718e-07 | -6.84571e-08 | 2.38419e-07\n", 115 | " 24 | 1.41421 | 1.41421 | 1.41421 | -6.84571e-08 | 2.68718e-07 | 1.0013e-07 | 1.19209e-07\n", 116 | " 25 | 1.41421 | 1.41421 | 1.41421 | -6.84571e-08 | 1.0013e-07 | 1.58366e-08 | 5.96046e-08\n", 117 | " 26 | 1.41421 | 1.41421 | 1.41421 | -6.84571e-08 | 1.58366e-08 | -2.63102e-08 | 2.98023e-08\n", 118 | " 27 | 1.41421 | 1.41421 | 1.41421 | -2.63102e-08 | 1.58366e-08 | -5.23681e-09 | 1.49012e-08\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "from numethods import Bisection, FixedPoint, NewtonRoot, Secant, print_trace\n", 124 | "import math\n", 125 | "\n", 126 | "# Example function: f(x) = x^2 - 2\n", 127 | "f = lambda x: x**2 - 2\n", 128 | "df = lambda x: 2*x\n", 129 | "\n", 130 | "# Bisection\n", 131 | "bisect = Bisection(f, 0, 2, tol=1e-8)\n", 132 | "root_b = bisect.solve()\n", 133 | "print('Bisection root:', root_b)\n", 134 | "\n", 135 | "steps = bisect.trace()\n", 136 | "print(\"\\nBisection Method Trace (x^2 - 2):\")\n", 137 | "print_trace(steps)" 138 | ] 139 | }, 140 | { 141 | "cell_type": "markdown", 142 | "id": "3be4a510", 143 | "metadata": {}, 144 | "source": [ 145 | "## 3. Fixed-Point Iteration\n", 146 | "- Rewrite equation as $x=g(x)$.\n", 147 | "- Iterate:\n", 148 | "$$\n", 149 | "x_{k+1} = g(x_k).\n", 150 | "$$\n", 151 | "\n", 152 | "**Convergence theorem (Banach fixed-point):** If g is continuously differentiable near $(x^\\star)$ and\n", 153 | "$$\n", 154 | "|g'(x_\\star)| < 1,\n", 155 | "$$\n", 156 | "then for initial guesses $x_0$ sufficiently close to $x^\\star$, the iterates converge **linearly** to $x^\\star$ with asymptotic rate $|g'(x^\\star)|$.\n", 157 | "\n", 158 | "**Choice of g.** Different rearrangements yield different g's with different convergence properties.\n", 159 | "A poor choice (with $(|g'|\\ge 1))$ can diverge.\n", 160 | "\n", 161 | "- Rate: linear with factor $|g'(r)|$.\n" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": 2, 167 | "id": "436ce6f6", 168 | "metadata": {}, 169 | "outputs": [ 170 | { 171 | "name": "stdout", 172 | "output_type": "stream", 173 | "text": [ 174 | "Fixed-point failed: Fixed-point iteration did not converge\n", 175 | "\n", 176 | "Fixed-Point Iteration Trace (x^2 - 2):\n", 177 | " iter | x | x_new | error\n", 178 | "----------------------------------------------------\n", 179 | " 0 | 1 | 1.5 | 0.5\n", 180 | " 1 | 1.5 | 1.41667 | 0.0833333\n", 181 | " 2 | 1.41667 | 1.41422 | 0.00245098\n", 182 | " 3 | 1.41422 | 1.41421 | 2.1239e-06\n", 183 | " 4 | 1.41421 | 1.41421 | 1.59495e-12\n" 184 | ] 185 | } 186 | ], 187 | "source": [ 188 | "# Fixed point: g(x)=sqrt(2) ~ rewriting\n", 189 | "g = lambda x: (2/x) # not always convergent, but demonstrates\n", 190 | "try:\n", 191 | " fp = FixedPoint(g, 1.0, tol=1e-8)\n", 192 | " root_fp = fp.solve()\n", 193 | " print('Fixed-point root:', root_fp)\n", 194 | "except Exception as e:\n", 195 | " print('Fixed-point failed:', e)\n", 196 | "\n", 197 | "g = lambda x: 0.5 * (x + 2 / 
x)\n", 198 | "steps = FixedPoint(g, 1.0).trace()\n", 199 | "print(\"\\nFixed-Point Iteration Trace (x^2 - 2):\")\n", 200 | "print_trace(steps)" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "id": "40e66a30", 206 | "metadata": {}, 207 | "source": [ 208 | "## 4. Newton’s Method\n", 209 | "From Taylor expansion:\n", 210 | "$$\n", 211 | "f(x) \\approx f(x_k) + f'(x_k)(x-x_k).\n", 212 | "$$\n", 213 | "Set $f(x)=0$ to solve for next iterate:\n", 214 | "$$\n", 215 | "x_{k+1} = x_k - \\frac{f(x_k)}{f'(x_k)}.\n", 216 | "$$\n", 217 | "\n", 218 | "- **Quadratic convergence** for simple roots.\n", 219 | "- For multiple roots: drops to linear.\n", 220 | "- Requires derivative, sensitive to initial guess.\n" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": 4, 226 | "id": "7ebf9068", 227 | "metadata": {}, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "Newton root: 1.4142135623730951\n", 234 | "Newton Method Trace (x^2 - 2):\n", 235 | " iter | x | f(x) | df(x) | x_new | error\n", 236 | "------------------------------------------------------------------------------\n", 237 | " 0 | 1 | -1 | 2 | 1.5 | 0.5\n", 238 | " 1 | 1.5 | 0.25 | 3 | 1.41667 | 0.0833333\n", 239 | " 2 | 1.41667 | 0.00694444 | 2.83333 | 1.41422 | 0.00245098\n", 240 | " 3 | 1.41422 | 6.0073e-06 | 2.82843 | 1.41421 | 2.1239e-06\n", 241 | " 4 | 1.41421 | 4.51061e-12 | 2.82843 | 1.41421 | 1.59472e-12\n" 242 | ] 243 | } 244 | ], 245 | "source": [ 246 | "# Newton\n", 247 | "newton = NewtonRoot(f, df, 1.0, tol=1e-12)\n", 248 | "root_n = newton.solve()\n", 249 | "print('Newton root:', root_n)\n", 250 | "\n", 251 | "steps = newton.trace()\n", 252 | "print(\"Newton Method Trace (x^2 - 2):\")\n", 253 | "print_trace(steps)" 254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "id": "84888305", 259 | "metadata": {}, 260 | "source": [ 261 | "## 5. 
Secant Method\n", 262 | "- Avoids derivative by approximating slope with finite difference:\n", 263 | "$$\n", 264 | "x_{k+1} = x_k - f(x_k) \\frac{x_k - x_{k-1}}{f(x_k)-f(x_{k-1})}.\n", 265 | "$$\n", 266 | "\n", 267 | "- Convergence order: $\\approx 1.618$ (superlinear).\n", 268 | "- More efficient than Newton if derivative expensive.\n" 269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": 5, 274 | "id": "f2318bf3", 275 | "metadata": {}, 276 | "outputs": [ 277 | { 278 | "name": "stdout", 279 | "output_type": "stream", 280 | "text": [ 281 | "Secant root: 1.414213562373095\n", 282 | "\n", 283 | "Secant Method Trace (x^2 - 2):\n", 284 | " iter | x0 | x1 | x2 | f(x0) | f(x1) | error\n", 285 | "-------------------------------------------------------------------------------------------\n", 286 | " 0 | 0 | 2 | 1 | -2 | 2 | 1\n", 287 | " 1 | 2 | 1 | 1.33333 | 2 | -1 | 0.333333\n", 288 | " 2 | 1 | 1.33333 | 1.42857 | -1 | -0.222222 | 0.0952381\n", 289 | " 3 | 1.33333 | 1.42857 | 1.41379 | -0.222222 | 0.0408163 | 0.0147783\n", 290 | " 4 | 1.42857 | 1.41379 | 1.41421 | 0.0408163 | -0.00118906 | 0.000418335\n", 291 | " 5 | 1.41379 | 1.41421 | 1.41421 | -0.00118906 | -6.00729e-06 | 2.12421e-06\n", 292 | " 6 | 1.41421 | 1.41421 | 1.41421 | -6.00729e-06 | 8.93146e-10 | 3.15775e-10\n", 293 | " 7 | 1.41421 | 1.41421 | 1.41421 | 8.93146e-10 | -8.88178e-16 | 2.22045e-16\n" 294 | ] 295 | } 296 | ], 297 | "source": [ 298 | "# Secant\n", 299 | "sec = Secant(f, 0, 2, tol=1e-12)\n", 300 | "root_s = sec.solve()\n", 301 | "print('Secant root:', root_s)\n", 302 | "\n", 303 | "steps = sec.trace()\n", 304 | "print(\"\\nSecant Method Trace (x^2 - 2):\")\n", 305 | "print_trace(steps)\n" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "id": "7ed8a0b5", 311 | "metadata": {}, 312 | "source": [ 313 | "## 6. Stopping Criteria\n", 314 | "We stop iteration when:\n", 315 | "- $|f(x_k)| \\le \\varepsilon$ (residual small), or\n", 316 | "- $|x_{k+1}-x_k| \\le \\varepsilon(1+|x_{k+1}|)$ (relative step small).\n" 317 | ] 318 | }, 319 | { 320 | "cell_type": "markdown", 321 | "id": "833b538c", 322 | "metadata": {}, 323 | "source": [ 324 | "## 7. Comparison of Methods\n", 325 | "| Method | Requires derivative | Convergence rate | Guarantee? |\n", 326 | "|--------|---------------------|------------------|------------|\n", 327 | "| Bisection | No | Linear | Yes (if sign change) |\n", 328 | "| Fixed-Point | No | Linear | Not always |\n", 329 | "| Newton | Yes | Quadratic | Locally (good guess) |\n", 330 | "| Secant | No | ~1.618 (superlinear) | Locally (good guess) |\n" 331 | ] 332 | }, 333 | { 334 | "cell_type": "markdown", 335 | "id": "0d372aa0", 336 | "metadata": {}, 337 | "source": [ 338 | "## 8. Exercises\n", 339 | "1. Apply all four methods to $f(x)=\\cos x - x$.\n", 340 | "2. Try Newton’s method on $f(x)=(x-1)^2$ and compare convergence rate with $f(x)=x^2-2$.\n", 341 | "3. 
Modify Secant to stop if denominator too small.\n" 342 | ] 343 | } 344 | ], 345 | "metadata": { 346 | "kernelspec": { 347 | "display_name": "Python 3", 348 | "language": "python", 349 | "name": "python3" 350 | }, 351 | "language_info": { 352 | "codemirror_mode": { 353 | "name": "ipython", 354 | "version": 3 355 | }, 356 | "file_extension": ".py", 357 | "mimetype": "text/x-python", 358 | "name": "python", 359 | "nbconvert_exporter": "python", 360 | "pygments_lexer": "ipython3", 361 | "version": "3.13.7" 362 | } 363 | }, 364 | "nbformat": 4, 365 | "nbformat_minor": 5 366 | } 367 | -------------------------------------------------------------------------------- /tutorials/tutorial2_linear_systems.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d3933270", 6 | "metadata": {}, 7 | "source": [ 8 | "# Tutorial 2 - Solving Linear Systems\n", 9 | "\n", 10 | "In this notebook, we will study numerical methods for solving systems of linear equations:\n", 11 | "\n", 12 | "$$\n", 13 | "A x = b, \\quad A \\in \\mathbb{R}^{n \\times n}, \\, x, b \\in \\mathbb{R}^n.\n", 14 | "$$\n", 15 | "## Motivation\n", 16 | "Why we care about solving Ax=b? in numerical methods (e.g., arises in ODEs, PDEs, optimization, physics).\n", 17 | "\n", 18 | "Exact solution: $ x=A^{-1}b $, but computing $ A^{-1} $ explicitly is costly/unstable.\n", 19 | "\n", 20 | "Numerical algorithms instead use factorizations or iterative schemes.\n", 21 | "\n", 22 | "Such systems appear everywhere in scientific computing:\n", 23 | "- discretization of differential equations (ODEs, PDEs),\n", 24 | "- optimization problems,\n", 25 | "- physical simulations,\n", 26 | "- statistical models.\n", 27 | "\n", 28 | "We will study **direct methods** (exact in theory) and **iterative methods** (successive approximations)." 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "id": "5fe78c6e", 34 | "metadata": {}, 35 | "source": [ 36 | "## 1. Direct Methods\n", 37 | "Introduce algorithms that give the solution in a finite number of steps (up to roundoff):\n", 38 | "\n", 39 | "- Gauss-Jordan elimination (concept, matrix reduction).\n", 40 | "\n", 41 | "- LU decomposition (and forward/backward substitution).\n", 42 | "\n", 43 | "- Cholesky decomposition (special case for symmetric positive definite matrices).\n", 44 | "\n", 45 | "### 1.1 Gauss-Jordan Elimination\n", 46 | "We augment $A$ with $b$ and apply row operations until $A$ becomes the identity:\n", 47 | "$$\n", 48 | "[A | b] \\;\\longrightarrow\\; [I | x].\n", 49 | "$$" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 1, 55 | "id": "9bf371eb", 56 | "metadata": {}, 57 | "outputs": [ 58 | { 59 | "name": "stdout", 60 | "output_type": "stream", 61 | "text": [ 62 | "Gauss–Jordan solution: Vector([7.111111111111111, -3.2222222222222223])\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "from numethods.linalg import Matrix, Vector\n", 68 | "from numethods.solvers import GaussJordan\n", 69 | "\n", 70 | "A = Matrix([[2, 1], [5, 7]])\n", 71 | "b = Vector([11, 13])\n", 72 | "solver = GaussJordan(A)\n", 73 | "x = solver.solve(b)\n", 74 | "print(\"Gauss–Jordan solution:\", x)" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "id": "a45de9c0", 80 | "metadata": {}, 81 | "source": [ 82 | "### 1.2 LU Decomposition\n", 83 | "\n", 84 | "Factorization:\n", 85 | "$$\n", 86 | "A = L U,\n", 87 | "$$\n", 88 | "with $L$ lower-triangular, $U$ upper-triangular." 
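Before calling the packaged solver in the next cell, it is instructive to spell out what `solve` does internally: apply the row permutation, then one forward and one backward triangular solve. Below is a minimal sketch using the `L`, `U`, and `P` attributes that `LUDecomposition` exposes together with `forward_substitution` / `backward_substitution` from `numethods.linalg`, with the same $A$ and $b$ as the next cell.

```python
from numethods.linalg import Matrix, Vector, forward_substitution, backward_substitution
from numethods.solvers import LUDecomposition

A = Matrix([[3, 1], [6, 3]])
b = Vector([5, 12])

lu = LUDecomposition(A)              # factors P A = L U with partial pivoting
Pb = lu.P @ b                        # permute b the same way the rows of A were permuted
y = forward_substitution(lu.L, Pb)   # solve L y = P b
x = backward_substitution(lu.U, y)   # solve U x = y
print("x =", x)                      # agrees with LUDecomposition(A).solve(b)
```

Once the $O(n^3)$ factorization is done, each triangular solve costs only $O(n^2)$, which is why the factored form is reused when solving for several right-hand sides.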
89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 2, 94 | "id": "7211fd5a", 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "name": "stdout", 99 | "output_type": "stream", 100 | "text": [ 101 | "LU solution: Vector([1.0, 2.0])\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "from numethods.solvers import LUDecomposition\n", 107 | "\n", 108 | "A = Matrix([[3, 1], [6, 3]])\n", 109 | "b = Vector([5, 12])\n", 110 | "solver = LUDecomposition(A)\n", 111 | "x = solver.solve(b)\n", 112 | "print(\"LU solution:\", x)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "id": "476368f3", 118 | "metadata": {}, 119 | "source": [ 120 | "### 1.3 Cholesky Decomposition\n", 121 | "\n", 122 | "For symmetric positive-definite (SPD) matrices:\n", 123 | "$$\n", 124 | "A = L L^T.\n", 125 | "$$" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 8, 131 | "id": "29c710f2", 132 | "metadata": {}, 133 | "outputs": [ 134 | { 135 | "name": "stdout", 136 | "output_type": "stream", 137 | "text": [ 138 | "Cholesky solution: Vector([1.1666666666666665, 0.6666666666666667])\n" 139 | ] 140 | } 141 | ], 142 | "source": [ 143 | "from numethods.solvers import Cholesky\n", 144 | "\n", 145 | "A = Matrix([[4, 2], [2, 4]])\n", 146 | "b = Vector([6, 5])\n", 147 | "solver = Cholesky(A)\n", 148 | "x = solver.solve(b)\n", 149 | "print(\"Cholesky solution:\", x)" 150 | ] 151 | }, 152 | { 153 | "cell_type": "markdown", 154 | "id": "c8b85927", 155 | "metadata": {}, 156 | "source": [ 157 | "## 2. Iterative Methods\n", 158 | "\n", 159 | "### 2.1 Jacobi Iteration\n", 160 | "\n", 161 | "Update:\n", 162 | "$$\n", 163 | "x_i^{(k+1)} = \\frac{1}{a_{ii}}\\Big(b_i - \\sum_{j\\ne i} a_{ij}x_j^{(k)}\\Big).\n", 164 | "$$" 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 4, 170 | "id": "ebd0b31c", 171 | "metadata": {}, 172 | "outputs": [ 173 | { 174 | "name": "stdout", 175 | "output_type": "stream", 176 | "text": [ 177 | "Jacobi solution: Vector([4.999999991891302, 4.999999985914916, 4.999999984848761, 4.999999987766864])\n" 178 | ] 179 | } 180 | ], 181 | "source": [ 182 | "from numethods.solvers import Jacobi\n", 183 | "\n", 184 | "A = Matrix([[4, -1, 0, 0],\n", 185 | " [-1, 4, -1, 0],\n", 186 | " [0, -1, 4, -1],\n", 187 | " [0, 0, -1, 3]])\n", 188 | "b = Vector([15, 10, 10, 10])\n", 189 | "solver = Jacobi(A, b, tol=1e-8, max_iter=100)\n", 190 | "x = solver.solve()\n", 191 | "print(\"Jacobi solution:\", x)" 192 | ] 193 | }, 194 | { 195 | "cell_type": "markdown", 196 | "id": "3e1bd09d", 197 | "metadata": {}, 198 | "source": [ 199 | "### 2.2 Gauss-Seidel Iteration\n", 200 | "\n", 201 | "Update:\n", 202 | "$$\n", 203 | "x_i^{(k+1)} = \\frac{1}{a_{ii}}\\Big(b_i - \\sum_{j<i} a_{ij}x_j^{(k+1)} - \\sum_{j>i} a_{ij}x_j^{(k)}\\Big).\n", 204 | "$$" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 5, 210 | "id": "5bea9ffe", 211 | "metadata": {}, 212 | "outputs": [ 213 | { 214 | "name": "stdout", 215 | "output_type": "stream", 216 | "text": [ 217 | "Gauss–Seidel solution: Vector([4.999999973600783, 4.999999981068349, 4.999999991156471, 4.999999997052157])\n" 218 | ] 219 | } 220 | ], 221 | "source": [ 222 | "from numethods.solvers import GaussSeidel\n", 223 | "\n", 224 | "solver = GaussSeidel(A, b, tol=1e-8, max_iter=100)\n", 225 | "x = solver.solve()\n", 226 | "print(\"Gauss–Seidel solution:\", x)" 227 | ] 228 | }, 229 | { 230 | "cell_type": "markdown", 231 | "id": "4f365016", 232 | "metadata": {}, 233 | "source": [ 234 | "## 3. 
Convergence\n", 235 | "\n", 236 | "The residual is:\n", 237 | "$$\n", 238 | "r^{(k)} = b - A x^{(k)}.\n", 239 | "$$\n", 240 | "\n", 241 | "A good stopping rule: $\\|r^{(k)}\\| < tol$." 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 6, 247 | "id": "993a9d44", 248 | "metadata": {}, 249 | "outputs": [ 250 | { 251 | "data": { 252 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkgAAAGwCAYAAABSN5pGAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQAAYglJREFUeJzt3QlYlNX+B/Cvgywq4MYmaYKprC6laIi73lxRTC3tqoDbzcQ0b5bmgpblTcu/C6Y3F0DbTMu1XNJUBCkVU1MWtYtLGSCaCpiAzPyfc0YQcAME3pl5v5/nmeu8M8PMce7k/Djv73xPFZ1OpwMRERERFdDcu0pEREREAgskIiIiomJYIBEREREVwwKJiIiIqBgWSERERETFsEAiIiIiKoYFEhEREVExVYvfQCWj1Wpx+fJl2NjYoEqVKkoPh4iIiEpAxD9mZGTA2dkZGs3D54lYIJWRKI4aNGig9DCIiIioDC5duoT69es/9H4WSGUkZo7y32BbW1ulh0NEREQlcPPmTTnBkf89/jAskMoo/7SaKI5YIBERERmXx7XHsEmbiIiIqBgWSERERETFsEAiIiIiKoY9SEREZBTy8vKQm5ur9DDIwJmbm8PMzOyJn4cFEhERGXxuTUpKCq5fv670UMhI1KpVC05OTk+UU8gCiYiIDFp+ceTg4IDq1asznJceWUzfunULaWlp8rhevXooKxZIRERk0KfV8oujunXrKj0cMgLVqlWTf4oiSXxuynq6jU3aRERksPJ7jsTMEVFJ5X9enqRnTdUF0oABA1C7dm0MGjRI6aEQEdEj8LQaVfbnRdUF0sSJE7F27Vqlh0FEREQGRtUFUufOnR+7FwsRERGpj9EWSFFRUfD394ezs7OcStu8efN9j1m2bBlcXFxgZWWFtm3b4vDhw4qMlYiIqKI97Lsw3/nz5+Vjjh8/XqnjMlZGWyBlZWWhRYsWsgh6kPXr12Py5MkIDQ3FsWPH5GN79OhRsPSvtLKzs+UOwIUvFeHStVtITKmY5yYiosoTFBSEgIAAGAqxg/2ff/4Jb29vpYdiFIy2QOrVqxfmzp0rG60fZOHChRgzZgyCg4Ph6emJFStWyK72NWvWlOn15s2bh5o1axZcxAetvOXmaTHhy1/QPywGn/10QeY5EBERlQex3F2EJ1atyoQfky6QHiUnJwdxcXHo3r17wW0ajUYex8bGluk5p02bhhs3bhRcLl26hPL2d24ealc3R/YdLWZsPoXXPj+GG7cYq09EdF8YYM6dSr88yS+tO3fuRPv27WXCs8hz6tu3L3777bcij/n9998xdOhQ1KlTBzVq1EDr1q3x888/F9y/fPlyPPPMM7CwsICbmxvWrVt33+uIGSIxgSCygBo1aoSNGzcW3MdTbKVjkmVkenq6DBdzdHQscrs4TkxMLDgWBdOJEyfk6br69etjw4YN8PX1feBzWlpayktFsrUyx+pAH6yJScaHOxOx41QKTv5+A0uGPotWDWtX6GsTERkL8cuk56xdlf668e/2QHWLsn1tiu8Z0fbRvHlzZGZmYtasWfIMiChWxC/w4rZOnTrhqaeewtatW+VMj2gP0Wq18uc3bdokV14vWrRIfndt375dniER311dunQpeJ2ZM2fiP//5DxYvXiwLqCFDhuDXX3+Fh4dHub0PamGSBVJJ7dmzB4ZGk5uF0S5X0WZcO3m67cLVW3jpv7GY/I+mGNfpGWg0zAIhIjI2AwcOLHIs2j3s7e0RHx8ve4K++OILXLlyBUeOHJEzSELjxo0LHv/RRx/JnqbXXntNHoti66effpK3Fy6QBg8ejNGjR8vr7733Hn744QcsXboUn3zySSX9TU2HSRZIdnZ28lxrampqkdvFsajKDdbf14HPBgJp8Wg+fDO2T2gvT7VtOX4ZC3YlIfa3q1j4cgs42FgpPVIiIsVUMzeTszlKvG5ZnT17Vs4aiVNm4ixH/szQxYsXZYEkZpKeffbZguKouISEBIwdO7bIbX5+fnKmqLDiZ0HEMU+plY1J9iCJ87OtWrXC3r17C24TH0Zx/LBTaAbBvDpQrRaQewv4YjBsridh0cstsWBQc/kfZvS5dPRefBAHzlxReqRERIoRfTTiVFdlX54knVnE0ly7dg0rV66URVJ+b5HomS28fxgZDqMtkMT5WlEV51fGycnJ8rqoxvOnH8UHMTIyUlbe48aNk+eAxTlbg1XVAnhpHdCgLXD7BrBuAKr8lYzBrRtg24T2cHeyQXpmDgLXHMa87xOQc0f/GwgRERmuq1evIikpCTNmzEC3bt1kP9Bff/1V5DGiN0l8h4ki6kHEz8TExBS5TRyLVdqFidNuxY/Zf6SyU2xHjx4tct5VFERCYGAgIiIi8PLLL8vzuWJKMyUlBS1btpSrCIo3bhsci+rAK+uBiL5A6ilgbQAwchcaO9TD5vF++OD7BKyNvYD/Rv0PPyVfw9Ihz+LputzEkYjIUIk9P8XKtU8//RT16tWTv8hPnTq1yGPE6rUPPvhA5iaJWBnxuF9++UWGIYszH1OmTMFLL70kT8OJJu1t27bh22+/va+XViw2EqvfxIq5zz//XAYkr169upL/xqZBY8zbhIgll8UvojjKFxISggsXLsiQRzGdKdK0jUK12sCwb4HarsD1C3ImCbeuwcrcDO/298aKYa1ga1UVJy5dR58lB7HtxGWlR0xERMWI1g6ROSRWqX311Vcyfkb0G73xxhtYsGDBfa0hu3fvhoODA3r37o1mzZrJ1Wiin1YQhZPoNxJN2V5eXvjvf/+L8PBw+V1Y2Jw5c+RriRkpsdfol19+ed8sE5VMFR3TCMtEJGmLwEiRiWRra1sxL/LXeWBNTyDjT+Cp1sCILYCltbzr979uYeJXxxF3QT9NO8SnAUL9vVDNouxNhEREhub27duyhcLV1VVuG2VMevbsKVeihYWFKT0U1bn9iM9NSb+/jXYGSRVquwDDN+lnlP44Cqz/J3AnW95Vv3Z1rB/7PEK6NIboG/zqyCX0C4tGwp/cpoSISEmiv0jkFO3fv79IYDEZFxZIhs7BA/jnRsC8BvC//cA3owFtnryrqpkGb/Zww+ej2sLexhJn0zLRf1kM1kQnc5sSIiKFjBw5Eq+++ir+/e9/o3///koPh8qIp9gM+RRbYb/tA754CcjLAZ4dDvRbKta6Ftx9N
TMbb208ib2J+s14OzW1x0eDW8jCiYjIWBnzKTZSDk+xqckzXYCBq4EqGuCXdcAPM8WGRAV317W2xKrA1nivvxcsq2pkVlLPRVH4MbFoWCYRERE9HgskY+LZD/Bfor9+aCkQ/X9F7hYhZsN9XQoyk65m5WBkxFHM2nIKt3P1p+WIiIjo8VggGZvnhgMvzNVf3zsHOBp+30OaOtrIzKSRfq7yWOQmiQbuxBQ2cBMREZUECyRj1G4C0OHf+uvb3wBOfXPfQ0Rm0ix/T0QE+8DO2hJnUjPRLywG4TFs4CYiInocFkjGqutMoJXYNkUHfPsv4GzRNNV8nd0csHNSB3Rzd5Bbk8zZFo/giCO4kqGPCyAiIqL7sUAyVmIFW5+PAa8XAW0usH4YcFG/+WFxdncbuN+928C9P+kKei2Owr67K96IiIgqwv79+2V/7PXr1x/6GLEDRq1atUr1vOI5N2/ejIrEAsmYacyAAf8FGncH7vwNfDEYSD390A/TiEIN3GLTWzGTNHvraTZwExFVELEX6MSJE2WitlhuLvYD9fPzw/Lly3Hr1i0Ysry8PLndibu7O6pVq4Y6derILbtWrVpV4udo164d/vzzT7ms3tgY7Wa1dFdVC+CldcC6AODSz8BnA4FRPwC1Gjzw4fkN3B/uTER4zHlEHDqP2N+uYsnQZ+HmZFPpwyciMlX/+9//ZDEkZkfERrRifzVLS0v8+uuvcuPap556Cv369YOhmjNnjtzzTWyVIjbAFflBYqN4kRReUmKPOScnJxgjziCZAovqwNCvAHt3/b5toki6de2hDxcN3GLftvwG7qTUDPiHRSOCDdxEROXmtddek5vViqLipZdegoeHBxo1aiTTtb/77jv4+/vLxy1cuFAWTzVq1ECDBg3kz2VmZhY8z+zZs9GyZcsiz71o0SK4uLgUOZXVpk0b+RyiIBOFmdisXThx4gS6dOkCGxsbGYzYqlUrOabH2bp1qxzL4MGDZeBiixYtMGrUKLz55ptFNuSdN2+evF/MMonHbNy48ZGn2MQptaeffhrVq1fHgAEDcPXq1ftee8uWLXjuuefkrJt4z0SxdufOHVQmFkimonodYNg3gI0zkJ4EfDkEyP37kT+S38Dd9W4D92w2cBORMRC/yOVkVf6lFL9Aii/93bt3Y/z48bJoeRBROAgajQZLlizB6dOnERkZiR9//BFvvfVWiV9LFA4BAQHo1KkTTp48idjYWIwdO7bg+f/5z3+ifv36OHLkCOLi4jB16lSYm5s/9nmdnJzkWK5cufLQx4jiaO3atVixYoUc/xtvvIFhw4bhwIEDD3z8zz//LIuskJAQHD9+XBZuc+feja656+DBgxgxYoQ8NRkfHy9nsURR9f7776My8RSbKalZHxj+LbCmh/5028aR+tNvZg//v1nMIK0ObI11P13A+98lFDRwLxjUAl3cHSp1+EREJZJ7C/jAufJf953LgMWDi53izp07J2fk3dzcitxuZ2cnt8EQRPH04YcfYtKkSQX3i1khUTCIvdw++eSTEr2WOPUlts3o27cvnnnmGXmbmK3Kd/HiRUyZMkX2EglNmjQp0fMuXLgQgwYNkoWSl5eX7CcSs1+9evWS92dnZ8tTh3v27IGvr6+8Tcz2REdHy6JGFGzFLV68GD179iwoAJs2bYpDhw5h586dBY8Rs0WiiAsMDCx4zvfee0/+TGhoKCoLZ5BMcXNbcbrNzBJI+h74/t+P/a2HDdxERJXj8OHDcuZEFByiwBBEgdGtWzfZkyROgw0fPlzOQJW0iVs0TwcFBaFHjx7ytJ0oQkRjdL7Jkydj9OjR6N69u2y6/u233wrus7a2vu8iijPB09MTp06dwk8//SQ34E1LS5PPL54rvwgUY/zHP/5R5OfFjFLh1ygsISFBNnoXll9c5ROnBN99990izzlmzBj5d6rMxnbOIJmihu2AgauAr0cAcRGATT2g89TH/tjDGrgXD20Jd6dK2JCXiKgkzKvrZ3OUeN0SEqvWxC+fSUlJRW4XsyGC6NcRzp8/L2d+xo0bJ08hiWJHzMCI01A5OTmyT0ecgiveH5qbm1vkODw8HK+//rqciVm/fj1mzJiBH374Ac8//7zsYXrllVdk39OOHTvkLMxXX30l+39EsVZc4Q1cNRoNfHx85EXMdH322WeygJs+fXpBn5R4XlHcFSaa0ctKPK+YRXrxxRfvu68yNyxmgWTK+7b1+Qj47t/A/nmAtSPQWgRLPlp+A3enpvZ4c8NJ2cAtErin9XJHUDuXgnPaRESKEf8OlfBUl1Lq1q0rZ1bECrAJEyY8tA9J9ASJRuePP/5YFiPC119/XeQx9vb2Mi5AFEn5/wY/qLB59tln5WXatGlyVuaLL76QBVL+qSxxET1CQ4cOlQWVKJBEIVcanp6e8s+srCx5XRRC4hTeg06nPYg49Sf6kAoTM1SFieZsUViWdmzljQWSKfMZDWSkAFELgO8mA9YOgHufEv1ofgP3lA0nsC/pikzgPnDmiuxNsrcp+28GRERqIXqIxGoysURezOI0b95cFkGiWToxMVGuJhNFgJgNWrp0qTx9FRMTIxueC+vcubNslJ4/f77sCRKzRGImKH+mJzk5WcYGiMgAZ2dnWVycPXtWNjr//fffsv9I/JxYafb777/L1x84cOBjxz9o0CA5ftF7JPqQxOuI4ksUWqKfSazQEyvaRNElirz27dvLXijxdxBjy+8hKkzMconn/Oijj2Q/065du4r0HwmzZs2Ss2pipZsYg3jPxGk3cbqveEN3hdJRmdy4cUPMd8o/DZpWq9Ntfk2nC7XV6d5z0OkuxJbyx7W6yEPJuqbTv9c1fHu77rl3d+v2JqRU2HCJiAr7+++/dfHx8fJPY3T58mVdSEiIztXVVWdubq6ztrbWtWnTRrdgwQJdVlaWfMzChQt19erV01WrVk3Xo0cP3dq1a+X3y19//VXwPMuXL9c1aNBAV6NGDd2IESN077//vq5hw4byvpSUFF1AQIB8DgsLC3n7rFmzdHl5ebrs7GzdkCFD5M+K+5ydneV4SvJ+fvrpp7ouXbro7O3t5c8+/fTTuqCgIN358+eLfEcsWrRI5+bmJv9+4rHi73DgwAF5/759++77u6xevVpXv359+ff19/fXffTRR7qaNWsWee2dO3fq2rVrJx9ja2sr3zMxnnziOTdt2lSmz01Jv7+r3H0hKiWxakAkg4pqufD5WoOUdwdY/0/gzE7AqhYwchfgoF/NUFJnUjPw+pe/IDElQx4H+jbEtN4e8pQcEVFFESu+xMyFmP2ozP4TMt3PTUm/v7mKTQ3EMv9B4UB9H+D2dX2Q5M3SNTjmN3AH++mDySJjL6BfWDQSU25W0KCJiIiUwwJJVWnb64G6TYCbv+uLpL8fvnngoxq4w+8mcJ9JzZQN3OFM4CYiIhPDAklNatTVp22LFW1p8cBXrwC5+sCy0uhyt4G7i5u9TOAWDdwjI44gPZMJ3EREZBpYIKlN7Yb6IsnSFrgQA3w7BtCWPgxSzCCtCfLBbH9PWFTVyJVuPRdFYV9SWoUM
m4iIqDKxQFIjp2bAkM8BMwsgYSuw4+1S7TGUT+RxBPm5YmuIH9wc7yZwhx/BnG1M4Cai8sXT+FTZnxcWSGrl2hEY8F9R5gBHVgJHVpX5qUTK9pYQPxkkKYgU7oBlMXLlGxHRk8jfVLUyt5gg45f/eSnJprwPw2X+aljm/ygxS4AfZur3bhu7D3D0eqKn+zExFVM2nMTVrBxYVtVgRh8PDHu+IRO4iajMxB5c169fh4ODg9x6g/+e0MOIkkYUR2LfuFq1aqFevXpl/v5mgaT2Akn83//Fy8DZXYC9OzBmn37F2xNIy7gtiySRvC1093DAhwObo641E7iJqPTE15TYakMUSUQlIYojkf79oGKaBVIFM5kCSchKB5a3AzJTgVbBgP+iJ35KrVaH8EPn8eGOROTkaeX2JB8PboGOTe3LZchEpD55eXn3bdJKVJw4rWZm9vAQYxZIFcykCiTht33AugHidzXgpXX6zW7LQfzlm5j41S84m6bf9Xl0e1dM6ekGy6pM4CYiosrHJG0qnWe6AH4T9de3hgDXL5XL03o622LbhPYY/nxDebwqOhkDlh3CuTQ2cBMRkeFigUT3dJ0BPNUKuH0D+Hasfg+3ciASuN8L8MbKEa1Rp4YF4v+8ib5Lo/HZTxe4dJeIiAwSCyS6x8wcGLgKsLABLh4CDn5Urk//D09H7JzYAR2a2OF2rhYzNp/CmLVHcZUJ3EREZGBYIFFRdRoBfRfqrx/4ELgQW65P72BrhcjgNpjZ1xMWZhrsSUhDz8UHEXV3xRsREZEhYIFE92v+EtBiKKDTAt+MBv7+q1yfXqOpglHtXbF5vB+aOFjjSkY2Rqw5jHe3xTOBm4iIDAILJHqw3gv0s0k3fwe2vl6mrUhK2sAd6Ktv4F4Tk8wEbiIiMggskOjBLG2AgasBjbl+v7a4iAp5GdHAPae/N9YEtUbdGhZITMmA/9JoRB46zwZuIiJSDAskerinngO6zdJf3zkNSEussJfq6u6InZM6orObPbLvaBG69TRGRhyRp9+IiIgqGwskejTfEOCZrsCdv4GNI4Hc2xX2UiJtOzzIB7P9PWFRVYN9SVfQa3EU9iWmVdhrEhERPYiqC6QBAwagdu3aGDRokNJDMVwaDRCwAqhhD6Sd1m9sW4HEvjlBfq7YFtIe7k42SM/MQXDEEYRuOcUGbiIiqjSqLpAmTpyItWvXKj0Mw2fjqC+ShMOfAonfV/hLujnZyFVuwX4u8jgy9gL6hUUj4c+bFf7aREREqi6QOnfuDBsbG6WHYRyadNefbhO2jAduXq7wlxQN3KH+XogI9oGdtSXOpGai/7IYhMcks4GbiIjUWSBFRUXB398fzs7O8rTL5s2b73vMsmXL4OLiAisrK7Rt2xaHDx9WZKyqIRq267UA/r6m34pEWzmnvDq7OWDXpA7o5u6AnDtazNkWL0+7sYGbiIhUVyBlZWWhRYsWsgh6kPXr12Py5MkIDQ3FsWPH5GN79OiBtLR7Db0tW7aEt7f3fZfLl0s/+5GdnS13AC58UZ2qlsDANYB5DeD8QSD6/yrtpetaW2JVYGu8298LllU12J/fwJ3EBm4iIip/VXRGcK5CzCBt2rQJAQEBBbeJGSMfHx+EhYXJY61WiwYNGmDChAmYOnVqiZ97//798jk2btz4yMfNnj0bc+bMue/2GzduwNbWFqryy+fAlteAKmbAyF1AA59KffmklAy8/uUvSLobKCn6lN7u6S5PyRERET2KmOCoWbPmY7+/DXYG6VFycnIQFxeH7t27F9ym0WjkcWxs+e4dlm/atGnyzcy/XLp0CarV8hXAexCgywO+HQ1kV27ytWjg3hLih6B2+gbu8JjzTOAmIqJyZZQFUnp6OvLy8uDo6FjkdnGckpJS4ucRBdXgwYPx/fffo379+o8sriwtLWWlWfiiWlWqAH0+Bmo2AP46D+ws+YxdeRGzRbP7ecncpMIJ3Ot+usAGbiIiUmeBVF727NmDK1eu4NatW/j999/h6+ur9JCMR7VawACx9L8K8MtnQPxWRYbRxd0BOyZ1QKem+gTumZtPYczaOFzLylFkPEREZBqMskCys7ODmZkZUlNTi9wujp2cnBQbl+q4tAfaT9Jf3/Y6cPNPRYbhYGMlZ5Jm9vWEhZkGexJS0XNRFKLPpisyHiIiMn5GWSBZWFigVatW2Lt3b8FtoklbHHMWqJJ1fgdwag78/Ze+cVurVWQYGk0VjGrvKsMlGztYIy0jG8NW/4wPvk+Q0QBEREQmUSBlZmbi+PHj8iIkJyfL6xcvXpTHYon/ypUrERkZiYSEBIwbN05GAwQHBys8cpWpagEMXAVUtQJ++1GftK0gT2dbuU3JsOeflsefRv0PAz6Jwbm0TEXHRURExsVgl/mL5fddunS57/bAwEBERETI62J5/oIFC2Rjtsg8WrJkiVz+b0jLBFXj8Erg+zcBM0vgXwcABw+lR4Tdp1Pw9jcn8detXFiZazCrrxeGtmkgYyOIiEidbpbw+9tgCyRDxwKpGPEx+nwwcO4HwLEZMGavPlhSYak3b+PfX59A9Dl9P9I/PB3x4cDmqFPDQumhERGRAkw6B4kMkJiV6b8MqF4XSP0V+HEuDIGjrRXWjmyD6b09YG5WBT/Ep6LHoigcPHtF6aEREZEBY4FE5cfGEei3VH/90FIgOQqGQDRwj+nYqKCBW+zhNnz1YczdHo/sO5WznxwRERkXFkhUvtz7AM8FinNuwKZX9avbDISXc03ZwD38+YbyeFV0MvqHMYGbiIjuxwKJyl+PD4A6jYCbfwDbJ+v7kwxENQszvBfgjdWBrYskcK+NPc8EbiIiKsACicqfpTXw4kr9ZranvwV+3QBD083DsUgC96wtpzEy4og8/UZERMQCiSpG/dZA57t7tH33b+CvCzA0IoE7ItgHs/09YVFVg31JV9BrcRT2JaYpPTQiIlIYCySqOO0nA/XbANk39f1IWsNriBaZSEF+rrI3yc3RBumZOQiOOILQLadwO9fwxktERJWDBRJVHLOqwIv/BSysgYuHgJjFMFRuTjbYEuKHYD8XeRwZewH9wqKR8OdNpYdGREQKYIFEFUs0a/f6UH993/vAZf3WMYbIytwMof5e8rSbnbUlzqRmylVua6KT2cBNRKQyLJCo4rX8J+DhD2jvAN+MBnJuwZB1dnPArkkd0M3dATl5Wry7PR5B4WzgJiJSExZIVDkp2/5LAGsn4OpZ4IeZMHR1rS2xKrA13u3vBcuqGhw4wwZuIiI1YYFElaN6HSDgE/31I6uAM7tg6EQD9whfF2yb0B7uTvcauGdvPc0GbiIiE8cCiSpP425A23H661tCgCz9BrKGrqmjjdymJKidvoE74tB5BCxjAjcRkSljgUSVq3soYO8OZKUBW183qJTtxzVwz+7nhfAg0cB9L4F7HRO4iYhMEgskqlzm1fQp2xpzIOk74Jd1MCZd3B2wY2LHggTumVtOY8zao7iayQZuIiJTwgKJKl+95kDXGfrrO6YCV3+DMbG3sZQzSbP6esLCTIM9CWnoufggDp69ovTQiIi
onLBAImW0mwA0bA/kZgGb/gXk3YEx0WiqYGR7V9mb1MTBWkYADF99GO9/F4/sO2zgJiIydiyQSBkaM2DAcsDSFvj9CBC9EMbI09kWW0PaY9jzT8vjlQeT8eInh3AuLVPpoRER0RNggUTKqfU00Odj/fX9/wF+j4MxqmZhhrkBzbByRGvUrm6O05dvou/Sg/j85wts4CYiMlIskEhZzQYDXi8Cujzg2zFAThaM1T88HbFzUke0b2yH27laTN90CmPXxeFaVo7SQyMiolJigUTKp2z3XQjYOAPXfgN2323eNlKOtlZYO7INZvTxkA3cP8SnoseiKDZwExEZGRZIpLxqtfX9SMLRNUDSThgz0cA9ukMjbBrfDo0LNXDP3c4GbiIiY8ECiQxDo87A8+P117eGAJnGP+Pi5VwT20LaY/jzDeXxquhk9A+LwVkmcBMRGTwWSGQ4us0CHDyBrCvANuNJ2X5cA/d7Ad5YHdgadWroE7j7MoGbiMjgsUAiw2FupU/ZNrMAkr4HjkXCVHTzEA3cHdCxUAL3qMijSGcCNxGRQWKBRIbFyRvoOlN/fec7Rpey/SgONlaICPJBqL8nLKpq8GNiGnouisK+pDSlh0ZERMWwQCLD4xsCuHTQp2x/O9boUrYf18Ad7OeKrSF+cHO0QXpmDoLDj2D21tO4ncsGbiIiQ8ECiQyPRgMMWAFY1gT+OAoc/Aimxt3JFltC/BDUzkUeRxw6Lxu4E1NuKj00IiJigUQGq2Z9fT6ScGA+8PtRmBorczPM7ueF8GAf2FlbIik1A/3CYhAek8wGbiIihbFAIsPVbBDgPeheyna2ae5v1sXNQTZwd3V3QM4dLeZsi0dwxBGZn0RERMpggUSGrc9HgG194Nr/gN3TYarEDJKIAni3v5ds4N6fdAW9FrOBm4hIKSyQyEhStqsAcRFA0g6YqipVqmCEr4sMlyzcwD1nGxu4iYgqGwskMnyuHQHfuynbW0TKtmnPqrg52RRp4A6POY+AZTE4wwRuIqJKwwKJjChl2wu4lQ5snWASKdslauAO8kHduwnc/iKB+6cLbOAmIqoELJDIOFS1BAbeTdk+s1N/uk0Furg7YMekDuiUn8C9+RTGrI3DVSZwExFVKBZIZDwcvYBuofrru0wrZftxCdxiJmlmX09YmGmwJyEVPRcfxMGzxr+hLxGRoWKBRMbl+df0PUm5t/RL//NyoQYigXtUe1dsHu+HJg7WMgJg+OrDeP+7eGTfYQM3EVF5Y4FExpeyHbAcsBIp23FAlOmlbD+Kp7Mttoa0x7Dnn5bHKw8m48VPDuFcmmlmRBERKYUFEhlnynafuynbUQtMMmX7UapZmGFuQDOsHNEataub4/Tlm+i79CA+/5kN3ERE5YUFEhlvynazwSafsv0o//B0xM5JHdG+sR1u52oxfdMpjF0Xh2tZOUoPjYjI6Km2QLp+/Tpat26Nli1bwtvbGytXrlR6SFRavQulbIumbRVytLXC2pFtMKOPh2zg/iE+FT0WRbGBm4joCVXRqXROPi8vD9nZ2ahevTqysrJkkXT06FHUrVu3RD9/8+ZN1KxZEzdu3ICtrW2Fj5ceIjkKiOwHQAcM+RJw7w21On35BiZ+dbygH2l0e1dM6ekGy6pmSg+NiMhglPT7W7UzSGZmZrI4EkShJOpEldaKxk2saGsXor8uAiRNPGX7Ubyca8ptSoY/31Aer4pORv8wJnATEZWFwRZIUVFR8Pf3h7Ozs9yjavPmzfc9ZtmyZXBxcYGVlRXatm2Lw4cPl/o0W4sWLVC/fn1MmTIFdnZ25fg3oErTdSbg6K2alO3HNXC/F+AtN74tnMC9NvY8fwEgIjKFAkmc9hLFiyiCHmT9+vWYPHkyQkNDcezYMfnYHj16IC3t3gxCfn9R8cvly5fl/bVq1cKJEyeQnJyML774AqmpqQ8dj5hlEtNyhS9kQCnbL35aKGU7HGrXzcOxSAL3rC2nMSryKNKZwE1EZDo9SGIGadOmTQgICCi4TcwY+fj4ICwsTB5rtVo0aNAAEyZMwNSpU0v9Gq+99hq6du2KQYMGPfD+2bNnY86cOffdzh4kA3IoDNg9HTCvDvzrIGDXGGqn1eoQGXse83YkIueOFnbWFlgwuAW6uDkoPTQiIkWYdA9STk4O4uLi0L1794LbNBqNPI6NjS3Rc4jZoowMfW+GeJPEKT03N7eHPn7atGnycfmXS5culcPfhCosZXvTWNWkbD8ugTvYzxVbQ/zg5miD9MwcBIcfweytp3E7lwncREQmVSClp6fLVWiOjo5FbhfHKSkpJXqOCxcuoEOHDvLUnPhTzDw1a9bsoY+3tLSUlWbhCxliyvaKQinbC5QekcFwd7LFlhA/BLVzkccRh87LBu7EFJ4qJiIymQKpPLRp0wbHjx+XPUgnT57Ev/71L6WHROWh5lOFUrY/Ai4dUXpEBsPK3Ayz+3khPNgHdtaWSErNQL+lMQiPSWYDNxGRKRRIYrWZWKZfvKlaHDs5OSk2LjIQTNl+JNF/tHNSB3Rzd0BOnhZztsUjOOKI3ACXiIiMuECysLBAq1atsHfv3oLbRJO2OPb19VV0bGRgKdt/Jas2ZftRxAzSqsDWeLe/FyyrarA/6Qp6LY7CvkT15kgRERlFgZSZmSlPgYmLIJbii+sXL16Ux2KJv9geJDIyEgkJCRg3bpyMBggODlZ45GQQqtUCBqwQayCBY5FA4ndKj8ggV4eO8HXB1pD29xq4I9jATURk0Mv89+/fjy5dutx3e2BgICIiIuR1scR/wYIFsjFbZB4tWbJELv+vDNxqxEjsngEcWgpUtwNeiwWsubz9QURB9J8dibJ5W3B3ssGSoc+iqaON0kMjIlLk+9tgCyRDxwLJSNzJBlZ2BVJPAU16AK+sF1MnSo/KYIlTbFM2npCzSeLUm9gEd9jzDeVsExGRKTDpHCSiMqVsn93FlO3H6OLugB0TOxYkcM/cchpj1h7FVSZwE5HKsEAi0+foBXSfrb++azqQfk7pERk0extLhAf5YFZfT1iYabAnIQ09Fx9E1JkrSg+NiKjSsEAidWg7DnDtpE/ZFkv/mbL92ATuke1dsXm8H5o4WMsIgBFrDmPu9nhk32EDNxGZPhZIpKKU7eX6lO3Lx5iyXUKezrZyldvw5xvK41XRyRiw7BDOpTFbiohMm+ZJl+Jzh3syGkzZLpNqFmZ4L8AbK0e0Ru3q5oj/8yb6Lj2Iz3++wARuIjJZpS6QRB5Rnz59UKNGDdkFXrt2bXmpVauW/JPIoDFlu8z+4emIXZM6okMTO9zO1WL6plMYuy4O17JylB4aEVG5K/Uyfz8/P/lb48SJE+XmsMWX/3bq1AlqwGX+Ruzv68ByP+Dm78BzI4B+S5UekVHRanVYE5OM+TuT5FYloqn748Et0LGpvdJDIyJSLgfJ2toacXFxcHNzg5qxQDJyyQeBSH8AOmDIl4B7b6VHZHROX76BiV8dL+hHGtXeFW/1dINlVTOlh0ZEVPk5SD4+Prh06VJpf4zIsLh2ANqF6K9vnQBkcg+y0vJyrolthR
q4V0cno39YDM6kZig9NCKiJ1bqGaTffvsNr776KoYNGwZvb2+Ym5sXub958+ZQA84gmQCmbJebvQmpeGvjSVzN0idwT+/jIQsnJnATkWpOsf3000945ZVXcP78+XtPUqWK7EsSf+blqSMjhQWSiUg9DXzaGcjLAfr+H9B6pNIjMlppGbcxZcNJHLgbKNnV3QHzBzWHnbWl0kMjIqr4AsnT0xMeHh546623Htik3bChfrrd1LFAMiGxy4Bd7wDm1YF/HQTsGis9IqNu4I6MPY95OxKRc0cLO2sLLBjcAl3cuEkwEZl4gSSW9584cQKNG6v7S4QFkgnRaoF1AUDyAcD5OWDUbsCs6KljKp3ElJuY+OVxJN3tRwpq54KpvdxhZc4GbiIy0Sbtrl27ygKJyGQwZbvcuTvZYkuInyyMhIhD52UDtyiciIiMQalnkD799FPMnTsXI0eORLNmze5r0u7Xrx/UgDNIJujXjcA3o4AqZsDIXUADH6VHZBL2JaXJ3qT0zGxYVNVgWi93WTixgZuITOoUm0b8tv2wJ2OTNhm7b0YDv24AarsCr0YDltZKj8gkiOLo7Y0nsTdRH6fQ2c0eCwa1kCGTREQmcYpNq9U+9KKW4ohMWO+PANv6wF/JwK5pSo/GZIiVbKsCW+Pd/l4yBmB/0hX0WhyFfXcLJiIiQ1OqAik3NxdVq1bFqVOnKm5EREqqVgsYsELMhwLH1gKJ3ys9IpMhZphH+Lpga0h7uDnaID0zB8ERRzB762nczuUvV0RkxAWS6Dd6+umnOVNEKkrZDmHKdjlzc7K5r4E7YBkTuInIsJT6FNv06dPxzjvv4Nq1axUzIiJD0HUm4OgN3LoKbAkBSteqR48hlvvP7ueF8CAfmZWUmJIB/6XRWBd7XobOEhEprdRN2s8++yzOnTsnT7eJUEiRi1TYsWPHoAZs0lYBpmxXiisZ2Xhzw4mCBO7uHg74cGBz1GUCNxEp+P1dtbRPHBAQ8KRjIzIOjl5At1Bg93Rg13TApSNTtiuAWMkmZpLEqbb/7EjEnoQ09Fx8EB8PboGOTe2VHh4RqVSpZ5BIjzNIakrZ7g8kRzFluxLEX76JiV/9grNpmfJ4dHtXTOnpBsuqTOAmIgPPQcoXFxeHhIQEed3Ly0ueelMTFkgqcuMPYLkvcPsG0OltoMs7So/IpP2dk4cPvk/Aup8uyGPPerZYMvRZNHZgJhURGXCBlJaWhiFDhmD//v2oVauWvO369evo0qULvvrqK9jbq2NKnAWSyjBlu9L9EJ+KtzaewF+3cmFlrsHMvp54pc3TTOAmIsMMipwwYQIyMjJw+vRpuZJNXEQuknjB119//clGTWSomg0Cmg0GdHnAt2OAbP0pIKo4//B0xK5JHdGhiR1u52oxfdMpjF0Xh2tZOUoPjYhUoNQzSKLq2rNnD3x8iv4GffjwYbzwwgtyNkkNOIOkQn9fB5b7ATd/B54LBPotUXpEqqDV6rAmJhnzdyYhJ08rm7rZwE1EBrnVSPENagVxm7iPyLRTtpffTdmOZMp2JdFoqmB0h0bYNL6d7EMSsQAj1hzGe9vjkX2HobVEVDFKXSB17doVEydOxOXLlwtu++OPP/DGG2+gW7du5T0+IsPi2rFQyvYEpmxXIi/nmtgW0h7Dn28oj1dHJ6N/GBO4ichACqSwsDA5PeXi4oJnnnlGXlxdXeVtS5curZhREhlkynY6U7YrWTULM7wX4I3Vga1Rt8a9BO61TOAmonJWpmX+4kdEH1JiYqI89vDwQPfu3aEm7EFSOaZsKy4t4zambDhZkMDdxc0eCwa3gB0TuIlIyRwktWOBRDgUpk/ZNq8O/OsgU7YVIP75Egnc83YkIueOVu7rJoqkLm4OSg+NiNRYIO3du1deRCZS8cbsNWvWQA1YIBFTtg1HYspNTPzyOJLu9iMFtXPB1F7uclNcIqJKWcU2Z84cuZxfFEjp6en466+/ilyIVEOjAQJWAFY1gcvHgKgFSo9ItdydbLElxA/Bfi7yWMwq9QuLRsKfN5UeGhEZqVLPINWrVw/z58/H8OHDoWacQaICTNk2KPuT0vDmhpNIz8yGhZlGziSJwokJ3ERUoTNIOTk5aNeuXWl/jMh0MWXboHR2c8DOSR3Qzd1BBku+uz0eQeFHZH4SEVFJlbpAGj16NL744ovS/hiRaev9EWBbH/grGdjFzWyVJlayrQpsjff6e8GyqkaudOu5KAr7EplbRUQVdIpNhESuXbsWzZs3l5fiqdoLFy6EGvAUG91HNGtH9hNrq4AhXwLuvZUeEQEySPL1L3+RmUkCG7iJ1O1mRa1i69Kly8OfrEoV/Pjjj1ADFkj0QLtnAIeWAtXtgNdiAWsuNzcEt3Pz8OHORITHnJfH7k42WDzkWbg52Sg9NCKqZMxBKgGRBi7eHI1Gg9q1a2Pfvn0l/lkWSPRAd7KBlV2B1FNAkx7AK+vFbw5Kj4ru2peUhikbTiA9M0eeepvex0NuXcIGbiL1uFlRTdqm5tChQzh+/HipiiOih6pqCbz4KWBmAZzdBcSFKz0iKkQESO6Y2BGd3eyRfUeLWVtOY3TkUVzNZAM3ERWl+gKJqNw5egHdZ+uv75oOpJ9TekRUiL2NJcKDfBDq7yljAPYmpqHn4oOIurtlCRGRQRdIUVFR8Pf3h7Ozs5z+3rx5832PWbZsmTxNZmVlhbZt2+Lw4cOleg3xvJ06dYKPjw8+//zzchw9qV7bcYBrJyD3ln7pf16u0iOiYv/tB/u5ynDJJg7WMgJgxJrDmLs9Htl38pQeHhEZAIMtkLKystCiRQtZBD3I+vXrMXnyZISGhuLYsWPysT169JDbn+Rr2bIlvL2977tcvnxZ3h8dHY24uDhs3boVH3zwAU6ePFlpfz9SQ8r2cqZsGziPerbYNqG97EMSVkUnY8CyQziXpl/xRkTqZRRN2uK3vU2bNiEgIKDgNjFjJGZ+wsLC5LHYE65BgwaYMGECpk6dWurXmDJlCry8vBAUFPTA+7Ozs+WlcJOXeD02aVPJU7Z3Ag3aKD0ieog98al465uTuJaVAytzDWb29cQrbZ5mAzeRSpu0q5bkycQMS0n16ydyYCqWSPMWMz/Tpk0ruE2sROvevTtiY2NLPEMliiobGxtkZmbKeIKXXnrpoY+fN2+e3IeOqNQp22d2Ar9uAL4dC7waDVhaKz0qeoDuno7YWb8D/r3hBA6eTcf0TaewP+kKPhzYHHVqWCg9PCKqZCUqkArP3DyK+E0rL6/iz9+LTXLF6zg6Oha5XRwnJiaW6DlSU1MxYMAAeV0815gxY+SM1MOIYkyc0is+g0RUopTtC7F3U7anAf2WKj0ieggHWytEBrfBmphkzN+ZhB/iU3H8UhQ+HtwCHZvaKz08IjK0AknMtJiaRo0a4cSJEyV+vKWlpbwQlVq1WsCAFUCkP3BsLdC0F1O2DZhGUwWjOzSC7zN1MfGr4ziXlikbuEe1d8WUHm5M4CZSCYNt0n4UOzs7mJmZyVmgwsSxk5OTYuMieijXDkC7EP31rROATO4JZui8nGtiW8i9B
u7V0ckIWBYjty4hItNXohmkB/XvHDhwABcvXpT9QIW9/vrrqGgWFhZo1aoV9u7dW3D6T8xyieOQkLtfQkSGputM4Ld9+pTtLSFM2TYC1SzM8F6AtwyWfGvjSbmfm//SaCZwE6lAqVex/fLLL+jduzdu3bolC6U6derInqDq1avDwcEB//vf/8plYKJx+tw5fcDes88+KzfBFfvAidd7+umn5TL/wMBA/Pe//0WbNm2waNEifP3117IHqXhvUkXgViNUJqmngU+7AHnZQJ+FgM8opUdEJZSWcRtTNpzEgbuBkl3c7LFgcAvYWfPUO5ExqbC92Dp37oymTZtixYoV8gVEH4+5uTmGDRuGiRMn4sUXXyyP8WP//v0P3BhXFEURERHyuljiv2DBAqSkpMjMoyVLlsjl/5WBBRKVWewyYNc7gHl14F8HAbvGSo+ISkj8cxlx6Dzm7UhEzh0t7KwtZJEktjAhIpUXSLVq1cLPP/8MNzc3eV0sq/fw8JC3ieKlpKvIjB0LJCozsehhXQCQfABwfg4YtRswM1d6VFQKiSk3MfHL40i6248U1M4FU3u5s4GbSM2b1YrZIpE5JIhTaqIPSRAvdunSpScZM5E6MGXb6Lk72cptSkRhJIhZpf5hMbJwIiLTUOoCSfQDHTlyRF4X+5jNmjVL7mM2adIkuY0HEZVAzaeAvv+nvy4KpEul20eQlCdmi2b380J4sI/sQxKzSf2WxmBNdLI8FUdEKiuQxJ5l9erVk9fff/991K5dG+PGjcOVK1fw6aefVsQYiUyT90Cg2UuATqtP2c7OVHpEVAai/2jnpA7o5u6AnDwt3t0ej6DwI3IDXCIyXkaxF5shYg8SlYu/rwPL/YCbvwPPjWDKthET/5R+9tMFzP0uAdl3tKhbwwIfiQZudzZwE6miB4mIKiBlG1X0KduJ3yk9IiojkYk03NcF2ya0h7uTDa5m5SA44ghmbz2N27kVvwUTESk8g+Tq6vrIcLTyykEydJxBonK1eyZwaAlQvS4wLhawqfgsL6o4oiD6cGciwmPOy2NRMC0e8izcnGyUHhqR6t2sqGX+ixcvLnKcm5srwyN37tyJKVOmYOrUqVADFkhUru5kAyu7Aam/Ak1eAF75minbJmBfUhqmbDiB9MwcWFTVYHpvD4zwZQI3kUkWSA+zbNkyHD16FOHh4VADFkhU7lLjgU87M2XbxIhm7SkbT2B/kj6BWzRzzx/UHHWZwE2kjh6kXr164ZtvvimvpyNSH0dPoHuo/vruGUC6fqsdMm72NpYID/JBqL+nnEXam5iGnosPIuruliVEZJjKrUDauHGj3CeNiJ5A23GAaycg9xbw7RggL1fpEVE5EKfUgv1csWW8H5o4WMtZpRFrDuO97fHIvsMGbiJDVOpTbCIosvD5c/HjYi80kYP0ySefYOzYsVADnmKjCnPjD2C5L3D7BtDxLaDrdKVHROXcwP3B9wlYG3tBHnvUs8XSoS3R2IEN3ERG3YM0Z86cIsdi2xF7e3u5ia27uzvUggUSVahT3wAbRwJVNMDIXUCDNkqPiMrZnvhUvPXNSVzLyoGVuQYz+njin22fZgM3kak1aasNCySqcN+MAX79GqjtCrwaDVhaKz0iKmdpN2/j3xtO4ODZdHnc3cMRHw5sxgZuImMpkMSTlZRaigUWSFThmLKtClqtDmtikjF/Z5LcqkQ0dX88uAU6NrVXemhEJqlcCyRxGq2k0755eepoOGSBRJUi+SAQ6S+6/YAhXwDufZQeEVWQ+Ms3MfGrX3A2Tb8n30g/V7zV001uiktEBlogHThwoOD6+fPnZRhkUFAQfH195W2xsbGIjIzEvHnzEBgYCDVggUSVhinbqm3gFgncS4Y+i6aObOAmMvgepG7dumH06NEYOnRokdu/+OILfPrpp9i/fz/UgAUSVRqmbKvO3oRUvLXxpNzPzbKqBu8wgZvI8IMixWxR69at77td3Hb48OHSj5SIHq2qJfDip4CZJXB2N3B0jdIjogrWzcMROyd1RGc3e2Tf0SJ062mMjDgi85OIqHKUukBq0KABVq5ced/tq1atkvcRUQVgyrZqE7hn303g3pd0Bb0WR2FfYprSQyNShVKfYvv+++8xcOBANG7cGG3btpW3iZmjs2fPyq1GevfuDTXgKTaqdFotsC4ASD4AOD8HjNoNmJkrPSqqBEkpGXj9y1+QlJohj4PauWBqL3c2cBMZ0ik2UQCdOXMG/v7+uHbtmryI6+I2tRRHRIrQaICA5YBVTeDyMeDAfKVHRJXEzckGW0L8EOznIo8jDp1Hv7BoJPxZ8ggWIiodBkWWEWeQSDFM2Va1/UlpeHPDSaRnZsPCTIO3e7kjuJ0LNBo2cBNV+iq2kydPwtvbW+YhieuP0rx5c6gBCyRSFFO2Ve1qZrZc5bb3bj9Sp6b2+GhwC9m3RESVHBQpNqR1cHAoCI180I+J2xkUSVQJmLKteuLf4M9+uoC53yXIlW51a1hgweDm6OrOnCyiSiuQLly4gKef1m+iKK4/SsOGDaEGLJBIcUzZJgBnUvUN3IkpbOAmKgluVlvBWCCRQWDKNt1N4P5wZyLCY87LYzdHfQK3aO4mokpaxSa2FPnuu+8Kjt966y3UqlUL7dq1e+zsEhGVs64zAMdmwK2rwNYQcd5F6RGRAsRsUai/F8KDfWBnbSHjAPzDohF56PwD2yGI6PFKXSB98MEHqFatWkGqdlhYGObPnw87Ozu88cYbpX06InoSTNmmQrq4OWDHRH0Cd87dBO7RkUdlUzcRVXCBdOnSJRkSKWzevBmDBg3C2LFj5Ua1Bw8eLO3TEdGTYso2PSKBW6x067HoIA6cuaL00IhMu0CytrbG1atX5fXdu3fjH//4h7xuZWWFv//+u/xHSESP13Yc4NoJyL0FfDsGyMtVekSkILGgJsjPFVvG+6GJg7XMTApccxjvbY9H9h11rDQmqvQCSRREo0ePlpfC6dmnT5+Gi4s+5ZWIKhlTtukBPOrZYtuE9hjhq19dvDo6GQHLDuFcmn7FGxGVY4G0bNky+Pr64sqVK3Lvtbp168rb4+LiMHTo0NI+HRGVl5pPAX3/T3/94EfApcNKj4gMpIH73f7eWB3YGnVqWMjtSfoujZYZSmzgJno4LvMvIy7zJ4PFlG16iLSbt/HvDSdw8Gy6PO7u4YgPBzZDXWsmcJN63KyoZf6CaMYeNmyYXNr/xx9/yNvWrVuH6Ojoso+YiMpH7wWAbX3gr2Rg1zSlR0MGxMHWCpHBbTCjj4fcx21PQip6Lj6IKDZwEz15gSROq/Xo0UMu9T927Biys/XLR0UlJiIAiEhh1WoBA1aICWLg2Fog8V5uGZHY1HZ0h0bYNL4dGjtY40pGNkbcbeAWgZNEVMYCae7cuVixYgVWrlwJc3Pzgtv9/PxkwUREBsC1A9Bugv761glARqrSIyID4+VcE9tCijdwx8itS4ioDAVSUlISOnbseN/t4nze9evXy2tcRPSkmLJNj1HN4l4Dt9jsVuzn5r80GmtjmcBNVOoCycnJCefO3R9EJ/qPGjVq
VF7jIqInxZRtKqFuHo7YMakDOjW1R/YdLWZtOY2REUfk6TcitSp1gTRmzBhMnDgRP//8swwju3z5Mj7//HO8+eabGDduXMWMkojKhinbVEIONlaICL6XwL0v6Qp6LY7CvsQ0pYdGZBzL/MXDRTO22Frk1q1b8jZLS0tZIL333ntQCy7zJ6Oh1QLrAoDkA4Dzc8Co3YDZvf5BouKSUjLw+pe/yE1vhaB2Lpjay11mKhGp5fu7zDlIOTk58lRbZmYmPD095RYkYquR/I1sDZ3opXr55ZeLHH/55ZcICAgo0c+zQCKjcuMPYLkvcPsG0PEtoOt0pUdEBk6saPtwZyLCY87L46aO1lg85FmZzk1kzCq8QCpMLPUXCdvz589HSkoKjI0o8sQ2KRcuXECNGjVK9DMskMjonPoG2DgSqKIBRu4CGrRRekRkBPYnpeHNDSflfm4iO+ntXu4Ibuci4wKIjFG5B0WKImjatGlo3bq1DIjcvHmzvD08PByurq74v//7P7zxxhswRlu3bkW3bt1KXBwRGSXvgUCzlwCdFvh2LJCdqfSIyAh0dnPAzkkd0M3dATl5WpmXFMwGblKBEhdIs2bNwvLly+VMy/nz5zF48GCMHTtWFkYLFy6Ut7399tvlNrCoqCj4+/vD2dlZNoPnF2SFiVkrMR4rKyu0bdsWhw+Xbe+pr7/+usjpNiKTxZRtKgM7a0usCmyN9/p7wbKqBgfOXEHPRWzgJtNW4gJpw4YNWLt2LTZu3Ijdu3cjLy8Pd+7cwYkTJzBkyBCYmZVv815WVhZatGghi6AHWb9+PSZPnozQ0FAZUCkeKxK+09Lu/QfbsmVLeHt733cRK+8KT7UdOnQIvXv3LtfxExkkpmxTGYlfVIf7umDbhPZwd7LB1awcOZM0e+tpJnCTSSpxD5KFhQWSk5Px1FNPyWPRjC1mbJo1a1Yp/2Fu2rSpSAO1mDHy8fFBWFiYPNZqtWjQoAEmTJiAqVOnlvi5xR5yu3btwmefffbYU4z526rkF1bi9diDREZp90zg0BKgel1gXCxg46j0iMiIG7jdHG2wZOizcHOyUXpoRJXfgyRmjESRlK9q1apy5ZoSxAq6uLg4dO/eveA2jUYjj2NjYyvk9JqINRBvaP5FFEdERosp2/QExHL/UH8vhAf7wM7aQsYB+IdFI/IQE7jJdFQt6QPFhz4oKEhmHgm3b9/Gq6++el9j87fffouKlp6eLgs2R8eiv/WK48TExBI/j6gexSyY2ID3cUSDujilV3wGicioU7Y/7XwvZdtnlNKjIiPTxc0BOyZ2xJSNJ7A/6QpCt55G1JkrmD+oOepa678riIxViWeQAgMD4eDgUDCDMmzYMNlAXXhWRVyMiRhvampqkZmxhxGFoZiKK3whMmpM2aZyYG9jifCgewncexPT0HPxQVkoEaliBkks5zcUdnZ2silcFDeFiWOxVxwRlVDbccCZXfqU7W/HMGWbytwnGuTniraN6soE7rNpmRix5jBGtXfFWz3dYFmVCdykgr3YDIGY8WnVqhX27t1bcJto0hbHvr6+io6NyKhoNEDAcsCqJnD5GBC1QOkRkRETKdtildsI34byeHV0MgKWHcK5NP2WJUTGRGPI6dbHjx+XF0GsoBPXL168KI9FP9DKlSsRGRmJhIQEuVGuiAYIDg5WeORERqbmU0Df/9Nfj/oIuFS2PDGi/Abud/t7Y9WI1qhTwwIJf95E36XR+PznC2zgJqNSLluNVIT9+/ejS5cuD+yFioiIkNfFEv8FCxbI7U1E5tGSJUvk8v/KwK1GyOR8Mwb49WugtivwajRgqcwqVTIdaTdv498bTuDg2XR5/A9PR3w4sLksnIhUsRebGrFAIpPz93VguR9w83fguRFAv6VKj4hMgFarw5qYZMzfmSS3KhFN3R8PboGOTe2VHhqp1M3yzkEiIrWlbH+v9IjIBIhNbUd3aIRN49uhsYO13MNNNHCLPd2YwE2GjAUSEd3j2gFoF6K/vnUCkMm9tqh8eDnXxLaQ9hj+fOEG7hicSWUDNxkmFkhEVFTXmYCjN3ArHdjClG0qP9UszPBegDdWB7ZG3RoWSEzJgP/SaKyNZQI3GR4WSET0gJTtlYCZJXB2FxBnOBloZBq6eThix6QO6NTUHtl3tJi15TRGRhyRp9+IDAULJCJ6dMr2rulM2aZy52BjhYjgewnc+5KuoNfiKOxL5GldMgwskIjo4Snbrp2A3Fv6lO28XKVHRCaawC16k9wcbZCemYPgiCOYvfU0G7hJcSyQiOjBmLJNlcTNyQZbQvwQ7OcijyMOnUe/sGgZMkmkFBZIRFSKlO0jSo+ITDiBO9TfS552s7O2xJnUTPQPi5Gr3USWElFlY4FERI/mPRBo9hKgy9OfasvOVHpEZMI6uzlg16QO6ObuIIMlRV5SUMQRpGXcVnpopDIskIjo8XovAGzrA38lA7veUXo0ZOLqWltiVWBrGQlgWVWDqDNX0GvRQfyYmKr00EhFWCARUSlTtiOZsk2V0sAtQiW3T2gPdycbXM3KwciIo2zgpkrDAomIypCyHcKUbaoUTRxtsHm8H0b6uRY0cIvepKQUJnBTxWKBRERlSNm+ypRtqtQG7ln+nncbuC2QlJoB/7BoRB5iAjdVHBZIRFRyTNkmhRu4d0zsiM5u9si5o0Xo1tMYHXkU6ZlM4KbyxwKJiEqHKdukIHsbS4QH3Uvg3puYhp6LDuLAmStKD41MDAskIio9pmyTASRwbxnvhyYO1nIGKXDNYRkJkH2HDdxUPlggEVHpMWWbDIBHPVtsm9AeI3wbymMRKhmw7BDOpbGBm54cCyQiKhumbJOBNHC/298bq0a0Rp0aFnJ7kr5Lo/H5zxfYwE1PhAUSEZUdU7bJQHT3dMTOiR3QoYkdbudqMX3TKYxdF4drWTlKD42MFAskIirHlO1pSo+GVMzB1gqRwW0wo48HLMw0+CE+FT0WReHgWTZwU+mxQCKickzZXsuUbVKURlMFozs0wqbx7dDYwRpXMrIxfPVhzGUDN5USCyQiKueU7QlM2SbFeTnXxLaQ9nK7EmFVdLJM4D6TygZuKhkWSERUzinb6UzZJoNQzcJMbni7OrA16tawQGJKBvyXRmNtLBO46fFYIBFRxaRsH12j9IiIpG4ejtgxqQM6NbVH9h0tZm05jZERR5jATY/EAomIKiZle/cMpmyTwXCwsZIJ3KF3E7j3JV1Bz0VR2JfE08H0YCyQiKh8MWWbDLiBO9jPFVtD/ODmaIP0zBwEhx/B7K2ncTuXDdxUFAskIipfTNkmA+fuZIstIX4I9nORxxGHzqNfWLQMmSTKxwKJiCo4ZXsBcOmw0iMiui+BO9TfCxHBPrCztsSZ1Ey5yk1sV6LVsoGbWCARUYWnbGuBb8cyZZsMUmc3B+yc1AHd3B2Qk6eVG94GRRxBWsZtpYdGCmOBREQVhynbZATEDNKqwNZ4r78XLKtqEHXmCnotOogfE1OVHhopiAUSEVViyvZ3So+I6IGqVKmC4b4u2D6hPdydbHA1KwcjI44idMspNnCrFAs
kIqqElO0J91K2M/hbORmuJo422DzeDyP9XOVxZOwF2ZuUlMIEbrVhgUREFa/rDMCxGXDrKrCVKdtk+A3cs/w97zZwWyApNQP+YdGIPMQEbjVhgURElZSy/endlO3dTNkmo2ng3jGxI7q42SPnjhahW09jVORRJnCrBAskIqocTNkmI2RvY4k1QT6YfTeB+8fENPRcdBAHzlxRemhUwVggEVHlYco2GWkDd9DdBO6mjtZyBilwzWEZCZB9hw3cpooFEhEpl7J9YL7SIyIqVQL31pD2CPRtKI9FqGTAskM4m8oGblPEAomIlEvZPvgRU7bJ6Bq45/T3xurA1qhTw0JuT9J3aTQ+++kCG7hNDAskIqp8TNkmI9fNwxE7J3ZAhyZ2yL6jxYzNpzB2XRyuZeUoPTQqJyyQiEgZTNkmI+dga4XI4DaY0ccDFmYa/BCfih6LomQSNxk/1RZIH330Eby8vODt7Y3PPvtM6eEQqQ9TtskEaDRVMLpDI2wa3w6NHaxxJSMbI+42cDOB27ipskD69ddf8cUXXyAuLg5HjhxBWFgYrl+/rvSwiNSHKdtkIryca2JbSHuMKNLAHYMzbOA2WqoskBISEuDr6wsrKytUq1YNLVq0wM6dO5UeFpE6MWWbTEQ1CzO8e7eBu24NCySmZMB/aTTWxjKB2xgZZIEUFRUFf39/ODs7y/yJzZs33/eYZcuWwcXFRRY5bdu2xeHDJV8JI06r7d+/X84a/fXXX/L6H3/8Uc5/CyIqEaZskwk2cO+Y1AGdmtrLBu5ZW05jZMQRefqNjIdBFkhZWVlyVkcUQQ+yfv16TJ48GaGhoTh27Jh8bI8ePZCWllbwmJYtW8pCqPjl8uXL8PT0xOuvv46uXbvixRdfxPPPPw8zM7NK/BsSURFM2SYT42BjJfdyy0/g3pd0Bb0WR2Ff4r3vKTJsVXQGPu8nZpA2bdqEgICAgtvEjJGPj4/sHRK0Wi0aNGiACRMmYOrUqaV+jdGjR2PAgAHo06fPQx+TnZ0tL/lu3rwpX/PGjRuwtbUt9WsSUTFaLbAuAEg+ADg/B4zaDZiZKz0qoieWlJKBiV/9Ik+5CSJoclpvD5mpRJVPfH/XrFnzsd/fBjmD9Cg5OTmyubp79+4Ft2k0GnkcGxtb4ufJn21KSkqSp+fEDNSjzJs3T76h+RdRHBFROWLKNpkoNycbbB7vh2A/F3kcGXsB/cKiZcgkGS6jK5DS09ORl5cHR0fHIreL45SUlBI/T//+/eWptmHDhiE8PBxVq1Z95OOnTZsmq838y6VLl8r8dyCih2DKNpkoMVsU6u8lT7vZWVviTGom+ofFyNVuWq1Bn8hRLaMrkMqLmG2Kj4+Xy/xbtWr12MdbWlrKqbjCFyKqAEzZJhPW2c0BuyZ1QHcPB+TkaWVeUmD4YaRl3FZ6aGTsBZKdnZ1sqE5NLZqXIo6dnJwUGxcRlSOmbJMJq2ttiZUjWmNugDcsq2pw8Gw6ei06iB8TmQNmSIyuQLKwsJAzPnv37i24TTRpi2ORbUREJoAp22TixAKkYc83xPYJ7eFRzxZXs3IwMuIoQrecYgK3gTDIAikzMxPHjx+XFyE5OVlev3jxojwWS/xXrlyJyMhIGfo4btw4GQ0QHBys8MiJqNwwZZtUoImjaOBuh1HtXQsauEVvklj5RsoyyGX+IrixS5cu990eGBiIiIgIeV0s8V+wYIFszBaZR0uWLJHL/w1tmSARPYE72cDKbkDqr0CTF4BXvha/eis9KqIKsT8pDW9uOIn0zGyZnTS9t4fcukTMNlHlf38bZIFkDFggEVWS1Hjg085AXjbQZyHgM0rpERFVGFEcTdlwQgZLCl3dHTB/UHO58o3Kh8nmIBGRyjBlm1REFEJrgu4lcP+YmIaeiw7iwBl9wUSVhwUSERm+tuMA105A7i3g2zFAXq7SIyKqMOKUWpCfK7aG+KGpo7WcVQpccxjvbotH9h02cFcWFkhEZHwp21ELlB4RUYVzd7LF1pD2cmsSYU1MMgKWHcLZVDZwVwYWSERkfCnbokBiyjapJIF7Tn9vrA5sjTo1LOT2JH2XRuOzny6ALcQViwUSERkPpmyTSnXzcMTOiR3QoYkdsu9oMWPzKYxdF4drWTlKD81ksUAiIuPClG1SKQdbK0QGt8GMPh6wMNPgh/hU9FgUhSg2cFcIFkhEZOQp298rPSKiSqPRVMHoDo2waXw7NHawxpWMbIxYc1ju6cYG7vLFAomIjDRlO0R/fWsIU7ZJdbyca2JbSHsMf17fwL06OlkmcJ9hA3e5YYFERMap60zA0Ru4dVVfJLFhlVSmmoUZ3gvQN3DXrWGBxJQM+C+NxtrY82zgLgcskIjIOFW1BF5cCZhZAmd3A0fXKD0iIsUauHdM6oBOTe1lA/esLacxMuKIzE+ismOBRETGiynbRJKDjRUign0QejeBW2xV0nNRFPYlpSk9NKPFAomIjBtTtokKEriD7yZwuznaID0zB8HhRzB762nczmUDd2mxQCIi48aUbaL7Eri3hPgh2M9FHkccOo9+YdEyZJJKjgUSERk/pmwT3ZfAHervJU+7iQ1wz6Rmov+yGKyJToZWywbukmCBRESmgSnbRPfp7OaAnZM6oJu7A3LuaPHu9ngERRxBWsZtpYdm8FggEZHpYMo20X3EDNKqwNYyEsCyqkYmb/dadBA/JjI/7FFYIBGR6WDKNtFDG7hFqOT2Ce3hUc8WV7NyMDLiKEK3nGID90OwQCIi08KUbaKHauJog83j22FUe1d5HBl7QSZwJ6Uwgbs4FkhEZHqYsk30UJZVzTCzr2dBA3dSagb8w6IREZPMBO5CWCARkelhyjZRiRu4u7jZywbu2dvimcBdCAskIjJNTNkmeiwxg7QmyAeziyRwH8R+JnCzQCIiE8aUbaISNXAH3U3gbupoLWeQgsKP4N1t8ci+o94GbhZIRGS6mLJNVKoE7q0h7RHo21Aer4lJRsCyQzibqs4GbhZIRGTamLJNVKoE7jn9vbE6sDXq1LCQ25P0XRqNdT9dUF0DNwskIjJ9TNkmKpVuHo6ygbtDEztk39Fi5uZTGLM2DteycqAWLJCISB2Ysk1UKg42VogMboMZfTxgYabBnoRU9FgUJZO41YAFEhGpA1O2iUpNo6mC0R0aYdP4dmjsYI0rGdkYseYw3tseb/IJ3CyQiEhlKdsT9NeZsk1UYl7ONbEtpD1G3G3gXh0tGrhjcMaEG7hZIBGRunSdATg2Y8o2USlVszDDu3cbuOvWsEBiSgb8l0Zjbex5k2zgZoFERCpM2f6UKdtET9DAvWNSB3Rqai8buGdtOW2SCdwskIhIpSnbs/XXmbJNVKYG7ohgH4QWSeCOwj4TSuBmgURE6tT2VaZsEz1hAnfw3QRuN0cbpGfmIDj8CGZvPW0SDdwskIhInZiyTVRuCdxbQvwQ7OcijyMOnUe/sGgZMmnMWCARkXoxZZuo3BK4Q/295Gk3sQHumdRM9F8WgzXRydBqjbOBmwUSEambSNlu/jJTtonKQWc3B5nA3c3dATl3tHh3ezyCIo
4gLeM2jA0LJCIikbJdswFTtonKgZ21JVYFtsZ7Ad6wrKqRydu9Fh3Ej4nGlTvGAomISPQhMWWbqFwbuIc/3xDbJ7SHRz1bXM3KwciIowjdcspoGrhZIBERCS7tmbJNVM6aONpg8/h2GNXeVR5Hxl5A/7AYJKUYfgI3CyQionxM2SYqd5ZVzTCzr2dBA3dSagb8w6IReciwE7hZIBER5WPKNlGFN3B3cbOXDdyhW09jVORRg03gVkWBNGDAANSuXRuDBg0q1X1EpEJM2SaqMGIGaU2QD2bfTeD+MTENPRcdxIEzV2BoVFEgTZw4EWvXri31fUSkUkzZJqrQBu6gIgnc2QhccxjvbotH9h3DaeBWRYHUuXNn2NjYlPo+IlIppmwTVVoCd6BvQ3m8JiYZAcsO4WyqYTRwK14gRUVFwd/fH87OzrKq3Lx5832PWbZsGVxcXGBlZYW2bdvi8GGm3RJRZaRsL9JfZ8o2UYUlcM/p7401Qa1Rt4aF3J6k79JofPbTBcUbuBUvkLKystCiRQtZBD3I+vXrMXnyZISGhuLYsWPysT169EBa2r0dg1u2bAlvb+/7LpcvX67EvwkRmRzvF5myTVQJuro7YsekDujY1B7Zd7SYsfkUxq6Lw7WsHCilKhTWq1cveXmYhQsXYsyYMQgODpbHK1aswHfffYc1a9Zg6tSp8rbjx49X+Dizs7PlJd/Nm8a9CR8RlSJl+8Kheynb/ZYqPSIik+RgY4WIIB+EHzqPD3ckYk9Cqjzd1rZRXXXOID1KTk4O4uLi0L1794LbNBqNPI6Nja3UscybNw81a9YsuDRo0KBSX5+IDCVl+zulR0RksjSaKjJUctP4dpjTz0ux4kiOBQYsPT0deXl5cHR0LHK7OE5JSSnx84iCavDgwfj+++9Rv379IsXVo+4rbNq0abhx40bB5dKlS0/wNyMi403ZnsCUbaIK5uVcEyN8XaAkxU+xVYY9e/aU6b7CLC0t5YWIVJyy/ds+IPVXfcr2K1+L9cpKj4qI1DiDZGdnBzMzM6SmFv1tTRw7OTkpNi4iUmnK9sCVTNkmUgmDLpAsLCzQqlUr7N27t+A2rVYrj319fRUdGxGpkIMHU7aJVELxAikzM1OuQstfiZacnCyvX7x4UR6LJf4rV65EZGQkEhISMG7cOBkNkL+qjYioUjFlm0gVFO9BOnr0KLp06VJwLAoiITAwEBEREXj55Zdx5coVzJo1SzZmi8yjnTt33te4TURUqSnby331KdsH5gNdpys9KiIqZ1V0SkdVGimRgySW+4sVbba2tkoPh4gq26lvgY3BQBUNMHIX0KCN0iMionL8/lb8FBsRkVFiyjaRSWOBRET0JCnbNRvcS9kmIpPBAomIqLxSthO2Kz0iIionLJCIiJ40Zdvvdf31ba8zZZvIRLBAIiJ6Ul2mA47NgFtX9SnbXPtCZPRYIBERlWfKtoU1cOe20iMiImPPQSIiMpmU7XGHgLrPcI82IhPAAomIqLzYNVZ6BERUTniKjYiIiKgYFkhERERExbBAIiIiIiqGBRIRERFRMSyQiIiIiIphgURERERUDAskIiIiomJYIBEREREVwwKJiIiIqBgWSERERETFsEAiIiIiKoYFEhEREVExLJCIiIiIiqla/AYqGZ1OJ/+8efOm0kMhIiKiEsr/3s7/Hn8YFkhllJGRIf9s0KCB0kMhIiKiMnyP16xZ86H3V9E9roSiB9Jqtbh8+TJsbGxQpUqVcq1sRdF16dIl2NraltvzmgK+Nw/G9+Xh+N48GN+XB+P7oo73RqfTyeLI2dkZGs3DO404g1RG4k2tX79+hT2/+AAa+4ewovC9eTC+Lw/H9+bB+L48GN+XhzOV9+ZRM0f52KRNREREVAwLJCIiIqJiWCAZGEtLS4SGhso/qSi+Nw/G9+Xh+N48GN+XB+P78nCWKnxv2KRNREREVAxnkIiIiIiKYYFEREREVAwLJCIiIqJiWCARERERFcMCycAsW7YMLi4usLKyQtu2bXH48GGo2ezZs2VSeeGLu7s71CgqKgr+/v4y/VW8D5s3by5yv1hvMWvWLNSrVw/VqlVD9+7dcfbsWaj9fQkKCrrvM9SzZ0+Yunnz5sHHx0em/Ts4OCAgIABJSUlFHnP79m2MHz8edevWhbW1NQYOHIjU1FSYupK8N507d77vc/Pqq6/ClC1fvhzNmzcvCIP09fXFjh07VPt5YYFkQNavX4/JkyfLpZTHjh1DixYt0KNHD6SlpUHNvLy88OeffxZcoqOjoUZZWVnyMyGK6AeZP38+lixZghUrVuDnn39GjRo15OdH/KOm5vdFEAVR4c/Ql19+CVN34MAB+WX2008/4YcffkBubi5eeOEF+X7le+ONN7Bt2zZs2LBBPl5sn/Tiiy/C1JXkvRHGjBlT5HMj/hszZWJ3iP/85z+Ii4vD0aNH0bVrV/Tv3x+nT59W5+dFLPMnw9CmTRvd+PHjC47z8vJ0zs7Ounnz5unUKjQ0VNeiRQulh2FwxH+6mzZtKjjWarU6Jycn3YIFCwpuu379us7S0lL35Zdf6tSi+PsiBAYG6vr3769Tu7S0NPn+HDhwoODzYW5urtuwYUPBYxISEuRjYmNjdWp+b4ROnTrpJk6cqFO72rVr61atWqXKzwtnkAxETk6OrNrFaZHC+72J49jYWKiZOE0kTp80atQI//znP3Hx4kWlh2RwkpOTkZKSUuTzI/YaEqdp1f75Efbv3y9Ppbi5uWHcuHG4evUq1ObGjRvyzzp16sg/xb83Yuak8GdGnL5++umnVfeZKf7e5Pv8889hZ2cHb29vTJs2Dbdu3YJa5OXl4auvvpKzauJUmxo/L9ys1kCkp6fLD6Sjo2OR28VxYmIi1Ep8wUdERMgvNjHFPWfOHHTo0AGnTp2S/QOkJ4oj4UGfn/z71EqcXhOnAVxdXfHbb7/hnXfeQa9eveQ/6mZmZlADrVaLSZMmwc/PT37ZC+JzYWFhgVq1aqn6M/Og90Z45ZVX0LBhQ/nL2cmTJ/H222/LPqVvv/0WpuzXX3+VBdHt27dln9GmTZvg6emJ48ePq+7zwgKJDJr4IssnmgdFwST+0fr6668xatQoRcdGxmHIkCEF15s1ayY/R88884ycVerWrRvUQPTbiF8q1Nq/V5b3ZuzYsUU+N2Lxg/i8iCJbfH5MlfhlVBRDN27cwMaNGxEYGCj7jdSIp9gMhJjGFb/NFl8RII6dnJwUG5ehEb+9NG3aFOfOnVN6KAYl/zPCz8/jiVO14r83tXyGQkJCsH37duzbt0824eYTnwtxav/69euq/cw87L15EPHLmWDqnxsxS9S4cWO0atVKrvYTCyAWL16sys8LCyQD+lCKD+TevXuLTP2KYzHdSXqZmZnyNzjx2xzdI04fiX+kCn9+bt68KVez8fNT1O+//y57kEz9MyR61kUBIE6R/Pjjj/IzUpj498bc3LzIZ0acQhI9fqb+mXnce/MgYlZFMPXPTXFarRbZ2dmq/LzwFJsBEUv8xXRm69at0aZNGyxatEg2yAUHB0Ot3nzzTZlxI
06riSWlIgJBzLQNHToUaiwOC//2KhqzxT/aorFUNEqKPoq5c+eiSZMm8h/8mTNnyv4JkfGi1vdFXETfmshrEQWkKK7feust+RuyiEAw9VNHX3zxBbZs2SL79fL7RETzvsjJEn+K09Ti3x3xPoncmwkTJsgvu+effx5qfm/E50Tc37t3b5n5I3qQxBL3jh07ylO0pko0oou2BvHvSUZGhnwPxKnoXbt2qfPzovQyOipq6dKluqefflpnYWEhl/3/9NNPOjV7+eWXdfXq1ZPvx1NPPSWPz507p1Ojffv2ySW1xS9iGXv+Uv+ZM2fqHB0d5fL+bt266ZKSknRqfl9u3bqle+GFF3T29vZyiXLDhg11Y8aM0aWkpOhM3YPeE3EJDw8veMzff/+te+211+RS7urVq+sGDBig+/PPP3Vqf28uXryo69ixo65OnTryv6XGjRvrpkyZortx44bOlI0cOVL+NyL+vbW3t5f/huzevVu1n5cq4n+ULtKIiIiIDAl7kIiIiIiKYYFEREREVAwLJCIiIqJiWCARERERFcMCiYiIiKgYFkhERERExbBAIiIiIiqGBRIRERFRMSyQiIjKyMXFRW4JRESmhwUSERmFoKCggn3lOnfuLPeeqywRERGoVavWfbcfOXIEY8eOrbRxEFHl4Wa1RKRaOTk5sLCwKPPP29vbl+t4iMhwcAaJiIxuJunAgQNYvHgxqlSpIi/nz5+X9506dUruRm5tbQ1HR0cMHz4c6enpBT8rZp5CQkLk7JOdnR169Oghb1+4cCGaNWuGGjVqoEGDBnjttdeQmZkp7xO7mQcHB+PGjRsFrzd79uwHnmK7ePEi+vfvL19f7Hb+0ksvITU1teB+8XMtW7bEunXr5M+KHdKHDBkid04nIsPCAomIjIoojHx9fTFmzBj8+eef8iKKmuvXr6Nr16549tlncfToUezcuVMWJ6JIKSwyMlLOGsXExGDFihXyNo1GgyVLluD06dPy/h9//BFvvfWWvK9du3ayCBIFT/7rvfnmm/eNS6vVyuLo2rVrsoD74Ycf8L///Q8vv/xykcf99ttv2Lx5M7Zv3y4v4rH/+c9/KvQ9I6LS4yk2IjIqYtZFFDjVq1eHk5NTwe1hYWGyOPrggw8KbluzZo0sns6cOYOmTZvK25o0aYL58+cXec7C/UxiZmfu3Ll49dVX8cknn8jXEq8pZo4Kv15xe/fuxa+//ork5GT5msLatWvh5eUle5V8fHwKCinR02RjYyOPxSyX+Nn333+/3N4jInpynEEiIpNw4sQJ7Nu3T57eyr+4u7sXzNrka9Wq1X0/u2fPHnTr1g1PPfWULFxE0XL16lXcunWrxK+fkJAgC6P84kjw9PSUzd3ivsIFWH5xJNSrVw9paWll+jsTUcXhDBIRmQTRM+Tv748PP/zwvvtEEZJP9BkVJvqX+vbti3HjxslZnDp16iA6OhqjRo2STdxipqo8mZubFzkWM1NiVomIDAsLJCIyOuK0V15eXpHbnnvuOXzzzTdyhqZq1ZL/0xYXFycLlI8//lj2Iglff/31Y1+vOA8PD1y6dEle8meR4uPjZW+UmEkiIuPCU2xEZHREEfTzzz/L2R+xSk0UOOPHj5cN0kOHDpU9P+K02q5du+QKtEcVN40bN0Zubi6WLl0qm6rFCrP85u3CrydmqESvkHi9B5166969u1wJ989//hPHjh3D4cOHMWLECHTq1AmtW7eukPeBiCoOCyQiMjpiFZmZmZmcmRFZRGJ5vbOzs1yZJoqhF154QRYrovla9ADlzww9SIsWLeQyf3FqztvbG59//jnmzZtX5DFiJZto2hYr0sTrFW/yzj9VtmXLFtSuXRsdO3aUBVOjRo2wfv36CnkPiKhiVdHpdLoKfg0iIiIio8IZJCIiIqJiWCARERERFcMCiYiIiKgYFkhERERExbBAIiIiIiqGBRIRERFRMSyQiIiIiIphgURERERUDAskIiIiomJYIBEREREVwwKJiIiICEX9P/17GFxXLsdPAAAAAElFTkSuQmCC", 253 | "text/plain": [ 254 | "
" 255 | ] 256 | }, 257 | "metadata": {}, 258 | "output_type": "display_data" 259 | } 260 | ], 261 | "source": [ 262 | "import matplotlib.pyplot as plt\n", 263 | "\n", 264 | "solver_j = Jacobi(A, b, tol=1e-12, max_iter=50)\n", 265 | "xj = solver_j.solve()\n", 266 | "res_j = solver_j.history\n", 267 | "\n", 268 | "solver_gs = GaussSeidel(A, b, tol=1e-12, max_iter=50)\n", 269 | "xgs = solver_gs.solve()\n", 270 | "res_gs = solver_gs.history\n", 271 | "\n", 272 | "plt.semilogy(res_j, label=\"Jacobi\")\n", 273 | "plt.semilogy(res_gs, label=\"Gauss–Seidel\")\n", 274 | "plt.xlabel(\"Iteration\")\n", 275 | "plt.ylabel(\"Residual norm\")\n", 276 | "plt.legend()\n", 277 | "plt.show()" 278 | ] 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "id": "41a3c7a9", 283 | "metadata": {}, 284 | "source": [ 285 | "## 4. Summary\n", 286 | "\n", 287 | "- Direct: Gauss–Jordan, LU, Cholesky.\n", 288 | "- Iterative: Jacobi, Gauss–Seidel.\n", 289 | "- Convergence depends on matrix properties (e.g., diagonal dominance, SPD).\n", 290 | "- Direct methods better for small/moderate dense systems.\n", 291 | "- Iterative methods essential for large, sparse systems (e.g., PDE discretizations).\n", 292 | "- Conditioning of matrix affects stability." 293 | ] 294 | } 295 | ], 296 | "metadata": { 297 | "kernelspec": { 298 | "display_name": "Python 3", 299 | "language": "python", 300 | "name": "python3" 301 | }, 302 | "language_info": { 303 | "codemirror_mode": { 304 | "name": "ipython", 305 | "version": 3 306 | }, 307 | "file_extension": ".py", 308 | "mimetype": "text/x-python", 309 | "name": "python", 310 | "nbconvert_exporter": "python", 311 | "pygments_lexer": "ipython3", 312 | "version": "3.13.7" 313 | } 314 | }, 315 | "nbformat": 4, 316 | "nbformat_minor": 5 317 | } 318 | --------------------------------------------------------------------------------