├── .github └── workflows │ └── python-tests.yml ├── .gitignore ├── LICENSE ├── README.md ├── notes ├── 1_root_and_extrema_finding │ ├── bisection_method.md │ ├── golden_ratio_search.md │ ├── gradient_descent.md │ ├── newtons_method.md │ ├── relaxation_method.md │ ├── root_finding.md │ └── secant_method.md ├── 2_systems_of_equations │ ├── gauss_seidel.md │ ├── gaussian_elimination.md │ ├── inverse_matrix.md │ ├── jacobi_method.md │ ├── lu_decomposition.md │ └── systems_of_equations.md ├── 3_differentiation │ ├── backward_difference.md │ ├── central_difference.md │ ├── differentiation.md │ ├── forward_difference.md │ └── taylor_series.md ├── 4_integration │ ├── integration_introduction.md │ ├── midpoint_rule.md │ ├── monte_carlo.md │ ├── resources │ │ ├── midpoint_rule.ipynb │ │ ├── simpsons_method.tex │ │ └── trapeziod_method.tex │ ├── simpsons_rule.md │ └── trapezoidal_rule.md ├── 5_matrices │ ├── eigen_value_decomposition.md │ ├── eigenvalues_and_eigenvectors.md │ ├── matrix_methods.md │ ├── power_method.md │ ├── qr_method.md │ └── singular_value_decomposition.md ├── 6_regression │ ├── cubic_spline_interpolation.md │ ├── gaussian_interpolation.md │ ├── interpolation.md │ ├── lagrange_polynomial_interpolation.md │ ├── least_squares.md │ ├── linear_interpolation.md │ ├── newton_polynomial.md │ ├── regression.md │ ├── resources │ │ ├── cubic_spline.tex │ │ ├── curve_fitting.py │ │ ├── lagrange_polynomial_interpolation.tex │ │ ├── linear_interpolation_derivation.tex │ │ └── linear_interpolation_examples.tex │ └── thin_plate_spline_interpolation.md └── 7_ordinary_differential_equations │ ├── eulers_method.md │ ├── heuns_method.md │ ├── ordinary_differential_equations.md │ ├── partial_differential_equations.md │ ├── picards_method.md │ └── runge_kutta.md ├── requirements.txt └── src ├── 1_root_and_extrema_finding ├── __init__.py ├── bisection_search │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── bisection_search.py │ └── tests │ │ ├── __init__.py │ │ └── test_bisection_search.py ├── golden_ratio_search │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── golden_ratio_search.py │ └── tests │ │ ├── __init__.py │ │ └── test_golden_ratio_search.py ├── gradient_descent │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── gradient_descent.py │ └── tests │ │ ├── __init__.py │ │ └── test_gradient_descent.py ├── newton_raphson │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── newton_raphson.py │ └── tests │ │ ├── __init__.py │ │ └── test_newton_raphson.py ├── relaxation_method │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── relaxation_method.py │ └── tests │ │ ├── __init__.py │ │ └── test_relaxation_method.py └── secant_method │ ├── __init__.py │ ├── examples │ ├── __init__.py │ ├── example.ipynb │ └── plot.py │ ├── implementation │ ├── __init__.py │ └── secant_method.py │ └── tests │ ├── __init__.py │ └── test_secant_method.py ├── 2_systems_of_equations ├── __init__.py ├── gauss_seidel │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── gauss_seidel.py │ └── tests │ │ ├── 
__init__.py │ │ └── test_gauss_seidel.py ├── gaussian_elimination │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── gaussian_elimination.py │ └── tests │ │ ├── __init__.py │ │ └── test_gaussian_elimination.py ├── jacobi_method │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── example.ipynb │ ├── implementation │ │ ├── __init__.py │ │ └── jacobi_method.py │ └── tests │ │ ├── __init__.py │ │ └── test_jacobi_method.py ├── lu_decomposition │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── lu_decomposition.py │ └── tests │ │ ├── __init__.py │ │ └── test_lu_decomposition.py └── matrix_inverse │ ├── __init__.py │ ├── examples │ ├── __init__.py │ └── example.ipynb │ ├── implementation │ ├── __init__.py │ └── inverse_matrix.py │ └── tests │ ├── __init__.py │ └── test_inverse_matrix.py ├── 3_derivatives ├── __init__.py ├── backward_difference │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── temp.md │ ├── implementation │ │ ├── __init__.py │ │ └── backward_difference.py │ └── tests │ │ ├── __init__.py │ │ └── test_backward_difference.py ├── central_difference │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ ├── example.py │ │ └── example_b.ipynb │ ├── implementation │ │ ├── __init__.py │ │ └── central_difference.py │ └── tests │ │ ├── __init__.py │ │ └── test_central_difference.py ├── forward_difference │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── example.ipynb │ ├── implementation │ │ ├── __init__.py │ │ └── forward_difference.py │ └── tests │ │ ├── __init__.py │ │ └── test_forward_difference.py └── taylor_series │ ├── __init__.py │ ├── examples │ ├── __init__.py │ ├── example.py │ └── plot.py │ ├── implementation │ ├── __init__.py │ └── taylor_series.py │ └── tests │ ├── __init__.py │ └── test_tailor_series.py ├── 4_integration ├── __init__.py ├── midpoint_rule │ ├── __init__.py │ ├── implementation │ │ ├── __init__.py │ │ └── midpoint_rule.py │ └── tests │ │ ├── __init__.py │ │ └── test_midpoint_rule.py ├── monte_carlo_integral │ ├── __init__.py │ ├── implementation │ │ ├── __init__.py │ │ └── monte_carlo_integral.py │ └── tests │ │ ├── __init__.py │ │ └── test_monte_carlo_integral.py ├── simpson │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── temp.md │ ├── implementation │ │ ├── __init__.py │ │ └── simpson_rule.py │ └── tests │ │ ├── __init__.py │ │ └── test_simpsons_rule.py └── trapezoid_rule │ ├── __init__.py │ ├── examples │ ├── __init__.py │ └── example.ipynb │ ├── implementation │ ├── __init__.py │ └── trapezoid_rule.py │ └── tests │ ├── __init__.py │ └── test_trapezoid_rule.py ├── 5_matrices ├── __init__.py ├── eigen_value_decomposition │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── eigen_value_decomposition.py │ └── tests │ │ ├── __init__.py │ │ └── test_eigen_value_decomposition.py ├── eigenvalues_and_eigenvectors │ ├── __init__.py │ ├── implementation │ │ ├── __init__.py │ │ └── eigenvalues_and_eigenvectors.py │ └── tests │ │ ├── __init__.py │ │ └── test_eigenvalues_and_eigenvectors.py ├── inverse_power_method │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── temp.md │ ├── implementation │ │ ├── __init__.py │ │ └── inverse_power_method.py │ └── tests │ │ ├── __init__.py │ │ └── test_inverse_power_method.py ├── power_method │ ├── __init__.py │ ├── 
examples │ │ ├── __init__.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── power_method.py │ └── tests │ │ ├── __init__.py │ │ └── test_power_method.py ├── qr_method │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── qr_method.py │ └── tests │ │ ├── __init__.py │ │ └── test_qr_method.py └── singular_value_decomposition │ ├── __init__.py │ ├── examples │ ├── __init__.py │ └── plot.py │ ├── implementation │ ├── __init__.py │ └── singular_value_decomposition.py │ └── tests │ ├── __init__.py │ └── test_singular_value_decomposition.py ├── 6_regression ├── __init__.py ├── cubic_spline │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── example.py │ ├── implementation │ │ ├── __init__.py │ │ └── cubic_spline.py │ └── tests │ │ ├── __init__.py │ │ └── test_cubic_spline.py ├── gaussian_interpolation │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── gaussian_interpolation.py │ └── tests │ │ ├── __init__.py │ │ └── test_gaussian_interpolation.py ├── lagrange_polynomial │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── example.py │ ├── implementation │ │ ├── __init__.py │ │ └── lagrange_polynomial.py │ └── tests │ │ ├── __init__.py │ │ └── test_lagrange_polynomial.py ├── least_squares │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── least_squares.py │ └── tests │ │ ├── __init__.py │ │ └── test_least_squares.py ├── linear_interpolation │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── example.py │ ├── implementation │ │ ├── __init__.py │ │ └── linear_interpolation.py │ └── tests │ │ ├── __init__.py │ │ └── test_linear_interpolation.py ├── newton_polynomial │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── plot.py │ ├── implementation │ │ ├── __init__.py │ │ └── newton_polynomial.py │ └── tests │ │ ├── __init__.py │ │ └── test_newton_polynomial.py ├── polynomial_regression │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ ├── example.ipynb │ │ └── example.py │ ├── implementation │ │ ├── __init__.py │ │ └── polynomial_regression.py │ └── tests │ │ ├── __init__.py │ │ └── test_polynomial_regression.py ├── regression_methods_comparison │ ├── __init__.py │ └── temp.md └── thin_plate_spline_interpolation │ ├── __init__.py │ ├── examples │ ├── __init__.py │ └── plot.py │ ├── implementation │ ├── __init__.py │ └── thin_plate_spline_interpolation.py │ └── tests │ ├── __init__.py │ └── test_thin_plate_spline_interpolation.py ├── 7_ordinary_differential_equations ├── __init__.py ├── euler │ ├── __init__.py │ ├── examples │ │ ├── __init__.py │ │ └── temp.md │ ├── implementation │ │ ├── __init__.py │ │ └── euler.py │ └── tests │ │ ├── __init__.py │ │ └── test_euler.py ├── heun │ ├── __init__.py │ ├── implementation │ │ ├── __init__.py │ │ └── heun.py │ └── tests │ │ ├── __init__.py │ │ └── test_heun.py ├── picard │ ├── __init__.py │ ├── implementation │ │ ├── __init__.py │ │ └── picard.py │ └── tests │ │ ├── __init__.py │ │ └── test_picard.py └── runge_kutta │ ├── __init__.py │ ├── examples │ ├── __init__.py │ └── temp.md │ ├── implementation │ ├── __init__.py │ └── runge_kutta.py │ └── tests │ ├── __init__.py │ └── test_runge_kutta.py └── __init__.py /.github/workflows/python-tests.yml: -------------------------------------------------------------------------------- 1 | name: Python Tests 2 | 3 | # Trigger 
the workflow on push, pull request events to the master branch, and manual dispatch 4 | on: 5 | push: 6 | branches: [ master ] 7 | pull_request: 8 | branches: [ master ] 9 | workflow_dispatch: # Allows manual triggering 10 | 11 | jobs: 12 | test: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | python-version: [3.9, 3.11] # Valid Python versions only 17 | 18 | steps: 19 | # Step 1: Check out the repository 20 | - name: Checkout Repository 21 | uses: actions/checkout@v3 22 | 23 | # Step 2: Set up Python 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | # Step 3: Cache pip dependencies 30 | - name: Cache pip 31 | uses: actions/cache@v3 32 | with: 33 | path: ~/.cache/pip 34 | key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ matrix.python-version }} 35 | restore-keys: | 36 | ${{ runner.os }}-pip- 37 | 38 | # Step 4: Install dependencies 39 | - name: Install Dependencies 40 | run: | 41 | python -m pip install --upgrade pip 42 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 43 | 44 | # Step 5: Set PYTHONPATH (if needed) 45 | - name: Set PYTHONPATH 46 | run: echo "PYTHONPATH=$(pwd)/src" >> $GITHUB_ENV 47 | 48 | # Step 6: Run tests 49 | - name: Run Tests 50 | run: | 51 | cd src 52 | pytest 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Adam Djellouli 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /notes/3_differentiation/taylor_series.md: -------------------------------------------------------------------------------- 1 | ## Taylor Series 2 | 3 | The Taylor series is a fundamental tool in calculus and mathematical analysis, offering a powerful way to represent and approximate functions. By expanding a function around a specific point, known as the "center" or "point of expansion," we can express it as an infinite sum of polynomial terms derived from the function’s derivatives. This concept is especially useful for approximating functions that are difficult or impossible to compute directly, as well as for understanding the local behavior of functions. 
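To see this in practice before the formal statement below, here is a minimal Python sketch (Python matches the implementations under this repository's `src/` directory; the helper name `taylor_exp` is made up for illustration) that sums the leading terms of the series for $e^x$ about $0$, worked out formally in the Example section further down, and watches the error shrink:

```python
import math

def taylor_exp(x: float, n_terms: int) -> float:
    """Approximate e^x by the first n_terms of its Taylor series about 0."""
    total, term = 0.0, 1.0          # term holds x^k / k!
    for k in range(n_terms):
        total += term
        term *= x / (k + 1)         # next term: multiply by x / (k + 1)
    return total

for n in (2, 4, 8):
    approx = taylor_exp(0.5, n)
    print(n, approx, abs(approx - math.exp(0.5)))  # error shrinks as n grows
```

With only eight terms the error near the expansion point is already far below single-precision round-off, which is why truncated Taylor polynomials are such a common computational workhorse.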
4 | 5 | ![taylor_series](https://github.com/user-attachments/assets/cba25294-b445-42c2-9a3e-15dfc80813cf) 6 | 7 | ### Mathematical Formulation 8 | 9 | Consider a function $f(x)$ that is infinitely differentiable at a point $a$. The Taylor series of $f(x)$ about the point $a$ is given by: 10 | 11 | $$f(x) = f(a) + f'(a)(x - a) + \frac{f''(a)}{2!}(x - a)^2 + \frac{f'''(a)}{3!}(x - a)^3 + \cdots$$ 12 | 13 | More compactly, we write: 14 | 15 | $$f(x) = \sum_{n=0}^{\infty} \frac{f^{(n)}(a)}{n!}(x - a)^n,$$ 16 | 17 | where: 18 | 19 | - $f^{(n)}(a)$ denotes the $n$-th derivative of $f$ evaluated at $x = a$. 20 | - $n!$ denotes the factorial of $n$. 21 | 22 | If the series converges to $f(x)$ for all $x$ in some interval around $a$, then the Taylor series provides an exact representation of the function in that interval. 23 | 24 | ### Practical Use 25 | 26 | The Taylor series is not only a theoretical construct. It has numerous practical applications: 27 | 28 | I. **Approximation**: 29 | 30 | Near the point $x = a$, the partial sums of the Taylor series (called Taylor polynomials) provide increasingly accurate approximations to $f(x)$. This is often used in numerical methods to approximate complicated functions with simpler polynomial expressions. 31 | 32 | II. **Analysis of Behavior**: 33 | 34 | By examining the derivatives at a single point, one can gain insights into the function's local behavior, such as growth rates, curvature, and pattern of change. 35 | 36 | III. **Computational Efficiency**: 37 | 38 | In contexts like numerical analysis, physics, and engineering, it may be easier or more efficient to use a truncated Taylor series for computations instead of evaluating a complex function directly. 39 | 40 | IV. **Series Solutions to Differential Equations**: 41 | 42 | Many differential equations can be solved (or approximated) by expressing their solutions as Taylor series expansions. 43 | 44 | ### Example 45 | 46 | **Taylor Series of $e^x$ at $a = 0$**: 47 | 48 | The exponential function $e^x$ has the unique property that all its derivatives are $e^x$ itself, and $e^0 = 1$. Thus: 49 | 50 | $$e^x = 1 + x + \frac{x^2}{2!} + \frac{x^3}{3!} + \cdots$$ 51 | 52 | This power series expansion converges for all real $x$, and even for complex $x$. Truncating the series after a few terms gives a good approximation of $e^x$ near $x = 0$. 53 | 54 | ### Advantages 55 | 56 | - Representing functions as **polynomials simplifies operations** like integration, differentiation, and approximation, making complex functions easier to work with mathematically. 57 | - Taylor series provide a **local approximation** of functions by incorporating derivatives of all orders at a single point, capturing details such as slope, curvature, and higher-order behaviors. 58 | - The method offers a **uniform approach** to handling a wide range of functions, including transcendental functions like $\sin x$, $\cos x$, and $e^x$, through polynomial representations. 59 | 60 | ### Limitations 61 | 62 | - The approximation has **local validity**, meaning it works best near the point $a$. Moving farther from $a$ can lead to reduced accuracy or even divergence. 63 | - Taylor series require **infinite differentiability** at the point $a$, limiting their applicability to functions that are not smooth or have points of non-differentiability. 
- Even for infinitely differentiable functions, **convergence issues** can arise, with some Taylor series converging only within a certain radius or failing to match the function outside that radius, affecting their global accuracy.

--------------------------------------------------------------------------------
/notes/4_integration/resources/simpsons_method.tex:
--------------------------------------------------------------------------------

\documentclass{article}

\usepackage{pgfplots} %http://www.ctan.org/pkg/pgfplots
\pgfplotsset{compat=newest, set layers=standard}

\usepgfplotslibrary{fillbetween}
\usetikzlibrary{intersections}

\title{Simpson's Method}
\begin{document}

\begin{tikzpicture}
\begin{axis}[axis lines=center, ytick=\empty,
    ymax=1.3, ymin=0, xmax=2.6, xmin=0,
    xtick={0.3,1.3,2.3}, xticklabels={$a$,$\frac{a+b}{2}$,$b$},
    axis line style={-}, xlabel=$x$, ylabel=$y$]
\draw[name path=A, purple] (2.3,1.2) parabola (0.3,0.5);
\node[] at (1.6,1.2){$y=f(x)$};
\path[name path=B] (\pgfkeysvalueof{/pgfplots/xmin},0)
    --(\pgfkeysvalueof{/pgfplots/xmax},0);
\addplot[gray!50] fill between[of=A and B,soft clip={domain=0.3:2.3}];
\draw[name path=C] (1.3,0) -- (1.3,1.025);
\path[name intersections={of=A and C}] coordinate (midpoint) at (intersection-1);
\draw (0.3,0.5) ..controls ++(0.1,0.3) and ++(-0.2,-0.05) .. (midpoint)
    node[pos=0.5,above left]{$y=p_2(x)$}
    ..controls ++(0.2,0.05) and ++(-0.1,-0.05) .. (2.3,1.2);
\end{axis}
\end{tikzpicture}

\end{document}

--------------------------------------------------------------------------------
/notes/4_integration/resources/trapeziod_method.tex:
--------------------------------------------------------------------------------

\documentclass{article}

\usepackage{pgfplots} %http://www.ctan.org/pkg/pgfplots
\pgfplotsset{compat=newest, set layers=standard}

\usepgfplotslibrary{fillbetween}
\usetikzlibrary{intersections}

\title{Trapezoid Method}
\begin{document}

\pgfplotsset{
    integral axis/.style={
        axis lines=middle,
        enlarge y limits=upper,
        axis equal image, width=12cm,
        xlabel=$x$, ylabel=$y$,
        ytick=\empty,
        xticklabel style={font=\small, text height=1.5ex, anchor=north},
        samples=100
    },
    integral/.style={
        domain=2:10,
        samples=9
    },
    integral fill/.style={
        integral,
        draw=none, fill=#1,
        on layer=axis background
    },
    integral fill/.default=cyan!10,
    integral line/.style={
        integral,
        very thick,
        draw=#1
    },
    integral line/.default=black
}


\begin{tikzpicture}[
    % The function that is used for all the plots
    declare function={f=x/5-cos(deg(x*1.85))/2+2;}
]
\begin{axis}[
    integral axis,
    ymin=0,
    xmin=0.75, xmax=11.25,
    domain=1.5:10.5,
    xtick={2,...,10},
    xticklabels={$a=x_0$, $x_1$,,,$x_{j-1}$,$x_j$,,$x_{n-1}$,$b=x_n$},
]
% The function
\addplot [very thick, cyan!75!blue] {f} node [anchor=south] {$y=f(x)$};

% The filled area under the approximate integral
\addplot [integral fill=cyan!15] {f} \closedcycle;

% The approximate integral
\addplot [integral line=black] {f};

% The vertical lines between the segments
\addplot [integral, ycomb] {f};

% The highlighted segment
\addplot [integral fill=cyan!35, domain=6:7,
samples=2] {f} \closedcycle; 67 | \end{axis} 68 | \end{tikzpicture} 69 | 70 | \end{document} 71 | -------------------------------------------------------------------------------- /notes/5_matrices/eigenvalues_and_eigenvectors.md: -------------------------------------------------------------------------------- 1 | ## Eigenvalues and Eigenvectors 2 | 3 | Eigenvalues and eigenvectors are foundational concepts in linear algebra, with extensive applications across various domains such as physics, computer graphics, and machine learning. These concepts are instrumental in decomposing complex matrix transformations, thereby simplifying numerical computations. 4 | 5 | ### Definitions 6 | 7 | An **eigenvector** of a square matrix $A$ is a non-zero vector $v$ that, when multiplied by $A$, results in a scaled version of $v$. The scalar factor is the **eigenvalue** corresponding to that eigenvector. In mathematical terms, this relationship is described as: 8 | 9 | $$ 10 | A v = \lambda v 11 | $$ 12 | 13 | where: 14 | 15 | - $A$ is a square matrix, 16 | - $v$ is an eigenvector of $A$, 17 | - $\lambda$ is the corresponding eigenvalue. 18 | 19 | ### Procedure for Finding Eigenvalues and Eigenvectors 20 | 21 | 1. **Eigenvalues**: Eigenvalues are calculated by solving the characteristic equation, formulated as $det(A - \lambda I) = 0$. Here, $I$ is the identity matrix of the same dimension as $A$, and $det(\cdot)$ denotes the determinant. The roots of this equation yield the eigenvalues. 22 | 23 | 2. **Eigenvectors**: Upon finding each eigenvalue, its corresponding eigenvectors are obtained by substituting the eigenvalue into the equation $(A - \lambda I)v = 0$, followed by computing the null space. 24 | 25 | ### Example 26 | 27 | Consider a 2x2 matrix: 28 | 29 | $$A = \begin{bmatrix} 4 & 1 \\ 30 | 2 & 3 \\ \end{bmatrix}$$ 31 | 32 | 1. Solve the characteristic equation $det(A - \lambda I) = 0$, which gives $\lambda^2 - 7\lambda + 10 = 0$. The roots of this equation are $\lambda_1 = 2$ and $\lambda_2 = 5$, representing the eigenvalues. 33 | 34 | 2. To find the corresponding eigenvectors: 35 | - For $\lambda_1 = 2$, solve the equation $(A - 2I)v = 0$, which yields the eigenvector $v_1 = [1, -2]$. 36 | - For $\lambda_2 = 5$, solve the equation $(A - 5I)v = 0$, which yields the eigenvector $v_2 = [1, 1]$. 37 | 38 | ### Applications 39 | 40 | - Eigenvalues and eigenvectors are used in PCA for dimensionality reduction, enabling data to be represented in fewer dimensions without significant loss of information. 41 | - They are employed in solving systems of linear differential equations, providing a simplified approach to understanding dynamic systems. 42 | - In quantum mechanics, eigenvalues and eigenvectors of operators correspond to physically measurable quantities and their associated states, respectively. 43 | 44 | ### Limitations 45 | 46 | - Eigenvalues and eigenvectors do not exist for all matrices. Specifically, non-square matrices lack these properties. 47 | - For large matrices, the computation of eigenvalues and eigenvectors can be computationally demanding, potentially slowing down analyses or algorithm implementations. 48 | -------------------------------------------------------------------------------- /notes/6_regression/linear_interpolation.md: -------------------------------------------------------------------------------- 1 | ## Linear interpolation 2 | 3 | Linear interpolation is one of the most basic and commonly used interpolation methods. 
The idea is to approximate the value of a function between two known data points by assuming that the function behaves linearly (like a straight line) between these points. Although this assumption may be simplistic, it often provides a reasonable approximation, especially when the data points are close together or the underlying function is relatively smooth. 4 | 5 | **Conceptual Illustration**: 6 | 7 | Imagine you have two points on a graph: 8 | 9 | ![Example Illustration](https://user-images.githubusercontent.com/37275728/188960814-569c5a91-82b4-415c-9840-f5ebd4cc421d.png) 10 | 11 | Linear interpolation draws a straight line between the two known data points $(x_i,y_i)$ and $(x_{i+1},y_{i+1})$, and then estimates the value at $x$ by following this line. 12 | 13 | ### Mathematical Formulation 14 | 15 | Given two known data points $(x_i, y_i)$ and $(x_{i+1}, y_{i+1})$, and a target $x$-value with $x_i \leq x \leq x_{i+1}$, the line connecting these points has a slope $\alpha$ given by: 16 | 17 | $$\alpha = \frac{y_{i+1} - y_i}{x_{i+1} - x_i}.$$ 18 | 19 | To find the interpolated value $y$ at $x$, start from $y_i$ and move along the line for the interval $(x - x_i)$: 20 | 21 | $$y = y_i + \alpha (x - x_i).$$ 22 | 23 | Substituting $\alpha$: 24 | 25 | $$y = y_i + (x - x_i) \frac{y_{i+1} - y_i}{x_{i+1} - x_i}.$$ 26 | 27 | This formula provides the interpolated $y$-value directly. 28 | 29 | ### Derivation 30 | 31 | ![Derivation Illustration](https://user-images.githubusercontent.com/37275728/188960726-ac99ac89-f1b8-4b82-9761-5093cb91d4db.png) 32 | 33 | I. **Slope Calculation:** 34 | 35 | The slope $\alpha$ of the line passing through $(x_i, y_i)$ and $(x_{i+1}, y_{i+1})$ is: 36 | 37 | $$\alpha = \frac{y_{i+1} - y_i}{x_{i+1}-x_i}.$$ 38 | 39 | II. **Linear Equation:** 40 | 41 | A line passing through $(x_i, y_i)$ with slope $\alpha$ is: 42 | 43 | $$y - y_i = \alpha (x - x_i).$$ 44 | 45 | III. **Substitution:** 46 | 47 | Replace $\alpha$ with its expression: 48 | 49 | $$y - y_i = \frac{y_{i+1} - y_i}{x_{i+1}-x_i} (x - x_i).$$ 50 | 51 | IV. **Final Formula:** 52 | 53 | Simplifying: 54 | 55 | $$y = y_i + \frac{(y_{i+1} - y_i)}{x_{i+1}-x_i} (x - x_i).$$ 56 | 57 | ### Algorithm Steps 58 | 59 | I. Identify the interval $[x_i, x_{i+1}]$ that contains the target $x$. 60 | 61 | II. Compute the slope: 62 | 63 | $$\frac{y_{i+1} - y_i}{x_{i+1}-x_i}.$$ 64 | 65 | III. Substitute into the linear interpolation formula: 66 | 67 | $$y = y_i + \frac{(y_{i+1} - y_i)}{x_{i+1}-x_i} (x - x_i).$$ 68 | 69 | The result is the interpolated value $y$ at the desired $x$. 70 | 71 | ### Example 72 | 73 | **Given Points**: $A(-2,0)$ and $B(2,2)$. Suppose we want to find $y$ at $x=1$. 74 | 75 | I. Compute the slope: 76 | 77 | $$\alpha = \frac{2 - 0}{2 - (-2)} = \frac{2}{4} = 0.5.$$ 78 | 79 | II. Substitute $x=1$: 80 | 81 | $$y = 0 + 0.5 (1 - (-2)) = 0.5 \times 3 = 1.5.$$ 82 | 83 | So, the line passing through $(-2,0)$ and $(2,2)$ gives $y=1.5$ when $x=1$. 84 | 85 | ### Advantages 86 | 87 | - The method offers **simplicity**, as the calculation involves straightforward arithmetic, making it easy and quick to apply. 88 | - **Minimal data requirements** make it practical, needing only two data points to estimate intermediate values. 89 | - It provides a **local approximation**, working well when the function is nearly linear within the specified interval. 90 | 91 | ### Limitations 92 | 93 | - The **linear assumption** can lead to poor results if the actual relationship between points is not close to linear. 
- Linear interpolation uses **no derivative information**; it ignores the slope and curvature of the underlying function, which could otherwise improve the accuracy of the estimate.
- **Accuracy diminishes** as the interval between points widens or as the function becomes more non-linear, leading to larger approximation errors.
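To see how little machinery the method needs, here is a minimal Python sketch (the helper name `linear_interpolate` is illustrative, not part of this repository's `src/` package) that implements the formula above, reproduces the worked example, and cross-checks against NumPy's built-in `np.interp`:

```python
import numpy as np

def linear_interpolate(x: float, x1: float, y1: float, x2: float, y2: float) -> float:
    """Interpolate y at x on the straight line through (x1, y1) and (x2, y2)."""
    slope = (y2 - y1) / (x2 - x1)      # alpha = (y_{i+1} - y_i) / (x_{i+1} - x_i)
    return y1 + slope * (x - x1)       # y = y_i + alpha * (x - x_i)

print(linear_interpolate(1.0, -2.0, 0.0, 2.0, 2.0))  # 1.5, matching the example above
print(np.interp(1.0, [-2.0, 2.0], [0.0, 2.0]))       # 1.5, NumPy cross-check
```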
--------------------------------------------------------------------------------
/notes/6_regression/resources/cubic_spline.tex:
--------------------------------------------------------------------------------

\documentclass{article}
\usepackage{graphicx}

\usepackage{pgfplots}
\usepackage{tikz}
\pgfplotsset{every axis legend/.append style={
    at={(0,0)},
    anchor=north east}}
\usetikzlibrary{shapes,positioning,intersections,quotes}

\definecolor{darkgreen}{rgb}{0.0, 0.6, 0.0}
\definecolor{darkred}{rgb}{0.7, 0.0, 0.0}

\title{Cubic Spline}
\begin{document}
\begin{tikzpicture}
\begin{axis}[
    axis x line=middle,
    axis y line=middle,
    width=13cm, height=13cm, % size of the image
    grid = none,
    grid style={dashed, gray!0},
    %xmode=log,log basis x=10,
    %ymode=log,log basis y=10,
    xmin=-2, % start the diagram at this x-coordinate
    xmax= 4, % end the diagram at this x-coordinate
    ymin=-7, % start the diagram at this y-coordinate
    ymax= 7, % end the diagram at this y-coordinate
    %/pgfplots/xtick={0,1,...,60}, % make steps of length 5
    %extra x ticks={23},
    %extra y ticks={0.507297},
    axis background/.style={fill=white},
    ylabel=y,
    xlabel=x,
    %xticklabels={,,},
    %yticklabels={,,},
    tick align=outside,
    tension=0.08]
% plot the cubic polynomial
\addplot[name path global=a, domain=-2:4, blue, thick,samples=500] {-x*x*x + 4*x*x-x-4};
\fill[red] (211, 39.3) circle (3pt);
\fill[red] (455, 108.8) circle (3pt);
\node[above right=0pt of {(211, 39.3)}, outer sep=2pt,fill=none] {$y_1$};
\node[above right=0pt of {(455, 108.8)}, outer sep=2pt,fill=none] {$y_2$};
\node[above right=0pt of {(250, 98.8)}, outer sep=2pt,fill=none] {$S_1(x)$};
\draw [-stealth](280,98) -- (350,82);
\end{axis}
\end{tikzpicture}

\end{document}

--------------------------------------------------------------------------------
/notes/6_regression/resources/curve_fitting.py:
--------------------------------------------------------------------------------

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# define the true function
def f(x):
    return 2.5 * x + 1.3

# generate points using above function
np.random.seed(0)  # for reproducibility
n_points = 10
X = np.linspace(0, 1, n_points)
y = f(X) + np.random.normal(0, 0.3, size=n_points)  # add some noise

# fit a linear function to the data
def fit_func(x, a, b):
    return a * x + b

params, _ = curve_fit(fit_func, X, y)

# generate y-values for the best fit line
y_fit = params[0] * X + params[1]

plt.figure(figsize=(8, 6))

# plot the data points
plt.scatter(X, y, label='Data points')

# plot the best fit line
plt.plot(X, y_fit, 'r-', label='Best fit: a=%5.3f, b=%5.3f' % tuple(params))

# plot vertical lines to show errors
for xi, yi, y_fit_i in zip(X, y, y_fit):
    plt.plot([xi, xi], [yi, y_fit_i], color='gray', linestyle='--')

plt.xlabel('X')
plt.ylabel('y')
plt.title('Curve fitting')
plt.legend()
plt.show()

--------------------------------------------------------------------------------
/notes/6_regression/resources/lagrange_polynomial_interpolation.tex:
--------------------------------------------------------------------------------

\documentclass{article}
\usepackage{graphicx}

\usepackage{pgfplots}
\usepackage{tikz}
\pgfplotsset{every axis legend/.append style={
    at={(0,0)},
    anchor=north east}}
\usetikzlibrary{shapes,positioning,intersections,quotes}

\definecolor{darkgreen}{rgb}{0.0, 0.6, 0.0}
\definecolor{darkred}{rgb}{0.7, 0.0, 0.0}

\title{Lagrange Polynomial Interpolation}
\begin{document}

\begin{tikzpicture}
\begin{axis}[
    axis x line=middle,
    axis y line=middle,
    width=10cm,
    height=10cm,
    xmin=-5, % start the diagram at this x-coordinate
    xmax= 6, % end the diagram at this x-coordinate
    ymin= -1, % start the diagram at this y-coordinate
    ymax= 8, % end the diagram at this y-coordinate
    xlabel=$x$,
    ylabel=$y$,
    legend cell align=left,
    legend pos=north east,
    legend style={draw=none},
    tick align=outside,
    enlargelimits=false,
    xtick distance=1,
    ytick distance=1]

% plot the function
\addplot[domain=-5:10, blue, ultra thick,samples=500] {1/3*(x^2 + x + 3)};

\fill[red] (400, 20) circle (3pt);
\fill[red] (700, 40) circle (3pt);
\fill[red] (800, 60) circle (3pt);

\node[above right=0pt of {(340, 13)}, outer sep=2pt,fill=none] {A};
\node[above right=0pt of {(630, 40)}, outer sep=2pt,fill=none] {B};
\node[above right=0pt of {(730, 60)}, outer sep=2pt,fill=none] {C};

\legend{$\frac{1}{3} x^2 + \frac{1}{3} x + 1$}

\end{axis}
\end{tikzpicture}

\end{document}

--------------------------------------------------------------------------------
/notes/6_regression/resources/linear_interpolation_derivation.tex:
--------------------------------------------------------------------------------

\documentclass{article}
\usepackage{graphicx}
\usepackage{nicefrac}
\usepackage{pgfplots}
\usepackage{tikz}
\pgfplotsset{every axis legend/.append style={
    at={(0,0)},
    anchor=north east}}
\usetikzlibrary{shapes,positioning,intersections,quotes}

\definecolor{darkgreen}{rgb}{0.0, 0.6, 0.0}
\definecolor{darkred}{rgb}{0.7, 0.0, 0.0}

\title{Linear interpolation}
\begin{document}

\begin{tikzpicture}

\draw [dashed] (-3, -1.9) -- (3, -1.9);
\draw [dashed] (3, -5) -- (3, -1.9);
\draw [-stealth](-3,-5) -- (9,-5);
\draw [-stealth](-3,-5) -- (-3,1);

\draw [stealth-stealth, blue](0,-3) -- (8,0);
\draw [stealth-stealth, red](8,0) -- (8,-3);
\draw [stealth-stealth, green](0,-3) -- (8,-3);
\draw [stealth-stealth, black](0,-3.1) -- (3,-3.1);

\node[above right=0pt of {(8, 0)}, outer sep=2pt,fill=none] {$(x_2, y_2)$};
\node[above right=0pt of {(8, -1.8)}, outer sep=2pt,fill=none, darkred] {$y_2 - y_1$};
\node[above right=0pt of {(4.5, -3.6)}, outer sep=2pt,fill=none, darkgreen] {$x_2 - x_1$};
\node[above right=0pt of {(1, -3.6)}, outer sep=2pt,fill=none] {$x - x_1$};
\node[above right=0pt of {(-1,-3.7)}, outer sep=2pt,fill=none] {$(x_1, y_1)$};
\node[above right=0pt of {(-1,-1.7)}, outer sep=2pt,fill=none] {$y$};
\node[above right=0pt of {(3,-4.2)}, outer sep=2pt,fill=none] {$x$};
\node[above right=0pt of {(3,-2.7)}, outer sep=2pt,fill=none] {$h$};

\end{tikzpicture}
\end{document}

--------------------------------------------------------------------------------
/notes/6_regression/resources/linear_interpolation_examples.tex:
--------------------------------------------------------------------------------

\documentclass{article}
\usepackage{graphicx}
\usepackage{nicefrac}
\usepackage{pgfplots}
\usepackage{tikz}
\pgfplotsset{every axis legend/.append style={
    at={(0,0)},
    anchor=north east}}
\usetikzlibrary{shapes,positioning,intersections,quotes}

\definecolor{darkgreen}{rgb}{0.0, 0.6, 0.0}
\definecolor{darkred}{rgb}{0.7, 0.0, 0.0}

\title{Linear Interpolation Examples}
\begin{document}

\begin{tikzpicture}
\begin{axis}[
    axis x line=middle,
    axis y line=middle,
    width=8cm,
    height=8cm,
    xmin=-5, % start the diagram at this x-coordinate
    xmax= 5, % end the diagram at this x-coordinate
    ymin=-5, % start the diagram at this y-coordinate
    ymax= 5, % end the diagram at this y-coordinate
    xlabel=$x$,
    ylabel=$y$,
    legend cell align=left,
    legend pos=south east,
    legend style={draw=none},
    tick align=outside,
    enlargelimits=false]
% plot the function
\addplot[domain=-5:5, blue, ultra thick,samples=500] {0.5*x + 1};
\fill[red] (700,700) circle (3pt);
\fill[red] (300, 500) circle (3pt);
\draw [dashed] (500, 700) -- (680, 700);
\draw [dashed] (700, 500) -- (700, 680);
\node[above right=0pt of {(255,510)}, outer sep=2pt,fill=none] {A};
\node[above right=0pt of {(655,710)}, outer sep=2pt,fill=none] {B};
\legend{$\nicefrac{1}{2}x + 1$}
\end{axis}
\end{tikzpicture}


\end{document}

--------------------------------------------------------------------------------
/notes/7_ordinary_differential_equations/eulers_method.md:
--------------------------------------------------------------------------------

## Euler's Method

Euler's Method is a numerical technique for solving initial value problems for ordinary differential equations (ODEs). The simplicity of this method makes it a popular choice in cases where the differential equation lacks a closed-form solution. The method might not always provide the most accurate result, but it offers a good trade-off between simplicity and accuracy.

### Mathematical Formulation

Consider an initial value problem (IVP) represented as:

$$u' = f(t, u),$$

$$u(t_0) = u_0.$$

Euler's method approximates the solution of this IVP with the update:

$$u_{n+1} = u_n + h \cdot f(t_n, u_n),$$

where:

- $u_{n+1}$ is the approximate value of $u$ at $t = t_n + h$,
- $u_n$ is the approximate value of $u$ at $t = t_n$,
- $h$ is the step size,
- $f(t_n, u_n)$ is the derivative of $u$ at $t = t_n$.

### Derivation

Let's start with the Taylor series:

$$ u(t+h) = u(t) + h u'(t) + O(h^2). $$

Since $u'(t) = f(t, u(t))$, this can be rewritten as:

$$ u(t+h) = u(t) + h f(t, u(t)) + O(h^2). $$

Dropping the $O(h^2)$ term gives Euler's update:

$$ u(t+h) \approx u(t) + h f(t, u(t)). $$

### Algorithm Steps

1. Start with initial conditions $t_0$ and $u_0$.
2. Calculate $u_{n+1}$ using the formula: $u_{n+1} = u_n + h \cdot f(t_n, u_n)$.
3. Repeat the above step for a given number of steps or until the final value of $t$ is reached.

### Example

$$ u'(t) = u(t), $$

$$ u(0) = 1, $$

$$ u(0.1) = ? $$

Let's choose the step size $h = 0.05$, so reaching $t = 0.1$ takes two steps.

We start at $t = 0$:

$$ u(0.05) \approx u(0) + 0.05 u'(0) $$

$$ u(0.05) \approx 1 + 0.05 u(0) $$

$$ u(0.05) \approx 1 + 0.05 \cdot 1 $$

$$ u(0.05) \approx 1.05 $$

Now that we know $u(0.05)$, we can calculate the second step:

$$ u(0.1) \approx u(0.05) + 0.05 u'(0.05) $$

$$ u(0.1) \approx 1.05 + 0.05 u(0.05) $$

$$ u(0.1) \approx 1.05 + 0.05 \cdot 1.05 $$

$$ u(0.1) \approx 1.1025 $$

### Advantages

- Euler's method is **easy** to implement and serves as a foundational technique for introducing numerical solution methods for ODEs.
- It can provide **reasonable approximations** for well-behaved functions when the step size is sufficiently small.
- The simplicity of the method makes it computationally inexpensive for basic problems and suitable for initial experimentation.

### Limitations

- **Accuracy** is limited, as the method can introduce significant errors if the step size is too large or if the function has rapid changes.
- The **cumulative error** from each step can grow significantly, making the method unsuitable for long-time integration.
- **Stability** is an issue, as Euler's method may fail to converge or produce reliable results for stiff or oscillatory ODEs.
- The method lacks the ability to adapt step sizes dynamically, which can lead to inefficiencies or inaccuracies in varying conditions.
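The update rule maps directly to a few lines of code. Here is a minimal sketch (the helper name `euler` is illustrative; the repository's actual implementation lives under `src/7_ordinary_differential_equations/euler/`) that reproduces the worked example above:

```python
def euler(f, t0, u0, h, n_steps):
    """Advance u' = f(t, u) from (t0, u0) by n_steps Euler steps of size h."""
    t, u = t0, u0
    for _ in range(n_steps):
        u = u + h * f(t, u)  # u_{n+1} = u_n + h * f(t_n, u_n)
        t = t + h
    return u

# Worked example: u' = u, u(0) = 1, h = 0.05, two steps to reach t = 0.1.
print(euler(lambda t, u: u, 0.0, 1.0, 0.05, 2))  # ~1.1025
```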
--------------------------------------------------------------------------------
/notes/7_ordinary_differential_equations/heuns_method.md:
--------------------------------------------------------------------------------

## Heun's Method

Heun's method is an improved version of Euler's method that enhances accuracy by using an average of the slope at the beginning and the predicted slope at the end of the interval.

### Mathematical Formulation

Assume a first-order differential equation:

$$ \frac{du}{dt} = f(t, u), $$

given $u(t_0) = u_0$ and a step size $h$. Heun's method predicts the solution at time $t_0 + h$ as follows:

1. **Euler's step (predictor step)**: Predict the value at $t = t_0 + h$ using Euler's method:

$$ \tilde{u}_{n+1} = u_n + h f(t_n, u_n). $$

2. **Heun's step (corrector step)**: Correct this prediction by taking an average of the slopes at the beginning and end of the interval:

$$ u_{n+1} = u_n + \frac{h}{2} [f(t_n, u_n) + f(t_n + h, \tilde{u}_{n+1})]. $$

This process is repeated for each point in the desired interval.

### Derivation

The second-order Taylor series expansion around $t$ is given by:

$$ u(t + h) = u(t) + h u'(t) + \frac{h^2}{2} u''(t) + O(h^3), $$

where we approximate $u''(t)$ by the first difference of $u'(t)$:

$$ u''(t) \approx \frac{u'(t+h) - u'(t)}{h} = \frac{f(t + h, u(t + h)) - f(t, u(t))}{h}. $$

Substituting this approximation into the Taylor series and using $u'(t) = f(t, u(t))$, the two slope terms combine into an average:

$$ u(t + h) = u(t) + \frac{h}{2} [f(t, u(t)) + f(t + h, u(t + h))] + O(h^3). $$

Dropping the higher-order term and replacing the unknown value $u(t + h)$ inside $f$ with the Euler predictor $\tilde{u}_{n+1}$ yields Heun's method:

$$ u(t + h) \approx u(t) + \frac{h}{2} [f(t, u(t)) + f(t + h, \tilde{u}_{n+1})]. $$

### Algorithm Steps

1. Begin with initial conditions $u_0$ and $t_0$.
2. Compute $\tilde{u}_{n+1}$ using the predictor step.
3. Calculate $u_{n+1}$ using the corrector step.
4. Repeat steps 2-3 for all points in the desired interval.

### Example

Consider the differential equation

$$ u'(t) = u(t), $$

with the initial condition $u(0) = 1$. We want to estimate the value of $u$ at $t = 0.1$ using Heun's method with a step size of $h = 0.05$.

1. We start at $t = 0$ with $u(0) = 1$.

First, we calculate Euler's step (predictor):

$$ \tilde{u} = u(0) + h \cdot f(t, u(0)) = 1 + 0.05 \cdot 1 = 1.05. $$

Then, we correct this estimate:

$$ u(0.05) \approx u(0) + \frac{h}{2} [f(t, u(0)) + f(t + h, \tilde{u})] = 1 + \frac{0.05}{2} [1 + 1.05] = 1.05125. $$

2. Now that we have $u(0.05)$, we move on to $t = 0.1$.

Similarly, we calculate Euler's step:

$$ \tilde{u} = u(0.05) + h \cdot f(t, u(0.05)) = 1.05125 + 0.05 \cdot 1.05125 = 1.1038125. $$

Then, we correct this estimate:

$$ u(0.1) \approx u(0.05) + \frac{h}{2} [f(t, u(0.05)) + f(t + h, \tilde{u})] = 1.05125 + \frac{0.05}{2} [1.05125 + 1.1038125] = 1.1051265625. $$

So, the approximate solution to $u(0.1)$ with Heun's method is $1.1051265625$.

### Advantages

- Heun's method is **simple** to implement and easy to understand, making it accessible for introductory numerical analysis.
- It often provides a more **accurate approximation** than Euler's method by incorporating a correction step based on the trapezoidal rule.
- The method offers a good balance between **computational simplicity** and improved accuracy for many non-stiff problems.

### Limitations

- While more accurate than Euler's method, Heun's method can still introduce **significant errors** when using large step sizes or dealing with highly nonlinear functions.
- Like other explicit methods, it is not well-suited for **stiff systems**, where implicit methods are typically more effective.
- The need to evaluate $f(t, u)$ at multiple points in each step increases the **computational effort** compared to simpler methods like Euler's.
- The accuracy of Heun's method is still limited for **complex dynamics**, requiring smaller step sizes to achieve desired precision, which may increase computational cost.
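As with Euler's method, the predictor-corrector pair condenses into a short function. The sketch below (the helper name `heun` is illustrative; the repository's implementation lives under `src/7_ordinary_differential_equations/heun/`) reproduces the worked example:

```python
def heun(f, t0, u0, h, n_steps):
    """Advance u' = f(t, u) from (t0, u0) by n_steps Heun predictor-corrector steps."""
    t, u = t0, u0
    for _ in range(n_steps):
        u_pred = u + h * f(t, u)                        # Euler predictor
        u = u + (h / 2) * (f(t, u) + f(t + h, u_pred))  # trapezoidal corrector
        t = t + h
    return u

# Worked example: u' = u, u(0) = 1, h = 0.05, two steps to reach t = 0.1.
print(heun(lambda t, u: u, 0.0, 1.0, 0.05, 2))  # ~1.1051265625
```

Note that each step costs two evaluations of $f$, versus one for Euler, in exchange for one extra order of accuracy.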
90 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cycler>=0.11.0 2 | joblib>=1.2.0 3 | kiwisolver>=1.3.2 4 | matplotlib>=3.5.0 5 | numpy>=1.23.0 6 | Pillow>=10.3.0 7 | pyparsing>=3.0.0 8 | python-dateutil>=2.8.1 9 | scikit-learn>=1.2.0 10 | scipy>=1.7.0 11 | six>=1.16.0 12 | threadpoolctl>=3.0.0 13 | pytest>=7.0.0 # Added pytest for running tests 14 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/bisection_search/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/bisection_search/examples/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def f(x): 6 | """Example function: f(x) = x^2 - 4""" 7 | return x ** 2 - 4 8 | 9 | 10 | def bisection_method_visualize(func, a, b, tol=1e-6, max_iterations=100): 11 | """ 12 | Visualizes the initial and final steps of the bisection method for root-finding on a single plot. 13 | 14 | Parameters: 15 | func (function): The function to find the root of. 16 | a (float): Initial lower bound of the interval. 17 | b (float): Initial upper bound of the interval. 18 | tol (float): Tolerance for stopping condition. 19 | max_iterations (int): Maximum number of iterations allowed. 
20 | """ 21 | # Store initial a and b for plotting 22 | a_initial, b_initial = a, b 23 | 24 | # List to store intervals for visualization 25 | intervals = [(a, b)] 26 | iteration = 0 27 | 28 | while (b - a > tol) and (iteration < max_iterations): 29 | c = (a + b) / 2.0 # Midpoint 30 | if func(a) * func(c) < 0: 31 | b = c # Root is in [a, c] 32 | else: 33 | a = c # Root is in [c, b] 34 | intervals.append((a, b)) 35 | iteration += 1 36 | 37 | # Final a and b 38 | a_final, b_final = a, b 39 | 40 | # Generate x values for plotting 41 | x = np.linspace(a_initial - 1, b_initial + 1, 400) 42 | y = func(x) 43 | 44 | # Plot initial and final intervals on the same plot 45 | plt.figure(figsize=(8, 6)) 46 | plt.plot(x, y, label="$f(x) = x^2 - 4$", color="blue") 47 | plt.axhline(0, color="black", linewidth=0.5) 48 | 49 | # Initial step 50 | plt.scatter( 51 | [a_initial, b_initial], 52 | [func(a_initial), func(b_initial)], 53 | color="red", 54 | label="Initial Brackets [a, b]", 55 | ) 56 | c_initial = (a_initial + b_initial) / 2.0 57 | plt.scatter(c_initial, func(c_initial), color="green", label="Initial Midpoint c") 58 | 59 | # Final step 60 | plt.scatter( 61 | [a_final, b_final], 62 | [func(a_final), func(b_final)], 63 | color="purple", 64 | label="Final Brackets [a, b]", 65 | ) 66 | c_final = (a_final + b_final) / 2.0 67 | plt.scatter(c_final, func(c_final), color="orange", label="Final Midpoint c") 68 | 69 | plt.title("Bisection Method: Initial and Final Steps") 70 | plt.xlabel("x") 71 | plt.ylabel("f(x)") 72 | plt.legend() 73 | plt.grid(True) 74 | plt.show() 75 | 76 | 77 | # Initial conditions for bisection method 78 | a = 0 79 | b = 5 80 | bisection_method_visualize(f, a, b) 81 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/bisection_search/implementation/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/implementation/bisection_search.py: -------------------------------------------------------------------------------- 1 | # bisection_search.py 2 | import numpy as np 3 | from typing import Callable 4 | 5 | 6 | def bisection_search( 7 | f: Callable[[float], float], 8 | a: float, 9 | b: float, 10 | tol: float = 1e-8, 11 | max_iterations: int = 1000, 12 | ) -> float: 13 | fa: float = f(a) 14 | fb: float = f(b) 15 | 16 | # Check if either endpoint is a root 17 | if np.abs(fa) < tol: 18 | return a 19 | if np.abs(fb) < tol: 20 | return b 21 | 22 | if fa * fb > 0: 23 | raise ValueError("Function must have opposite signs at endpoints a and b.") 24 | 25 | for _ in range(max_iterations): 26 | c: float = (a + b) / 2 27 | fc: float = f(c) 28 | 29 | if np.abs(fc) < tol or (b - a) / 2 < tol: 30 | return c 31 | 32 | if fa * fc < 0: 33 | b, fb = c, fc 34 | else: 35 | a, fa = c, fc 36 | 37 | raise ValueError( 38 | "Bisection method did not converge within the maximum number of iterations." 
39 | ) 40 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/bisection_search/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/bisection_search/tests/test_bisection_search.py: -------------------------------------------------------------------------------- 1 | # test_bisection_search.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.bisection_search import bisection_search 5 | 6 | 7 | def test_bisection_linear(): 8 | f = lambda x: 2 * x - 4 9 | a, b = 0, 5 10 | root = bisection_search(f, a, b) 11 | expected = 2.0 12 | assert np.isclose(root, expected) 13 | 14 | 15 | def test_bisection_quadratic(): 16 | f = lambda x: x ** 2 - 4 17 | a, b = 0, 3 18 | root = bisection_search(f, a, b) 19 | expected = 2.0 20 | assert np.isclose(root, expected) 21 | 22 | 23 | def test_bisection_sin(): 24 | f = np.sin 25 | a, b = 3, 4 26 | root = bisection_search(f, a, b) 27 | expected = np.pi 28 | assert np.isclose(root, expected, atol=1e-5) 29 | 30 | 31 | def test_bisection_no_root(): 32 | f = lambda x: x ** 2 + 1 33 | a, b = 0, 1 34 | with pytest.raises(ValueError): 35 | bisection_search(f, a, b) 36 | 37 | 38 | def test_bisection_multiple_roots(): 39 | f = lambda x: x ** 3 - x 40 | a, b = 0, 2 41 | root = bisection_search(f, a, b) 42 | expected = 0.0 43 | assert np.isclose(root, expected) 44 | 45 | 46 | def test_bisection_tolerance(): 47 | f = lambda x: x ** 3 - 6 * x ** 2 + 11 * x - 6 48 | a, b = 2.5, 4 49 | root = bisection_search(f, a, b, tol=1e-10) 50 | expected = 3.0 51 | assert np.isclose(root, expected, atol=1e-10) 52 | 53 | 54 | def test_bisection_max_iterations(): 55 | f = lambda x: x - 1 56 | a, b = 0, 2 57 | with pytest.raises(ValueError): 58 | bisection_search(f, a, b, max_iterations=0) 59 | 60 | 61 | def test_bisection_convergence(): 62 | f = lambda x: x ** 3 - 6 * x ** 2 + 11 * x - 6 63 | a, b = 2.5, 4 64 | root = bisection_search(f, a, b) 65 | expected = 3.0 66 | assert np.isclose(root, expected) 67 | 68 | 69 | def test_bisection_exact_root(): 70 | f = lambda x: x - 2 71 | a, b = 2, 3 72 | root = bisection_search(f, a, b) 73 | expected = 2.0 74 | assert np.isclose(root, expected) 75 | 76 | 77 | def test_bisection_close_to_root(): 78 | f = lambda x: x ** 2 - 2 79 | a, b = 1, 2 80 | root = bisection_search(f, a, b) 81 | expected = np.sqrt(2) 82 | assert np.isclose(root, expected, atol=1e-8) 83 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/golden_ratio_search/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/golden_ratio_search/examples/__init__.py 
-------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def f(x): 6 | """Example function: f(x) = x^2""" 7 | return x ** 2 8 | 9 | 10 | def golden_ratio_search_visualize(func, a, b, tol=1e-6, max_iterations=100): 11 | """ 12 | Visualizes the initial and final steps of the Golden Ratio Search method for function minimization on a single plot. 13 | 14 | Parameters: 15 | func (function): The function to minimize. 16 | a (float): Initial lower bound of the interval. 17 | b (float): Initial upper bound of the interval. 18 | tol (float): Tolerance for stopping condition. 19 | max_iterations (int): Maximum number of iterations allowed. 20 | """ 21 | # Golden ratio constant 22 | phi = (1 + np.sqrt(5)) / 2 # ~1.618 23 | inv_phi = 1 / phi # ~0.618 24 | 25 | # Store initial a and b for plotting 26 | a_initial, b_initial = a, b 27 | 28 | # Compute initial internal points 29 | x1 = b - (b - a) * inv_phi 30 | x2 = a + (b - a) * inv_phi 31 | f1, f2 = func(x1), func(x2) 32 | 33 | iteration = 0 34 | intervals = [(a, b, x1, x2)] # Store for visualization 35 | 36 | while (b - a > tol) and (iteration < max_iterations): 37 | if f1 > f2: # Minimum is in [x1, b] 38 | a = x1 39 | x1 = x2 40 | f1 = f2 41 | x2 = a + (b - a) * inv_phi 42 | f2 = func(x2) 43 | else: # Minimum is in [a, x2] 44 | b = x2 45 | x2 = x1 46 | f2 = f1 47 | x1 = b - (b - a) * inv_phi 48 | f1 = func(x1) 49 | intervals.append((a, b, x1, x2)) 50 | iteration += 1 51 | 52 | # Final a and b 53 | a_final, b_final = a, b 54 | 55 | # Generate x values for plotting 56 | x = np.linspace(a_initial - 1, b_initial + 1, 400) 57 | y = func(x) 58 | 59 | # Plot initial and final intervals on the same plot 60 | plt.figure(figsize=(8, 6)) 61 | plt.plot(x, y, label="$f(x) = x^2$", color="blue") 62 | plt.axhline(0, color="black", linewidth=0.5) 63 | 64 | # Initial step 65 | plt.scatter( 66 | [a_initial, b_initial], 67 | [func(a_initial), func(b_initial)], 68 | color="red", 69 | label="Initial Brackets [a, b]", 70 | ) 71 | plt.scatter( 72 | [intervals[0][2], intervals[0][3]], 73 | [func(intervals[0][2]), func(intervals[0][3])], 74 | color="green", 75 | label="Initial Points x1, x2", 76 | ) 77 | 78 | # Final step 79 | plt.scatter( 80 | [a_final, b_final], 81 | [func(a_final), func(b_final)], 82 | color="purple", 83 | label="Final Brackets [a, b]", 84 | ) 85 | plt.scatter( 86 | [x1, x2], [func(x1), func(x2)], color="orange", label="Final Points x1, x2" 87 | ) 88 | 89 | plt.title("Golden Ratio Search: Initial and Final Steps") 90 | plt.xlabel("x") 91 | plt.ylabel("f(x)") 92 | plt.legend() 93 | plt.grid(True) 94 | plt.show() 95 | 96 | 97 | # Initial conditions for Golden Ratio Search 98 | a = -2 99 | b = 2 100 | golden_ratio_search_visualize(f, a, b) 101 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/golden_ratio_search/implementation/__init__.py -------------------------------------------------------------------------------- 
/src/1_root_and_extrema_finding/golden_ratio_search/implementation/golden_ratio_search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Callable 3 | 4 | 5 | def golden_ratio_search( 6 | f: Callable[[float], float], 7 | a: float, 8 | b: float, 9 | tol: float = 1e-8, 10 | max_iterations: int = 1000, 11 | ) -> float: 12 | f_mod = lambda x: (f(x)) ** 2  # search on f(x)^2: a sign change of f becomes a minimum, so this finds roots of f as well as minima of non-negative functions 13 | gr = (np.sqrt(5) + 1) / 2  # golden ratio, ~1.618 14 | c = b - (b - a) / gr 15 | d = a + (b - a) / gr 16 | for _ in range(max_iterations): 17 | fc = f_mod(c) 18 | fd = f_mod(d) 19 | if fc < fd: 20 | b, d = d, c 21 | c = b - (b - a) / gr 22 | elif fc > fd: 23 | a, c = c, d 24 | d = a + (b - a) / gr 25 | else: 26 | # If fc == fd, return the midpoint 27 | return (a + b) / 2 28 | if np.abs(b - a) < tol: 29 | return (a + b) / 2 30 | raise ValueError( 31 | "Golden ratio search did not converge within the maximum number of iterations." 32 | ) 33 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/golden_ratio_search/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/golden_ratio_search/tests/test_golden_ratio_search.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from ..implementation.golden_ratio_search import golden_ratio_search 4 | 5 | 6 | def test_golden_ratio_search_linear(): 7 | f = lambda x: 2 * x - 4 8 | a, b = 0, 5 9 | root = golden_ratio_search(f, a, b) 10 | expected = 2.0 11 | assert np.isclose(root, expected, atol=1e-8) 12 | 13 | 14 | def test_golden_ratio_search_quadratic(): 15 | f = lambda x: (x - 3) ** 2 16 | a, b = 0, 6 17 | root = golden_ratio_search(f, a, b) 18 | expected = 3.0 19 | assert np.isclose(root, expected, atol=1e-8) 20 | 21 | 22 | def test_golden_ratio_search_sin(): 23 | f = lambda x: np.sin(x) 24 | a, b = 3, 4 25 | root = golden_ratio_search(f, a, b) 26 | expected = 3.141592653589793 27 | assert np.isclose(root, expected, atol=1e-5) 28 | 29 | 30 | def test_golden_ratio_search_multiple_minima(): 31 | f = lambda x: (x - 1) ** 2 * (x - 3) ** 2 32 | a, b = 0, 4 33 | root = golden_ratio_search(f, a, b) 34 | expected = 2.0 35 | assert np.isclose(root, expected, atol=1e-5) 36 | 37 | 38 | def test_golden_ratio_search_tolerance(): 39 | f = lambda x: (x - 2) ** 2 40 | a, b = 0, 4 41 | root = golden_ratio_search(f, a, b, tol=1e-10) 42 | expected = 2.0 43 | assert np.isclose(root, expected, atol=1e-10) 44 | 45 | 46 | def test_golden_ratio_search_max_iterations(): 47 | f = lambda x: (x - 2) ** 2 48 | a, b = 0, 4 49 | with pytest.raises(ValueError): 50 | golden_ratio_search(f, a, b, max_iterations=0) 51 | 52 | 53 | def test_golden_ratio_search_exact_minimum(): 54 | f = lambda x: (x - 5) ** 2 55 | a, b = 3, 7 56 | root = golden_ratio_search(f, a, b) 57 | expected = 5.0 58 | assert np.isclose(root, expected, atol=1e-8) 59 | 60 | 61 | def test_golden_ratio_search_close_to_minimum(): 62 | f = lambda x: x ** 2 - 2 * x + 1 63 | a, b = 0, 3 64 | root = golden_ratio_search(f, a, b) 65 | expected = 1.0 66 | assert np.isclose(root, expected, atol=1e-8) 67 | 68 | 69 | def test_golden_ratio_search_non_unimodal(): 70 | f
= lambda x: np.sin(x) 71 | a, b = 0, 2 * np.pi 72 | root = golden_ratio_search(f, a, b) 73 | expected = 3.141592653589793 74 | assert np.isclose(root, expected, atol=1e-5) 75 | 76 | 77 | def test_golden_ratio_search_negative_interval(): 78 | f = lambda x: (x + 3) ** 2 79 | a, b = -5, -1 80 | root = golden_ratio_search(f, a, b) 81 | expected = -3.0 82 | assert np.isclose(root, expected, atol=1e-8) 83 | 84 | 85 | def test_golden_ratio_search_large_interval(): 86 | f = lambda x: (x - 100) ** 2 87 | a, b = 90, 110 88 | root = golden_ratio_search(f, a, b) 89 | expected = 100.0 90 | assert np.isclose(root, expected, atol=1e-8) 91 | 92 | 93 | def test_golden_ratio_search_flat_function(): 94 | f = lambda x: 0.0 95 | a, b = -10, 10 96 | root = golden_ratio_search(f, a, b) 97 | expected = 0.0 98 | assert np.isclose(root, expected, atol=1e-8) 99 | 100 | 101 | def test_golden_ratio_search_integer_bounds(): 102 | f = lambda x: (x - 4) ** 2 103 | a, b = 2, 6 104 | root = golden_ratio_search(f, a, b) 105 | expected = 4.0 106 | assert np.isclose(root, expected, atol=1e-8) 107 | 108 | 109 | def test_golden_ratio_search_fractional_minimum(): 110 | f = lambda x: (x - 2.5) ** 2 111 | a, b = 0, 5 112 | root = golden_ratio_search(f, a, b) 113 | expected = 2.5 114 | assert np.isclose(root, expected, atol=1e-8) 115 | 116 | 117 | def test_golden_ratio_search_function_with_noise(): 118 | np.random.seed(0) 119 | f = lambda x: (x - 3) ** 2 + np.random.normal(0, 1e-6) 120 | a, b = 0, 6 121 | root = golden_ratio_search(f, a, b) 122 | expected = 3.0 123 | assert np.isclose(root, expected, atol=1e-2) 124 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/gradient_descent/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/gradient_descent/examples/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def f(x): 6 | """Example function: f(x) = x^2""" 7 | return x ** 2 8 | 9 | 10 | def gradient_descent_visualize( 11 | func, grad, x0, learning_rate=0.1, tol=1e-6, max_iterations=100 12 | ): 13 | """ 14 | Visualizes the progress of Gradient Descent for function minimization on a single plot. 15 | 16 | Parameters: 17 | func (function): The function to minimize. 18 | grad (function): The gradient of the function. 19 | x0 (float): Initial starting point. 20 | learning_rate (float): Step size. 21 | tol (float): Tolerance for stopping condition. 22 | max_iterations (int): Maximum number of iterations allowed. 
23 | """ 24 | # Store values for plotting 25 | x_values = [x0] 26 | y_values = [func(x0)] 27 | iteration = 0 28 | 29 | while iteration < max_iterations: 30 | gradient = grad(x0) 31 | x_new = x0 - learning_rate * gradient 32 | x_values.append(x_new) 33 | y_values.append(func(x_new)) 34 | 35 | # Check convergence 36 | if abs(x_new - x0) < tol: 37 | break 38 | 39 | x0 = x_new 40 | iteration += 1 41 | 42 | # Generate x values for smooth curve plotting 43 | x = np.linspace(min(x_values) - 1, max(x_values) + 1, 400) 44 | y = func(x) 45 | 46 | # Plot the function and gradient descent steps 47 | plt.figure(figsize=(8, 6)) 48 | plt.plot(x, y, label="$f(x) = x^2$", color="blue") 49 | plt.scatter(x_values, y_values, color="red", label="Gradient Descent Steps") 50 | plt.axhline(0, color="black", linewidth=0.5) 51 | 52 | # Highlight the starting and final points 53 | plt.scatter(x_values[0], y_values[0], color="green", label="Starting Point") 54 | plt.scatter(x_values[-1], y_values[-1], color="purple", label="Final Point") 55 | 56 | plt.title("Gradient Descent: Progress of Steps") 57 | plt.xlabel("x") 58 | plt.ylabel("f(x)") 59 | plt.legend() 60 | plt.grid(True) 61 | plt.show() 62 | 63 | 64 | # Example function and gradient 65 | def f(x): 66 | return x ** 2 67 | 68 | 69 | def grad_f(x): 70 | return 2 * x 71 | 72 | 73 | # Initial conditions for Gradient Descent 74 | x0 = 5 # Starting point 75 | learning_rate = 0.1 76 | gradient_descent_visualize(f, grad_f, x0, learning_rate) 77 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/gradient_descent/implementation/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/implementation/gradient_descent.py: -------------------------------------------------------------------------------- 1 | # gradient_descent.py 2 | import numpy as np 3 | from typing import Callable 4 | 5 | 6 | def gradient_descent( 7 | f: Callable[[np.ndarray], float], 8 | grad_f: Callable[[np.ndarray], np.ndarray], 9 | x0: np.ndarray, 10 | learning_rate: float = 0.01, 11 | tol: float = 1e-6, 12 | max_iterations: int = 1000, 13 | ) -> np.ndarray: 14 | x = x0.astype(float) 15 | for _ in range(max_iterations): 16 | grad = grad_f(x) 17 | grad_norm = np.linalg.norm(grad, ord=2) 18 | if grad_norm < tol: 19 | return x 20 | x = x - learning_rate * grad 21 | raise ValueError( 22 | "Gradient descent did not converge within the maximum number of iterations." 
23 | ) 24 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/gradient_descent/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/gradient_descent/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/newton_raphson/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/newton_raphson/examples/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def f(x): 6 | """Example function: f(x) = x^2 - 4""" 7 | return x ** 2 - 4 8 | 9 | 10 | def df(x): 11 | """Derivative of the example function: f'(x) = 2x""" 12 | return 2 * x 13 | 14 | 15 | def newton_method_visualize(func, dfunc, x0, tol=1e-6, max_iterations=20): 16 | """ 17 | Visualizes the progress of Newton's Method for root-finding on a single plot. 18 | 19 | Parameters: 20 | func (function): The function whose root is to be found. 21 | dfunc (function): The derivative of the function. 22 | x0 (float): Initial starting point. 23 | tol (float): Tolerance for stopping condition. 24 | max_iterations (int): Maximum number of iterations allowed. 25 | """ 26 | # Store values for plotting 27 | x_values = [x0] 28 | y_values = [func(x0)] 29 | iteration = 0 30 | 31 | while iteration < max_iterations: 32 | f_x = func(x0) 33 | df_x = dfunc(x0) 34 | 35 | # Avoid division by zero 36 | if df_x == 0: 37 | print("Derivative is zero. 
Stopping iteration.") 38 | break 39 | 40 | # Newton's update 41 | x_new = x0 - f_x / df_x 42 | x_values.append(x_new) 43 | y_values.append(func(x_new)) 44 | 45 | # Check convergence 46 | if abs(x_new - x0) < tol: 47 | break 48 | 49 | x0 = x_new 50 | iteration += 1 51 | 52 | # Generate x values for smooth curve plotting 53 | x = np.linspace(min(x_values) - 1, max(x_values) + 1, 400) 54 | y = func(x) 55 | 56 | # Plot the function and Newton's method steps 57 | plt.figure(figsize=(8, 6)) 58 | plt.plot(x, y, label="$f(x) = x^2 - 4$", color="blue") 59 | plt.scatter(x_values, y_values, color="red", label="Newton's Method Steps") 60 | plt.axhline(0, color="black", linewidth=0.5) 61 | 62 | # Highlight the starting and final points 63 | plt.scatter(x_values[0], y_values[0], color="green", label="Starting Point") 64 | plt.scatter(x_values[-1], y_values[-1], color="purple", label="Final Point") 65 | 66 | plt.title("Newton's Method: Progress of Steps") 67 | plt.xlabel("x") 68 | plt.ylabel("f(x)") 69 | plt.legend() 70 | plt.grid(True) 71 | plt.show() 72 | 73 | 74 | # Example function and derivative 75 | def f(x): 76 | return x ** 2 - 4 77 | 78 | 79 | def df(x): 80 | return 2 * x 81 | 82 | 83 | # Initial conditions for Newton's Method 84 | x0 = 3 # Starting point 85 | newton_method_visualize(f, df, x0) 86 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/newton_raphson/implementation/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/implementation/newton_raphson.py: -------------------------------------------------------------------------------- 1 | # newton_raphson.py 2 | import numpy as np 3 | from typing import Callable, Optional 4 | 5 | 6 | def newton_raphson( 7 | f: Callable[[float], float], 8 | df: Callable[[float], float], 9 | x0: float, 10 | tol: float = 1e-8, 11 | max_iterations: int = 1000, 12 | ) -> float: 13 | """ 14 | Newton-Raphson method for finding the root of a real-valued function. 15 | 16 | Args: 17 | f (Callable[[float], float]): The function for which to find the root. 18 | df (Callable[[float], float]): The derivative of the function f. 19 | x0 (float): Initial guess for the root. 20 | tol (float, optional): Tolerance for convergence. Defaults to 1e-8. 21 | max_iterations (int, optional): Maximum number of iterations. Defaults to 1000. 22 | 23 | Returns: 24 | float: The estimated root of the function. 25 | 26 | Raises: 27 | ValueError: If the derivative is zero during iteration. 28 | ValueError: If the method does not converge within the maximum number of iterations. 29 | """ 30 | x = x0 31 | for iteration in range(1, max_iterations + 1): 32 | fx = f(x) 33 | dfx = df(x) 34 | if np.isclose(dfx, 0.0, atol=1e-12): 35 | raise ValueError(f"Derivative is zero at iteration {iteration}, x = {x}.") 36 | x_new = x - fx / dfx 37 | if np.abs(x_new - x) < tol: 38 | return x_new 39 | x = x_new 40 | raise ValueError( 41 | "Newton-Raphson method did not converge within the maximum number of iterations." 
42 | ) 43 | 44 | 45 | def newton_raphson_system( 46 | F: Callable[[np.ndarray], np.ndarray], 47 | J: Callable[[np.ndarray], np.ndarray], 48 | x0: Optional[np.ndarray] = None, 49 | tol: float = 1e-8, 50 | max_iterations: int = 1000, 51 | ) -> np.ndarray: 52 | """ 53 | Newton-Raphson method for finding the root of a system of nonlinear equations. 54 | 55 | Args: 56 | F (Callable[[np.ndarray], np.ndarray]): The system of functions. 57 | J (Callable[[np.ndarray], np.ndarray]): The Jacobian matrix of F. 58 | x0 (Optional[np.ndarray], optional): Initial guess for the roots. Defaults to None. 59 | tol (float, optional): Tolerance for convergence. Defaults to 1e-8. 60 | max_iterations (int, optional): Maximum number of iterations. Defaults to 1000. 61 | 62 | Returns: 63 | np.ndarray: The estimated roots of the system. 64 | 65 | Raises: 66 | ValueError: If the Jacobian is singular at any iteration. 67 | ValueError: If the method does not converge within the maximum number of iterations. 68 | ValueError: If the initial guess x0 is not provided. 69 | """ 70 | if x0 is None: 71 | raise ValueError("Initial guess x0 must be provided") 72 | x = x0.astype(float) 73 | for iteration in range(1, max_iterations + 1): 74 | Fx = F(x) 75 | Jx = J(x) 76 | try: 77 | delta = np.linalg.solve(Jx, -Fx) 78 | except np.linalg.LinAlgError: 79 | raise ValueError(f"Jacobian is singular at iteration {iteration}, x = {x}.") 80 | x_new = x + delta 81 | if np.linalg.norm(delta, ord=np.inf) < tol: 82 | return x_new 83 | x = x_new 84 | raise ValueError( 85 | "Newton-Raphson method did not converge within the maximum number of iterations." 86 | ) 87 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/newton_raphson/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/newton_raphson/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/relaxation_method/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/relaxation_method/examples/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def g(x): 6 | """Fixed-point iteration function: g(x) = (x^2 + 2) / 3""" 7 | return (x ** 2 + 2) / 3 8 | 9 | 10 | def fixed_point_iteration_visualize(g_func, x0, tol=1e-6, max_iterations=50): 11 | """ 12 | Visualizes the progress of the fixed-point iteration method for root-finding. 13 | 14 | Parameters: 15 | g_func (function): The fixed-point iteration function. 16 | x0 (float): Initial starting point. 
17 | tol (float): Tolerance for stopping condition. 18 | max_iterations (int): Maximum number of iterations allowed. 19 | """ 20 | # Store iteration values for plotting 21 | x_values = [x0] 22 | iteration = 0 23 | 24 | while iteration < max_iterations: 25 | x_new = g_func(x0) 26 | x_values.append(x_new) 27 | 28 | # Check convergence 29 | if abs(x_new - x0) < tol: 30 | break 31 | 32 | x0 = x_new 33 | iteration += 1 34 | 35 | # Generate x values for smooth curve plotting 36 | x = np.linspace(min(x_values) - 0.5, max(x_values) + 0.5, 400) 37 | y_gx = g_func(x) 38 | 39 | # Plot the fixed-point function and steps 40 | plt.figure(figsize=(8, 6)) 41 | plt.plot(x, y_gx, label="$g(x)$", linestyle="--", color="orange") 42 | plt.plot(x, x, label="$y = x$", color="gray") 43 | plt.scatter( 44 | x_values, [g_func(x) for x in x_values], color="red", label="Iteration Steps" 45 | ) 46 | 47 | # Highlight the starting and final points 48 | plt.scatter(x_values[0], g_func(x_values[0]), color="green", label="Starting Point") 49 | plt.scatter(x_values[-1], g_func(x_values[-1]), color="purple", label="Final Point") 50 | 51 | plt.title("Fixed-Point Iteration Method: Progress of Steps") 52 | plt.xlabel("x") 53 | plt.ylabel("y") 54 | plt.axhline(0, color="black", linewidth=0.5) 55 | plt.legend() 56 | plt.grid(True) 57 | plt.show() 58 | 59 | 60 | # Example fixed-point iteration function 61 | def g(x): 62 | return (x ** 2 + 2) / 3 63 | 64 | 65 | # Initial guess for Fixed-Point Iteration 66 | x0 = 0 67 | fixed_point_iteration_visualize(g, x0) 68 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/relaxation_method/implementation/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/implementation/relaxation_method.py: -------------------------------------------------------------------------------- 1 | # relaxation_method.py 2 | import numpy as np 3 | from typing import Callable, Optional 4 | 5 | 6 | def relaxation_method( 7 | func: Callable[[float], float], 8 | initial_guess: float, 9 | omega: Optional[float] = None, 10 | tol: float = 1e-6, 11 | max_iterations: int = 100000, 12 | ) -> float: 13 | x = initial_guess 14 | delta = 1e-8 15 | for _ in range(max_iterations): 16 | if omega is not None: 17 | w = omega  # fixed relaxation factor supplied by the caller 18 | else: 19 | # Plain fixed-point iteration converges only when |f'(x)| < 1, so 20 | # estimate f'(x) numerically and damp with w = -1/f' when it is too steep. 21 | f_prime = (func(x + delta) - func(x - delta)) / (2 * delta) 22 | w = -1.0 / f_prime if abs(f_prime) >= 1 else 1.0 23 | x_new = (1 - w) * x + w * func(x) 24 | if np.abs(x_new - x) < tol: 25 | return x_new 26 | x = x_new 27 | return x 28 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/relaxation_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/relaxation_method/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/secant_method/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/secant_method/examples/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/examples/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | def f(x): 6 | """Example function: f(x) = x^2 - 4""" 7 | return x ** 2 - 4 8 | 9 | 10 | def secant_method_visualize(f_func, x0, x1, tol=1e-6, max_iterations=50): 11 | """ 12 | Visualizes the progress of the Secant Method for root-finding. 13 | 14 | Parameters: 15 | f_func (function): The function whose root is to be found. 16 | x0 (float): First initial guess. 17 | x1 (float): Second initial guess. 18 | tol (float): Tolerance for stopping condition. 19 | max_iterations (int): Maximum number of iterations allowed. 20 | """ 21 | # Store iteration values for plotting 22 | x_values = [x0, x1] 23 | y_values = [f_func(x0), f_func(x1)] 24 | iteration = 0 25 | 26 | while iteration < max_iterations: 27 | f_x0 = f_func(x0) 28 | f_x1 = f_func(x1) 29 | 30 | # Avoid division by zero 31 | if f_x1 - f_x0 == 0: 32 | print("Division by zero encountered. Stopping iteration.") 33 | break 34 | 35 | # Secant Method update 36 | x_new = x1 - f_x1 * (x1 - x0) / (f_x1 - f_x0) 37 | x_values.append(x_new) 38 | y_values.append(f_func(x_new)) 39 | 40 | # Check convergence 41 | if abs(x_new - x1) < tol or abs(f_func(x_new)) < tol: 42 | break 43 | 44 | # Update points 45 | x0, x1 = x1, x_new 46 | iteration += 1 47 | 48 | # Generate x values for smooth curve plotting 49 | x = np.linspace(min(x_values) - 1, max(x_values) + 1, 400) 50 | y = f_func(x) 51 | 52 | # Plot the function and Secant Method steps 53 | plt.figure(figsize=(8, 6)) 54 | plt.plot(x, y, label="$f(x) = x^2 - 4$", color="blue") 55 | plt.axhline(0, color="black", linewidth=0.5) 56 | plt.scatter(x_values, y_values, color="red", label="Secant Method Steps") 57 | 58 | # Highlight the starting and final points 59 | plt.scatter(x_values[0], y_values[0], color="green", label="Starting Point") 60 | plt.scatter(x_values[-1], y_values[-1], color="purple", label="Final Point") 61 | 62 | plt.title("Secant Method: Progress of Steps") 63 | plt.xlabel("x") 64 | plt.ylabel("f(x)") 65 | plt.legend() 66 | plt.grid(True) 67 | plt.show() 68 | 69 | 70 | # Example function for Secant Method 71 | def f(x): 72 | return x ** 2 - 4 73 | 74 | 75 | # Initial guesses for Secant Method 76 | x0 = 0 77 | x1 = 1 78 | secant_method_visualize(f, x0, x1) 79 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/secant_method/implementation/__init__.py -------------------------------------------------------------------------------- 
/src/1_root_and_extrema_finding/secant_method/implementation/secant_method.py: -------------------------------------------------------------------------------- 1 | # secant_method.py 2 | import numpy as np 3 | from typing import Callable 4 | 5 | 6 | def secant_method( 7 | f: Callable[[float], float], 8 | x0: float, 9 | x1: float, 10 | tol: float = 1e-6, 11 | max_iterations: int = 100000, 12 | ) -> float: 13 | for _ in range(max_iterations): 14 | f_x0 = f(x0) 15 | f_x1 = f(x1) 16 | denominator = f_x1 - f_x0 17 | if np.isclose(denominator, 0.0): 18 | raise ValueError("Denominator is zero. Division by zero encountered.") 19 | x2 = x1 - f_x1 * (x1 - x0) / denominator 20 | if np.abs(x2 - x1) < tol: 21 | return x2 22 | x0, x1 = x1, x2 23 | raise ValueError( 24 | "Secant method did not converge within the maximum number of iterations." 25 | ) 26 | -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/1_root_and_extrema_finding/secant_method/tests/__init__.py -------------------------------------------------------------------------------- /src/1_root_and_extrema_finding/secant_method/tests/test_secant_method.py: -------------------------------------------------------------------------------- 1 | # test_secant_method.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.secant_method import secant_method 5 | 6 | 7 | def test_secant_linear(): 8 | f = lambda x: 2 * x - 4 9 | root = secant_method(f, 0, 5) 10 | expected = 2.0 11 | assert np.isclose(root, expected, atol=1e-6) 12 | 13 | 14 | def test_secant_quadratic(): 15 | f = lambda x: x ** 2 - 4 16 | root = secant_method(f, 1, 3) 17 | expected = 2.0 18 | assert np.isclose(root, expected, atol=1e-6) 19 | 20 | 21 | def test_secant_sin(): 22 | f = np.sin 23 | root = secant_method(f, 3, 4) 24 | expected = np.pi 25 | assert np.isclose(root, expected, atol=1e-5) 26 | 27 | 28 | def test_secant_no_root(): 29 | f = lambda x: x ** 2 + 1 30 | with pytest.raises(ValueError): 31 | secant_method(f, 0, 1) 32 | 33 | 34 | def test_secant_multiple_roots(): 35 | f = lambda x: x ** 3 - x 36 | root = secant_method(f, 0.5, 2.0) 37 | expected = 1.0 38 | assert np.isclose(root, expected, atol=1e-6) 39 | 40 | 41 | def test_secant_tolerance(): 42 | f = lambda x: x ** 3 - 6 * x ** 2 + 11 * x - 6 43 | root = secant_method(f, 2.5, 3.5, tol=1e-10) 44 | expected = 3.0 45 | assert np.isclose(root, expected, atol=1e-10) 46 | 47 | 48 | def test_secant_max_iterations(): 49 | f = lambda x: 1 - x 50 | with pytest.raises(ValueError): 51 | secant_method(f, -100, 100, max_iterations=1) 52 | 53 | 54 | def test_secant_exact_root(): 55 | f = lambda x: x - 5 56 | root = secant_method(f, 0, 10) 57 | expected = 5.0 58 | assert np.isclose(root, expected, atol=1e-8) 59 | 60 | 61 | def test_secant_close_to_root(): 62 | f = lambda x: x ** 2 - 2 63 | root = secant_method(f, 1.4, 1.5) 64 | expected = np.sqrt(2) 65 | assert np.isclose(root, expected, atol=1e-8) 66 | 67 | 68 | def test_secant_negative_root(): 69 | f = lambda x: x + 3 70 | root = secant_method(f, 0, 1) 71 | expected = -3.0 72 | assert np.isclose(root, expected, atol=1e-6) 73 | 74 | 75 | def test_secant_high_precision(): 76 | f = lambda x: x ** 3 - 6 * x ** 2 + 11 * x - 6 77 | root = secant_method(f, 3.5, 4.0, tol=1e-12) 78 | expected = 3.0 79 | 
assert np.isclose(root, expected, atol=1e-12) 80 | 81 | 82 | def test_secant_initial_guess(): 83 | f = lambda x: x ** 2 - 1 84 | root = secant_method(f, 0.5, 2.0) 85 | expected = 1.0 86 | assert np.isclose(root, expected, atol=1e-6) 87 | 88 | 89 | def test_secant_fractional_minimum(): 90 | f = lambda x: x - 2.5 91 | root = secant_method(f, 1.0, 3.0) 92 | expected = 2.5 93 | assert np.isclose(root, expected, atol=1e-6) 94 | 95 | 96 | def test_secant_function_with_noise(): 97 | np.random.seed(0) 98 | f = lambda x: x - 3 + np.random.normal(0, 1e-6) 99 | root = secant_method(f, 2.0, 4.0) 100 | expected = 3.0 101 | assert np.isclose(root, expected, atol=1e-5) 102 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gauss_seidel/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gauss_seidel/examples/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gauss_seidel/implementation/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/implementation/gauss_seidel.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Optional 3 | 4 | 5 | def inverse_matrix(A: np.ndarray) -> np.ndarray: 6 | n: int = A.shape[0] 7 | AI: np.ndarray = np.hstack((A.copy(), np.eye(n))) 8 | for i in range(n): 9 | if np.abs(AI[i:, i]).max() == 0: 10 | raise ValueError("Matrix is singular and cannot be inverted.") 11 | max_row: int = np.argmax(np.abs(AI[i:, i])) + i 12 | AI[[i, max_row]] = AI[[max_row, i]] 13 | AI[i] = AI[i] / AI[i, i] 14 | for j in range(n): 15 | if i != j: 16 | AI[j] -= AI[j, i] * AI[i] 17 | return AI[:, n:] 18 | 19 | 20 | def solve_inverse_matrix(A: np.ndarray, b: np.ndarray) -> np.ndarray: 21 | A_inv: np.ndarray = inverse_matrix(A) 22 | return A_inv @ b 23 | 24 | 25 | def gauss_seidel( 26 | A: np.ndarray, 27 | b: np.ndarray, 28 | x0: Optional[np.ndarray] = None, 29 | epsilon: float = 1e-8, 30 | max_iter: int = 100, 31 | ) -> np.ndarray: 32 | n: int = A.shape[0] 33 | x: np.ndarray = ( 34 | np.zeros_like(b, dtype=np.double) if x0 is None else x0.astype(float) 35 | ) 36 | for _ in range(max_iter): 37 | x_prev: np.ndarray = x.copy() 38 | for i in range(n): 39 | x[i] = ( 40 | b[i] - np.dot(A[i, :i], x[:i]) - 
np.dot(A[i, i + 1 :], x_prev[i + 1 :]) 41 | ) / A[i, i] 42 | if np.linalg.norm(x - x_prev) < epsilon: 43 | return x 44 | raise ValueError( 45 | "Gauss-Seidel method did not converge within the maximum number of iterations." 46 | ) 47 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gauss_seidel/tests/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gauss_seidel/tests/test_gauss_seidel.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from ..implementation.gauss_seidel import ( 4 | inverse_matrix, 5 | solve_inverse_matrix, 6 | gauss_seidel, 7 | ) 8 | 9 | 10 | def test_inverse_matrix_identity(): 11 | A = np.eye(3) 12 | A_inv = inverse_matrix(A) 13 | assert np.allclose(A_inv, np.eye(3)) 14 | 15 | 16 | def test_inverse_matrix_simple(): 17 | A = np.array([[4, 7], [2, 6]], dtype=float) 18 | expected = np.linalg.inv(A) 19 | A_inv = inverse_matrix(A) 20 | assert np.allclose(A_inv, expected) 21 | 22 | 23 | def test_inverse_matrix_singular(): 24 | A = np.array([[1, 2], [2, 4]], dtype=float) 25 | with pytest.raises(Exception): 26 | inverse_matrix(A) 27 | 28 | 29 | def test_solve_inverse_matrix(): 30 | A = np.array([[3, 0], [0, 2]], dtype=float) 31 | b = np.array([9, 8], dtype=float) 32 | expected = np.linalg.solve(A, b) 33 | x = solve_inverse_matrix(A, b) 34 | assert np.allclose(x, expected) 35 | 36 | 37 | def test_solve_inverse_matrix_large(): 38 | A = np.random.rand(5, 5) 39 | b = np.random.rand(5) 40 | x_expected = np.linalg.solve(A, b) 41 | x = solve_inverse_matrix(A, b) 42 | assert np.allclose(x, x_expected) 43 | 44 | 45 | def test_gauss_seidel_convergence(): 46 | A = np.array([[4, 1], [2, 3]], dtype=float) 47 | b = np.array([1, 2], dtype=float) 48 | expected = np.linalg.solve(A, b) 49 | x = gauss_seidel(A, b) 50 | assert np.allclose(x, expected, atol=1e-6) 51 | 52 | 53 | def test_gauss_seidel_no_convergence(): 54 | A = np.array([[1, 2], [3, 4]], dtype=float) 55 | b = np.array([5, 6], dtype=float) 56 | with pytest.raises(ValueError): 57 | gauss_seidel(A, b, max_iter=5) 58 | 59 | 60 | def test_gauss_seidel_with_initial_guess(): 61 | A = np.array([[3, 1], [1, 2]], dtype=float) 62 | b = np.array([9, 8], dtype=float) 63 | x0 = np.array([0, 0], dtype=float) 64 | expected = np.linalg.solve(A, b) 65 | x = gauss_seidel(A, b, x0=x0) 66 | assert np.allclose(x, expected, atol=1e-6) 67 | 68 | 69 | def test_gauss_seidel_high_precision(): 70 | A = np.array([[10, 1], [1, 10]], dtype=float) 71 | b = np.array([11, 11], dtype=float) 72 | expected = np.linalg.solve(A, b) 73 | x = gauss_seidel(A, b, epsilon=1e-12) 74 | assert np.allclose(x, expected, atol=1e-10) 75 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/gaussian_elimination/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gaussian_elimination/__init__.py -------------------------------------------------------------------------------- 
/src/2_systems_of_equations/gaussian_elimination/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gaussian_elimination/examples/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gaussian_elimination/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def gaussian_elimination(A, b): 6 | """ 7 | Solve the linear system Ax = b using Gaussian elimination with back substitution. 8 | 9 | Parameters: 10 | A (numpy.ndarray): Coefficient matrix (n x n). 11 | b (numpy.ndarray): Right-hand side vector (n,). 12 | 13 | Returns: 14 | x (numpy.ndarray): Solution vector. 15 | """ 16 | n = len(b) 17 | # Augmented matrix 18 | Ab = np.hstack([A, b.reshape(-1, 1)]) 19 | 20 | # Forward elimination 21 | for i in range(n): 22 | # Partial pivoting: Find the maximum element in the current column 23 | max_row = np.argmax(np.abs(Ab[i:n, i])) + i 24 | if i != max_row: 25 | Ab[[i, max_row]] = Ab[[max_row, i]] 26 | 27 | # Normalize pivot row 28 | Ab[i] = Ab[i] / Ab[i, i] 29 | 30 | # Eliminate below 31 | for j in range(i + 1, n): 32 | Ab[j] -= Ab[j, i] * Ab[i] 33 | 34 | # Back substitution 35 | x = np.zeros(n) 36 | for i in range(n - 1, -1, -1): 37 | x[i] = Ab[i, -1] - np.sum(Ab[i, i + 1 : n] * x[i + 1 : n]) 38 | 39 | return x 40 | 41 | 42 | def plot_gaussian_elimination_process(A, b): 43 | """ 44 | Visualize the step-by-step process of Gaussian elimination. 45 | 46 | Parameters: 47 | A (numpy.ndarray): Coefficient matrix. 48 | b (numpy.ndarray): Right-hand side vector. 
49 | """ 50 | n = len(b) 51 | Ab = np.hstack([A, b.reshape(-1, 1)]) 52 | steps = [Ab.copy()] 53 | 54 | # Forward elimination with visualization steps 55 | for i in range(n): 56 | max_row = np.argmax(np.abs(Ab[i:n, i])) + i 57 | if i != max_row: 58 | Ab[[i, max_row]] = Ab[[max_row, i]] 59 | Ab[i] = Ab[i] / Ab[i, i] 60 | for j in range(i + 1, n): 61 | Ab[j] -= Ab[j, i] * Ab[i] 62 | steps.append(Ab.copy()) 63 | 64 | # Plot each step of the Gaussian elimination process 65 | fig, axes = plt.subplots(1, len(steps), figsize=(20, 4)) 66 | for idx, step in enumerate(steps): 67 | axes[idx].imshow(step, cmap="coolwarm", aspect="auto") 68 | axes[idx].set_title(f"Step {idx}") 69 | axes[idx].axis("off") 70 | plt.suptitle("Gaussian Elimination: Forward Elimination Steps") 71 | plt.tight_layout() 72 | plt.show() 73 | 74 | 75 | if __name__ == "__main__": 76 | # Example system 77 | A = np.array([[2, 1, -1], [-3, -1, 2], [-2, 1, 2]], dtype=float) 78 | b = np.array([8, -11, -3], dtype=float) 79 | 80 | print("Solving system using Gaussian elimination:") 81 | solution = gaussian_elimination(A, b) 82 | print(f"Solution: {solution}") 83 | 84 | # Visualize the elimination process 85 | plot_gaussian_elimination_process(A, b) 86 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/gaussian_elimination/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gaussian_elimination/implementation/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/gaussian_elimination/implementation/gaussian_elimination.py: -------------------------------------------------------------------------------- 1 | # gaussian_elimination.py 2 | import numpy as np 3 | from typing import Tuple 4 | 5 | 6 | def gaussian_elimination(A: np.ndarray, b: np.ndarray) -> np.ndarray: 7 | if A.shape[0] != A.shape[1]: 8 | raise ValueError("Matrix A must be square.") 9 | if A.shape[0] != b.shape[0]: 10 | raise ValueError("Matrix A and vector b dimensions do not match.") 11 | n: int = A.shape[0] 12 | augmented: np.ndarray = np.hstack((A.astype(float), b.reshape(-1, 1).astype(float))) 13 | for i in range(n): 14 | pivot: int = np.argmax(np.abs(augmented[i:, i])) + i 15 | if np.isclose(augmented[pivot, i], 0.0): 16 | raise ValueError("Matrix is singular or nearly singular.") 17 | if pivot != i: 18 | augmented[[i, pivot]] = augmented[[pivot, i]] 19 | augmented[i] = augmented[i] / augmented[i, i] 20 | for j in range(i + 1, n): 21 | augmented[j] -= augmented[j, i] * augmented[i] 22 | x: np.ndarray = np.zeros(n) 23 | for i in range(n - 1, -1, -1): 24 | x[i] = augmented[i, -1] - np.dot(augmented[i, i + 1 : n], x[i + 1 : n]) 25 | return x 26 | 27 | 28 | def solve_gaussian_elimination(A: np.ndarray, b: np.ndarray) -> np.ndarray: 29 | x = gaussian_elimination(A, b) 30 | return np.round(x, decimals=8) 31 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/gaussian_elimination/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/gaussian_elimination/tests/__init__.py 
-------------------------------------------------------------------------------- /src/2_systems_of_equations/jacobi_method/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/jacobi_method/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/jacobi_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/jacobi_method/examples/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/jacobi_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/jacobi_method/implementation/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/jacobi_method/implementation/jacobi_method.py: -------------------------------------------------------------------------------- 1 | # jacobi_method.py 2 | import numpy as np 3 | from typing import Optional 4 | 5 | 6 | def jacobi_method( 7 | A: np.ndarray, 8 | b: np.ndarray, 9 | x0: Optional[np.ndarray] = None, 10 | epsilon: float = 1e-8, 11 | max_iterations: int = 1000, 12 | ) -> np.ndarray: 13 | if A.shape[0] != A.shape[1]: 14 | raise ValueError("Matrix A must be square.") 15 | n = A.shape[0] 16 | D = np.diag(A) 17 | if np.any(D == 0): 18 | raise ValueError("Matrix A has zero diagonal elements.") 19 | R = A - np.diagflat(D) 20 | D_inv = 1.0 / D 21 | if x0 is None: 22 | x = np.zeros(n) 23 | else: 24 | x = x0.astype(float) 25 | for _ in range(max_iterations): 26 | x_new = D_inv * (b - np.dot(R, x)) 27 | if np.linalg.norm(x_new - x, ord=np.inf) < epsilon: 28 | return x_new 29 | x = x_new 30 | raise ValueError( 31 | "Jacobi method did not converge within the maximum number of iterations." 
32 | ) 33 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/jacobi_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/jacobi_method/tests/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/lu_decomposition/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/lu_decomposition/examples/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def lu_decomposition(A): 6 | """ 7 | Perform LU decomposition of a square matrix A. 8 | 9 | Parameters: 10 | A (numpy.ndarray): Coefficient matrix (n x n). 11 | 12 | Returns: 13 | L (numpy.ndarray): Lower triangular matrix with unit diagonal. 14 | U (numpy.ndarray): Upper triangular matrix. 15 | """ 16 | n = A.shape[0] 17 | L = np.eye(n) 18 | U = np.zeros_like(A, dtype=float) 19 | 20 | for i in range(n): 21 | # Compute upper triangular matrix U 22 | for j in range(i, n): 23 | U[i, j] = A[i, j] - np.sum(L[i, :i] * U[:i, j]) 24 | 25 | # Compute lower triangular matrix L 26 | for j in range(i + 1, n): 27 | L[j, i] = (A[j, i] - np.sum(L[j, :i] * U[:i, i])) / U[i, i] 28 | 29 | return L, U 30 | 31 | 32 | def forward_substitution(L, b): 33 | """ 34 | Solve the lower triangular system Lc = b using forward substitution. 35 | 36 | Parameters: 37 | L (numpy.ndarray): Lower triangular matrix. 38 | b (numpy.ndarray): Right-hand side vector. 39 | 40 | Returns: 41 | c (numpy.ndarray): Solution vector. 42 | """ 43 | n = L.shape[0] 44 | c = np.zeros_like(b, dtype=float) 45 | for i in range(n): 46 | c[i] = (b[i] - np.dot(L[i, :i], c[:i])) / L[i, i] 47 | return c 48 | 49 | 50 | def backward_substitution(U, c): 51 | """ 52 | Solve the upper triangular system Ux = c using backward substitution. 53 | 54 | Parameters: 55 | U (numpy.ndarray): Upper triangular matrix. 56 | c (numpy.ndarray): Solution vector from forward substitution. 57 | 58 | Returns: 59 | x (numpy.ndarray): Solution vector. 60 | """ 61 | n = U.shape[0] 62 | x = np.zeros_like(c, dtype=float) 63 | for i in range(n - 1, -1, -1): 64 | x[i] = (c[i] - np.dot(U[i, i + 1 :], x[i + 1 :])) / U[i, i] 65 | return x 66 | 67 | 68 | def solve_lu(A, b): 69 | """ 70 | Solve the system Ax = b using LU decomposition. 71 | 72 | Parameters: 73 | A (numpy.ndarray): Coefficient matrix (n x n). 74 | b (numpy.ndarray): Right-hand side vector. 75 | 76 | Returns: 77 | x (numpy.ndarray): Solution vector. 
78 | """ 79 | L, U = lu_decomposition(A) 80 | c = forward_substitution(L, b) 81 | x = backward_substitution(U, c) 82 | return x, L, U 83 | 84 | 85 | def plot_matrix_decomposition(L, U): 86 | """ 87 | Visualize the L and U matrices from LU decomposition. 88 | 89 | Parameters: 90 | L (numpy.ndarray): Lower triangular matrix. 91 | U (numpy.ndarray): Upper triangular matrix. 92 | """ 93 | fig, axes = plt.subplots(1, 2, figsize=(12, 6)) 94 | axes[0].imshow(L, cmap="coolwarm", interpolation="none") 95 | axes[0].set_title("Lower Triangular Matrix L") 96 | axes[0].axis("off") 97 | 98 | axes[1].imshow(U, cmap="coolwarm", interpolation="none") 99 | axes[1].set_title("Upper Triangular Matrix U") 100 | axes[1].axis("off") 101 | 102 | plt.suptitle("LU Decomposition Visualization") 103 | plt.tight_layout() 104 | plt.show() 105 | 106 | 107 | if __name__ == "__main__": 108 | # Example system 109 | A = np.array([[2, 3, -4], [3, -3, 2], [-2, 6, -1]], dtype=float) 110 | b = np.array([1, -2, 3], dtype=float) 111 | 112 | print("Solving system using LU decomposition:") 113 | x, L, U = solve_lu(A, b) 114 | print(f"Solution: {x}") 115 | print("L (Lower Triangular Matrix):") 116 | print(L) 117 | print("U (Upper Triangular Matrix):") 118 | print(U) 119 | 120 | # Plot LU decomposition 121 | plot_matrix_decomposition(L, U) 122 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/lu_decomposition/implementation/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/implementation/lu_decomposition.py: -------------------------------------------------------------------------------- 1 | # lu_decomposition.py 2 | import numpy as np 3 | from typing import Tuple 4 | 5 | 6 | def lu_decomposition(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 7 | n: int = A.shape[0] 8 | if A.shape[0] != A.shape[1]: 9 | raise ValueError("Matrix A must be square.") 10 | P: np.ndarray = np.eye(n) 11 | L: np.ndarray = np.zeros((n, n), dtype=float) 12 | U: np.ndarray = A.copy().astype(float) 13 | for i in range(n): 14 | pivot: int = np.argmax(np.abs(U[i:, i])) + i 15 | if np.isclose(U[pivot, i], 0.0): 16 | raise ValueError("Matrix is singular.") 17 | if pivot != i: 18 | U[[i, pivot]] = U[[pivot, i]] 19 | P[[i, pivot]] = P[[pivot, i]] 20 | if i > 0: 21 | L[[i, pivot], :i] = L[[pivot, i], :i] 22 | L[i, i] = 1.0 23 | for j in range(i + 1, n): 24 | factor: float = U[j, i] / U[i, i] 25 | L[j, i] = factor 26 | U[j] -= factor * U[i] 27 | return P, L, U 28 | 29 | 30 | def solve_lu(P: np.ndarray, L: np.ndarray, U: np.ndarray, b: np.ndarray) -> np.ndarray: 31 | Pb: np.ndarray = P @ b 32 | y: np.ndarray = np.zeros_like(b, dtype=float) 33 | n: int = L.shape[0] 34 | for i in range(n): 35 | y[i] = Pb[i] - np.dot(L[i, :i], y[:i]) 36 | x: np.ndarray = np.zeros_like(b, dtype=float) 37 | for i in range(n - 1, -1, -1): 38 | x[i] = (y[i] - np.dot(U[i, i + 1 :], x[i + 1 :])) / U[i, i] 39 | return x 40 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/lu_decomposition/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/lu_decomposition/tests/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/matrix_inverse/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/matrix_inverse/examples/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/matrix_inverse/implementation/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/implementation/inverse_matrix.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def inverse_matrix(A: np.ndarray) -> np.ndarray: 5 | n: int = A.shape[0] 6 | AI: np.ndarray = np.hstack((A.copy(), np.eye(n))) 7 | for i in range(n): 8 | max_row: int = np.argmax(np.abs(AI[i:, i])) + i 9 | if AI[max_row, i] == 0: 10 | raise ValueError("Matrix is singular and cannot be inverted.") 11 | AI[[i, max_row]] = AI[[max_row, i]] 12 | AI[i] = AI[i] / AI[i, i] 13 | for j in range(n): 14 | if i != j: 15 | AI[j] -= AI[j, i] * AI[i] 16 | return AI[:, n:] 17 | 18 | 19 | def solve_inverse_matrix(A: np.ndarray, b: np.ndarray) -> np.ndarray: 20 | A_inv: np.ndarray = inverse_matrix(A) 21 | return A_inv @ b 22 | -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/2_systems_of_equations/matrix_inverse/tests/__init__.py -------------------------------------------------------------------------------- /src/2_systems_of_equations/matrix_inverse/tests/test_inverse_matrix.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from ..implementation.inverse_matrix import inverse_matrix, solve_inverse_matrix 4 | 5 | 6 | def test_inverse_matrix_identity(): 7 | A = np.eye(3) 8 | A_inv = inverse_matrix(A) 9 | assert np.allclose(A_inv, np.eye(3)) 10 | 11 | 12 | def test_inverse_matrix_simple(): 13 | A = np.array([[4, 7], [2, 6]], dtype=float) 14 | expected = np.linalg.inv(A) 15 | A_inv = inverse_matrix(A) 16 | assert np.allclose(A_inv, expected) 17 | 18 | 19 | def test_inverse_matrix_singular(): 20 | A = np.array([[1, 2], [2, 4]], dtype=float) 21 | with pytest.raises(Exception): 22 | inverse_matrix(A) 23 | 24 | 25 | def
test_solve_inverse_matrix(): 26 | A = np.array([[3, 0], [0, 2]], dtype=float) 27 | b = np.array([9, 8], dtype=float) 28 | expected = np.linalg.solve(A, b) 29 | x = solve_inverse_matrix(A, b) 30 | assert np.allclose(x, expected) 31 | 32 | 33 | def test_solve_inverse_matrix_large(): 34 | A = np.random.rand(5, 5) 35 | b = np.random.rand(5) 36 | x_expected = np.linalg.solve(A, b) 37 | x = solve_inverse_matrix(A, b) 38 | assert np.allclose(x, x_expected) 39 | -------------------------------------------------------------------------------- /src/3_derivatives/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/__init__.py -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/backward_difference/__init__.py -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/backward_difference/examples/__init__.py -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/examples/temp.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/backward_difference/examples/temp.md -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/backward_difference/implementation/__init__.py -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/implementation/backward_difference.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | import numpy as np 3 | 4 | 5 | def backward_difference( 6 | f: Callable[[float], float], x: float, h: float = 1e-5 7 | ) -> float: 8 | return (f(x) - f(x - h)) / h 9 | 10 | 11 | def backward_difference_gradient( 12 | f: Callable[[np.ndarray], float], x: np.ndarray, h: float = 1e-5 13 | ) -> np.ndarray: 14 | gradient = np.zeros_like(x, dtype=float) 15 | for i in range(len(x)): 16 | x_backward = x.copy() 17 | x_backward[i] -= h 18 | # one-sided backward step, consistent with the scalar version above 19 | gradient[i] = (f(x) - f(x_backward)) / h 20 | return gradient 21 | -------------------------------------------------------------------------------- /src/3_derivatives/backward_difference/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/backward_difference/tests/__init__.py
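# Worked check for the backward_difference implementation above: the one-sided
# formula has O(h) truncation error, so halving h should roughly halve the
# error. The flat import is an assumption (implementation directory on sys.path).
import numpy as np
from backward_difference import backward_difference

x0 = 1.0
exact = np.cos(x0)  # exact derivative of sin at x0
for h in (1e-2, 5e-3, 2.5e-3):
    approx = backward_difference(np.sin, x0, h)
    print(h, abs(approx - exact))  # error shrinks roughly linearly with h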
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/central_difference/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/central_difference/examples/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/examples/example.py:
--------------------------------------------------------------------------------
1 | """
2 | Calculate the derivative of f(x) = cos(x) at PI/3.
3 | """
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | from central_difference import central_difference
7 | 
8 | 
9 | def function(x):
10 |     return np.cos(x)
11 | 
12 | 
13 | def exact_derivative(x):
14 |     return -np.sin(x)
15 | 
16 | 
17 | point = np.pi / 3
18 | 
19 | 
20 | computed_derivative_history = list()
21 | step = point / 2
22 | 
23 | for _ in range(100):
24 |     computed_derivative = central_difference(function, point, step)
25 |     computed_derivative_history.append(computed_derivative)
26 |     step /= 2
27 | 
28 | exact_result = exact_derivative(point)
29 | error_history = np.array(
30 |     [
31 |         abs(exact_result - computed_derivative)
32 |         for computed_derivative in computed_derivative_history
33 |     ]
34 | )
35 | best_computed_result = computed_derivative_history[np.argmin(error_history)]
36 | 
37 | print("")
38 | print(f"Exact derivative of cos(x) at point {point:.3f} is {exact_result:.7f}")
39 | print(
40 |     f"The derivative of cos(x) at point {point:.3f} computed using central differencing is {best_computed_result:.7f}"
41 | )
42 | print("")
43 | 
44 | plt.plot(np.arange(error_history.size), error_history)
45 | plt.xlabel("Iteration")
46 | plt.ylabel("Absolute Error")
47 | plt.show()
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/central_difference/implementation/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/implementation/central_difference.py:
--------------------------------------------------------------------------------
1 | # central_difference.py
2 | from typing import Callable
3 | import numpy as np
4 | 
5 | 
6 | def central_difference(f: Callable[[float], float], x: float, h: float = 1e-5) -> float:
7 |     return (f(x + h) - f(x - h)) / (2 * h)
8 | 
9 | 
10 | def central_difference_gradient(
11 |     f: Callable[[np.ndarray], float], x: np.ndarray, h: float = 1e-5
12 | ) -> np.ndarray:
13 |     # Symmetric difference per coordinate: (f(x + h * e_i) - f(x - h * e_i)) / (2h)
14 |     gradient = np.zeros_like(x, dtype=float)
15 |     for i in range(len(x)):
16 |         x_forward = x.copy()
17 |         x_backward = x.copy()
18 |         x_forward[i] += h
19 |         x_backward[i] -= h
20 |         gradient[i] = (f(x_forward) - f(x_backward)) / (2 * h)
21 |     return gradient
22 | 
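A short usage sketch for the gradient helper above (illustrative, not a repository file; the product test function and the flat import path are assumptions):

import numpy as np
from central_difference import central_difference_gradient

# Gradient of f(x, y) = x * y**2 at (2, 3) is (y**2, 2*x*y) = (9, 12);
# the symmetric quotient is second-order accurate, so the error shrinks as O(h**2)
f = lambda v: v[0] * v[1] ** 2
print(central_difference_gradient(f, np.array([2.0, 3.0])))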
--------------------------------------------------------------------------------
/src/3_derivatives/central_difference/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/central_difference/tests/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/forward_difference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/forward_difference/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/forward_difference/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/forward_difference/examples/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/forward_difference/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/forward_difference/implementation/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/forward_difference/implementation/forward_difference.py:
--------------------------------------------------------------------------------
1 | # forward_difference.py
2 | from typing import Callable
3 | import numpy as np
4 | 
5 | 
6 | def forward_difference(f: Callable[[float], float], x: float, h: float = 1e-5) -> float:
7 |     return (f(x + h) - f(x)) / h
8 | 
9 | 
10 | def forward_difference_gradient(
11 |     f: Callable[[np.ndarray], float], x: np.ndarray, h: float = 1e-5
12 | ) -> np.ndarray:
13 |     # Forward difference per coordinate: (f(x + h * e_i) - f(x)) / h
14 |     gradient = np.zeros_like(x, dtype=float)
15 |     fx = f(x)
16 |     for i in range(len(x)):
17 |         x_forward = x.copy()
18 |         x_forward[i] += h
19 |         gradient[i] = (f(x_forward) - fx) / h
20 |     return gradient
21 | 
--------------------------------------------------------------------------------
/src/3_derivatives/forward_difference/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/forward_difference/tests/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/taylor_series/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/taylor_series/examples/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/examples/example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from taylor_series import taylor_series
3 | 
4 | 
5 | def func(x):
6 |     return np.sin(x)
7 | 
8 | 
9 | def sin_derivatives(k):
10 |     # Derivatives of sin at a = 0 cycle through 0, 1, 0, -1
11 |     return np.array([0, 1, 0, -1])[k % 4]
12 | 
13 | 
14 | point = 0
15 | n = 5  # Number of terms in the expansion
16 | x = np.pi / 6
17 | 
18 | approximation = taylor_series(func, point, n, x, sin_derivatives)
19 | print(f"Taylor approximation of sin(x) at x = {x:.4f} using {n} terms:")
20 | print(approximation)
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/examples/plot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from math import factorial
4 | 
5 | 
6 | def taylor_series_approximation(f, df, a, x_range, n_terms):
7 |     """
8 |     Compute the individual terms of the Taylor series of a function.
9 | 
10 |     Parameters:
11 |     f (callable): The function to approximate.
12 |     df (list of callable): Derivatives of the function, starting with the 0th derivative (the function itself).
13 |     a (float): The point of expansion.
14 |     x_range (numpy.ndarray): Range of x values for plotting.
15 |     n_terms (int): Number of terms in the Taylor series.
16 | 
17 |     Returns:
18 |     approximations (numpy.ndarray): One row per Taylor term, each evaluated over x_range.
19 |     """
20 |     approximations = np.zeros((n_terms, len(x_range)))
21 |     for n in range(n_terms):
22 |         approximations[n] = (df[n](a) / factorial(n)) * (x_range - a) ** n
23 |     return approximations
24 | 
25 | 
26 | def plot_taylor_series(f, df, a, x_range, n_terms):
27 |     """
28 |     Plot a function and its Taylor series approximations.
29 | 
30 |     Parameters:
31 |     f (callable): The original function.
32 |     df (list of callable): Derivatives of the function, starting with the 0th derivative.
33 |     a (float): The point of expansion.
34 |     x_range (numpy.ndarray): Range of x values for plotting.
35 |     n_terms (int): Number of terms in the Taylor series.
36 |     """
37 |     y_true = f(x_range)
38 |     approximations = taylor_series_approximation(f, df, a, x_range, n_terms)
39 | 
40 |     plt.figure(figsize=(10, 6))
41 |     plt.plot(
42 |         x_range, y_true, label="Original function $f(x)$", color="black", linewidth=2
43 |     )
44 | 
45 |     partial_sum = np.zeros_like(x_range)
46 |     for n in range(n_terms):
47 |         partial_sum += approximations[n]
48 |         plt.plot(x_range, partial_sum, label=f"Taylor series (n={n})")
49 | 
50 |     plt.axvline(x=a, color="red", linestyle="--", label=f"Expansion point $a={a}$")
51 |     plt.title("Taylor Series Approximation")
52 |     plt.xlabel("x")
53 |     plt.ylabel("f(x)")
54 |     plt.legend()
55 |     plt.grid(True)
56 |     plt.show()
57 | 
58 | 
59 | if __name__ == "__main__":
60 |     # Define the function and its derivatives
61 |     f = lambda x: np.exp(x)
62 |     df = [
63 |         lambda x: np.exp(x),  # f(x), the 0th derivative
64 |         lambda x: np.exp(x),  # f'(x)
65 |         lambda x: np.exp(x),  # f''(x)
66 |         lambda x: np.exp(x),  # f'''(x)
67 |         lambda x: np.exp(x),  # f''''(x)
68 |     ]  # All derivatives of exp coincide; the list must hold n_terms entries
69 | 
70 |     a = 0  # Point of expansion
71 |     x_range = np.linspace(-2, 2, 400)  # Range of x values
72 |     n_terms = 5  # Number of terms in the Taylor series
73 | 
74 |     # Plot the Taylor series approximation
75 |     plot_taylor_series(f, df, a, x_range, n_terms)
76 | 
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/taylor_series/implementation/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/implementation/taylor_series.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 | import numpy as np
3 | 
4 | 
5 | def taylor_series(
6 |     f: Callable[[float], float],
7 |     a: float,
8 |     n: int,
9 |     x: float,
10 |     derivatives: Callable[[np.ndarray], np.ndarray],
11 | ) -> float:
12 |     k = np.arange(n)
13 |     factorial = np.cumprod(np.append(1, k[1:]))  # factorial[i] = i!
14 |     terms = derivatives(k) / factorial * (x - a) ** k
15 |     return np.sum(terms)
16 | 
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/3_derivatives/taylor_series/tests/__init__.py
--------------------------------------------------------------------------------
/src/3_derivatives/taylor_series/tests/test_tailor_series.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import math
3 | import numpy as np
4 | from ..implementation.taylor_series import taylor_series
5 | 
6 | 
7 | def test_taylor_series_exp():
8 |     def exp_derivatives(k: np.ndarray) -> np.ndarray:
9 |         return np.ones_like(k, dtype=float)  # The derivative of exp(0) is always 1
10 | 
11 |     approx = taylor_series(math.exp, 0, 10, 1, exp_derivatives)
12 |     expected = math.exp(1)
13 |     assert math.isclose(approx, expected, rel_tol=1e-3)
14 | 
15 | 
16 | def test_taylor_series_sin():
17 |     def sin_derivatives(k: np.ndarray) -> np.ndarray:
18 |         return np.array([0, 1, 0, -1])[k % 4]
19 | 
20 |     approx = taylor_series(math.sin, 0, 10, math.pi / 2, sin_derivatives)
21 |     expected = math.sin(math.pi / 2)
22 |     assert
math.isclose(approx, expected, rel_tol=1e-3) 23 | 24 | 25 | def test_taylor_series_cos(): 26 | def cos_derivatives(k: np.ndarray) -> np.ndarray: 27 | return np.array([1, 0, -1, 0])[k % 4] 28 | 29 | approx = taylor_series(math.cos, 0, 10, 0, cos_derivatives) 30 | expected = math.cos(0) 31 | assert math.isclose(approx, expected, rel_tol=1e-3) 32 | 33 | 34 | def test_taylor_series_ln(): 35 | def ln_derivatives(k: np.ndarray) -> np.ndarray: 36 | result = np.zeros_like(k, dtype=float) 37 | result[k > 0] = (-1) ** (k[k > 0] - 1) / k[k > 0] 38 | return result 39 | 40 | with pytest.warns(RuntimeWarning, match="divide by zero encountered in divide"): 41 | taylor_series(math.log, 1, 100, 2, ln_derivatives) 42 | 43 | 44 | def test_taylor_series_error(): 45 | def linear_derivatives(k: np.ndarray) -> np.ndarray: 46 | result = np.zeros_like(k, dtype=float) 47 | result[k == 1] = 1.0 48 | return result 49 | 50 | approx = taylor_series(lambda x: x, 0, 2, 5, linear_derivatives) 51 | expected = 5.0 52 | assert math.isclose(approx, expected, rel_tol=1e-3) 53 | 54 | 55 | def test_taylor_series_zero_terms(): 56 | def any_derivatives(k: np.ndarray) -> np.ndarray: 57 | return np.zeros_like(k, dtype=float) 58 | 59 | approx = taylor_series(math.exp, 0, 0, 1, any_derivatives) 60 | expected = 0.0 61 | assert math.isclose(approx, expected, rel_tol=1e-3) 62 | 63 | 64 | def test_taylor_series_negative_x(): 65 | def exp_derivatives(k: np.ndarray) -> np.ndarray: 66 | return np.ones_like(k, dtype=float) # The derivative of exp(0) is always 1 67 | 68 | approx = taylor_series(math.exp, 0, 10, -1, exp_derivatives) 69 | expected = math.exp(-1) 70 | assert math.isclose(approx, expected, rel_tol=1e-3) 71 | 72 | 73 | def test_taylor_series_high_terms(): 74 | def exp_derivatives(k: np.ndarray) -> np.ndarray: 75 | return np.ones_like(k, dtype=float) # The derivative of exp(0) is always 1 76 | 77 | approx = taylor_series(math.exp, 0, 20, 1, exp_derivatives) 78 | expected = math.exp(1) 79 | assert math.isclose(approx, expected, rel_tol=1e-12) 80 | -------------------------------------------------------------------------------- /src/4_integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/__init__.py -------------------------------------------------------------------------------- /src/4_integration/midpoint_rule/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/midpoint_rule/__init__.py -------------------------------------------------------------------------------- /src/4_integration/midpoint_rule/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/midpoint_rule/implementation/__init__.py -------------------------------------------------------------------------------- /src/4_integration/midpoint_rule/implementation/midpoint_rule.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | import numpy as np 3 | 4 | 5 | def midpoint_rule(f: Callable[[float], float], a: float, b: float, n: int) -> float: 6 | if n <= 0: 7 | raise ValueError("Number of intervals must be positive.") 
8 |     h = (b - a) / n
9 |     midpoints = a + h * (np.arange(n) + 0.5)
10 |     values = f(midpoints)
11 |     if np.isscalar(values):
12 |         values = np.full_like(midpoints, values, dtype=float)
13 |     return h * np.sum(values)
14 | 
15 | 
16 | def midpoint_rule_multidim(
17 |     f: Callable[[np.ndarray], float], bounds: list, n: int
18 | ) -> float:
19 |     if n <= 0:
20 |         raise ValueError("Number of intervals must be positive.")
21 |     h = np.array([(b - a) / n for a, b in bounds])
22 |     grids = [np.linspace(a + h_i / 2, b - h_i / 2, n) for (a, b), h_i in zip(bounds, h)]
23 |     mesh = np.meshgrid(*grids, indexing="ij")
24 |     points = np.stack(mesh, axis=-1).reshape(-1, len(bounds))
25 |     try:
26 |         values = f(points)  # Fast path: f accepts the whole (n**d, d) point array
27 |     except Exception:
28 |         # Fallback for non-vectorized f: evaluate one sample point at a time
29 |         values = np.apply_along_axis(f, 1, points)
30 |     if np.isscalar(values):
31 |         values = np.full(points.shape[0], values, dtype=float)
32 |     return np.prod(h) * np.sum(values)
33 | 
--------------------------------------------------------------------------------
/src/4_integration/midpoint_rule/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/midpoint_rule/tests/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/midpoint_rule/tests/test_midpoint_rule.py:
--------------------------------------------------------------------------------
1 | # test_midpoint_rule.py
2 | import pytest
3 | import numpy as np
4 | from ..implementation.midpoint_rule import midpoint_rule, midpoint_rule_multidim
5 | 
6 | 
7 | def test_midpoint_rule_constant():
8 |     f = lambda x: 5.0
9 |     a, b, n = 0.0, 10.0, 100
10 |     result = midpoint_rule(f, a, b, n)
11 |     expected = 5.0 * (b - a)
12 |     assert np.isclose(result, expected, rtol=1e-6)
13 | 
14 | 
15 | def test_midpoint_rule_linear():
16 |     f = lambda x: 2.0 * x + 3.0
17 |     a, b, n = 0.0, 5.0, 1000
18 |     result = midpoint_rule(f, a, b, n)
19 |     expected = 2.0 * (b ** 2 - a ** 2) / 2 + 3.0 * (b - a)
20 |     assert np.isclose(result, expected, rtol=1e-6)
21 | 
22 | 
23 | def test_midpoint_rule_quadratic():
24 |     f = lambda x: x ** 2
25 |     a, b, n = 0.0, 3.0, 1000
26 |     result = midpoint_rule(f, a, b, n)
27 |     expected = (b ** 3 - a ** 3) / 3
28 |     assert np.isclose(result, expected, rtol=1e-6)
29 | 
30 | 
31 | def test_midpoint_rule_sin():
32 |     f = np.sin
33 |     a, b, n = 0.0, np.pi, 1000
34 |     result = midpoint_rule(f, a, b, n)
35 |     expected = 2.0
36 |     assert np.isclose(result, expected, rtol=1e-4)
37 | 
38 | 
39 | def test_midpoint_rule_zero_intervals():
40 |     f = lambda x: x
41 |     a, b, n = 0.0, 1.0, 0
42 |     with pytest.raises(ValueError):
43 |         midpoint_rule(f, a, b, n)
44 | 
45 | 
46 | def test_midpoint_rule_single_interval():
47 |     f = lambda x: x
48 |     a, b, n = 0.0, 2.0, 1
49 |     result = midpoint_rule(f, a, b, n)
50 |     expected = 2.0 * 1.0
51 |     assert np.isclose(result, expected, rtol=1e-6)
52 | 
53 | 
54 | def test_midpoint_rule_negative_bounds():
55 |     f = lambda x: x ** 2
56 |     a, b, n = -1.0, 1.0, 1000
57 |     result = midpoint_rule(f, a, b, n)
58 |     expected = 2.0 / 3
59 |     assert np.isclose(result, expected, rtol=1e-6)
60 | 
61 | 
62 | def test_midpoint_rule_large_n():
63 |     f = lambda x: np.exp(x)
64 |     a, b, n = 0.0, 1.0, 1000000
65 |     result = midpoint_rule(f, a, b, n)
66 |     expected = np.exp(1) - 1
67 |     assert np.isclose(result, expected, rtol=1e-6)
68 | 
69 | 
70 | def test_midpoint_rule_multidim_constant():
71 |     f = lambda x: 4.0
72 |     bounds = [(0.0, 1.0), (0.0, 1.0)]
73 |     n = 100
74 |     result = midpoint_rule_multidim(f, bounds, n)
75 |     expected = 4.0 * 1.0 * 1.0
76 |     assert np.isclose(result, expected, rtol=1e-6)
77 | 
78 | 
79 | def test_midpoint_rule_multidim_linear():
80 |     f = lambda x: np.sum(x, axis=-1)  # f(x, y) = x + y, vectorized over sample points
81 |     bounds = [(0.0, 2.0), (0.0, 3.0)]
82 |     n = 1000
83 |     result = midpoint_rule_multidim(f, bounds, n)
84 |     expected = 15.0
85 |     assert np.isclose(result, expected, rtol=1e-4)
86 | 
87 | 
88 | def test_midpoint_rule_multidim_quadratic():
89 |     f = lambda x: x[:, 0] ** 2 + x[:, 1] ** 2
90 |     bounds = [(0.0, 1.0), (0.0, 1.0)]
91 |     n = 1000
92 |     result = midpoint_rule_multidim(f, bounds, n)
93 |     expected = 2.0 / 3.0  # Exact value of the double integral over the unit square
94 |     assert np.isclose(result, expected, rtol=1e-4)
95 | 
96 | 
97 | def test_midpoint_rule_multidim_sin():
98 |     f = lambda x: np.sin(x[:, 0]) * np.sin(x[:, 1])
99 |     bounds = [(0.0, np.pi), (0.0, np.pi)]
100 |     n = 1000
101 |     result = midpoint_rule_multidim(f, bounds, n)
102 |     expected = 4.0  # Analytical integral result for sin(x)*sin(y) over [0, π] x [0, π]
103 |     assert np.isclose(result, expected, rtol=1e-4)
104 | 
105 | 
106 | def test_midpoint_rule_multidim_zero_intervals():
107 |     f = lambda x: x[0]
108 |     bounds = [(0.0, 1.0), (0.0, 1.0)]
109 |     n = 0
110 |     with pytest.raises(ValueError):
111 |         midpoint_rule_multidim(f, bounds, n)
112 | 
113 | 
114 | def test_midpoint_rule_multidim_single_interval():
115 |     f = lambda x: x[0] * x[1]
116 |     bounds = [(0.0, 2.0), (0.0, 3.0)]
117 |     n = 1
118 |     result = midpoint_rule_multidim(f, bounds, n)
119 |     expected = (1.0 * 1.5) * 2.0 * 3.0
120 |     assert np.isclose(result, expected, rtol=1e-6)
121 | 
--------------------------------------------------------------------------------
/src/4_integration/monte_carlo_integral/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/monte_carlo_integral/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/monte_carlo_integral/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/monte_carlo_integral/implementation/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/monte_carlo_integral/implementation/monte_carlo_integral.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Tuple
2 | import numpy as np
3 | 
4 | 
5 | def monte_carlo_integral(
6 |     f: Callable[[float], float], a: float, b: float, num_samples: int = 1000000
7 | ) -> float:
8 |     if num_samples <= 0:
9 |         raise ValueError("Number of samples must be greater than zero.")
10 |     np.random.seed(0)
11 |     samples = np.random.uniform(a, b, num_samples)
12 |     evaluations = f(samples)
13 |     integral = (b - a) * np.mean(evaluations)
14 |     return integral
15 | 
16 | 
17 | def monte_carlo_integral_multidim(
18 |     f: Callable[[np.ndarray], float],
19 |     bounds: Tuple[Tuple[float, float], ...],
20 |     num_samples: int = 1000000,
21 | ) -> float:
22 |     if num_samples <= 0:
23 |         raise ValueError("Number of samples must be greater than zero.")
24 |     np.random.seed(0)
25 |     dimensions = len(bounds)
26 |     lower_bounds = np.array([b[0] for b in bounds])
27 |     upper_bounds = np.array([b[1] for b in bounds])
28 |     samples = np.random.uniform(0, 1, (num_samples,
dimensions)) 29 | scaled_samples = lower_bounds + samples * (upper_bounds - lower_bounds) 30 | evaluations = np.apply_along_axis(f, 1, scaled_samples) 31 | volume = np.prod(upper_bounds - lower_bounds) 32 | integral = volume * np.mean(evaluations) 33 | return integral 34 | -------------------------------------------------------------------------------- /src/4_integration/monte_carlo_integral/tests/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/4_integration/monte_carlo_integral/tests/test_monte_carlo_integral.py: -------------------------------------------------------------------------------- 1 | # test_monte_carlo_integral.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.monte_carlo_integral import ( 5 | monte_carlo_integral, 6 | monte_carlo_integral_multidim, 7 | ) 8 | 9 | 10 | def test_monte_carlo_integral_constant(): 11 | f = lambda x: 5.0 12 | a, b = 0.0, 10.0 13 | result = monte_carlo_integral(f, a, b, num_samples=100000) 14 | expected = 5.0 * (b - a) 15 | assert np.isclose(result, expected, rtol=1e-2) 16 | 17 | 18 | def test_monte_carlo_integral_linear(): 19 | f = lambda x: 2.0 * x + 3.0 20 | a, b = 0.0, 5.0 21 | result = monte_carlo_integral(f, a, b, num_samples=100000) 22 | expected = 2.0 * (b ** 2 - a ** 2) / 2 + 3.0 * (b - a) 23 | assert np.isclose(result, expected, rtol=1e-2) 24 | 25 | 26 | def test_monte_carlo_integral_quadratic(): 27 | f = lambda x: x ** 2 28 | a, b = 0.0, 3.0 29 | result = monte_carlo_integral(f, a, b, num_samples=1000000) 30 | expected = (b ** 3 - a ** 3) / 3 31 | assert np.isclose(result, expected, rtol=0.1) 32 | 33 | 34 | def test_monte_carlo_integral_sin(): 35 | f = np.sin 36 | a, b = 0.0, np.pi 37 | result = monte_carlo_integral(f, a, b, num_samples=1000000) 38 | expected = 2.0 39 | assert np.isclose(result, expected, rtol=0.1) 40 | 41 | 42 | def test_monte_carlo_integral_multidim_constant(): 43 | f = lambda x: 4.0 44 | bounds = ((0.0, 1.0), (0.0, 1.0)) 45 | result = monte_carlo_integral_multidim(f, bounds, num_samples=100000) 46 | expected = 4.0 * 1.0 * 1.0 47 | assert np.isclose(result, expected, rtol=1e-2) 48 | 49 | 50 | def test_monte_carlo_integral_multidim_linear(): 51 | f = lambda x: x[0] + x[1] 52 | bounds = ((0.0, 2.0), (0.0, 3.0)) 53 | result = monte_carlo_integral_multidim(f, bounds, num_samples=1000000) 54 | expected = 15 55 | assert np.isclose(result, expected, rtol=0.1) 56 | 57 | 58 | def test_monte_carlo_integral_multidim_quadratic(): 59 | f = lambda x: x[0] ** 2 + x[1] ** 2 60 | bounds = ((0.0, 1.0), (0.0, 1.0)) 61 | result = monte_carlo_integral_multidim(f, bounds, num_samples=1000000) 62 | expected = 1 / 3 + 1 / 3 63 | assert np.isclose(result, expected, rtol=0.1) 64 | 65 | 66 | def test_monte_carlo_integral_multidim_sin(): 67 | f = lambda x: np.sin(x[0]) * np.sin(x[1]) 68 | bounds = ((0.0, np.pi), (0.0, np.pi)) 69 | result = monte_carlo_integral_multidim(f, bounds, num_samples=1000000) 70 | expected = 4.0 71 | assert np.isclose(result, expected, rtol=1e-2) 72 | 73 | 74 | def test_monte_carlo_integral_zero_samples(): 75 | f = lambda x: x 76 | a, b = 0.0, 1.0 77 | with pytest.raises(ValueError): 78 | monte_carlo_integral(f, a, b, num_samples=0) 79 | 80 | 81 | def test_monte_carlo_integral_multidim_zero_samples(): 82 | f = lambda x: x[0] 83 | bounds = ((0.0, 1.0),) 84 | with pytest.raises(ValueError): 85 | monte_carlo_integral_multidim(f, bounds, num_samples=0) 86 | 
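A classic sanity check for monte_carlo_integral, sketched here under the assumption that the module is importable as below: the quarter-circle integral of sqrt(1 - x**2) over [0, 1] equals π/4, so four times the estimate should approach π.

import numpy as np
from monte_carlo_integral import monte_carlo_integral

# ∫₀¹ sqrt(1 - x²) dx = π/4; with the default 10**6 samples the estimate of π
# is typically accurate to two or three decimal places
estimate = monte_carlo_integral(lambda x: np.sqrt(1.0 - x ** 2), 0.0, 1.0)
print(4.0 * estimate)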
--------------------------------------------------------------------------------
/src/4_integration/simpson/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/simpson/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/simpson/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/simpson/examples/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/simpson/examples/temp.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/simpson/examples/temp.md
--------------------------------------------------------------------------------
/src/4_integration/simpson/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/simpson/implementation/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/simpson/implementation/simpson_rule.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, List
2 | import numpy as np
3 | 
4 | 
5 | def simpson_rule(f: Callable[[float], float], a: float, b: float, n: int) -> float:
6 |     if n <= 0 or n % 2 != 0:
7 |         raise ValueError("Number of intervals must be a positive even integer.")
8 |     x = np.linspace(a, b, n + 1)
9 |     y = np.array([f(xi) for xi in x])
10 |     h = (b - a) / n
11 |     return (h / 3) * (y[0] + y[-1] + 4 * np.sum(y[1:-1:2]) + 2 * np.sum(y[2:-2:2]))
12 | 
13 | 
14 | def simpson_rule_multidim(
15 |     f: Callable[[np.ndarray], np.ndarray], bounds: List[tuple], n: int
16 | ) -> float:
17 |     if n <= 0 or n % 2 != 0:
18 |         raise ValueError("Number of intervals must be a positive even integer.")
19 |     dims = len(bounds)
20 |     grids = [np.linspace(a, b, n + 1) for a, b in bounds]
21 |     mesh = np.meshgrid(*grids, indexing="ij")
22 |     points = np.stack(mesh, axis=-1)
23 |     flat_points = points.reshape(-1, dims)
24 |     flat_vals = f(flat_points)
25 |     if np.isscalar(flat_vals):  # Handle scalar output, e.g. from constant integrands
26 |         flat_vals = np.full(flat_points.shape[0], flat_vals)
27 |     vals = flat_vals.reshape([n + 1] * dims)
28 |     h = [(b - a) / n for a, b in bounds]
29 | 
30 |     w_1d = np.ones(n + 1)
31 |     w_1d[1:-1:2] = 4
32 |     w_1d[2:-2:2] = 2
33 |     W = w_1d
34 |     for _ in range(dims - 1):
35 |         W = np.outer(W, w_1d)
36 |     W = W.reshape([n + 1] * dims)
37 | 
38 |     return np.sum(vals * W) * np.prod(h) / 3 ** dims
39 | 
--------------------------------------------------------------------------------
/src/4_integration/simpson/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/simpson/tests/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/trapezoid_rule/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/trapezoid_rule/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/trapezoid_rule/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/trapezoid_rule/examples/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/trapezoid_rule/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/trapezoid_rule/implementation/__init__.py
--------------------------------------------------------------------------------
/src/4_integration/trapezoid_rule/implementation/trapezoid_rule.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, List
2 | import numpy as np
3 | 
4 | 
5 | def trapezoid_rule(f: Callable[[float], float], a: float, b: float, n: int) -> float:
6 |     if n <= 0:
7 |         raise ValueError("Number of intervals must be positive.")
8 |     x = np.linspace(a, b, n + 1)
9 |     y = f(x)
10 |     h = (b - a) / n
11 |     w = np.ones(n + 1)
12 |     w[1:-1] = 2
13 |     return np.sum(y * w) * h / 2
14 | 
15 | 
16 | def trapezoid_rule_multidim(
17 |     f: Callable[[np.ndarray], float], bounds: List[tuple], n: int
18 | ) -> float:
19 |     if n <= 0:
20 |         raise ValueError("Number of intervals must be positive.")
21 |     dims = len(bounds)
22 |     grids = [np.linspace(a, b, n + 1) for a, b in bounds]
23 |     mesh = np.meshgrid(*grids, indexing="ij")
24 |     points = np.stack(mesh, axis=-1)
25 |     flat_points = points.reshape(-1, dims)
26 | 
27 |     try:
28 |         test_vals = f(flat_points)  # Fast path: f accepts the whole point array
29 |         if np.isscalar(test_vals):
30 |             flat_vals = np.full(flat_points.shape[0], test_vals)
31 |         else:
32 |             flat_vals = test_vals
33 |     except Exception:
34 |         # Fallback for non-vectorized f: evaluate one point at a time
35 |         flat_vals = np.apply_along_axis(f, 1, flat_points)
36 | 
37 |     vals = flat_vals.reshape([n + 1] * dims)
38 |     w_1d = np.ones(n + 1)
39 |     w_1d[1:-1] = 2
40 |     W = w_1d
41 |     for _ in range(dims - 1):
42 |         W = np.outer(W, w_1d)
43 |     W = W.reshape([n + 1] * dims)
44 | 
45 |     h = [(b - a) / n for a, b in bounds]
46 |     return np.sum(vals * W) * np.prod(h) / (2 ** dims)
47 | 
--------------------------------------------------------------------------------
/src/4_integration/trapezoid_rule/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/4_integration/trapezoid_rule/tests/__init__.py
--------------------------------------------------------------------------------
/src/5_matrices/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/__init__.py
--------------------------------------------------------------------------------
/src/5_matrices/eigen_value_decomposition/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigen_value_decomposition/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigen_value_decomposition/examples/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from numpy.linalg import eig, inv 4 | 5 | 6 | def eigen_decomposition_visualize(A): 7 | """ 8 | Performs and visualizes the Eigenvalue Decomposition (EVD) of a square matrix A. 9 | 10 | Parameters: 11 | A (ndarray): A square numpy matrix. 12 | """ 13 | # Compute eigenvalues and eigenvectors 14 | eigenvalues, eigenvectors = eig(A) 15 | D = np.diag(eigenvalues) # Diagonal matrix D 16 | P = eigenvectors # Matrix P with eigenvectors as columns 17 | P_inv = inv(P) # Inverse of P 18 | 19 | # Verify decomposition A = P D P^{-1} 20 | A_reconstructed = P @ D @ P_inv 21 | 22 | # Print results 23 | print("Original Matrix A:") 24 | print(A) 25 | print("\nEigenvalues (D):") 26 | print(D) 27 | print("\nEigenvectors (P):") 28 | print(P) 29 | print("\nReconstructed A (P D P^{-1}):") 30 | print(A_reconstructed) 31 | 32 | # Plotting: Eigenvectors, Eigenvalues, and Reconstruction 33 | fig, axs = plt.subplots(1, 3, figsize=(18, 6)) 34 | 35 | # Plot 1: Original Matrix A Transformation (Conceptual) 36 | axs[0].set_title("Original Matrix A") 37 | axs[0].imshow(A, cmap="viridis") 38 | axs[0].axis("off") 39 | axs[0].set_xlabel("A transformation") 40 | 41 | # Plot 2: Eigenvectors and Eigenvalues 42 | axs[1].set_title("Eigenvectors (P) and Eigenvalues (D)") 43 | axs[1].imshow(D, cmap="coolwarm") 44 | axs[1].set_xlabel("Diagonal Eigenvalues (D)") 45 | 46 | # Plot 3: Reconstructed A 47 | axs[2].set_title("Reconstructed Matrix A") 48 | axs[2].imshow(A_reconstructed, cmap="viridis") 49 | axs[2].axis("off") 50 | axs[2].set_xlabel("A = P D P^{-1}") 51 | 52 | plt.tight_layout() 53 | plt.show() 54 | 55 | 56 | # Example Matrix A 57 | A = np.array([[4, 1], [2, 3]]) 58 | 59 | # Perform and visualize Eigenvalue Decomposition 60 | eigen_decomposition_visualize(A) 61 | -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigen_value_decomposition/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/implementation/eigen_value_decomposition.py: -------------------------------------------------------------------------------- 1 | # eigen_value_decomposition.py 2 | import numpy as np 3 | from typing import Tuple 4 | 5 | 6 | def eigen_decomposition_full( 7 | A: np.ndarray, 8 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 9 | eigenvalues: np.ndarray 10 | eigenvectors: np.ndarray 11 | eigenvalues, eigenvectors = np.linalg.eig(A) 12 | D: 
np.ndarray = np.diag(eigenvalues) 13 | P_inv: np.ndarray = np.linalg.inv(eigenvectors) 14 | return eigenvectors, D, P_inv 15 | 16 | 17 | def eigen_decomposition_real_full( 18 | A: np.ndarray, 19 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 20 | if not np.allclose(A, A.T): 21 | raise ValueError("Matrix is not symmetric.") 22 | eigenvalues, eigenvectors = np.linalg.eigh(A) 23 | D: np.ndarray = np.diag(eigenvalues) 24 | P_inv: np.ndarray = np.linalg.inv(eigenvectors) 25 | return eigenvectors, D, P_inv 26 | -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigen_value_decomposition/tests/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigen_value_decomposition/tests/test_eigen_value_decomposition.py: -------------------------------------------------------------------------------- 1 | # test_eigen_value_decomposition.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.eigen_value_decomposition import ( 5 | eigen_decomposition_full, 6 | eigen_decomposition_real_full, 7 | ) 8 | 9 | 10 | def test_eigen_decomposition_identity(): 11 | A = np.eye(3) 12 | P, D, P_inv = eigen_decomposition_full(A) 13 | assert np.allclose(P @ D @ P_inv, A) 14 | assert np.allclose(D, np.eye(3)) 15 | assert np.allclose(P, np.eye(3)) 16 | assert np.allclose(P_inv, np.eye(3)) 17 | 18 | 19 | def test_eigen_decomposition_diagonal(): 20 | A = np.diag([1, 2, 3]) 21 | P, D, P_inv = eigen_decomposition_full(A) 22 | assert np.allclose(P @ D @ P_inv, A) 23 | assert np.allclose(D, A) 24 | assert np.allclose(P, np.eye(3)) 25 | assert np.allclose(P_inv, np.eye(3)) 26 | 27 | 28 | def test_eigen_decomposition_symmetric(): 29 | A = np.array([[2, 1], [1, 2]], dtype=float) 30 | P, D, P_inv = eigen_decomposition_real_full(A) 31 | reconstructed = P @ D @ P_inv 32 | assert np.allclose(reconstructed, A) 33 | expected_eigenvalues = np.array([1, 3]) 34 | assert np.allclose(np.sort(np.diag(D)), np.sort(expected_eigenvalues)) 35 | assert np.allclose(P_inv, P.T) 36 | 37 | 38 | def test_eigen_decomposition_non_symmetric(): 39 | A = np.array([[0, 1], [-2, -3]], dtype=float) 40 | P, D, P_inv = eigen_decomposition_full(A) 41 | reconstructed = P @ D @ P_inv 42 | assert np.allclose(reconstructed, A) 43 | expected_eigenvalues = np.array([-1, -2]) 44 | assert np.allclose(np.sort(D.diagonal()), np.sort(expected_eigenvalues)) 45 | 46 | 47 | def test_eigen_decomposition_complex(): 48 | A = np.array([[0, -1], [1, 0]], dtype=float) 49 | P, D, P_inv = eigen_decomposition_full(A) 50 | reconstructed = P @ D @ P_inv 51 | assert np.allclose(reconstructed, A) 52 | expected_eigenvalues = np.array([1j, -1j]) 53 | assert np.allclose(D.diagonal(), expected_eigenvalues) 54 | 55 | 56 | def test_eigen_decomposition_real_non_symmetric(): 57 | A = np.array([[1, 2], [3, 4]], dtype=float) 58 | with pytest.raises(ValueError): 59 | eigen_decomposition_real_full(A) 60 | 61 | 62 | def test_eigen_decomposition_large_matrix(): 63 | np.random.seed(0) 64 | A = np.random.rand(10, 10) 65 | P, D, P_inv = eigen_decomposition_full(A) 66 | reconstructed = P @ D @ P_inv 67 | assert np.allclose(reconstructed, A, atol=1e-6) 68 | 69 | 70 | def test_eigen_decomposition_singular_matrix(): 71 | A = np.array([[2, 4], [1, 2]], 
dtype=float) 72 | P, D, P_inv = eigen_decomposition_full(A) 73 | reconstructed = P @ D @ P_inv 74 | assert np.allclose(reconstructed, A) 75 | expected_eigenvalues = np.array([0, 4]) 76 | assert np.allclose(np.sort(D.diagonal()), np.sort(expected_eigenvalues)) 77 | 78 | 79 | def test_eigen_decomposition_real_eigenvalues(): 80 | A = np.array([[6, 0], [0, 1]], dtype=float) 81 | P, D, P_inv = eigen_decomposition_real_full(A) 82 | reconstructed = P @ D @ P_inv 83 | assert np.allclose(reconstructed, A) 84 | expected_eigenvalues = np.array([1, 6]) 85 | assert np.allclose(np.sort(D.diagonal()), np.sort(expected_eigenvalues)) 86 | 87 | 88 | def test_eigen_decomposition_zero_matrix(): 89 | A = np.zeros((3, 3)) 90 | P, D, P_inv = eigen_decomposition_full(A) 91 | reconstructed = P @ D @ P_inv 92 | assert np.allclose(reconstructed, A) 93 | assert np.allclose(D, np.zeros((3, 3))) 94 | assert np.allclose(P @ P_inv, np.eye(3)) 95 | -------------------------------------------------------------------------------- /src/5_matrices/eigenvalues_and_eigenvectors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigenvalues_and_eigenvectors/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigenvalues_and_eigenvectors/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigenvalues_and_eigenvectors/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/eigenvalues_and_eigenvectors/implementation/eigenvalues_and_eigenvectors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def find_eigenvalues(matrix: np.ndarray) -> np.ndarray: 5 | """ 6 | Compute eigenvalues by solving the characteristic polynomial det(A - λI) = 0. 7 | """ 8 | if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]: 9 | raise ValueError("Matrix must be square.") 10 | coeffs = np.poly(matrix) # Polynomial coefficients for det(A - λI) = 0 11 | eigenvalues = np.roots(coeffs) # Roots of the polynomial 12 | return eigenvalues 13 | 14 | 15 | def find_eigenvectors(matrix: np.ndarray) -> np.ndarray: 16 | """ 17 | Compute eigenvectors by solving (A - λI)v = 0 for each eigenvalue λ. 
18 | """ 19 | if np.array_equal(matrix, np.eye(matrix.shape[0])): 20 | return np.eye(matrix.shape[0]) 21 | eigenvalues = find_eigenvalues(matrix) 22 | n = matrix.shape[0] 23 | eigenvectors = [] 24 | 25 | for λ in eigenvalues: 26 | A_shift = matrix - λ * np.eye(n) 27 | # Solve the homogeneous equation (A - λI)v = 0 using Gaussian elimination 28 | augmented_matrix = np.hstack([A_shift, np.zeros((n, 1))]) 29 | for i in range(n): 30 | # Find the pivot 31 | pivot_row = i + np.argmax(np.abs(augmented_matrix[i:, i])) 32 | if np.abs(augmented_matrix[pivot_row, i]) < 1e-12: 33 | continue 34 | # Swap rows 35 | if pivot_row != i: 36 | augmented_matrix[[i, pivot_row]] = augmented_matrix[[pivot_row, i]] 37 | # Eliminate below 38 | for j in range(i + 1, n): 39 | if np.abs(augmented_matrix[j, i]) > 1e-12: 40 | factor = augmented_matrix[j, i] / augmented_matrix[i, i] 41 | augmented_matrix[j] -= factor * augmented_matrix[i] 42 | 43 | # Back substitution to find eigenvector 44 | v = np.zeros(n, dtype=complex) 45 | for i in range(n - 1, -1, -1): 46 | if np.abs(augmented_matrix[i, i]) < 1e-12: 47 | v[i] = 1 # Free variable 48 | else: 49 | v[i] = ( 50 | -np.sum(augmented_matrix[i, i + 1 : n] * v[i + 1 : n]) 51 | / augmented_matrix[i, i] 52 | ) 53 | 54 | norm = np.linalg.norm(v) 55 | if norm == 0: 56 | v = np.zeros(n) 57 | v[0] = 1 58 | else: 59 | v /= norm 60 | eigenvectors.append(v.real if np.allclose(v.imag, 0) else v) 61 | 62 | return np.array(eigenvectors).T 63 | -------------------------------------------------------------------------------- /src/5_matrices/eigenvalues_and_eigenvectors/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/eigenvalues_and_eigenvectors/tests/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/inverse_power_method/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/inverse_power_method/examples/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/examples/temp.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/inverse_power_method/examples/temp.md -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/inverse_power_method/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/implementation/inverse_power_method.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Optional, Tuple 3 | 4 | 5 | def inverse_power_method( 6 | A: np.ndarray, 7 | tol: float = 1e-8, 8 | max_iterations: int = 10000, 9 | x0: Optional[np.ndarray] = None, 10 | shift: float = 0.0, 11 | ) -> Tuple[float, np.ndarray]: 12 | if A.shape[0] != A.shape[1]: 13 | raise ValueError("Matrix must be square.") 14 | 15 | eigenvalues = np.linalg.eigvals(A) 16 | if not np.all(np.abs(np.imag(eigenvalues)) < 1e-10): 17 | raise ValueError("Matrix has complex eigenvalues.") 18 | 19 | n = A.shape[0] 20 | I = np.eye(n) 21 | A_shifted = A - shift * I 22 | 23 | if x0 is None: 24 | x = np.random.rand(n) 25 | else: 26 | x = x0.astype(float) 27 | 28 | x /= np.linalg.norm(x) 29 | eigenvalue = 0.0 30 | 31 | for iteration in range(max_iterations): 32 | try: 33 | y = np.linalg.solve(A_shifted, x) 34 | except np.linalg.LinAlgError: 35 | raise ValueError( 36 | "Matrix (A - shift * I) is singular and cannot be inverted." 37 | ) 38 | 39 | y_norm = np.linalg.norm(y) 40 | if y_norm == 0: 41 | raise ValueError("Encountered zero vector during iterations.") 42 | y /= y_norm 43 | 44 | eigenvalue_new = np.dot(y, A @ y) 45 | 46 | if np.abs(eigenvalue_new - eigenvalue) < tol: 47 | return eigenvalue_new, y 48 | eigenvalue = eigenvalue_new 49 | x = y 50 | 51 | raise ValueError( 52 | "Inverse Power method did not converge within the maximum number of iterations." 53 | ) 54 | -------------------------------------------------------------------------------- /src/5_matrices/inverse_power_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/inverse_power_method/tests/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/power_method/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/power_method/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/power_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/power_method/examples/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/power_method/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from numpy.linalg import norm 4 | 5 | 6 | def power_method(A, max_iterations=100, tolerance=1e-6): 7 | """ 8 | Power Method for finding the dominant eigenvalue and its associated eigenvector. 9 | 10 | Parameters: 11 | A (ndarray): Square numpy matrix. 12 | max_iterations (int): Maximum number of iterations. 13 | tolerance (float): Convergence threshold for the eigenvalue. 14 | 15 | Returns: 16 | dominant_eigenvalue (float): The largest eigenvalue in magnitude. 17 | dominant_eigenvector (ndarray): Corresponding eigenvector. 
18 | """ 19 | n = A.shape[0] 20 | x = np.random.rand(n) # Initial random vector 21 | x = x / norm(x) # Normalize the initial vector 22 | 23 | eigenvalues = [] # Track eigenvalue convergence 24 | 25 | for i in range(max_iterations): 26 | # Matrix-vector multiplication 27 | y = np.dot(A, x) 28 | # Estimate the dominant eigenvalue using the Rayleigh quotient 29 | eigenvalue = np.dot(x.T, y) 30 | eigenvalues.append(eigenvalue) 31 | 32 | # Normalize the resulting vector 33 | x_next = y / norm(y) 34 | 35 | # Check convergence 36 | if norm(x_next - x) < tolerance: 37 | print(f"Converged after {i+1} iterations.") 38 | break 39 | 40 | x = x_next 41 | 42 | dominant_eigenvalue = eigenvalue 43 | dominant_eigenvector = x 44 | 45 | # Plot convergence 46 | plt.figure(figsize=(8, 6)) 47 | plt.plot(range(1, len(eigenvalues) + 1), eigenvalues, marker="o", linestyle="-") 48 | plt.title("Convergence of Dominant Eigenvalue") 49 | plt.xlabel("Iteration") 50 | plt.ylabel("Eigenvalue Estimate") 51 | plt.grid(True) 52 | plt.show() 53 | 54 | return dominant_eigenvalue, dominant_eigenvector 55 | 56 | 57 | # Example Matrix A 58 | A = np.array([[4, 1], [2, 3]]) 59 | 60 | # Perform Power Method for Dominant Eigenvalue and Eigenvector 61 | eigenvalue, eigenvector = power_method(A) 62 | print("\nDominant Eigenvalue (Power Method):", eigenvalue) 63 | print("Dominant Eigenvector (Power Method):", eigenvector) 64 | -------------------------------------------------------------------------------- /src/5_matrices/power_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/power_method/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/power_method/implementation/power_method.py: -------------------------------------------------------------------------------- 1 | # power_method.py 2 | import numpy as np 3 | from typing import Optional, Tuple 4 | 5 | 6 | def power_method( 7 | A: np.ndarray, 8 | tol: float = 1e-8, 9 | max_iterations: int = 1000, 10 | x0: Optional[np.ndarray] = None, 11 | ) -> Tuple[float, np.ndarray]: 12 | if A.shape[0] != A.shape[1]: 13 | raise ValueError("Matrix must be square.") 14 | n = A.shape[0] 15 | if x0 is None: 16 | x = np.ones(n) 17 | else: 18 | x = x0.astype(float) 19 | x /= np.linalg.norm(x) 20 | eigenvalue = 0.0 21 | for _ in range(max_iterations): 22 | x_old = x.copy() 23 | y = A @ x 24 | y_norm = np.linalg.norm(y) 25 | if y_norm == 0: 26 | raise ValueError("Encountered zero vector during iterations.") 27 | x_new = y / y_norm 28 | if np.dot(x_old, x_new) < 0: 29 | x_new = -x_new 30 | eigenvalue_new = np.dot(A @ x_old, x_new) 31 | if np.abs(eigenvalue_new - eigenvalue) < tol: 32 | if not np.isclose( 33 | eigenvalue_new.imag if np.iscomplex(eigenvalue_new) else 0, 0, atol=tol 34 | ): 35 | raise ValueError("Matrix has complex eigenvalues.") 36 | eigenvalue_new = eigenvalue_new.real 37 | x_new = x_new.real 38 | x_new[np.abs(x_new) < 1e-10] = 0.0 39 | return eigenvalue_new, x_new 40 | eigenvalue = eigenvalue_new 41 | x = x_new 42 | raise ValueError( 43 | "Power method did not converge within the maximum number of iterations." 
44 | ) 45 | -------------------------------------------------------------------------------- /src/5_matrices/power_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/power_method/tests/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/qr_method/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/qr_method/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/qr_method/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/qr_method/examples/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/qr_method/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def qr_method_visualization(A, max_iterations=50, tol=1e-8): 6 | """ 7 | Visualize the QR method's iterative transformation of a matrix towards an upper-triangular form. 8 | 9 | Parameters: 10 | A (numpy.ndarray): Input square matrix. 11 | max_iterations (int): Maximum number of iterations. 12 | tol (float): Convergence tolerance for off-diagonal elements. 13 | """ 14 | A_k = A.copy() 15 | n = A.shape[0] 16 | off_diagonal_norms = [] 17 | 18 | fig, axes = plt.subplots(1, 5, figsize=(20, 4)) 19 | step_intervals = max_iterations // 5 20 | 21 | for i in range(max_iterations): 22 | # Perform QR decomposition 23 | Q, R = np.linalg.qr(A_k) 24 | A_k = R @ Q 25 | 26 | # Compute off-diagonal norm to check convergence 27 | off_diag = A_k - np.diag(np.diag(A_k)) 28 | off_diag_norm = np.linalg.norm(off_diag) 29 | off_diagonal_norms.append(off_diag_norm) 30 | 31 | # Stop if off-diagonal elements are sufficiently small 32 | if off_diag_norm < tol: 33 | print(f"Converged at iteration {i+1}") 34 | break 35 | 36 | # Plot progress at specific intervals 37 | if i % step_intervals == 0: 38 | ax_idx = i // step_intervals 39 | axes[ax_idx].imshow(A_k, cmap="viridis", interpolation="nearest") 40 | axes[ax_idx].set_title(f"Iteration {i}") 41 | axes[ax_idx].axis("off") 42 | 43 | # Final visualization of convergence 44 | axes[-1].imshow(A_k, cmap="viridis", interpolation="nearest") 45 | axes[-1].set_title(f"Iteration {i+1} (Final)") 46 | axes[-1].axis("off") 47 | 48 | plt.suptitle("QR Method: Matrix Evolution Towards Upper-Triangular Form") 49 | plt.tight_layout() 50 | plt.show() 51 | 52 | # Plot convergence of off-diagonal elements 53 | plt.figure(figsize=(8, 6)) 54 | plt.semilogy(off_diagonal_norms, label="Off-diagonal Norm") 55 | plt.xlabel("Iteration") 56 | plt.ylabel("Log Scale: Off-diagonal Norm") 57 | plt.title("Convergence of Off-diagonal Elements") 58 | plt.legend() 59 | plt.grid(True) 60 | plt.show() 61 | 62 | 63 | if __name__ == "__main__": 64 | # Example: Input matrix 65 | A = np.array([[4, 1, 2], [1, 3, 1], [2, 1, 5]], dtype=float) 66 | 67 | print("Initial Matrix A:") 68 | print(A) 69 | qr_method_visualization(A) 70 | 
-------------------------------------------------------------------------------- /src/5_matrices/qr_method/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/qr_method/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/qr_method/implementation/qr_method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.linalg import hessenberg 3 | 4 | 5 | def qr_decomposition(A: np.ndarray): 6 | return np.linalg.qr(A) 7 | 8 | 9 | def qr_algorithm(A: np.ndarray, tol: float = 1e-8, max_iterations: int = 2000): 10 | if A.shape[0] != A.shape[1]: 11 | raise ValueError("Matrix must be square.") 12 | 13 | A_k = hessenberg(A).astype(float) 14 | eigenvalues = [] 15 | 16 | iterations = 0 # Track the number of iterations 17 | while A_k.shape[0] > 0: 18 | for _ in range(max_iterations): 19 | iterations += 1 20 | if A_k.shape[0] == 1: 21 | eigenvalues.append(A_k[0, 0]) 22 | A_k = np.empty((0, 0)) 23 | break 24 | 25 | if abs(A_k[-1, -2]) < tol: 26 | eigenvalues.append(A_k[-1, -1]) 27 | A_k = A_k[:-1, :-1] 28 | break 29 | 30 | if A_k.shape[0] == 2: 31 | trace = np.trace(A_k) 32 | det = np.linalg.det(A_k) 33 | discriminant = (trace / 2) ** 2 - det 34 | 35 | if discriminant >= 0: 36 | eigenvalues.append(trace / 2 + np.sqrt(discriminant)) 37 | eigenvalues.append(trace / 2 - np.sqrt(discriminant)) 38 | else: 39 | eigenvalues.append(trace / 2 + 1j * np.sqrt(-discriminant)) 40 | eigenvalues.append(trace / 2 - 1j * np.sqrt(-discriminant)) 41 | A_k = np.empty((0, 0)) 42 | break 43 | 44 | Q, R = qr_decomposition(A_k) 45 | A_k = R @ Q 46 | 47 | if iterations >= max_iterations: 48 | raise ValueError( 49 | "QR algorithm did not converge within the maximum number of iterations." 
50 | ) 51 | 52 | return np.sort(eigenvalues) 53 | 54 | 55 | def qr_algorithm_with_shifts( 56 | A: np.ndarray, tol: float = 1e-8, max_iterations: int = 2000 57 | ): 58 | if A.shape[0] != A.shape[1]: 59 | raise ValueError("Matrix must be square.") 60 | 61 | A_k = hessenberg(A).astype(float) 62 | eigenvalues = [] 63 | 64 | iterations = 0 # Track the number of iterations 65 | while A_k.shape[0] > 0: 66 | for _ in range(max_iterations): 67 | iterations += 1 68 | if A_k.shape[0] == 1: 69 | eigenvalues.append(A_k[0, 0]) 70 | A_k = np.empty((0, 0)) 71 | break 72 | 73 | if abs(A_k[-1, -2]) < tol: 74 | eigenvalues.append(A_k[-1, -1]) 75 | A_k = A_k[:-1, :-1] 76 | break 77 | 78 | if A_k.shape[0] == 2: 79 | trace = np.trace(A_k) 80 | det = np.linalg.det(A_k) 81 | discriminant = (trace / 2) ** 2 - det 82 | 83 | if discriminant >= 0: 84 | eigenvalues.append(trace / 2 + np.sqrt(discriminant)) 85 | eigenvalues.append(trace / 2 - np.sqrt(discriminant)) 86 | else: 87 | eigenvalues.append(trace / 2 + 1j * np.sqrt(-discriminant)) 88 | eigenvalues.append(trace / 2 - 1j * np.sqrt(-discriminant)) 89 | A_k = np.empty((0, 0)) 90 | break 91 | 92 | submatrix = A_k[-2:, -2:] 93 | trace = np.trace(submatrix) 94 | det = np.linalg.det(submatrix) 95 | discriminant = (trace / 2) ** 2 - det 96 | 97 | if discriminant >= 0: 98 | shift = trace / 2 + np.sign(trace / 2) * np.sqrt(discriminant) 99 | else: 100 | shift = trace / 2 101 | 102 | Q, R = qr_decomposition(A_k - shift * np.eye(A_k.shape[0])) 103 | A_k = R @ Q + shift * np.eye(A_k.shape[0]) 104 | 105 | if iterations >= max_iterations: 106 | raise ValueError( 107 | "QR algorithm with shifts did not converge within the maximum number of iterations." 108 | ) 109 | 110 | return np.sort(eigenvalues) 111 | -------------------------------------------------------------------------------- /src/5_matrices/qr_method/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/qr_method/tests/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/singular_value_decomposition/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/singular_value_decomposition/examples/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from numpy.linalg import svd 4 | 5 | 6 | def plot_svd(A): 7 | """ 8 | Computes and visualizes the Singular Value Decomposition (SVD) of a matrix A. 9 | 10 | Parameters: 11 | A (ndarray): Input matrix to decompose. 
12 | """ 13 | # Perform SVD 14 | U, Sigma, VT = svd(A) 15 | 16 | # Construct diagonal matrix Sigma for visualization 17 | Sigma_matrix = np.zeros_like(A, dtype=float) 18 | np.fill_diagonal(Sigma_matrix, Sigma) 19 | 20 | # Print results 21 | print("Original Matrix A:") 22 | print(A) 23 | print("\nLeft Singular Vectors (U):") 24 | print(U) 25 | print("\nSingular Values (Sigma):") 26 | print(Sigma) 27 | print("\nRight Singular Vectors (V^T):") 28 | print(VT) 29 | 30 | # Plot the original matrix and its SVD components 31 | fig, axs = plt.subplots(1, 4, figsize=(20, 6)) 32 | 33 | # Plot Original Matrix A 34 | axs[0].imshow(A, cmap="viridis", aspect="auto") 35 | axs[0].set_title("Original Matrix A") 36 | axs[0].axis("off") 37 | 38 | # Plot Left Singular Vectors U 39 | axs[1].imshow(U, cmap="coolwarm", aspect="auto") 40 | axs[1].set_title("Left Singular Vectors (U)") 41 | axs[1].axis("off") 42 | 43 | # Plot Singular Values Sigma 44 | axs[2].imshow(Sigma_matrix, cmap="coolwarm", aspect="auto") 45 | axs[2].set_title("Singular Values (Sigma)") 46 | axs[2].axis("off") 47 | 48 | # Plot Right Singular Vectors VT 49 | axs[3].imshow(VT, cmap="coolwarm", aspect="auto") 50 | axs[3].set_title("Right Singular Vectors (V^T)") 51 | axs[3].axis("off") 52 | 53 | plt.tight_layout() 54 | plt.show() 55 | 56 | 57 | # Example matrix A 58 | A = np.array([[3, 4], [2, 1], [0, 5]]) 59 | 60 | # Perform and plot SVD 61 | plot_svd(A) 62 | -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/singular_value_decomposition/implementation/__init__.py -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/implementation/singular_value_decomposition.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Tuple 3 | 4 | 5 | def singular_value_decomposition( 6 | A: np.ndarray, 7 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 8 | U, S, Vt = np.linalg.svd(A, full_matrices=True) 9 | S_matrix = np.zeros((U.shape[0], Vt.shape[0])) 10 | np.fill_diagonal(S_matrix, S) 11 | return U, S_matrix, Vt 12 | 13 | 14 | def singular_value_decomposition_reduced(A: np.ndarray): 15 | U, S, Vt = np.linalg.svd(A, full_matrices=False) 16 | S_matrix = np.diag(S) # Compact diagonal matrix 17 | return U, S_matrix, Vt 18 | -------------------------------------------------------------------------------- /src/5_matrices/singular_value_decomposition/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/5_matrices/singular_value_decomposition/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/__init__.py -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/cubic_spline/__init__.py -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/cubic_spline/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/examples/example.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from cubic_spline import cubic_spline 4 | 5 | 6 | def main(): 7 | x = np.array([0, 1, 2, 3]) 8 | y = np.array([0, -1, 4, 3]) 9 | 10 | result_function = cubic_spline(x, y) 11 | 12 | x_new = np.arange(0, 3.01, 0.1)  # Stay inside [0, 3]; the spline raises outside the data range 13 | y_new = np.array([result_function(i) for i in x_new]) 14 | 15 | fig = plt.figure(figsize=(10, 8)) 16 | plt.plot(x_new, y_new, "b", x, y, "ro") 17 | plt.title("Cubic Spline") 18 | plt.grid() 19 | plt.xlabel("x") 20 | plt.ylabel("y") 21 | plt.show() 22 | 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/cubic_spline/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/implementation/cubic_spline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import bisect 3 | from typing import Callable 4 | 5 | 6 | def solve_tridiagonal_system( 7 | a: np.ndarray, b: np.ndarray, c: np.ndarray, d: np.ndarray 8 | ) -> np.ndarray: 9 | n = len(d) 10 | for i in range(1, n): 11 | w = a[i - 1] / b[i - 1] 12 | b[i] -= w * c[i - 1] 13 | d[i] -= w * d[i - 1] 14 | x = np.zeros(n) 15 | x[-1] = d[-1] / b[-1] 16 | for i in range(n - 2, -1, -1): 17 | x[i] = (d[i] - c[i] * x[i + 1]) / b[i] 18 | return x 19 | 20 | 21 | def cubic_spline(x_data: np.ndarray, y_data: np.ndarray) -> Callable[[float], float]: 22 | if x_data.shape[0] != y_data.shape[0]: 23 | raise ValueError("X and Y vectors must have equal number of elements.") 24 | if x_data.shape[0] < 3: 25 | raise ValueError("At least three points are required for cubic spline interpolation.") 26 | if len(np.unique(x_data)) != x_data.shape[0]: 27 | raise ValueError("X data must contain unique values.") 28 | if not np.all(np.diff(x_data) > 0): 29 | raise ValueError("X data must be strictly increasing.") 30 | n = x_data.shape[0] 31 | h = np.diff(x_data) 32 | alpha = np.zeros(n - 1) 33 | for i in range(1, n - 1): 34 | alpha[i] = 3 * ( 35 | (y_data[i + 1] - y_data[i]) / h[i] - (y_data[i] - y_data[i - 1]) / h[i - 1] 36 | ) 37 | l = np.ones(n) 38 | mu = np.zeros(n) 39 | z = np.zeros(n) 40 | for i in range(1, n - 1): 41 | l[i] = 2 * (x_data[i + 1] - x_data[i - 1]) - h[i - 1] * mu[i - 1] 42 | mu[i] = h[i] / l[i] 43 | z[i] = (alpha[i] - h[i - 1] * z[i - 1]) / l[i] 44 | M = np.zeros(n) 45 | for j in range(n - 2, 0, -1): 46 | M[j] = z[j] - mu[j] * M[j + 1] 47 | 48 | def spline(X: float) -> float: 49 | if X < x_data[0] or X > x_data[-1]: 50 | raise ValueError("Point is outside the interpolation range.") 51 | i = bisect.bisect_right(x_data, X) - 1 52 | i = min(max(i, 0), n - 2) 53 | dx = X - x_data[i] 54 | return ( 55 | M[i] / (6 * h[i]) * ((x_data[i + 1] - X)
** 3) 56 | + M[i + 1] / (6 * h[i]) * (dx ** 3) 57 | + (y_data[i] - M[i] * h[i] ** 2 / 6) * ((x_data[i + 1] - X) / h[i]) 58 | + (y_data[i + 1] - M[i + 1] * h[i] ** 2 / 6) * (dx / h[i]) 59 | ) 60 | 61 | return spline 62 | -------------------------------------------------------------------------------- /src/6_regression/cubic_spline/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/cubic_spline/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/gaussian_interpolation/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from sklearn.gaussian_process import GaussianProcessRegressor 4 | from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C 5 | 6 | 7 | def gaussian_process_regression(data_x, data_y, target_x): 8 | """ 9 | Perform Gaussian Process Regression on a given dataset. 10 | 11 | Parameters: 12 | data_x (numpy.ndarray): x values (1D array). 13 | data_y (numpy.ndarray): Corresponding y values. 14 | target_x (numpy.ndarray): The x values to predict. 15 | 16 | Returns: 17 | y_pred (numpy.ndarray): Predicted values at target_x. 18 | sigma (numpy.ndarray): Standard deviations of the predictions. 19 | """ 20 | # Reshape data for sklearn 21 | X = np.atleast_2d(data_x).T 22 | target_X = np.atleast_2d(target_x).T 23 | 24 | # Define the kernel 25 | kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2)) 26 | 27 | # Create and fit Gaussian Process Regressor 28 | gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10) 29 | gp.fit(X, data_y) 30 | 31 | # Predict on target points 32 | y_pred, sigma = gp.predict(target_X, return_std=True) 33 | return y_pred, sigma 34 | 35 | 36 | def plot_gaussian_process(data_x, data_y, target_x): 37 | """ 38 | Plot the Gaussian Process Regression results along with original data points. 39 | 40 | Parameters: 41 | data_x (numpy.ndarray): Original x values. 42 | data_y (numpy.ndarray): Original y values. 43 | target_x (numpy.ndarray): The x values to predict. 
44 | """ 45 | # Perform Gaussian Process Regression 46 | y_pred, sigma = gaussian_process_regression(data_x, data_y, target_x) 47 | 48 | # Plot results 49 | plt.figure(figsize=(10, 6)) 50 | plt.plot(data_x, data_y, "o", label="Data points", markersize=8) 51 | plt.plot(target_x, y_pred, "b-", label="Prediction") 52 | plt.fill_between( 53 | target_x, 54 | y_pred - 1.96 * sigma, 55 | y_pred + 1.96 * sigma, 56 | alpha=0.2, 57 | color="gray", 58 | label="95% Confidence Interval", 59 | ) 60 | plt.title("Gaussian Process Regression") 61 | plt.xlabel("x") 62 | plt.ylabel("f(x)") 63 | plt.legend() 64 | plt.grid(True) 65 | plt.show() 66 | 67 | 68 | if __name__ == "__main__": 69 | # Example dataset 70 | data_x = np.array([0, 1, 2, 3, 4], dtype=float) 71 | data_y = np.array([2, 3.5, 5, 5.8, 6], dtype=float) 72 | 73 | # Target points to interpolate/predict 74 | target_x = np.linspace(0, 4, 100) 75 | 76 | # Perform and plot Gaussian Process Regression 77 | plot_gaussian_process(data_x, data_y, target_x) 78 | -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/gaussian_interpolation/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/implementation/gaussian_interpolation.py: -------------------------------------------------------------------------------- 1 | # gaussian_interpolation.py 2 | import numpy as np 3 | from typing import Tuple 4 | 5 | 6 | def solve_linear_system(A: np.ndarray, b: np.ndarray) -> np.ndarray: 7 | n = A.shape[0] 8 | augmented = np.hstack((A.astype(float), b.reshape(-1, 1).astype(float))) 9 | for i in range(n): 10 | pivot = np.argmax(np.abs(augmented[i:, i])) + i 11 | if np.isclose(augmented[pivot, i], 0.0): 12 | raise ValueError("Matrix is singular or nearly singular.") 13 | if pivot != i: 14 | augmented[[i, pivot]] = augmented[[pivot, i]] 15 | augmented[i] = augmented[i] / augmented[i, i] 16 | for j in range(i + 1, n): 17 | augmented[j] -= augmented[j, i] * augmented[i] 18 | x = np.zeros(n) 19 | for i in range(n - 1, -1, -1): 20 | x[i] = augmented[i, -1] - np.dot(augmented[i, i + 1 : n], x[i + 1 : n]) 21 | return x 22 | 23 | 24 | def gaussian_interpolation( 25 | x_data: np.ndarray, y_data: np.ndarray, point: float 26 | ) -> float: 27 | if x_data.shape[0] != y_data.shape[0]: 28 | raise ValueError("X and Y vectors must have equal number of elements.") 29 | if x_data.shape[0] < 2: 30 | raise ValueError("At least two points are required for interpolation.") 31 | if len(np.unique(x_data)) != x_data.shape[0]: 32 | raise ValueError("X data must contain unique values.") 33 | if point < np.min(x_data) or point > np.max(x_data): 34 | raise ValueError("Point is out of bounds.") 35 | 36 | x_mean = np.mean(x_data) 37 | x_scale = np.max(np.abs(x_data - x_mean)) 38 | x_scaled = (x_data - x_mean) / x_scale 39 | point_scaled = (point - x_mean) / x_scale 40 | 41 | vandermonde = np.vander(x_scaled, increasing=True) 42 | coefficients = solve_linear_system(vandermonde, y_data) 43 | return np.dot(coefficients, point_scaled ** np.arange(len(coefficients))) 44 | -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/tests/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/gaussian_interpolation/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/gaussian_interpolation/tests/test_gaussian_interpolation.py: -------------------------------------------------------------------------------- 1 | # test_gaussian_interpolation.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.gaussian_interpolation import gaussian_interpolation 5 | 6 | 7 | def test_gaussian_interpolation_basic(): 8 | x = np.array([0, 1, 2]) 9 | y = np.array([1, 3, 2]) 10 | point = 1.5 11 | result = gaussian_interpolation(x, y, point) 12 | expected = 2.875 13 | assert np.isclose(result, expected, atol=0.1) 14 | 15 | 16 | def test_gaussian_interpolation_single_point(): 17 | x = np.array([2]) 18 | y = np.array([5]) 19 | point = 2 20 | with pytest.raises(ValueError): 21 | gaussian_interpolation(x, y, point) 22 | 23 | 24 | def test_gaussian_interpolation_exact_match(): 25 | x = np.array([0, 1, 2, 3]) 26 | y = np.array([0, 1, 8, 27]) 27 | point = 2 28 | result = gaussian_interpolation(x, y, point) 29 | expected = 8.0 30 | assert np.isclose(result, expected, atol=0.1) 31 | 32 | 33 | def test_gaussian_interpolation_out_of_bounds_low(): 34 | x = np.array([1, 2, 3]) 35 | y = np.array([2, 4, 6]) 36 | point = 0 37 | with pytest.raises(ValueError): 38 | gaussian_interpolation(x, y, point) 39 | 40 | 41 | def test_gaussian_interpolation_out_of_bounds_high(): 42 | x = np.array([1, 2, 3]) 43 | y = np.array([2, 4, 6]) 44 | point = 4 45 | with pytest.raises(ValueError): 46 | gaussian_interpolation(x, y, point) 47 | 48 | 49 | def test_gaussian_interpolation_non_equal_lengths(): 50 | x = np.array([0, 1, 2]) 51 | y = np.array([0, 1]) 52 | point = 1 53 | with pytest.raises(ValueError): 54 | gaussian_interpolation(x, y, point) 55 | 56 | 57 | def test_gaussian_interpolation_insufficient_points(): 58 | x = np.array([]) 59 | y = np.array([]) 60 | point = 0 61 | with pytest.raises(ValueError): 62 | gaussian_interpolation(x, y, point) 63 | 64 | 65 | def test_gaussian_interpolation_duplicate_x_values(): 66 | x = np.array([0, 1, 1, 2]) 67 | y = np.array([0, 1, 1, 4]) 68 | point = 1 69 | with pytest.raises(ValueError): 70 | gaussian_interpolation(x, y, point) 71 | 72 | 73 | def test_gaussian_interpolation_negative_values(): 74 | x = np.array([-2, -1, 0, 1]) 75 | y = np.array([4, 1, 0, 1]) 76 | point = -1.5 77 | result = gaussian_interpolation(x, y, point) 78 | expected = 2.25 79 | assert np.isclose(result, expected, atol=0.1) 80 | 81 | 82 | def test_gaussian_interpolation_float_precision(): 83 | x = np.array([0.0, 1.0, 2.0]) 84 | y = np.array([0.0, 1.0, 4.0]) 85 | point = 1.999999 86 | result = gaussian_interpolation(x, y, point) 87 | expected = 3.999996 88 | assert np.isclose(result, expected, atol=1e-5) 89 | 90 | 91 | def test_gaussian_interpolation_multiple_segments(): 92 | x = np.linspace(0, 10, 11) 93 | y = x ** 2 94 | point = 7.3 95 | result = gaussian_interpolation(x, y, point) 96 | expected = 7 ** 2 + (8 ** 2 - 7 ** 2) * 0.3 97 | assert np.isclose(result, expected, atol=0.5) 98 | 99 | 100 | @pytest.mark.skip() 101 | def test_gaussian_interpolation_large_dataset(): 102 | x = np.linspace(-100, 100, 201) 103 | y = np.sin(x) 104 | point = 23.456 105 | result = gaussian_interpolation(x, y, point) 106 | expected = np.sin(point) 107 | assert 
np.isclose(result, expected, atol=1e-3) 108 | 109 | 110 | def test_gaussian_interpolation_exact_match_middle(): 111 | x = np.array([0, 1, 2, 3, 4]) 112 | y = np.array([0, 1, 4, 9, 16]) 113 | point = 2 114 | result = gaussian_interpolation(x, y, point) 115 | expected = 4.0 116 | assert np.isclose(result, expected, atol=0.1) 117 | 118 | 119 | def test_gaussian_interpolation_multiple_queries(): 120 | x = np.array([0, 1, 2, 3, 4]) 121 | y = np.array([0, 1, 4, 9, 16]) 122 | points = [0.5, 1.5, 2.5, 3.5] 123 | expected = [0.5, 2.5, 6.5, 12.5] 124 | results = [gaussian_interpolation(x, y, p) for p in points] 125 | assert np.allclose(results, expected, atol=0.25) 126 | -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/lagrange_polynomial/__init__.py -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/lagrange_polynomial/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/examples/example.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Calculate the Lagrange polynomial for the following points: 4 | 5 | A(-9, -2) 6 | B(-5, 3) 7 | C(-2.5, 0) 8 | D(4, 5) 9 | F(7, 11) 10 | """ 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | import scipy.interpolate 14 | from lagrange_polynomial import lagrange_polynomial 15 | 16 | # Define the data 17 | x = np.array([-9, -5, -2.5, 4, 7]) 18 | y = np.array([-2, 3, 0, 5, 11]) 19 | 20 | # Plot the raw data 21 | plt.scatter(x, y, s=200, marker="P", color="black", label="Raw data") 22 | plt.xlabel("x") 23 | plt.ylabel("y") 24 | 25 | # Create the domain 26 | x_domain = np.linspace(1.1 * min(x), 1.1 * max(x), 100) 27 | 28 | # Evaluate the djeada implementation pointwise (it returns a value, not a callable) 29 | y_values = [lagrange_polynomial(x, y, xi) for xi in x_domain] 30 | y_prediction = np.array(y_values) 31 | 32 | plt.plot(x_domain, y_prediction, lw=4, label="Lagrange djeada") 33 | 34 | # Create the model and show the predictions for scipy implementation 35 | model = scipy.interpolate.lagrange(x, y) 36 | y_prediction = model(x_domain) 37 | plt.plot(x_domain, y_prediction, label="Lagrange scipy") 38 | 39 | # Add a legend and show the plot 40 | plt.legend(loc="best") 41 | plt.show() 42 | -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/lagrange_polynomial/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/implementation/lagrange_polynomial.py: -------------------------------------------------------------------------------- 1 | # lagrange_polynomial.py 2 | import numpy as np 3 | from typing import Tuple 4 | 5 | 6 | def
lagrange_polynomial(x_data: np.ndarray, y_data: np.ndarray, point: float) -> float: 7 | if x_data.shape[0] != y_data.shape[0]: 8 | raise ValueError("X and Y vectors must have equal number of elements.") 9 | if x_data.shape[0] < 1: 10 | raise ValueError("X and Y vectors must contain at least one element.") 11 | if len(np.unique(x_data)) != x_data.shape[0]: 12 | raise ValueError("X data must contain unique values.") 13 | P = 0.0 14 | n = x_data.shape[0] 15 | for i in range(n): 16 | L_i = 1.0 17 | for j in range(n): 18 | if j != i: 19 | denominator = x_data[i] - x_data[j] 20 | if denominator == 0: 21 | raise ValueError("Duplicate x-values detected.") 22 | L_i *= (point - x_data[j]) / denominator 23 | P += y_data[i] * L_i 24 | return P 25 | -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/lagrange_polynomial/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/lagrange_polynomial/tests/test_lagrange_polynomial.py: -------------------------------------------------------------------------------- 1 | # test_lagrange_polynomial.py 2 | import pytest 3 | import numpy as np 4 | from ..implementation.lagrange_polynomial import lagrange_polynomial 5 | 6 | 7 | def test_lagrange_polynomial_basic(): 8 | x = np.array([0, 1, 2]) 9 | y = np.array([1, 3, 2]) 10 | point = 1.5 11 | result = lagrange_polynomial(x, y, point) 12 | expected = 2.875 13 | assert np.isclose(result, expected, atol=1e-6) 14 | 15 | 16 | def test_lagrange_polynomial_single_point(): 17 | x = np.array([2]) 18 | y = np.array([5]) 19 | point = 2 20 | result = lagrange_polynomial(x, y, point) 21 | expected = 5.0 22 | assert np.isclose(result, expected, atol=1e-6) 23 | 24 | 25 | def test_lagrange_polynomial_exact_match(): 26 | x = np.array([0, 1, 2, 3]) 27 | y = np.array([0, 1, 8, 27]) 28 | point = 2 29 | result = lagrange_polynomial(x, y, point) 30 | expected = 8.0 31 | assert np.isclose(result, expected, atol=1e-6) 32 | 33 | 34 | def test_lagrange_polynomial_out_of_bounds_low(): 35 | x = np.array([1, 2, 3]) 36 | y = np.array([2, 4, 6]) 37 | point = 0 38 | result = lagrange_polynomial(x, y, point) 39 | expected = 0.0 40 | assert np.isclose(result, expected, atol=1e-6) 41 | 42 | 43 | def test_lagrange_polynomial_out_of_bounds_high(): 44 | x = np.array([1, 2, 3]) 45 | y = np.array([2, 4, 6]) 46 | point = 4 47 | result = lagrange_polynomial(x, y, point) 48 | expected = 8.0 49 | assert np.isclose(result, expected, atol=1e-6) 50 | 51 | 52 | def test_lagrange_polynomial_non_equal_lengths(): 53 | x = np.array([0, 1, 2]) 54 | y = np.array([0, 1]) 55 | point = 1 56 | with pytest.raises(ValueError): 57 | lagrange_polynomial(x, y, point) 58 | 59 | 60 | def test_lagrange_polynomial_insufficient_points(): 61 | x = np.array([]) 62 | y = np.array([]) 63 | point = 0 64 | with pytest.raises(ValueError): 65 | lagrange_polynomial(x, y, point) 66 | 67 | 68 | def test_lagrange_polynomial_duplicate_x_values(): 69 | x = np.array([0, 1, 1, 2]) 70 | y = np.array([0, 1, 1, 4]) 71 | point = 1 72 | with pytest.raises(ValueError): 73 | lagrange_polynomial(x, y, point) 74 | 75 | 76 | def test_lagrange_polynomial_negative_values(): 77 | x = np.array([-2, -1, 0, 1]) 78 | y = np.array([4, 1, 0, 1]) 79 | point = -1.5 80 | result = 
lagrange_polynomial(x, y, point) 81 | expected = 2.25 82 | assert np.isclose(result, expected, atol=1e-6) 83 | 84 | 85 | def test_lagrange_polynomial_float_precision(): 86 | x = np.array([0.0, 1.0, 2.0]) 87 | y = np.array([0.0, 1.0, 4.0]) 88 | point = 1.999999 89 | result = lagrange_polynomial(x, y, point) 90 | expected = 3.999996 91 | assert np.isclose(result, expected, atol=1e-5) 92 | 93 | 94 | def test_lagrange_polynomial_multiple_segments(): 95 | x = np.linspace(0, 10, 11) 96 | y = x ** 2 97 | point = 7.3 98 | result = lagrange_polynomial(x, y, point) 99 | expected = 7 ** 2 + (8 ** 2 - 7 ** 2) * 0.3 100 | assert np.isclose(result, expected, atol=0.3) 101 | 102 | 103 | def test_lagrange_polynomial_large_dataset(): 104 | x = np.linspace(-100, 100, 201) 105 | y = np.sin(x) 106 | point = 23.456 107 | result = lagrange_polynomial(x, y, point) 108 | expected = np.sin(23.456) 109 | assert np.isclose(result, expected, atol=1e-3) 110 | 111 | 112 | def test_lagrange_polynomial_exact_match_middle(): 113 | x = np.array([0, 1, 2, 3, 4]) 114 | y = np.array([0, 1, 4, 9, 16]) 115 | point = 2 116 | result = lagrange_polynomial(x, y, point) 117 | expected = 4 118 | assert np.isclose(result, expected, atol=1e-6) 119 | 120 | 121 | def test_lagrange_polynomial_multiple_queries(): 122 | x = np.array([0, 1, 2, 3, 4]) 123 | y = np.array([0, 1, 4, 9, 16]) 124 | points = [0.5, 1.5, 2.5, 3.5] 125 | expected = [0.25, 2.25, 6.25, 12.25] 126 | results = [lagrange_polynomial(x, y, p) for p in points] 127 | assert np.allclose(results, expected, atol=1e-6) 128 | -------------------------------------------------------------------------------- /src/6_regression/least_squares/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/least_squares/__init__.py -------------------------------------------------------------------------------- /src/6_regression/least_squares/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/least_squares/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/least_squares/examples/example.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from least_squares import least_squares 4 | 5 | 6 | def main(): 7 | x = np.linspace(0, 10) 8 | y = np.random.normal(x, 0.5) 9 | 10 | A = np.vstack([x, np.ones_like(x)]).T  # Design matrix: slope and intercept columns 11 | beta = least_squares(A, y) 12 | y_computed = beta[0] * x + beta[1] 13 | 14 | plt.figure(figsize=(10, 8)) 15 | plt.plot(x, y, "b.") 16 | plt.plot( 17 | x, 18 | y_computed, 19 | "r", 20 | label="y = {}*x + {}".format(round(beta[0], 2), round(beta[1], 2)), 21 | ) 22 | plt.xlabel("x") 23 | plt.ylabel("y") 24 | plt.legend(loc="upper left") 25 | plt.show() 26 | 27 | 28 | if __name__ == "__main__": 29 | main() 30 | -------------------------------------------------------------------------------- /src/6_regression/least_squares/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Generate example data: Input (X) and Output (Y) with noise 5 | np.random.seed(42) 6 | x = np.linspace(1, 10, 20) # 20 evenly spaced
feature values 7 | y = 0.5 * x + 2 + np.random.normal(0, 0.5, size=len(x)) # Linear relation with noise 8 | 9 | # Step 1: Add intercept term to X (column of ones) 10 | X = np.vstack([np.ones(len(x)), x]).T # Design matrix 11 | 12 | # Step 2: Compute the Normal Equation: beta = (X^T X)^(-1) X^T Y 13 | XT_X = X.T @ X # X transpose multiplied by X 14 | XT_Y = X.T @ y # X transpose multiplied by Y 15 | beta = np.linalg.inv(XT_X) @ XT_Y # Solving for beta 16 | 17 | # Step 3: Define the regression line 18 | x_fit = np.linspace(min(x), max(x), 100) # Smooth range for plotting 19 | y_fit = beta[0] + beta[1] * x_fit # Fitted line 20 | 21 | # Step 4: Visualization 22 | plt.figure(figsize=(8, 6)) 23 | plt.scatter(x, y, color="red", label="Data Points") # Original data points 24 | plt.plot( 25 | x_fit, y_fit, color="blue", label=f"Fitted Line: y = {beta[0]:.2f} + {beta[1]:.2f}x" 26 | ) 27 | plt.xlabel("X-axis") 28 | plt.ylabel("Y-axis") 29 | plt.title("Least Squares Regression - Fitted Line") 30 | plt.legend() 31 | plt.grid(True) 32 | plt.show() 33 | 34 | # Display the regression coefficients 35 | print(f"Regression Coefficients: Intercept = {beta[0]:.2f}, Slope = {beta[1]:.2f}") 36 | -------------------------------------------------------------------------------- /src/6_regression/least_squares/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/least_squares/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/least_squares/implementation/least_squares.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.linalg import LinAlgError 3 | from typing import Tuple 4 | 5 | 6 | def solve_linear_system(A: np.ndarray, b: np.ndarray) -> np.ndarray: 7 | A = A.astype(float) 8 | b = b.astype(float) 9 | n = A.shape[0] 10 | augmented = np.hstack((A, b.reshape(-1, 1))) 11 | for i in range(n): 12 | max_row = np.argmax(np.abs(augmented[i:, i])) + i 13 | pivot = augmented[max_row, i] 14 | if np.isclose(pivot, 0.0, atol=1e-12): 15 | raise ValueError("Matrix is singular or nearly singular.") 16 | if max_row != i: 17 | augmented[[i, max_row]] = augmented[[max_row, i]] 18 | for j in range(i + 1, n): 19 | factor = augmented[j, i] / augmented[i, i] 20 | augmented[j] -= factor * augmented[i] 21 | x = np.zeros(n) 22 | for i in range(n - 1, -1, -1): 23 | x[i] = ( 24 | augmented[i, -1] - np.dot(augmented[i, i + 1 : n], x[i + 1 : n]) 25 | ) / augmented[i, i] 26 | return x 27 | 28 | 29 | def least_squares(A: np.ndarray, b: np.ndarray) -> np.ndarray: 30 | if A.ndim != 2 or A.shape[0] < A.shape[1]: 31 | raise ValueError("Matrix A must have at least as many rows as columns.") 32 | if A.shape[0] != b.shape[0]: 33 | raise ValueError("The number of rows in A must match the size of vector b.") 34 | if np.any(np.all(A == 0, axis=1)): 35 | raise ValueError("Matrix A contains zero rows.") 36 | if np.linalg.matrix_rank(A) < A.shape[1]: 37 | raise ValueError("Matrix A does not have full column rank.") 38 | A = A.astype(float) 39 | b = b.astype(float) 40 | At = A.T 41 | AtA = At @ A 42 | Atb = At @ b 43 | try: 44 | U, s, Vt = np.linalg.svd(AtA, full_matrices=False) 45 | tol = max(AtA.shape) * np.finfo(s.dtype).eps * max(s) 46 | s_inv = np.diag([1 / si if si > tol else 0 for si in s]) 47 | AtA_pseudo_inv = Vt.T @ s_inv @ U.T 48 | x = 
AtA_pseudo_inv @ Atb 49 | except LinAlgError: 50 | raise ValueError( 51 | "Singular matrix encountered during least squares computation." 52 | ) 53 | return x 54 | -------------------------------------------------------------------------------- /src/6_regression/least_squares/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/least_squares/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/linear_interpolation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/linear_interpolation/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/examples/example.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from linear_interpolation import linear_interpolation 4 | 5 | 6 | def main(): 7 | x = np.array([-1, 4, 7]) 8 | y = np.array([1, 5, 2]) 9 | 10 | point_1 = 1 11 | point_2 = 4.5 12 | 13 | plt.figure(figsize=(10, 8)) 14 | plt.plot(x, y) 15 | plt.plot(point_1, linear_interpolation(x, y, point_1), "go") 16 | plt.plot(point_2, linear_interpolation(x, y, point_2), "go") 17 | plt.title("Linear Interpolation at x_1 = {} and x_2 = {}".format(point_1, point_2)) 18 | plt.xlabel("x") 19 | plt.ylabel("y") 20 | plt.show() 21 | 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/linear_interpolation/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/implementation/linear_interpolation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Tuple 3 | 4 | 5 | def linear_interpolation(x_data: np.ndarray, y_data: np.ndarray, point: float) -> float: 6 | if x_data.shape[0] != y_data.shape[0]: 7 | raise ValueError("X and Y vectors must have equal number of elements.") 8 | if x_data.shape[0] < 2: 9 | raise ValueError("X and Y vectors must contain at least 2 elements.") 10 | if not np.all(np.diff(x_data) > 0): 11 | raise ValueError("X data must be strictly increasing.") 12 | 13 | if point == x_data[0]: 14 | return y_data[0] 15 | if point == x_data[-1]: 16 | return y_data[-1] 17 | 18 | idx = np.searchsorted(x_data, point) - 1 19 | if idx < 0 or idx >= len(x_data) - 1: 20 | raise ValueError("Point is outside the interpolation range.") 21 | 22 | x1, x2 = x_data[idx], x_data[idx + 1] 
23 | y1, y2 = y_data[idx], y_data[idx + 1] 24 | return y1 + (y2 - y1) * (point - x1) / (x2 - x1) 25 | -------------------------------------------------------------------------------- /src/6_regression/linear_interpolation/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/linear_interpolation/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/newton_polynomial/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def divided_differences(x, y): 6 | """ 7 | Compute the divided difference table for Newton's Interpolation. 8 | 9 | Parameters: 10 | x (array): Array of x data points. 11 | y (array): Array of y data points. 12 | 13 | Returns: 14 | table (array): Divided differences table. 15 | """ 16 | n = len(x) 17 | table = np.zeros((n, n)) 18 | table[:, 0] = y 19 | 20 | for j in range(1, n): 21 | for i in range(n - j): 22 | table[i][j] = (table[i + 1][j - 1] - table[i][j - 1]) / (x[i + j] - x[i]) 23 | 24 | return table[0] 25 | 26 | 27 | def newton_polynomial(x, coefficients, x_data): 28 | """ 29 | Evaluate Newton's Interpolation Polynomial at given points. 30 | 31 | Parameters: 32 | x (float or array): Points to evaluate the polynomial. 33 | coefficients (array): Divided difference coefficients. 34 | x_data (array): x data points. 35 | 36 | Returns: 37 | result (float or array): Evaluated polynomial. 38 | """ 39 | n = len(coefficients) 40 | result = coefficients[0] 41 | product_term = 1.0 42 | 43 | for i in range(1, n): 44 | product_term *= x - x_data[i - 1] 45 | result += coefficients[i] * product_term 46 | 47 | return result 48 | 49 | 50 | def plot_newton_interpolation(x_data, y_data): 51 | """ 52 | Plot Newton's Interpolation Polynomial alongside given data points. 53 | 54 | Parameters: 55 | x_data (array): x data points. 56 | y_data (array): y data points. 
57 | """ 58 | # Compute divided differences 59 | coefficients = divided_differences(x_data, y_data) 60 | 61 | # Generate fine x values for smooth curve 62 | x_fine = np.linspace(min(x_data), max(x_data), 500) 63 | y_fine = newton_polynomial(x_fine, coefficients, x_data) 64 | 65 | # Plot data points and interpolated polynomial 66 | plt.figure(figsize=(8, 6)) 67 | plt.scatter(x_data, y_data, color="red", label="Data Points") 68 | plt.plot(x_fine, y_fine, label="Newton's Polynomial", color="blue") 69 | plt.title("Newton's Interpolation Polynomial") 70 | plt.xlabel("x") 71 | plt.ylabel("y") 72 | plt.legend() 73 | plt.grid(True) 74 | plt.show() 75 | 76 | 77 | # Example data points 78 | x_data = np.array([1, 2, 3]) 79 | y_data = np.array([2, 3, 5]) 80 | 81 | # Plot Newton's Interpolation 82 | plot_newton_interpolation(x_data, y_data) 83 | -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/newton_polynomial/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/implementation/newton_polynomial.py: -------------------------------------------------------------------------------- 1 | # newton_polynomial.py 2 | import numpy as np 3 | from typing import Union 4 | 5 | 6 | def newton_polynomial(x_data: np.ndarray, y_data: np.ndarray, point: float) -> float: 7 | if x_data.shape[0] != y_data.shape[0]: 8 | raise ValueError("X and Y vectors must have equal number of elements.") 9 | if x_data.shape[0] < 2: 10 | raise ValueError("X and Y vectors must contain at least two elements.") 11 | if len(np.unique(x_data)) != x_data.shape[0]: 12 | raise ValueError("X data must contain unique values.") 13 | 14 | n = x_data.shape[0] 15 | divided_diff = np.copy(y_data).astype(float) 16 | 17 | for j in range(1, n): 18 | divided_diff[j:n] = (divided_diff[j:n] - divided_diff[j - 1 : n - 1]) / ( 19 | x_data[j:n] - x_data[0 : n - j] 20 | ) 21 | 22 | result = divided_diff[0] 23 | product = 1.0 24 | for i in range(1, n): 25 | product *= point - x_data[i - 1] 26 | result += divided_diff[i] * product 27 | 28 | return result 29 | -------------------------------------------------------------------------------- /src/6_regression/newton_polynomial/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/newton_polynomial/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/polynomial_regression/__init__.py -------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/polynomial_regression/examples/__init__.py 
-------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/examples/example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fit a third order polynomial to fit a curve to the given data point: 3 | 4 | A(-9, -2) 5 | B(-5, 3) 6 | C(-2.5, 0) 7 | D(4, 5) 8 | F(7, 11) 9 | """ 10 | import numpy as np 11 | import matplotlib.pyplot as plt 12 | import scipy.interpolate 13 | 14 | 15 | # Define the data 16 | x = np.array([-9, -5, -2.5, 4, 7]) 17 | y = np.array([-2, 3, 0, 5, 11]) 18 | 19 | # Plot the raw data 20 | plt.scatter(x, y, s=200, marker="P", color="black", label="Raw data") 21 | plt.xlabel("x") 22 | plt.ylabel("y") 23 | 24 | # Create the domain 25 | x_domain = np.linspace(1.1 * min(x), 1.1 * max(x), 100) 26 | 27 | # Create the model and show the predictions for djeada implementation 28 | 29 | # Create the model and show the predictions for numpy implementation 30 | model = np.poly1d(np.polyfit(x, y, 3)) 31 | y_prediction = model(x_domain) 32 | plt.plot(x_domain, y_prediction, label="Polynomial regression numpy") 33 | 34 | # Add a legend and show the plot 35 | plt.legend(loc="best") 36 | plt.show() 37 | -------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/polynomial_regression/implementation/__init__.py -------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/implementation/polynomial_regression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def solve_linear_system(A: np.ndarray, b: np.ndarray) -> np.ndarray: 5 | n = A.shape[0] 6 | augmented = np.hstack((A.astype(float), b.reshape(-1, 1).astype(float))) 7 | for i in range(n): 8 | pivot = np.argmax(np.abs(augmented[i:, i])) + i 9 | if np.isclose(augmented[pivot, i], 0.0): 10 | raise ValueError("Matrix is singular or nearly singular.") 11 | if pivot != i: 12 | augmented[[i, pivot]] = augmented[[pivot, i]] 13 | augmented[i] = augmented[i] / augmented[i, i] 14 | for j in range(i + 1, n): 15 | augmented[j] -= augmented[j, i] * augmented[i] 16 | x = np.zeros(n) 17 | for i in range(n - 1, -1, -1): 18 | x[i] = augmented[i, -1] - np.dot(augmented[i, i + 1 : n], x[i + 1 : n]) 19 | return x 20 | 21 | 22 | def polynomial_regression( 23 | x_data: np.ndarray, y_data: np.ndarray, degree: int 24 | ) -> np.ndarray: 25 | if x_data.shape[0] != y_data.shape[0]: 26 | raise ValueError("X and Y vectors must have equal number of elements.") 27 | if x_data.shape[0] < degree + 1: 28 | raise ValueError("Number of data points must be at least degree + 1.") 29 | if degree < 0: 30 | raise ValueError("Degree must be non-negative.") 31 | V = np.vander(x_data, N=degree + 1, increasing=True) 32 | AtA = V.T @ V 33 | AtY = V.T @ y_data 34 | coefficients = solve_linear_system(AtA, AtY) 35 | return coefficients 36 | -------------------------------------------------------------------------------- /src/6_regression/polynomial_regression/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/polynomial_regression/tests/__init__.py -------------------------------------------------------------------------------- /src/6_regression/regression_methods_comparison/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/regression_methods_comparison/__init__.py -------------------------------------------------------------------------------- /src/6_regression/regression_methods_comparison/temp.md: -------------------------------------------------------------------------------- 1 | Consider the list of x, y values: 2 | -------------------------------------------------------------------------------- /src/6_regression/thin_plate_spline_interpolation/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/6_regression/thin_plate_spline_interpolation/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/thin_plate_spline_interpolation/examples/__init__.py -------------------------------------------------------------------------------- /src/6_regression/thin_plate_spline_interpolation/examples/plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from scipy.interpolate import Rbf 4 | 5 | # Generate random 3D points (arbitrary) 6 | np.random.seed(42) # For reproducibility 7 | n_points = 15 8 | x = np.random.rand(n_points) * 10 # Arbitrary X-axis values 9 | y = np.random.rand(n_points) * 10 # Arbitrary Y-axis values 10 | z = np.random.rand(n_points) * 50 # Arbitrary Z-axis values 11 | 12 | # Create thin plate spline interpolator 13 | rbf = Rbf(x, y, z, function="thin_plate", smooth=0.1) 14 | 15 | # Create grid for plotting the surface 16 | x_grid = np.linspace(min(x), max(x), 50) 17 | y_grid = np.linspace(min(y), max(y), 50) 18 | X, Y = np.meshgrid(x_grid, y_grid) 19 | Z = rbf(X, Y) # Interpolated surface 20 | 21 | # Plotting the TPS surface and data points 22 | fig = plt.figure(figsize=(10, 7)) 23 | ax = fig.add_subplot(111, projection="3d") 24 | 25 | # Surface plot 26 | ax.plot_surface( 27 | X, Y, Z, rstride=1, cstride=1, cmap="viridis", alpha=0.8, edgecolor="none" 28 | ) 29 | 30 | # Original data points 31 | ax.scatter(x, y, z, color="red", s=50, label="Data Points") 32 | 33 | # Customize the plot 34 | ax.set_xlabel("X-axis") 35 | ax.set_ylabel("Y-axis") 36 | ax.set_zlabel("Z-axis") 37 | ax.set_title("3D Surface Fitting with Thin Plate Spline Interpolation") 38 | ax.legend() 39 | 40 | # Show the plot 41 | plt.show() 42 | -------------------------------------------------------------------------------- /src/6_regression/thin_plate_spline_interpolation/implementation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/thin_plate_spline_interpolation/implementation/__init__.py -------------------------------------------------------------------------------- 
/src/6_regression/thin_plate_spline_interpolation/implementation/thin_plate_spline_interpolation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Tuple 3 | 4 | 5 | def thin_plate_spline_interpolation( 6 | x_data: np.ndarray, 7 | y_data: np.ndarray, 8 | z_data: np.ndarray, 9 | point: Tuple[float, float], 10 | ) -> float: 11 | if z_data.size == x_data.size * y_data.size and z_data.size > x_data.size: 12 | X, Y = np.meshgrid(x_data, y_data) 13 | x_data = X.ravel() 14 | y_data = Y.ravel() 15 | z_data = z_data.ravel() 16 | if x_data.shape[0] != y_data.shape[0] or x_data.shape[0] != z_data.shape[0]: 17 | raise ValueError("X, Y, and Z vectors must have equal number of elements.") 18 | if x_data.shape[0] < 3: 19 | raise ValueError( 20 | "At least three points are required for thin plate spline interpolation." 21 | ) 22 | if len(set(zip(x_data, y_data))) != x_data.shape[0]: 23 | raise ValueError("Duplicate (x, y) points detected.") 24 | N = x_data.shape[0] 25 | K = np.zeros((N, N)) 26 | for i in range(N): 27 | for j in range(N): 28 | if i != j: 29 | r = np.hypot(x_data[i] - x_data[j], y_data[i] - y_data[j]) 30 | if r > 0: 31 | K[i, j] = (r ** 2) * np.log(r ** 2) 32 | P = np.vstack((np.ones(N), x_data, y_data)).T 33 | system = np.vstack((np.hstack((K, P)), np.hstack((P.T, np.zeros((3, 3)))))) 34 | b = np.hstack((z_data, np.zeros(3))) 35 | coefficients = np.linalg.lstsq(system, b, rcond=None)[0] 36 | w = coefficients[:N] 37 | a = coefficients[N:] 38 | x, y = point 39 | U = np.zeros(N) 40 | for i in range(N): 41 | r = np.hypot(x - x_data[i], y - y_data[i]) 42 | if r > 0: 43 | U[i] = (r ** 2) * np.log(r ** 2) 44 | return a[0] + a[1] * x + a[2] * y + np.dot(w, U) 45 | -------------------------------------------------------------------------------- /src/6_regression/thin_plate_spline_interpolation/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/6_regression/thin_plate_spline_interpolation/tests/__init__.py -------------------------------------------------------------------------------- /src/7_ordinary_differential_equations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/__init__.py -------------------------------------------------------------------------------- /src/7_ordinary_differential_equations/euler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/euler/__init__.py -------------------------------------------------------------------------------- /src/7_ordinary_differential_equations/euler/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/euler/examples/__init__.py -------------------------------------------------------------------------------- /src/7_ordinary_differential_equations/euler/examples/temp.md: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/euler/examples/temp.md
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/euler/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/euler/implementation/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/euler/implementation/euler.py:
--------------------------------------------------------------------------------
# euler_solver.py
import numpy as np
from typing import Callable, Tuple


def euler_method(
    f: Callable[[float, np.ndarray], np.ndarray],
    t0: float,
    y0: np.ndarray,
    t_end: float,
    h: float,
) -> Tuple[np.ndarray, np.ndarray]:
    if h <= 0:
        raise ValueError("Step size h must be positive.")
    if t_end < t0:
        raise ValueError("t_end must be greater than or equal to t0.")
    n_steps = int(np.ceil((t_end - t0) / h))
    t = np.linspace(t0, t0 + n_steps * h, n_steps + 1)
    y = np.zeros((n_steps + 1, y0.size))
    y[0] = y0
    for i in range(n_steps):
        y[i + 1] = y[i] + h * f(t[i], y[i])
    return t, y
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/euler/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/euler/tests/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/heun/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/heun/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/heun/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/heun/implementation/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/heun/implementation/heun.py:
--------------------------------------------------------------------------------
# heun_solver.py
import numpy as np
from typing import Callable, Tuple


def heun_method(
    f: Callable[[float, np.ndarray], np.ndarray],
    t0: float,
    y0: np.ndarray,
    t_end: float,
    h: float,
) -> Tuple[np.ndarray, np.ndarray]:
    if h <= 0:
        raise ValueError("Step size h must be positive.")
    if t_end < t0:
        raise ValueError("t_end must be greater than or equal to t0.")
    n_steps = int(np.ceil((t_end - t0) / h))
    # End the grid at t0 + n_steps * h so the grid spacing matches the step
    # size h used in the update below (consistent with euler.py; t_end is
    # overshot when (t_end - t0) / h is not an integer).
    t = np.linspace(t0, t0 + n_steps * h, n_steps + 1)
    y = np.zeros((n_steps + 1, y0.size))
    y[0] = y0
    for i in range(n_steps):
        ti = t[i]
        yi = y[i]
        k1 = f(ti, yi)                       # slope at the left endpoint
        y_predict = yi + h * k1              # Euler predictor step
        k2 = f(ti + h, y_predict)            # slope at the predicted endpoint
        y[i + 1] = yi + (h / 2) * (k1 + k2)  # trapezoidal corrector
    return t, y
--------------------------------------------------------------------------------
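euler_method and heun_method share the same call signature, which makes an empirical accuracy comparison straightforward. A minimal sketch, assuming both functions above are in scope (e.g. imported or pasted into one session); the test problem y' = -y with y(0) = 1 and exact solution exp(-t) is chosen here purely for illustration. Halving h should roughly halve Euler's error at t = 1 (first order) and quarter Heun's (second order):

import numpy as np

f = lambda t, y: -y   # test problem y' = -y, exact solution y(t) = exp(-t)
y0 = np.array([1.0])
exact = np.exp(-1.0)  # exact value at t = 1

for h in (0.1, 0.05, 0.025):
    _, y_euler = euler_method(f, 0.0, y0, 1.0, h)  # assumed in scope (see above)
    _, y_heun = heun_method(f, 0.0, y0, 1.0, h)
    print(
        f"h={h:<6} Euler error={abs(y_euler[-1, 0] - exact):.2e}  "
        f"Heun error={abs(y_heun[-1, 0] - exact):.2e}"
    )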
/src/7_ordinary_differential_equations/heun/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/heun/tests/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/picard/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/picard/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/picard/implementation/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/picard/implementation/picard.py:
--------------------------------------------------------------------------------
import numpy as np
from typing import Callable, Tuple


def picard_method(
    f: Callable[[float, np.ndarray], np.ndarray],
    t0: float,
    y0: np.ndarray,
    t_end: float,
    h: float,
    tol: float = 1e-6,
    max_iterations: int = 1000,
) -> Tuple[np.ndarray, np.ndarray]:
    if h <= 0:
        raise ValueError("Step size h must be positive.")
    if t_end < t0:
        raise ValueError("t_end must be greater than or equal to t0.")
    n_steps = int(np.ceil((t_end - t0) / h))
    t = np.linspace(t0, t0 + n_steps * h, n_steps + 1)
    y = np.zeros((n_steps + 1, y0.size))
    y[0] = y0
    for i in range(n_steps):
        y_prev = y[i]
        # Solve the implicit midpoint update
        #     y_next = y_prev + h * f(t_i + h/2, (y_prev + y_next) / 2)
        # by Picard (fixed-point) iteration, starting from y_next = y_prev.
        y_next = y_prev.copy()
        for _ in range(max_iterations):
            y_new = y_prev + h * f(t[i] + 0.5 * h, 0.5 * (y_prev + y_next))
            if np.linalg.norm(y_new - y_next, ord=np.inf) < tol:
                y_next = y_new
                break
            y_next = y_new
        else:
            raise ValueError(f"Picard iteration did not converge at step {i}.")
        y[i + 1] = y_next
    return t, y.squeeze()  # Flatten the output if it is single-dimensional
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/picard/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/picard/tests/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/runge_kutta/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/runge_kutta/examples/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/examples/temp.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/runge_kutta/examples/temp.md
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/implementation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/runge_kutta/implementation/__init__.py
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/implementation/runge_kutta.py:
--------------------------------------------------------------------------------
import numpy as np
from typing import Callable, Tuple


def runge_kutta_4(
    f: Callable[[float, np.ndarray], np.ndarray],
    t0: float,
    y0: np.ndarray,
    t_end: float,
    h: float,
) -> Tuple[np.ndarray, np.ndarray]:
    if h <= 0:
        raise ValueError("Step size h must be positive.")
    if t_end < t0:
        raise ValueError("t_end must be greater than or equal to t0.")
    n_steps = int(np.ceil((t_end - t0) / h))
    t = np.linspace(t0, t0 + n_steps * h, n_steps + 1)
    y = np.zeros((n_steps + 1, y0.size))
    y[0] = y0
    for i in range(n_steps):
        ti = t[i]
        yi = y[i]
        # Classical fourth-order Runge-Kutta stages: slopes at the left
        # endpoint, two midpoint estimates, and the right endpoint.
        k1 = f(ti, yi)
        k2 = f(ti + h / 2, yi + h * k1 / 2)
        k3 = f(ti + h / 2, yi + h * k2 / 2)
        k4 = f(ti + h, yi + h * k3)
        # Weighted average of the four slopes (weights 1, 2, 2, 1).
        y[i + 1] = yi + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
    return t, y.squeeze()
--------------------------------------------------------------------------------
/src/7_ordinary_differential_equations/runge_kutta/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/7_ordinary_differential_equations/runge_kutta/tests/__init__.py
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djeada/Numerical-Methods/68c5a1886c59877f77dd9a266b5f5387bb82f677/src/__init__.py
--------------------------------------------------------------------------------
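picard.py above applies fixed-point (Picard) iteration, at each step, to the implicit midpoint update y_next = y_prev + h*f(t + h/2, (y_prev + y_next)/2). For a right-hand side with Lipschitz constant L that iteration map is a contraction only when h*L/2 < 1, so too large a step makes the inner loop fail to converge and raise the documented ValueError. A minimal sketch, assuming picard_method is in scope; the test problem y' = -2y is chosen here for illustration:

import numpy as np

f = lambda t, y: -2.0 * y  # Lipschitz constant L = 2, so we need h < 1
y0 = np.array([1.0])

t, y = picard_method(f, 0.0, y0, 2.0, h=0.1)  # h*L/2 = 0.1: contraction
print(y[-1], np.exp(-4.0))                    # ~0.0181 vs exact ~0.0183

try:
    # h*L/2 = 1.5: the inner iteration diverges (numpy may emit overflow
    # warnings as the iterates blow up) and the solver raises ValueError.
    picard_method(f, 0.0, y0, 2.0, h=1.5)
except ValueError as error:
    print(error)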
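Finally, a quick order check on runge_kutta_4, again a sketch assuming the function above is in scope and using the illustrative problem y' = -y: since the method is fourth order, halving h should shrink the error at t = 1 by roughly a factor of 2^4 = 16:

import numpy as np

f = lambda t, y: -y
y0 = np.array([1.0])
exact = np.exp(-1.0)

errors = []
for h in (0.2, 0.1, 0.05):
    _, y = runge_kutta_4(f, 0.0, y0, 1.0, h)  # assumed in scope (see above)
    errors.append(abs(y[-1] - exact))         # y is squeezed to 1-D here

print([f"{e:.2e}" for e in errors])
print([errors[i] / errors[i + 1] for i in range(2)])  # both ratios ~16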