├── julia.pdf ├── midterm.pdf ├── 05_krylov.pdf ├── 10_summary.pdf ├── 04_sparse_lu.pdf ├── assignment_1.pdf ├── assignment_2.pdf ├── assignment_3.pdf ├── 00_introduction.pdf ├── 06_runge_kutta.pdf ├── 08_monte_carlo.pdf ├── 01_big_o_notation.pdf ├── tutorial_01_big_o.pdf ├── tutorial_05_krylov.pdf ├── 03_finite_differences.pdf ├── 07_molecular_dynamics.pdf ├── tutorial_04_sparse_lu.pdf ├── 02_nonlinear_equations.pdf ├── 09_stochastic_sir_model.pdf ├── tutorial_08_monte_carlo.pdf ├── 01_big_o_notation_updated.pdf ├── tutorial_05_krylov_solutions.pdf ├── tutorial_02_nonlinear_equations.pdf ├── tutorial_03_finite_differences.pdf ├── tutorial_09_sampling_theorems.pdf ├── tutorial_06_explicit_runge_kutta.pdf ├── tutorial_07_implicit_runge_kutta.pdf ├── tutorial_06_explicit_runge_kutta_solution.pdf ├── tutorial_07_implicit_runge_kutta_solution.pdf ├── README.md ├── tutorial_08_monte_carlo.jl ├── tutorial_08_monte_carlo_solutions.jl ├── Project.toml ├── tutorial_04_sparse_lu.jl ├── assignment_1.jl ├── 00_introduction.jl ├── assignment_2.jl ├── tutorial_02_nonlinear_equations_solution.jl ├── tutorial_03_finite_differences_solution.jl ├── tutorial_03_finite_differences.jl ├── tutorial_06_explicit_runge_kutta_solution.jl ├── tutorial_06_explicit_runge_kutta.jl ├── tutorial_09_sampling_theorems.jl ├── tutorial_01_big_o_solution.jl ├── tutorial_07_implicit_runge_kutta_solution.jl ├── tutorial_07_implicit_runge_kutta.jl ├── assignment_3.jl ├── midterm.jl ├── 03_finite_differences.jl ├── tutorial_09_sampling_theorems_solution.jl ├── 07_molecular_dynamics.jl ├── 04_sparse_lu.jl ├── 01_big_o_notation_updated.jl ├── 01_big_o_notation.jl ├── 02_nonlinear_equations.jl ├── 05_krylov.jl ├── 06_runge_kutta.jl ├── 08_monte_carlo.jl ├── 09_stochastic_sir_model.jl └── Manifest.toml /julia.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/julia.pdf -------------------------------------------------------------------------------- /midterm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/midterm.pdf -------------------------------------------------------------------------------- /05_krylov.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/05_krylov.pdf -------------------------------------------------------------------------------- /10_summary.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/10_summary.pdf -------------------------------------------------------------------------------- /04_sparse_lu.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/04_sparse_lu.pdf -------------------------------------------------------------------------------- /assignment_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/assignment_1.pdf -------------------------------------------------------------------------------- /assignment_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/assignment_2.pdf 
-------------------------------------------------------------------------------- /assignment_3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/assignment_3.pdf -------------------------------------------------------------------------------- /00_introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/00_introduction.pdf -------------------------------------------------------------------------------- /06_runge_kutta.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/06_runge_kutta.pdf -------------------------------------------------------------------------------- /08_monte_carlo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/08_monte_carlo.pdf -------------------------------------------------------------------------------- /01_big_o_notation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/01_big_o_notation.pdf -------------------------------------------------------------------------------- /tutorial_01_big_o.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_01_big_o.pdf -------------------------------------------------------------------------------- /tutorial_05_krylov.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_05_krylov.pdf -------------------------------------------------------------------------------- /03_finite_differences.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/03_finite_differences.pdf -------------------------------------------------------------------------------- /07_molecular_dynamics.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/07_molecular_dynamics.pdf -------------------------------------------------------------------------------- /tutorial_04_sparse_lu.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_04_sparse_lu.pdf -------------------------------------------------------------------------------- /02_nonlinear_equations.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/02_nonlinear_equations.pdf -------------------------------------------------------------------------------- /09_stochastic_sir_model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/09_stochastic_sir_model.pdf -------------------------------------------------------------------------------- /tutorial_08_monte_carlo.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_08_monte_carlo.pdf -------------------------------------------------------------------------------- /01_big_o_notation_updated.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/01_big_o_notation_updated.pdf -------------------------------------------------------------------------------- /tutorial_05_krylov_solutions.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_05_krylov_solutions.pdf -------------------------------------------------------------------------------- /tutorial_02_nonlinear_equations.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_02_nonlinear_equations.pdf -------------------------------------------------------------------------------- /tutorial_03_finite_differences.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_03_finite_differences.pdf -------------------------------------------------------------------------------- /tutorial_09_sampling_theorems.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_09_sampling_theorems.pdf -------------------------------------------------------------------------------- /tutorial_06_explicit_runge_kutta.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_06_explicit_runge_kutta.pdf -------------------------------------------------------------------------------- /tutorial_07_implicit_runge_kutta.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_07_implicit_runge_kutta.pdf -------------------------------------------------------------------------------- /tutorial_06_explicit_runge_kutta_solution.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_06_explicit_runge_kutta_solution.pdf -------------------------------------------------------------------------------- /tutorial_07_implicit_runge_kutta_solution.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ettersi/NumericalAnalysisII/HEAD/tutorial_07_implicit_runge_kutta_solution.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MA3227 Numerical Analysis II 2 | 3 | Lecture slides and homework assignments for MA3227 Numerical Analysis II at the National University of Singapore. 4 | 5 | TeX files are available upon request. -------------------------------------------------------------------------------- /tutorial_08_monte_carlo.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | 3 | function compute_pi(N) 4 | # TODO: Your code here! 5 | end 6 | 7 | function convergence() 8 | # TODO: Your code here! 
9 | end 10 | 11 | 12 | -------------------------------------------------------------------------------- /tutorial_08_monte_carlo_solutions.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | 3 | function compute_pi(N) 4 | r = 0 5 | for i = 1:N 6 | r += ( rand()^2 + rand()^2 < 1 ) 7 | end 8 | return 4r/N 9 | end 10 | 11 | function convergence() 12 | N = round.(Int,10.0.^LinRange(0,5,100)) 13 | q = compute_pi.(N) 14 | clf() 15 | loglog(N,N.^(-1/2), "k--") 16 | loglog(N, abs.(q.-π)) 17 | xlabel(L"N") 18 | ylabel("error") 19 | display(gcf()) 20 | end 21 | -------------------------------------------------------------------------------- /Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 3 | Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" 4 | DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" 5 | Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" 6 | FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 7 | IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" 8 | PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" 9 | PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee" 10 | Roots = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" 11 | SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" 12 | StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" 13 | -------------------------------------------------------------------------------- /tutorial_04_sparse_lu.jl: -------------------------------------------------------------------------------- 1 | using Random 2 | using SparseArrays 3 | using LinearAlgebra 4 | 5 | function path_theorems() 6 | Random.seed!(1) 7 | # ^ Resets the random number generator. This ensures we get the same random 8 | # matrix each time we run this function. You can create additional exercise 9 | # material by changing the above seed. 10 | 11 | A = Matrix(5I + sprand(5,5,0.25)) 12 | 13 | println("Sparsity pattern of A:") 14 | display(2 .* (A .!= 0)) 15 | println() 16 | 17 | println("Sparsity pattern of A^2:") 18 | display((A .!= 0) .+ (A^2 .!= 0)) 19 | println() 20 | 21 | println("Sparsity pattern of inv(A):") 22 | display((A .!= 0) .+ (inv(A) .!= 0)) 23 | println() 24 | 25 | println("Sparsity pattern of LU factorisation:") 26 | L,U = lu(A, Val(false)) 27 | display((A .!= 0) .+ (L+U .!= 0)) 28 | println() 29 | println("(2 = original nonzero, 1 = fill-in)") 30 | end 31 | -------------------------------------------------------------------------------- /assignment_1.jl: -------------------------------------------------------------------------------- 1 | function newton_inv(y) 2 | x = 24/17 - (8/17) * y 3 | for k = 1:TODO # Your code here! 4 | x = x * (2 - y*x) 5 | end 6 | return x 7 | end 8 | 9 | function bisection_inv(y) 10 | a,b = TODO # Your code here! 11 | for k = 1:TODO # Your code here! 
12 | m = (a+b)/2 13 | if y*m > 1 14 | a,b = a,m 15 | else 16 | a,b = m,b 17 | end 18 | end 19 | return (a+b)/2 20 | end 21 | 22 | using Printf 23 | using BenchmarkTools 24 | 25 | # Increase the number of benchmarking samples to get more accurate runtimes 26 | BenchmarkTools.DEFAULT_PARAMETERS.samples = 1_000_000 27 | 28 | function benchmark() 29 | @printf(" inv runtime: %6.2f nanoseconds\n", @belapsed( inv($(Ref(1.0))[]))*1e9) 30 | @printf(" Newton runtime: %6.2f nanoseconds\n", @belapsed( newton_inv($(Ref(1.0))[]))*1e9) 31 | @printf("Bisection runtime: %6.2f nanoseconds\n", @belapsed(bisection_inv($(Ref(1.0))[]))*1e9) 32 | end -------------------------------------------------------------------------------- /00_introduction.jl: -------------------------------------------------------------------------------- 1 | # Type `] activate .; instantiate` in the REPL to install these packages 2 | using PyPlot 3 | using Distributions 4 | 5 | function run_scenario(p1,p2) 6 | n = [1] 7 | while 0 < n[end] < 100 8 | push!(n, 9 | n[end] 10 | + rand(Binomial(n[end], p1)) # newly infected 11 | - rand(Binomial(n[end], p2)) # recovered 12 | ) 13 | end 14 | return n 15 | end 16 | 17 | function pandemic() 18 | p1 = 0.5 19 | p2 = 0.45 20 | n_samples = 10 21 | # n_samples = 10_000 22 | 23 | clf() # "clf" == "clear figure" 24 | count = 0 25 | for i = 1:n_samples 26 | n = run_scenario(p1,p2) 27 | if i < 20 # Don't plot an excessive number of scenarios 28 | plot(n, "o-", ms=4) 29 | end 30 | count += (n[end] > 0) 31 | end 32 | xlabel("Day") 33 | ylabel("Active cases") 34 | display(gcf()) # Make figure appear in VSCode. "gcf" == "get current figure" 35 | println("Estimated probability for pandemic: ", count/n_samples) 36 | end -------------------------------------------------------------------------------- /assignment_2.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using Roots # Install by typing `] add Roots` 3 | 4 | function simpson_step(f,y0,t) 5 | # TODO: Your code here! 6 | end 7 | 8 | function implicit_simpson_step(f,y0,t) 9 | # TODO: Your code here! 
10 | end 11 | 12 | function propagate(f,y0,T,n,step) 13 | y = Vector{typeof(y0)}(undef,n) 14 | y[1] = y0 15 | for i = 2:n 16 | y[i] = step(f,y[i-1],T/(n-1)) 17 | end 18 | return y 19 | end 20 | 21 | function convergence() 22 | f = y->y^2 23 | y0 = 1.0 24 | T = 0.5 25 | y = t-> y0/(1-y0*t) 26 | 27 | clf() 28 | n = round.(Int, 10.0.^LinRange(1,3,30)) 29 | for (name,step) in ( 30 | ("explicit", simpson_step), 31 | # ("implict", implicit_simpson_step), 32 | ) 33 | error = [begin 34 | ỹ = propagate(f,y0,T,n, step) 35 | abs(y(T) - ỹ[end]) 36 | end for n in n] 37 | loglog(n, error, label=name) 38 | end 39 | loglog(n, inv.(n).^2, "k--") 40 | legend(loc="best") 41 | xlabel(L"n") 42 | ylabel(L"|\tilde y(T) - y(T)|") 43 | display(gcf()) 44 | end 45 | -------------------------------------------------------------------------------- /tutorial_02_nonlinear_equations_solution.jl: -------------------------------------------------------------------------------- 1 | ####################### 2 | # Bisection vs Newton 3 | 4 | using BenchmarkTools 5 | using PyPlot 6 | using Roots 7 | 8 | f(x) = 4 - 3x + 2x^2 - x^3 9 | df(x) = -3 + 4x - 3x^2 10 | 11 | function plot_function() 12 | x = LinRange(1,2,1000) 13 | clf() 14 | plot(x[[1,end]], [0,0], "k-", lw=0.5) 15 | plot(x, f.(x)) 16 | display(gcf()) 17 | end 18 | 19 | function find_root() 20 | x_bisect = find_zero(f, (1.0,2.0), Roots.Bisection()) 21 | x_Newton = find_zero((f,df), 1.0, Roots.Newton()) 22 | 23 | println("Bisection result: ", x_bisect) 24 | println(" Newton result: ", x_bisect) 25 | end 26 | 27 | 28 | 29 | ####################### 30 | # Complex square roots 31 | 32 | using LinearAlgebra 33 | using Printf 34 | using Test 35 | 36 | function square_root(w; print_error=false) 37 | f = x -> [ 38 | x[1]^2 - x[2]^2 - real(w), 39 | 2*x[1]*x[2] - imag(w) 40 | ] 41 | df = x-> [ 42 | 2x[1] -2x[2] 43 | 2x[2] 2x[1] 44 | ] 45 | 46 | x = [real(w),imag(w)] 47 | for k = 1:20 48 | if print_error 49 | @printf("error(k = %1.d) = %.2e\n", k, norm(f(x))) 50 | end 51 | 52 | if norm(f(x)) < 10*eps()*norm(x) 53 | return x[1] + x[2]*im 54 | end 55 | x -= df(x) \ f(x) 56 | end 57 | error("Newton's method did not converge. 
Final iterate is x = $(x[1] + x[2]*im).") 58 | end 59 | 60 | function test_square_root() 61 | @testset "square_root" begin 62 | @test square_root(1.0) == 1.0 63 | @test square_root(2.0) ≈ sqrt(2.0) 64 | @test square_root(1.0im) ≈ sqrt(1.0im) 65 | @test square_root(2.0im) ≈ sqrt(2.0im) 66 | @test square_root(1.0+1.0im) ≈ sqrt(1.0+1.0im) 67 | @test_throws Exception square_root(-1.0) 68 | end 69 | end 70 | 71 | function quadratic_convergence() 72 | square_root(2im, print_error=true) 73 | #= 74 | Output: 75 | error(k = 1) = 4.47e+00 76 | error(k = 2) = 1.25e+00 77 | error(k = 3) = 3.12e-01 78 | error(k = 4) = 1.28e-02 # ≈ 1e-1^2 79 | error(k = 5) = 2.05e-05 # ≈ 1e-2^2 80 | error(k = 6) = 5.24e-11 # ≈ 1e-5^2 81 | error(k = 7) = 0.00e+00 82 | =# 83 | end 84 | -------------------------------------------------------------------------------- /tutorial_03_finite_differences_solution.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | 4 | function laplacian(x) 5 | n = length(x) 6 | x = [0;x;1] 7 | d1 = 1.0./(x[2:end] .- x[1:end-1]) 8 | d2 = 2.0./(x[3:end] .- x[1:end-2]) 9 | return Tridiagonal( 10 | @.( d2[2:end] * d1[2:end-1] ), 11 | @.( -d2 * (d1[1:end-1] + d1[2:end]) ), 12 | @.( d2[1:end-1] * d1[2:end-1] ) 13 | ) 14 | end 15 | 16 | using Test 17 | function test_laplacian() 18 | @testset ("laplacian") begin 19 | @test laplacian([0.1,0.2]) ≈ [ 20 | -200 100 21 | 200/9 -25 22 | ] 23 | end 24 | end 25 | 26 | function plot_solution() 27 | # Problem parameters 28 | n = 10 29 | p = 2 # Power for grid biasing 30 | f = x -> 0.25 * x^(-3/2) 31 | u = x -> sqrt(x) - x 32 | 33 | clf() 34 | 35 | # Plot reference solution 36 | xx = LinRange(0,1,1000) 37 | plot(xx, u.(xx), "k-", label="exact solution") 38 | 39 | # Plot finite difference solutions 40 | for (grid,x) = ( 41 | ("uniform", LinRange(0,1,n+2)[2:end-1]), 42 | ("adaptive", LinRange(0,1,n+2)[2:end-1].^p), 43 | ) 44 | Δ = laplacian(x) 45 | ũ = -Δ\f.(x) 46 | plot([0;x;1], [0;ũ;0], "-o", label="$grid grid") 47 | end 48 | 49 | # Add finishing touches to the plot 50 | xlabel(L"x") 51 | legend() 52 | display(gcf()) 53 | end 54 | 55 | function convergence() 56 | # Problem parameters 57 | n = 2 .^ (1:14) 58 | p = 2 # Power for grid biasing 59 | f = x -> 0.25 * x^(-3/2) 60 | u = x -> sqrt(x) - x 61 | 62 | clf() 63 | 64 | # Plot reference lines 65 | loglog(n, n.^-(1/2), "k:", label=L"O(n^{-1/2})") 66 | loglog(n, n.^-2, "k--", label=L"O(n^{-2})") 67 | 68 | # Plot the convergence both for uniform and adaptive grids 69 | for (grid,xfun) = ( 70 | ("uniform", n->LinRange(0,1,n+2)[2:end-1]), 71 | ("adaptive", n->LinRange(0,1,n+2)[2:end-1].^p), 72 | ) 73 | errors = [begin 74 | x = xfun(n) 75 | Δ = laplacian(x) 76 | ũ = -Δ\f.(x) 77 | norm(ũ .- u.(x), Inf) 78 | end for n in n] 79 | loglog(n, errors, label="$grid grid") 80 | end 81 | 82 | # Add finishing touches to the plot 83 | xlabel(L"n") 84 | legend(frameon=false) 85 | display(gcf()) 86 | end -------------------------------------------------------------------------------- /tutorial_03_finite_differences.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | 4 | function laplacian(x) 5 | # The `x` passed to this function do not contain the boundary points `0` 6 | # and `1`. Computing the entries of the Laplacian will be easier once we 7 | # add these points. 8 | x = [0;x;1] 9 | 10 | # TODO: Your code here! 11 | 12 | return Tridiagonal( 13 | # TODO: Your code here! 
14 | ) 15 | end 16 | 17 | using Test 18 | function test_laplacian() 19 | @testset ("laplacian") begin 20 | @test laplacian([0.1,0.2]) ≈ [ 21 | -200 100 22 | 200/9 -25 23 | ] 24 | end 25 | end 26 | 27 | function plot_solution() 28 | # Problem parameters 29 | n = 10 30 | p = 2 # Power for grid biasing 31 | f = x -> 0.25 * x^(-3/2) 32 | u = x -> sqrt(x) - x 33 | 34 | clf() 35 | 36 | # Plot reference solution 37 | xx = LinRange(0,1,1000) 38 | plot(xx, u.(xx), "k-", label="exact solution") 39 | 40 | # Plot finite difference solutions 41 | for (grid,x) = ( 42 | ("uniform", LinRange(0,1,n+2)[2:end-1]), 43 | ("adaptive", LinRange(0,1,n+2)[2:end-1].^p), 44 | ) 45 | Δ = laplacian(x) 46 | ũ = -Δ\f.(x) 47 | plot([0;x;1], [0;ũ;0], "-o", label="$grid grid") 48 | end 49 | 50 | # Add finishing touches to the plot 51 | xlabel(L"x") 52 | legend() 53 | display(gcf()) 54 | end 55 | 56 | function convergence() 57 | # Problem parameters 58 | n = 2 .^ (1:14) 59 | p = 2 # Power for grid biasing 60 | f = x -> 0.25 * x^(-3/2) 61 | u = x -> sqrt(x) - x 62 | 63 | clf() 64 | 65 | # Plot reference lines 66 | loglog(n, n.^-(1/2), "k:", label=L"O(n^{-1/2})") 67 | loglog(n, n.^-2, "k--", label=L"O(n^{-2})") 68 | 69 | # Plot the convergence both for uniform and adaptive grids 70 | for (grid,xfun) = ( 71 | ("uniform", n->LinRange(0,1,n+2)[2:end-1]), 72 | ("adaptive", n->LinRange(0,1,n+2)[2:end-1].^p), 73 | ) 74 | errors = [begin 75 | x = xfun(n) 76 | Δ = laplacian(x) 77 | ũ = -Δ\f.(x) 78 | norm(ũ .- u.(x), Inf) 79 | end for n in n] 80 | loglog(n, errors, label="$grid grid") 81 | end 82 | 83 | # Add finishing touches to the plot 84 | xlabel(L"n") 85 | legend(frameon=false) 86 | display(gcf()) 87 | end -------------------------------------------------------------------------------- /tutorial_06_explicit_runge_kutta_solution.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | 3 | ############# 4 | # Exercise 1 5 | 6 | function euler_step(f,y0,t) 7 | return y0 + f(y0)*t 8 | end 9 | 10 | function propagate(f,y0,T,n,step) 11 | y = Vector{typeof(y0)}(undef,n) 12 | y[1] = y0 13 | for i = 2:n 14 | y[i] = step(f,y[i-1],T/(n-1)) 15 | end 16 | return y 17 | end 18 | 19 | function cannonball_f(y,D,g) 20 | x1,x2,v1,v2 = y 21 | v = sqrt(v1^2 + v2^2) 22 | return [ v1, v2, -D*v*v1, -D*v*v2-g ] 23 | end 24 | 25 | function cannonball_trajectory() 26 | # Physical parameters 27 | D = 5.0 28 | g = 1.0 29 | y0 = Float64[0,0,1,2] 30 | T = 2.0 31 | 32 | # Numerical parameters 33 | n = 100 34 | step = euler_step 35 | 36 | # Solve the ODE 37 | y = propagate( 38 | y->cannonball_f(y,D,g), 39 | y0, T, n, step 40 | ) 41 | x1 = [y[i][1] for i = 1:length(y)] 42 | x2 = [y[i][2] for i = 1:length(y)] 43 | v1 = [y[i][3] for i = 1:length(y)] 44 | v2 = [y[i][4] for i = 1:length(y)] 45 | 46 | # Plot the solution 47 | clf() 48 | plot(x1,x2) 49 | axis("equal") 50 | xlabel(L"x_1") 51 | ylabel(L"x_2") 52 | display(gcf()) 53 | end 54 | 55 | 56 | 57 | ############# 58 | # Exercise 2 59 | 60 | function midpoint_step(f,y0,t) 61 | f0 = f(y0) 62 | f1 = f(y0+f0*t/2) 63 | return y0 + f1*t 64 | end 65 | 66 | function ssprk3_step(f,y0,t) 67 | f0 = f(y0) 68 | f1 = f(y0+f0*t) 69 | f2 = f(y0+f0*t/4+f1*t/4) 70 | return y0 + f0*t/6 + f1*t/6 + f2*2t/3 71 | end 72 | 73 | function convergence() 74 | f = y->y^2 75 | y0 = 1.0 76 | T = 0.5 77 | y = t-> y0/(1-y0*t) 78 | 79 | clf() 80 | n = round.(Int, 10.0.^LinRange(0,3,30)) 81 | for (name,step) in ( 82 | ("Euler", euler_step), 83 | ("midpoint", midpoint_step), 84 | ("SSPRK3", 
ssprk3_step), 85 | ) 86 | error = [begin 87 | ỹ = propagate(f,y0,T,n, step) 88 | abs(y(T) - ỹ[end]) 89 | end for n in n] 90 | loglog(n, error, label=name) 91 | end 92 | loglog(n, inv.(n), "k--") 93 | loglog(n, inv.(n).^2, "k-.") 94 | loglog(n, inv.(n).^3, "k:") 95 | legend(loc="best") 96 | xlabel("Number of time steps") 97 | ylabel(L"Error at final time") 98 | display(gcf()) 99 | end 100 | -------------------------------------------------------------------------------- /tutorial_06_explicit_runge_kutta.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | 3 | ############# 4 | # Exercise 1 5 | 6 | function euler_step(f,y0,t) 7 | return y0 + f(y0)*t 8 | end 9 | 10 | function propagate(f,y0,T,n,step) 11 | y = Vector{typeof(y0)}(undef,n) 12 | y[1] = y0 13 | for i = 2:n 14 | y[i] = step(f,y[i-1],T/(n-1)) 15 | end 16 | return y 17 | end 18 | 19 | function cannonball_f(y,D,g) 20 | x1,x2,v1,v2 = y # Split `y` into its position and velocity components 21 | 22 | # TODO: Your code here 23 | 24 | return [ dx1,dx2,dv1,dv2 ] # Assemble the time dervatives into the 4-vector `f(y)` 25 | end 26 | 27 | function cannonball_trajectory() 28 | # Physical parameters 29 | D = 5.0 30 | g = 1.0 31 | y0 = Float64[0,0,1,2] 32 | T = 2.0 33 | 34 | # Numerical parameters 35 | n = 100 36 | step = euler_step 37 | 38 | # Solve the ODE 39 | y = propagate( 40 | y->cannonball_f(y,D,g), 41 | y0, T, n, step 42 | ) 43 | x1 = [y[i][1] for i = 1:length(y)] 44 | x2 = [y[i][2] for i = 1:length(y)] 45 | v1 = [y[i][3] for i = 1:length(y)] 46 | v2 = [y[i][4] for i = 1:length(y)] 47 | 48 | # Plot the solution 49 | clf() 50 | plot(x1,x2) 51 | axis("equal") 52 | xlabel(L"x_1") 53 | ylabel(L"x_2") 54 | display(gcf()) 55 | end 56 | 57 | 58 | 59 | ############# 60 | # Exercise 2 61 | 62 | function midpoint_step(f,y0,t) 63 | # TODO: Your code here 64 | return yt # numerical solution after time `t` into the future 65 | end 66 | 67 | function ssprk3_step(f,y0,t) 68 | # TODO: Your code here 69 | return yt # numerical solution after time `t` into the future 70 | end 71 | 72 | function convergence() 73 | f = y->y^2 74 | y0 = 1.0 75 | T = 0.5 76 | y = t-> y0/(1-y0*t) 77 | 78 | clf() 79 | n = round.(Int, 10.0.^LinRange(0,3,30)) 80 | for (name,step) in ( 81 | ("Euler", euler_step), 82 | # ("midpoint", midpoint_step), 83 | # ("SSPRK3", ssprk3_step), 84 | ) 85 | error = [begin 86 | ỹ = propagate(f,y0,T,n, step) 87 | abs(y(T) - ỹ[end]) 88 | end for n in n] 89 | loglog(n, error, label=name) 90 | end 91 | loglog(n, inv.(n), "k--") 92 | loglog(n, inv.(n).^2, "k-.") 93 | loglog(n, inv.(n).^3, "k:") 94 | legend(loc="best") 95 | xlabel("Number of time steps") 96 | ylabel(L"Error at final time") 97 | display(gcf()) 98 | end 99 | -------------------------------------------------------------------------------- /tutorial_09_sampling_theorems.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using Statistics 3 | using Printf 4 | using BenchmarkTools 5 | 6 | sin_pdf(x) = π/2*sin(π*x) 7 | quad_pdf(x) = 6*x*(1-x) 8 | 9 | rand_sin(n) = [rand_sin() for i = 1:n] 10 | function rand_sin() 11 | # TODO: Your code here! 12 | return NaN 13 | end 14 | 15 | rand_quad(n) = [rand_quad() for i = 1:n] 16 | function rand_quad() 17 | # TODO: Your code here! 
18 | return NaN 19 | end 20 | 21 | function plot_pdfs() 22 | x = LinRange(0,1,1000) 23 | clf() 24 | plot(x, quad_pdf.(x), label=L"p(x)") 25 | plot(x, sin_pdf.(x), label=L"q(x)") 26 | xlabel("x") 27 | legend() 28 | display(gcf()) 29 | end 30 | 31 | function plot_pdf_ratio() 32 | x = LinRange(0,1,1000)[2:end-1] 33 | clf() 34 | plot(x, quad_pdf.(x)./sin_pdf.(x), label=L"p(x) / q(x)") 35 | xlabel("x") 36 | legend() 37 | display(gcf()) 38 | end 39 | 40 | function histogram(rand_fun) 41 | n = 1_000_000 42 | if rand_fun == rand_sin 43 | pdf = sin_pdf 44 | elseif rand_fun == rand_quad 45 | pdf = quad_pdf 46 | else 47 | error("Invalid argument rand_fun = $rand_fun") 48 | end 49 | 50 | clf() 51 | hist(rand_fun(n); bins = 100, density = true, label="empirical PDF") 52 | x = LinRange(0,1,1000) 53 | plot(x, pdf.(x), "k-", label="theoretical PDF") 54 | xlabel(L"x") 55 | legend() 56 | display(gcf()) 57 | end 58 | 59 | function monte_carlo() 60 | N = 1000 61 | X = rand_quad(N) 62 | Y = rand_sin(N) 63 | 64 | println("Monte Carlo estimate for E[X]") 65 | @printf(" Direct sampling: %.3f\n", NaN) # TODO: Your code here! 66 | @printf(" Importance sampling: %.3f\n", NaN) # TODO: Your code here! 67 | end 68 | 69 | function comparison() 70 | ########### 71 | # Variance 72 | N = 1_000_000 73 | X = rand_quad(N) 74 | Y = rand_sin(N) 75 | var_dir = var(X) 76 | var_imp = var(Y.*quad_pdf.(Y)./sin_pdf.(Y)) 77 | 78 | println("Variance:") 79 | @printf(" Direct sampling: %.4f\n", var_dir) 80 | @printf(" Importance sampling: %.4f\n", var_imp) 81 | println() 82 | 83 | ########## 84 | # Runtime 85 | t_dir = @belapsed(rand_quad(), seconds=0.1) 86 | t_imp = @belapsed((Y = rand_sin(); Y * quad_pdf(Y) / sin_pdf(Y)), seconds=0.1) 87 | 88 | println("Runtime per sample:") 89 | @printf(" Direct sampling: %2.0f nanoseconds\n", 1e9*t_dir) 90 | @printf(" Importance sampling: %2.0f nanoseconds\n", 1e9*t_imp) 91 | println() 92 | 93 | ############# 94 | # Comparison 95 | 96 | # TODO: Your code here! 97 | end 98 | -------------------------------------------------------------------------------- /tutorial_01_big_o_solution.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | 3 | function fibonacci_sequence() 4 | n = 30 5 | 6 | # Evaluate 7 | f = ones(n) 8 | for i = 3:n 9 | f[i] = f[i-1] + f[i-2] 10 | end 11 | 12 | # Plot 13 | clf() 14 | semilogy(1:n, f, "-o", label=L"f(n)") 15 | xlabel(L"n") 16 | ylabel(L"f(n)") 17 | legend() 18 | display(gcf()) 19 | # Conclusion: `f(n)` looks like a straight line on a `semilogy` plot; 20 | # hence `f(n)` scales exponentially. 21 | end 22 | 23 | function triangular_loop() 24 | n = round.(Int, 10.0.^LinRange(0,4,20)) 25 | 26 | # Evaluate 27 | f = [sum(sum(1 for j = 1:i) for i = 1:n) for n in n] 28 | 29 | # Plot 30 | clf() 31 | loglog(n, f, "-o", label=L"f(n)") 32 | loglog(n, n.^2, "k--", label=L"O(n^2)") 33 | xlabel(L"n") 34 | ylabel(L"f(n)") 35 | legend() 36 | display(gcf()) 37 | # Conclusion: `f(n)` looks like a straight line on a `loglog` plot with 38 | # slope parallel to `n^2`. hence `f(n)` scales algebraically with order 2. 39 | end 40 | 41 | function geometric_series() 42 | n = 1:20 43 | 44 | # Evaluate 45 | f = [sum(2.0.^(1:n)) for n in n] 46 | 47 | # Plot 48 | clf() 49 | semilogy(n, f, "-o", label=L"f(n)") 50 | xlabel(L"n") 51 | ylabel(L"f(n)") 52 | legend() 53 | display(gcf()) 54 | # Conclusion: `f(n)` looks like a straight line on a `semilogy` plot; 55 | # hence `f(n)` scales exponentially. 
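    # Added note (not part of the original solution): the sum has the closed form
    # sum_{k=1}^n 2^k = 2^(n+1) - 2, so f(n) grows like 2^n, which is consistent
    # with the straight line observed on the semilogy axes.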
56 | end 57 | 58 | function recursive() 59 | n = 2 .^ (1:20) 60 | 61 | # Define function 62 | f = n -> (if n == 1; return 1; else return 2*f(n÷2)+1; end) 63 | 64 | # Plot 65 | clf() 66 | loglog(n, f.(n), "-o", label=L"f(n)") 67 | loglog(n, n, "k--", label=L"O(n)") 68 | xlabel(L"n") 69 | ylabel(L"f(n)") 70 | legend() 71 | display(gcf()) 72 | # Conclusion: `f(n)` looks like a straight line on a `loglog` plot with 73 | # slope parallel to `n`. hence `f(n)` scales algebraically with order 1. 74 | end 75 | 76 | function exp_sqrt() 77 | n = 1:100 78 | 79 | # Evaluate 80 | f = @. exp(sqrt(n)) 81 | 82 | # Plot 83 | clf() 84 | 85 | subplot(1,2,1) 86 | loglog(n, f, "-o", label=L"f(n)") 87 | xlabel(L"n") 88 | ylabel(L"f(n)") 89 | 90 | subplot(1,2,2) 91 | semilogy(n, f, "-o", label=L"f(n)") 92 | xlabel(L"n") 93 | ylabel(L"f(n)") 94 | legend() 95 | display(gcf()) 96 | # Conclusion: `f(n)` is curved upwards in the `loglog` plot but curved 97 | # downwards in the `semilogy` plot; hence `f(n)` scales super-algebraically 98 | # but sub-exponentially. 99 | end 100 | 101 | function recursive_2() 102 | n = 2 .^ (1:20) 103 | 104 | # Define function 105 | f = n -> (if n == 1; return 1; else return f(n÷2)+1; end) 106 | 107 | # Plot 108 | clf() 109 | semilogx(n, f.(n), "-o", label=L"f(n)") 110 | xlabel(L"n") 111 | ylabel(L"f(n)") 112 | legend() 113 | display(gcf()) 114 | # Conclusion: `f(n)` looks like a straight line on a `semilogx` plot; 115 | # hence it scales logarithmically. 116 | end -------------------------------------------------------------------------------- /tutorial_07_implicit_runge_kutta_solution.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | 4 | function euler_step(f,y0,t) 5 | return y0 + f(y0)*t 6 | end 7 | 8 | function trapezoidal_step(f,y0,t) 9 | f1 = t*f(y0) 10 | f2 = t*f(y0+f1) 11 | return y0 + (f1 + f2)/2 12 | end 13 | 14 | function semi_implicit_euler_step(f,y0,t) 15 | D,g = f.D,f.g 16 | x1,x2,v1,v2 = y0 17 | v = sqrt(v1^2 + v2^2) 18 | new_x1 = x1 + v1*t 19 | new_x2 = x2 + v2*t 20 | new_v1 = v1/(1+D*v*t) 21 | new_v2 = (v2 - g*t)/(1+D*v*t) 22 | return [new_x1,new_x2,new_v1,new_v2] 23 | end 24 | 25 | function propagate(f,y0,T,n,step) 26 | y = Vector{typeof(y0)}(undef,n) 27 | y[1] = y0 28 | for i = 2:n 29 | y[i] = step(f,y[i-1],T/(n-1)) 30 | end 31 | return y 32 | end 33 | 34 | function cannonball_f(y,D,g) 35 | x1,x2,v1,v2 = y 36 | v = sqrt(v1^2 + v2^2) 37 | return [ v1, v2, -D*v*v1, -D*v*v2-g ] 38 | end 39 | 40 | function cannonball_trajectory() 41 | # Physical parameters 42 | D = 5.0 43 | g = 1.0 44 | y0 = Float64[0,0,1,2] 45 | T = 2.0 46 | 47 | # Numerical parameters 48 | n = 100 49 | step = euler_step 50 | 51 | # Solve the ODE 52 | y = propagate( 53 | y->cannonball_f(y,D,g), 54 | y0, T, n, step 55 | ) 56 | x1 = [y[i][1] for i = 1:length(y)] 57 | x2 = [y[i][2] for i = 1:length(y)] 58 | v1 = [y[i][3] for i = 1:length(y)] 59 | v2 = [y[i][4] for i = 1:length(y)] 60 | 61 | # Plot the solution 62 | clf() 63 | plot(x1,x2) 64 | axis("equal") 65 | xlabel(L"x_1") 66 | ylabel(L"x_2") 67 | display(gcf()) 68 | end 69 | 70 | function stability() 71 | D = 5.0 72 | g = 1.0 73 | y0 = Float64[0,0,0, -0.99*sqrt(g/D)] 74 | n = 100 75 | 76 | clf() 77 | for (name, step, dt) in ( 78 | ("Euler", euler_step, 1/sqrt(g*D)), 79 | ("trapezoidal", trapezoidal_step, 1/sqrt(g*D)), 80 | ("semi-implicit Euler", semi_implicit_euler_step, 1e3), 81 | ) 82 | y = propagate( 83 | y->cannonball_f(y,D,g), 84 | y0,dt*(n-1),n,step 85 | ) 86 | v2 = 
[y[i][4] for i = 1:length(y)] 87 | semilogy(abs.(v2 .+ sqrt(g/D)), label=name) 88 | end 89 | legend(loc="best") 90 | xlabel("step number") 91 | ylabel(L"|v_2 - v_{F,2}|") 92 | display(gcf()) 93 | end 94 | 95 | function convergence() 96 | # Model parameters 97 | D = 1.0 98 | g = 1.0 99 | y0 = Float64[0,0,1,1] 100 | T = 2.0 101 | 102 | # Compute reference solution 103 | y = propagate( 104 | y->cannonball_f(y,D,g), 105 | y0,T,10000,trapezoidal_step 106 | ) 107 | 108 | clf() 109 | n = round.(Int, 10.0.^LinRange(0,3,30)) 110 | for (name,step) in ( 111 | ("Euler", euler_step), 112 | ("trapezoidal", trapezoidal_step), 113 | ("semi-implicit Euler", semi_implicit_euler_step), 114 | ) 115 | error = [begin 116 | ỹ = propagate( 117 | y->cannonball_f(y,D,g), 118 | y0,T,n, step 119 | ) 120 | norm(y[end] - ỹ[end]) 121 | end for n in n] 122 | loglog(n, error, label=name) 123 | end 124 | loglog(n, inv.(n), "k--") 125 | loglog(n, inv.(n).^2, "k-.") 126 | legend(loc="best") 127 | xlabel(L"n") 128 | ylabel(L"\|\tilde y(T) - y(T)\|") 129 | display(gcf()) 130 | end 131 | -------------------------------------------------------------------------------- /tutorial_07_implicit_runge_kutta.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | 4 | function euler_step(f,y0,t) 5 | return y0 + f(y0)*t 6 | end 7 | 8 | function trapezoidal_step(f,y0,t) 9 | f1 = t*f(y0) 10 | f2 = t*f(y0+f1) 11 | return y0 + (f1 + f2)/2 12 | end 13 | 14 | function semi_implicit_euler_step(f,y0,t) 15 | D,g = f.D,f.g 16 | x1,x2,v1,v2 = y0 17 | v = sqrt(v1^2 + v2^2) 18 | new_x1 = # TODO: Your code here 19 | new_x2 = # TODO: Your code here 20 | new_v1 = # TODO: Your code here 21 | new_v2 = # TODO: Your code here 22 | return [new_x1,new_x2,new_v1,new_v2] 23 | end 24 | 25 | function propagate(f,y0,T,n,step) 26 | y = Vector{typeof(y0)}(undef,n) 27 | y[1] = y0 28 | for i = 2:n 29 | y[i] = step(f,y[i-1],T/(n-1)) 30 | end 31 | return y 32 | end 33 | 34 | function cannonball_f(y,D,g) 35 | x1,x2,v1,v2 = y 36 | v = sqrt(v1^2 + v2^2) 37 | return [ v1, v2, -D*v*v1, -D*v*v2-g ] 38 | end 39 | 40 | function cannonball_trajectory() 41 | # Physical parameters 42 | D = 5.0 43 | g = 1.0 44 | y0 = Float64[0,0,1,2] 45 | T = 2.0 46 | 47 | # Numerical parameters 48 | n = 100 49 | step = euler_step 50 | 51 | # Solve the ODE 52 | y = propagate( 53 | y->cannonball_f(y,D,g), 54 | y0, T, n, step 55 | ) 56 | x1 = [y[i][1] for i = 1:length(y)] 57 | x2 = [y[i][2] for i = 1:length(y)] 58 | v1 = [y[i][3] for i = 1:length(y)] 59 | v2 = [y[i][4] for i = 1:length(y)] 60 | 61 | # Plot the solution 62 | clf() 63 | plot(x1,x2) 64 | axis("equal") 65 | xlabel(L"x_1") 66 | ylabel(L"x_2") 67 | display(gcf()) 68 | end 69 | 70 | function stability() 71 | D = 5.0 72 | g = 1.0 73 | y0 = Float64[0,0,0, -0.99*sqrt(g/D)] 74 | n = 100 75 | 76 | clf() 77 | for (name, step, dt) in ( 78 | ("Euler", euler_step, NaN), # TODO: replace `NaN` with your value for `dt` 79 | ("trapezoidal", trapezoidal_step, NaN), # TODO: replace `NaN` with your value for `dt` 80 | # ("semi-implicit Euler", semi_implicit_euler_step, 1e3), 81 | ) 82 | y = propagate( 83 | y->cannonball_f(y,D,g), 84 | y0,dt*(n-1),n,step 85 | ) 86 | v2 = [y[i][4] for i = 1:length(y)] 87 | semilogy(abs.(v2 .+ sqrt(g/D)), label=name) 88 | end 89 | legend(loc="best") 90 | xlabel("step number") 91 | ylabel(L"|v_2 - v_{F,2}|") 92 | display(gcf()) 93 | end 94 | 95 | function convergence() 96 | # Model parameters 97 | D = 1.0 98 | g = 1.0 99 | y0 = Float64[0,0,1,1] 100 | T = 
2.0 101 | 102 | # Compute reference solution 103 | y = propagate( 104 | y->cannonball_f(y,D,g), 105 | y0,T,10000,trapezoidal_step 106 | ) 107 | 108 | clf() 109 | n = round.(Int, 10.0.^LinRange(0,3,30)) 110 | for (name,step) in ( 111 | ("Euler", euler_step), 112 | ("trapezoidal", trapezoidal_step), 113 | # ("semi-implicit Euler", semi_implicit_euler_step), 114 | ) 115 | error = [begin 116 | ỹ = propagate( 117 | y->cannonball_f(y,D,g), 118 | y0,T,n, step 119 | ) 120 | norm(y[end] - ỹ[end]) 121 | end for n in n] 122 | loglog(n, error, label=name) 123 | end 124 | loglog(n, inv.(n), "k--") 125 | loglog(n, inv.(n).^2, "k-.") 126 | legend(loc="best") 127 | xlabel(L"n") 128 | ylabel(L"\|\tilde y(T) - y(T)\|") 129 | display(gcf()) 130 | end 131 | -------------------------------------------------------------------------------- /assignment_3.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using Statistics 3 | using BenchmarkTools 4 | 5 | 6 | # ############################################################################## 7 | # Uniformly distributed points on the disk 8 | 9 | function randdisk_rejection() 10 | # TODO: Your code here 11 | end 12 | 13 | function randdisk_transform() 14 | r = NaN # TODO: Your code here 15 | phi = NaN # TODO: Your code here 16 | s,c = sincos(phi) 17 | # ^^^^^^ Evaluate both sin and cos at once. This turns out to be slightly 18 | # faster than evaluating them individually. 19 | return ( r*c, r*s ) 20 | end 21 | 22 | function plot_samples() 23 | n_samples = 1000 24 | 25 | clf() 26 | for (randdisk,label) in ( 27 | (randdisk_rejection, "Rejection"), 28 | (randdisk_transform, "Inverse transform"), 29 | ) 30 | x = zeros(2,n_samples) 31 | for i = 1:n_samples 32 | x[:,i] .= randdisk() 33 | end 34 | plot(x[1,:], x[2,:], "o", ms=3, label=label) 35 | end 36 | axis("square") 37 | display(gcf()) 38 | end 39 | 40 | function performance_shootout() 41 | println("Runtime randdisk_rejection():") 42 | @btime randdisk_rejection() 43 | println("Runtime randdisk_transform():") 44 | @btime randdisk_transform() 45 | end 46 | 47 | 48 | 49 | # ############################################################################## 50 | # Importance sampling for highly concentrated integrals 51 | 52 | normal_pdf(m,s,x) = exp(-0.5*(x-m)^2/s^2)/(sqrt(2π)*s) 53 | 54 | function uniform_sampling(f,N) 55 | # TODO: your code here 56 | return E,Var 57 | end 58 | 59 | function importance_sampling(f,N,m,s) 60 | # TODO: your code here 61 | return E,Var 62 | end 63 | 64 | function plot_histogram(estimator,f,N; label = "") 65 | # Perform one long run to get accurate estimates for E[X] and Var[X] 66 | E_X,Var_X = estimator(f,1_000_000) 67 | 68 | # Estimate the expectation E_E and variance Var_E of the Monte Carlo 69 | # estimator using the expectation E_X and variance Var_X of the underlying 70 | # random variable 71 | E_E = E_X 72 | Var_E = Var_X/N 73 | 74 | # Plot the estimator PDF predicted by the central limit theorem 75 | x = E_E .+ sqrt(Var_E) .* LinRange(-3,3,1000) 76 | plot(x, normal_pdf.(E_E, sqrt(Var_E), x), "k") 77 | 78 | # Generate a large number of Monte Carlo estimates and plot the resulting 79 | # empirical PDF for comparison 80 | E = [estimator(f,N)[1] for i = 1:10_000] 81 | hist(E, bins=20, density=true, label=label) 82 | end 83 | 84 | function sin_integral() 85 | f = x->π/2*sin(π*x) 86 | N = 100 87 | 88 | clf() 89 | plot_histogram(uniform_sampling,f,N, label="Uniform sampling") 90 | plot([1,1], 0.5.*ylim(),"-",lw=5, label="Exact integral") 91 | 
legend(loc="best", frameon=false) 92 | display(gcf()) 93 | end 94 | 95 | function concentrated_integral() 96 | f = x->exp(-(20*(x-0.5))^4) / 0.0906401 97 | N = 100 98 | 99 | clf() 100 | plot_histogram(uniform_sampling,f,N, label="Uniform sampling") 101 | if (importance = false) 102 | plot_histogram((f,N)->importance_sampling(f,N,0.5,0.03), f,N, label="Importance sampling") 103 | end 104 | plot([1,1],0.5.*ylim(),"-",lw=5, label="Exact integral") 105 | legend(loc="best", frameon=false) 106 | display(gcf()) 107 | end 108 | 109 | function plot_integrand() 110 | x = LinRange(0,1,1000) 111 | f = x->exp(-(20*(x-0.5))^4) / 0.0906401 112 | clf() 113 | plot(x, f.(x), label=L"f(x)") 114 | plot(x, normal_pdf.(0.5,0.03,x), label="Gaussian") 115 | xlabel(L"x") 116 | legend(loc="best") 117 | display(gcf()) 118 | end 119 | -------------------------------------------------------------------------------- /midterm.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | using SparseArrays 4 | using IterativeSolvers 5 | 6 | function laplacian(D) 7 | n = length(D) 8 | return (n+1)^2 .* spdiagm( 9 | -1 => D[2:end-1], 10 | 0 => .-(D[1:end-1] .+ D[2:end]), 11 | 1 => D[2:end-1], 12 | ) 13 | end 14 | 15 | function example() 16 | # Specify the problem parameters 17 | n = 100 18 | D = x -> ifelse(x < 0.5, 0.1, 1.0) 19 | f = x -> 1.0 20 | 21 | # Assemble the linear system 22 | x = LinRange(0,1,n+2)[2:end-1] 23 | m = LinRange(0,1,2n+3)[2:2:end-1] 24 | Δ = laplacian(D.(m)) 25 | u = -Δ\f.(x) 26 | 27 | # Plot the solution 28 | clf() 29 | plot([0;x;1], [0;u;0]) 30 | xlabel(L"x") 31 | ylabel(L"u(x)") 32 | display(gcf()) 33 | end 34 | 35 | 36 | 37 | function n_iterations() 38 | D = x -> ifelse(x < 0.5, 0.1, 1.0) 39 | f = x -> 1.0 40 | 41 | n = round.(Int, 10.0.^LinRange(0,4,10)) 42 | error = [begin 43 | x = LinRange(0,1,n+2)[2:end-1] 44 | m = LinRange(0,1,2n+3)[2:2:end-1] 45 | Δ = laplacian(D.(m)) 46 | 47 | _,hist = cg(-Δ,f.(x), log=true) 48 | hist.iters 49 | end for n in n] 50 | 51 | clf() 52 | loglog(n, error, "-o", label="# CG iterations") 53 | loglog(n, 1.5e0.*n, "k--", label=L"O(n)") 54 | xlabel(L"# grid points $n$") 55 | # ylabel("# CG iterations") 56 | legend() 57 | display(gcf()) 58 | end 59 | 60 | 61 | ############################################################################### 62 | # WARNING: The following code uses advanced Julia features. You are not 63 | # expected to understand the details of how this code works, but you are 64 | # welcome to ask me questions in case you would like to know more. 
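# Added explanatory note (not part of the original midterm code): the idea behind
# the preconditioner below is that the constant-coefficient finite-difference
# Laplacian (n+1)^2 * tridiag(1,-2,1) is diagonalised by the discrete sine
# transform. Its eigenvectors are v_k[j] = sin(pi*j*k/(n+1)), and the eigenvalues
# of the negated Laplacian are
#     lambda_k = 2*(n+1)^2*(1 - cos(pi*k/(n+1))),   k = 1,...,n,
# which is exactly the formula used for `iD` below. The FFTW RODFT00 (DST-I) plan
# applies the eigenvector matrix (up to a scaling that `inv(V)` absorbs) in
# O(n log n) operations, so one application of the preconditioner costs
# O(n log n) instead of O(n^2) for a dense solve.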
65 | 66 | struct FourierPreconditioner{DST,Diag,iDST} 67 | V::DST 68 | iD::Diag 69 | Vt::iDST 70 | end 71 | 72 | using FFTW 73 | 74 | function FourierPreconditioner(n) 75 | # Eigenector matrix and its inverse == transpose 76 | u = zeros(n) 77 | V = FFTW.plan_r2r(u, FFTW.RODFT00) 78 | Vt = inv(V) 79 | 80 | # Inverse of eigenvalue matrix 81 | iD = Diagonal(@.( inv(2*(n+1)^2*(1 - cos(π*(1:n)/(n+1)))))) 82 | 83 | # Assemble `FourierPreconditioner` object 84 | return FourierPreconditioner(V,iD,Vt) 85 | end 86 | 87 | function LinearAlgebra.ldiv!(v,P::FourierPreconditioner,u) 88 | # Extract matrices from `FourierPreconditioner` object 89 | V = P.V 90 | iD = P.iD 91 | Vt = P.Vt 92 | 93 | # Apply preconditioner to `u` and store result in `v` 94 | return v .= V*(iD*(Vt*u)) 95 | end 96 | 97 | # End of advanced Julia code 98 | ############################################################################### 99 | 100 | function fourier_preconditioning() 101 | fig = figure(figsize=(8,6)) 102 | 103 | for (i,ε) in enumerate(10.0.^.-(0:3)) 104 | # Specify the problem parameters 105 | n = 50 106 | D = x -> 1-4*(1-ε)*x*(1-x) 107 | f = x -> x^2 #1.0 #(x-0.5)^2 108 | 109 | # Assemble the linear system 110 | x = LinRange(0,1,n+2)[2:end-1] 111 | m = LinRange(0,1,2n+3)[2:2:end-1] 112 | A = -laplacian(D.(m)) 113 | b = f.(x) 114 | 115 | # Plot convergence histories 116 | subplot(2,2,i) 117 | 118 | _,hist = cg(A,b, log=true) 119 | semilogy([norm(b); hist[:resnorm]], label="no preconditioning") 120 | 121 | _,hist = cg(A,b, Pl=FourierPreconditioner(n), log=true) 122 | semilogy([norm(b); hist[:resnorm]], label="Fourier preconditioning") 123 | 124 | title(latexstring("\\varepsilon = $ε")) 125 | xlabel("Number of iterations") 126 | ylabel("CG residual") 127 | ylim(1e-6,1e2) 128 | if i == 4; legend(loc="lower right"); end 129 | end 130 | tight_layout(pad=3.0) 131 | display(gcf()) 132 | close(fig) 133 | end 134 | -------------------------------------------------------------------------------- /03_finite_differences.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | using SparseArrays 4 | 5 | function laplacian_1d(n) 6 | # Use `Matrix(laplacian_1d(n))` to print the output of this function in a 7 | # readable form 8 | return (n+1)^2 * spdiagm( 9 | -1 => fill( 1.0,n-1), # subdiagonal 10 | 0 => fill(-2.0,n), # diagonal 11 | 1 => fill( 1.0,n-1) # superdiagonal 12 | ) 13 | end 14 | 15 | function solve_poisson_1d(f) 16 | n = length(f) 17 | Δ = laplacian_1d(n) 18 | return -Δ\f 19 | end 20 | 21 | function example_1d() 22 | n = 4 23 | x = LinRange(0,1,n+2)[2:end-1] 24 | f = x -> π^2*sin(π*x) 25 | uref = x -> sin(π*x) 26 | u = solve_poisson_1d(f.(x)) 27 | 28 | clf() 29 | xx = LinRange(0,1,1000) 30 | plot(xx,uref.(xx), "k-", label="exact solution") 31 | plot([0;x;1],[0;u;0], label="FD solution") 32 | legend(frameon=false) 33 | xlabel(L"x") 34 | ylabel(L"u(x)") 35 | display(gcf()) 36 | end 37 | 38 | function convergence_1d() 39 | # Define problem and solution 40 | f = x -> π^2 * sin(π*x) 41 | u = x -> sin(π*x) 42 | 43 | # Compute errors 44 | n = 2 .^ (1:15) 45 | error = [begin 46 | x = LinRange(0,1,n+2)[2:end-1] 47 | ũ = solve_poisson_1d(f.(x)) 48 | norm(ũ .- u.(x), 2)/sqrt(n+1) 49 | end for n in n] 50 | 51 | # Plot 52 | clf() 53 | loglog(n, error, label=L"\|u - u_n\|_{2,n}") 54 | loglog(n, n.^-2, "k--", label=L"O(n^{-2})") 55 | xlabel(L"n") 56 | legend(frameon=false) 57 | display(gcf()) 58 | end 59 | 60 | 61 | 62 | function laplacian_2d(n) 63 | Δ = 
laplacian_1d(n) 64 | Id = sparse(I,n,n) # n x n identity matrix 65 | return kron(Id,Δ) + kron(Δ,Id) 66 | end 67 | 68 | function solve_poisson_2d(f) 69 | @assert size(f,1) == size(f,2) 70 | n = size(f,1) 71 | Δ = laplacian_2d(n) 72 | return reshape(-Δ\vec(f), (n,n)) 73 | end 74 | 75 | function example_2d() 76 | n = 100 77 | x = LinRange(0,1,n+2)[2:end-1] 78 | f = (x1,x2)->x1*x2 79 | u = solve_poisson_2d(f.(x,x')) 80 | 81 | clf() 82 | imshow(u, extent=(0,1,0,1), origin="bottom left") 83 | colorbar() 84 | display(gcf()) 85 | end 86 | 87 | function convergence_2d() 88 | # Define problem and solution 89 | f = (x1,x2) -> 5*π^2 * sin(π*x1) * sin(2π*x2) 90 | u = (x1,x2) -> sin(π*x1) * sin(2π*x2) 91 | 92 | # Compute errors 93 | n = 2 .^ (1:9) 94 | error = [begin 95 | x = LinRange(0,1,n+2)[2:end-1] 96 | ũ = solve_poisson_2d(f.(x,x')) 97 | norm(ũ .- u.(x,x'), 2)/(n+1) 98 | end for n in n] 99 | 100 | # Plot 101 | clf() 102 | loglog(n, error, label=L"\|u - u_n\|_{2,n}") 103 | loglog(n, 2e0*n.^-2, "k--", label=L"O(n^{-2})") 104 | xlabel(L"n") 105 | legend(frameon=false) 106 | display(gcf()) 107 | end 108 | 109 | 110 | 111 | function laplacian_3d(n) 112 | Δ = laplacian_1d(n) 113 | Id = sparse(I,n,n) # n x n identity matrix 114 | return kron(Id,Id,Δ) + kron(Id,Δ,Id) + kron(Δ,Id,Id) 115 | end 116 | 117 | function solve_poisson_3d(f) 118 | @assert size(f,1) == size(f,2) == size(f,3) 119 | n = size(f,1) 120 | Δ = laplacian_3d(n) 121 | return reshape(-Δ\vec(f), (n,n,n)) 122 | end 123 | 124 | function convergence_3d() 125 | # Define problem and solution 126 | f = (x1,x2,x3) -> 3*π^2 * sin(π*x1) * sin(π*x2) * sin(π*x3) 127 | u = (x1,x2,x3) -> sin(π*x1) * sin(π*x2) * sin(π*x3) 128 | 129 | # Compute errors 130 | n = 2 .^ (1:5) 131 | error = [begin 132 | x = LinRange(0,1,n+2)[2:end-1] 133 | ũ = solve_poisson_3d(f.(x,x',reshape(x,(1,1,n)))) 134 | norm(ũ .- u.(x,x',reshape(x,(1,1,n))), 2)/(n+1)^(3/2) 135 | end for n in n] 136 | 137 | # Plot 138 | clf() 139 | loglog(n, error, label=L"\|u - u_n\|_{2,n}") 140 | loglog(n, 5e-1*n.^-2, "k--", label=L"O(n^{-2})") 141 | xlabel(L"n") 142 | legend(frameon=false) 143 | display(gcf()) 144 | end -------------------------------------------------------------------------------- /tutorial_09_sampling_theorems_solution.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using Statistics 3 | using Printf 4 | using BenchmarkTools 5 | 6 | sin_pdf(x) = π/2*sin(π*x) 7 | quad_pdf(x) = 6*x*(1-x) 8 | 9 | rand_sin(n) = [rand_sin() for i = 1:n] 10 | rand_sin() = acos(1-2rand())/π 11 | 12 | rand_quad(n) = [rand_quad() for i = 1:n] 13 | function rand_quad() 14 | M = 12/π^2 15 | while true 16 | Y = rand_sin() 17 | if rand() <= quad_pdf(Y)/(M*sin_pdf(Y)) 18 | return Y 19 | end 20 | end 21 | end 22 | 23 | function plot_pdfs() 24 | x = LinRange(0,1,1000) 25 | clf() 26 | plot(x, quad_pdf.(x), label=L"p(x)") 27 | plot(x, sin_pdf.(x), label=L"q(x)") 28 | xlabel("x") 29 | legend() 30 | display(gcf()) 31 | end 32 | 33 | function plot_pdf_ratio() 34 | x = LinRange(0,1,1000)[2:end-1] 35 | clf() 36 | plot(x, quad_pdf.(x)./sin_pdf.(x), label=L"p(x) / q(x)") 37 | xlabel("x") 38 | legend() 39 | display(gcf()) 40 | end 41 | 42 | function histogram(rand_fun) 43 | n = 1_000_000 44 | if rand_fun == rand_sin 45 | pdf = sin_pdf 46 | elseif rand_fun == rand_quad 47 | pdf = quad_pdf 48 | else 49 | error("Invalid argument rand_fun = $rand_fun") 50 | end 51 | 52 | clf() 53 | hist(rand_fun(n); bins = 100, density = true, label="empirical PDF") 54 | x = 
LinRange(0,1,1000) 55 | plot(x, pdf.(x), "k-", label="theoretical PDF") 56 | xlabel(L"x") 57 | legend() 58 | display(gcf()) 59 | end 60 | 61 | function monte_carlo() 62 | N = 1000 63 | X = rand_quad(N) 64 | Y = rand_sin(N) 65 | 66 | println("Monte Carlo estimate for E[X]") 67 | @printf(" Direct sampling: %.3f\n", mean(X)) 68 | @printf(" Importance sampling: %.3f\n", mean(Y.*quad_pdf.(Y)./sin_pdf.(Y))) 69 | end 70 | 71 | function comparison() 72 | ########### 73 | # Variance 74 | N = 1_000_000 75 | X = rand_quad(N) 76 | Y = rand_sin(N) 77 | var_dir = var(X) 78 | var_imp = var(Y.*quad_pdf.(Y)./sin_pdf.(Y)) 79 | 80 | println("Variance:") 81 | @printf(" Direct sampling: %.4f\n", var_dir) 82 | @printf(" Importance sampling: %.4f\n", var_imp) 83 | println() 84 | 85 | ########## 86 | # Runtime 87 | t_dir = @belapsed(rand_quad(), seconds=0.1) 88 | t_imp = @belapsed((Y = rand_sin(); Y * quad_pdf(Y) / sin_pdf(Y)), seconds=0.1) 89 | 90 | println("Runtime per sample:") 91 | @printf(" Direct sampling: %2.0f nanoseconds\n", 1e9*t_dir) 92 | @printf(" Importance sampling: %2.0f nanoseconds\n", 1e9*t_imp) 93 | println() 94 | 95 | ############# 96 | # Comparison 97 | println("Comparison metric ( sqrt([runtime per sample]*[variance]) ):") 98 | @printf(" Direct sampling: %.3e\n", sqrt(t_dir*var_dir)) 99 | @printf(" Importance sampling: %.3e\n", sqrt(t_imp*var_imp)) 100 | println() 101 | 102 | # Justifaction for comparison metric: 103 | # [Monte Carlo error] 104 | # = sqrt( [variance] / [# samples] ) 105 | # = sqrt( [variance] / ( [runtime] / [runtime per sample] ) ) 106 | # = sqrt( [runtime per sample] * [variance] / [runtime] ) 107 | 108 | # Example output: 109 | # Variance: 110 | # Direct sampling: 0.0500 111 | # Importance sampling: 0.0535 112 | # 113 | # Runtime per sample: 114 | # Direct sampling: 61 nanoseconds 115 | # Importance sampling: 40 nanoseconds 116 | # 117 | # Comparison metric ( sqrt([runtime per sample]*[variance]) ): 118 | # Direct sampling: 5.511e-05 119 | # Importance sampling: 4.601e-05 120 | 121 | # Conclusions: 122 | # - Direct sampling has a lower variance but larger runtime per sample than 123 | # importance sampling. 124 | # - All in all, importance sampling is about 5.5 / 4.6 ≈ 1.2 times more 125 | # efficient than direct sampling. 126 | end 127 | 128 | 129 | # Answer to Q7: 130 | # We know from the lecture that rejection sampling on average requires M proposals 131 | # to generate a single sample of the target distribution. Under the given assumptions, 132 | # the runtime-per-sample ratio between direct and importance sampling is hence exactly 133 | # M = 12/π^2 ≈ 1.2, which is reasonably close to the empirically observed factor 1.5. 
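# Added sketch (not part of the original solutions): an empirical check of the
# claim that rejection sampling needs on average M = 12/π² proposals per accepted
# sample. The function name `average_proposals` is invented for this illustration;
# it reuses `rand_sin`, `sin_pdf` and `quad_pdf` defined above.
function average_proposals(n = 100_000)
    M = 12/π^2
    proposals = 0
    for i = 1:n
        # Repeat the rejection loop from `rand_quad`, but count the proposals
        while true
            proposals += 1
            Y = rand_sin()
            if rand() <= quad_pdf(Y)/(M*sin_pdf(Y))
                break
            end
        end
    end
    @printf("Proposals per accepted sample: %.3f (theory: M = %.3f)\n", proposals/n, M)
end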
134 | -------------------------------------------------------------------------------- /07_molecular_dynamics.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using DifferentialEquations 3 | using Random 4 | using LinearAlgebra 5 | using Statistics 6 | using Printf 7 | 8 | using PyCall 9 | FuncAnimation = pyimport("matplotlib.animation").FuncAnimation 10 | 11 | 12 | 13 | ################################################################################ 14 | # Equations of motion 15 | 16 | function kinetic_energy(v) 17 | # `v` is an `2×n` matrix such that `v[:,i] = (velocity of atom i)` 18 | n = size(v,2) 19 | E = 0.0 20 | for i = 1:n 21 | E += (v[1,i]^2 + v[2,i]^2)/2 22 | end 23 | return E 24 | end 25 | 26 | function potential_energy(x,L) 27 | # `x` is an `2×n` matrix such that `x[:,i] = (position of atom i)` 28 | # `[0,√3*L] × [0,L]` is the simulation box 29 | n = size(x,2) 30 | E = 0.0 31 | for i = 1:n 32 | for j = 1:i-1 33 | # Distance between atoms `i` and `j` under periodic boundary conditions. 34 | # Ignore at first and simply assume `r2 = norm(x[:,i] - x[:,j])^2`. 35 | r2 = minimum( 36 | (x[1,i] - x[1,j] + sqrt(3)*L*s1)^2 + 37 | (x[2,i] - x[2,j] + L*s2)^2 38 | for s1 = (-1,0,1), s2 = (-1,0,1) 39 | ) 40 | ir2 = inv(r2) 41 | ir6 = ir2^3 42 | E += ir6^2 - 2*ir6 43 | end 44 | end 45 | return E 46 | end 47 | 48 | energy(x,v,L) = kinetic_energy(v) + potential_energy(x,L) 49 | 50 | ################################################################################ 51 | 52 | 53 | ################################################################################ 54 | # Initial conditions 55 | 56 | function initial_positions(n,L) 57 | x = Matrix{Float64}(undef, 2,2n^2) 58 | o = (L/2-n/2) .* (sqrt(3),1) 59 | for i = 1:n 60 | for j = 1:n 61 | x[:,2n*(i-1) + 2j-1] .= (sqrt(3)*(i-1 ), j-1 ) .+ o 62 | x[:,2n*(i-1) + 2j ] .= (sqrt(3)*(i-0.5), j-0.5) .+ o 63 | end 64 | end 65 | return x 66 | end 67 | 68 | function initial_velocities(n,E) 69 | v = randn(2,2n^2) 70 | v .-= mean(v,dims=2) 71 | EE = kinetic_energy(v,) 72 | v .*= sqrt(E/EE) 73 | return v 74 | end 75 | 76 | ################################################################################ 77 | 78 | 79 | 80 | function map_to_box!(x,L) # Ignore at first 81 | # Enfore periodic boundary conditions: when an atom leaves the simulation 82 | # box on one side, make it reappear on the other side. 
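    # Added note: the single wrap performed below assumes that no atom travels
    # more than one full box length within a single step (which holds for the
    # small `dt` used in `simulate()`); for larger displacements one would use
    # `mod(x[1,i], sqrt(3)*L)` and `mod(x[2,i], L)` instead.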
83 | n = size(x,2) 84 | for i = 1:n 85 | if x[1,i] < 0 86 | x[1,i] += sqrt(3)*L 87 | elseif x[1,i] > sqrt(3)*L 88 | x[1,i] -= sqrt(3)*L 89 | end 90 | if x[2,i] < 0 91 | x[2,i] += L 92 | elseif x[2,i] > L 93 | x[2,i] -= L 94 | end 95 | end 96 | return x 97 | end 98 | 99 | function simulate() 100 | # Need to escape from VS Code to make the animation work 101 | pygui(true) 102 | 103 | # Physical parameters 104 | n = 3 # Number of atoms per dimension 105 | L = 5*n # Sidelength of simulation box 106 | E = 2n^2 * 1.0 # Initial kinetic energy 107 | # 0.0 -> solid 108 | # 1.0 -> liquid 109 | # 2.0 -> gas 110 | 111 | # Numerical parameters 112 | step = Heun() # Runge-Kutta method 113 | # `Heun()` is same as `trapezoidal_step()` from Lecture 6 114 | dt = 0.01 # Step size 115 | 116 | # Assemble ODE problem and solver 117 | Random.seed!(42) 118 | x = initial_positions(n,L) 119 | v = initial_velocities(n,E) 120 | 121 | problem = HamiltonianProblem( 122 | (x,v,_)->energy(x,v,L), 123 | x,v, (0,Inf) 124 | ) 125 | 126 | # Ignore at first 127 | enforce_pbc = DiscreteCallback( 128 | (u,t,integrator) -> true, 129 | integrator -> begin 130 | x = integrator.u.x[1] 131 | map_to_box!(x,L) 132 | end 133 | ) 134 | 135 | integrator = init( 136 | problem, 137 | step, 138 | adaptive=false, dt = dt, 139 | alias_u0 = true, 140 | save_on = false, 141 | callback=enforce_pbc 142 | ) 143 | 144 | # Plot initial configuration 145 | clf() 146 | x = integrator.u.x[1] 147 | p, = plot(x[1,:],x[2,:], "o", ms = 10) 148 | xlim([0, sqrt(3)*L]) 149 | ylim([0, L]) 150 | gca().set_aspect("equal","box") 151 | 152 | tlabel = text(0.05*sqrt(3)*L,0.95*L, "", ha="left", va="top") 153 | 154 | # Start animation 155 | dt_per_frame = max(0.1,dt) 156 | frames_per_second = 20 157 | FuncAnimation( 158 | gcf(), 159 | i->begin 160 | step!(integrator, dt_per_frame) 161 | p.set_data(integrator.u.x[1]) 162 | tlabel.set_text(@sprintf("t = %.1f", i*dt_per_frame)) 163 | return (p,tlabel) 164 | end, 165 | interval = 1000/frames_per_second, 166 | blit=true, 167 | init_func=()->(p,tlabel) 168 | ) 169 | end 170 | -------------------------------------------------------------------------------- /04_sparse_lu.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | using SparseArrays 4 | using Printf 5 | 6 | function upleft_arrow_matrix(n) 7 | A = spdiagm(0 => fill(n,n)) 8 | A[2:end, 1] .= A[1, 2:end] .= 1.0 9 | return A 10 | end 11 | 12 | function downright_arrow_matrix(n) 13 | A = spdiagm(0 => fill(n,n)) 14 | A[1:end-1, end] .= A[end, 1:end-1] .= 1.0 15 | return A 16 | end 17 | 18 | function lu_benchmark() 19 | n = 1000 20 | println("Runtime of LU factorisation:") 21 | for matrix in ( 22 | upleft_arrow_matrix, 23 | downright_arrow_matrix, 24 | ) 25 | A = matrix(n) 26 | time = @elapsed(ldlt(A, perm=1:n)) 27 | # LDLt is the symmetric version of the LU factorisation. 28 | # Ignore the `perm = 1:n` argument for now. 
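# (The `perm = 1:n` keyword fixes the elimination order to the natural ordering 1,...,n, i.e. it switches off the fill-reducing reordering that `ldlt()` would otherwise apply, so the effect of the arrow direction on fill-in remains visible.)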
29 | 30 | @printf("%22s: %6f seconds\n", matrix, time) 31 | end 32 | end 33 | 34 | function lu_structures() 35 | n = 5 36 | for matrix in (upleft_arrow_matrix, downright_arrow_matrix) 37 | A = matrix(n) 38 | L,U = lu(Matrix(A), Val(false)) # Ignore the `Val(false)` argument 39 | println(matrix, ":") 40 | println("L:") 41 | display(L .!= 0) 42 | println("U:") 43 | display(U .!= 0) 44 | println() 45 | end 46 | end 47 | 48 | function matrix() 49 | p = [4,2,1,3] 50 | A = [ 51 | 1 0 0 1 52 | 1 1 0 0 53 | 1 0 1 0 54 | 0 0 0 1 55 | ] 56 | println("A:") 57 | display(A) 58 | println() 59 | println("P*A*P':") 60 | display(A[p,p]) 61 | println() 62 | end 63 | 64 | function permutation() 65 | p = [4,2,1,3] 66 | q = [3,2,4,1] 67 | @show p[q] 68 | end 69 | 70 | 71 | 72 | using Random, Combinatorics 73 | 74 | function check_fillin() 75 | # Define matrix 76 | Random.seed!(42) 77 | A = [ 78 | 9 rand() rand() 0 79 | rand() 9 0 rand() 80 | rand() 0 9 rand() 81 | 0 rand() rand() 9 82 | ] 83 | 84 | # Loop over all permutations 85 | for p in permutations(1:4) 86 | for q in permutations(1:4) 87 | # Skip if top-left entry is 0 88 | if A[p[1],q[1]] == 0; continue; end 89 | 90 | # Check that the number of fill-in entries is exactly 2 91 | L,U = lu(A[p,q], Val(false)) 92 | n_fill = sum((L+U) .!= 0) - sum(A .!= 0) 93 | if n_fill != 2 94 | error("Permutations p = ", p, " and q = ", q, " lead to ", n_fill, " fill-in entries") 95 | end 96 | end 97 | end 98 | println("All permutations lead to exactly two fill-in entries") 99 | end 100 | 101 | 102 | 103 | laplacian_1d(n) = (n+1)^2*Tridiagonal( 104 | fill( 1.0,n-1), # subdiagonal 105 | fill(-2.0,n), # diagonal 106 | fill( 1.0,n-1) # superdiagonal 107 | ) 108 | 109 | function laplacian_2d(n) 110 | Δ = sparse(laplacian_1d(n)) 111 | Id = sparse(I,n,n) 112 | return kron(Δ,Id) + kron(Id,Δ) 113 | end 114 | 115 | """ 116 | nested_dissection(n) -> p 117 | 118 | Compute the nested dissection permutation for `laplacian_2d(n)`. 119 | 120 | Note that `n` must be of the form `n = 2^k - 1`. 121 | 122 | For convenience, this function works with two-dimensional indices `i1,i2` 123 | rather than the one-dimensional index `i = i1 + n*(i2-1)`. The output of this 124 | function therefore needs to be converted using the 125 | `matrix_to_vector_indices()` function provided below before it can be used to 126 | permute a matrix. 127 | """ 128 | function nested_dissection(n) 129 | @assert isodd(n) 130 | n == 1 && return [(1,1)] 131 | 132 | nn = n÷2 133 | pp = nested_dissection(nn) 134 | return [ 135 | [(i[1] , i[2] ) for i in pp]; # top left quadrant 136 | [(i[1]+nn+1, i[2] ) for i in pp]; # bottom left quadrant 137 | [(i[1] , i[2]+nn+1) for i in pp]; # top right quadrant 138 | [(i[1]+nn+1, i[2]+nn+1) for i in pp]; # bottom right quadrant 139 | [(nn+1, i2) for i2 in 1:nn]; # left horizontal separator 140 | [(nn+1, nn+1+i2) for i2 in 1:nn]; # right horizontal separator 141 | [(i1, nn+1) for i1 in 1:n] # vertical separator 142 | ] 143 | end 144 | 145 | matrix_to_vector_indices(n,p) = [i[1] + n*(i[2]-1) for i in p] 146 | 147 | """ 148 | runtimes(n = 127) 149 | 150 | Print the runtimes of LU factorisation applied to `P*laplacian_2d(n)*P'` for 151 | `P in [I, nested_dissection]`. 152 | 153 | Note that `n` must be of the form `n = 2^k - 1`.
154 | """ 155 | function runtimes(n = 127) 156 | Δ = -laplacian_2d(n) 157 | 158 | t = @elapsed( ldlt(Δ; perm = 1:n^2) ) 159 | @printf(" Original: %5.3f seconds\n", t) 160 | 161 | p = matrix_to_vector_indices(n,nested_dissection(n)) 162 | t = @elapsed( ldlt(Δ; perm = p) ) 163 | @printf("Nested dissection: %5.3f seconds\n", t) 164 | 165 | t = @elapsed( ldlt(Δ) ) 166 | @printf(" Default: %5.3f seconds\n", t) 167 | end 168 | 169 | """ 170 | sparsity_pattern(n, perm = "") 171 | 172 | Plot the sparsity pattern of `L+U`, where `L`,`U` is the LU factorisation of 173 | `Δ = laplacian_2d(n)`. 174 | 175 | `perm in ["","nd"]` denotes the permutation to apply to `Δ` before 176 | the factorisation. 177 | 178 | Note that for `perm == "nd"`, `n` must be of the form `n = 2^k - 1`. 179 | """ 180 | function sparsity_pattern(n,perm = "") 181 | A = -laplacian_2d(n) 182 | if perm == "nd" 183 | p = matrix_to_vector_indices(n, nested_dissection(n)) 184 | A = A[p,p] 185 | end 186 | 187 | L,U = lu(Matrix(A), Val(false)) # `Val(false)` disables pivoting for stability 188 | 189 | clf() 190 | spy(L.+U, marker="s", c="r", ms=3e2*n^(-2)) 191 | spy( A , marker="s", c="k", ms=3e2*n^(-2)) 192 | display(gcf()) 193 | end 194 | 195 | -------------------------------------------------------------------------------- /01_big_o_notation_updated.jl: -------------------------------------------------------------------------------- 1 | function matrix_product(A,B) 2 | @assert size(A,2) == size(B,1) 3 | # `@assert condition` does nothing if `condition` is `true`, and throws an 4 | # error otherwise. 5 | 6 | C = zeros(size(A,1),size(B,2)) 7 | for i = 1:size(A,1) 8 | for j = 1:size(B,2) 9 | for k = 1:size(A,2) 10 | C[i,j] += A[i,k]*B[k,j] 11 | end 12 | end 13 | end 14 | return C 15 | end 16 | 17 | function matrix_product() 18 | n = 1_000 19 | A = zeros(n,n) 20 | B = zeros(n,n) 21 | println(" Naive matrix product: ", @elapsed(matrix_product(A,B)), " seconds") 22 | println("Export matrix product: ", @elapsed(A*B), " seconds") 23 | end 24 | 25 | 26 | 27 | function sum_ij(A) 28 | s = 0.0 29 | for i = 1:size(A,1) 30 | for j = 1:size(A,2) 31 | s += A[i,j] 32 | end 33 | end 34 | return s 35 | end 36 | 37 | function sum_ji(A) 38 | s = 0.0 39 | for j = 1:size(A,2) 40 | for i = 1:size(A,1) 41 | s += A[i,j] 42 | end 43 | end 44 | return s 45 | end 46 | 47 | function matrix_sum() 48 | n = 10_000 49 | A = zeros(n,n) 50 | println("Summing over columns first: ", @elapsed(sum_ji(A)), " seconds") 51 | println(" Summing over rows first: ", @elapsed(sum_ij(A)), " seconds") 52 | end 53 | 54 | 55 | 56 | function sum_if(x,y) 57 | s = 0.0 58 | for i = 1:length(x) 59 | if x[i] 60 | s += y[i] 61 | end 62 | end 63 | return s 64 | end 65 | 66 | function branch_prediction() 67 | # Based on https://stackoverflow.com/q/11227809 68 | 69 | n = 20_000_000 70 | x_rand = rand(Bool,n) 71 | x_sort = sort(x_rand) 72 | y = rand(n) 73 | 74 | println("Conditional sum with random input: ", @elapsed(sum_if(x_rand,y)), " seconds") 75 | println("Conditional sum with sorted input: ", @elapsed(sum_if(x_sort,y)), " seconds") 76 | # `sum_if(x,y)` is much faster if `x` is sorted because branch prediction 77 | # is easier if `x` is of the form `x = [0,0, ..., 0,0,1,1, ... 1,1]` 78 | end 79 | 80 | 81 | 82 | using BenchmarkTools 83 | # Defines the `@belapsed()` macro which estimates the runtime of very short 84 | # operations more accurately by running the operation several times. 
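# The `$x` inside `@belapsed(sum($x))` below interpolates the value of `x` into the benchmarked expression, so the timing is not distorted by `x` being looked up as a global variable on every evaluation.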
85 | 86 | function preasymptotic() 87 | x = zeros(32) 88 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 89 | x = zeros(64) 90 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 91 | end 92 | 93 | #= 94 | Example output: 95 | 96 | Summing 32 numbers: 13.683049147442327 nanoseconds 97 | Summing 64 numbers: 16.608040201005025 nanoseconds 98 | 99 | Twice the amount of computations in same amount of time! 100 | =# 101 | 102 | 103 | 104 | function asymptotic() 105 | x = zeros(1024) 106 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 107 | x = zeros(2048) 108 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 109 | x = zeros(4096) 110 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 111 | end 112 | 113 | #= 114 | Example output: 115 | 116 | Summing 1024 numbers: 77.34751037344398 nanoseconds 117 | Summing 2048 numbers: 162.4756258234519 nanoseconds 118 | Summing 4096 numbers: 325.6359649122807 nanoseconds 119 | 120 | No 121 | twice the number of operations => twice the runtime. 122 | =# 123 | 124 | 125 | 126 | using PyPlot 127 | 128 | function algebraic_scaling() 129 | f = x -> (2 + sin(x))*sqrt(x) 130 | # `x -> ...` defines an anonymous function with a single argument. 131 | # In Matlab, the equivalent syntax is `@(x) ...`. 132 | # In Python, the equivalent syntax is `lambda x: ...`. 133 | 134 | clf() 135 | if true 136 | # Bad: Plot `f(x)` using linear axes. 137 | # Linear axes do not allow us to reliably distinguish between, say, 138 | # `O(x^(1/2))`, `O(x^(1/3))` or even `O(log(x))`. 139 | x = LinRange(1,100,1000) 140 | plot(x, f.(x), label=L"f(x)") # `f.(x)` applies `f` to every element of `x`. 141 | else 142 | # Good: Plot `f(x)` using doubly logarithmic axes 143 | # `f(x)` is then upper-bounded by a straight line whose slope indicates 144 | # the power `p` in `f(x) = O(x^p)`. 145 | x = 10.0.^LinRange(0,3,10000) 146 | loglog(x, f.(x), label=L"f(x)") 147 | loglog(x, 4e0.*sqrt.(x), "k--", label=L"O(\sqrt{x})") 148 | end 149 | xlabel(L"x") # `L"[maths]"` is like writing `$[maths]` in LaTeX 150 | legend() 151 | display(gcf()) 152 | end 153 | 154 | function exponential_scaling() 155 | f = x -> (2 + sin(x))*exp(x) 156 | 157 | clf() 158 | if true 159 | # Bad: Plot `f(x)` using linear axes. 160 | # Linear axes do not allow us to reliably distinguish between, say, 161 | # `O(x^2)` or `O(exp(x))`. 162 | x = LinRange(1,5,1000) 163 | plot(x, f.(x), label=L"f(x)") 164 | else 165 | # Good: Plot `f(x)` using a linear x-axis and a logarithmic y-axis. 166 | # `f(x)` is then upper-bounded by a straight line whose slope indicates 167 | # the base `a` in `f(x) = O(a^x)`. 
168 | x = LinRange(1,100,1000) 169 | semilogy(x, f.(x), label=L"f(x)") 170 | semilogy(x, 1e2.*exp.(x), "k--", label=L"O(\exp(x))") 171 | end 172 | xlabel(L"x") 173 | legend() 174 | display(gcf()) 175 | end 176 | 177 | 178 | 179 | function machine_precision() 180 | # Compute `log(2)` using truncated Taylor series 181 | log2_via_taylor = n -> sum(2.0^(-k)/k for k = 1:n) 182 | 183 | # Plot the convergence 184 | n = 1:100 185 | clf() 186 | semilogy(n[[1,end]], eps().*[1,1], "k--", label=L"\mathrm{eps}()") 187 | semilogy(n, abs.(log(2) .- log2_via_taylor.(n)), "-", label=L"\mathrm{error}(n)") 188 | xlabel(L"n") 189 | legend() 190 | display(gcf()) 191 | end -------------------------------------------------------------------------------- /01_big_o_notation.jl: -------------------------------------------------------------------------------- 1 | function matrix_product(A,B) 2 | @assert size(A,2) == size(B,1) 3 | # `@assert condition` does nothing if `condition` is `true`, and throws an 4 | # error otherwise. 5 | 6 | C = zeros(size(A,1),size(B,2)) 7 | for i = 1:size(A,1) 8 | for j = 1:size(B,2) 9 | for k = 1:size(A,2) 10 | C[i,j] += A[i,k]*B[k,j] 11 | end 12 | end 13 | end 14 | return C 15 | end 16 | 17 | function matrix_product() 18 | n = 1_000 19 | A = zeros(n,n) 20 | B = zeros(n,n) 21 | println(" Naive matrix product: ", @elapsed(matrix_product(A,B)), " seconds") 22 | println("Export matrix product: ", @elapsed(A*B), " seconds") 23 | end 24 | 25 | 26 | 27 | function sum_ij(A) 28 | s = 0.0 29 | for i = 1:size(A,1) 30 | for j = 1:size(A,2) 31 | s += A[i,j] 32 | end 33 | end 34 | return s 35 | end 36 | 37 | function sum_ji(A) 38 | s = 0.0 39 | for j = 1:size(A,2) 40 | for i = 1:size(A,1) 41 | s += A[i,j] 42 | end 43 | end 44 | return s 45 | end 46 | 47 | function matrix_sum() 48 | n = 10_000 49 | A = zeros(n,n) 50 | println("Summing over columns first: ", @elapsed(sum_ji(A)), " seconds") 51 | println(" Summing over rows first: ", @elapsed(sum_ij(A)), " seconds") 52 | end 53 | 54 | 55 | 56 | function sum_if(x,y) 57 | s = 0.0 58 | for i = 1:length(x) 59 | if x[i] 60 | s += y[i] 61 | end 62 | end 63 | return s 64 | end 65 | 66 | function branch_prediction() 67 | # Based on https://stackoverflow.com/q/11227809 68 | 69 | n = 10_000_000 70 | x_rand = rand(Bool,n) 71 | x_sort = sort(x_rand) 72 | y = rand(n) 73 | 74 | println("Conditional sum with random input: ", @elapsed(sum_if(x_rand,y)), " seconds") 75 | println("Conditional sum with sorted input: ", @elapsed(sum_if(x_sort,y)), " seconds") 76 | # `sum_if(x,y)` is much faster if `x` is sorted because branch prediction 77 | # is easier if `x` is of the form `x = [0,0, ..., 0,0,1,1, ... 1,1]` 78 | end 79 | 80 | 81 | 82 | using BenchmarkTools 83 | # Defines the `@belapsed()` macro which estimates the runtime of very short 84 | # operations more accurately by running the operation several times. 85 | 86 | function preasymptotic() 87 | x = zeros(32) 88 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 89 | x = zeros(64) 90 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 91 | end 92 | 93 | #= 94 | Example output: 95 | 96 | Summing 32 numbers: 13.683049147442327 nanoseconds 97 | Summing 64 numbers: 16.608040201005025 nanoseconds 98 | 99 | Twice the amount of computations in same amount of time! 
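(This is the pre-asymptotic regime: for inputs this small the measured time is dominated by fixed overheads such as the function call and loop start-up, and both arrays fit easily into cache, so the O(n) cost of the additions themselves is not yet visible.)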
100 | =# 101 | 102 | 103 | 104 | function asymptotic() 105 | x = zeros(1024) 106 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 107 | x = zeros(2048) 108 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 109 | x = zeros(4096) 110 | println("Summing ", length(x)," numbers: ", 1e9*@belapsed(sum($x)), " nanoseconds") 111 | end 112 | 113 | #= 114 | Example output: 115 | 116 | Summing 1024 numbers: 77.34751037344398 nanoseconds 117 | Summing 2048 numbers: 162.4756258234519 nanoseconds 118 | Summing 4096 numbers: 325.6359649122807 nanoseconds 119 | 120 | Now 121 | twice the number of operations => twice the runtime. 122 | =# 123 | 124 | 125 | 126 | using PyPlot 127 | 128 | function algebraic_scaling() 129 | f = x -> (2 + sin(x))*sqrt(x) 130 | # `x -> ...` defines an anonymous function with a single argument. 131 | # In Matlab, the equivalent syntax is `@(x) ...`. 132 | # In Python, the equivalent syntax is `lambda x: ...`. 133 | 134 | clf() 135 | if true 136 | # Bad: Plot `f(x)` using linear axes. 137 | # Linear axes do not allow us to reliably distinguish between, say, 138 | # `O(x^(1/2))`, `O(x^(1/3))` or even `O(log(x))`. 139 | x = LinRange(1,100,1000) 140 | plot(x, f.(x), label=L"f(x)") # `f.(x)` applies `f` to every element of `x`. 141 | else 142 | # Good: Plot `f(x)` using doubly logarithmic axes 143 | # `f(x)` is then upper-bounded by a straight line whose slope indicates 144 | # the power `p` in `f(x) = O(x^p)`. 145 | x = 10.0.^LinRange(0,3,10000) 146 | loglog(x, f.(x), label=L"f(x)") 147 | loglog(x, 4e0.*sqrt.(x), "k--", label=L"O(\sqrt{x})") 148 | end 149 | xlabel(L"x") # `L"[maths]"` is like writing `$[maths]` in LaTeX 150 | legend() 151 | display(gcf()) 152 | end 153 | 154 | function exponential_scaling() 155 | f = x -> (2 + sin(x))*exp(x) 156 | 157 | clf() 158 | if true 159 | # Bad: Plot `f(x)` using linear axes. 160 | # Linear axes do not allow us to reliably distinguish between, say, 161 | # `O(x^2)` or `O(exp(x))`. 162 | x = LinRange(1,5,1000) 163 | plot(x, f.(x), label=L"f(x)") 164 | else 165 | # Good: Plot `f(x)` using a linear x-axis and a logarithmic y-axis. 166 | # `f(x)` is then upper-bounded by a straight line whose slope indicates 167 | # the base `a` in `f(x) = O(a^x)`. 168 | x = LinRange(1,100,1000) 169 | semilogy(x, f.(x), label=L"f(x)") 170 | semilogy(x, 1e2.*exp.(x), "k--", label=L"O(\exp(x))") 171 | end 172 | xlabel(L"x") 173 | legend() 174 | display(gcf()) 175 | end 176 | 177 | 178 | 179 | function machine_precision() 180 | # Compute Euler's number using the series expansion of `exp(x)`. 181 | exp_sum = n -> Float64(sum(1/factorial(big(k)) for k = 0:n)) 182 | 183 | # Plot the convergence 184 | n = 1:30 185 | clf() 186 | semilogy(n, abs.(exp(1) .- exp_sum.(n)), "o-", label=L"\mathrm{error}(n)") 187 | semilogy(n[[1,end]], eps().*[1,1], "k--", label=L"\mathrm{eps}()") 188 | xlabel(L"n") 189 | legend() 190 | display(gcf()) 191 | # Note how the error stagnates at roughly eps() == 2e-16.
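# This is expected: each Float64 operation carries a relative rounding error of about eps()/2 ≈ 1.1e-16, so once the truncation error of the series drops below that level, adding further terms cannot improve the result. For example, `1.0 + eps()/2 == 1.0` already evaluates to `true`.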
192 | end -------------------------------------------------------------------------------- /02_nonlinear_equations.jl: -------------------------------------------------------------------------------- 1 | function bisection(f, a::Float64,b::Float64) # Ignore the `::Float64` here 2 | # Check that `[a,b]` is a bracketing interval 3 | fa = f(a) 4 | fb = f(b) 5 | @assert sign(fa) != sign(fb) 6 | 7 | # Ensure `a < b` 8 | if b < a; a,b = b,a; end 9 | 10 | # Do the bisection 11 | while b > nextfloat(a) 12 | # Bisect `[a,b]` such that `[a,m]` and `[m,b]` 13 | # contain the same number of `Float64` 14 | m = bisect(a,b) 15 | fm = f(m) 16 | 17 | # Decide which interval to pursue 18 | if sign(fa) != sign(fm) 19 | a,b = a,m 20 | fa,fb = fa,fm 21 | else 22 | a,b = m,b 23 | fa,fb = fm,fb 24 | end 25 | end 26 | 27 | # Return the result. Either `a` or `b` could be returned here. 28 | return b 29 | end 30 | 31 | function bisection_demo() 32 | # Compute the root of `exp(x) - 2` (i.e. `log(2)`) using the bisection 33 | # method. In addition, count the number of function evaluations to 34 | # demonstrate that this number is upper-bounded by roughly 64. 35 | # (It is not exactly 64 because the `bisect()` function is not perfect, and 36 | # because we do check that `[a,b]` is a bracketing interval.) 37 | 38 | count = 0 # counts the number of calls to `f(x)` 39 | f = x -> begin 40 | count +=1 41 | return exp(x)-2 42 | end 43 | x = bisection(f, -Inf,Inf) 44 | println(" Error: ", x - log(2)) 45 | println("# evaluations: ", count) 46 | end 47 | 48 | 49 | ######################################################################### 50 | # WARNING: The following code uses Julia and floating-point features not 51 | # discussed in this module. You are not expected to understand 52 | # how it works (but you are welcome to come talk to me if you 53 | # would like to learn more). 54 | 55 | # Convert `a` and `b` to `Float64` 56 | # This method is needd to ensure that e.g. `bisection(sin,3,4)` works since `3` 57 | # and `4` are ints, not floats. 58 | bisection(f,a,b) = bisection(f, Float64(a),Float64(b)) 59 | 60 | """ 61 | bisect(a::Float64, b::Float64) 62 | 63 | Compute `m::Float64` such that `[a,m]` and `[m,b]` contain the same number of 64 | `Float64` (± some small constant). 65 | """ 66 | function bisect(a::Float64,b::Float64) 67 | a,b = to_int.((a,b)) 68 | m = (a&b) + xor(a,b) >> 1 # Compute `(a+b)÷2` without overflow 69 | return to_float(m) 70 | end 71 | 72 | """ 73 | to_int(float) -> int 74 | 75 | Bijective, monotonous map from `[-Inf,Inf]` to `{-a,...,a}` for some `a > 0`. 76 | Effectively an enumeration of all non-`NaN` `Float64`. 77 | """ 78 | function to_int(float::Float64) 79 | int = reinterpret(Int64,float) 80 | if signbit(float) 81 | int = xor(int, 2^63-1) 82 | end 83 | return int 84 | end 85 | 86 | """ 87 | to_float(int) 88 | 89 | Inverse of `to_int()`. 
90 | """ 91 | function to_float(int::Int64) 92 | if signbit(int) 93 | int = xor(int, 2^63-1) 94 | end 95 | return reinterpret(Float64,int) 96 | end 97 | 98 | ######################################################################### 99 | 100 | 101 | 102 | using Printf 103 | 104 | function bisection_convergence() 105 | # Problem parameters 106 | f = sin 107 | a,b = 3,4 108 | 109 | for k = 0:12 110 | # Print the width of the current bracketing interval 111 | @printf("error(k = %2.d) = %.10f", k, b-a) 112 | println() 113 | if mod(k,3) == 0; println(); end 114 | 115 | # Bisect 116 | m = (b + a)/2 117 | if sign(f(a)) != sign(f(m)) 118 | a,b = a,m 119 | else 120 | a,b = m,b 121 | end 122 | end 123 | end 124 | 125 | function newton_convergence() 126 | # Problem parameters 127 | f = sin 128 | df = cos 129 | x = big(1.0) 130 | 131 | for k = 0:6 132 | # Print the current error 133 | @printf("error(k = %d) = %.100f", k, abs(x)) 134 | println() 135 | 136 | # Do a Newton step 137 | x -= f(x) / df(x) 138 | end 139 | end 140 | 141 | 142 | 143 | using PyPlot 144 | 145 | function newton_linear_convergence() 146 | clf() 147 | for (i,m) = enumerate(2:4) 148 | # Problem parameters 149 | f = x -> x^m 150 | df = x -> m*x^(m-1) 151 | x = 1.0 152 | n = 100 153 | 154 | # Run Newton's method and keep a history of the iterates 155 | x_hist = zeros(n) 156 | for k = 1:n 157 | x_hist[k] = x 158 | x -= f(x) / df(x) 159 | end 160 | 161 | # Plot the convergence history 162 | nn = [50,n] 163 | r = 1 - inv(m) 164 | s = 2 * abs(x_hist[nn[1]-1]) * r^-nn[1] 165 | semilogy(0:n-1, abs.(x_hist), "C$(i-1)", label=latexstring("f(x) = x^$m")) 166 | semilogy(nn, s.*r.^nn, "C$(i-1)--", label=latexstring("O(($(m-1)/$m)^{k})")) 167 | end 168 | xlabel(L"k") 169 | ylabel(L"|x_k - x^\star|") 170 | legend(frameon=false) 171 | display(gcf()) 172 | end 173 | 174 | 175 | 176 | using Printf 177 | 178 | function newton_termination() 179 | # Compute `sqrt(2) = root(x->x^2-2)` using Newton's method and print the 180 | # error in each iteration. 181 | # We observe that after six iterations, Newton's method simply jumps back 182 | # and forth between two values. 183 | 184 | f = x -> x^2 - 2 185 | df = x -> 2x 186 | x = 2.0 187 | for k = 1:15 188 | @printf("error(k = %2d) = % .2e\n", k, (x - sqrt(big(2)))) 189 | x -= f(x) / df(x) 190 | end 191 | end 192 | 193 | 194 | 195 | using Roots 196 | 197 | function roots_examples() 198 | @show find_zero(sin, (3.0,4.0), Roots.Bisection()) 199 | @show find_zero((sin,cos), 3.0, Roots.Newton()) 200 | println() 201 | # @show find_zero(sin, (3.0,4.0), Roots.Bisection(), xatol=0.1) 202 | # @show find_zero((sin,cos), 4.0, Roots.Newton(), xatol=0.1, atol=1e-3) 203 | # ^ `atol = 1e-3` is required due to some quirks in how `find_zero()` 204 | # determines convergence. 
205 | end 206 | -------------------------------------------------------------------------------- /05_krylov.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | using IterativeSolvers 4 | using Random 5 | using SparseArrays 6 | using Printf 7 | 8 | function gmres_example() 9 | # Define linear system 10 | n = 100 11 | A = Diagonal(LinRange(0.1,1.0,n)) 12 | b = ones(n) 13 | 14 | # Call GMRES 15 | x,log = gmres( 16 | A,b; 17 | abstol = 1e-8, # absolute tolerance 18 | reltol = 1e-2, # relative tolerance 19 | maxiter = 1000, # max number of iterations / max degree 20 | restart = n, # ignore for now 21 | log = true, # enable the `log` return value 22 | ) 23 | 24 | # Show errors 25 | println() 26 | @printf("Number of iterations: %d\n", log.iters) 27 | println() 28 | @printf("Absolute residual: %.3e\n", norm(b - A*x)) 29 | @printf(" Absolute error: %.3e\n", norm(A\b - x)) 30 | println() 31 | @printf("Relative residual: %.3e\n", norm(b - A*x)/norm(b)) 32 | @printf(" Relative error: %.3e\n", norm(A\b - x)/norm(A\b)) 33 | end 34 | 35 | 36 | 37 | function convergence_subtleties() 38 | # Define matrices 39 | n = 50 40 | A1 = Diagonal(LinRange(0.1,1.0,n)) 41 | A2 = spdiagm( 42 | 0 => LinRange(0.1,1.0,n), 43 | 1 => fill(0.55, n-1) 44 | ) 45 | b1 = ones(n) 46 | b2 = [zeros(n÷2); ones(n÷2)] 47 | 48 | # Compute condition number of eigenvectors 49 | d1,V1 = eigen(Matrix(A1)) 50 | d2,V2 = eigen(Matrix(A2)) 51 | println(); 52 | @printf("cond(V1,2) = %.1e\n", cond(V1,2)) 53 | @printf("cond(V2,2) = %.1e\n", cond(V2,2)) 54 | 55 | # Plot convergence histories and big O reference lines 56 | clf() 57 | 58 | _,hist = gmres(A1,b1, abstol=1e-6, restart=n, log=true) 59 | semilogy([norm(b1); hist[:resnorm]], "C0", label=L"A_1 x = b_1") 60 | 61 | nn = (5,24) 62 | κ = 10; r = (sqrt(κ)-1)/(sqrt(κ)+1) 63 | semilogy(nn, 2e1.*r.^nn, "C0--", label=L"O(\rho(10)^n)") 64 | 65 | _,hist = gmres(A1,b2, abstol=1e-6, restart=n, log=true) 66 | semilogy([norm(b2); hist[:resnorm]], "C1", label=L"A_1 x = b_2") 67 | 68 | nn = (1,8) 69 | κ = 2; r = (sqrt(κ)-1)/(sqrt(κ)+1) 70 | semilogy(nn, 2e-1.*r.^nn, "C1--", label=L"O(\rho(2)^n)") 71 | 72 | _,hist = gmres(A2,b1, abstol=1e-6, restart=n, log=true) 73 | semilogy([norm(b1); hist[:resnorm]], "C2", label=L"A_2 x = b_1") 74 | 75 | ylim(clamp.(ylim(), 1e-7,Inf)) 76 | xlabel("# iterations") 77 | ylabel("GMRES residual") 78 | legend() 79 | display(gcf()) 80 | end 81 | 82 | 83 | 84 | function restarted_gmres_good() 85 | n = 100 86 | A = Diagonal(LinRange(1,10,n)) 87 | b = ones(n) 88 | 89 | clf() 90 | for (i,k) = enumerate((2,5,10,100)) 91 | _,log = gmres(A,b; log=true, restart = k) 92 | semilogy( 93 | 1:log.iters, 94 | log[:resnorm], 95 | "C$(i-1)-" 96 | ) 97 | semilogy( 98 | 1:k:log.iters, 99 | log[:resnorm][1:k:end], 100 | "C$(i-1)o", 101 | ms = 4 102 | ) 103 | semilogy( 104 | [NaN], [NaN], 105 | "C$(i-1)-o", 106 | label="restart = $k", 107 | ms = 4 108 | ) 109 | end 110 | xlabel("# iterations") 111 | ylabel("GMRES residual") 112 | legend(loc="best") 113 | display(gcf()) 114 | end 115 | 116 | function restarted_gmres_bad() 117 | n = 100 118 | A = Diagonal([0.1; LinRange(1,10,n-1)]) 119 | b = ones(n) 120 | 121 | clf() 122 | for (i,k) = enumerate((2,5,10,100)) 123 | _,log = gmres(A,b; log=true, restart = k) 124 | semilogy( 125 | 1:log.iters, 126 | log[:resnorm], 127 | "C$(i-1)-" 128 | ) 129 | semilogy( 130 | 1:k:log.iters, 131 | log[:resnorm][1:k:end], 132 | "C$(i-1)o", 133 | ms = 4 134 | ) 135 | semilogy( 136 | [NaN], 
[NaN], 137 | "C$(i-1)-o", 138 | label="restart = $k", 139 | ms = 4 140 | ) 141 | end 142 | xlabel("# iterations") 143 | ylabel("GMRES residual") 144 | legend(loc="best") 145 | display(gcf()) 146 | end 147 | 148 | 149 | 150 | function gmres_vs_minres() 151 | n = 200 152 | Random.seed!(42) 153 | A = rand(n,n) 154 | A = A+A' + 12*I 155 | b = rand(n) 156 | 157 | clf() 158 | for (label, log) = ( 159 | ("GMRES", gmres(A,b, log=true, restart=length(b))[2]), 160 | ("MinRes", minres(A,b, log=true)[2]), 161 | ) 162 | semilogy(1:log.iters, log[:resnorm], "-o", ms=2, label=label) 163 | end 164 | xlabel("# iterations") 165 | ylabel("Residual") 166 | legend(frameon=false) 167 | display(gcf()) 168 | end 169 | 170 | function finite_termination() 171 | n = 50 172 | Random.seed!(42) 173 | A = rand(n,n) 174 | A = A+A' 175 | b = rand(n) 176 | 177 | clf() 178 | for (label, log) = ( 179 | ("GMRES", gmres(A,b, log=true, maxiter=2n, restart=2n)[2]), 180 | ("MinRes", minres(A,b, log=true, maxiter=2n)[2]), 181 | ) 182 | semilogy(1:log.iters, log[:resnorm], "-o", ms=2, label=label) 183 | if label == "MinRes" 184 | semilogy([n], [log[:resnorm][n]], "C3o", ms=5, label="Should be zero") 185 | end 186 | end 187 | xlabel("# iterations") 188 | ylabel("Residual") 189 | legend(loc="lower left") 190 | display(gcf()) 191 | end 192 | 193 | 194 | 195 | function laplacian_1d(n) 196 | return (n+1)^2 * spdiagm( 197 | -1 => fill( 1.0,n-1), 198 | 0 => fill(-2.0,n), 199 | 1 => fill( 1.0,n-1) 200 | ) 201 | end 202 | 203 | function laplacian_2d(n) 204 | Δ = laplacian_1d(n) 205 | Id = sparse(I,n,n) 206 | return kron(Id,Δ) + kron(Δ,Id) 207 | end 208 | 209 | function laplacian_3d(n) 210 | Δ = laplacian_1d(n) 211 | Id = sparse(I,n,n) 212 | return kron(Id,Id,Δ) + kron(Id,Δ,Id) + kron(Δ,Id,Id) 213 | end 214 | 215 | 216 | function cg_poisson_1d() 217 | clf() 218 | for n = (500,1000,1500,2000) 219 | Random.seed!(42) 220 | A = -laplacian_1d(n) 221 | b = rand(n) 222 | r = cg(A,b, log = true, reltol = eps())[2][:resnorm] 223 | semilogy(1:length(r), r, label=latexstring("n = $n")) 224 | end 225 | xlabel("# iterations") 226 | ylabel("CG residual") 227 | legend() 228 | display(gcf()) 229 | end 230 | 231 | function cg_poisson_2d() 232 | clf() 233 | for n = (50,100,150,200) 234 | Random.seed!(42) 235 | A = -laplacian_2d(n) 236 | b = rand(n^2) 237 | r = cg(A,b, log = true, reltol = eps())[2][:resnorm] 238 | semilogy(1:length(r), r, label=latexstring("n = $n")) 239 | end 240 | xlabel("# iterations") 241 | ylabel("CG residual") 242 | legend() 243 | display(gcf()) 244 | end 245 | 246 | function cg_poisson_3d() 247 | clf() 248 | for n = (10,20,30,40) 249 | Random.seed!(42) 250 | A = -laplacian_3d(n) 251 | b = rand(n^3) 252 | r = cg(A,b, log = true, reltol = eps())[2][:resnorm] 253 | κ = 4*(n+1)^2/π^2 254 | semilogy(1:length(r), r, label=latexstring("n = $n")) 255 | end 256 | xlabel("# iterations") 257 | ylabel("CG residual") 258 | legend() 259 | display(gcf()) 260 | end -------------------------------------------------------------------------------- /06_runge_kutta.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using LinearAlgebra 3 | using Roots 4 | 5 | function euler_step(f,y0,t) 6 | return y0 + f(y0)*t 7 | end 8 | 9 | function trapezoidal_step(f,y0,t) 10 | f1 = t*f(y0) 11 | f2 = t*f(y0 + f1) 12 | return y0 + (f1 + f2)/2 13 | end 14 | 15 | function rk4_step(f,y0,t) 16 | f1 = t*f(y0) 17 | f2 = t*f(y0 + f1/2) 18 | f3 = t*f(y0 + f2/2) 19 | f4 = t*f(y0 + f3) 20 | return y0 + f1/6 + f2/3 + f3/3 + f4/6 
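# (The line above uses the weights 1/6, 1/3, 1/3, 1/6 of the classical fourth-order Runge-Kutta method; the stages f1..f4 evaluate f at the start of the step, twice at the midpoint, and at the end.)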
21 | end 22 | 23 | function propagate(f,y0,T,n,step) 24 | y = Vector{typeof(y0)}(undef,n) 25 | y[1] = y0 26 | for i = 2:n 27 | y[i] = step(f,y[i-1],T/(n-1)) 28 | end 29 | return y 30 | end 31 | 32 | function example() 33 | f = y->[ y[2], -y[1] ] 34 | y0 = [ 1.0, 0.0 ] 35 | n = 20 36 | t = LinRange(0,2π,n) 37 | 38 | clf() 39 | tt = LinRange(0,t[end],1000) 40 | plot(tt, cos.(tt), "k", label="exact") 41 | for (name,step) in ( 42 | ("Euler", euler_step), 43 | # ("trapezoidal", trapezoidal_step), 44 | # ("RK4", rk4_step), 45 | ) 46 | ỹ = propagate(f,y0,t[end],n, step) 47 | plot(t, [ỹ[i][1] for i = 1:n], label=name) 48 | end 49 | xlabel(L"t") 50 | ylabel(L"y(t)") 51 | legend(frameon=false) 52 | display(gcf()) 53 | end 54 | 55 | function convergence() 56 | f = y -> -y 57 | y0 = 1.0 58 | T = 0.5 59 | y = t -> exp(-t) 60 | 61 | clf() 62 | for (i,(name,step,p)) in enumerate(( 63 | ("Euler", euler_step, 1), 64 | # ("trapezoidal", trapezoidal_step, 2), 65 | # ("RK4", rk4_step, 4), 66 | # ("implicit Euler", implicit_euler_step, 1), 67 | # ("implicit trapezoidal", implicit_trapezoidal_step, 2), 68 | )) 69 | n = round.(Int, 10.0.^LinRange(0.0,log10(1e3/p),30)) 70 | error = [begin 71 | ỹ = propagate(f,y0,T,n, step) 72 | abs(y(T) - ỹ[end]) / abs(y(T)) 73 | end for n in n] 74 | loglog(p.*n, error, label=name) 75 | nn = (4e1,1e3) 76 | loglog(nn, 2e-1.*inv.(nn).^p, "C$(i-1)--", label=latexstring("O(n^{-$p})")) 77 | end 78 | legend(frameon=false) 79 | xlabel("# function evaluations") 80 | ylabel("Error at final time") 81 | display(gcf()) 82 | end 83 | 84 | function nsteps() 85 | λ = 1.0 86 | f = y->λ*y 87 | y0 = one(λ) 88 | y = t->exp(λ*t) 89 | T = LinRange(0,3,11) 90 | τ = 1e-3 91 | 92 | n = [begin 93 | n = 2 94 | while true 95 | n = round(Int, n*1.3) 96 | ỹ = propagate(f,y0,T,n, euler_step) 97 | t = LinRange(0,T,n) 98 | if abs(y(t[end]) - ỹ[end]) < τ 99 | break 100 | end 101 | end 102 | n 103 | end for T in T] 104 | 105 | clf() 106 | if (liny = true) 107 | plot(T,n) 108 | else 109 | semilogy(T,n) 110 | end 111 | xlabel(L"Final time $T$") 112 | ylabel(L"Number of steps $n$ to meet error tolerance") 113 | display(gcf()) 114 | end 115 | 116 | 117 | 118 | function embedded_ET_step(f,y0,t) 119 | f1 = t*f(y0) 120 | y_euler = y0 + f1 121 | f2 = t*f(y_euler) 122 | y_trapezoidal = y0 + (f1+f2)/2 123 | return y_euler, y_trapezoidal 124 | end 125 | 126 | function propagate_adaptively(f,y0,T,τ,step,p) 127 | 128 | # Initialise storage for the trajectory 129 | t = Vector{Float64}() 130 | y = Vector{typeof(y0)}() 131 | push!(t, 0.0) 132 | push!(y, y0) 133 | 134 | # Initial trial step size 135 | Δt = T 136 | 137 | # Counts how often we had to recompute a step. 
For demonstration purposes only 138 | n_rejected = 0 139 | 140 | # Loop until we reach the final time 141 | while t[end] < T 142 | 143 | # Compute Runge-Kutta solutions with the trial step size 144 | ŷ,ŷref = step(f,y[end],Δt) 145 | 146 | # Estimate what the optimal step size would have been 147 | Δt_opt = (τ * Δt^p / norm(ŷ - ŷref))^(1/(p-1)) 148 | 149 | # Check if trial step size was small enough 150 | if Δt <= Δt_opt 151 | # If so, add the trial step to the trajectory 152 | push!(t,t[end]+Δt) 153 | push!(y,ŷref) 154 | else 155 | n_rejected += 1 156 | end 157 | 158 | # Update the trial step size 159 | Δt = min(0.9*Δt_opt, T-t[end]) 160 | end 161 | 162 | return t,y, n_rejected 163 | end 164 | 165 | function step_example() 166 | f = y->cos(y)^2 167 | y0 = -1.56 168 | T = 200 169 | τ = 1e-4 170 | 171 | t,y,n_rejected = propagate_adaptively(f,y0,T,τ, embedded_ET_step, 2) 172 | 173 | Δt = minimum(diff(t)) 174 | n_fixed = round(Int, T/Δt) 175 | println(" # adaptive steps: ", length(t)-1) 176 | println("# fixed-size steps: ", n_fixed) 177 | println(" Ratio: ", round(n_fixed/(length(t)-1), sigdigits=3)) 178 | println(" # rejected steps: ", n_rejected) 179 | 180 | clf() 181 | plot(t, zero.(t), "ko", ms=2, label=L"t_k") 182 | plot(t,y, "-", label=L"y(t)") 183 | xlabel(L"t") 184 | legend(frameon=false) 185 | display(gcf()) 186 | end 187 | 188 | function decay_example() 189 | f = y->-y 190 | y0 = 1.0 191 | T = 50 192 | τ = 1e-4 193 | 194 | if (explicit = true) 195 | t,y,_ = propagate_adaptively(f,y0,T,τ, embedded_ET_step, 2) 196 | else 197 | t,y,_ = propagate_adaptively(f,y0,T,τ, embedded_implicit_ET_step, 2) 198 | end 199 | 200 | clf() 201 | plot(t, fill(0.5, length(t)), "ko", ms=3, label=L"t_k") 202 | if (logy = false) 203 | semilogy(t,abs.(y), label=L"|y(t)|") 204 | else 205 | plot(t,y, label=L"y(t)") 206 | end 207 | xlabel(L"t") 208 | legend(frameon=false) 209 | display(gcf()) 210 | end 211 | 212 | function stepsize() 213 | f = y->-y 214 | y0 = 1.0 215 | T = 140 216 | τ = 1e-6 217 | 218 | if (explicit = true) 219 | t,y,_ = propagate_adaptively(f,y0,T,τ, embedded_ET_step, 2) 220 | else 221 | t,y,_ = propagate_adaptively(f,y0,T,τ, embedded_implicit_ET_step, 2) 222 | end 223 | 224 | clf() 225 | semilogy(t[2:end],diff(t)) 226 | semilogy([t[1],t[end]],[2,2], "k", lw=0.5, label=L"\Delta t = 2") 227 | xlabel(L"t") 228 | ylabel(L"step size $\Delta t$") 229 | display(gcf()) 230 | end 231 | 232 | function implicit_euler_step(f,y0,t) 233 | return find_zero( 234 | y -> y0 + f(y)*t - y, 235 | euler_step(f,y0,t) 236 | ) 237 | end 238 | 239 | function implicit_trapezoidal_step(f,y0,t) 240 | f1 = t*f(y0) 241 | return find_zero( 242 | y -> y0 + (f1 + f(y)*t)/2 - y, 243 | trapezoidal_step(f,y0,t) 244 | ) 245 | end 246 | 247 | function embedded_implicit_ET_step(f,y0,t) 248 | f1 = t*f(y0) 249 | y_trapezoidal = find_zero( 250 | y -> y0 + (f1 + f(y)*t)/2 - y, 251 | trapezoidal_step(f,y0,t) 252 | ) 253 | y_quasi_euler = y0 + f(y_trapezoidal)*t 254 | return y_quasi_euler, y_trapezoidal 255 | end 256 | 257 | function stability_example() 258 | f = y->-y 259 | y0 = 1.0 260 | T = 10 261 | 262 | Δt = (1.8,2.0,2.2) 263 | step = euler_step 264 | # step = trapezoidal_step 265 | 266 | # Δt = 1:5 267 | # step = implicit_euler_step 268 | # step = implicit_trapezoidal_step 269 | 270 | clf() 271 | t = LinRange(0,T,1000) 272 | plot(t, exp.(-t), "k", label=L"y(t)") 273 | for Δt = Δt 274 | n = round(Int,T/Δt) 275 | t = Δt.*(0:n) 276 | y = propagate(f,y0,n*Δt,n+1,step) 277 | plot(t,y, label=latexstring("\\Delta t = $Δt")) 278 | end 279 
| xlabel(L"t") 280 | legend(frameon=false, loc="center left", bbox_to_anchor=(1,0.5)) 281 | display(gcf()) 282 | end 283 | -------------------------------------------------------------------------------- /08_monte_carlo.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using SpecialFunctions 3 | using StaticArrays 4 | using Printf 5 | using Distributions 6 | 7 | 8 | ############################################################################### 9 | # mnk-game 10 | 11 | function mnk_probabilities() 12 | m,n,k = 3,3,3 13 | N = 100 14 | 15 | println("Runtimes:") 16 | time_sum = @elapsed p_sum = mnk_probabilities_via_summation(m,n,k) 17 | time_mc = @elapsed p_mc = mnk_probabilities_via_monte_carlo(m,n,k,N) 18 | 19 | println(" Summation: ", @sprintf("%.6f", time_sum), " sec") 20 | println(" Monte Carlo: ", @sprintf("%.6f", time_mc), " sec") 21 | 22 | s = Matrix{String}(undef,3,3) 23 | s[:,1] = (p->@sprintf("%.3f",p)).(p_sum) 24 | s[:,2] = (p->@sprintf("%.3f",p)).(p_mc) 25 | s[:,3] = (p->@sprintf("%.3f",p)).(abs.(p_mc .- p_sum)) 26 | 27 | println() 28 | println("Results:") 29 | println(" | sum | MC | error ") 30 | println("-------------------+-------+-------+-------") 31 | println("Win for player 1: | ", s[1,1], " | ", s[1,2], " | ", s[1,3]) 32 | println("Win for player 2: | ", s[2,1], " | ", s[2,2], " | ", s[2,3]) 33 | println(" Draw: | ", s[3,1], " | ", s[3,2], " | ", s[3,3]) 34 | println() 35 | end 36 | 37 | function mnk_probabilities_via_summation(m,n,k) 38 | board = zeros(Int,m,n) # Current board state 39 | moves = vec([(i,j) for i = 1:m, j = 1:n]) # Remaining unoccupied squares 40 | player = 1 # Current player 41 | 42 | function recurse(board,moves,player) 43 | # If we run out of moves, then the game is a draw. 
44 | if isempty(moves) 45 | return [0.0,0.0,1.0] 46 | end 47 | 48 | p = zeros(3) 49 | for idx = 1:length(moves) 50 | # Play out the current move 51 | i,j = pop_fast!(moves,idx) 52 | board[i,j] = player 53 | 54 | # Claim victory or recurse 55 | if winning_move(board,k,i,j) 56 | p[player] += 1/(length(moves)+1) 57 | else 58 | p .+= recurse(board,moves,mod1(player+1,2))./(length(moves)+1) 59 | end 60 | 61 | # Reset the board and list of moves 62 | board[i,j] = 0 63 | push!(moves,(i,j)) 64 | moves[idx],moves[end] = moves[end],moves[idx] 65 | end 66 | return p 67 | end 68 | 69 | return recurse(board,moves,player) 70 | end 71 | 72 | function mnk_probabilities_via_monte_carlo(m,n,k,N) 73 | score = zeros(3) 74 | for i = 1:N 75 | board = zeros(Int,m,n) 76 | moves = vec([(i,j) for i = 1:m, j = 1:n]) 77 | player = 1 78 | winner = 3 79 | 80 | # Make random moves until none left 81 | while !isempty(moves) 82 | i,j = pop_fast!(moves, rand(1:length(moves))) 83 | board[i,j] = player 84 | if winning_move(board,k,i,j) 85 | winner = player 86 | break 87 | end 88 | player = mod1(player+1,2) 89 | end 90 | score[winner] += 1 91 | end 92 | return score./N 93 | end 94 | 95 | function pop_fast!(vec,idx) 96 | vec[idx],vec[end] = vec[end],vec[idx] 97 | return pop!(vec) 98 | end 99 | 100 | """ 101 | count_equals(board,i,j,di,dj) 102 | 103 | Find the largest integer `c` such that 104 | ``` 105 | board[i+k*di,j+k*di] == board[i,j] for k = 1:c 106 | ``` 107 | """ 108 | function count_equals(board,i,j,di,dj) 109 | c = 1 110 | while i+c*di in 1:size(board,1) && 111 | j+c*dj in 1:size(board,2) && 112 | board[i+c*di,j+c*dj] == board[i,j] 113 | c += 1 114 | end 115 | return c - 1 116 | end 117 | 118 | """ 119 | winning_move(board,k,i,j) 120 | 121 | Check whether the player who owns `board[i,j]` has won the game by taking 122 | that square. 123 | """ 124 | winning_move(board,k,i,j) = 125 | count_equals(board,i,j, 0,-1) + count_equals(board,i,j, 0, 1) + 1 >= k || 126 | count_equals(board,i,j,-1, 0) + count_equals(board,i,j, 1, 0) + 1 >= k || 127 | count_equals(board,i,j,-1,-1) + count_equals(board,i,j, 1, 1) + 1 >= k || 128 | count_equals(board,i,j,-1, 1) + count_equals(board,i,j, 1,-1) + 1 >= k 129 | 130 | ############################################################################### 131 | 132 | 133 | ############################################################################### 134 | # High-dimensional integrals 135 | 136 | function integral_via_quadrature() 137 | d = (2,4,8,16) 138 | N = round.(Int, 2.0.^LinRange(0,16,17)) 139 | f = x-> exp(-sum(x.^2)) 140 | I = sqrt(π)*erf(1)/2 141 | ylims = [1e-8,4e0] 142 | 143 | clf() 144 | title(L"Midpoint quadrature ($p = 2$)") 145 | for (i,d) in enumerate(d) 146 | n = round.(Int, N.^(1/d)) 147 | loglog(n.^d, [abs(I^d - midpoint(f,d,n))/I^d for n in n], "C$(i-1)", label="d = $d") 148 | 149 | NN = (1e2,N[end]) 150 | offset = (5e-2,1e-1,2e-1,5e-1) 151 | loglog(NN, offset[i].*inv.(NN).^(2/d), "C$(i-1)--", label=latexstring("O(N^{-$(2/d)})")); 152 | end 153 | xlabel(L"# function evaluations $N$") 154 | ylabel("Error") 155 | legend() 156 | ylim(ylims) 157 | display(gcf()) 158 | end 159 | 160 | """ 161 | midpoint(f,d,n) 162 | 163 | Compute the integral of `f` over `[0,1]^d` using the midpoint rule with `n` 164 | quadrature points in each dimension. 
165 | """ 166 | midpoint(f,d,n) = midpoint_nested(f,n,ntuple(k->n,d)) 167 | function midpoint_nested(f,n,nn) 168 | q = 0.0 169 | x = LinRange(0,1,2n+1)[2:2:end-1] 170 | for i in CartesianIndices(nn) 171 | q += f((ik->x[ik]).(i.I)) 172 | end 173 | return q/n^length(nn) 174 | end 175 | 176 | function integral_via_monte_carlo() 177 | d = (2,4,8,16) 178 | N = round.(Int, 2.0.^LinRange(0,16,101)) 179 | f = x-> exp(-sum(x.^2)) 180 | I = sqrt(π)*erf(1)/2 181 | ylims = [1e-8,4e0] 182 | 183 | clf() 184 | title("Monte Carlo sampling") 185 | for d in d 186 | loglog(N, [abs(I^d - monte_carlo_integral(f,d,N))/I^d for N in N], label="d = $d") 187 | end 188 | NN = (1e2,N[end]) 189 | loglog(NN, 6e0.*sqrt.(inv.(NN)), "k--", label=L"O(N^{-0.5})") 190 | xlabel(L"# function evaluations $N$") 191 | ylabel("Error") 192 | legend() 193 | ylim(ylims) 194 | display(gcf()) 195 | end 196 | 197 | """ 198 | monte_carlo_integral(f,d,N) 199 | 200 | Compute the integral of `f` over `[0,1]^d` using `N` uniformly distributed 201 | samples. 202 | """ 203 | monte_carlo_integral(f,d,N) = monte_carlo_integral(f,Val(d),N) 204 | function monte_carlo_integral(f,::Val{d},N) where {d} 205 | # The above `Val{d}` and the below `@SVector` are Julia-specific 206 | # performance optimisation. I recommend you ignore them. 207 | q = 0.0 208 | for i = 1:N 209 | q += f(@SVector rand(d)) 210 | end 211 | return q/N 212 | end 213 | 214 | 215 | 216 | ############################################################################### 217 | # Random number generation 218 | 219 | using BenchmarkTools 220 | using Random 221 | 222 | function rng_benchmark() 223 | println("Pseudo-random number generator:") 224 | rng = MersenneTwister() 225 | @btime rand($rng) 226 | println() 227 | println("True random number generator:") 228 | rng = RandomDevice() 229 | @btime rand($rng) 230 | end 231 | 232 | function inverse_transform() 233 | U = rand(10_000) 234 | X = sqrt.(U) 235 | 236 | clf() 237 | hist(X; bins = 100, density = true, label="empirical PDF") 238 | xx = LinRange(0,1,1000) 239 | plot(xx, 2xx, "k", label="theoretical PDF") 240 | xlabel(L"x") 241 | legend() 242 | display(gcf()) 243 | end 244 | 245 | function bernoulli() 246 | p = 0.3 247 | U = rand(10_000) 248 | X = U .>= (1-p) 249 | 250 | println("Theoretical P(X = 1): ", p) 251 | println(" Empirical P(X = 1): ", mean(X)) 252 | end 253 | 254 | function rejection_sampling() 255 | # Target distribution 256 | p = x -> ifelse(0 <= x <= 1, 2x, 0.0) 257 | # p = x -> ifelse(0 <= x <= 1, 6*x*(1-x), 0.0) 258 | # p = x -> pdf(Normal(0.5,0.1),x) 259 | 260 | # Proposal distribution 261 | DQ = Uniform(0,1) 262 | # DQ = Normal(0.5,0.25) 263 | q = x -> pdf(DQ,x) 264 | 265 | # Compute `M` such that `p(x) ≤ M*q(x)` 266 | x = rand(DQ,1_000_000) 267 | M = maximum(p.(x)./q.(x)) 268 | 269 | # Log of proposals. 
For demonstration only 270 | Qlog = Float64[] 271 | 272 | # Do the rejection sampling 273 | function sample() 274 | while true 275 | Q = rand(DQ) 276 | push!(Qlog,Q) 277 | if rand() <= p(Q)/(M*q(Q)) 278 | return Q 279 | end 280 | end 281 | end 282 | t = @elapsed X = [sample() for k = 1:10_000] 283 | 284 | println("Runtime: ", round(t, digits=4), " seconds") 285 | println() 286 | println("# proposals until acceptance:") 287 | println(" Theoretical: ", M) 288 | println(" Empirical: ", length(Qlog)/length(X)) 289 | 290 | clf() 291 | 292 | # Plot empirical PDF 293 | q̃,x = hist(Qlog; bins = 100, density = true, color="white"); 294 | bar(x[1:end-1],M*q̃, diff(x), align="edge", color="C4", alpha=0.5, label="empirical proposal PDF") 295 | hist(X; bins = 100, density = true, label="empirical target PDF") 296 | 297 | # Plot theoretical PDF 298 | xx = LinRange(0,1,1000) 299 | plot(xx, p.(xx), "k", label="theoretical PDF") 300 | 301 | xlabel(L"x") 302 | legend(frameon=false, bbox_to_anchor=(1.0,0.5), loc="center left") 303 | display(gcf()) 304 | end 305 | 306 | function importance_sampling() 307 | Y = rand(10_000) 308 | E_X = mean(2.0.*Y.^2) 309 | 310 | println(" Exact expectation: ", 2/3) 311 | println("Estimated expectation: ", E_X) 312 | end 313 | -------------------------------------------------------------------------------- /09_stochastic_sir_model.jl: -------------------------------------------------------------------------------- 1 | using PyPlot 2 | using DifferentialEquations 3 | using Random 4 | using Statistics 5 | using BenchmarkTools 6 | using StatsBase 7 | 8 | 9 | ################################################################################ 10 | # Deterministic SIR 11 | 12 | function solve_dSIR(N,I0,a,b,T) 13 | problem = ODEProblem( 14 | (y,p,t) -> begin 15 | S,I = y 16 | return [ 17 | -a*S*I/N, 18 | +a*S*I/N - b*I 19 | ] 20 | end, 21 | Float64[N-I0,I0], 22 | (0.0,T) 23 | ) 24 | solution = solve(problem, Heun()) 25 | t = solution.t 26 | S = solution[1,:] 27 | I = solution[2,:] 28 | return t,S,I 29 | end 30 | 31 | function plot_dSIR() 32 | N = 1000 33 | I0 = round(Int, 0.01*N) 34 | a = 2 35 | b = 1 36 | T = 15.0 37 | 38 | clf() 39 | 40 | t,S,I = solve_dSIR(N,I0,a,b,T) 41 | R = N.-S.-I 42 | plot(t,S, "C0-", label="susceptible") 43 | plot(t,I, "C1-", label="infected") 44 | plot(t,R, "C2-", label="recovered") 45 | 46 | legend() 47 | xlim([0,T]) 48 | ylim([0,N]) 49 | xlabel("time") 50 | display(gcf()) 51 | end 52 | 53 | 54 | 55 | ################################################################################ 56 | # Stochastic SIR 57 | 58 | function solve_sSIR(N,I0,a,b,T) 59 | # Current state. Only these variables are needed to run the algorithm. 
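# (Algorithm note: the loop below is essentially Gillespie's stochastic simulation algorithm. With S susceptible and I infected individuals, infections occur at rate a*S*I/N and recoveries at rate b*I, so the waiting times to the next event of each kind are exponentially distributed with those rates, and whichever event fires first is played out.)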
60 | t = 0.0 61 | S = N-I0 62 | I = I0 63 | 64 | # History of past states for postprocessing 65 | t_history = [0.0] 66 | S_history = [S] 67 | I_history = [I0] 68 | 69 | # Play out infection and recovery events until we reach the final time 70 | while t < T 71 | # Sample times until the next infection and recovery events 72 | dt_infect = randexp() / (a/N*S*I) 73 | dt_recover = randexp() / (b*I) 74 | 75 | # Play out whichever event happens first 76 | if dt_infect < dt_recover 77 | t += dt_infect 78 | S -= 1 79 | I += 1 80 | else 81 | t += dt_recover 82 | I -= 1 83 | end 84 | 85 | # Push the current state onto the history 86 | push!(t_history,t) 87 | push!(S_history,S) 88 | push!(I_history,I) 89 | end 90 | 91 | # Final history entry corresponds to time > T 92 | # Throw out that entry and instead duplicate the penultimate entry 93 | t_history[end] = T 94 | S_history[end] = S_history[end-1] 95 | I_history[end] = I_history[end-1] 96 | 97 | return t_history,S_history,I_history 98 | end 99 | 100 | function plot_sSIR() 101 | if (regime = true) 102 | N = 1000 103 | I0 = round(Int, 0.01*N) 104 | a = 2 105 | b = 1 106 | T = 15.0 107 | ylims = [0,N] 108 | else 109 | # Random.seed!(1) 110 | N = 1_000_000 111 | I0 = 10 112 | a = 1 113 | b = 1.1 114 | T = 50.0 115 | ylims = [0,60] 116 | end 117 | 118 | clf() 119 | 120 | t,S,I = solve_dSIR(N,I0,a,b,T) 121 | R = N.-S.-I 122 | plot(t,S, "C0--") 123 | plot(t,I, "C1--") 124 | plot(t,R, "C2--") 125 | 126 | t,S,I = solve_sSIR(N,I0,a,b,T) 127 | R = N.-S.-I 128 | step(t,S, color="C0", where="post", label="susceptible") 129 | step(t,I, color="C1", where="post", label="infected") 130 | step(t,R, color="C2", where="post", label="recovered") 131 | 132 | plot([],[], "k--", label="deterministic solutions") 133 | 134 | legend(frameon=false, bbox_to_anchor=(1.0,0.5), loc="center left") 135 | xlim([0,T]) 136 | ylim(ylims) 137 | xlabel("time") 138 | 139 | display(gcf()) 140 | end 141 | 142 | 143 | 144 | ################################################################################ 145 | # Max I distribution 146 | 147 | function max_I_distribution() 148 | # Model parameters 149 | N = 1_000_000 150 | I0 = 10 151 | a = 1 152 | b = 1.1 153 | T = Inf 154 | 155 | # Numerical parameters 156 | n = 10_000 157 | I_cut = 80 158 | ylims = [1e-5,1] 159 | 160 | # Play out many epidemics and keep counts of the outcomes 161 | p_dir = zeros(I_cut) 162 | for i = 1:n 163 | t,S,I = solve_sSIR(N,I0,a,b,T) 164 | max_I = maximum(I) 165 | if max_I <= I_cut 166 | p_dir[max_I] += 1/n 167 | end 168 | end 169 | 170 | clf() 171 | plot(1:length(p_dir),p_dir, "C0") 172 | if (errors = false) 173 | e = @. 3 * sqrt( p_dir * (1-p_dir) / n ) 174 | fill_between(1:I_cut, p_dir.-e, p_dir.+e, color="C0", alpha=0.5) 175 | end 176 | yscale("log") 177 | xlabel("[max I]") 178 | ylabel("P([max I])") 179 | xlim([I0,I_cut]) 180 | ylim(ylims) 181 | display(gcf()) 182 | end 183 | 184 | 185 | ################################################################################ 186 | # Importance sampling 187 | 188 | function solve_isSIR(N,I0,a,b,p_floor,T) 189 | # Current state. Only these variables are needed to run the algorithm. 
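# (Algorithm note: `p` accumulates the likelihood ratio between the natural dynamics and the biased dynamics used below, in which infection and recovery are proposed with probability q = 0.5 each whenever the probability budget `p_floor` allows it. Weighting each trajectory by its final `p` therefore gives unbiased estimates of event probabilities under the true dynamics.)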
190 | t = 0.0 191 | S = N-I0 192 | I = I0 193 | p = 1.0 194 | 195 | # History of past states for postprocessing 196 | t_history = [0.0] 197 | S_history = [S] 198 | I_history = [I0] 199 | p_history = [p] 200 | 201 | # Play out infection and recovery events until we reach the final time 202 | while t < T 203 | # Precompute infection and recovery propensities 204 | A = a/N*S*I 205 | B = b*I 206 | 207 | # Sample the time until the next event (of either kind) 208 | dt = randexp() / (A+B) 209 | t += dt 210 | 211 | # Decide whether the next event should be an infection or recovery. 212 | # We want to simulate trajectories of varying but not exceedingly small 213 | # probability. I therefore introduce a probability floor `p_floor` 214 | # and proceed as follows. 215 | 216 | # Check whether infection is at all possible 217 | if S > 0 218 | # If the current probability budget allows it... 219 | q = 0.5 220 | if p * min( A/q, B/(1-q) ) / (A+B) > p_floor 221 | # ... pick an event type at random and update the 222 | # probability score using the importance sampling formula 223 | infect = rand() < q 224 | p *= ifelse( infect, A/q, B/(1-q) ) / (A+B) 225 | else 226 | # Otherwise, follow the natural dynamics so we do not lose any 227 | # further probability mass 228 | infect = rand() < A/(A+B) 229 | end 230 | else 231 | infect = false 232 | end 233 | 234 | # Play out the event 235 | if infect 236 | S -= 1 237 | I += 1 238 | else 239 | I -= 1 240 | end 241 | 242 | # Push the current state onto the history 243 | push!(t_history,t) 244 | push!(S_history,S) 245 | push!(I_history,I) 246 | push!(p_history,p) 247 | end 248 | 249 | # Final history entry corresponds to time > T 250 | # Throw out that entry and instead duplicate the penultimate entry 251 | t_history[end] = T 252 | S_history[end] = S_history[end-1] 253 | I_history[end] = I_history[end-1] 254 | p_history[end] = p_history[end-1] 255 | 256 | return t_history,S_history,I_history,p_history 257 | end 258 | 259 | function plot_isSIR() 260 | # Model parameters 261 | N = 1_000_000 262 | I0 = 10 263 | a = 1 264 | b = 1.1 265 | T = 50.0 266 | 267 | # Numerical parameters 268 | p_floor = 1e-3 269 | 270 | clf() 271 | 272 | subplot(2,1,1) 273 | t,S,I = solve_dSIR(N,I0,a,b,T) 274 | R = N.-S.-I 275 | plot(t,I, "k--", label=L"deterministic $I$") 276 | 277 | t,S,I = solve_sSIR(N,I0,a,b,T) 278 | step(t,I, color="C1", where="post", label=L"unbiased $I$") 279 | 280 | t,S,I,p = solve_isSIR(N,I0,a,b,p_floor,T) 281 | step(t,I, color="C3", where="post", label=L"biased $I$") 282 | 283 | legend(frameon=false, bbox_to_anchor=(1.0,0.5), loc="center left") 284 | xlim([0,T]) 285 | gca().get_xaxis().set_visible(false) 286 | 287 | subplot(2,1,2, sharex=gca()) 288 | step(t,p, label="probability") 289 | legend(frameon=false, bbox_to_anchor=(1.0,0.5), loc="center left") 290 | yscale("log") 291 | xlabel("time") 292 | 293 | display(gcf()) 294 | end 295 | 296 | function max_I_distribution_with_bias() 297 | # Model parameters 298 | N = 1_000_000 299 | I0 = 10 300 | a = 1 301 | b = 1.1 302 | T = Inf 303 | 304 | # Numerical parameters 305 | n = 10_000 306 | I_cut = 80 307 | ylims = [1e-5,1] 308 | p_floor = 0.1 309 | 310 | # Play out many epidemics and keep counts of the outcomes 311 | p_dir = zeros(I_cut) 312 | for i = 1:n 313 | t,S,I = solve_sSIR(N,I0,a,b,T) 314 | max_I = maximum(I) 315 | if max_I <= I_cut 316 | p_dir[max_I] += 1/n 317 | end 318 | end 319 | 320 | # Same as above, but now with importance sampling bias 321 | p_imp = zeros(I_cut) 322 | p2_imp = zeros(I_cut) 323 | for i = 1:n 
324 | t,S,I,p = solve_isSIR(N,I0,a,b,p_floor,T) 325 | max_I = maximum(I) 326 | if max_I <= I_cut 327 | p_imp[max_I] += p[end]/n 328 | p2_imp[max_I] += p[end]^2/n 329 | end 330 | end 331 | 332 | clf() 333 | 334 | plot(1:I_cut,p_dir, "C0") 335 | e = @. 3 * sqrt( p_dir * (1-p_dir) / n ) 336 | fill_between(1:I_cut, p_dir.-e, p_dir.+e, color="C0", alpha=0.5) 337 | 338 | plot(1:I_cut,p_imp, "C1") 339 | e = @. 3 * sqrt( (p2_imp - p_imp^2) / n ) 340 | fill_between(1:I_cut, p_imp.-e, p_imp.+e, color="C1", alpha=0.5) 341 | yscale("log") 342 | xlabel("[max I]") 343 | ylabel("P([max I])") 344 | xlim([I0,I_cut]) 345 | ylim(ylims) 346 | display(gcf()) 347 | end 348 | 349 | 350 | ################################################################################ 351 | # Sampling Exponential(p) 352 | 353 | function exp_sampling() 354 | p = 2 355 | U = rand(10_000) 356 | X = @. -log(1-U)/p 357 | 358 | clf() 359 | xx = LinRange(0,maximum(X),1000) 360 | plot(xx, @.(p*exp(-p*xx)), "k-", label="theoretical PDF") 361 | hist(X; bins = 100, density = true, label="empirical PDF") 362 | legend() 363 | display(gcf()) 364 | end 365 | 366 | function exp_benchmark() 367 | println("Runtime -log(1-rand())") 368 | @btime -log(1-rand()) 369 | println() 370 | println("Runtime randexp()") 371 | @btime randexp() 372 | end 373 | -------------------------------------------------------------------------------- /Manifest.toml: -------------------------------------------------------------------------------- 1 | # This file is machine-generated - editing it directly is not advised 2 | 3 | [[AbstractAlgebra]] 4 | deps = ["InteractiveUtils", "LinearAlgebra", "Markdown", "Random", "RandomExtensions", "SparseArrays", "Test"] 5 | git-tree-sha1 = "0633f6981ad1f6fc01c26daef94a5241c6632e86" 6 | uuid = "c3fe647b-3220-5bb0-a1ea-a7954cac585d" 7 | version = "0.13.6" 8 | 9 | [[AbstractFFTs]] 10 | deps = ["LinearAlgebra"] 11 | git-tree-sha1 = "051c95d6836228d120f5f4b984dd5aba1624f716" 12 | uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" 13 | version = "0.5.0" 14 | 15 | [[AbstractTrees]] 16 | git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5" 17 | uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" 18 | version = "0.3.4" 19 | 20 | [[Adapt]] 21 | deps = ["LinearAlgebra"] 22 | git-tree-sha1 = "ffcfa2d345aaee0ef3d8346a073d5dd03c983ebe" 23 | uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" 24 | version = "3.2.0" 25 | 26 | [[ArnoldiMethod]] 27 | deps = ["LinearAlgebra", "Random", "StaticArrays"] 28 | git-tree-sha1 = "f87e559f87a45bece9c9ed97458d3afe98b1ebb9" 29 | uuid = "ec485272-7323-5ecc-a04f-4719b315124d" 30 | version = "0.1.0" 31 | 32 | [[ArrayInterface]] 33 | deps = ["IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"] 34 | git-tree-sha1 = "ce17bad65d0842b34a15fffc8879a9f68f08a67f" 35 | uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" 36 | version = "3.1.6" 37 | 38 | [[ArrayLayouts]] 39 | deps = ["Compat", "FillArrays", "LinearAlgebra", "SparseArrays"] 40 | git-tree-sha1 = "a577e27915fdcb3f6b96118b56655b38e3b466f2" 41 | uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" 42 | version = "0.4.12" 43 | 44 | [[Artifacts]] 45 | deps = ["Pkg"] 46 | git-tree-sha1 = "c30985d8821e0cd73870b17b0ed0ce6dc44cb744" 47 | uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" 48 | version = "1.3.0" 49 | 50 | [[BandedMatrices]] 51 | deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"] 52 | git-tree-sha1 = "bad4640d622657238f1144da2a639ba1703c5352" 53 | uuid = "aae01518-5342-5314-be14-df237901396f" 54 | version = "0.16.8" 55 | 56 | [[Base64]] 57 | 
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" 58 | 59 | [[BenchmarkTools]] 60 | deps = ["JSON", "Logging", "Printf", "Statistics", "UUIDs"] 61 | git-tree-sha1 = "9e62e66db34540a0c919d72172cc2f642ac71260" 62 | uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 63 | version = "0.5.0" 64 | 65 | [[BinaryProvider]] 66 | deps = ["Libdl", "Logging", "SHA"] 67 | git-tree-sha1 = "ecdec412a9abc8db54c0efc5548c64dfce072058" 68 | uuid = "b99e7846-7c00-51b0-8f62-c81ae34c0232" 69 | version = "0.5.10" 70 | 71 | [[BoundaryValueDiffEq]] 72 | deps = ["BandedMatrices", "DiffEqBase", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "NLsolve", "Reexport", "SparseArrays"] 73 | git-tree-sha1 = "fe34902ac0c3a35d016617ab7032742865756d7d" 74 | uuid = "764a87c0-6b3e-53db-9096-fe964310641d" 75 | version = "2.7.1" 76 | 77 | [[CEnum]] 78 | git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9" 79 | uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" 80 | version = "0.4.1" 81 | 82 | [[ChainRulesCore]] 83 | deps = ["Compat", "LinearAlgebra", "SparseArrays"] 84 | git-tree-sha1 = "de4f08843c332d355852721adb1592bce7924da3" 85 | uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" 86 | version = "0.9.29" 87 | 88 | [[ColorTypes]] 89 | deps = ["FixedPointNumbers", "Random"] 90 | git-tree-sha1 = "32a2b8af383f11cbb65803883837a149d10dfe8a" 91 | uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f" 92 | version = "0.10.12" 93 | 94 | [[Colors]] 95 | deps = ["ColorTypes", "FixedPointNumbers", "InteractiveUtils", "Reexport"] 96 | git-tree-sha1 = "ac5f2213e56ed8a34a3dd2f681f4df1166b34929" 97 | uuid = "5ae59095-9a9b-59fe-a467-6f913c188581" 98 | version = "0.12.6" 99 | 100 | [[Combinatorics]] 101 | git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860" 102 | uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" 103 | version = "1.0.2" 104 | 105 | [[CommonSolve]] 106 | git-tree-sha1 = "68a0743f578349ada8bc911a5cbd5a2ef6ed6d1f" 107 | uuid = "38540f10-b2f7-11e9-35d8-d573e4eb0ff2" 108 | version = "0.2.0" 109 | 110 | [[CommonSubexpressions]] 111 | deps = ["MacroTools", "Test"] 112 | git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" 113 | uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" 114 | version = "0.3.0" 115 | 116 | [[Compat]] 117 | deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] 118 | git-tree-sha1 = "919c7f3151e79ff196add81d7f4e45d91bbf420b" 119 | uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" 120 | version = "3.25.0" 121 | 122 | [[CompilerSupportLibraries_jll]] 123 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 124 | git-tree-sha1 = "8e695f735fca77e9708e795eda62afdb869cbb70" 125 | uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" 126 | version = "0.3.4+0" 127 | 128 | [[Conda]] 129 | deps = ["JSON", "VersionParsing"] 130 | git-tree-sha1 = "6231e40619c15148bcb80aa19d731e629877d762" 131 | uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d" 132 | version = "1.5.1" 133 | 134 | [[ConstructionBase]] 135 | deps = ["LinearAlgebra"] 136 | git-tree-sha1 = "48920211c95a6da1914a06c44ec94be70e84ffff" 137 | uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" 138 | version = "1.1.0" 139 | 140 | [[DataAPI]] 141 | git-tree-sha1 = "dfb3b7e89e395be1e25c2ad6d7690dc29cc53b1d" 142 | uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" 143 | version = "1.6.0" 144 | 145 | [[DataStructures]] 146 | deps = ["Compat", "InteractiveUtils", "OrderedCollections"] 147 | 
git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" 148 | uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" 149 | version = "0.18.9" 150 | 151 | [[DataValueInterfaces]] 152 | git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" 153 | uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" 154 | version = "1.0.0" 155 | 156 | [[Dates]] 157 | deps = ["Printf"] 158 | uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" 159 | 160 | [[DelayDiffEq]] 161 | deps = ["DataStructures", "DiffEqBase", "LinearAlgebra", "Logging", "NonlinearSolve", "OrdinaryDiffEq", "Printf", "RecursiveArrayTools", "Reexport", "UnPack"] 162 | git-tree-sha1 = "0c7f367792397f754728a3db960d7ef1eb3d8f09" 163 | uuid = "bcd4f6db-9728-5f36-b5f7-82caef46ccdb" 164 | version = "5.29.1" 165 | 166 | [[DelimitedFiles]] 167 | deps = ["Mmap"] 168 | uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" 169 | 170 | [[DiffEqBase]] 171 | deps = ["ArrayInterface", "ChainRulesCore", "DataStructures", "DocStringExtensions", "FunctionWrappers", "IterativeSolvers", "LabelledArrays", "LinearAlgebra", "Logging", "MuladdMacro", "NonlinearSolve", "Parameters", "Printf", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "Requires", "SciMLBase", "SparseArrays", "StaticArrays", "Statistics", "SuiteSparse", "ZygoteRules"] 172 | git-tree-sha1 = "c2ff625248a0967adff1dc1f79c6a41e2531f081" 173 | uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" 174 | version = "6.57.8" 175 | 176 | [[DiffEqCallbacks]] 177 | deps = ["DataStructures", "DiffEqBase", "ForwardDiff", "LinearAlgebra", "NLsolve", "OrdinaryDiffEq", "RecipesBase", "RecursiveArrayTools", "StaticArrays"] 178 | git-tree-sha1 = "d4c4a3f442ab749b6b895c514b0be984c75d6d67" 179 | uuid = "459566f4-90b8-5000-8ac3-15dfb0a30def" 180 | version = "2.16.0" 181 | 182 | [[DiffEqFinancial]] 183 | deps = ["DiffEqBase", "DiffEqNoiseProcess", "LinearAlgebra", "Markdown", "RandomNumbers"] 184 | git-tree-sha1 = "db08e0def560f204167c58fd0637298e13f58f73" 185 | uuid = "5a0ffddc-d203-54b0-88ba-2c03c0fc2e67" 186 | version = "2.4.0" 187 | 188 | [[DiffEqJump]] 189 | deps = ["ArrayInterface", "Compat", "DataStructures", "DiffEqBase", "FunctionWrappers", "LinearAlgebra", "PoissonRandom", "Random", "RandomNumbers", "RecursiveArrayTools", "StaticArrays", "TreeViews", "UnPack"] 190 | git-tree-sha1 = "3ec8d5eeb792897b28ef79a851d834ce1415498f" 191 | uuid = "c894b116-72e5-5b58-be3c-e6d8d4ac2b12" 192 | version = "6.13.0" 193 | 194 | [[DiffEqNoiseProcess]] 195 | deps = ["DiffEqBase", "Distributions", "LinearAlgebra", "Optim", "PoissonRandom", "QuadGK", "Random", "Random123", "RandomNumbers", "RecipesBase", "RecursiveArrayTools", "Requires", "ResettableStacks", "StaticArrays", "Statistics"] 196 | git-tree-sha1 = "f300e85c99c79bdd0434df097d2803ae36e6ccca" 197 | uuid = "77a26b50-5914-5dd7-bc55-306e6241c503" 198 | version = "5.6.0" 199 | 200 | [[DiffEqPhysics]] 201 | deps = ["DiffEqBase", "DiffEqCallbacks", "ForwardDiff", "LinearAlgebra", "Printf", "Random", "RecipesBase", "RecursiveArrayTools", "Reexport", "StaticArrays"] 202 | git-tree-sha1 = "8f23c6f36f6a6eb2cbd6950e28ec7c4b99d0e4c9" 203 | uuid = "055956cb-9e8b-5191-98cc-73ae4a59e68a" 204 | version = "3.9.0" 205 | 206 | [[DiffResults]] 207 | deps = ["StaticArrays"] 208 | git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805" 209 | uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" 210 | version = "1.0.3" 211 | 212 | [[DiffRules]] 213 | deps = ["NaNMath", "Random", "SpecialFunctions"] 214 | git-tree-sha1 = "214c3fcac57755cfda163d91c58893a8723f93e9" 215 | uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" 
216 | version = "1.0.2" 217 | 218 | [[DifferentialEquations]] 219 | deps = ["BoundaryValueDiffEq", "DelayDiffEq", "DiffEqBase", "DiffEqCallbacks", "DiffEqFinancial", "DiffEqJump", "DiffEqNoiseProcess", "DiffEqPhysics", "DimensionalPlotRecipes", "LinearAlgebra", "MultiScaleArrays", "OrdinaryDiffEq", "ParameterizedFunctions", "Random", "RecursiveArrayTools", "Reexport", "SteadyStateDiffEq", "StochasticDiffEq", "Sundials"] 220 | git-tree-sha1 = "221b9a427fc8970be5b65171c25f0a6ba0e1f394" 221 | uuid = "0c46a032-eb83-5123-abaf-570d42b7fbaa" 222 | version = "6.16.0" 223 | 224 | [[DimensionalPlotRecipes]] 225 | deps = ["LinearAlgebra", "RecipesBase"] 226 | git-tree-sha1 = "af883a26bbe6e3f5f778cb4e1b81578b534c32a6" 227 | uuid = "c619ae07-58cd-5f6d-b883-8f17bd6a98f9" 228 | version = "1.2.0" 229 | 230 | [[Distances]] 231 | deps = ["LinearAlgebra", "Statistics"] 232 | git-tree-sha1 = "366715149014943abd71aa647a07a43314158b2d" 233 | uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" 234 | version = "0.10.2" 235 | 236 | [[Distributed]] 237 | deps = ["Random", "Serialization", "Sockets"] 238 | uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" 239 | 240 | [[Distributions]] 241 | deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns"] 242 | git-tree-sha1 = "5c6a645ea65af6aac4f96b5dedaae660b7b56267" 243 | uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" 244 | version = "0.24.10" 245 | 246 | [[DocStringExtensions]] 247 | deps = ["LibGit2", "Markdown", "Pkg", "Test"] 248 | git-tree-sha1 = "50ddf44c53698f5e784bbebb3f4b21c5807401b1" 249 | uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" 250 | version = "0.8.3" 251 | 252 | [[ExponentialUtilities]] 253 | deps = ["LinearAlgebra", "Printf", "Requires", "SparseArrays"] 254 | git-tree-sha1 = "712cb5af8db62836913970ee035a5fa742986f00" 255 | uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" 256 | version = "1.8.1" 257 | 258 | [[ExprTools]] 259 | git-tree-sha1 = "10407a39b87f29d47ebaca8edbc75d7c302ff93e" 260 | uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04" 261 | version = "0.1.3" 262 | 263 | [[FFTW]] 264 | deps = ["AbstractFFTs", "BinaryProvider", "Conda", "Libdl", "LinearAlgebra", "Reexport"] 265 | git-tree-sha1 = "e0823a0ea2990b28a8398e958327333e8af53b27" 266 | uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 267 | version = "1.1.1" 268 | 269 | [[FastClosures]] 270 | git-tree-sha1 = "acebe244d53ee1b461970f8910c235b259e772ef" 271 | uuid = "9aa1b823-49e4-5ca5-8b0f-3971ec8bab6a" 272 | version = "0.3.2" 273 | 274 | [[FillArrays]] 275 | deps = ["LinearAlgebra", "Random", "SparseArrays"] 276 | git-tree-sha1 = "ff537e5a3cba92fb48f30fec46723510450f2c0e" 277 | uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" 278 | version = "0.10.2" 279 | 280 | [[FiniteDiff]] 281 | deps = ["ArrayInterface", "LinearAlgebra", "Requires", "SparseArrays", "StaticArrays"] 282 | git-tree-sha1 = "f6f80c8f934efd49a286bb5315360be66956dfc4" 283 | uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" 284 | version = "2.8.0" 285 | 286 | [[FixedPointNumbers]] 287 | deps = ["Statistics"] 288 | git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc" 289 | uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93" 290 | version = "0.8.4" 291 | 292 | [[Formatting]] 293 | deps = ["Printf"] 294 | git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" 295 | uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" 296 | version = "0.4.2" 297 | 298 | [[ForwardDiff]] 299 | deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "NaNMath", "Random", 
"SpecialFunctions", "StaticArrays"] 300 | git-tree-sha1 = "c68fb7481b71519d313114dca639b35262ff105f" 301 | uuid = "f6369f11-7733-5829-9624-2563aa707210" 302 | version = "0.10.17" 303 | 304 | [[FunctionWrappers]] 305 | git-tree-sha1 = "241552bc2209f0fa068b6415b1942cc0aa486bcc" 306 | uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" 307 | version = "1.1.2" 308 | 309 | [[Future]] 310 | deps = ["Random"] 311 | uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" 312 | 313 | [[Hwloc]] 314 | deps = ["Hwloc_jll"] 315 | git-tree-sha1 = "ffdcd4272a7cc36442007bca41aa07ca3cc5fda4" 316 | uuid = "0e44f5e4-bd66-52a0-8798-143a42290a1d" 317 | version = "1.3.0" 318 | 319 | [[Hwloc_jll]] 320 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 321 | git-tree-sha1 = "aac91e34ef4c166e0857e3d6052a3467e5732ceb" 322 | uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8" 323 | version = "2.4.1+0" 324 | 325 | [[IfElse]] 326 | git-tree-sha1 = "28e837ff3e7a6c3cdb252ce49fb412c8eb3caeef" 327 | uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" 328 | version = "0.1.0" 329 | 330 | [[Inflate]] 331 | git-tree-sha1 = "f5fc07d4e706b84f72d54eedcc1c13d92fb0871c" 332 | uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9" 333 | version = "0.1.2" 334 | 335 | [[InteractiveUtils]] 336 | deps = ["Markdown"] 337 | uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" 338 | 339 | [[IterativeSolvers]] 340 | deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] 341 | git-tree-sha1 = "6f5ef3206d9dc6510a8b8e2334b96454a2ade590" 342 | uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" 343 | version = "0.9.0" 344 | 345 | [[IteratorInterfaceExtensions]] 346 | git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" 347 | uuid = "82899510-4779-5014-852e-03e436cf321d" 348 | version = "1.0.0" 349 | 350 | [[JLLWrappers]] 351 | git-tree-sha1 = "a431f5f2ca3f4feef3bd7a5e94b8b8d4f2f647a0" 352 | uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" 353 | version = "1.2.0" 354 | 355 | [[JSON]] 356 | deps = ["Dates", "Mmap", "Parsers", "Unicode"] 357 | git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" 358 | uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" 359 | version = "0.21.1" 360 | 361 | [[LaTeXStrings]] 362 | git-tree-sha1 = "c7f1c695e06c01b95a67f0cd1d34994f3e7db104" 363 | uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" 364 | version = "1.2.1" 365 | 366 | [[LabelledArrays]] 367 | deps = ["ArrayInterface", "LinearAlgebra", "MacroTools", "StaticArrays"] 368 | git-tree-sha1 = "5e288800819c323de5897fa6d5a002bdad54baf7" 369 | uuid = "2ee39098-c373-598a-b85f-a56591580800" 370 | version = "1.5.0" 371 | 372 | [[Latexify]] 373 | deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"] 374 | git-tree-sha1 = "fbc08b5a78e264ba3d19da90b36ce1789ca67a40" 375 | uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" 376 | version = "0.14.11" 377 | 378 | [[LibGit2]] 379 | deps = ["Printf"] 380 | uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" 381 | 382 | [[Libdl]] 383 | uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" 384 | 385 | [[LightGraphs]] 386 | deps = ["ArnoldiMethod", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"] 387 | git-tree-sha1 = "432428df5f360964040ed60418dd5601ecd240b6" 388 | uuid = "093fc24a-ae57-5d10-9952-331d41423f4d" 389 | version = "1.3.5" 390 | 391 | [[LineSearches]] 392 | deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] 393 | git-tree-sha1 = "f27132e551e959b3667d8c93eae90973225032dd" 394 | uuid = 
"d3d80556-e9d4-5f37-9878-2ab0fcc64255" 395 | version = "7.1.1" 396 | 397 | [[LinearAlgebra]] 398 | deps = ["Libdl"] 399 | uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 400 | 401 | [[Logging]] 402 | uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" 403 | 404 | [[LoopVectorization]] 405 | deps = ["ArrayInterface", "DocStringExtensions", "IfElse", "LinearAlgebra", "OffsetArrays", "Requires", "SLEEFPirates", "ThreadingUtilities", "UnPack", "VectorizationBase"] 406 | git-tree-sha1 = "5684e4aafadaf668dce27f12d67df4888fa58181" 407 | uuid = "bdcacae8-1622-11e9-2a5c-532679323890" 408 | version = "0.11.2" 409 | 410 | [[METIS_jll]] 411 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 412 | git-tree-sha1 = "2dc1a9fc87e57e32b1fc186db78811157b30c118" 413 | uuid = "d00139f3-1899-568f-a2f0-47f597d42d70" 414 | version = "5.1.0+5" 415 | 416 | [[MacroTools]] 417 | deps = ["Markdown", "Random"] 418 | git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" 419 | uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" 420 | version = "0.5.6" 421 | 422 | [[Markdown]] 423 | deps = ["Base64"] 424 | uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" 425 | 426 | [[Missings]] 427 | deps = ["DataAPI"] 428 | git-tree-sha1 = "f8c673ccc215eb50fcadb285f522420e29e69e1c" 429 | uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" 430 | version = "0.4.5" 431 | 432 | [[Mmap]] 433 | uuid = "a63ad114-7e13-5084-954f-fe012c677804" 434 | 435 | [[ModelingToolkit]] 436 | deps = ["ArrayInterface", "ConstructionBase", "DataStructures", "DiffEqBase", "DiffEqJump", "DiffRules", "Distributed", "Distributions", "DocStringExtensions", "IfElse", "LabelledArrays", "Latexify", "Libdl", "LightGraphs", "LinearAlgebra", "MacroTools", "NaNMath", "NonlinearSolve", "RecursiveArrayTools", "Reexport", "Requires", "RuntimeGeneratedFunctions", "SafeTestsets", "SciMLBase", "Serialization", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArrays", "SymbolicUtils", "Symbolics", "TreeViews", "UnPack", "Unitful"] 437 | git-tree-sha1 = "5354faf8d8799a5fdc5a72fba4817b64ade8b8c0" 438 | uuid = "961ee093-0014-501f-94e3-6117800e7a78" 439 | version = "5.13.6" 440 | 441 | [[MuladdMacro]] 442 | git-tree-sha1 = "c6190f9a7fc5d9d5915ab29f2134421b12d24a68" 443 | uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" 444 | version = "0.2.2" 445 | 446 | [[MultiScaleArrays]] 447 | deps = ["DiffEqBase", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "OrdinaryDiffEq", "Random", "RecursiveArrayTools", "SparseDiffTools", "Statistics", "StochasticDiffEq", "TreeViews"] 448 | git-tree-sha1 = "258f3be6770fe77be8870727ba9803e236c685b8" 449 | uuid = "f9640e96-87f6-5992-9c3b-0743c6a49ffa" 450 | version = "1.8.1" 451 | 452 | [[NLSolversBase]] 453 | deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] 454 | git-tree-sha1 = "39d6bc45e99c96e6995cbddac02877f9b61a1dd1" 455 | uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" 456 | version = "7.7.1" 457 | 458 | [[NLsolve]] 459 | deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] 460 | git-tree-sha1 = "019f12e9a1a7880459d0173c182e6a99365d7ac1" 461 | uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" 462 | version = "4.5.1" 463 | 464 | [[NaNMath]] 465 | git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb" 466 | uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" 467 | version = "0.3.5" 468 | 469 | [[NonlinearSolve]] 470 | deps = ["ArrayInterface", "FiniteDiff", "ForwardDiff", "IterativeSolvers", "LinearAlgebra", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "SciMLBase", "Setfield", "StaticArrays", 
"UnPack"] 471 | git-tree-sha1 = "ef18e47df4f3917af35be5e5d7f5d97e8a83b0ec" 472 | uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" 473 | version = "0.3.8" 474 | 475 | [[OffsetArrays]] 476 | deps = ["Adapt"] 477 | git-tree-sha1 = "b3dfef5f2be7d7eb0e782ba9146a5271ee426e90" 478 | uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" 479 | version = "1.6.2" 480 | 481 | [[OpenBLAS_jll]] 482 | deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"] 483 | git-tree-sha1 = "0c922fd9634e358622e333fc58de61f05a048492" 484 | uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" 485 | version = "0.3.9+5" 486 | 487 | [[OpenSpecFun_jll]] 488 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] 489 | git-tree-sha1 = "9db77584158d0ab52307f8c04f8e7c08ca76b5b3" 490 | uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" 491 | version = "0.5.3+4" 492 | 493 | [[Optim]] 494 | deps = ["Compat", "FillArrays", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "PositiveFactorizations", "Printf", "SparseArrays", "StatsBase"] 495 | git-tree-sha1 = "3286df38aba45acf7445f3acd87b7b57b7c7feb7" 496 | uuid = "429524aa-4258-5aef-a3af-852621145aeb" 497 | version = "1.2.4" 498 | 499 | [[OrderedCollections]] 500 | git-tree-sha1 = "4fa2ba51070ec13fcc7517db714445b4ab986bdf" 501 | uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" 502 | version = "1.4.0" 503 | 504 | [[OrdinaryDiffEq]] 505 | deps = ["Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "ExponentialUtilities", "FastClosures", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "Logging", "MacroTools", "MuladdMacro", "NLsolve", "RecursiveArrayTools", "Reexport", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] 506 | git-tree-sha1 = "d22a75b8ae5b77543c4e1f8eae1ff01ce1f64453" 507 | uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" 508 | version = "5.52.2" 509 | 510 | [[PDMats]] 511 | deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse", "Test"] 512 | git-tree-sha1 = "95a4038d1011dfdbde7cecd2ad0ac411e53ab1bc" 513 | uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" 514 | version = "0.10.1" 515 | 516 | [[ParameterizedFunctions]] 517 | deps = ["DataStructures", "DiffEqBase", "Latexify", "LinearAlgebra", "ModelingToolkit", "Reexport", "SciMLBase"] 518 | git-tree-sha1 = "3610913402be3856074668741326d82d02cbba5a" 519 | uuid = "65888b18-ceab-5e60-b2b9-181511a3b968" 520 | version = "5.9.0" 521 | 522 | [[Parameters]] 523 | deps = ["OrderedCollections", "UnPack"] 524 | git-tree-sha1 = "2276ac65f1e236e0a6ea70baff3f62ad4c625345" 525 | uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" 526 | version = "0.12.2" 527 | 528 | [[Parsers]] 529 | deps = ["Dates"] 530 | git-tree-sha1 = "223a825cccef2228f3fdbf2ecc7ca93363059073" 531 | uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" 532 | version = "1.0.16" 533 | 534 | [[Pkg]] 535 | deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] 536 | uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" 537 | 538 | [[PoissonRandom]] 539 | deps = ["Random", "Statistics", "Test"] 540 | git-tree-sha1 = "44d018211a56626288b5d3f8c6497d28c26dc850" 541 | uuid = "e409e4f3-bfea-5376-8464-e040bb5c01ab" 542 | version = "0.4.0" 543 | 544 | [[PositiveFactorizations]] 545 | deps = ["LinearAlgebra"] 546 | git-tree-sha1 = "17275485f373e6673f7e7f97051f703ed5b15b20" 547 | uuid = "85a6dd25-e78a-55b7-8502-1745935b8125" 548 | version = "0.2.4" 549 | 550 | [[Printf]] 551 | deps = ["Unicode"] 552 | uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" 553 | 554 | [[PyCall]] 555 | deps = ["Conda", "Dates", "Libdl", 
"LinearAlgebra", "MacroTools", "Serialization", "VersionParsing"] 556 | git-tree-sha1 = "dd1a970b543bd02efce2984582e996af28cab27f" 557 | uuid = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" 558 | version = "1.92.2" 559 | 560 | [[PyPlot]] 561 | deps = ["Colors", "LaTeXStrings", "PyCall", "Sockets", "Test", "VersionParsing"] 562 | git-tree-sha1 = "67dde2482fe1a72ef62ed93f8c239f947638e5a2" 563 | uuid = "d330b81b-6aea-500a-939a-2ce795aea3ee" 564 | version = "2.9.0" 565 | 566 | [[QuadGK]] 567 | deps = ["DataStructures", "LinearAlgebra"] 568 | git-tree-sha1 = "12fbe86da16df6679be7521dfb39fbc861e1dc7b" 569 | uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" 570 | version = "2.4.1" 571 | 572 | [[REPL]] 573 | deps = ["InteractiveUtils", "Markdown", "Sockets"] 574 | uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" 575 | 576 | [[Random]] 577 | deps = ["Serialization"] 578 | uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 579 | 580 | [[Random123]] 581 | deps = ["Libdl", "Random", "RandomNumbers"] 582 | git-tree-sha1 = "7c6710c8198fd4444b5eb6a3840b7d47bd3593c5" 583 | uuid = "74087812-796a-5b5d-8853-05524746bad3" 584 | version = "1.3.1" 585 | 586 | [[RandomExtensions]] 587 | deps = ["Random", "SparseArrays"] 588 | git-tree-sha1 = "062986376ce6d394b23d5d90f01d81426113a3c9" 589 | uuid = "fb686558-2515-59ef-acaa-46db3789a887" 590 | version = "0.4.3" 591 | 592 | [[RandomNumbers]] 593 | deps = ["Random", "Requires"] 594 | git-tree-sha1 = "441e6fc35597524ada7f85e13df1f4e10137d16f" 595 | uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143" 596 | version = "1.4.0" 597 | 598 | [[RecipesBase]] 599 | git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" 600 | uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" 601 | version = "1.1.1" 602 | 603 | [[RecursiveArrayTools]] 604 | deps = ["ArrayInterface", "LinearAlgebra", "RecipesBase", "Requires", "StaticArrays", "Statistics", "ZygoteRules"] 605 | git-tree-sha1 = "271a36e18c8806332b7bd0f57e50fcff0d428b11" 606 | uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" 607 | version = "2.11.0" 608 | 609 | [[RecursiveFactorization]] 610 | deps = ["LinearAlgebra", "LoopVectorization"] 611 | git-tree-sha1 = "20f0ad1b2760da770d31be71f777740d25807631" 612 | uuid = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" 613 | version = "0.1.11" 614 | 615 | [[Reexport]] 616 | git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5" 617 | uuid = "189a3867-3050-52da-a836-e630ba90ab69" 618 | version = "1.0.0" 619 | 620 | [[Requires]] 621 | deps = ["UUIDs"] 622 | git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" 623 | uuid = "ae029012-a4dd-5104-9daa-d747884805df" 624 | version = "1.1.3" 625 | 626 | [[ResettableStacks]] 627 | deps = ["StaticArrays"] 628 | git-tree-sha1 = "622b3e491fb0a85fbfeed6f17dc320a9f46d8929" 629 | uuid = "ae5879a3-cd67-5da8-be7f-38c6eb64a37b" 630 | version = "1.1.0" 631 | 632 | [[Rmath]] 633 | deps = ["Random", "Rmath_jll"] 634 | git-tree-sha1 = "86c5647b565873641538d8f812c04e4c9dbeb370" 635 | uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa" 636 | version = "0.6.1" 637 | 638 | [[Rmath_jll]] 639 | deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] 640 | git-tree-sha1 = "1b7bf41258f6c5c9c31df8c1ba34c1fc88674957" 641 | uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f" 642 | version = "0.2.2+2" 643 | 644 | [[Roots]] 645 | deps = ["Printf"] 646 | git-tree-sha1 = "8f743e4f4368d1d753f3806bf635899dad6b4847" 647 | uuid = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" 648 | version = "1.0.7" 649 | 650 | [[RuntimeGeneratedFunctions]] 651 | deps = ["ExprTools", "SHA", "Serialization"] 652 | git-tree-sha1 = 
"e02f14dfe3a8d3b8fc92ca80c1882bfdbc015e07" 653 | uuid = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" 654 | version = "0.5.1" 655 | 656 | [[SHA]] 657 | uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" 658 | 659 | [[SLEEFPirates]] 660 | deps = ["IfElse", "Libdl", "VectorizationBase"] 661 | git-tree-sha1 = "ab6194c92dcf38036cd9513e4ab12cd76a613da1" 662 | uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" 663 | version = "0.6.10" 664 | 665 | [[SafeTestsets]] 666 | deps = ["Test"] 667 | git-tree-sha1 = "36ebc5622c82eb9324005cc75e7e2cc51181d181" 668 | uuid = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" 669 | version = "0.0.1" 670 | 671 | [[SciMLBase]] 672 | deps = ["ArrayInterface", "CommonSolve", "Distributed", "DocStringExtensions", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "RecipesBase", "RecursiveArrayTools", "StaticArrays", "Statistics", "Tables", "TreeViews"] 673 | git-tree-sha1 = "b85634c97d8b2df8f85d3ce7f7b78d794efea704" 674 | uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" 675 | version = "1.8.4" 676 | 677 | [[Serialization]] 678 | uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" 679 | 680 | [[Setfield]] 681 | deps = ["ConstructionBase", "Future", "MacroTools", "Requires"] 682 | git-tree-sha1 = "d5640fc570fb1b6c54512f0bd3853866bd298b3e" 683 | uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" 684 | version = "0.7.0" 685 | 686 | [[SharedArrays]] 687 | deps = ["Distributed", "Mmap", "Random", "Serialization"] 688 | uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" 689 | 690 | [[SimpleTraits]] 691 | deps = ["InteractiveUtils", "MacroTools"] 692 | git-tree-sha1 = "daf7aec3fe3acb2131388f93a4c409b8c7f62226" 693 | uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" 694 | version = "0.9.3" 695 | 696 | [[Sockets]] 697 | uuid = "6462fe0b-24de-5631-8697-dd941f90decc" 698 | 699 | [[SortingAlgorithms]] 700 | deps = ["DataStructures", "Random", "Test"] 701 | git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd" 702 | uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" 703 | version = "0.3.1" 704 | 705 | [[SparseArrays]] 706 | deps = ["LinearAlgebra", "Random"] 707 | uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 708 | 709 | [[SparseDiffTools]] 710 | deps = ["Adapt", "ArrayInterface", "Compat", "DataStructures", "FiniteDiff", "ForwardDiff", "LightGraphs", "LinearAlgebra", "Requires", "SparseArrays", "VertexSafeGraphs"] 711 | git-tree-sha1 = "d05bc362e3fa1b0e2361594a706fc63ffbd140f3" 712 | uuid = "47a9eef4-7e08-11e9-0b38-333d64bd3804" 713 | version = "1.13.0" 714 | 715 | [[SpecialFunctions]] 716 | deps = ["ChainRulesCore", "OpenSpecFun_jll"] 717 | git-tree-sha1 = "5919936c0e92cff40e57d0ddf0ceb667d42e5902" 718 | uuid = "276daf66-3868-5448-9aa4-cd146d93841b" 719 | version = "1.3.0" 720 | 721 | [[Static]] 722 | deps = ["IfElse"] 723 | git-tree-sha1 = "ddec5466a1d2d7e58adf9a427ba69763661aacf6" 724 | uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" 725 | version = "0.2.4" 726 | 727 | [[StaticArrays]] 728 | deps = ["LinearAlgebra", "Random", "Statistics"] 729 | git-tree-sha1 = "9da72ed50e94dbff92036da395275ed114e04d49" 730 | uuid = "90137ffa-7385-5640-81b9-e52037218182" 731 | version = "1.0.1" 732 | 733 | [[Statistics]] 734 | deps = ["LinearAlgebra", "SparseArrays"] 735 | uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 736 | 737 | [[StatsBase]] 738 | deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics"] 739 | git-tree-sha1 = "a83fa3021ac4c5a918582ec4721bc0cf70b495a9" 740 | uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 741 | version = "0.33.4" 742 | 743 | 
[[StatsFuns]] 744 | deps = ["Rmath", "SpecialFunctions"] 745 | git-tree-sha1 = "3b9f665c70712af3264b61c27a7e1d62055dafd1" 746 | uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" 747 | version = "0.9.6" 748 | 749 | [[SteadyStateDiffEq]] 750 | deps = ["DiffEqBase", "DiffEqCallbacks", "LinearAlgebra", "NLsolve", "Reexport", "SciMLBase"] 751 | git-tree-sha1 = "9b908c7a5933b8ec7c2da44a477f74127baa2ce9" 752 | uuid = "9672c7b4-1e72-59bd-8a11-6ac3964bc41f" 753 | version = "1.6.1" 754 | 755 | [[StochasticDiffEq]] 756 | deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqJump", "DiffEqNoiseProcess", "FillArrays", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "OrdinaryDiffEq", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] 757 | git-tree-sha1 = "3109ce733c907b941eea4345b0644308e2c6da2d" 758 | uuid = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" 759 | version = "6.33.1" 760 | 761 | [[SuiteSparse]] 762 | deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] 763 | uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" 764 | 765 | [[SuiteSparse_jll]] 766 | deps = ["Libdl", "METIS_jll", "OpenBLAS_jll", "Pkg"] 767 | git-tree-sha1 = "4a2295b63d67e6f13a0b539c935ccbf218fa1143" 768 | uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" 769 | version = "5.4.0+9" 770 | 771 | [[Sundials]] 772 | deps = ["CEnum", "DataStructures", "DiffEqBase", "Libdl", "LinearAlgebra", "Logging", "Reexport", "SparseArrays", "Sundials_jll"] 773 | git-tree-sha1 = "28db9e1a8fdd1b8e95cee064a6c2066897cf39c5" 774 | uuid = "c3572dad-4567-51f8-b174-8c6c989267f4" 775 | version = "4.4.1" 776 | 777 | [[Sundials_jll]] 778 | deps = ["CompilerSupportLibraries_jll", "Libdl", "OpenBLAS_jll", "Pkg", "SuiteSparse_jll"] 779 | git-tree-sha1 = "013ff4504fc1d475aa80c63b455b6b3a58767db2" 780 | uuid = "fb77eaff-e24c-56d4-86b1-d163f2edb164" 781 | version = "5.2.0+1" 782 | 783 | [[SymbolicUtils]] 784 | deps = ["AbstractAlgebra", "AbstractTrees", "Combinatorics", "ConstructionBase", "DataStructures", "IfElse", "LabelledArrays", "NaNMath", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArrays", "TimerOutputs"] 785 | git-tree-sha1 = "7176e06fb4ad726e828c72642000a2076808ea58" 786 | uuid = "d1185830-fcd6-423d-90d6-eec64667417b" 787 | version = "0.9.1" 788 | 789 | [[Symbolics]] 790 | deps = ["AbstractAlgebra", "DiffRules", "Distributions", "DocStringExtensions", "IfElse", "Latexify", "Libdl", "LinearAlgebra", "MacroTools", "NaNMath", "RecipesBase", "Reexport", "RuntimeGeneratedFunctions", "SciMLBase", "Setfield", "SparseArrays", "SpecialFunctions", "SymbolicUtils", "TreeViews"] 791 | git-tree-sha1 = "0a8f3d870e735e9b68077316c6cd732266cb10aa" 792 | uuid = "0c5d862f-8b57-4792-8d23-62f2024744c7" 793 | version = "0.1.9" 794 | 795 | [[TableTraits]] 796 | deps = ["IteratorInterfaceExtensions"] 797 | git-tree-sha1 = "b1ad568ba658d8cbb3b892ed5380a6f3e781a81e" 798 | uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" 799 | version = "1.0.0" 800 | 801 | [[Tables]] 802 | deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] 803 | git-tree-sha1 = "f03fc113290ee7726b173fc7ea661260d204b3f2" 804 | uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" 805 | version = "1.4.0" 806 | 807 | [[Test]] 808 | deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] 809 | uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 810 | 811 | [[ThreadingUtilities]] 812 | deps = ["VectorizationBase"] 813 | git-tree-sha1 = 
"e3032c97b183e6e2baf4d2cc4fe60c4292a4a707" 814 | uuid = "8290d209-cae3-49c0-8002-c8c24d57dab5" 815 | version = "0.2.5" 816 | 817 | [[TimerOutputs]] 818 | deps = ["Printf"] 819 | git-tree-sha1 = "32cdbe6cd2d214c25a0b88f985c9e0092877c236" 820 | uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" 821 | version = "0.5.8" 822 | 823 | [[TreeViews]] 824 | deps = ["Test"] 825 | git-tree-sha1 = "8d0d7a3fe2f30d6a7f833a5f19f7c7a5b396eae6" 826 | uuid = "a2a6695c-b41b-5b7d-aed9-dbfdeacea5d7" 827 | version = "0.3.0" 828 | 829 | [[UUIDs]] 830 | deps = ["Random", "SHA"] 831 | uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" 832 | 833 | [[UnPack]] 834 | git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b" 835 | uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" 836 | version = "1.0.2" 837 | 838 | [[Unicode]] 839 | uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" 840 | 841 | [[Unitful]] 842 | deps = ["ConstructionBase", "LinearAlgebra", "Random"] 843 | git-tree-sha1 = "fdfbea79b5b9a305bf226eb4730321f603281290" 844 | uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" 845 | version = "1.6.0" 846 | 847 | [[VectorizationBase]] 848 | deps = ["ArrayInterface", "Hwloc", "IfElse", "Libdl", "LinearAlgebra"] 849 | git-tree-sha1 = "486842a62c4a1bc23f7c8457d64e683a00d6d0e9" 850 | uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" 851 | version = "0.18.14" 852 | 853 | [[VersionParsing]] 854 | git-tree-sha1 = "80229be1f670524750d905f8fc8148e5a8c4537f" 855 | uuid = "81def892-9a0e-5fdd-b105-ffc91e053289" 856 | version = "1.2.0" 857 | 858 | [[VertexSafeGraphs]] 859 | deps = ["LightGraphs"] 860 | git-tree-sha1 = "b9b450c99a3ca1cc1c6836f560d8d887bcbe356e" 861 | uuid = "19fa3120-7c27-5ec5-8db8-b0b0aa330d6f" 862 | version = "0.1.2" 863 | 864 | [[ZygoteRules]] 865 | deps = ["MacroTools"] 866 | git-tree-sha1 = "9e7a1e8ca60b742e508a315c17eef5211e7fbfd7" 867 | uuid = "700de1a5-db45-46bc-99cf-38207098b444" 868 | version = "0.2.1" 869 | --------------------------------------------------------------------------------