├── .github └── workflows │ └── CI.yml ├── .gitignore ├── ADSeismic ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ ├── detector.jl │ ├── main.jl │ └── seismic.jl ├── src │ ├── ADSeismic.jl │ ├── cuda.jl │ ├── detector.jl │ ├── simulation.jl │ ├── treeverse.jl │ └── utils.jl └── test │ ├── cuda.jl │ ├── runtests.jl │ ├── simulation.jl │ └── treeverse.jl ├── AutodiffAndOptimization ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ ├── main.jl │ ├── simplex.mp4 │ └── trebuchet.jl ├── src │ ├── AutodiffAndOptimization.jl │ ├── Cracker │ │ ├── Cracker.jl │ │ ├── rrule.jl │ │ ├── trace.jl │ │ └── trackedarray.jl │ └── simplex.jl └── test │ ├── cracker.jl │ └── runtests.jl ├── CompressedSensing ├── .gitignore ├── Project.toml ├── README.md ├── data │ └── waterfall.jpeg ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── CompressedSensing.jl │ ├── compressed_sensing_2d.jl │ └── owlqn.jl └── test │ ├── compressed_sensing_2d.jl │ ├── owlqn.jl │ └── runtests.jl ├── GraphClustering ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── GraphClustering.jl │ └── clustering.jl └── test │ ├── clustering.jl │ └── runtests.jl ├── HappyMolecules ├── .gitignore ├── Project.toml ├── README.md ├── examples │ └── Project.toml ├── notebooks │ ├── Project.toml │ └── demo.jl ├── project │ ├── Project.toml │ └── triplepoint.jl ├── src │ ├── Core.jl │ ├── HappyMolecules.jl │ ├── applications.jl │ └── enzyme.jl └── test │ └── runtests.jl ├── HiddenMarkovModel ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── HiddenMarkovModel.jl │ └── hmm.jl └── test │ ├── hmm.jl │ └── runtests.jl ├── ImageProcessing ├── .gitignore ├── LICENSE ├── Project.toml ├── README.md ├── data │ ├── amat.png │ ├── art.png │ └── cat.png ├── examples │ ├── Project.toml │ ├── fft.jl │ ├── fftcompress.jl │ ├── main.jl │ └── svdcompress.jl ├── src │ ├── ImageProcessing.jl │ ├── fft.jl │ ├── pca.jl │ ├── polymul.jl │ └── utils.jl └── test │ ├── fft.jl │ ├── pca.jl │ ├── polymul.jl │ └── runtests.jl ├── IsingModel ├── .gitignore ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── IsingModel.jl │ ├── ising2d.jl │ └── swendsen_wang.jl └── test │ ├── ising2d.jl │ ├── runtests.jl │ └── swendsen_wang.jl ├── KernelPCA ├── .gitignore ├── LICENSE ├── Project.toml ├── README.md ├── data │ └── solar_system.csv ├── examples │ ├── Project.toml │ ├── kernelf.jl │ └── main.jl ├── src │ ├── KernelPCA.jl │ ├── dataset.jl │ ├── kernels.jl │ └── kpca.jl └── test │ ├── kernels.jl │ ├── kpca.jl │ └── runtests.jl ├── LICENSE ├── LatticeBoltzmannModel ├── .gitignore ├── LICENSE ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ ├── barrier.jl │ ├── barrier.mp4 │ └── barrier_gpu.jl ├── ext │ └── LatticeBoltzmannCUDAExt.jl ├── src │ ├── LatticeBoltzmannModel.jl │ ├── fluid.jl │ └── point.jl └── test │ ├── cuda.jl │ ├── fluid.jl │ └── runtests.jl ├── LatticeGasCA ├── .gitignore ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── LatticeGasCA.jl │ ├── cuda.jl │ └── hpp.jl └── test │ ├── cuda.jl │ ├── hpp.jl │ └── runtests.jl ├── Makefile ├── MyFirstPackage ├── .gitignore ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ ├── lorenz.jl │ └── main.jl ├── src │ ├── MyFirstPackage.jl │ ├── lorenz.jl │ └── point.jl └── test │ ├── lorenz.jl │ └── runtests.jl ├── PhysicsSimulation ├── .gitignore ├── LICENSE ├── Project.toml ├── README.md ├── data │ └── 
solar_system.csv ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── PhysicsSimulation.jl │ ├── planet.jl │ └── point.jl └── test │ ├── planet.jl │ ├── point.jl │ └── runtests.jl ├── README.md ├── SimpleKrylov ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── SimpleKrylov.jl │ ├── arnoldi.jl │ ├── coo.jl │ ├── csc.jl │ └── lanczos.jl └── test │ ├── arnoldi.jl │ ├── coo.jl │ ├── csc.jl │ ├── lanczos.jl │ └── runtests.jl ├── SimpleLinearAlgebra ├── .gitignore ├── LICENSE ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── SimpleLinearAlgebra.jl │ ├── fft.jl │ ├── lu.jl │ ├── qr.jl │ └── strassen.jl └── test │ ├── fft.jl │ ├── lu.jl │ ├── qr.jl │ ├── runtests.jl │ └── strassen.jl ├── SimpleTensorNetwork ├── Project.toml ├── README.md ├── assets │ ├── asia.png │ └── graph-petersen.png ├── examples │ ├── Project.toml │ ├── asia.jl │ ├── main.jl │ ├── model.uai │ └── spinglass.jl ├── src │ ├── SimpleTensorNetwork.jl │ ├── inference.jl │ ├── mnist784_bin_1000.npy │ ├── sampling.jl │ ├── spinglass.jl │ └── tucker.jl └── test │ ├── runtests.jl │ ├── sampling.jl │ └── spinglass.jl ├── SpinDynamics ├── Project.toml ├── README.md ├── examples │ ├── Project.toml │ └── main.jl ├── src │ ├── SpinDynamics.jl │ ├── simulated_bifurcation.jl │ └── simulation.jl └── test │ ├── runtests.jl │ ├── simulated_bifurcation.jl │ └── simulation.jl ├── Spinglass ├── Project.toml ├── README.md ├── data │ └── example.txt ├── examples │ ├── 2_spin_plot.png │ ├── Project.toml │ ├── cuda.jl │ ├── main.jl │ ├── mis.jl │ ├── solutionspace.jl │ ├── tropical_tensor_network.jl │ └── twospin.jl ├── ext │ └── CUDAExt.jl ├── src │ ├── Spinglass.jl │ ├── dynamics.jl │ ├── mis_sa.jl │ └── simulated_annealing.jl └── test │ ├── CUDAExt.jl │ ├── dynamics.jl │ ├── logic_gates.jl │ ├── runtests.jl │ └── simulated_annealing.jl └── SpringSystem ├── Project.toml ├── README.md ├── examples ├── Project.toml ├── main.jl └── spring_sample.jl ├── src ├── SpringSystem.jl ├── chain.jl ├── leapfrog.jl └── point.jl └── test ├── chain.jl ├── leapfrog.jl ├── point.jl └── runtests.jl /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | jobs: 10 | test: 11 | name: ${{ matrix.package }} - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | version: 17 | - '1' # latest stable 18 | os: 19 | - ubuntu-latest 20 | arch: 21 | - x64 22 | package: 23 | - MyFirstPackage 24 | - PhysicsSimulation 25 | - LatticeGasCA 26 | - ImageProcessing 27 | - KernelPCA 28 | - CompressedSensing 29 | - HappyMolecules 30 | - SimpleLinearAlgebra 31 | - SimpleKrylov 32 | - SimpleTensorNetwork 33 | - GraphClustering 34 | - IsingModel 35 | - SpringSystem 36 | - Spinglass 37 | - SpinDynamics 38 | steps: 39 | - uses: actions/checkout@v4 40 | - uses: julia-actions/setup-julia@v2 41 | with: 42 | version: ${{ matrix.version }} 43 | arch: ${{ matrix.arch }} 44 | - uses: actions/cache@v4 45 | env: 46 | cache-name: cache-artifacts 47 | with: 48 | path: ~/.julia/artifacts 49 | key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} 50 | restore-keys: | 51 | ${{ runner.os }}-test-${{ env.cache-name }}- 52 | ${{ runner.os }}-test- 53 | ${{ runner.os }}- 54 | - name: "develop lib packages" 55 | run: 
make init-${{ matrix.package }} 56 | - uses: julia-actions/julia-buildpkg@v1 57 | - name: "run tests" 58 | run: make test-${{ matrix.package }} 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.aux 3 | *.log 4 | *.out 5 | *.synctex.gz 6 | *.synctex(busy) 7 | *.synctex 8 | .DS_Store 9 | *~ 10 | *.svg 11 | *.bbl 12 | *.blg 13 | *.fdb_latexmk 14 | *.fls 15 | *.out 16 | *.dvi 17 | *.log 18 | *.aux 19 | _*.tex 20 | _*.bib 21 | _*.pdf 22 | *.auxlock 23 | *.dpth 24 | *.md5 25 | unweighted/images/gadgets/ 26 | unweighted/images/*.tex 27 | *.thm 28 | *.so 29 | website/notebooks/*.html 30 | lorenz.mp4 31 | Manifest.toml 32 | .vscode/ 33 | /*.png 34 | *.json 35 | *.mp4 36 | GraphClustering/examples/*.png 37 | SimpleKrylov/examples/*.png 38 | Spinglass/examples/*.png 39 | SpinDynamics/examples/*.png 40 | ADSeismic/examples/*.png 41 | HiddenMarkovModel/examples/*.png 42 | *.gif 43 | -------------------------------------------------------------------------------- /ADSeismic/Project.toml: -------------------------------------------------------------------------------- 1 | name = "ADSeismic" 2 | uuid = "91750005-49c4-47b3-98eb-6aafcffa9e28" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" 8 | Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" 9 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 10 | TreeverseAlgorithm = "e1c63c57-2fea-45bf-a8bf-df3ea6afb545" 11 | 12 | [compat] 13 | CUDA = "5.7.2" 14 | Enzyme = "0.13.30" 15 | LinearAlgebra = "1.11.0" 16 | julia = "1" 17 | 18 | [extras] 19 | ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" 20 | Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" 21 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 22 | 23 | [targets] 24 | test = ["Test", "Pkg", "ForwardDiff"] 25 | -------------------------------------------------------------------------------- /ADSeismic/README.md: -------------------------------------------------------------------------------- 1 | # ADSeismic 2 | 3 | Automatic differentiating PML solver in seismic simulation. 4 | 5 | ## Features 6 | 7 | * Optimal check-pointing (Treeverse algorithm), 8 | * Differentiating CUDA kernel functions with [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl). 9 | 10 | It can differentiate a PML solver defined on a 5000 x 5000 grid with 10000 time steps on a 32G memory GPU in one hour. 
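## Example

A minimal forward-simulation sketch, adapted from `test/simulation.jl` (the parameter values below are the ones used there; gradients are computed through the exported `treeverse_gradient`, see `src/treeverse.jl`):

```julia
using ADSeismic

# acoustic propagator on a 100 x 100 grid, 2000 time steps
param = AcousticPropagatorParams(nx=100, ny=100, Rcoef=0.2, dx=20.0, dy=20.0, dt=0.05, nstep=2000)
c = 1000 * ones(param.NX+2, param.NY+2)   # homogeneous velocity model on the padded grid
src = (param.NX ÷ 2, param.NY ÷ 2)        # point source at the center of the grid
srcv = Ricker(param, 100.0, 500.0)        # Ricker wavelet as the source time function
u = solve(param, src, srcv, c)            # wavefield for all time steps
```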
11 | 12 | ## References 13 | 14 | * https://github.com/kailaix/ADSeismic.jl/ 15 | * https://github.com/geodynamics/seismic_cpml 16 | * Efficient PML for the wave equation [arXiv: 1001.0319](https://arxiv.org/abs/1001.0319) -------------------------------------------------------------------------------- /ADSeismic/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | ADSeismic = "91750005-49c4-47b3-98eb-6aafcffa9e28" 3 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 4 | Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" 5 | ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" 6 | Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" 7 | TreeverseAlgorithm = "e1c63c57-2fea-45bf-a8bf-df3ea6afb545" 8 | -------------------------------------------------------------------------------- /ADSeismic/src/ADSeismic.jl: -------------------------------------------------------------------------------- 1 | module ADSeismic 2 | 3 | using Enzyme 4 | using TreeverseAlgorithm 5 | using LinearAlgebra 6 | 7 | export AcousticPropagatorParams, solve 8 | export treeverse, treeverse_gradient 9 | 10 | include("simulation.jl") 11 | include("utils.jl") 12 | #include("detector.jl") 13 | include("treeverse.jl") 14 | 15 | include("cuda.jl") 16 | 17 | end 18 | -------------------------------------------------------------------------------- /ADSeismic/src/cuda.jl: -------------------------------------------------------------------------------- 1 | using CUDA 2 | export togpu 3 | 4 | function togpu(a::AcousticPropagatorParams{DIM}) where DIM 5 | AcousticPropagatorParams(a.NX, a.NY, a.NSTEP, a.DELTAX, a.DELTAY, a.DELTAT, CuArray(a.Σx), CuArray(a.Σy)) 6 | end 7 | 8 | const CuSeismicState{MT} = SeismicState{MT} where MT<:CuArray 9 | 10 | export CuSeismicState 11 | function CuSeismicState(::Type{T}, nx::Int, ny::Int) where T 12 | SeismicState([CUDA.zeros(T, nx+2, ny+2) for i=1:4]..., Ref(0)) 13 | end 14 | 15 | function togpu(x::SeismicState) 16 | SeismicState([CuArray(t) for t in [x.upre, x.u, x.φ, x.ψ]]..., Ref(0)) 17 | end 18 | 19 | togpu(x::Number) = x 20 | togpu(x::AbstractArray) = CuArray(x) 21 | 22 | @inline function cudiv(x::Int, y::Int) 23 | max_threads = 256 24 | threads_x = min(max_threads, x) 25 | threads_y = min(max_threads ÷ threads_x, y) 26 | threads = (threads_x, threads_y) 27 | blocks = ceil.(Int, (x, y) ./ threads) 28 | threads, blocks 29 | end 30 | 31 | function one_step!(param::AcousticPropagatorParams, u, w, wold, φ, ψ, σ, τ, c::CuArray) 32 | @inline function one_step_kernel1(u, w, wold, φ, ψ, σ, τ, c, Δt, Δtx, Δty) 33 | i = (blockIdx().x-1) * blockDim().x + threadIdx().x + 1 34 | j = (blockIdx().y-1) * blockDim().y + threadIdx().y + 1 35 | Δtx2 = Δtx * Δtx 36 | Δty2 = Δty * Δty 37 | Dx = 0.5Δt*Δtx 38 | Dy = 0.5Δt*Δty 39 | @inbounds if i < size(c, 1) && j < size(c, 2) 40 | cij = c[i,j] 41 | δ = (σ[i,j]+τ[i,j])*Δt*0.5 42 | uij = (2 - σ[i,j]*τ[i,j]*(Δt*Δt) - 2*Δtx2 * cij - 2*Δty2 * cij) * w[i,j] + 43 | cij * Δtx2 * (w[i+1,j]+w[i-1,j]) + 44 | cij * Δty2 * (w[i,j+1]+w[i,j-1]) + 45 | Dx*(φ[i+1,j]-φ[i-1,j]) + 46 | Dy*(ψ[i,j+1]-ψ[i,j-1]) - 47 | (1 - δ) * wold[i,j] 48 | u[i,j] = uij / (1 + δ) 49 | end 50 | return nothing 51 | end 52 | 53 | @inline function one_step_kernel2(u, φ, ψ, σ, τ, c, Δt, Δtx_2, Δty_2) 54 | i = (blockIdx().x-1) * blockDim().x + threadIdx().x + 1 55 | j = (blockIdx().y-1) * blockDim().y + threadIdx().y + 1 56 | @inbounds if i < size(c, 1) && j < size(c, 2) 57 | φ[i,j] = (1-Δt*σ[i,j]) * φ[i,j] + Δtx_2 * c[i,j] * (τ[i,j] -σ[i,j]) * 
58 | (u[i+1,j]-u[i-1,j]) 59 | ψ[i,j] = (1-Δt*τ[i,j]) * ψ[i,j] + Δty_2 * c[i,j] * (σ[i,j] -τ[i,j]) * 60 | (u[i,j+1]-u[i,j-1]) 61 | end 62 | return nothing 63 | end 64 | 65 | Δt = param.DELTAT 66 | hx, hy = param.DELTAX, param.DELTAY 67 | 68 | threads, blocks = cudiv(param.NX, param.NY) 69 | @cuda threads=threads blocks=blocks one_step_kernel1(u, w, wold, φ, ψ, σ, τ, c, Δt, Δt/hx, Δt/hy) 70 | @cuda threads=threads blocks=blocks one_step_kernel2(u, φ, ψ, σ, τ, c, Δt, 0.5*Δt/hx, 0.5*Δt/hy) 71 | return nothing 72 | end 73 | 74 | 75 | @inline function delete_state!(state::Dict{Int,<:CuSeismicState}, i::Int) 76 | s = pop!(state, i) 77 | CUDA.unsafe_free!(s.upre) 78 | CUDA.unsafe_free!(s.u) 79 | CUDA.unsafe_free!(s.φ) 80 | CUDA.unsafe_free!(s.ψ) 81 | return s 82 | end 83 | 84 | function Base.getindex(x::CuArray, si::SafeIndex) 85 | Array(x[[si.arg]])[] 86 | end 87 | 88 | function Base.setindex!(x::CuArray, val, si::SafeIndex) 89 | x[[si.arg]] = val 90 | end -------------------------------------------------------------------------------- /ADSeismic/src/treeverse.jl: -------------------------------------------------------------------------------- 1 | # Inputs: 2 | # - `s`: The current seismic state containing wavefield values 3 | # - `param`: Acoustic propagator parameters 4 | # - `src`: Source location coordinates 5 | # - `srcv`: Source time function values 6 | # - `c`: Velocity model 7 | # Returns the next seismic state after one time step 8 | function treeverse_step!(s, s2, param, src, srcv, c) 9 | s2.upre .= s.u 10 | one_step!(param, s2.u, s.u, s.upre, s2.φ, s2.ψ, param.Σx, param.Σy, c) 11 | s2.u[src...] += srcv[s2.step[]]*param.DELTAT^2 12 | return s2 13 | end 14 | 15 | # Inputs: 16 | # - `x`: The current seismic state 17 | # - `g`: The gradient of the loss with respect to the next state 18 | # - `param`: Acoustic propagator parameters 19 | # - `src`: Source location coordinates 20 | # - `srcv`: Source time function values 21 | # - `gsrcv`: Gradient with respect to source time function 22 | # - `c`: Velocity model 23 | # - `gc`: Gradient with respect to velocity model 24 | # Returns the gradients with respect to the current state, source time function, and velocity model 25 | function treeverse_grad!(x, g, param, src, srcv, gsrcv, c, gc) 26 | # TODO: implement this with Enzyme.jl 27 | # one_step!(param, unext, s.u, s.upre, φ, ψ, param.Σx, param.Σy, c) 28 | end 29 | 30 | """ 31 | treeverse_gradient(s0; param, src, srcv, c, δ=20, logger=TreeverseLog()) 32 | 33 | * `s0` is the initial state, 34 | """ 35 | function treeverse_gradient(s0, gnf; param, src, srcv, c, δ=20, logger=TreeverseLog()) 36 | f = x->treeverse_step!(x, SeismicState(x.u, copy(x.u), copy(x.φ), copy(x.ψ), Ref(x.step[]+1)), param, src, srcv, c) 37 | res = [] 38 | function gf(x, g) # g is a triple of (gx, gsrcv, gc) 39 | if g === nothing 40 | y = f(x) 41 | push!(res, y) 42 | g = gnf(y) 43 | end 44 | gx2, gsrcv2, gc2 = g 45 | unext, φ, ψ = zero(x.u), copy(x.φ), copy(x.ψ) 46 | x2 = SeismicState(zero(x.u), unext, φ, ψ, Ref(x.step[]+1)) 47 | gx = SeismicState(zero(x.u), zero(x.u), zero(x.φ), zero(x.ψ), Ref(x.step[])) 48 | Enzyme.autodiff(Reverse, treeverse_step!, Const, Duplicated(x, gx), Duplicated(x2, gx2), Const(param), Const(src), Duplicated(srcv, gsrcv2), Duplicated(c, gc2)) 49 | return (gx, gsrcv2, gc2) 50 | end 51 | g = treeverse(f, gf, copy(s0); δ=δ, N=param.NSTEP-1, f_inplace=true, logger=logger) 52 | return res[], g 53 | end 54 | -------------------------------------------------------------------------------- 
/ADSeismic/src/utils.jl: -------------------------------------------------------------------------------- 1 | export Ricker 2 | """ 3 | Ricker(epp::Union{ElasticPropagatorParams, AcousticPropagatorParams}, 4 | a::Union{PyObject, <:Real}, 5 | shift::Union{PyObject, <:Real}, 6 | amp::Union{PyObject, <:Real}=1.0) 7 | 8 | Returns a Ricker wavelet (a tensor). 9 | - `epp`: a `ElasticPropagatorParams` or an `AcousticPropagatorParams` 10 | - `a`: Width parameter 11 | - `shift`: Center of the Ricker wavelet 12 | - `amp`: Amplitude of the Ricker wavelet 13 | 14 | ```math 15 | f(x) = \\mathrm{amp}A (1 - x^2/a^2) exp(-x^2/2 a^2) 16 | ``` 17 | where 18 | ```math 19 | A = 2/sqrt(3a)pi^1/4 20 | ``` 21 | """ 22 | function Ricker(epp, 23 | a, 24 | shift, 25 | amp=1.0) 26 | NT, T = epp.NSTEP, epp.NSTEP*epp.DELTAT 27 | A = @. 2 / (sqrt(3 * a) * (pi^0.25)) 28 | wsq = @. a^2 29 | vec = collect(1:NT) .-shift 30 | xsq = @. vec^2 31 | mod = @. (1 - xsq / wsq) 32 | gauss = @. exp(-xsq / (2 * wsq)) 33 | total = @. amp * A * mod * gauss 34 | return total 35 | end 36 | 37 | struct SafeIndex{T} 38 | arg::T 39 | end 40 | 41 | function SafeIndex(args::Tuple) 42 | SafeIndex(CartesianIndex(args)) 43 | end 44 | SafeIndex(args::Int...) = SafeIndex(args) 45 | 46 | function Base.getindex(x::AbstractArray, si::SafeIndex) 47 | getindex(x, si.arg) 48 | end 49 | 50 | function Base.setindex!(x::AbstractArray, val, si::SafeIndex) 51 | setindex!(x, val, si.arg) 52 | end 53 | 54 | -------------------------------------------------------------------------------- /ADSeismic/test/cuda.jl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/ADSeismic/test/cuda.jl -------------------------------------------------------------------------------- /ADSeismic/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using ADSeismic, CUDA 2 | using Test 3 | 4 | @testset "simulation" begin 5 | include("simulation.jl") 6 | end 7 | 8 | @testset "treeverse" begin 9 | include("treeverse.jl") 10 | end 11 | 12 | if CUDA.functional() 13 | @testset "cuda" begin 14 | include("cuda.jl") 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /ADSeismic/test/simulation.jl: -------------------------------------------------------------------------------- 1 | using ADSeismic, Test 2 | 3 | @testset "loss" begin 4 | nx = 100 5 | ny = 100 6 | param = AcousticPropagatorParams(nx=nx, ny=ny, 7 | Rcoef=0.2, dx=20.0, dy=20.0, dt=0.05, nstep=2000) 8 | 9 | c = 1000*ones(param.NX+2, param.NY+2) 10 | src = (param.NX÷2, param.NY÷2) 11 | srcv = Ricker(param, 100.0, 500.0) 12 | tu = solve(param, src, srcv, c) 13 | tu2 = ADSeismic.solve_final(param, src, srcv, c) 14 | @test tu[:,:,end] ≈ tu2 15 | loss = sum(tu .^ 2) 16 | @test loss ≈ 10.931466822080788 17 | end 18 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/Project.toml: -------------------------------------------------------------------------------- 1 | name = "AutodiffAndOptimization" 2 | uuid = "105ffcd2-060d-495b-a985-7b4e117e6040" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | 10 | [compat] 11 | ChainRules = "1.72.2" 12 | LinearAlgebra = "1.11.0" 13 | julia = "1" 14 | 15 | [extras] 16 | Test 
= "8dfed614-e22c-5e08-85e1-65c5234f0b40" 17 | 18 | [targets] 19 | test = ["Test"] 20 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/README.md: -------------------------------------------------------------------------------- 1 | # AutodiffAndOptimization -------------------------------------------------------------------------------- /AutodiffAndOptimization/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | AutodiffAndOptimization = "105ffcd2-060d-495b-a985-7b4e117e6040" 3 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 4 | Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" 5 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/examples/main.jl: -------------------------------------------------------------------------------- 1 | using CairoMakie 2 | using Printf 3 | using AutodiffAndOptimization 4 | 5 | bestx, bestf, history = simplex(rosenbrock, [-1.2, -1.0]; tol=1e-3) 6 | bestx, bestf 7 | 8 | x = -2:0.02:2 9 | y = -2:0.02:2 10 | f = [rosenbrock((a, b)) for b in y, a in x] 11 | 12 | bestx, bestf, history = simplex(rosenbrock, [-1.2, -1.0]; tol=1e-3) 13 | @info "converged in $(length(history)) steps, with error $bestf" 14 | 15 | fig = Figure() 16 | ax = Axis(fig[1, 1]; xlabel="x₁", ylabel="x₂", limits=(-2, 2, -2, 2)) 17 | heatmap!(ax, x, y, log.(f)) 18 | triangles = [[[item[:,1]..., item[1,1]], [item[:,2]..., item[1, 2]]] for item in history] 19 | triangle_x = Observable(triangles[1][1]) 20 | triangle_y = Observable(triangles[1][2]) 21 | lines!(ax, triangle_x, triangle_y; label="", color="white") 22 | txt = Observable("step = 0") 23 | text!(ax, -1.5, 1.5; text=txt, color=:white, fontsize=20) 24 | 25 | filename = joinpath(@__DIR__, "simplex.mp4") 26 | record(fig, filename, 1:length(triangles); framerate = 24) do i 27 | txt[] = @sprintf "step = %d, f = %.2e" i minimum(k->rosenbrock(history[i][k,:]), 1:3) 28 | "step = $(i-1), f = $(rosenbrock(history[i]))" 29 | triangle_x[] = triangles[i][1] 30 | triangle_y[] = triangles[i][2] 31 | end 32 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/examples/simplex.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/AutodiffAndOptimization/examples/simplex.mp4 -------------------------------------------------------------------------------- /AutodiffAndOptimization/examples/trebuchet.jl: -------------------------------------------------------------------------------- 1 | # leap frog simulator of a trebuchet 2 | function simulate(x0, v0, θ0, ω0, τ, n) 3 | x, v, θ, ω = x0, v0, θ0, ω0 4 | for i in 1:n 5 | x += τ*v 6 | v += τ*(-9.8*sin(θ)) 7 | θ += τ*ω 8 | ω += τ*(9.8/2)*cos(θ) 9 | end 10 | return x, v, θ, ω 11 | end 12 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/src/AutodiffAndOptimization.jl: -------------------------------------------------------------------------------- 1 | module AutodiffAndOptimization 2 | 3 | export rosenbrock 4 | export simplex, simplex1d 5 | 6 | function rosenbrock(x) 7 | (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2 8 | end 9 | 10 | include("simplex.jl") 11 | include("Cracker/Cracker.jl") 12 | 13 | end 14 | -------------------------------------------------------------------------------- 
/AutodiffAndOptimization/src/Cracker/Cracker.jl: -------------------------------------------------------------------------------- 1 | module Cracker 2 | 3 | using ChainRules: rrule, unthunk 4 | import ChainRules 5 | using LinearAlgebra 6 | 7 | export track, untrack 8 | 9 | include("trackedarray.jl") 10 | include("trace.jl") 11 | include("rrule.jl") 12 | 13 | end 14 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/src/Cracker/rrule.jl: -------------------------------------------------------------------------------- 1 | # NOTE: it's simpler to just manually define the overloads of ChainRules 2 | # so let's not generate them automatically for now, until we can handle 3 | # the function signatures better, e.g wrap tracked-types over supported 4 | # types of the rrule signatures automatically 5 | Base.reshape(A::TrackedArray, shape::Int...) = trace(reshape, A, shape...) 6 | Base.reshape(A::TrackedArray, shape::NTuple{N, Int}) where N = trace(reshape, A, shape) 7 | Base.vect(X::Vararg{<:TrackedArray}) = trace(Base.vect, X) 8 | Base.hcat(Xs::TrackedArray...) = trace(Base.hcat, Xs...) 9 | 10 | Base.:(+)(A::TrackedArray, B::TrackedArray) = trace(+, A, B) 11 | Base.:(-)(A::TrackedArray, B::TrackedArray) = trace(-, A, B) 12 | Base.:(*)(A::TrackedArray, B::TrackedArray) = trace(*, A, B) 13 | Base.:(-)(A::TrackedArray) = trace(-, A) 14 | 15 | function Base.sum(A::TrackedArray) 16 | ret = fill!(similar(A.value, ()), sum(A.value)) 17 | A̅ = zero(A) 18 | record = Record(sum, (A,), y̅ -> (ChainRules.NoTangent(), fill!(A̅, y̅[])), zero(ret), false) 19 | return TrackedArray(ret, record) 20 | end 21 | function Base.getindex(A::TrackedArray, indices::Int...) 22 | ret = fill!(similar(A.value, ()), A.value[indices...]) 23 | A̅ = zero(A) 24 | A̅[indices...] = 1 25 | record = Record(getindex, (A, indices), y̅ -> (ChainRules.NoTangent(), A̅), zero(ret), false) 26 | return TrackedArray(ret, record) 27 | end 28 | 29 | function Base.abs2(A::TrackedArray) 30 | @assert ndims(A) == 0 "expect a scalar input for abs2" 31 | ret = map(abs2, A.value) 32 | A̅ = zero(A.value) 33 | record = Record(abs2, (A,), y̅ -> (ChainRules.NoTangent(), map((x, y)-> 2*x*y, A.value, y̅)), zero(ret), false) 34 | return TrackedArray(ret, record) 35 | end 36 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/src/Cracker/trace.jl: -------------------------------------------------------------------------------- 1 | function trace(f, args...) 2 | untracked_args = untrack.(args) 3 | @debug "tracking `$f$((untracked_args...,)))`" 4 | ret, pullback = rrule(f, untracked_args...) 5 | function pullback_unthunk(grad) 6 | grad_args = pullback(grad) 7 | return unthunk.(grad_args) 8 | end 9 | @assert ret isa AbstractArray "expect the output a primitive function to be an array, got $(typeof(ret))" 10 | record = Record(f, args, pullback_unthunk, zero(ret), false) 11 | return TrackedArray(ret, record) 12 | end 13 | 14 | backpropagate!(tracked_value::Tuple, grad::AbstractArray) = backpropagate!.(tracked_value, grad) 15 | function backpropagate!(tracked_value, grad::AbstractArray) 16 | @debug "backpropagate! 
`$(tracked_value)`" 17 | is_tracked(tracked_value) || error("expect tracked value") 18 | record = tracked_value.record 19 | record.grad .+= grad # accumulate grad 20 | record.is_leaf && return 21 | @debug "back propagating `pullback$((record.f, untrack.(record.args)...))($grad)`" 22 | grad_args = Base.tail(record.pullback(record.grad)) 23 | for (arg, grad_arg) in zip(record.args, grad_args) 24 | is_tracked(arg) && backpropagate!(arg, grad_arg) 25 | end 26 | return 27 | end 28 | 29 | function gradient(f, args::Tuple) 30 | tracked_args = track.(args) 31 | ret = f(tracked_args...) 32 | @assert eltype(ret) <: Real && ndims(ret) == 0 "expect a scalar real output as the loss function! got $(typeof(ret))" 33 | backpropagate!(ret, fill!(similar(ret), 1)) # the gradient of the loss function is 1 34 | return map(arg -> arg.record.grad, tracked_args) 35 | end 36 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/src/Cracker/trackedarray.jl: -------------------------------------------------------------------------------- 1 | mutable struct Record{T <: AbstractArray} 2 | f 3 | args 4 | pullback 5 | grad::T 6 | is_leaf::Bool 7 | end 8 | 9 | leaf_record(x) = Record(nothing, nothing, nothing, zero(x), true) 10 | # we use traits over types 11 | is_tracked(x) = false 12 | untrack(x) = is_tracked(x) ? x.value : x 13 | record(x) = x.record 14 | 15 | struct TrackedArray{T, N, S <: AbstractArray{T, N}} <: AbstractArray{T, N} 16 | value::S 17 | record::Record{S} 18 | end 19 | 20 | function Base.show(io::IO, mime::MIME"text/plain", x::TrackedArray) 21 | print(io, "tracked ") 22 | show(io, mime, x.value) 23 | end 24 | 25 | track(A::AbstractArray, record::Record=leaf_record(A)) = TrackedArray(A, record) 26 | is_tracked(::TrackedArray) = true 27 | Base.IndexStyle(X::TrackedArray) = IndexStyle(untrack(X)) 28 | Base.size(X::TrackedArray, idx::Int...) = size(untrack(X), idx...) 29 | Base.length(X::TrackedArray) = length(untrack(X)) 30 | 31 | is_tracked(x::Tuple) = any(is_tracked, x) 32 | untrack(x::Tuple) = untrack.(x) 33 | function track(A::Tuple, record::Record=leaf_record(A)) 34 | return track.(A, Ref(record)) 35 | end 36 | 37 | function Base.show(io::IO, x::TrackedArray) 38 | print(io, "track(") 39 | show(io, x.value) 40 | print(io, ")") 41 | end -------------------------------------------------------------------------------- /AutodiffAndOptimization/src/simplex.jl: -------------------------------------------------------------------------------- 1 | function simplex1d(f, x1, x2; tol=1e-6) 2 | # initial simplex 3 | history = [[x1, x2]] 4 | f1, f2 = f(x1), f(x2) 5 | while abs(x2 - x1) > tol 6 | xc = 2x1 - x2 7 | fc = f(xc) 8 | if fc < f1 # flip 9 | x1, f1, x2, f2 = xc, fc, x1, f1 10 | else # shrink 11 | if fc < f2 # let the smaller one be x2. 
12 | x2, f2 = xc, fc 13 | end 14 | xd = (x1 + x2) / 2 15 | fd = f(xd) 16 | if fd < f1 # update x1 and x2 17 | x1, f1, x2, f2 = xd, fd, x1, f1 18 | else 19 | x2, f2 = xd, fd 20 | end 21 | end 22 | push!(history, [x1, x2]) 23 | end 24 | return x1, f1, history 25 | end 26 | 27 | function simplex(f, x0; tol=1e-6, maxiter=1000) 28 | n = length(x0) 29 | x = zeros(n+1, n) 30 | fvals = zeros(n+1) 31 | x[1,:] = x0 32 | fvals[1] = f(x0) 33 | alpha = 1.0 34 | beta = 0.5 35 | gamma = 2.0 36 | for i in 1:n 37 | x[i+1,:] = x[i,:] 38 | x[i+1,i] += 1.0 39 | fvals[i+1] = f(x[i+1,:]) 40 | end 41 | history = [x] 42 | for iter in 1:maxiter 43 | # Sort the vertices by function value 44 | order = sortperm(fvals) 45 | x = x[order,:] 46 | fvals = fvals[order] 47 | # Calculate the centroid of the n best vertices 48 | xbar = dropdims(sum(x[1:n,:], dims=1) ./ n, dims=1) 49 | # Reflection 50 | xr = xbar + alpha*(xbar - x[n+1,:]) 51 | fr = f(xr) 52 | if fr < fvals[1] 53 | # Expansion 54 | xe = xbar + gamma*(xr - xbar) 55 | fe = f(xe) 56 | if fe < fr 57 | x[n+1,:] = xe 58 | fvals[n+1] = fe 59 | else 60 | x[n+1,:] = xr 61 | fvals[n+1] = fr 62 | end 63 | elseif fr < fvals[n] 64 | x[n+1,:] = xr 65 | fvals[n+1] = fr 66 | else 67 | # Contraction 68 | if fr < fvals[n+1] 69 | xc = xbar + beta*(x[n+1,:] - xbar) 70 | fc = f(xc) 71 | if fc < fr 72 | x[n+1,:] = xc 73 | fvals[n+1] = fc 74 | else 75 | # Shrink 76 | for i in 2:n+1 77 | x[i,:] = x[1,:] + beta*(x[i,:] - x[1,:]) 78 | fvals[i] = f(x[i,:]) 79 | end 80 | end 81 | else 82 | # Shrink 83 | for i in 2:n+1 84 | x[i,:] = x[1,:] + beta*(x[i,:] - x[1,:]) 85 | fvals[i] = f(x[i,:]) 86 | end 87 | end 88 | end 89 | push!(history, x) 90 | # Check for convergence 91 | if maximum(abs.(x[2:end,:] .- x[1,:])) < tol && maximum(abs.(fvals[2:end] .- fvals[1])) < tol 92 | break 93 | end 94 | end 95 | # Return the best vertex and function value 96 | bestx = x[1,:] 97 | bestf = fvals[1] 98 | return (bestx, bestf, history) 99 | end 100 | -------------------------------------------------------------------------------- /AutodiffAndOptimization/test/cracker.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using AutodiffAndOptimization.Cracker 3 | using AutodiffAndOptimization.Cracker.ChainRules: rrule, unthunk 4 | 5 | @testset "basic propagate ($T)" for T in [Float64, ComplexF64] 6 | A, B, C = track(rand(T, 2, 2)), track(rand(T, 2, 2)), track(rand(T, 4)) 7 | Z = A + B * reshape(C, 2, 2) 8 | 9 | ret = abs2(sum(Z)) 10 | Cracker.backpropagate!(ret, fill!(similar(ret), 1.0)) 11 | 12 | uA,uB,uC = untrack.((A, B, C)) 13 | T1, pb1 = rrule(reshape, uC, 2, 2) 14 | T2, pb2 = rrule(*, uB, T1) 15 | T3, pb3 = rrule(+, uA, T2) 16 | T4, pb4 = rrule(sum, T3) 17 | T5, pb5 = rrule(abs2, T4) 18 | 19 | dT5 = 1.0 20 | _, dT4 = pb5(dT5) 21 | _, dT3 = pb4(dT4) 22 | _, duA, dT2 = pb3(dT3) 23 | _, duB, dT1 = pb2(dT2) 24 | _, duC, _, _ = pb1(dT1) 25 | 26 | @test unthunk(duA) ≈ A.record.grad 27 | @test unthunk(duB) ≈ B.record.grad 28 | @test unthunk(duC) ≈ C.record.grad 29 | end 30 | 31 | @testset "gradient 1" begin 32 | a = rand(2, 2) 33 | @test Cracker.gradient(sum, (a,))[1] == ones(2, 2) 34 | end 35 | 36 | @testset "gradient 2" begin 37 | A, B, C = rand(Float64, 2, 2), rand(Float64, 2, 2), rand(Float64, 4) 38 | function loss(A, B, C) 39 | Z = A + B * reshape(C, 2, 2) 40 | return abs2(sum(Z)) 41 | end 42 | grads = Cracker.gradient(loss, (A, B, C)) 43 | @test grads[1] ≈ ones(2, 2) 44 | @test grads[2] ≈ ones(2, 2) 45 | @test grads[3] ≈ ones(2, 2) 46 | end 
-------------------------------------------------------------------------------- /AutodiffAndOptimization/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using AutodiffAndOptimization 2 | using Test 3 | 4 | @testset "AutodiffAndOptimization.jl" begin 5 | # Write your tests here. 6 | end 7 | -------------------------------------------------------------------------------- /CompressedSensing/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | .vscode/ -------------------------------------------------------------------------------- /CompressedSensing/Project.toml: -------------------------------------------------------------------------------- 1 | name = "CompressedSensing" 2 | uuid = "eab4ea05-e27a-4e05-9e06-444d2403730e" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 8 | FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" 9 | Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" 10 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 11 | NLSolversBase = "d41bc354-129a-5804-8e4c-c37616107c6c" 12 | Optim = "429524aa-4258-5aef-a3af-852621145aeb" 13 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 14 | StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 15 | 16 | [compat] 17 | FFTW = "1" 18 | FiniteDifferences = "0.12" 19 | Images = "0.26" 20 | NLSolversBase = "7" 21 | Optim = "1" 22 | StatsBase = "0.34" 23 | LinearAlgebra = "1" 24 | Random = "1" 25 | julia = "1" 26 | 27 | [extras] 28 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 29 | 30 | [targets] 31 | test = ["Test"] 32 | -------------------------------------------------------------------------------- /CompressedSensing/README.md: -------------------------------------------------------------------------------- 1 | # CompressedSensing -------------------------------------------------------------------------------- /CompressedSensing/data/waterfall.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/CompressedSensing/data/waterfall.jpeg -------------------------------------------------------------------------------- /CompressedSensing/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CompressedSensing = "eab4ea05-e27a-4e05-9e06-444d2403730e" 3 | -------------------------------------------------------------------------------- /CompressedSensing/examples/main.jl: -------------------------------------------------------------------------------- 1 | using CompressedSensing -------------------------------------------------------------------------------- /CompressedSensing/src/CompressedSensing.jl: -------------------------------------------------------------------------------- 1 | module CompressedSensing 2 | using FFTW, StatsBase, LinearAlgebra, Optim 3 | 4 | export sensing_image, sample_image_pixels, ImageSamples 5 | export SimpleLinesearch 6 | 7 | include("owlqn.jl") 8 | include("compressed_sensing_2d.jl") 9 | 10 | end 11 | -------------------------------------------------------------------------------- /CompressedSensing/test/compressed_sensing_2d.jl: -------------------------------------------------------------------------------- 1 | using CompressedSensing 2 | using CompressedSensing: objective_dct, 
gradient_dct!, gradient_dct 3 | using Test 4 | using Images, FiniteDifferences, FFTW, Optim 5 | using Random, LinearAlgebra 6 | 7 | @testset "waterfall gradient test" begin 8 | Random.seed!(6) 9 | C = 0.0 10 | sample_probability = 1.0 11 | #img = Float64.(Gray.(Images.load(pkgdir(CompressedSensing, "data", "waterfall.jpeg")))) 12 | img = randn(4, 4) 13 | samples = sample_image_pixels(img, sample_probability) 14 | 15 | x0 = rand(size(img)...) 16 | # the gradient obtained with the finite difference method 17 | gs = [FiniteDifferences.central_fdm(5, 1)(x->objective_dct((y=copy(x0); y[ci]=x; y), samples; C), x0[ci]) for ci in CartesianIndices(x0)] 18 | # the gradient obtained with the manual gradient 19 | gobj = gradient_dct(x0, samples; C) 20 | @test isapprox(gs, gobj; rtol=1e-2) 21 | end 22 | 23 | @testset "optimization LBFGS" begin 24 | Random.seed!(3) 25 | # compressed sensing optimization 26 | C = 0.01 27 | sample_probability = 0.1 28 | img = Float64.(Gray.(Images.load(pkgdir(CompressedSensing, "data", "waterfall.jpeg")))) 29 | samples = sample_image_pixels(img, sample_probability) 30 | restored_img = sensing_image(samples; C, optimizer=:OWLQN, show_trace=true) 31 | 32 | @test norm(restored_img[samples.indices] - img[samples.indices]) <= 5 33 | @test norm(dct(restored_img), 1) <= 30000 34 | #display(Gray.(FFTW.idct(optres.minimizer))) 35 | #display(Gray.(optres.minimizer)) 36 | end 37 | 38 | @testset "optimization OWL-QN" begin 39 | Random.seed!(2) 40 | C = 0.002 41 | sample_probability = 0.1 42 | img = Float64.(Gray.(Images.load(pkgdir(CompressedSensing, "data", "waterfall.jpeg")))) 43 | samples = sample_image_pixels(img, sample_probability) 44 | restored_img = sensing_image(samples; C, optimizer=:OWLQN, show_trace=true, linesearch=Optim.HagerZhang()) 45 | @test norm(restored_img[samples.indices] - img[samples.indices]) <= 5 46 | @test norm(dct(restored_img), 1) <= 30000 47 | 48 | #display(Gray.(FFTW.idct(res))) 49 | #display(Gray.(res)) 50 | end -------------------------------------------------------------------------------- /CompressedSensing/test/owlqn.jl: -------------------------------------------------------------------------------- 1 | using Test, CompressedSensing.L1Convex 2 | using Random 3 | 4 | @testset "original" begin 5 | Random.seed!(111) 6 | N = 100 7 | X = randn(N, 3) 8 | y = X * [2, -2, 0.0] .+ randn(N) .+ 0.1 9 | X = hcat(X, ones(N)) # add intercept term 10 | 11 | # convex function to minimize (here, MSE) 12 | # note that this function should not include L1 penalty, OWL-QN algorithm applies L1 penalty during gradient update step 13 | # parameters beta must be a 1D vector 14 | # in this implementation, all elements of beta are L1 regularized (e.g., including intercept parameter) 15 | function f(beta::Vector{Float64}) 16 | diff = y - X * beta; 17 | sum(diff.^2.0) / size(y)[1] 18 | end 19 | 20 | # gradient function 21 | function ∇f(beta::Vector{Float64}) 22 | -2 .* X' * (y - X * beta) / size(y)[1] 23 | end 24 | 25 | # initialize parameters 26 | beta = ones(size(X)[2]) 27 | 28 | # optimization with λ = 0.2 regularization strength 29 | λ = 0.2 30 | M = OWLQN(typeof(beta); λ); 31 | 32 | for i in 1:17 33 | beta = step!(M, f, ∇f, beta); 34 | 35 | mse = f(beta); 36 | nrm = sum(abs.(beta)) 37 | loss = mse + λ * nrm 38 | 39 | print(string("Iteration: ", i, " Loss: ", loss, " MSE: ", mse, "\n")) 40 | end 41 | 42 | # OWL-QN 43 | print(string("OWL-QN L1-regularized solution: ", round.(beta, digits=3),"\n")) 44 | 45 | # OLS 46 | print(string("OLS solution: ", round.(inv(X' * X) * X' * 
y, digits=3),"\n")) 47 | 48 | @test isapprox(round.(beta, digits=3), round.(inv(X' * X) * X' * y, digits=3); atol=0.3) 49 | end -------------------------------------------------------------------------------- /CompressedSensing/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | 3 | @testset "compressed sensing 2d" begin 4 | include("compressed_sensing_2d.jl") 5 | end 6 | 7 | @testset "owlqn" begin 8 | include("owlqn.jl") 9 | end -------------------------------------------------------------------------------- /GraphClustering/Project.toml: -------------------------------------------------------------------------------- 1 | name = "GraphClustering" 2 | uuid = "8ff72e77-ac1c-440e-8064-6af5ea7625d8" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | Clustering = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5" 8 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 9 | KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" 10 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 11 | SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 12 | 13 | [compat] 14 | Clustering = "0.15" 15 | Graphs = "1" 16 | KrylovKit = "0.9" 17 | LinearAlgebra = "1" 18 | SparseArrays = "1" 19 | julia = "1" 20 | 21 | [extras] 22 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 23 | 24 | [targets] 25 | test = ["Test"] 26 | -------------------------------------------------------------------------------- /GraphClustering/README.md: -------------------------------------------------------------------------------- 1 | # GraphClustering 2 | 3 | This package implements the spectral clustering algorithm[^Ng2001] in Julia. 4 | 5 | ## To run 6 | 7 | Clone the repository to your local machine and install the required packages (in a terminal): 8 | 9 | ```bash 10 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 11 | $ cd ScientificComputingDemos 12 | $ make init-GraphClustering # initialize the environment in Spinglass and GraphClustering/examples 13 | $ make example-GraphClustering # run the script GraphClustering/examples/main.jl 14 | ``` 15 | 16 | ## References 17 | 18 | [^Ng2001]: Ng, Andrew, Michael Jordan, and Yair Weiss. [On spectral clustering: Analysis and an algorithm.](https://papers.nips.cc/paper_files/paper/2001/hash/801272ee79cfde7fa5960571fee36b9b-Abstract.html) Advances in neural information processing systems 14 (2001). 
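## Example

A minimal usage sketch of the exported `spectral_clustering` function, adapted from `test/clustering.jl`; the two point clouds below are purely illustrative:

```julia
using GraphClustering

# two well-separated 2D point clouds
points = vcat([rand(2) for _ in 1:10], [rand(2) .+ 2 for _ in 1:10])
result = spectral_clustering(points, 2; sigma=2.0)
result.assignments   # cluster label (1 or 2) for each point
```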
19 | -------------------------------------------------------------------------------- /GraphClustering/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | GraphClustering = "8ff72e77-ac1c-440e-8064-6af5ea7625d8" 3 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 4 | LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc" 5 | 6 | [compat] 7 | GraphClustering = "1" 8 | Graphs = "1" 9 | LuxorGraphPlot = "0.5" 10 | julia = "1" -------------------------------------------------------------------------------- /GraphClustering/src/GraphClustering.jl: -------------------------------------------------------------------------------- 1 | module GraphClustering 2 | 3 | using LinearAlgebra 4 | using KrylovKit 5 | using Graphs 6 | using Clustering 7 | 8 | export glue_graphs, spectral_clustering 9 | 10 | include("clustering.jl") 11 | 12 | end 13 | -------------------------------------------------------------------------------- /GraphClustering/src/clustering.jl: -------------------------------------------------------------------------------- 1 | """ 2 | connected_components(g::SimpleGraph, kmax::Int; atol=1e-8) 3 | 4 | Returns the connected components of the graph using the spectral graph theory. 5 | 6 | ### Arguments 7 | - `g::SimpleGraph`: the input graph. 8 | - `kmax::Int`: the number of eigenvectors to compute. 9 | 10 | ### Keyword arguments 11 | - `atol::Real`: the tolerance for the zero eigenvalues. 12 | """ 13 | function connected_components(g::SimpleGraph, kmax::Int; atol=1e-8) 14 | # Returns the connected components of the graph. 15 | lap = laplacian_matrix(g) 16 | # hermitian matrices have real eigenvalues 17 | eigvals, eigvecs = eigsolve(lap, randn(nv(g)), kmax, :SR; ishermitian=true) 18 | # find zero eigenvalues 19 | idx = findall(x->abs(x) < atol, eigvals) 20 | # pick any of the eigenvectors, a connected cluster have the same amplitute 21 | v = eigvecs[first(idx)] 22 | # cluster the vertices by the eigenvector 23 | clusters = Dict{Int, Vector{Int}}() 24 | for (j, x) in enumerate(v) 25 | key = round(Int, x * 10^8) 26 | if haskey(clusters, key) 27 | push!(clusters[key], j) 28 | else 29 | clusters[key] = [j] 30 | end 31 | end 32 | return collect(values(clusters)) 33 | end 34 | 35 | """ 36 | glue_graphs(g1::SimpleGraph, g2::SimpleGraph) 37 | 38 | Glue two graphs together. 39 | """ 40 | function glue_graphs(g1::SimpleGraph, g2::SimpleGraph) 41 | g = SimpleGraph(nv(g1)+nv(g2)) 42 | for e in edges(g1) 43 | add_edge!(g, src(e), dst(e)) 44 | end 45 | for e in edges(g2) 46 | add_edge!(g, src(e)+nv(g1), dst(e)+nv(g1)) 47 | end 48 | return g 49 | end 50 | 51 | """ 52 | spectral_clustering(points::AbstractVector, k; sigma) 53 | 54 | Spectral clustering algorithm. 55 | 56 | ### Arguments 57 | - `points::AbstractVector`: the data points to be clustered. 58 | - `k::Int`: the number of clusters. 59 | 60 | ### Keyword arguments 61 | - `sigma::Real`: the parameter for the Gaussian kernel. 62 | 63 | ### Reference 64 | - Ng, Andrew, Michael Jordan, and Yair Weiss. "On spectral clustering: Analysis and an algorithm." Advances in neural information processing systems 14 (2001). 
65 | https://papers.nips.cc/paper_files/paper/2001/hash/801272ee79cfde7fa5960571fee36b9b-Abstract.html 66 | """ 67 | function spectral_clustering(points::AbstractVector, k; sigma) 68 | expdist(x, y) = exp(-(sum(abs2, x .- y)/sigma)^2) 69 | adj = expdist.(reshape(points, 1, :), points) 70 | D = Diagonal(inv.(sqrt.(dropdims(sum(adj; dims=1); dims=1)))) 71 | normalized_adj = D * adj * D 72 | vals, _vecs = eigsolve(-normalized_adj, randn(length(points)), k, :LM; ishermitian=true) 73 | vecs = hcat(_vecs[1:k]...) 74 | # normalize along the row 75 | vecs ./= sqrt.(sum(abs2, vecs; dims=2)) 76 | return kmeans(vecs', k) 77 | end 78 | onehot(k, i) = Float64[i == j for j in 1:k] -------------------------------------------------------------------------------- /GraphClustering/test/clustering.jl: -------------------------------------------------------------------------------- 1 | using Test, GraphClustering, GraphClustering.Graphs 2 | 3 | @testset "clustering" begin 4 | g = GraphClustering.Graphs.smallgraph(:petersen) 5 | g2 = GraphClustering.Graphs.smallgraph(:tutte) 6 | g3 = GraphClustering.glue_graphs(g, g2) 7 | xs = vcat([rand() for x in 1:nv(g)], [rand() + 2 for x in 1:nv(g2)]) 8 | ys = vcat([rand() for y in 1:nv(g)], [rand() + 2 for y in 1:nv(g2)]) 9 | res = spectral_clustering([[x, y] for (x, y) in zip(xs, ys)], 2; sigma=2.0) 10 | @test res.assignments == vcat(ones(Int, nv(g)), 2 .* ones(Int, nv(g2))) || res.assignments == vcat(2 .* ones(Int, nv(g)), ones(Int, nv(g2))) 11 | end -------------------------------------------------------------------------------- /GraphClustering/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using GraphClustering 2 | using Test 3 | 4 | @testset "clustering" begin 5 | include("clustering.jl") 6 | end -------------------------------------------------------------------------------- /HappyMolecules/.gitignore: -------------------------------------------------------------------------------- 1 | /Manifest.toml 2 | .vscode/ 3 | *.swp 4 | .CondaPkg/ 5 | docs/Manifest.toml 6 | docs/build/ 7 | -------------------------------------------------------------------------------- /HappyMolecules/Project.toml: -------------------------------------------------------------------------------- 1 | name = "HappyMolecules" 2 | uuid = "800cb13d-1614-43bf-9060-6b9e570c014e" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "0.1.0" 5 | 6 | [deps] 7 | DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" 8 | Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" 9 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 10 | StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" 11 | Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 12 | 13 | [compat] 14 | DocStringExtensions = "0.8, 0.9" 15 | Enzyme = "0.13" 16 | Random = "1" 17 | StaticArrays = "1" 18 | Statistics = "1" 19 | julia = "1" 20 | 21 | [extras] 22 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 23 | 24 | [targets] 25 | test = ["Test"] 26 | -------------------------------------------------------------------------------- /HappyMolecules/README.md: -------------------------------------------------------------------------------- 1 | # HappyMolecules 2 | 3 | [![CI](https://github.com/CodingThrust/HappyMolecules.jl/actions/workflows/ci.yml/badge.svg)](https://github.com/CodingThrust/HappyMolecules.jl/actions/workflows/ci.yml) 4 | 
[![codecov](https://codecov.io/gh/CodingThrust/HappyMolecules.jl/branch/main/graph/badge.svg?token=PHXRKNRSCV)](https://codecov.io/gh/CodingThrust/HappyMolecules.jl) 5 | [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://codingthrust.github.io/HappyMolecules.jl/build) 6 | 7 | This is a tutorial for reproducing the examples in Chapter 4 of the book 8 | 9 | > Understanding Molecular Simulation, From Algorithms to Applications 10 | -------------------------------------------------------------------------------- /HappyMolecules/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | HappyMolecules = "800cb13d-1614-43bf-9060-6b9e570c014e" 3 | -------------------------------------------------------------------------------- /HappyMolecules/notebooks/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | PlutoSliderServer = "2fc8631c-6f24-4c5b-bca7-cbb509c42db4" 3 | 4 | [compat] 5 | PlutoSliderServer = "0.3" -------------------------------------------------------------------------------- /HappyMolecules/project/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | HappyMolecules = "800cb13d-1614-43bf-9060-6b9e570c014e" 3 | 4 | [compat] 5 | julia = "1" 6 | HappyMolecules = "0.1" 7 | -------------------------------------------------------------------------------- /HappyMolecules/project/triplepoint.jl: -------------------------------------------------------------------------------- 1 | using HappyMolecules 2 | using HappyMolecules.Applications: lennard_jones_triple_point 3 | using Makie, CairoMakie 4 | 5 | res = lennard_jones_triple_point() 6 | 7 | # plot the radial distribution 8 | lines(res.radial_ticks, res.radial_distribution) 9 | 10 | # plot the potential energy and kinetic energy as a function of time. 11 | times = LinRange(0, res.runtime.t, length(res.kinetic_energy)) 12 | plt.plot(times, res.potential_energy; label="Potential energy") 13 | plt.plot(times, res.kinetic_energy; label="Kinetic energy") 14 | plt.plot(times, res.potential_energy + res.kinetic_energy; label="Total energy", color="k", ls="--") 15 | plt.xlabel("step") 16 | plt.ylabel("Energy/N") 17 | plt.legend() 18 | plt.show() 19 | 20 | # TODO:display the time evolution process 21 | let 22 | filename = tempname() * ".mp4" 23 | fig = Figure(; resolution=(800, 800)) 24 | ax = Axis3(fig[1,1]; aspect=:data) 25 | limits = CairoMakie.FRect3D((0, 0, 0),(box.dimensions...,)) 26 | limits!(ax, limits) 27 | points = Observable([Point3f(x...,) for x in md.x]) 28 | directions = Observable([Point3f(x/100...,) for x in md.field]) 29 | scatter!(ax, points) 30 | arrows!(ax, points, directions; linewidth=0.02, arrowsize=0.1) 31 | record(fig, filename, 1:500; framerate = 30, sleep=true) do i 32 | for j=1:10 33 | step!(md) 34 | end 35 | points[] = [Point3f(mod.(x, box.dimensions)...,) for x in md.x] 36 | directions[] = [Point3f(x/100...,) for x in md.field] 37 | end 38 | end -------------------------------------------------------------------------------- /HappyMolecules/src/HappyMolecules.jl: -------------------------------------------------------------------------------- 1 | module HappyMolecules 2 | 3 | using StaticArrays 4 | using Statistics 5 | using DocStringExtensions 6 | 7 | export Bin, ticks, ncounts 8 | export molecule_dynamics, step! 
9 | export PeriodicBox, Box, random_locations, uniform_locations, volume 10 | export PotentialField, LennardJones, potential_energy, force 11 | export positions, velocities, forces, num_particles, 12 | mean_kinetic_energy, temperature, mean_potential_energy, 13 | pressure 14 | 15 | # setup docstring format 16 | DocStringExtensions.@template (FUNCTIONS, METHODS, MACROS) = 17 | """ 18 | $(SIGNATURES) 19 | $(DOCSTRING) 20 | $(METHODLIST) 21 | """ 22 | 23 | include("Core.jl") 24 | include("enzyme.jl") 25 | include("applications.jl") 26 | 27 | end 28 | -------------------------------------------------------------------------------- /HappyMolecules/src/applications.jl: -------------------------------------------------------------------------------- 1 | module Applications 2 | 3 | using Random, StaticArrays 4 | using DocStringExtensions 5 | using ..HappyMolecules 6 | 7 | export lennard_jones_triple_point 8 | 9 | # setup docstring format 10 | DocStringExtensions.@template (FUNCTIONS, METHODS, MACROS) = 11 | """ 12 | $(SIGNATURES) 13 | $(DOCSTRING) 14 | $(METHODLIST) 15 | """ 16 | 17 | """ 18 | Case study in Chapter 4 of the book "Understanding Molecular Simulation, From Algorithms to Applications". 19 | It is about the molecule dynamics simulation of a Lennard-Jones Fluid in a 3D periodic box. 20 | The parameters are set close to the triple point. 21 | 22 | ### Keyword arguments 23 | * `natoms` is the number of atoms. 24 | * `temperature` is the initial temperature. 25 | * `density` is the density of atoms. 26 | * `Nt` is the number of tims steps. 27 | * `Δt` is the time step. 28 | * `seed` is the random seed. 29 | * `gr_lastn` is the number of last n samples for collecting radial distribution. 30 | """ 31 | function lennard_jones_triple_point(; 32 | natoms::Int = 108, # number of atoms 33 | temperature::Real = 0.728, # initial temperature 34 | density::Real = 0.8442, # density of particles 35 | Nt = 2000, 36 | Δt = 0.001, 37 | seed::Int = 2, 38 | gr_lastn::Int = 500, 39 | ) 40 | Random.seed!(seed) 41 | 42 | # the box 43 | volume = natoms / density 44 | L = volume ^ (1/3) 45 | box = PeriodicBox(SVector(L, L, L)) 46 | 47 | # initial status 48 | lattice_pos = uniform_locations(box, natoms) 49 | velocities = [rand(SVector{3, Float64}) .- 0.5 for _ = 1:natoms] 50 | rc = L/2 51 | 52 | # create a `MDRuntime` instance 53 | md = molecule_dynamics(; lattice_pos, velocities, box, temperature, rc, Δt, potential=LennardJones(; rc)) 54 | 55 | # Q: how to match the initial potential energy? 56 | # Anderson thermalstat. 57 | # Nose-Hoover thermalstat, difficult but better. 58 | ps = Float64[] 59 | ks = Float64[] 60 | temps = Float64[] 61 | 62 | bin = Bin(0.0, L/2, 200) 63 | for j=1:Nt 64 | step!(md) 65 | push!(ps, mean_potential_energy(md)) 66 | push!(ks, mean_kinetic_energy(md)) 67 | push!(temps, HappyMolecules.temperature(md)) 68 | if j > Nt - gr_lastn 69 | HappyMolecules.collect_gr!(md, bin) 70 | end 71 | end 72 | return (; 73 | runtime = md, 74 | potential_energy = ps, 75 | kinetic_energy = ks, 76 | radial_ticks = ticks(bin), 77 | radial_distribution = HappyMolecules.finalize_gr(md, bin, gr_lastn) 78 | ) 79 | end 80 | 81 | end 82 | -------------------------------------------------------------------------------- /HappyMolecules/src/enzyme.jl: -------------------------------------------------------------------------------- 1 | using Enzyme 2 | 3 | # Const{T}(val) 4 | # Argument is assumed constant and not to participate in gradient calculation. 
5 | 6 | # Active{T}(val) 7 | # Argument is scale/immutable value to differentiate w.r.t. gradient is propagated through the return value. 8 | 9 | # Duplicated{T}(val, shadow) 10 | # Argument is mutable and the original output val is needed. 11 | 12 | # DuplicatedNoNeed{T}(shadow) 13 | # Like Duplicated, except Enzyme can assume the original result isn't nessesary. 14 | 15 | # BatchedDuplicated{T}(val, shaodows) 16 | # Like Duplicated, but expects a Tuple of shadow values. 17 | 18 | # BatchedDuplicatedNoNeed{T}(shaodows) 19 | # Like DuplicatedNoNeed, but expects a Tuple of shadow values. 20 | 21 | function enzyme_potential_field(potential::PotentialField, distance_vector::SVector) 22 | _, g = Enzyme.autodiff(Reverse, potential_energy, Active, Const(potential), Active(distance_vector))[1] 23 | return g 24 | end -------------------------------------------------------------------------------- /HappyMolecules/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using HappyMolecules 2 | using Test, Random 3 | 4 | using StaticArrays 5 | using HappyMolecules.Applications: lennard_jones_triple_point 6 | 7 | @testset "location initialization" begin 8 | Random.seed!(2) 9 | # random locations in an integer box size 10 | box = PeriodicBox(10, 10) 11 | @test HappyMolecules.largest_distance(box) == 5 * sqrt(2) 12 | locs = random_locations(box, 10000) 13 | @test length(locs) == 10000 14 | @test isapprox(sum(locs)/10000, SVector(4.5, 4.5); atol=5e-2) 15 | @test HappyMolecules.volume(box) == 100 16 | 17 | # random locations in a floating point box size 18 | box = PeriodicBox(10.0, 10.0) 19 | locs = random_locations(box, 10000) 20 | @test length(locs) == 10000 21 | @test isapprox(sum(locs)/10000, SVector(5.0, 5.0); atol=5e-2) 22 | 23 | # uniform locations in a floating point box size 24 | box = PeriodicBox(10.0, 10.0) 25 | locs = uniform_locations(box, 4) 26 | @test length(locs) == 4 27 | @test locs ≈ [SVector(0.0, 0.0), SVector(5.0, 0.0), SVector(0.0, 5.0), SVector(5.0, 5.0)] 28 | end 29 | 30 | @testset "binning" begin 31 | # constructor 32 | bin = Bin(-1.0, 1.0, 20) 33 | # ticks 34 | @test ticks(bin) ≈ [-1.05 + 0.1 * i for i=1:20] 35 | 36 | # push! 37 | @test_throws AssertionError push!(bin, -2.0) 38 | @test_throws AssertionError push!(bin, 2.0) 39 | r1 = zeros(Int, 20); r1[end] += 1 40 | @test push!(bin, 0.99).counts == r1 41 | r1[end] += 1 42 | @test push!(bin, 0.91).counts == r1 43 | r1[end-1] += 1 44 | @test push!(bin, 0.89).counts == r1 45 | 46 | # ncounts 47 | @test ncounts(bin) == 3 48 | 49 | # empty! 
50 | empty!(bin) 51 | @test ncounts(bin) == 0 52 | end 53 | 54 | @testset "enzyme potential field" begin 55 | potential, vector = LennardJones(), SVector(1.0, 2.0, 1.0) 56 | ef = HappyMolecules.enzyme_potential_field(potential, vector) 57 | field = force(potential, vector) 58 | @test field ≈ ef 59 | end 60 | 61 | @testset "LennardJones potential" begin 62 | rc = 2.519394287073761 63 | rc2 = rc ^ 2 64 | ecut = 4 * (1/rc2^6 - 1/rc2^3) 65 | p = LennardJones(; rc) 66 | @test p.ecut ≈ ecut 67 | @test isapprox(potential_energy(p, SVector(0.0, rc)), 0; atol=1e-8) 68 | end 69 | 70 | @testset "LennardJones triple point" begin 71 | res = lennard_jones_triple_point() 72 | md = res.runtime 73 | @test isapprox(HappyMolecules.temperature(md), 1.4595; atol=0.01) 74 | @test isapprox(HappyMolecules.pressure(md), 5.27; atol=5e-2) 75 | # energy conservation 76 | ks, ps = res.kinetic_energy, res.potential_energy 77 | @test isapprox(ps[1] + ks[1], ps[end] + ks[end]; atol=1e-2) 78 | end -------------------------------------------------------------------------------- /HiddenMarkovModel/Project.toml: -------------------------------------------------------------------------------- 1 | name = "HiddenMarkovModel" 2 | uuid = "9fa66fbd-4335-469e-840d-99b9fe4b4253" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 8 | OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" 9 | StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 10 | 11 | [compat] 12 | LinearAlgebra = "1.11.0" 13 | OMEinsum = "0.8.5" 14 | StatsBase = "0.34.4" 15 | julia = "1.6.7" 16 | 17 | [extras] 18 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 19 | 20 | [targets] 21 | test = ["Test"] 22 | -------------------------------------------------------------------------------- /HiddenMarkovModel/README.md: -------------------------------------------------------------------------------- 1 | # HiddenMarkovModel 2 | 3 | Solving the Hidden Markov Model[^Rabiner1986] using the tensor network method. 4 | 5 | ## Contents 6 | 7 | - Hidden Markov Model 8 | - Viterbi algorithm 9 | - Baum-Welch algorithm 10 | 11 | ## To run 12 | 13 | Clone the repository to your local machine and install the required packages (in a terminal): 14 | 15 | ```bash 16 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 17 | $ cd ScientificComputingDemos 18 | $ make init-HiddenMarkovModel # initialize the environment in HiddenMarkovModel and HiddenMarkovModel/examples 19 | $ make example-HiddenMarkovModel # run the script HiddenMarkovModel/examples/main.jl 20 | ``` 21 | 22 | 23 | ## References 24 | [^Rabiner1986]: Rabiner, Lawrence, and Biinghwang Juang. "An introduction to hidden Markov models." ieee assp magazine 3.1 (1986): 4-16. 
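For intuition, the likelihood obtained from the tensor network contraction should agree with the classical forward algorithm. Below is a minimal, self-contained sketch; the matrices `A` and `B`, the initial distribution `p0` and the observation sequence are made-up toy inputs, not part of this package's API:

```julia
using LinearAlgebra

# Forward algorithm: P(observations) as a chain of matrix-vector contractions,
# essentially the same contraction that the tensor-network formulation performs.
function forward_likelihood(A, B, p0, obs)
    α = p0 .* B[:, obs[1]]            # α₁(i) = p0(i) B(i, o₁)
    for t in 2:length(obs)
        α = (A' * α) .* B[:, obs[t]]  # αₜ = diag(B[:, oₜ]) Aᵀ αₜ₋₁
    end
    return sum(α)                     # P(o₁, …, o_T)
end

A  = [0.7 0.3; 0.4 0.6]   # A[i, j] = P(sₜ₊₁ = j | sₜ = i)
B  = [0.9 0.1; 0.2 0.8]   # B[i, k] = P(oₜ = k | sₜ = i)
p0 = [0.6, 0.4]           # initial state distribution
forward_likelihood(A, B, p0, [1, 2, 2, 1])
```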
-------------------------------------------------------------------------------- /HiddenMarkovModel/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | HiddenMarkovModel = "9fa66fbd-4335-469e-840d-99b9fe4b4253" 4 | -------------------------------------------------------------------------------- /HiddenMarkovModel/src/HiddenMarkovModel.jl: -------------------------------------------------------------------------------- 1 | module HiddenMarkovModel 2 | 3 | using OMEinsum 4 | using LinearAlgebra 5 | using StatsBase 6 | 7 | export HMM, viterbi, baum_welch, generate_sequence 8 | export HMMNetwork, likelihood, likelihood_and_gradient 9 | 10 | include("hmm.jl") 11 | 12 | end 13 | -------------------------------------------------------------------------------- /HiddenMarkovModel/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using HiddenMarkovModel 2 | using Test 3 | 4 | @testset "hmm" begin 5 | include("hmm.jl") 6 | end 7 | -------------------------------------------------------------------------------- /ImageProcessing/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | -------------------------------------------------------------------------------- /ImageProcessing/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 GiggleLiu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /ImageProcessing/Project.toml: -------------------------------------------------------------------------------- 1 | name = "ImageProcessing" 2 | uuid = "344abb40-58da-4aaa-a4f9-f9081c0a4796" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 8 | Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" 9 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 10 | 11 | [compat] 12 | FFTW = "1" 13 | Images = "0.26" 14 | julia = "1" 15 | 16 | [extras] 17 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 18 | 19 | [targets] 20 | test = ["Test"] 21 | -------------------------------------------------------------------------------- /ImageProcessing/README.md: -------------------------------------------------------------------------------- 1 | # ImageProcessing 2 | 3 | This demo implements two methods for image compression, the Singular Value Decomposition (SVD) and the Fast Fourier Transform (FFT). The main reference is: https://book.jinguo-group.science/stable/chap3/fft/ 4 | 5 | ## Contents 6 | - Fast Fourier Transform (FFT) 7 | - Singular Value Decomposition (SVD) 8 | - Image processing toolkit: [Images.jl](https://github.com/JuliaImages/Images.jl) 9 | 10 | ## To run 11 | 12 | Clone the repository to your local machine and install the required packages (in a terminal): 13 | 14 | ```bash 15 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 16 | $ cd ScientificComputingDemos 17 | $ make init-ImageProcessing # initialize the environment in ImageProcessing and ImageProcessing/examples 18 | $ make example-ImageProcessing # run the script ImageProcessing/examples/main.jl 19 | ``` 20 | -------------------------------------------------------------------------------- /ImageProcessing/data/amat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/ImageProcessing/data/amat.png -------------------------------------------------------------------------------- /ImageProcessing/data/art.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/ImageProcessing/data/art.png -------------------------------------------------------------------------------- /ImageProcessing/data/cat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/ImageProcessing/data/cat.png -------------------------------------------------------------------------------- /ImageProcessing/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | ImageProcessing = "344abb40-58da-4aaa-a4f9-f9081c0a4796" 4 | UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228" 5 | -------------------------------------------------------------------------------- /ImageProcessing/examples/fft.jl: -------------------------------------------------------------------------------- 1 | using UnicodePlots, ImageProcessing.FFTW, LinearAlgebra, ImageProcessing 2 | 3 | dft_matrix(n) = fft(Matrix{Complex{Float64}}(I, n, n), 1) 4 | 5 | @info "Running example: fast fourier 
transformation (FFT)" 6 | @info "The DFT matrix of size 6 x 6 is:" 7 | display(dft_matrix(6)) 8 | x = randn(ComplexF64, 6) 9 | @assert dft_matrix(6) * x ≈ fft(x) 10 | 11 | σ = 3 12 | y = Complex.(exp.(-abs2.(((1:256) .- 128) ./ σ))) 13 | @info """Let y be a Gaussian function (σ = $σ):""" 14 | display(lineplot(abs.(y), title="y = exp(-((n-128)/$σ)^2)", xlabel="n", ylabel="y[n]", color=:red)) 15 | 16 | Y = fft(y) 17 | @info """After applying the FFT to y, we get:""" 18 | display(lineplot(abs.(Y), title="Y = FFT(y)", xlabel="k", ylabel="Y[k]", color=:blue)) 19 | 20 | y2 = ifft(Y) 21 | @info """After applying the IFFT to Y, we get:""" 22 | display(lineplot(abs.(y2), title="y = IFFT(Y)", xlabel="n", ylabel="y[n]", color=:red)) 23 | 24 | σ = 50 25 | y = Complex.(exp.(-abs2.(((1:256) .- 128) ./ σ))) 26 | @info """Let y be a (broader) Gaussian function:""" 27 | display(lineplot(abs.(y), title="y = exp(-((n-128)/$σ)^2)", xlabel="n", ylabel="y[n]", color=:red)) 28 | 29 | Y = fft(y) 30 | @info """After applying the FFT to y, we get:""" 31 | display(lineplot(abs.(Y), title="Y = FFT(y)", xlabel="k", ylabel="Y[k]", color=:blue)) 32 | 33 | p, q = [1, 2, 3], [4, 5, 6] 34 | @info """Multiplying two polynomials using FFT: 35 | - p(x) = $(join(["$(p[i])x^$(i-1)" for i=1:length(p)], " + ")) 36 | - q(x) = $(join(["$(q[i])x^$(i-1)" for i=1:length(q)], " + ")) 37 | """ 38 | result = fast_polymul(p, q) 39 | @info """The result of the multiplication is: 40 | - p(x) * q(x) = $(join(["$(result[i])x^$(i-1)" for i=1:length(result)], " + ")) 41 | """ 42 | 43 | -------------------------------------------------------------------------------- /ImageProcessing/examples/fftcompress.jl: -------------------------------------------------------------------------------- 1 | using ImageProcessing, ImageProcessing.Images, ImageProcessing.LinearAlgebra 2 | 3 | fname = "amat.png" 4 | @info "Running FFT compression example, loaded image: $fname" 5 | img = demo_image(fname) 6 | 7 | ##### FFT ##### 8 | img_k = fft_compress(img, size(img)...) 9 | # the momentum space is sparse!
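# (for natural images, most of the spectral weight concentrates in the low-frequency modes near the
# center of the fftshift-ed spectrum, so truncating the outer, high-frequency coefficients below
# loses relatively little visible information)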
10 | red_channel = Gray.(real.(img_k.channels[1]) ./ sqrt(length(img))) 11 | fname = "red-momentum_space.png" 12 | Images.save(fname, red_channel) 13 | @info "Converting image to momentum space, red channel saved to: $fname" 14 | fname = "recovered.png" 15 | Images.save(fname, toimage(RGBA{N0f8}, img_k)) 16 | @info "Recovered image from momentum space is saved to: $fname" 17 | 18 | nx, ny = isqrt(2 * size(img, 1)), isqrt(2 * size(img, 2)) 19 | img_k_fft = lower_rank(img_k, nx, ny) 20 | cratio = compression_ratio(img_k_fft) 21 | fname = "fft_compressed.png" 22 | Images.save(fname, toimage(RGBA{N0f8}, img_k_fft)) 23 | @info "Compressing to size: $nx x $ny, compression ratio: $cratio, saved to: $fname" -------------------------------------------------------------------------------- /ImageProcessing/examples/main.jl: -------------------------------------------------------------------------------- 1 | include("svdcompress.jl") 2 | include("fft.jl") 3 | include("fftcompress.jl") -------------------------------------------------------------------------------- /ImageProcessing/examples/svdcompress.jl: -------------------------------------------------------------------------------- 1 | using ImageProcessing, ImageProcessing.Images, ImageProcessing.LinearAlgebra 2 | using CairoMakie 3 | 4 | fname = "amat.png" 5 | @info "Loading image: $fname" 6 | img = demo_image(fname) 7 | 8 | @info "The loaded image has type: $(typeof(img))" 9 | 10 | # the RGBA type is a 4‑tuple of red, green, blue and alpha values, each ranging from 0 to 1. 11 | transparent = RGBA(0/255, 0/255, 0/255, 0/255) 12 | black = RGBA(0/255, 0/255, 0/255, 255/255) 13 | white = RGBA(255/255, 255/255, 255/255, 255/255) 14 | red = RGBA(255/255, 0/255, 0/255, 255/255) 15 | green = RGBA(0/255, 255/255, 0/255, 255/255) 16 | blue = RGBA(0/255, 0/255, 255/255, 255/255) 17 | @info """Colors are defined as: 18 | - transparent: $transparent 19 | - black: $black 20 | - white: $white 21 | - red: $red 22 | - green: $green 23 | - blue: $blue 24 | """ 25 | 26 | # get one of the color channels 27 | red_channel = getfield.(img[:, :], :r) 28 | # to visualize as a grayscale image 29 | Gray.(red_channel) 30 | fname = "red_channel.png" 31 | Images.save(fname, Gray.(red_channel)) 32 | @info "The red channel is saved to: $fname" 33 | 34 | # in Images, the color channels are stored as a 3D array with the first dimension being the color channel. 35 | Gray.(channelview(img)[1, :, :]) 36 | Gray.(channelview(img)[2, :, :]) 37 | Gray.(channelview(img)[3, :, :]) 38 | 39 | red_svd = svd(red_channel) 40 | fig, = CairoMakie.lines(red_svd.S) 41 | fname = "red_svd_spectrum.png" 42 | CairoMakie.save(fname, fig) 43 | @info "Singular values of the red channel are stored in file: $fname" 44 | 45 | # We can decompose a given image into the three color channels red, green and blue. 46 | # Each channel can be represented as a (m × n)‑matrix with values ranging from 0 to 255.
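# Storing a rank-k truncation keeps k singular values plus k left/right singular vectors,
# i.e. k * (1 + m + n) numbers per m × n channel instead of m * n; this is what
# `compression_ratio` measures below.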
47 | target_rank = 10 48 | compressed = svd_compress(img, target_rank) 49 | compression_ratio(compressed) 50 | newimage = toimage(RGBA{N0f8}, compressed) 51 | fname = "compressed.png" 52 | Images.save(fname, newimage) 53 | @info """Compressing with SVD: 54 | - target rank is: $target_rank 55 | - the compression ratio is: $(compression_ratio(compressed)) 56 | - the compressed image is saved to: $fname 57 | """ 58 | 59 | # convert to image 60 | toimage(RGBA{N0f8}, compressed) 61 | compressed_rank1 = lower_rank(compressed, 1) 62 | compression_ratio(compressed_rank1) 63 | newimage1 = toimage(RGBA{N0f8}, compressed_rank1) 64 | fname = "compressed_rank1.png" 65 | Images.save(fname, newimage1) 66 | @info """Lowering the rank to 1: 67 | - the compression ratio is: $(compression_ratio(compressed_rank1)) 68 | - the compressed image is saved to: $fname 69 | """ -------------------------------------------------------------------------------- /ImageProcessing/src/ImageProcessing.jl: -------------------------------------------------------------------------------- 1 | module ImageProcessing 2 | 3 | using LinearAlgebra 4 | using Images 5 | using FFTW 6 | 7 | export demo_image, svd_compress, lower_rank, toimage, compression_ratio 8 | export fft_compress, FFTCompressedImage 9 | export fast_polymul 10 | 11 | include("utils.jl") 12 | include("fft.jl") 13 | include("pca.jl") 14 | include("polymul.jl") 15 | 16 | end 17 | -------------------------------------------------------------------------------- /ImageProcessing/src/fft.jl: -------------------------------------------------------------------------------- 1 | struct FFTCompressedImage{D, MT<:AbstractMatrix} 2 | Nx::Int 3 | Ny::Int 4 | channels::NTuple{D, MT} 5 | end 6 | 7 | """ 8 | fft_compress(image, nx, ny) 9 | 10 | Convert an image to momentum space using the FFT algorithm and compress it by truncating the Fourier coefficients. 11 | `nx` and `ny` are the number of rows and columns to keep in the Fourier space. 12 | """ 13 | function fft_compress(image, nx::Int, ny::Int) 14 | channels = channelview(image) 15 | return FFTCompressedImage(size(image)..., ntuple(i->truncate_k(channels[i, :, :] |> fft |> fftshift, nx, ny), 4)) 16 | end 17 | truncated_fft(m::AbstractMatrix, nx::Int, ny::Int) = truncate_k(fftshift(fft(m)), nx, ny) 18 | function truncate_k(m::AbstractMatrix, nx::Int, ny::Int) 19 | nx = min(nx, size(m, 1)) 20 | ny = min(ny, size(m, 2)) 21 | startx = (size(m, 1) + 1) ÷ 2 - (nx-1) ÷ 2 22 | starty = (size(m, 2) + 1) ÷ 2 - (ny-1) ÷ 2 23 | return m[startx:startx+nx-1, starty:starty+ny-1] 24 | end 25 | 26 | # pad zeros to matrix m, to make it of size Nx x Ny 27 | function pad_zeros(m::AbstractMatrix{T}, Nx::Int, Ny::Int) where T 28 | output = similar(m, Nx, Ny) 29 | fill!(output, zero(T)) 30 | Nx = max(Nx, size(m, 1)) 31 | Ny = max(Ny, size(m, 2)) 32 | startx = (Nx + 1) ÷ 2 - (size(m, 1)-1) ÷ 2 33 | starty = (Ny + 1) ÷ 2 - (size(m, 2)-1) ÷ 2 34 | output[startx:startx+size(m, 1)-1, starty:starty+size(m, 2)-1] .= m 35 | return output 36 | end 37 | 38 | # lower the size of the image by truncating the Fourier coefficients 39 | function lower_rank(img::FFTCompressedImage, nx::Int, ny::Int) 40 | FFTCompressedImage(img.Nx, img.Ny, ntuple(i->truncate_k(img.channels[i], nx, ny), 4)) 41 | end 42 | 43 | # convert to image 44 | function toimage(::Type{CT}, img::FFTCompressedImage) where {T,N,CT<:Colorant{T,N}} 45 | colorview(CT, cat([reshape(safe_convert.(T, pad_zeros(c, img.Nx, img.Ny) |> ifftshift |> ifft! 
.|> real), 1, img.Nx, img.Ny) for c in img.channels]...; dims=1)) 46 | end 47 | 48 | # compression ratio 49 | function compression_ratio(img::FFTCompressedImage) 50 | new_size = sum(length(ch) for ch in img.channels) 51 | return new_size / (img.Nx * img.Ny * length(img.channels)) 52 | end -------------------------------------------------------------------------------- /ImageProcessing/src/pca.jl: -------------------------------------------------------------------------------- 1 | # We can decompose a given image into the color channels, e.g. red, green, blue and alpha. 2 | # Each channel can be represented as a (m × n)‑matrix with values ranging from 0 to 255. 3 | struct SVDCompressedImage{D, RT<:Real, MT<:SVD{RT}} 4 | channels::NTuple{D, MT} 5 | end 6 | 7 | """ 8 | svd_compress(image, k) 9 | 10 | Convert an image to the SVD space and compress it by truncating the singular values. 11 | `k` is the number of singular values to keep. 12 | """ 13 | function svd_compress(image, k::Int) 14 | channels = channelview(image) 15 | return SVDCompressedImage(ntuple(i->truncated_svd(channels[i, :, :]; maxrank=k), 4)) 16 | end 17 | truncated_svd(m::AbstractMatrix; maxrank=typemax(Int), atol=0.0) = truncate(svd(m); maxrank, atol) 18 | function truncate(m::SVD; atol, maxrank) 19 | k = min(maxrank, findlast(>=(atol), m.S)) 20 | SVD(m.U[:, 1:k], m.S[1:k], m.Vt[1:k, :]) 21 | end 22 | function lower_rank(img::SVDCompressedImage, k::Int) 23 | SVDCompressedImage(ntuple(i->truncate(img.channels[i]; atol=0, maxrank=k), 4)) 24 | end 25 | 26 | # convert to image 27 | function toimage(::Type{CT}, img::SVDCompressedImage) where {T,N,CT<:Colorant{T,N}} 28 | colorview(CT, cat([reshape(safe_convert.(T, Matrix(c)), 1, size(c)...) for c in img.channels]...; dims=1)) 29 | end 30 | 31 | # compression ratio 32 | function compression_ratio(img::SVDCompressedImage) 33 | new_size = sum(length(ch.S) * (1 + size(ch, 1) + size(ch, 2)) for ch in img.channels) 34 | origin = sum(prod(size(ch)) for ch in img.channels) 35 | return new_size / origin 36 | end -------------------------------------------------------------------------------- /ImageProcessing/src/polymul.jl: -------------------------------------------------------------------------------- 1 | # here, we use the FFTW library to compute the convolution 2 | function fast_polymul(p::AbstractVector{T}, q::AbstractVector{T}) where T 3 | pvals = FFTW.fft(vcat(p, zeros(T, length(q)-1))) 4 | qvals = FFTW.fft(vcat(q, zeros(T, length(p)-1))) 5 | pqvals = pvals .* qvals 6 | return real.(FFTW.ifft!(pqvals)) 7 | end 8 | -------------------------------------------------------------------------------- /ImageProcessing/src/utils.jl: -------------------------------------------------------------------------------- 1 | """ 2 | demo_image(name::String) 3 | 4 | Load an image from the ImageProcessing package data directory. 5 | The argument `name` can be "art.png", "cat.png" or "amat.png" 6 | """ 7 | function demo_image(name::String) 8 | @assert name in ["art.png", "cat.png", "amat.png"] "Invalid image name, should be one of 'art.png', 'cat.png' or 'amat.png', got : $name." 
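# pkgdir(ImageProcessing, "data", name) joins the package root directory with data/<name>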
9 | filename = pkgdir(ImageProcessing, "data", name) 10 | return Images.load(filename) 11 | end 12 | 13 | # convert floating point number to N0f8, fixed point number with 8 bits, safely 14 | safe_convert(::Type{N0f8}, x::T) where T = map(x->N0f8(min(max(x, zero(T)), one(T))), x) 15 | 16 | ##### APIs ##### 17 | """ 18 | lower_rank(img::FFTCompressedImage, nx::Int, ny::Int) 19 | lower_rank(img::SVDCompressedImage, rank::Int) 20 | 21 | Lower the size of the image by truncating the Fourier coefficients or the singular values. 22 | """ 23 | function lower_rank end 24 | 25 | """ 26 | compression_ratio(img::FFTCompressedImage) 27 | compression_ratio(img::SVDCompressedImage) 28 | 29 | Compute the compression ratio of the compressed image. 30 | """ 31 | function compression_ratio end 32 | 33 | """ 34 | toimage(::Type{CT}, img::FFTCompressedImage) 35 | toimage(::Type{CT}, img::SVDCompressedImage) 36 | 37 | Convert a compressed image to an image, an array of elements of type `CT`. 38 | """ 39 | function toimage end -------------------------------------------------------------------------------- /ImageProcessing/test/fft.jl: -------------------------------------------------------------------------------- 1 | using Test, ImageProcessing, ImageProcessing.Images 2 | 3 | @testset "truncate_k and pad_zeros" begin 4 | x = randn(3, 4) 5 | res = ImageProcessing.truncate_k(x, 3, 4) 6 | @test res ≈ x 7 | res = ImageProcessing.pad_zeros(x, 3, 4) 8 | @test res ≈ x 9 | res = ImageProcessing.truncate_k(x, 1, 2) 10 | @test size(res) == (1, 2) 11 | res = ImageProcessing.pad_zeros(x, 3, 4) 12 | @test size(res) == (3, 4) 13 | x = randn(1, 2) 14 | res = ImageProcessing.truncate_k(x, 3, 4) 15 | @test res ≈ x 16 | end 17 | 18 | @testset "compression ratio" begin 19 | img = demo_image("cat.png") 20 | compressed = fft_compress(img, typemax(Int), typemax(Int)) 21 | img_recovered = toimage(RGBA{N0f8}, compressed) 22 | @test img_recovered ≈ img 23 | compressed = fft_compress(img, 5, 5) 24 | old_size = length(img) 25 | new_size = 25 26 | @test compression_ratio(compressed) ≈ new_size / old_size 27 | compressed2 = lower_rank(compressed, 3, 3) 28 | new_size = 9 29 | @test compression_ratio(compressed2) ≈ new_size / old_size 30 | @test toimage(RGBA{N0f8}, compressed) isa AbstractArray{RGBA{N0f8}, 2} 31 | end -------------------------------------------------------------------------------- /ImageProcessing/test/pca.jl: -------------------------------------------------------------------------------- 1 | using Test, ImageProcessing, ImageProcessing.Images 2 | 3 | @testset "compression ratio" begin 4 | img = demo_image("cat.png") 5 | compressed = svd_compress(img, 5) 6 | old_size = length(img) 7 | new_size = size(img, 1) * 5 + 5 + size(img, 2) * 5 8 | @test compression_ratio(compressed) ≈ new_size / old_size 9 | compressed2 = lower_rank(compressed, 3) 10 | new_size = size(img, 1) * 3 + 3 + size(img, 2) * 3 11 | @test compression_ratio(compressed2) ≈ new_size / old_size 12 | @test toimage(RGBA{N0f8}, compressed) isa AbstractArray{RGBA{N0f8}, 2} 13 | end 14 | 15 | @testset "truncated svd" begin 16 | x = randn(10, 3) 17 | A = x * x' 18 | res = ImageProcessing.truncated_svd(A; maxrank=3, atol=0) 19 | @test Matrix(res) ≈ A 20 | end -------------------------------------------------------------------------------- /ImageProcessing/test/polymul.jl: -------------------------------------------------------------------------------- 1 | using Test, ImageProcessing 2 | 3 | @testset "fast_polymul" begin 4 | @test fast_polymul([1, 2, 3], [4, 5, 6]) ≈ 
[4, 13, 28, 27, 18] 5 | end -------------------------------------------------------------------------------- /ImageProcessing/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using ImageProcessing 2 | using Test 3 | 4 | @testset "fft" begin 5 | include("fft.jl") 6 | end 7 | 8 | @testset "pca" begin 9 | include("pca.jl") 10 | end 11 | 12 | @testset "polymul" begin 13 | include("polymul.jl") 14 | end 15 | -------------------------------------------------------------------------------- /IsingModel/.gitignore: -------------------------------------------------------------------------------- 1 | *.dat 2 | *.mp4 3 | *.png -------------------------------------------------------------------------------- /IsingModel/Project.toml: -------------------------------------------------------------------------------- 1 | name = "IsingModel" 2 | uuid = "d6d5e6cc-7d26-42f1-a815-3fdb1c237969" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" 8 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 9 | 10 | [compat] 11 | julia = "1" 12 | 13 | [extras] 14 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 15 | 16 | [targets] 17 | test = ["Test"] 18 | -------------------------------------------------------------------------------- /IsingModel/README.md: -------------------------------------------------------------------------------- 1 | # IsingModel 2 | 3 | Solving the Ferromagnetic Ising model using the Monte Carlo method, including a Swendsen-Wang algorithms that implements cluster updates. 4 | 5 | ## Contents 6 | - Ferromagnetic Ising model, simple Monte Carlo method 7 | - Ferromagnetic Ising model, Swendsen-Wang algorithm 8 | 9 | ## To run 10 | 11 | Clone the repository to your local machine and install the required packages (in a terminal): 12 | 13 | ```bash 14 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 15 | $ cd ScientificComputingDemos 16 | $ make init-IsingModel # initialize the environment in IsingModel and IsingModel/examples 17 | $ make example-IsingModel # run the script IsingModel/examples/main.jl 18 | ``` 19 | 20 | 21 | ## References 22 | The main reference is the Computational Physics (PY502) course at BU. The course is taught by Prof. Anders Sandvik. The course material is available at [https://physics.bu.edu/~py502/](https://physics.bu.edu/~py502/). 
23 | 24 | This demo is based on the following lecture notes: 25 | https://physics.bu.edu/~py502/lectures5/mc.pdf -------------------------------------------------------------------------------- /IsingModel/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | IsingModel = "d6d5e6cc-7d26-42f1-a815-3fdb1c237969" 4 | LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc" 5 | -------------------------------------------------------------------------------- /IsingModel/src/IsingModel.jl: -------------------------------------------------------------------------------- 1 | module IsingModel 2 | 3 | using DelimitedFiles 4 | 5 | export IsingSpinModel, mcstep!, SimulationResult, energy, measure!, simulate!, num_spin 6 | export SpinGlassModel, load_spinglass, anneal, random_config 7 | export SwendsenWangModel, castbonds, SwendsenWangConfig 8 | 9 | include("ising2d.jl") 10 | include("swendsen_wang.jl") 11 | 12 | end 13 | -------------------------------------------------------------------------------- /IsingModel/test/ising2d.jl: -------------------------------------------------------------------------------- 1 | using Test, IsingModel, DelimitedFiles 2 | using IsingModel: pflip 3 | 4 | @testset "pflip" begin 5 | model = IsingSpinModel(10, 0.1, 0.5) 6 | @test isapprox(pflip(model, -1, -4), 0.0202419; rtol=1e-4) 7 | @test isapprox(pflip(model, 1, -4), 49.4024; rtol=1e-4) 8 | @test isapprox(pflip(model, -1, -2), 0.149569; rtol=1e-4) 9 | @test isapprox(pflip(model, 1, -2), 6.68589; rtol=1e-4) 10 | @test isapprox(pflip(model, -1, 0), 1.10517; rtol=1e-4) 11 | @test isapprox(pflip(model, 1, 0), 0.904837; rtol=1e-4) 12 | @test isapprox(pflip(model, -1, 2), 8.16617; rtol=1e-4) 13 | @test isapprox(pflip(model, 1, 2), 0.122456; rtol=1e-4) 14 | @test isapprox(pflip(model, -1, 4), 60.3403; rtol=1e-4) 15 | @test isapprox(pflip(model, 1, 4), 0.0165727; rtol=1e-4) 16 | end 17 | 18 | @testset "energy" begin 19 | model = IsingSpinModel(10, 0.0, 0.1) 20 | spin = fill(-1, model.l, model.l) 21 | @test energy(model, spin) ≈ -200 22 | model = IsingSpinModel(10, 0.1, 0.0) 23 | spin = fill(-1, model.l, model.l) 24 | @test energy(model, spin) ≈ -190 25 | end 26 | 27 | @testset "simulate and save" begin 28 | model = IsingSpinModel(10, 0.1, 0.5) 29 | spin = rand([-1,1], model.l, model.l) 30 | result, tcorr = simulate!(model, spin; nsteps_heatbath = 100, nsteps_eachbin = 100, nbins = 100, taumax = 100) 31 | @test length(tcorr) == 100 32 | filename = joinpath(@__DIR__, "res.dat") 33 | write(filename, result) 34 | data = readdlm(filename) 35 | @testset "data" begin 36 | @test size(data) == (100, 5) 37 | @test all(data[:,2:5] .>= 0) 38 | @test all(data[:,1] .<= 0) 39 | end 40 | end -------------------------------------------------------------------------------- /IsingModel/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using IsingModel 2 | using Test 3 | 4 | @testset "ising2d" begin 5 | include("ising2d.jl") 6 | end 7 | 8 | @testset "swendsen_wang" begin 9 | include("swendsen_wang.jl") 10 | end -------------------------------------------------------------------------------- /IsingModel/test/swendsen_wang.jl: -------------------------------------------------------------------------------- 1 | using Test, IsingModel, DelimitedFiles 2 | 3 | @testset "lattice" begin 4 | function original_lattice(ll,nn,neighbor,bondspin,spinbond) 5 | for s0=1:nn 6 | 
x0=mod(s0-1,ll) 7 | y0=div(s0-1,ll) 8 | x1=mod(x0+1,ll) 9 | x2=mod(x0-1+ll,ll) 10 | y1=mod(y0+1,ll) 11 | y2=mod(y0-1+ll,ll) 12 | s1=1+x1+y0*ll 13 | s2=1+x0+y1*ll 14 | s3=1+x2+y0*ll 15 | s4=1+x0+y2*ll 16 | neighbor[1,s0]=s1 17 | neighbor[2,s0]=s2 18 | neighbor[3,s0]=s3 19 | neighbor[4,s0]=s4 20 | bondspin[1,s0]=s0 21 | bondspin[2,s0]=s1 22 | bondspin[1,s0+nn]=s0 23 | bondspin[2,s0+nn]=s2 24 | spinbond[1,s0]=s0 25 | spinbond[2,s0]=s0+nn 26 | spinbond[3,s1]=s0 27 | spinbond[4,s2]=s0+nn 28 | end 29 | return nothing 30 | end 31 | neighbor, bondspin, spinbond = zeros(Int, 4, 9), zeros(Int, 2, 18), zeros(Int, 4, 9) 32 | original_lattice(3, 9, neighbor, bondspin, spinbond) 33 | neighbor_ = IsingModel.lattice(3) 34 | bondspin_, spinbond_ = IsingModel.spinbondmap(neighbor_) 35 | @test neighbor == neighbor_ 36 | @test bondspin == bondspin_ 37 | @test spinbond == spinbond_ 38 | end 39 | 40 | @testset "fixed sized stack" begin 41 | stack = IsingModel.FixedSizedStack{Int}(10) 42 | @test isempty(stack) 43 | for i = 1:10 44 | push!(stack, i) 45 | end 46 | @test !isempty(stack) 47 | @test length(stack) == 10 48 | @test_throws BoundsError push!(stack, 11) 49 | for i = 10:-1:1 50 | @test pop!(stack) == i 51 | end 52 | @test isempty(stack) 53 | @test_throws BoundsError pop!(stack) 54 | 55 | for i = 1:10 56 | push!(stack, i) 57 | end 58 | IsingModel.reset!(stack) 59 | @test isempty(stack) 60 | end 61 | 62 | @testset "energy" begin 63 | model = SwendsenWangModel(10, 0.0, 0.1) 64 | spin = fill(-1, model.l, model.l) 65 | @test energy(model, spin) ≈ -200 66 | model = SwendsenWangModel(10, 0.1, 0.0) 67 | spin = fill(-1, model.l, model.l) 68 | @test energy(model, spin) ≈ -190 69 | end 70 | 71 | @testset "simulate and save" begin 72 | model = SwendsenWangModel(10, 0.1, 0.5) 73 | spin = rand([-1,1], model.l, model.l) 74 | result, tcorr = simulate!(model, spin; nsteps_heatbath = 100, nsteps_eachbin = 100, nbins = 100, taumax = 100) 75 | @test length(tcorr) == 100 76 | filename = joinpath(@__DIR__, "res.dat") 77 | write(filename, result) 78 | data = readdlm(filename) 79 | @testset "data" begin 80 | @test size(data) == (100, 5) 81 | @test all(data[:,2:5] .>= 0) 82 | @test all(data[:,1] .<= 0) 83 | end 84 | end -------------------------------------------------------------------------------- /KernelPCA/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | -------------------------------------------------------------------------------- /KernelPCA/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 GiggleLiu and contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /KernelPCA/Project.toml: -------------------------------------------------------------------------------- 1 | name = "KernelPCA" 2 | uuid = "659736db-e33a-4d97-a2b7-5a5dcfea6c5b" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" 8 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" 9 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 10 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 11 | 12 | [compat] 13 | CSV = "0.10" 14 | DataFrames = "1" 15 | LinearAlgebra = "1" 16 | Random = "1" 17 | julia = "1" 18 | 19 | [extras] 20 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 21 | 22 | [targets] 23 | test = ["Test"] 24 | -------------------------------------------------------------------------------- /KernelPCA/README.md: -------------------------------------------------------------------------------- 1 | # KernelPCA 2 | 3 | [![Build Status](https://github.com/GiggleLiu/KernelPCA.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/GiggleLiu/KernelPCA.jl/actions/workflows/CI.yml?query=branch%3Amain) 4 | [![Coverage](https://codecov.io/gh/GiggleLiu/KernelPCA.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/GiggleLiu/KernelPCA.jl) 5 | -------------------------------------------------------------------------------- /KernelPCA/data/solar_system.csv: -------------------------------------------------------------------------------- 1 | name,mass,x,y,z,vx,vy,vz 2 | sun, 1.988544e30, -3.430031536367300E-03, 1.761881027012596E-03, 1.246691303879918E-05, 3.433119412673547E-06, -5.231300927361546E-06, -2.972974735550750E-08 3 | mercury, 3.302e23, 8.887985138765460E-02, -4.426150338141062E-01, -4.475716356484761E-02, 2.190877912081542E-02, 7.161568136528000E-03, -1.425929443086507E-03 4 | venus, 48.685e23, 4.043738093622098E-02, -7.239789211502183E-01, -1.241560658530024E-02, 2.005742309538389E-02, 1.141448268256643E-03, -1.142174441569258E-03 5 | earth, 5.97219e24, -2.020844529756663E-02, -1.014332737790859E+00, -1.358267619371298E-05, 1.692836723212859E-02, -3.484006532982474E-04, 6.028542314557626E-07 6 | mars, 6.4185e23, 7.462481663749645E-01, -1.181663652521456E+00, -4.321921404013512E-02, 1.235610918162121E-02, 8.680869489377649E-03, -1.220500608452554E-04 7 | jupyter, 1898.13e24, 3.384805319103406E+00, 3.658805636759595E+00, -9.100441946210819E-02, -5.634671617093230E-03, 5.479180979634376E-03, 1.034981407898108E-04 8 | saturn, 5.68319e26, -1.083899692644216E-01, -1.003995196286016E+01, 1.793391553155583E-01, 5.278410787728323E-03, -7.712342079566598E-05, -2.084447335785041E-04 9 | neptune, 102.41e24, 4.675566709791660E+00, -2.985428200863175E+01, 5.070034142531887E-01, 3.080716380724798E-03, 5.030733458293977E-04, -8.101711269674541E-05 10 | uranus, 86.8103e24, -2.693448460292631E-01, -1.927606446869220E+01, -6.808868692550485E-02, 3.903100242621723E-03, -2.380111092360100E-04, -5.164025224695875E-05 11 | pluto, 1.307e22, -2.129074273328636E+01, -1.896633337434039E+01, 8.187955378677129E+00, 2.276295756013608E-03, -2.670481848836963E-03, -3.669545371032554E-04 
-------------------------------------------------------------------------------- /KernelPCA/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | KernelPCA = "659736db-e33a-4d97-a2b7-5a5dcfea6c5b" 4 | Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" 5 | -------------------------------------------------------------------------------- /KernelPCA/examples/kernelf.jl: -------------------------------------------------------------------------------- 1 | import Makie, CairoMakie 2 | using KernelPCA 3 | 4 | ################## Linear Kernel ################### 5 | xaxis, yaxis = -2:0.05:2, -2:0.05:2 6 | x2 = [KernelPCA.Point(a, b) for a in xaxis, b in yaxis] 7 | constants = [0.8, 0.1, 0.5] 8 | anchors2 = KernelPCA.Point.([(0.8, 0.2), (0.01, -0.9), (-0.5, -0.5)]) 9 | lkf = kernelf(KernelPCA.LinearKernel(), constants, anchors2) 10 | Makie.contour(xaxis, yaxis, lkf.(x2); label="2D function") 11 | 12 | # linear kernel can always be reduced to single component 13 | constants_simplified = [1.0] 14 | anchors_simplified = [[0.8, 0.1, 0.5]' * KernelPCA.Point.([(0.8, 0.2), (0.01, -0.9), (-0.5, -0.5)])] 15 | lkf = kernelf(KernelPCA.LinearKernel(), constants_simplified, anchors_simplified) 16 | Makie.contour(xaxis, yaxis, lkf.(x2); label="2D function") 17 | 18 | ################## Polynomial Kernel ################### 19 | xaxis, yaxis = -2:0.05:2, -2:0.05:2 20 | x2 = [KernelPCA.Point(a, b) for a in xaxis, b in yaxis] 21 | constants2 = [0.8, 0.1, 0.5] 22 | anchors2 = KernelPCA.Point.([(0.8, 0.2), (0.01, -0.9), (-0.5, -0.5)]) 23 | kfp = kernelf(PolyKernel{2}(), constants, anchors2) 24 | Makie.contour(xaxis, yaxis, kfp.(x2); label="2D function") 25 | 26 | kfp = kernelf(PolyKernel{2}(), constants_simplified, anchors_simplified) 27 | Makie.contour(xaxis, yaxis, kfp.(x2); label="2D function") 28 | 29 | ################## RBF Kernel ################### 30 | # 1D 31 | x = -2:0.01:2 32 | constants = [0.8, 0.1, 0.5] 33 | anchors = [0.8, 0.01, -0.5] 34 | ker = RBFKernel(0.1) 35 | kf = kernelf(ker, constants, anchors) 36 | Makie.plot(x, kf.(x); label="1D function") 37 | 38 | # 2D 39 | xaxis, yaxis = -2:0.05:2, -2:0.05:2 40 | x2 = [KernelPCA.Point(a, b) for a in xaxis, b in yaxis] 41 | constants2 = [0.8, 0.1, 0.5] 42 | anchors2 = KernelPCA.Point.([(0.8, 0.2), (0.01, 0.9), (-0.5, -0.5)]) 43 | kf2 = kernelf(ker, constants, anchors2) 44 | Makie.contour(xaxis, yaxis, kf2.(x2); label="2D function") -------------------------------------------------------------------------------- /KernelPCA/examples/main.jl: -------------------------------------------------------------------------------- 1 | using KernelPCA, Makie, CairoMakie 2 | 3 | function showres(res) 4 | dataset = res.anchors 5 | x, y = getindex.(dataset, 1), getindex.(dataset, 2) 6 | @show res.lambda 7 | kf = kernelf(res, 1) 8 | X, Y = minimum(x):0.01:maximum(x), minimum(y):0.01:maximum(y) 9 | @show X, Y 10 | #levels = -0.1:0.01:0.1 11 | plt = Plots.contour(X, Y, kf.(KernelPCA.Point.(X', Y)); label="") 12 | Plots.scatter!(plt, x, y; label="data") 13 | end 14 | 15 | 16 | # centered K-PCA 17 | Random.seed!(2) 18 | kernel = PolyKernel{2}() 19 | #kernel = LinearKernel() 20 | dataset = KernelPCA.DataSets.curve(100) 21 | res = kpca(kernel, dataset; centered=true) 22 | 23 | Φ = [ϕ(kernel, x) for x in dataset] 24 | Φ = Φ .- Ref(sum(Φ) ./ length(Φ)) 25 | C = sum(x->1/length(dataset) .* x * x', Φ) 26 | V1 = sum([alpha * x for (alpha, x) in zip(res.vectors[:, 1], 
Φ)]) 27 | 28 | @info res.lambda 29 | for k in 1:length(res.lambda) 30 | @test res.lambda[1] * V1 ≈ C * V1 31 | end 32 | display(showres(res)) -------------------------------------------------------------------------------- /KernelPCA/src/KernelPCA.jl: -------------------------------------------------------------------------------- 1 | module KernelPCA 2 | 3 | using LinearAlgebra 4 | 5 | export DataSets 6 | export RBFKernel, PolyKernel, LinearKernel, kernelf, matrix, Point 7 | export kpca, KPCAResult, ϕ 8 | 9 | # include("pca.jl") 10 | include("kernels.jl") 11 | include("kpca.jl") 12 | include("dataset.jl") 13 | 14 | end 15 | -------------------------------------------------------------------------------- /KernelPCA/src/dataset.jl: -------------------------------------------------------------------------------- 1 | module DataSets 2 | 3 | using ..KernelPCA: Point 4 | import ..KernelPCA 5 | export quadratic, linear, curve, rings, solar_system 6 | using CSV, DataFrames 7 | 8 | function quadratic(n::Int; xspan=1, noise=0.1) 9 | xs = randn(n) .* xspan 10 | ys = xs .^ 2 .+ noise .* randn(n) 11 | return Point{2, Float64}[Point(x, y) for (x, y) in zip(xs, ys)] 12 | end 13 | 14 | function linear(n::Int; xspan=1, noise=0.1, offsety=0.0) 15 | xs = randn(n) .* xspan 16 | ys = xs .+ noise .* randn(n) .+ offsety 17 | return Point{2, Float64}[Point(x, y) for (x, y) in zip(xs, ys)] 18 | end 19 | 20 | function curve(n::Int; xspan=1, noise=0.1) 21 | xs = randn(n) .* xspan 22 | ys = (xs .- 1) .^ 2 .+ noise .* randn(n) 23 | return Point{2, Float64}[Point(x, y) for (x, y) in zip(xs, ys)] 24 | end 25 | 26 | function rings(n::Int; radius=1.0, width=0.1) 27 | vcat([let 28 | angles = rand(n) * 2π 29 | radis = radi .+ randn(n) * width 30 | [Point(r*cos(angle), r*sin(angle)) for (r, angle) in zip(radis, angles)] 31 | end for radi in radius]...) 32 | end 33 | 34 | function solar_system() 35 | data = CSV.read(joinpath(pkgdir(KernelPCA), "data", "solar_system.csv"), DataFrame) 36 | return hcat(data.x, data.y, data.z) 37 | end 38 | 39 | end -------------------------------------------------------------------------------- /KernelPCA/src/kernels.jl: -------------------------------------------------------------------------------- 1 | struct Point{N,T} 2 | coo::NTuple{N,T} 3 | end 4 | Point(arg::T, args::T...) where T<:Number = Point((arg, args...)) 5 | Base.:(+)(x::Point{N,T}, y::Point{N,T}) where {N, T} = Point(x.coo .+ y.coo) 6 | Base.:(-)(x::Point{N,T}, y::Point{N,T}) where {N, T} = Point(x.coo .- y.coo) 7 | Base.:(-)(x::Point{N,T}) where {N, T} = Point(Base.:(-).(x.coo)) 8 | Base.adjoint(x::Point) = x 9 | Base.:(*)(x::Number, y::Point) = Point(y.coo .* x) 10 | Base.:(*)(y::Point, x::Number) = Point(y.coo .* x) 11 | Base.iterate(x::Point, args...) = Base.iterate(x.coo, args...) 12 | Base.getindex(x::Point, i::Int) = x.coo[i] 13 | Base.length(::Point{N}) where N = N 14 | 15 | dist2(x::Number, y::Number) = abs2(x - y) 16 | dist2(x::Point, y::Point) = sum(abs2, x - y) 17 | 18 | ######################### Kernels 19 | abstract type AbstractKernel end 20 | # the matrix representation of Kernel on a basis 21 | matrix(kf::AbstractKernel, basis::AbstractVector) = kf.(basis, reshape(basis, 1, :)) 22 | 23 | # a function represented as a combination of kernel functions 24 | function kernelf(kernel::AbstractKernel, constants::AbstractVector, anchors::AbstractVector) 25 | @assert length(constants) == length(anchors) "the lengths of constants and anchors must be the same." 
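# the returned closure evaluates f(x) = sum_i constants[i] * kernel(x, anchors[i]),
# i.e. a function in the span of the kernel functions centered at the anchors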
26 | return x -> sum(i->constants[i] * kernel(x, anchors[i]), 1:length(constants)) 27 | end 28 | 29 | """ 30 | RBFKernel <: AbstractKernel 31 | RBFKernel(σ::Real) 32 | 33 | RBF Kernel. 34 | """ 35 | struct RBFKernel <: AbstractKernel 36 | sigma::Float64 37 | end 38 | (k::RBFKernel)(x, y) = rbf_kernel_function(x, y, k.sigma) 39 | rbf_kernel_function(x, y, σ::Real) = exp(-1/2σ * dist2(x, y)) 40 | 41 | """ 42 | PolyKernel <: AbstractKernel 43 | PolyKernel{order}() 44 | 45 | Polynomial Kernel. 46 | """ 47 | struct PolyKernel{order} <: AbstractKernel end 48 | (k::PolyKernel{order})(x, y) where order = poly_kernel_function(x, y, order) 49 | poly_kernel_function(x, y, order::Int) = dot(x, y)^order 50 | get_order(::PolyKernel{order}) where order = order 51 | 52 | # take `k.order` elements from input vector `x`. 53 | function ϕ(::PolyKernel{order}, x) where order 54 | vec([prod(i->x[i], ci.I) for ci in CartesianIndices(ntuple(i->length(x), order))]) 55 | end 56 | 57 | const LinearKernel = PolyKernel{1} -------------------------------------------------------------------------------- /KernelPCA/src/kpca.jl: -------------------------------------------------------------------------------- 1 | # https://link.springer.com/chapter/10.1007/BFb0020217 2 | 3 | struct KPCAResult{KT<:AbstractKernel, T, VT<:AbstractVector{T}, MT<:AbstractMatrix{T}, AVT<:AbstractVector} 4 | kernel::KT 5 | lambda::VT 6 | vectors::MT 7 | anchors::AVT 8 | end 9 | 10 | # Kernel Principle Component analysis 11 | # The inverse map could be found by using the technique of "Learning to Find Pre-Images" 12 | # https://papers.nips.cc/paper_files/paper/2003/hash/ac1ad983e08ad3304a97e147f522747e-Abstract.html 13 | function kpca(kernel::AbstractKernel, dataset::AbstractVector; atol=1e-10, centered::Bool=true) 14 | K = matrix(kernel, dataset) 15 | n = length(dataset) 16 | # centralize: K = K - 1ₙK - K1ₙ + 1ₙK1ₙ, where (1ₙ)_{i,j} := 1/n 17 | if centered 18 | K .= K .- sum(K; dims=1) ./ n .- sum(K; dims=2) ./ n .+ sum(K) ./ n^2 19 | end 20 | # solving the eigenvalue problem λα = Kα 21 | E, U = eigen(Hermitian(K)) 22 | 23 | # truncate small values 24 | idx = findfirst(E) do lambda 25 | if lambda < -1e-8 26 | error("Kernel matrix is not possitive definite, got eigenvalue: $lambda") 27 | end 28 | lambda >= atol 29 | end 30 | # re-order the eigenvalues from large to small, and normalize it 31 | E = E[end:-1:idx] ./ n 32 | # normalize the eigen vectors in the RKHS 33 | U = U[:, end:-1:idx] .* inv.(sqrt.(reshape(E, 1, :))) 34 | return KPCAResult(kernel, E, U, dataset) 35 | end 36 | 37 | function kernelf(kpca::KPCAResult, i::Int) 38 | return kernelf(kpca.kernel, kpca.vectors[:,i], kpca.anchors) 39 | end -------------------------------------------------------------------------------- /KernelPCA/test/kernels.jl: -------------------------------------------------------------------------------- 1 | using KernelPCA, Test, LinearAlgebra 2 | 3 | @testset "rbf" begin 4 | ker = RBFKernel(0.56) 5 | m = matrix(ker, 0:0.01:0.99) 6 | @test m isa Matrix && size(m) == (100, 100) 7 | @test diag(m) ≈ ones(100) 8 | end 9 | 10 | @testset "poly" begin 11 | ker = PolyKernel{2}() 12 | x, y = Point(0.2, 0.5), Point(0.4, 0.9) 13 | @test ker(x, y) == ϕ(ker, x)' * ϕ(ker, y) 14 | 15 | ker = PolyKernel{3}() 16 | x, y = Point(0.2, 0.5), Point(0.4, 0.9) 17 | @test ker(x, y) == ϕ(ker, x)' * ϕ(ker, y) 18 | 19 | # linear kernel 20 | ker = LinearKernel() 21 | @test ker(x, y) == ϕ(ker, x)' * ϕ(ker, y) 22 | @test ker(x, y) == collect(x)' * collect(y) 23 | end 
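
# Side note (illustrative sketch, not part of the package tests): the broadcasted centering used in
# `kpca` is equivalent to conjugating the kernel matrix with the centering matrix H = I - 1ₙ,
# where (1ₙ)ᵢⱼ = 1/n. A quick numerical check on a random positive semidefinite matrix:
using LinearAlgebra
K = let v = randn(5, 2); v * v' end   # a small positive semidefinite "kernel matrix"
n = size(K, 1)
Kc = K .- sum(K; dims=1) ./ n .- sum(K; dims=2) ./ n .+ sum(K) ./ n^2   # as in kpca.jl
H = I - fill(1/n, n, n)               # explicit centering matrix
@assert Kc ≈ H * K * H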
-------------------------------------------------------------------------------- /KernelPCA/test/kpca.jl: -------------------------------------------------------------------------------- 1 | using KernelPCA, Test 2 | using LinearAlgebra, Random 3 | 4 | @testset "kpca" begin 5 | Random.seed!(4) 6 | # normalization condition 7 | #kernel = RBFKernel(0.5) 8 | kernel = PolyKernel{2}() 9 | dataset = KernelPCA.DataSets.rings(100) 10 | #dataset = KernelPCA.DataSets.linear(100) 11 | res = kpca(kernel, dataset; centered=false) 12 | # check normalization 13 | for k in 1:length(res.lambda) 14 | @test res.lambda[k] * norm(res.vectors[:, k])^2 ≈ 1 15 | end 16 | @show res.lambda 17 | end 18 | 19 | @testset "kpca" begin 20 | Random.seed!(4) 21 | # normalization condition 22 | #kernel = RBFKernel(0.5) 23 | kernel = PolyKernel{2}() 24 | dataset = KernelPCA.DataSets.curve(100) 25 | #dataset = KernelPCA.DataSets.linear(100) 26 | res = kpca(kernel, dataset; centered=false) 27 | # check normalization 28 | for k in 1:length(res.lambda) 29 | @test res.lambda[k] * norm(res.vectors[:, k])^2 ≈ 1 30 | end 31 | # check the eigenvalue problem 32 | Φ = [ϕ(kernel, x) for x in dataset] 33 | C = sum(x->1/length(dataset) .* x * x', Φ) 34 | V1 = sum([alpha * x for (alpha, x) in zip(res.vectors[:, 1], Φ)]) 35 | for k in 1:length(res.lambda) 36 | @test res.lambda[1] * V1 ≈ C * V1 37 | end 38 | end 39 | 40 | @testset "centered kpca" begin 41 | Random.seed!(2) 42 | kernel = PolyKernel{2}() 43 | #kernel = LinearKernel() 44 | dataset = KernelPCA.DataSets.curve(100) 45 | res = kpca(kernel, dataset; centered=true) 46 | 47 | Φ = [ϕ(kernel, x) for x in dataset] 48 | Φ = Φ .- Ref(sum(Φ) ./ length(Φ)) 49 | C = sum(x->1/length(dataset) .* x * x', Φ) 50 | V1 = sum([alpha * x for (alpha, x) in zip(res.vectors[:, 1], Φ)]) 51 | 52 | @info res.lambda 53 | for k in 1:length(res.lambda) 54 | @test res.lambda[1] * V1 ≈ C * V1 55 | end 56 | end -------------------------------------------------------------------------------- /KernelPCA/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using KernelPCA 2 | using Test 3 | 4 | @testset "kernels" begin 5 | include("kernels.jl") 6 | end 7 | 8 | @testset "kpca" begin 9 | include("kpca.jl") 10 | end -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 GiggleLiu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/.gitignore: -------------------------------------------------------------------------------- 1 | /Manifest.toml 2 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 GiggleLiu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/Project.toml: -------------------------------------------------------------------------------- 1 | name = "LatticeBoltzmannModel" 2 | uuid = "ab0093b4-0b25-4baf-a3dd-3bdf3b7dfdbc" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 8 | 9 | [weakdeps] 10 | CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" 11 | 12 | [extensions] 13 | LatticeBoltzmannCUDAExt = "CUDA" 14 | 15 | [compat] 16 | CUDA = "5" 17 | LinearAlgebra = "1" 18 | julia = "1.10" 19 | 20 | [extras] 21 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 22 | 23 | [targets] 24 | test = ["Test"] 25 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/README.md: -------------------------------------------------------------------------------- 1 | # LatticeBoltzmannModel 2 | 3 | This package implements Lattice Boltzmann Model (LBM) for fluid dynamics simulation in Julia. 
Please check the following link to learn more about LBM: 4 | * https://physics.weber.edu/schroeder/fluids/ 5 | 6 | ## Contents 7 | * D2Q9 model - a 2D LBM with 9 velocities 8 | * CUDA acceleration 9 | * Fluid dynamics visualization 10 | 11 | ## Get started 12 | 13 | Please clone this repository to your local machine and switch to the `ScientificComputingDemos` directory and run the following command in the terminal: 14 | 15 | ```bash 16 | $ make init-LatticeBoltzmannModel # Initialize the environment 17 | $ make example-LatticeBoltzmannModel # Run the examples 18 | ``` -------------------------------------------------------------------------------- /LatticeBoltzmannModel/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | LatticeBoltzmannModel = "ab0093b4-0b25-4baf-a3dd-3bdf3b7dfdbc" 4 | Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" 5 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/examples/barrier.jl: -------------------------------------------------------------------------------- 1 | using CairoMakie: RGBA 2 | using CairoMakie 3 | using LatticeBoltzmannModel 4 | 5 | # simulate a fluid with a barrier 6 | lb = example_d2q9() 7 | states = [copy(lb.grid)] 8 | for i=1:2000 9 | step!(lb) 10 | i % 20 == 0 && push!(states, copy(lb.grid)) 11 | end 12 | curls = [curl(momentum.(Ref(lb.config), s)) for s in states] 13 | 14 | # Set up the visualization with Makie: 15 | vorticity = Observable(curls[1]') 16 | fig, ax, plot = image(vorticity, colormap = :jet, colorrange = (-0.1, 0.1)) 17 | 18 | # Add barrier visualization: 19 | barrier_img = map(x -> x ? RGBA(0, 0, 0, 1) : RGBA(0, 0, 0, 0), lb.barrier) 20 | image!(ax, barrier_img') 21 | 22 | using BenchmarkTools 23 | @benchmark step!($(deepcopy(lb))) 24 | 25 | record(fig, joinpath(@__DIR__, "barrier.mp4"), 1:100; framerate = 10) do i 26 | vorticity[] = curls[i+1]' 27 | end 28 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/examples/barrier.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/LatticeBoltzmannModel/examples/barrier.mp4 -------------------------------------------------------------------------------- /LatticeBoltzmannModel/examples/barrier_gpu.jl: -------------------------------------------------------------------------------- 1 | using CairoMakie: RGBA 2 | using CairoMakie 3 | using LatticeBoltzmannModel 4 | 5 | using CUDA 6 | 7 | # simulate a fluid with a barrier 8 | lb = CUDA.cu(example_d2q9()) 9 | states = [copy(lb.grid)] 10 | for i=1:2000 11 | step!(lb) 12 | i % 20 == 0 && push!(states, copy(lb.grid)) 13 | end 14 | curls = [curl(Matrix(momentum.(Ref(lb.config), s))) for s in states] 15 | 16 | # Set up the visualization with Makie: 17 | vorticity = Observable(curls[1]') 18 | fig, ax, plot = image(vorticity, colormap = :jet, colorrange = (-0.1, 0.1)) 19 | 20 | # Add barrier visualization: 21 | barrier_img = map(x -> x ? 
RGBA(0, 0, 0, 1) : RGBA(0, 0, 0, 0), lb.barrier) 22 | image!(ax, barrier_img') 23 | 24 | using BenchmarkTools 25 | @benchmark step!($(deepcopy(lb))) 26 | 27 | CairoMakie.record(fig, joinpath(@__DIR__, "barrier_gpu.mp4"), 1:100; framerate = 10) do i 28 | vorticity[] = curls[i+1]' 29 | end 30 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/ext/LatticeBoltzmannCUDAExt.jl: -------------------------------------------------------------------------------- 1 | module LatticeBoltzmannCUDAExt 2 | using CUDA: @kernel, get_backend, @index 3 | using LatticeBoltzmannModel: Cell, AbstractLBConfig, directions, flip_direction_index, density, LatticeBoltzmann 4 | using LatticeBoltzmannModel 5 | 6 | function LatticeBoltzmannModel.stream!(lb::AbstractLBConfig{2, N}, newgrid::CuMatrix{D}, grid::CuMatrix{D}, barrier::CuMatrix{Bool}) where {N, T, D<:Cell{N, T}} 7 | ds = directions(lb) 8 | @kernel function kernel(newgrid, grid, barrier, ds) 9 | ci = @index(Global, Cartesian) 10 | i, j = ci.I 11 | @inbounds newgrid[ci] = Cell(ntuple(N) do k 12 | ei = ds[k] 13 | m, n = size(grid) 14 | i2, j2 = mod1(i - ei[1], m), mod1(j - ei[2], n) 15 | if barrier[i2, j2] 16 | density(grid[i, j], flip_direction_index(lb, k)) 17 | else 18 | density(grid[i2, j2], k) 19 | end 20 | end) 21 | end 22 | kernel(get_backend(newgrid))(newgrid, grid, barrier, ds; ndrange=size(newgrid)) 23 | return newgrid 24 | end 25 | 26 | function CUDA.cu(lb::LatticeBoltzmann{D, N}) where {D, N} 27 | return LatticeBoltzmann(lb.config, CuArray(lb.grid), CuArray(lb.barrier)) 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/src/LatticeBoltzmannModel.jl: -------------------------------------------------------------------------------- 1 | module LatticeBoltzmannModel 2 | 3 | # import packages 4 | using LinearAlgebra 5 | 6 | export Point, Point2D, Point3D 7 | export D2Q9, LatticeBoltzmann, step!, equilibrium_density, momentum, curl, example_d2q9, density 8 | 9 | include("point.jl") 10 | include("fluid.jl") 11 | 12 | end 13 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/src/point.jl: -------------------------------------------------------------------------------- 1 | """ 2 | Point{D, T} 3 | 4 | A point in D-dimensional space, with coordinates of type T. 5 | 6 | # Examples 7 | ```jldoctest 8 | julia> p1 = Point(1.0, 2.0) 9 | Point{2, Float64}((1.0, 2.0)) 10 | 11 | julia> p2 = Point(3.0, 4.0) 12 | Point{2, Float64}((3.0, 4.0)) 13 | 14 | julia> p1 + p2 15 | Point{2, Float64}((4.0, 6.0)) 16 | ``` 17 | """ 18 | struct Point{D, T <: Real} 19 | data::NTuple{D, T} 20 | end 21 | const Point2D{T} = Point{2, T} 22 | const Point3D{T} = Point{3, T} 23 | Point(x::Real...) = Point((x...,)) 24 | LinearAlgebra.dot(x::Point, y::Point) = mapreduce(*, +, x.data .* y.data) 25 | Base.:*(x::Real, y::Point) = Point(x .* y.data) 26 | Base.:*(x::Point, y::Real) = Point(x.data .* y) 27 | Base.:/(y::Point, x::Real) = Point(y.data ./ x) 28 | Base.:+(x::Point, y::Point) = Point(x.data .+ y.data) 29 | Base.:-(x::Point, y::Point) = Point(x.data .- y.data) 30 | Base.isapprox(x::Point, y::Point; kwargs...) = all(isapprox.(x.data, y.data; kwargs...)) 31 | Base.getindex(p::Point, i::Int) = p.data[i] 32 | Base.broadcastable(p::Point) = p.data 33 | Base.iterate(p::Point, args...) = iterate(p.data, args...) 
34 | Base.zero(::Type{Point{D, T}}) where {D, T} = Point(ntuple(i->zero(T), D)) 35 | Base.zero(::Point{D, T}) where {D, T} = Point(ntuple(i->zero(T), D)) 36 | distance(p::Point, q::Point) = sqrt(sum((p - q) .^ 2)) 37 | 38 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/test/cuda.jl: -------------------------------------------------------------------------------- 1 | using LatticeBoltzmannModel, Test, CUDA; CUDA.allowscalar(false) 2 | 3 | @testset "step!" begin 4 | lb0 = example_d2q9(; u0=Point(0.0, 0.1)) 5 | lb = deepcopy(lb0) 6 | for i=1:100 step!(lb) end 7 | lbc = CUDA.cu(lb0) 8 | for i=1:100 step!(lbc) end 9 | # the conservation of mass 10 | @test all(lb.grid .≈ Array(lbc.grid)) 11 | end 12 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/test/fluid.jl: -------------------------------------------------------------------------------- 1 | using Test, LatticeBoltzmannModel 2 | 3 | @testset "momentum" begin 4 | lb = D2Q9() 5 | ds = equilibrium_density(lb, 2.0, Point(0.1, 0.0)) 6 | # the conservation of momentum 7 | @test momentum(lb, ds) ≈ Point(0.1, 0.0) 8 | # the conservation of mass 9 | @test density(ds) ≈ 2.0 10 | end 11 | 12 | @testset "step!" begin 13 | lb0 = example_d2q9(; u0=Point(0.0, 0.1)) 14 | lb = deepcopy(lb0) 15 | for i=1:100 step!(lb) end 16 | # the conservation of mass 17 | @test isapprox(sum(density.(lb.grid)), sum(density.(lb0.grid)); rtol=1e-4) 18 | # the conservation of momentum 19 | mean_u = sum(momentum.(Ref(lb.config), lb.grid))/length(lb.grid) 20 | @test mean_u[2] < 0.1 - 1e-3 21 | end 22 | -------------------------------------------------------------------------------- /LatticeBoltzmannModel/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using LatticeBoltzmannModel 2 | using Test 3 | @testset "fluid" begin 4 | include("fluid.jl") 5 | end -------------------------------------------------------------------------------- /LatticeGasCA/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | /docs/build/ 6 | .vscode/ -------------------------------------------------------------------------------- /LatticeGasCA/Project.toml: -------------------------------------------------------------------------------- 1 | name = "LatticeGasCA" 2 | uuid = "38b09334-5df9-4e03-8969-f8217d04990c" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" 8 | UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228" 9 | 10 | [compat] 11 | CUDA = "5" 12 | UnicodePlots = "3" 13 | julia = "1" 14 | 15 | [extras] 16 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 17 | 18 | [targets] 19 | test = ["Test"] 20 | -------------------------------------------------------------------------------- /LatticeGasCA/README.md: -------------------------------------------------------------------------------- 1 | # LatticeGasCA 2 | 3 | [![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://GiggleLiu.github.io/LatticeGasCA.jl/stable/) 4 | [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://GiggleLiu.github.io/LatticeGasCA.jl/dev/) 5 | [![Build Status](https://github.com/GiggleLiu/LatticeGasCA.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/GiggleLiu/LatticeGasCA.jl/actions/workflows/CI.yml?query=branch%3Amain) 6 | 
[![Coverage](https://codecov.io/gh/GiggleLiu/LatticeGasCA.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/GiggleLiu/LatticeGasCA.jl) 7 | -------------------------------------------------------------------------------- /LatticeGasCA/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | -------------------------------------------------------------------------------- /LatticeGasCA/examples/main.jl: -------------------------------------------------------------------------------- 1 | using CairoMakie 2 | using LatticeGasCA 3 | 4 | function make_video(lg::AbstractLatticeGas; filename::String="lattice-gas.mp4", nframes::Int=120) 5 | lattice_data = Observable(LatticeGasCA.density(lg)) 6 | fig, axis, plt = Makie.heatmap(lattice_data) 7 | 8 | Makie.record(fig, filename, 1:nframes) do frame 9 | notify.((lattice_data,)) 10 | lattice_data[] = LatticeGasCA.density(update!(lg)) 11 | end 12 | end 13 | 14 | 15 | hpp = hpp_center_square(200, 200, 0.01); 16 | make_video(hpp) -------------------------------------------------------------------------------- /LatticeGasCA/src/LatticeGasCA.jl: -------------------------------------------------------------------------------- 1 | module LatticeGasCA 2 | 3 | import UnicodePlots 4 | using CUDA.GPUArrays: @index, get_backend, @kernel 5 | using CUDA: synchronize, CuArray 6 | import CUDA 7 | 8 | export cpu 9 | export hpp_center_square, hpp_singledot, HPPLatticeGas, simulate, update!, AbstractLatticeGas, density 10 | 11 | include("hpp.jl") 12 | include("cuda.jl") 13 | 14 | end 15 | -------------------------------------------------------------------------------- /LatticeGasCA/src/cuda.jl: -------------------------------------------------------------------------------- 1 | function update!(lg::HPPLatticeGas{ET, <:CuArray{ET}}) where ET 2 | @kernel function kernel(lattice::AbstractArray{T}, cache) where T 3 | i, j = @index(Global, NTuple) 4 | nx, ny = size(lattice) 5 | @inbounds state = (i == nx ? zero(T) : left(lattice[i+1, j])) + 6 | (i == 1 ? zero(T) : right(lattice[i-1, j])) + 7 | (j == ny ? zero(T) : down(lattice[i, j+1])) + 8 | (j == 1 ? 
zero(T) : up(lattice[i, j-1])) 9 | newstate = hpp_state_transfer_rule(state, i, j, nx, ny) 10 | @inbounds cache[i, j] = newstate 11 | end 12 | backend = get_backend(lg.lattice) 13 | CUDA.@sync kernel(backend)(lg.lattice, lg.cache; ndrange=size(lg.lattice)) 14 | copyto!(lg.lattice, lg.cache) 15 | return lg 16 | end 17 | 18 | function CUDA.cu(lg::HPPLatticeGas) 19 | return HPPLatticeGas(CuArray(lg.lattice), CuArray(lg.cache)) 20 | end 21 | 22 | function cpu(lg::HPPLatticeGas) 23 | return HPPLatticeGas(Array(lg.lattice), Array(lg.cache)) 24 | end -------------------------------------------------------------------------------- /LatticeGasCA/test/cuda.jl: -------------------------------------------------------------------------------- 1 | using LatticeGasCA, Test, CUDA 2 | CUDA.allowscalar(false) 3 | 4 | @testset "hpp" begin 5 | hpp = cu(hpp_singledot()) 6 | hpp2 = simulate(hpp, 76; verbose=true) 7 | @test !(hpp === hpp2) && hpp == hpp2 8 | end -------------------------------------------------------------------------------- /LatticeGasCA/test/hpp.jl: -------------------------------------------------------------------------------- 1 | using LatticeGasCA, Test 2 | 3 | @testset "hpp" begin 4 | hpp = hpp_singledot() 5 | hpp2 = simulate(hpp, 76; verbose=true) 6 | @test !(hpp === hpp2) && hpp == hpp2 7 | end -------------------------------------------------------------------------------- /LatticeGasCA/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using LatticeGasCA 2 | using Test, CUDA 3 | 4 | @testset "hpp" begin 5 | include("hpp.jl") 6 | end 7 | 8 | if CUDA.functional() 9 | include("cuda.jl") 10 | end -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | JL = julia 2 | 3 | init-%: 4 | $(JL) -e 'using Pkg; dir="$*"; @assert isdir(dir); Pkg.activate(dir); Pkg.instantiate(); Pkg.activate(joinpath(dir, "examples")); Pkg.develop(path = dir); Pkg.instantiate(); Pkg.precompile();' 5 | echo 'environment initialized at: $* and $*/examples' 6 | 7 | update-%: 8 | $(JL) -e 'using Pkg; dir="$*"; @assert isdir(dir); Pkg.activate(dir); Pkg.update(); Pkg.activate(joinpath(dir, "examples")); Pkg.update(); Pkg.precompile();' 9 | echo 'environment updated at: $* and $*/examples' 10 | 11 | test-%: 12 | echo 'testing package at: $*' 13 | $(JL) -e 'using Pkg; dir="$*"; @assert isdir(dir); Pkg.activate(dir); Pkg.test();' 14 | 15 | example-%: 16 | echo 'running example at: $*/examples/main.jl' 17 | $(JL) -e 'using Pkg; dir=joinpath("$*", "examples"); @assert isdir(dir); Pkg.activate(dir); include(joinpath(dir, "main.jl"));' 18 | 19 | testall: init-CompressedSensing test-CompressedSensing init-HappyMolecules test-HappyMolecules init-ImageProcessing test-ImageProcessing init-KernelPCA test-KernelPCA init-LatticeBoltzmannModel test-LatticeBoltzmannModel init-LatticeGasCA test-LatticeGasCA init-MyFirstPackage test-MyFirstPackage init-PhysicsSimulation test-PhysicsSimulation init-SimpleLinearAlgebra test-SimpleLinearAlgebra init-Spinglass test-Spinglass 20 | echo 'all done' -------------------------------------------------------------------------------- /MyFirstPackage/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | /docs/Manifest.toml 6 | /docs/build/ 7 | *.mp4 8 | --------------------------------------------------------------------------------
/MyFirstPackage/Project.toml: -------------------------------------------------------------------------------- 1 | name = "MyFirstPackage" 2 | uuid = "594718ca-da39-4ff3-a299-6d8961b2aa49" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 8 | 9 | [compat] 10 | julia = "1.10" 11 | 12 | [extras] 13 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 14 | 15 | [targets] 16 | test = ["Test"] 17 | -------------------------------------------------------------------------------- /MyFirstPackage/README.md: -------------------------------------------------------------------------------- 1 | # MyFirstPackage 2 | 3 | This is a test package for learning how to create a package in Julia with the help of [PkgTemplates.jl](https://github.com/JuliaCI/PkgTemplates.jl). 4 | 5 | ## References 6 | https://book.jinguo-group.science/stable/chap2/julia-release/ -------------------------------------------------------------------------------- /MyFirstPackage/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 3 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 4 | Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" 5 | MyFirstPackage = "594718ca-da39-4ff3-a299-6d8961b2aa49" 6 | -------------------------------------------------------------------------------- /MyFirstPackage/examples/lorenz.jl: -------------------------------------------------------------------------------- 1 | using Makie, CairoMakie, MyFirstPackage 2 | set_theme!(theme_black()) 3 | 4 | lz = Lorenz(10, 28, 8/3) 5 | y = MyFirstPackage.Point(1.0, 1.0, 1.0) 6 | 7 | points = Observable(Point3f[]) # Signal that can be used to update plots efficiently 8 | colors = Observable(Int[]) 9 | 10 | fig, ax, l = lines(points, color = colors, 11 | colormap = :inferno, transparency = true, 12 | axis = (; type = Axis3, protrusions = (0, 0, 0, 0), 13 | viewmode = :fit, limits = (-30, 30, -30, 30, 0, 50))) 14 | 15 | integrator = RungeKutta{4}() 16 | # record the animation 17 | record(fig, joinpath(@__DIR__, "lorenz.mp4"), 1:120) do frame 18 | global y 19 | for i in 1:50 20 | # update arrays inplace 21 | y = integrate_step(lz, integrator, y, 0.01) 22 | push!(points[], Point3f(y...)) 23 | push!(colors[], frame) 24 | end 25 | ax.azimuth[] = 1.7pi + 0.3 * sin(2pi * frame / 120) # set the view angle of the axis 26 | notify(points); notify(colors) # tell points and colors that their value has been updated 27 | l.colorrange = (0, frame) # update plot attribute directly 28 | end -------------------------------------------------------------------------------- /MyFirstPackage/examples/main.jl: -------------------------------------------------------------------------------- 1 | include("lorenz.jl") -------------------------------------------------------------------------------- /MyFirstPackage/src/MyFirstPackage.jl: -------------------------------------------------------------------------------- 1 | module MyFirstPackage 2 | 3 | using LinearAlgebra 4 | 5 | # export interfaces 6 | export Lorenz, integrate_step 7 | export Point, Point2D, Point3D 8 | export RungeKutta, Euclidean 9 | 10 | include("point.jl") 11 | include("lorenz.jl") 12 | 13 | end 14 | -------------------------------------------------------------------------------- /MyFirstPackage/src/lorenz.jl: -------------------------------------------------------------------------------- 1 | struct Lorenz 2 
| σ::Float64 3 | ρ::Float64 4 | β::Float64 5 | end 6 | 7 | function field(p::Lorenz, u) 8 | x, y, z = u 9 | Point(p.σ*(y-x), x*(p.ρ-z)-y, x*y-p.β*z) 10 | end 11 | 12 | abstract type AbstractIntegrator end 13 | struct RungeKutta{K} <: AbstractIntegrator end 14 | struct Euclidean <: AbstractIntegrator end 15 | 16 | # Runge-Kutta 4th order method 17 | function integrate_step(f, ::RungeKutta{4}, t, y, Δt) 18 | k1 = Δt * f(t, y) 19 | k2 = Δt * f(t+Δt/2, y + k1 / 2) 20 | k3 = Δt * f(t+Δt/2, y + k2 / 2) 21 | k4 = Δt * f(t+Δt, y + k3) 22 | return y + k1/6 + k2/3 + k3/3 + k4/6 23 | end 24 | 25 | # Euclidean integration 26 | function integrate_step(f, ::Euclidean, t, y, Δt) 27 | return y + Δt * f(t, y) 28 | end 29 | 30 | function integrate_step(lz::Lorenz, int::AbstractIntegrator, u, Δt) 31 | return integrate_step((t, u) -> field(lz, u), int, zero(Δt), u, Δt) 32 | end -------------------------------------------------------------------------------- /MyFirstPackage/src/point.jl: -------------------------------------------------------------------------------- 1 | """ 2 | Point{D, T} 3 | 4 | A point in D-dimensional space, with coordinates of type T. 5 | 6 | # Examples 7 | ```jldoctest 8 | julia> p1 = Point(1.0, 2.0) 9 | Point{2, Float64}((1.0, 2.0)) 10 | 11 | julia> p2 = Point(3.0, 4.0) 12 | Point{2, Float64}((3.0, 4.0)) 13 | 14 | julia> p1 + p2 15 | Point{2, Float64}((4.0, 6.0)) 16 | ``` 17 | """ 18 | struct Point{D, T <: Real} 19 | data::NTuple{D, T} 20 | end 21 | const Point2D{T} = Point{2, T} 22 | const Point3D{T} = Point{3, T} 23 | Point(x::Real...) = Point((x...,)) 24 | LinearAlgebra.dot(x::Point, y::Point) = mapreduce(*, +, x.data .* y.data) 25 | Base.:*(x::Real, y::Point) = Point(x .* y.data) 26 | Base.:*(x::Point, y::Real) = Point(x.data .* y) 27 | Base.:/(y::Point, x::Real) = Point(y.data ./ x) 28 | Base.:+(x::Point, y::Point) = Point(x.data .+ y.data) 29 | Base.:-(x::Point, y::Point) = Point(x.data .- y.data) 30 | Base.isapprox(x::Point, y::Point; kwargs...) = all(isapprox.(x.data, y.data; kwargs...)) 31 | Base.getindex(p::Point, i::Int) = p.data[i] 32 | Base.broadcastable(p::Point) = p.data 33 | Base.iterate(p::Point, args...) = iterate(p.data, args...) 
34 | Base.zero(::Type{Point{D, T}}) where {D, T} = Point(ntuple(i->zero(T), D)) 35 | Base.zero(::Point{D, T}) where {D, T} = Point(ntuple(i->zero(T), D)) 36 | distance(p::Point, q::Point) = sqrt(sum((p - q) .^ 2)) 37 | 38 | -------------------------------------------------------------------------------- /MyFirstPackage/test/lorenz.jl: -------------------------------------------------------------------------------- 1 | using Test, MyFirstPackage 2 | 3 | @testset "Point" begin 4 | p1 = Point(1.0, 2.0) 5 | p2 = Point(3.0, 4.0) 6 | @test p1 + p2 ≈ Point(4.0, 6.0) 7 | end 8 | 9 | @testset "step" begin 10 | lz = Lorenz(10.0, 28.0, 8/3) 11 | int = RungeKutta{4}() 12 | r1 = integrate_step(lz, int, Point(1.0, 1.0, 1.0), 0.0001) 13 | eu = Euclidean() 14 | r2 = integrate_step(lz, eu, Point(1.0, 1.0, 1.0), 0.0001) 15 | @test isapprox(r1, r2; rtol=1e-5) 16 | end -------------------------------------------------------------------------------- /MyFirstPackage/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using MyFirstPackage 3 | 4 | @testset "lorenz" begin 5 | include("lorenz.jl") 6 | end -------------------------------------------------------------------------------- /PhysicsSimulation/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | *.mp4 6 | *.png 7 | *.gif -------------------------------------------------------------------------------- /PhysicsSimulation/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 GiggleLiu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /PhysicsSimulation/Project.toml: -------------------------------------------------------------------------------- 1 | name = "PhysicsSimulation" 2 | uuid = "32e3dae3-5f43-4b4d-84f0-918360eff303" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" 8 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" 9 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 10 | 11 | [compat] 12 | julia = "1" 13 | 14 | [extras] 15 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 16 | 17 | [targets] 18 | test = ["Test"] 19 | -------------------------------------------------------------------------------- /PhysicsSimulation/README.md: -------------------------------------------------------------------------------- 1 | # PhysicsSimulation 2 | 3 | 4 | This demo implements physics simulations of spring-mass systems and planet orbits. The main tool we use is geometric numerical integration. The main reference is the book by Hairer[^Hairer2006]. 5 | 6 | ## Contents 7 | - Planet orbits ([examples/main.jl](examples/main.jl)) 8 | - Automatic differentiation ([examples/main.jl](examples/main.jl)) 9 | 10 | ## To run 11 | 12 | Clone the repository to your local machine and install the required packages (in a terminal): 13 | 14 | ```bash 15 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 16 | $ cd ScientificComputingDemos 17 | $ make init-PhysicsSimulation # initialize the environment in PhysicsSimulation and PhysicsSimulation/examples 18 | $ make example-PhysicsSimulation # run the script PhysicsSimulation/examples/main.jl 19 | ``` 20 | 21 | ## References 22 | [^Hairer2006]: Hairer, Ernst, et al. "Geometric numerical integration." Oberwolfach Reports 3.1 (2006): 805-882.
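To make the leapfrog idea above concrete, here is a minimal, self-contained sketch of one kick-drift-kick update for a 1D harmonic oscillator. It is illustrative only and does not use the package API (the package itself exports `LeapFrogSystem` and `step!`); every name in the snippet is local to this example.

```julia
# Minimal leapfrog (kick-drift-kick) sketch for a 1D harmonic oscillator m*x'' = -k*x.
# Illustration only; the real implementation in this demo is `LeapFrogSystem`/`step!`.
function leapfrog_demo(x0, v0; k=1.0, m=1.0, dt=0.01, nsteps=1000)
    x, v = x0, v0
    a = -k * x / m              # initial acceleration
    for _ in 1:nsteps
        v += a * dt / 2         # half kick
        x += v * dt             # drift
        a = -k * x / m          # recompute acceleration at the new position
        v += a * dt / 2         # half kick
    end
    return x, v
end

x, v = leapfrog_demo(1.0, 0.0)
# The total energy 0.5v^2 + 0.5x^2 stays close to its initial value 0.5,
# which is the hallmark of a symplectic (geometric) integrator.
```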
23 | -------------------------------------------------------------------------------- /PhysicsSimulation/data/solar_system.csv: -------------------------------------------------------------------------------- 1 | name,mass,x,y,z,vx,vy,vz 2 | sun, 1.988544e30, -3.430031536367300E-03, 1.761881027012596E-03, 1.246691303879918E-05, 3.433119412673547E-06, -5.231300927361546E-06, -2.972974735550750E-08 3 | mercury, 3.302e23, 8.887985138765460E-02, -4.426150338141062E-01, -4.475716356484761E-02, 2.190877912081542E-02, 7.161568136528000E-03, -1.425929443086507E-03 4 | venus, 48.685e23, 4.043738093622098E-02, -7.239789211502183E-01, -1.241560658530024E-02, 2.005742309538389E-02, 1.141448268256643E-03, -1.142174441569258E-03 5 | earth, 5.97219e24, -2.020844529756663E-02, -1.014332737790859E+00, -1.358267619371298E-05, 1.692836723212859E-02, -3.484006532982474E-04, 6.028542314557626E-07 6 | mars, 6.4185e23, 7.462481663749645E-01, -1.181663652521456E+00, -4.321921404013512E-02, 1.235610918162121E-02, 8.680869489377649E-03, -1.220500608452554E-04 7 | jupyter, 1898.13e24, 3.384805319103406E+00, 3.658805636759595E+00, -9.100441946210819E-02, -5.634671617093230E-03, 5.479180979634376E-03, 1.034981407898108E-04 8 | saturn, 5.68319e26, -1.083899692644216E-01, -1.003995196286016E+01, 1.793391553155583E-01, 5.278410787728323E-03, -7.712342079566598E-05, -2.084447335785041E-04 9 | neptune, 102.41e24, 4.675566709791660E+00, -2.985428200863175E+01, 5.070034142531887E-01, 3.080716380724798E-03, 5.030733458293977E-04, -8.101711269674541E-05 10 | uranus, 86.8103e24, -2.693448460292631E-01, -1.927606446869220E+01, -6.808868692550485E-02, 3.903100242621723E-03, -2.380111092360100E-04, -5.164025224695875E-05 11 | pluto, 1.307e22, -2.129074273328636E+01, -1.896633337434039E+01, 8.187955378677129E+00, 2.276295756013608E-03, -2.670481848836963E-03, -3.669545371032554E-04 -------------------------------------------------------------------------------- /PhysicsSimulation/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" 4 | FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" 5 | Optim = "429524aa-4258-5aef-a3af-852621145aeb" 6 | PhysicsSimulation = "32e3dae3-5f43-4b4d-84f0-918360eff303" 7 | -------------------------------------------------------------------------------- /PhysicsSimulation/src/PhysicsSimulation.jl: -------------------------------------------------------------------------------- 1 | module PhysicsSimulation 2 | 3 | using LinearAlgebra 4 | 5 | export Point, Point2D, Point3D 6 | export Body, NewtonSystem, LeapFrogSystem, step!, solar_system, leapfrog_simulation 7 | 8 | include("point.jl") 9 | include("planet.jl") 10 | 11 | end 12 | -------------------------------------------------------------------------------- /PhysicsSimulation/src/point.jl: -------------------------------------------------------------------------------- 1 | """ 2 | Point{D, T} 3 | 4 | A point in D-dimensional space, with coordinates of type T. 5 | 6 | # Examples 7 | ```jldoctest 8 | julia> p1 = Point(1.0, 2.0) 9 | Point{2, Float64}((1.0, 2.0)) 10 | 11 | julia> p2 = Point(3.0, 4.0) 12 | Point{2, Float64}((3.0, 4.0)) 13 | 14 | julia> p1 + p2 15 | Point{2, Float64}((4.0, 6.0)) 16 | ``` 17 | """ 18 | struct Point{D, T <: Real} 19 | data::NTuple{D, T} 20 | end 21 | const Point2D{T} = Point{2, T} 22 | const Point3D{T} = Point{3, T} 23 | Point(x::Real...) 
= Point((x...,)) 24 | LinearAlgebra.dot(x::Point, y::Point) = mapreduce(*, +, x.data .* y.data) 25 | Base.:*(x::Real, y::Point) = Point(x .* y.data) 26 | Base.:*(x::Point, y::Real) = Point(x.data .* y) 27 | Base.:/(y::Point, x::Real) = Point(y.data ./ x) 28 | Base.:+(x::Point, y::Point) = Point(x.data .+ y.data) 29 | Base.:-(x::Point, y::Point) = Point(x.data .- y.data) 30 | Base.isapprox(x::Point, y::Point; kwargs...) = all(isapprox.(x.data, y.data; kwargs...)) 31 | Base.getindex(p::Point, i::Int) = p.data[i] 32 | Base.broadcastable(p::Point) = p.data 33 | Base.iterate(p::Point, args...) = iterate(p.data, args...) 34 | Base.zero(::Type{Point{D, T}}) where {D, T} = Point(ntuple(i->zero(T), D)) 35 | Base.zero(::Point{D, T}) where {D, T} = Point(ntuple(i->zero(T), D)) 36 | Base.length(p::Point) = length(p.data) 37 | distance(p::Point, q::Point) = sqrt(sum((p - q) .^ 2)) -------------------------------------------------------------------------------- /PhysicsSimulation/test/planet.jl: -------------------------------------------------------------------------------- 1 | using Test, PhysicsSimulation 2 | 3 | @testset "planets" begin 4 | @test length(solar_system()) == 10 5 | acc = zeros(Point3D{Float64}, length(solar_system())) 6 | @test length(PhysicsSimulation.update_acceleration!(acc, solar_system())) == 10 7 | end 8 | 9 | @testset "leapfrog" begin 10 | cached = LeapFrogSystem(solar_system()) 11 | newcache = step!(cached, 0.1) 12 | @test newcache isa LeapFrogSystem 13 | 14 | res = leapfrog_simulation(solar_system(); dt=0.01, nsteps=55) 15 | @test res[end].sys.bodies[1].r ≈ PhysicsSimulation.Point(-0.002580393612084354, 0.0008688001295124886, 2.269033380228867e-6) 16 | end -------------------------------------------------------------------------------- /PhysicsSimulation/test/point.jl: -------------------------------------------------------------------------------- 1 | using Test, PhysicsSimulation, LinearAlgebra 2 | 3 | @testset "Point Construction" begin 4 | # Basic construction 5 | p = Point(1.0, 2.0) 6 | @test p isa Point{2, Float64} 7 | @test p.data == (1.0, 2.0) 8 | 9 | # Type specific construction 10 | p_int = Point(1, 2) 11 | @test p_int isa Point{2, Int} 12 | 13 | # Convenience types 14 | p2d = Point2D{Float64}((1.0, 2.0)) 15 | p3d = Point3D{Float64}((1.0, 2.0, 3.0)) 16 | @test p2d isa Point2D{Float64} 17 | @test p3d isa Point3D{Float64} 18 | end 19 | 20 | @testset "Point Arithmetic" begin 21 | p1 = Point(1.0, 2.0) 22 | p2 = Point(3.0, 4.0) 23 | 24 | # Addition 25 | @test p1 + p2 == Point(4.0, 6.0) 26 | 27 | # Subtraction 28 | @test p2 - p1 == Point(2.0, 2.0) 29 | 30 | # Scalar multiplication 31 | @test 2 * p1 == Point(2.0, 4.0) 32 | @test p1 * 2 == Point(2.0, 4.0) 33 | 34 | # Division 35 | @test p1 / 2 == Point(0.5, 1.0) 36 | end 37 | 38 | @testset "Point Operations" begin 39 | p1 = Point(1.0, 2.0) 40 | p2 = Point(3.0, 4.0) 41 | 42 | # Dot product 43 | @test dot(p1, p2) ≈ 11.0 44 | 45 | # Distance 46 | @test PhysicsSimulation.distance(p1, p2) ≈ sqrt(8.0) 47 | 48 | # Zero 49 | @test zero(Point{2, Float64}) == Point(0.0, 0.0) 50 | @test zero(p1) == Point(0.0, 0.0) 51 | end 52 | 53 | @testset "Point Utilities" begin 54 | p = Point(1.0, 2.0) 55 | @test length(p) == 2 56 | 57 | # Indexing 58 | @test p[1] == 1.0 59 | @test p[2] == 2.0 60 | 61 | # Iteration 62 | collected = collect(p) 63 | @test collected == [1.0, 2.0] 64 | 65 | # Approximate equality 66 | p1 = Point(1.0, 2.0) 67 | p2 = Point(1.0 + 1e-10, 2.0 - 1e-10) 68 | @test isapprox(p1, p2, atol=1e-9) 69 | end 70 | 
-------------------------------------------------------------------------------- /PhysicsSimulation/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using PhysicsSimulation 2 | using Test 3 | 4 | @testset "point" begin 5 | include("point.jl") 6 | end 7 | 8 | @testset "planet" begin 9 | include("planet.jl") 10 | end -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | (Work in progress) Demo projects in the book: [Scientific Computing for Physicists](https://scfp.jinguo-group.science/) 2 | 3 | [![CI](https://github.com/GiggleLiu/ScientificComputingDemos/actions/workflows/CI.yml/badge.svg)](https://github.com/GiggleLiu/ScientificComputingDemos/actions/workflows/CI.yml) 4 | 5 | ## Get started 6 | Please make sure you have Julia installed on your local machine. If not, please download and install it with [juliaup](https://github.com/JuliaLang/juliaup). 7 | 8 | 1. Clone this repository to your local machine: 9 | ```bash 10 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 11 | ``` 12 | 2. Initialize the environment first by running the following command in the terminal: 13 | ```bash 14 | $ make init-PhysicsSimulation 15 | ``` 16 | 3. Run the tests and examples with the following commands in the terminal: 17 | ```bash 18 | $ make test-PhysicsSimulation 19 | $ make example-PhysicsSimulation 20 | ``` 21 | `make test-%` runs the tests of the demo in directory `%`, and `make example-%` runs its example script. Here `PhysicsSimulation` is just the demo directory name; replace it with any other demo directory listed below. 22 | 23 | ## Contents 24 | ### Basic 25 | 1. [MyFirstPackage](MyFirstPackage/) - Lorenz attractor 26 | 27 | ### Matrix computation 28 | 1. [SimpleLinearAlgebra](SimpleLinearAlgebra/) - Implement LU decomposition, QR decomposition, FFT, etc. 29 | 2. [ImageProcessing](ImageProcessing/) - FFT and SVD for image processing 30 | 3. [CompressedSensing](CompressedSensing/) - Compressed sensing for image compression 31 | 32 | ### Machine learning and optimization 33 | 1. [GraphClustering](GraphClustering/) - Spectral clustering algorithm 34 | 2. [KernelPCA](KernelPCA/) - Kernel method and Kernel PCA 35 | 36 | ### Physics simulation 37 | 1. [PhysicsSimulation](PhysicsSimulation/) - Simulate a spring system, leapfrog method, and eigenvalue problem 38 | 2. [LatticeBoltzmannModel](LatticeBoltzmannModel/) - Lattice Boltzmann Fluid Dynamics 39 | 3. [LatticeGasCA](LatticeGasCA/) - Lattice gas cellular automata 40 | 41 | ### Statistical physics and computational complexity 42 | 1. [IsingModel](IsingModel/) - Ferromagnetic Ising model using the Monte Carlo method 43 | 2. [Spinglass](Spinglass/) - Spin glass model using tensor networks and simulated annealing. 44 | 3. 
[SimpleTensorNetwork](SimpleTensorNetwork/) - Tensor networks for solving spin glass and inference problems 45 | -------------------------------------------------------------------------------- /SimpleKrylov/Project.toml: -------------------------------------------------------------------------------- 1 | name = "SimpleKrylov" 2 | uuid = "de1cc367-7f88-4530-9c42-f9b2800cde19" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 8 | 9 | [compat] 10 | LinearAlgebra = "1.11.0" 11 | julia = "1.6.7" 12 | 13 | [extras] 14 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 15 | 16 | [targets] 17 | test = ["Test"] 18 | -------------------------------------------------------------------------------- /SimpleKrylov/README.md: -------------------------------------------------------------------------------- 1 | # SimpleKrylov 2 | 3 | This package implements a simple sparse matrix type and a simple Lanczos algorithm. 4 | 5 | ## To run 6 | 7 | Clone the repository to your local machine and install the required packages (in a terminal): 8 | 9 | ```bash 10 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 11 | $ cd ScientificComputingDemos 12 | $ make init-SimpleKrylov # initialize the environment in SimpleKrylov 13 | $ make test-SimpleKrylov # run the tests 14 | ``` 15 | 16 | ## References 17 | For professional use, please use the standard library `SparseArrays` and the [`KrylovKit.jl`](https://github.com/Jutho/KrylovKit.jl) package. -------------------------------------------------------------------------------- /SimpleKrylov/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 4 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 5 | SimpleKrylov = "de1cc367-7f88-4530-9c42-f9b2800cde19" 6 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 7 | -------------------------------------------------------------------------------- /SimpleKrylov/src/SimpleKrylov.jl: -------------------------------------------------------------------------------- 1 | module SimpleKrylov 2 | 3 | export lanczos_reorthogonalize 4 | export arnoldi_iteration 5 | 6 | # NOTE: This module is only for tutoring, you should use the standard library `SparseArrays` in your project 7 | module SimpleSparseArrays 8 | 9 | export COOMatrix, CSCMatrix 10 | 11 | include("coo.jl") 12 | include("csc.jl") 13 | 14 | end 15 | 16 | using .SimpleSparseArrays 17 | using LinearAlgebra 18 | 19 | include("lanczos.jl") 20 | include("arnoldi.jl") 21 | 22 | end 23 | -------------------------------------------------------------------------------- /SimpleKrylov/src/arnoldi.jl: -------------------------------------------------------------------------------- 1 | function arnoldi_iteration(A::AbstractMatrix{T}, x0::AbstractVector{T}; maxiter) where T 2 | # Storage for Hessenberg matrix entries (column by column) 3 | h = Vector{T}[] 4 | # Storage for orthonormal basis vectors of the Krylov subspace 5 | q = [normalize(x0)] 6 | n = length(x0) 7 | # Ensure A is a square matrix of appropriate dimensions 8 | @assert size(A) == (n, n) 9 | 10 | # Main Arnoldi iteration loop 11 | for k = 1:min(maxiter, n) 12 | # Apply the matrix to the latest basis vector 13 | u = A * q[k] # generate next vector 14 | 15 | # Initialize the k-th column of the Hessenberg matrix 16 | hk = zeros(T, k+1) 17 | 18 | # 
Orthogonalize against all previous basis vectors (Gram-Schmidt process) 19 | for j = 1:k # subtract from new vector its components in all preceding vectors 20 | hk[j] = q[j]' * u # Calculate projection coefficient 21 | u = u - hk[j] * q[j] # Subtract projection 22 | end 23 | 24 | # Calculate the norm of the remaining vector 25 | hkk = norm(u) 26 | hk[k+1] = hkk # This will be the subdiagonal entry 27 | push!(h, hk) # Store this column of coefficients 28 | 29 | # Check for convergence or breakdown 30 | if abs(hkk) < 1e-8 || k >= n # stop if matrix is reducible 31 | break 32 | else 33 | # Normalize the new basis vector and add to our collection 34 | push!(q, u ./ hkk) 35 | end 36 | end 37 | 38 | # Construct the Hessenberg matrix H from the stored coefficients 39 | kmax = length(h) 40 | H = zeros(T, kmax, kmax) 41 | for k = 1:length(h) 42 | if k == kmax 43 | # Last column might be shorter if we had early termination 44 | H[1:k, k] .= h[k][1:k] 45 | else 46 | # Standard case: copy the full column including subdiagonal entry 47 | H[1:k+1, k] .= h[k] 48 | end 49 | end 50 | 51 | # Return the Hessenberg matrix and the orthonormal basis matrix 52 | return H, hcat(q...) 53 | end 54 | -------------------------------------------------------------------------------- /SimpleKrylov/src/coo.jl: -------------------------------------------------------------------------------- 1 | struct COOMatrix{Tv, Ti} <: AbstractArray{Tv, 2} # Julia does not have a COO data type 2 | m::Ti # number of rows 3 | n::Ti # number of columns 4 | colval::Vector{Ti} # column indices 5 | rowval::Vector{Ti} # row indices 6 | nzval::Vector{Tv} # values 7 | function COOMatrix(m::Ti, n::Ti, colval::Vector{Ti}, rowval::Vector{Ti}, nzval::Vector{Tv}) where {Tv, Ti} 8 | @assert length(colval) == length(rowval) == length(nzval) 9 | new{Tv, Ti}(m, n, colval, rowval, nzval) 10 | end 11 | end 12 | 13 | Base.size(coo::COOMatrix) = (coo.m, coo.n) 14 | Base.size(coo::COOMatrix, i::Int) = getindex((coo.m, coo.n), i) 15 | # the number of non-zero elements 16 | nnz(coo::COOMatrix) = length(coo.nzval) 17 | 18 | function Base.getindex(coo::COOMatrix{Tv}, i::Integer, j::Integer) where Tv 19 | @boundscheck checkbounds(coo, i, j) 20 | v = zero(Tv) 21 | for (i2, j2, v2) in zip(coo.rowval, coo.colval, coo.nzval) 22 | if i == i2 && j == j2 23 | v += v2 # accumulate the value, since repeated indices are allowed. 
24 | end 25 | end 26 | return v 27 | end 28 | 29 | function Base.:(*)(A::COOMatrix{T1}, B::COOMatrix{T2}) where {T1, T2} 30 | @assert size(A, 2) == size(B, 1) 31 | rowval = Int[] 32 | colval = Int[] 33 | nzval = promote_type(T1, T2)[] 34 | for (i, j, v) in zip(A.rowval, A.colval, A.nzval) 35 | for (i2, j2, v2) in zip(B.rowval, B.colval, B.nzval) 36 | if j == i2 37 | push!(rowval, i) 38 | push!(colval, j2) 39 | push!(nzval, v * v2) 40 | end 41 | end 42 | end 43 | return COOMatrix(size(A, 1), size(B, 2), colval, rowval, nzval) 44 | end -------------------------------------------------------------------------------- /SimpleKrylov/src/csc.jl: -------------------------------------------------------------------------------- 1 | struct CSCMatrix{Tv,Ti} <: AbstractMatrix{Tv} 2 | m::Int 3 | n::Int 4 | colptr::Vector{Ti} 5 | rowval::Vector{Ti} 6 | nzval::Vector{Tv} 7 | function CSCMatrix(m::Int, n::Int, colptr::Vector{Ti}, rowval::Vector{Ti}, nzval::Vector{Tv}) where {Tv, Ti} 8 | @assert length(colptr) == n + 1 9 | @assert length(rowval) == length(nzval) == colptr[end] - 1 10 | new{Tv, Ti}(m, n, colptr, rowval, nzval) 11 | end 12 | end 13 | Base.size(A::CSCMatrix) = (A.m, A.n) 14 | Base.size(A::CSCMatrix, i::Int) = getindex((A.m, A.n), i) 15 | # the number of non-zero elements 16 | nnz(csc::CSCMatrix) = length(csc.nzval) 17 | 18 | function CSCMatrix(coo::COOMatrix{Tv, Ti}) where {Tv, Ti} 19 | m, n = size(coo) 20 | # sort the COO matrix by column 21 | order = sortperm(1:nnz(coo); by=i->coo.rowval[i] + m * (coo.colval[i]-1)) 22 | colptr, rowval, nzval = similar(coo.rowval, n+1), similar(coo.rowval), similar(coo.nzval) 23 | k = 0 24 | ipre, jpre = 0, 0 25 | colptr[1] = 1 26 | for idx in order 27 | i, j, v = coo.rowval[idx], coo.colval[idx], coo.nzval[idx] 28 | # values with the same indices are accumulated 29 | if i == ipre && j == jpre 30 | nzval[k] += v 31 | else 32 | k += 1 33 | if j != jpre 34 | # a new column starts 35 | colptr[jpre+1:j+1] .= k 36 | end 37 | rowval[k] = i 38 | nzval[k] = v 39 | ipre, jpre = i, j 40 | end 41 | end 42 | colptr[jpre+1:end] .= k + 1 43 | resize!(rowval, k) 44 | resize!(nzval, k) 45 | return CSCMatrix(m, n, colptr, rowval, nzval) 46 | end 47 | 48 | function Base.getindex(A::CSCMatrix{T}, i::Int, j::Int) where T 49 | @boundscheck checkbounds(A, i, j) 50 | for k in nzrange(A, j) 51 | if A.rowval[k] == i 52 | return A.nzval[k] 53 | end 54 | end 55 | return zero(T) 56 | end 57 | 58 | function Base.:*(A::CSCMatrix{T1}, B::CSCMatrix{T2}) where {T1, T2} 59 | T = promote_type(T1, T2) 60 | @assert size(A, 2) == size(B, 1) 61 | rowval, colval, nzval = Int[], Int[], T[] 62 | for j2 in 1:size(B, 2) # enumerate the columns of B 63 | for k2 in nzrange(B, j2) # enumerate the rows of B 64 | v2 = B.nzval[k2] 65 | for k1 in nzrange(A, B.rowval[k2]) # enumerate the rows of A 66 | push!(rowval, A.rowval[k1]) 67 | push!(colval, j2) 68 | push!(nzval, A.nzval[k1] * v2) 69 | end 70 | end 71 | end 72 | return CSCMatrix(COOMatrix(size(A, 1), size(B, 2), colval, rowval, nzval)) 73 | end 74 | 75 | # return the range of non-zero elements in the j-th column 76 | nzrange(A::CSCMatrix, j::Int) = A.colptr[j]:A.colptr[j+1]-1 -------------------------------------------------------------------------------- /SimpleKrylov/test/arnoldi.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra, SimpleKrylov, Test 2 | 3 | @testset "arnoldi" begin 4 | # Create a sparse random matrix 5 | n = 100 6 | A = rand(n, n) 7 | 8 | # Create a random starting vector and 
normalize it 9 | q1 = randn(n) 10 | q1 = normalize(q1) 11 | 12 | # Run our Arnoldi iteration implementation 13 | H, Q = arnoldi_iteration(A, q1; maxiter=20) 14 | 15 | # Test that Q is orthonormal 16 | @test Q'Q ≈ I(size(Q, 2)) atol=1e-10 17 | 18 | # Test eigenvalue approximations 19 | # The Ritz values (eigenvalues of H) should approximate some eigenvalues of A 20 | evals_arnoldi = eigen(H).values 21 | evals_exact = eigen(A).values 22 | 23 | # Sort eigenvalues by magnitude for comparison 24 | sort_by_magnitude(x) = sort(x, by=abs, rev=true) 25 | evals_arnoldi_sorted = sort_by_magnitude(evals_arnoldi) 26 | evals_exact_sorted = sort_by_magnitude(evals_exact) 27 | 28 | # The largest eigenvalues should be well-approximated 29 | @test abs(evals_arnoldi_sorted[1]) ≈ abs(evals_exact_sorted[1]) rtol=1e-2 30 | 31 | # Test with a symmetric matrix where Arnoldi should be equivalent to Lanczos 32 | A_sym = A + A' 33 | H_sym, Q_sym = arnoldi_iteration(A_sym, q1; maxiter=20) 34 | 35 | # For symmetric matrices, H should be nearly tridiagonal 36 | for i in 1:size(H_sym, 1) 37 | for j in 1:size(H_sym, 2) 38 | if j > i+1 39 | @test abs(H_sym[i, j]) < 1e-10 40 | end 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /SimpleKrylov/test/coo.jl: -------------------------------------------------------------------------------- 1 | using Test, SimpleKrylov.SimpleSparseArrays 2 | 3 | @testset "coo matmul" begin 4 | stiffmatrix = COOMatrix(3, 3, [1, 2, 1, 2, 3, 2, 3], [1, 1, 2, 2, 2, 3, 3], [-1.0, 1, 1, -2, 1, 1, -1]) 5 | @test stiffmatrix[2, 3] == 1 6 | dense_matrix = Matrix(stiffmatrix) 7 | @test stiffmatrix * stiffmatrix ≈ dense_matrix ^ 2 8 | end -------------------------------------------------------------------------------- /SimpleKrylov/test/csc.jl: -------------------------------------------------------------------------------- 1 | using Test, SimpleKrylov.SimpleSparseArrays 2 | 3 | @testset "coo2csc" begin 4 | stiffmatrix = COOMatrix(3, 3, [1, 2, 1, 2, 3, 2, 3], [1, 1, 2, 2, 2, 3, 3], [-1.0, 1, 1, -2, 1, 1, -1]) 5 | csc_matrix = CSCMatrix(stiffmatrix) 6 | @test Matrix(csc_matrix) ≈ Matrix(stiffmatrix) 7 | end 8 | 9 | @testset "csc matmul" begin 10 | csc_matrix = CSCMatrix(6, 6, [1, 3, 6, 6, 7, 7, 8], [1, 2, 1, 2, 3, 2, 3], [-1.0, 1, 1, -2, 1, 1, -1]) 11 | @test Matrix(csc_matrix)^2 ≈ csc_matrix * csc_matrix 12 | end 13 | 14 | @testset "repeated entries" begin 15 | coo_matrix = COOMatrix(5, 4, [2, 3, 1, 4, 3, 4], [1, 1, 2, 2, 4, 4], [1, 2, 3, 4, 5, 6]) 16 | csc_matrix = CSCMatrix(coo_matrix) 17 | csc_matrix2 = CSCMatrix(COOMatrix(coo_matrix.n, coo_matrix.m, coo_matrix.rowval, coo_matrix.colval, coo_matrix.nzval)) # transpose 18 | @test Matrix(csc_matrix) * Matrix(csc_matrix2) ≈ csc_matrix * csc_matrix2 19 | end -------------------------------------------------------------------------------- /SimpleKrylov/test/lanczos.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra, SimpleKrylov, SimpleKrylov.SimpleSparseArrays 2 | 3 | @testset "lanczos" begin 4 | # Create a random 3-regular graph with 1000 vertices 5 | n = 1000 6 | is = rand(1:n, 3n) 7 | js = rand(1:n, 3n) 8 | vals = randn(3n) 9 | # create a symmetric matrix 10 | coo_matrix = SimpleSparseArrays.COOMatrix(n, n, vcat(is, js), vcat(js, is), vcat(vals, vals)) 11 | A = SimpleSparseArrays.CSCMatrix(coo_matrix) 12 | 13 | # Generate a random initial vector 14 | q1 = randn(n) 15 | 16 | # Apply our Lanczos implementation 17 | T, Q = 
lanczos_reorthogonalize(A, q1; abstol=1e-5, maxiter=200) 18 | 19 | # Compute eigenvalues of the resulting tridiagonal matrix 20 | eigenvalues = eigen(T).values 21 | 22 | 23 | # Find the two smallest eigenvalues using KrylovKit 24 | # :SR means "smallest real part" 25 | evals, evecs = eigen(Matrix(A)) 26 | @test evals[1:2] ≈ eigenvalues[1:2] atol=1e-5 27 | end 28 | -------------------------------------------------------------------------------- /SimpleKrylov/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using SimpleKrylov 2 | using Test 3 | 4 | @testset "coo" begin 5 | include("coo.jl") 6 | end 7 | 8 | @testset "csc" begin 9 | include("csc.jl") 10 | end 11 | 12 | @testset "lanczos" begin 13 | include("lanczos.jl") 14 | end 15 | 16 | @testset "arnoldi" begin 17 | include("arnoldi.jl") 18 | end -------------------------------------------------------------------------------- /SimpleLinearAlgebra/.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.*.cov 2 | *.jl.cov 3 | *.jl.mem 4 | /Manifest.toml 5 | /docs/Manifest.toml 6 | /docs/build/ 7 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 YidaiZhang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/Project.toml: -------------------------------------------------------------------------------- 1 | name = "SimpleLinearAlgebra" 2 | uuid = "4b12b69c-63dd-4c1b-a448-fe77b38563e3" 3 | authors = ["YidaiZhang"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 10 | 11 | [compat] 12 | FFTW = "1" 13 | LinearAlgebra = "1" 14 | SparseArrays = "1" 15 | julia = "1.10" 16 | 17 | [extras] 18 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 19 | 20 | [targets] 21 | test = ["Test"] 22 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/README.md: -------------------------------------------------------------------------------- 1 | # SimpleLinearAlgebra 2 | 3 | Implement fundamental linear algebra operations from scratch. 
This educational package demonstrates how common linear algebra algorithms work under the hood, with clear implementations that prioritize readability over performance. 4 | The main reference is the book "Matrix Computations" by Golub and Van Loan[^Golub2016]. 5 | 6 | ## Features 7 | 8 | This package implements the following linear algebra operations: 9 | 10 | - **Matrix Factorizations** 11 | - LU Decomposition (with and without pivoting) 12 | - QR Decomposition (using Householder reflections and Givens rotations) 13 | - Gram-Schmidt Orthogonalization (classical and modified) 14 | 15 | - **Linear System Solvers** 16 | - Forward and backward substitution 17 | - Linear system solving via LU factorization 18 | 19 | - **Fast Algorithms** 20 | - Strassen's matrix multiplication algorithm 21 | - Fast Fourier Transform (FFT) and Inverse FFT 22 | 23 | ## Run examples 24 | 25 | Clone the repository to your local machine and install the required packages (in a terminal): 26 | 27 | ```bash 28 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 29 | $ cd ScientificComputingDemos 30 | $ make init-SimpleLinearAlgebra # initialize the environment in SimpleLinearAlgebra and SimpleLinearAlgebra/examples 31 | $ make test-SimpleLinearAlgebra # run the tests in SimpleLinearAlgebra/test 32 | $ make example-SimpleLinearAlgebra # run the script SimpleLinearAlgebra/examples/main.jl 33 | ``` 34 | 35 | ## References 36 | [^Golub2016]: Golub, G.H., Van Loan, C.F., 2013. Matrix Computations, 4th ed. Johns Hopkins University Press. 37 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 3 | SimpleLinearAlgebra = "4b12b69c-63dd-4c1b-a448-fe77b38563e3" 4 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/src/SimpleLinearAlgebra.jl: -------------------------------------------------------------------------------- 1 | module SimpleLinearAlgebra 2 | # import packages 3 | using LinearAlgebra 4 | import FFTW 5 | 6 | # `include` other source files into this module 7 | include("strassen.jl") 8 | include("fft.jl") 9 | include("lu.jl") 10 | include("qr.jl") 11 | 12 | end # module 13 | 14 | -------------------------------------------------------------------------------- /SimpleLinearAlgebra/src/strassen.jl: -------------------------------------------------------------------------------- 1 | function strassen(A::AbstractMatrix{T}, B::AbstractMatrix{T}) where T 2 | n = size(A, 1) 3 | n == 1 && return A * B 4 | @assert iseven(n) && size(A) == size(B) "matrix sizes must be even and equal" 5 | 6 | m = div(n, 2) 7 | A11, A12 = A[1:m, 1:m], A[1:m, m+1:n] 8 | A21, A22 = A[m+1:n, 1:m], A[m+1:n, m+1:n] 9 | B11, B12 = B[1:m, 1:m], B[1:m, m+1:n] 10 | B21, B22 = B[m+1:n, 1:m], B[m+1:n, m+1:n] 11 | 12 | M1 = strassen(A11 + A22, B11 + B22) 13 | M2 = strassen(A21 + A22, B11) 14 | M3 = strassen(A11, B12 - B22) 15 | M4 = strassen(A22, B21 - B11) 16 | M5 = strassen(A11 + A12, B22) 17 | M6 = strassen(A21 - A11, B11 + B12) 18 | M7 = strassen(A12 - A22, B21 + B22) 19 | 20 | C11 = M1 + M4 - M5 + M7 21 | C12 = M3 + M5 22 | C21 = M2 + M4 23 | C22 = M1 - M2 + M3 + M6 24 | 25 | C = similar(A) 26 | C[1:m, 1:m] = C11 27 | C[1:m, m+1:n] = C12 28 | C[m+1:n, 1:m] = C21 29 | C[m+1:n, m+1:n] = C22 30 | 31 | return C 32 | end 
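For completeness, here is a small usage sketch of the `strassen` routine defined above; it mirrors the check in `test/strassen.jl`. Note that the recursion halves the matrix size at every level, so the inputs are assumed to be square with power-of-two dimensions.

```julia
# Illustrative usage of the recursive Strassen multiplication defined above.
using SimpleLinearAlgebra: strassen

A = rand(8, 8)      # 8 is a power of two, so every recursive split stays even
B = rand(8, 8)
C = strassen(A, B)  # 7 recursive multiplications per level instead of 8

@assert C ≈ A * B   # agrees with the built-in matrix product up to rounding
```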
-------------------------------------------------------------------------------- /SimpleLinearAlgebra/test/fft.jl: -------------------------------------------------------------------------------- 1 | using Test, LinearAlgebra, SparseArrays 2 | using SimpleLinearAlgebra: fft!, ifft!, dft_matrix 3 | 4 | @testset "fft" begin 5 | x = randn(ComplexF64, 8) 6 | @test fft!(copy(x)) ≈ dft_matrix(8) * x 7 | end 8 | 9 | @testset "ifft" begin 10 | x = randn(ComplexF64, 8) 11 | @test ifft!(copy(x)) ≈ inv(dft_matrix(8)) * x 12 | end 13 | 14 | @testset "fft decomposition" begin 15 | n = 4 16 | Fn = dft_matrix(n) 17 | F2n = dft_matrix(2n) 18 | 19 | # the permutation matrix to permute elements at 1:2:n (odd) to 1:n÷2 (top half) 20 | pm = sparse([iseven(j) ? (j÷2+n) : (j+1)÷2 for j=1:2n], 1:2n, ones(2n), 2n, 2n) 21 | 22 | # construct the D matrix 23 | ω = exp(-π*im/n) 24 | d1 = Diagonal([ω^(i-1) for i=1:n]) 25 | 26 | # construct F_{2n} from F_n 27 | F2n_ = [Fn d1 * Fn; Fn -d1 * Fn] 28 | @test F2n * pm' ≈ F2n_ 29 | end -------------------------------------------------------------------------------- /SimpleLinearAlgebra/test/lu.jl: -------------------------------------------------------------------------------- 1 | using Test, LinearAlgebra 2 | using SimpleLinearAlgebra: classical_gram_schmidt, modified_gram_schmidt!, lufact_pivot!, forward_substitution!, lufact 3 | 4 | @testset "classical GS" begin 5 | n = 10 6 | A = randn(n, n) 7 | Q, R = classical_gram_schmidt(A) 8 | @test Q * R ≈ A 9 | @test Q * Q' ≈ I 10 | @info R 11 | end 12 | 13 | @testset "modified GS" begin 14 | n = 10 15 | A = randn(n, n) 16 | Q, R = modified_gram_schmidt!(copy(A)) 17 | @test Q * R ≈ A 18 | @test Q * Q' ≈ I 19 | @info R 20 | end 21 | 22 | @testset "lufact with pivot" begin 23 | n = 5 24 | A = randn(n, n) 25 | L, U, P = lufact_pivot!(copy(A)) 26 | pmat = zeros(Int, n, n) 27 | setindex!.(Ref(pmat), 1, 1:n, P) 28 | @test L ≈ lu(A).L 29 | @test U ≈ lu(A).U 30 | @test pmat * A ≈ L * U 31 | end 32 | 33 | @testset "forward substitution" begin 34 | # create a random lower triangular matrix 35 | l = LinearAlgebra.tril(randn(4, 4)) 36 | # target vector 37 | b = randn(4) 38 | # solve the linear equation with our algorithm 39 | x = forward_substitution!(l, copy(b)) 40 | @test l * x ≈ b 41 | 42 | # The Julia's standard library `LinearAlgebra` contains a native implementation. 
43 | x_native = LowerTriangular(l) \ b 44 | @test l * x_native ≈ b 45 | end 46 | 47 | 48 | @testset "LU factorization" begin 49 | a = randn(4, 4) 50 | L, U = lufact(a) 51 | @test istril(L) 52 | @test istriu(U) 53 | @test L * U ≈ a 54 | end -------------------------------------------------------------------------------- /SimpleLinearAlgebra/test/qr.jl: -------------------------------------------------------------------------------- 1 | using Test, LinearAlgebra 2 | using SimpleLinearAlgebra: classical_gram_schmidt, modified_gram_schmidt, givens_qr!, householder_qr!, HouseholderMatrix 3 | 4 | @testset "givens QR" begin 5 | n = 3 6 | A = randn(n, n) 7 | R = copy(A) 8 | Q, R = givens_qr!(Matrix{Float64}(I, n, n), R) 9 | @test Q * R ≈ A 10 | @test Q * Q' ≈ I 11 | @info R 12 | end 13 | 14 | 15 | @testset "householder property" begin 16 | v = randn(3) 17 | H = HouseholderMatrix(v) 18 | A = randn(3, 3) 19 | @test H * A ≈ Matrix(H) * A 20 | @test A * H ≈ A * Matrix(H) 21 | CA = copy(A) 22 | mul!(CA, H, CA) 23 | @test CA ≈ H * A 24 | # symmetric 25 | @test H' ≈ H 26 | # reflexive 27 | @test H^2 ≈ I 28 | # orthogonal 29 | @test H' * H ≈ I 30 | end 31 | 32 | @testset "householder QR" begin 33 | A = randn(3, 3) 34 | Q = Matrix{Float64}(I, 3, 3) 35 | R = copy(A) 36 | householder_qr!(Q, R) 37 | @info R 38 | @test Q * R ≈ A 39 | @test Q' * Q ≈ I 40 | end -------------------------------------------------------------------------------- /SimpleLinearAlgebra/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using SimpleLinearAlgebra 2 | using Test 3 | 4 | @testset "strassen.jl" begin 5 | include("strassen.jl") 6 | end 7 | 8 | @testset "qr.jl" begin 9 | include("qr.jl") 10 | end 11 | 12 | @testset "lu.jl" begin 13 | include("lu.jl") 14 | end 15 | 16 | @testset "fft.jl" begin 17 | include("fft.jl") 18 | end -------------------------------------------------------------------------------- /SimpleLinearAlgebra/test/strassen.jl: -------------------------------------------------------------------------------- 1 | using SimpleLinearAlgebra: strassen 2 | using Test 3 | 4 | @testset "Strassen's Algorithm" begin 5 | A = rand(4, 4) 6 | B = rand(4, 4) 7 | C = strassen(A, B) 8 | @test C ≈ A * B 9 | end -------------------------------------------------------------------------------- /SimpleTensorNetwork/Project.toml: -------------------------------------------------------------------------------- 1 | name = "SimpleTensorNetwork" 2 | uuid = "78e1b9ab-115c-454e-b043-970603f427b2" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 8 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 9 | NPZ = "15e1cf62-19b3-5cfa-8e77-841668bca605" 10 | OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" 11 | 12 | [compat] 13 | CairoMakie = "0.13.1" 14 | Graphs = "1" 15 | NPZ = "0.4.3" 16 | OMEinsum = "0.7, 0.8" 17 | julia = "1" 18 | 19 | [extras] 20 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 21 | 22 | [targets] 23 | test = ["Test"] 24 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/README.md: -------------------------------------------------------------------------------- 1 | # SimpleTensorNetwork 2 | 3 | ## Example 1: Spin-glass partition function 4 | This example computes the partition function of an anti-ferromagnetic Ising model on the Petersen graph. The Petersen graph is a graph with 10 vertices and 15 edges. 
5 | ```julia 6 | edgs = [(1, 2), (1, 5), (1, 6), (2, 3), 7 | (2, 7), (3, 4), (3, 8), (4, 5), 8 | (4, 9), (5, 10), (6, 8), (6, 9), 9 | (7, 9), (7, 10), (8, 10)] 10 | ``` 11 | ![](assets/graph-petersen.png) 12 | 13 | The energy model is given by the Hamiltonian 14 | ```math 15 | H(\boldsymbol{\sigma}) = -\sum_{(i,j) \in E} J_{ij} \sigma_i \sigma_j 16 | ``` 17 | where the coupling constants are given by the adjacency matrix of the Petersen graph. 18 | 19 | The partition function is given by 20 | ```math 21 | Z = \sum_{\boldsymbol{\sigma}} \exp(-\beta H(\boldsymbol{\sigma})) 22 | ``` 23 | where the sum is over all possible spin configurations. 24 | 25 | ## Example 2: Bayesian network 26 | 27 | This example is from the [TensorInference.jl](https://github.com/TensorBFS/TensorInference.jl) package. The graph below corresponds to the *ASIA network*, a simple Bayesian model used extensively in educational settings. It was introduced by Lauritzen in 1988 [^lauritzen1988local]. 28 | 29 | ![](assets/asia.png) 30 | 31 | Conditional probability tables (CPTs) are given by 32 | ```julia 33 | ``` 34 | 35 | ## References 36 | [^lauritzen1988local]: Steffen L Lauritzen and David J Spiegelhalter. Local computations with probabilities on graphical structures and their application to expert systems. *Journal of the Royal Statistical Society: Series B (Methodological)*, 50(2):157–194, 1988. -------------------------------------------------------------------------------- /SimpleTensorNetwork/assets/asia.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/SimpleTensorNetwork/assets/asia.png -------------------------------------------------------------------------------- /SimpleTensorNetwork/assets/graph-petersen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/SimpleTensorNetwork/assets/graph-petersen.png -------------------------------------------------------------------------------- /SimpleTensorNetwork/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" 3 | LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc" 4 | SimpleTensorNetwork = "78e1b9ab-115c-454e-b043-970603f427b2" 5 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/examples/main.jl: -------------------------------------------------------------------------------- 1 | include("spinglass.jl") 2 | include("asia.jl") -------------------------------------------------------------------------------- /SimpleTensorNetwork/examples/model.uai: -------------------------------------------------------------------------------- 1 | MARKOV 2 | 8 3 | 2 2 2 2 2 2 2 2 4 | 8 5 | 1 0 6 | 2 1 0 7 | 1 2 8 | 2 3 2 9 | 2 4 2 10 | 3 5 3 1 11 | 2 6 5 12 | 3 7 5 4 13 | 14 | 2 15 | 0.01 16 | 0.99 17 | 18 | 4 19 | 0.05 0.01 20 | 0.95 0.99 21 | 22 | 2 23 | 0.5 24 | 0.5 25 | 26 | 4 27 | 0.1 0.01 28 | 0.9 0.99 29 | 30 | 4 31 | 0.6 0.3 32 | 0.4 0.7 33 | 34 | 8 35 | 1 1 1 0 36 | 0 0 0 1 37 | 38 | 4 39 | 0.98 0.05 40 | 0.02 0.95 41 | 42 | 8 43 | 0.9 0.7 0.8 0.1 44 | 0.1 0.3 0.2 0.9 45 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/examples/spinglass.jl: 
-------------------------------------------------------------------------------- 1 | using SimpleTensorNetwork 2 | using SimpleTensorNetwork.Graphs, SimpleTensorNetwork.OMEinsum 3 | using LuxorGraphPlot 4 | 5 | graphname = :petersen 6 | graph = smallgraph(graphname) 7 | filename = "graph-$graphname.png" 8 | LuxorGraphPlot.show_graph(graph; filename, texts=string.(1:nv(graph))) 9 | @info "loaded graph: $graphname, saved to `$filename`" 10 | 11 | sg = Spinglass(graph, ones(15), zeros(10)) 12 | @info """initialized spin-glass on graph: 13 | - edges: $(collect(edges(sg.graph))) 14 | - J: $(sg.J) 15 | - h: $(sg.h) 16 | """ 17 | β = 0.1 18 | tn = generate_tensor_network(sg, β) 19 | @info """generated tensor network at β=$β: 20 | - input labels: $(tn.ixs) 21 | - output labels: $(tn.iy) 22 | """ 23 | complexity_before = contraction_complexity(DynamicEinCode(tn.ixs, tn.iy), Dict(i=>2 for i in vertices(graph))) 24 | opttn = optimize_tensornetwork(tn) 25 | complexity_after = contraction_complexity(opttn.ein, Dict(i=>2 for i in vertices(graph))) 26 | @info """optimized tensor network, the computational complexity change: 27 | - original: $complexity_before 28 | - optimized: $complexity_after 29 | """ 30 | result = partition_function(sg, β) 31 | exact_result = partition_function_exact(sg, β) 32 | @info "partition function: $result (exact: $exact_result)" 33 | 34 | using GenericTensorNetworks 35 | problem = GenericTensorNetworks.SpinGlass(graph; J=fill(-1, ne(graph))) 36 | configs = solve(problem, ConfigsMin(3; bounded=false))[] 37 | 38 | function connect_by_hamming_distance(configs) 39 | nc = length(configs) 40 | g = SimpleGraph(nc) 41 | for i in 1:nc-1, j in i+1:nc 42 | hamming_distance(configs[i], configs[j]) <= 2 && add_edge!(g, i, j) 43 | end 44 | return g 45 | end 46 | hamming_distance(a, b) = sum(a .!= b) 47 | 48 | using LuxorGraphPlot 49 | function multipartite_layout(graph, sets; C=2.0) 50 | locs = Vector{Tuple{Float64, Float64}}[] 51 | @show sets 52 | for (meanloc, set) in sets 53 | gi, = Graphs.induced_subgraph(graph, set) 54 | xs, ys = LuxorGraphPlot.spring_layout(gi; C) 55 | f = nv(gi)^1.5/1000 56 | push!(locs, map(xs, ys) do x, y 57 | (f * x + meanloc[1], f * y + meanloc[2]) 58 | end) 59 | end 60 | locs 61 | end 62 | 63 | function zstack_layout(graph, sets; C=2.0, xyratio=3, deltaz=10.0) 64 | @show [(0, -(k-1)*deltaz)=>s for (k, s) in enumerate(sets)] 65 | locs = multipartite_layout(graph, [(0, (k-1)*deltaz)=>s for (k, s) in enumerate(sets)]; C) 66 | return vcat(map(loc->map(x->(x[1] * xyratio, x[2]), loc), locs)...)
67 | end 68 | 69 | cgraph = connect_by_hamming_distance(vcat(configs.coeffs[3].data, configs.coeffs[1].data)) 70 | nc1, nc2 = length(configs.coeffs[3].data), length(configs.coeffs[1].data) 71 | locs_zstack = zstack_layout(cgraph, [1:nc1, nc1+1:nc1+nc2]; deltaz=3) 72 | LuxorGraphPlot.show_graph(cgraph; locs=locs_zstack, vertex_color="red", vertex_size=0.1) -------------------------------------------------------------------------------- /SimpleTensorNetwork/src/SimpleTensorNetwork.jl: -------------------------------------------------------------------------------- 1 | module SimpleTensorNetwork 2 | 3 | using OMEinsum, OMEinsum.LinearAlgebra 4 | using Graphs 5 | using CairoMakie 6 | using NPZ 7 | 8 | export Spinglass, TensorNetwork, OptimizedTensorNetwork 9 | export generate_tensor_network, optimize_tensornetwork, partition_function 10 | export partition_function_exact 11 | 12 | include("tucker.jl") 13 | include("spinglass.jl") 14 | include("sampling.jl") 15 | 16 | end 17 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/src/inference.jl: -------------------------------------------------------------------------------- 1 | r = 20 2 | W = 200 3 | vars = [ 4 | ("A", 0.0, 0.0), ("S", 0.75, 0.0), 5 | ("T", 0.0, 0.3), ("L", 0.5, 0.3), ("B", 1.0, 0.3), 6 | ("E", 0.25, 0.6), ("X", 0.0, 0.9), ("D", 0.75, 0.9)] 7 | @drawsvg begin 8 | origin(200, 0) 9 | nodes = [] 10 | for (t, x, y) in vars 11 | push!(nodes, node(circle, Point(x*W+0.15W, y*W+0.15W), r, :stroke)) 12 | end 13 | for (k, node) in enumerate(nodes) 14 | LuxorGraphPlot.draw_vertex(node, stroke_color="black", 15 | fill_color="white", line_width=2, line_style="solid") 16 | LuxorGraphPlot.draw_text(node.loc, vars[k][1]; fontsize=14, color="black", fontface="") 17 | end 18 | for (i, j) in [(1, 3), (2, 4), (2, 5), (3, 6), (4, 6), (5, 8), (6, 7), (6, 8)] 19 | LuxorGraphPlot.draw_edge(nodes[i], nodes[j], color="black", line_width=2, line_style="solid", arrow=true) 20 | end 21 | end 600 W*1.3 22 | 23 | 24 | eincode = ein"at,ex,sb,sl,tle,ebd,a,s,t,l,b,e,x,d->" 25 | 26 | optimized_eincode = optimize_code(eincode, uniformsize(eincode, 2), TreeSA()) 27 | 28 | contraction_complexity(optimized_eincode, uniformsize(optimized_eincode, 2)) 29 | 30 | function contract(ancillas...) 
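    # Each ancilla is a length-2 vector attached to one network variable:
    # [1.0, 1.0] sums that variable out, while a one-hot vector clamps it to a single state.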
31 | # 0 -> NO 32 | # 1 -> YES 33 | AT = [0.98 0.02; 0.95 0.05] 34 | EX = [0.99 0.01; 0.02 0.98] 35 | SB = [0.96 0.04; 0.88 0.12] 36 | SL = [0.99 0.01; 0.92 0.08] 37 | TLE = zeros(2, 2, 2) 38 | TLE[1,:,:] .= [1.0 0.0; 0.0 1.0] 39 | TLE[2,:,:] .= [0.0 1.0; 0.0 1.0] 40 | EBD = zeros(2, 2, 2) 41 | EBD[1,:,:] .= [0.8 0.2; 0.3 0.7] 42 | EBD[2,:,:] .= [0.2 0.8; 0.05 0.95] 43 | return optimized_eincode(AT, EX, SB, SL, TLE, EBD, ancillas...)[] 44 | end 45 | 46 | 47 | contract([0.0, 1.0], [1.0, 0.0], [1.0, 1.0], # A, S, T 48 | [0.0, 1.0], [1.0, 1.0], # L, B 49 | [1.0, 1.0], # E 50 | [1.0, 1.0], [1.0, 1.0] # X, D 51 | ) 52 | 53 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/src/mnist784_bin_1000.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/SimpleTensorNetwork/src/mnist784_bin_1000.npy -------------------------------------------------------------------------------- /SimpleTensorNetwork/src/spinglass.jl: -------------------------------------------------------------------------------- 1 | # A simple spinglass model: 2 | # H = -\sum_{i,j} J_{ij} \sigma_i \sigma_j + \sum_i h_i \sigma_i 3 | struct Spinglass{T} 4 | graph::SimpleGraph{Int} 5 | J::Vector{T} 6 | h::Vector{T} 7 | end 8 | 9 | struct TensorNetwork{T, LT} 10 | tensors::AbstractArray{T} 11 | ixs::Vector{Vector{LT}} 12 | iy::Vector{LT} 13 | end 14 | 15 | struct OptimizedTensorNetwork{T, ET<:AbstractEinsum} 16 | tensors::AbstractArray{T} 17 | ein::ET 18 | end 19 | 20 | function generate_tensor_network(sg::Spinglass{T}, β; output_indices = Int[]) where T 21 | tensors = AbstractArray[] 22 | ixs = Vector{Int}[] 23 | for (e, Jij) in zip(edges(sg.graph), sg.J) 24 | push!(tensors, spinglass_edgetensor(Jij, T(β))) 25 | push!(ixs, [src(e), dst(e)]) 26 | end 27 | for (v, hi) in zip(vertices(sg.graph), sg.h) 28 | push!(tensors, spinglass_vertextensor(hi, T(β))) 29 | push!(ixs, [v]) 30 | end 31 | return TensorNetwork(tensors, ixs, output_indices) 32 | end 33 | spinglass_edgetensor(J::T, β::T) where T = exp.(-β .* [-J J; J -J]) 34 | spinglass_vertextensor(h::T, β::T) where T = exp.(-β .* [h, -h]) 35 | 36 | function optimize_tensornetwork(tnet::TensorNetwork; optimizer=TreeSA()) 37 | eincode = DynamicEinCode(tnet.ixs, tnet.iy) 38 | optimized_eincode = optimize_code(eincode, uniformsize(eincode, 2), optimizer, MergeVectors()) 39 | return OptimizedTensorNetwork(tnet.tensors, optimized_eincode) 40 | end 41 | 42 | function partition_function(sg::Spinglass, β; optimizer=TreeSA()) 43 | @debug "generating the tensor network" 44 | tnet = generate_tensor_network(sg, β) 45 | @debug "optimize the contraction order with: $optimizer" 46 | optnet = optimize_tensornetwork(tnet; optimizer) 47 | @debug "contract the tensor network" 48 | return optnet.ein(tnet.tensors...)[] 49 | end 50 | 51 | function partition_function_exact(sg::Spinglass, β) 52 | Z = 0.0 53 | for σ in Iterators.product(fill([-1, 1], nv(sg.graph))...) 
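        # brute-force enumeration of all 2^N spin configurations; only feasible for small
        # graphs, and used as a correctness check for the tensor-network contraction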
54 | E = 0.0 55 | for (e, Jij) in zip(edges(sg.graph), sg.J) 56 | srcv, dstv = src(e), dst(e) 57 | E -= Jij * σ[srcv] * σ[dstv] 58 | end 59 | for (v, hi) in zip(vertices(sg.graph), sg.h) 60 | E += hi * σ[v] 61 | end 62 | Z += exp(-β * E) 63 | end 64 | return Z 65 | end -------------------------------------------------------------------------------- /SimpleTensorNetwork/src/tucker.jl: -------------------------------------------------------------------------------- 1 | function tucker_movefirst(X::AbstractArray{T, N}, Us, k::Int) where {N, T} 2 | Ak = X 3 | for i=1:N 4 | # move i-th dimension to the first 5 | if i!=1 6 | pm = collect(1:N) 7 | pm[1], pm[i] = pm[i], pm[1] 8 | Ak = permutedims(Ak, pm) 9 | end 10 | if i != k 11 | # multiply Uk on the i-th dimension 12 | remain = size(Ak)[2:end] 13 | Ak = Us[i]' * reshape(Ak, size(Ak, 1), :) 14 | Ak = reshape(Ak, size(Ak, 1), remain...) 15 | end 16 | end 17 | A_ = permutedims(Ak, (2:N..., 1)) 18 | return permutedims(A_, (k, setdiff(1:N, k)...)) 19 | end 20 | function tucker_project(X::AbstractArray{T, N}, Us; inverse=false) where {N, T} 21 | Ak = X 22 | for i=1:N 23 | # move i-th dimension to the first 24 | if i!=1 25 | pm = collect(1:N) 26 | pm[1], pm[i] = pm[i], pm[1] 27 | Ak = permutedims(Ak, pm) 28 | end 29 | remain = size(Ak)[2:end] 30 | Ak = (inverse ? Us[i] : Us[i]') * reshape(Ak, size(Ak, 1), :) 31 | Ak = reshape(Ak, size(Ak, 1), remain...) 32 | end 33 | return permutedims(Ak, (2:N..., 1)) 34 | end 35 | 36 | function tucker_decomp(X::AbstractArray{T,N}, rs::Vector{Int}; nrepeat::Int) where {T, N} 37 | # the first sweep, to generate U_k 38 | Us = [Matrix{T}(I, size(X, i), size(X, i)) for i=1:N] 39 | Ak = X 40 | for n=1:nrepeat 41 | for i=1:N 42 | Ak = tucker_movefirst(X, Us, i) 43 | ret = svd(reshape(Ak, size(Ak, 1), :)) 44 | Us[i] = ret.U[:,1:rs[i]] 45 | end 46 | Ak = permutedims(Ak, (2:N..., 1)) 47 | dist = norm(tucker_project(tucker_project(X, Us), Us; inverse=true) .- X) 48 | @info "The Frobenius norm distance is: $dist" 49 | end 50 | return tucker_project(X, Us), Us 51 | end 52 | 53 | # X = randn(20, 10, 15); 54 | 55 | # Cor, Us = tucker_decomp(X, [4, 5, 6]; nrepeat=10) 56 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using SimpleTensorNetwork 2 | using Test 3 | 4 | @testset "spinglass" begin 5 | include("spinglass.jl") 6 | end 7 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/test/sampling.jl: -------------------------------------------------------------------------------- 1 | using Test, SimpleTensorNetwork 2 | using SimpleTensorNetwork: MPS, random_mps, right_canonicalize!, left_canonicalize!, get_psi, train, show_imgs, load_mnist, bond_dims, gen_samples, generate_images, plot_distribution 3 | using LinearAlgebra 4 | 5 | @testset "random mps" begin 6 | mps = random_mps(Float32, 784, 30) 7 | @test length(mps.tensors) == 784 8 | @test count(size(i) == (30, 2, 30) for i in mps.tensors) == 782 9 | @test bond_dims(mps) == [fill(30, 783)..., 1] 10 | end 11 | 12 | @testset "canonicalize" begin 13 | nsite = 20 14 | mps = random_mps(Float32, nsite, 30) 15 | mps2 = right_canonicalize!(deepcopy(mps)) 16 | mps3 = left_canonicalize!(deepcopy(mps)) 17 | data = rand(0:1, 11, nsite) 18 | psi1 = normalize!(vec(get_psi(mps, data))) 19 | psi2 = normalize!(vec(get_psi(mps2, data))) 20 | @test psi1 ≈ psi2 21 | psi3 = 
normalize!(vec(get_psi(mps3, data))) 22 | @test psi1 ≈ psi3 23 | end 24 | 25 | @testset "load data" begin 26 | m = 20 # number of images 27 | data = load_mnist()[1:m, :] # Load and slice first m rows 28 | @test size(data) == (m, 784) 29 | fig = show_imgs(data, 2, 10) 30 | @test fig isa SimpleTensorNetwork.CairoMakie.Figure 31 | end 32 | 33 | @testset "sampling" begin 34 | mps = random_mps(Float32, 42, 30) 35 | samples = gen_samples(mps, 25) 36 | @test size(samples) == (25, 42) 37 | end 38 | 39 | data = load_mnist()[1:25, :] 40 | mps = random_mps(Float32, size(data, 2), 30) 41 | results = train(mps, data) 42 | generate_images(results[1]) 43 | plot_distribution(results[1], data) 44 | generate_images(results[2]) 45 | generate_images(results[3]) 46 | generate_images(results[4]) 47 | generate_images(results[5]) 48 | plot_distribution(results[5], data) 49 | -------------------------------------------------------------------------------- /SimpleTensorNetwork/test/spinglass.jl: -------------------------------------------------------------------------------- 1 | using Test, SimpleTensorNetwork, SimpleTensorNetwork.Graphs, SimpleTensorNetwork.OMEinsum 2 | 3 | @testset "spinglass" begin 4 | sg = Spinglass(smallgraph(:petersen), ones(15), zeros(10)) 5 | β = 0.1 6 | tn = generate_tensor_network(sg, β) 7 | @test length(tn.tensors) == 25 8 | opttn = optimize_tensornetwork(tn) 9 | @test length(opttn.tensors) == 25 10 | @test opttn.ein isa OMEinsum.SlicedEinsum 11 | @test partition_function(sg, β) ≈ partition_function_exact(sg, β) 12 | end -------------------------------------------------------------------------------- /SpinDynamics/Project.toml: -------------------------------------------------------------------------------- 1 | name = "SpinDynamics" 2 | uuid = "14ef36e5-90ab-499e-a97f-72c4e36abdb5" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" 10 | 11 | [compat] 12 | Graphs = "1.12.0" 13 | LinearAlgebra = "1.11.0" 14 | StaticArrays = "1.9.13" 15 | julia = "1.6.7" 16 | 17 | [extras] 18 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 19 | 20 | [targets] 21 | test = ["Test"] 22 | -------------------------------------------------------------------------------- /SpinDynamics/README.md: -------------------------------------------------------------------------------- 1 | # SpinDynamics 2 | 3 | This package provides tools for simulating the dynamics of classical spin systems using numerical integration methods. It allows researchers and students to explore magnetic phenomena at the atomistic level. 4 | 5 | ## Contents 6 | 7 | - Landau-Lifshitz-Gilbert equation for 3D spin dynamics simulation based on the work of [^Tsai2008]. 8 | ```math 9 | \frac{d\mathbf{m}_i}{dt} = \gamma \left( \mathbf{m}_i \times \mathbf{H}_i + \alpha \left( \mathbf{m}_i \times \frac{d\mathbf{m}_i}{dt} \right) \right) 10 | ``` 11 | where $\mathbf{m}_i$ is the magnetization vector, $\mathbf{H}_i = \sum_{j} J_{ij} \mathbf{m}_j + \mathbf{h}_i$ is the effective field, $\gamma$ is the gyromagnetic ratio, and $\alpha$ is the damping constant. 12 | - Simulated bifurcation for finding the energy minimum of a spin glass model on a graph $G = (V, E)$[^Goto2021]. 
13 | ```math 14 | V_{\rm aSB} = \sum_{i \in V} \frac{x_i^4}{4} + \frac{a}{2} x_i^2 - c_0 \sum_{(i,j) \in E} J_{ij} x_i x_j\\ 15 | V_{\rm bSB} = \sum_{i \in V} \frac{a}{2} x_i^2 - c_0 \sum_{(i,j) \in E} J_{ij} x_i x_j\\ 16 | V_{\rm dSB} = \sum_{i \in V} \frac{a}{2} x_i^2 - c_0 \sum_{(i,j) \in E} J_{ij} (x_i \mathrm{sign}(x_j) + x_j \mathrm{sign}(x_i)) 17 | ``` 18 | where $x_i$ is the spin variable, $a$ is the bifurcation parameter, $c_0$ is the coupling strength, and $J_{ij}$ is the coupling matrix. For $\mathrm{aSB}$, $a$ ramps from $1$ to $-1$, while for $\mathrm{bSB}$ and $\mathrm{dSB}$, $a$ ramps from $1$ to $0$. 19 | 20 | ## To run 21 | 22 | Clone the repository to your local machine and install the required packages (in a terminal): 23 | 24 | ```bash 25 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 26 | $ cd ScientificComputingDemos 27 | $ make init-SpinDynamics # initialize the environment in SpinDynamics and SpinDynamics/examples 28 | $ make example-SpinDynamics # run the script SpinDynamics/examples/main.jl 29 | ``` 30 | 31 | ## References 32 | [^Tsai2008]: Tsai, S.-H., Landau, D.P., 2008. Spin Dynamics: An Atomistic Simulation Tool for Magnetic Systems. Computing in Science & Engineering 10, 72–79. https://doi.org/10.1109/MCSE.2008.12 33 | [^Goto2021]: Goto, H., Endo, K., Suzuki, M., Sakai, Y., Kanao, T., Hamakawa, Y., Hidaka, R., Yamasaki, M., Tatsumura, K., 2021. High-performance combinatorial optimization based on classical mechanics. Science Advances 7, eabe7953. https://doi.org/10.1126/sciadv.abe7953 34 | -------------------------------------------------------------------------------- /SpinDynamics/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 4 | SpinDynamics = "14ef36e5-90ab-499e-a97f-72c4e36abdb5" 5 | -------------------------------------------------------------------------------- /SpinDynamics/src/SpinDynamics.jl: -------------------------------------------------------------------------------- 1 | module SpinDynamics 2 | 3 | using LinearAlgebra, StaticArrays, Graphs 4 | 5 | export simulate!, ClassicalSpinSystem, random_spins, TrotterSuzuki, TimeDependent, energy 6 | export SimulatedBifurcation, SimulatedBifurcationState, simulate_bifurcation! 
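# simulation.jl: Landau-Lifshitz-Gilbert spin dynamics integrated with Trotter-Suzuki splitting
# simulated_bifurcation.jl: the aSB/bSB/dSB solvers described in the README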
7 | 8 | include("simulation.jl") 9 | include("simulated_bifurcation.jl") 10 | 11 | end -------------------------------------------------------------------------------- /SpinDynamics/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using SpinDynamics 2 | using Test 3 | 4 | @testset "Spin dynamics" begin 5 | include("simulation.jl") 6 | end 7 | 8 | @testset "Simulated bifurcation" begin 9 | include("simulated_bifurcation.jl") 10 | end -------------------------------------------------------------------------------- /SpinDynamics/test/simulated_bifurcation.jl: -------------------------------------------------------------------------------- 1 | using SpinDynamics, Test, Graphs 2 | 3 | @testset "SimulatedBifurcation" begin 4 | g = smallgraph(:petersen) 5 | for KIND in (:aSB, :bSB, :dSB) 6 | sys = SimulatedBifurcation{KIND}(1.0, 0.2, g, randn(ne(g))) 7 | x = randn(nv(g)) 8 | f = SpinDynamics.force(sys, x) 9 | δx = randn(nv(g)) * 1e-5 10 | engdiff = SpinDynamics.potential_energy(sys, x + δx/2) - SpinDynamics.potential_energy(sys, x - δx/2) 11 | @test isapprox(sum(f .* δx), -engdiff, rtol=1e-3) 12 | end 13 | @test SimulatedBifurcation{:aSB}(g, randn(ne(g))) isa SimulatedBifurcation{Float64, :aSB} 14 | end 15 | 16 | @testset "simulate_bifurcation!" begin 17 | g = smallgraph(:petersen) 18 | sys = SimulatedBifurcation{:aSB}(g, randn(ne(g))) 19 | state = SimulatedBifurcationState(randn(nv(g)), randn(nv(g))) 20 | simulate_bifurcation!(state, sys; nsteps=100, dt=0.01, clamp=true) 21 | @test length(state.x) == nv(g) 22 | @test length(state.p) == nv(g) 23 | @test all(x -> x >= -1 && x <= 1, state.x) 24 | end 25 | -------------------------------------------------------------------------------- /SpinDynamics/test/simulation.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using SpinDynamics: ClassicalSpinSystem, simulate!, greedy_coloring, is_valid_coloring, partite_edges, single_spin_dynamics_operator, single_spin_dynamics, SVector, TrotterSuzuki, random_spins, TimeDependent, SVector 3 | using Graphs 4 | 5 | @testset "Spin dynamics" begin 6 | topology = grid((3, 3)) 7 | sys = ClassicalSpinSystem(topology, ones(ne(topology))) 8 | spins = [SVector(1.0, 0.0, 0.0) for _ in 1:nv(sys.topology)] 9 | state, history = simulate!(spins, sys; nsteps=100, dt=0.1, checkpoint_steps=10, algorithm=TrotterSuzuki{2}(topology)) 10 | @test state ≈ [SVector(1.0, 0.0, 0.0) for _ in 1:nv(sys.topology)] 11 | @test length(history) == 10 12 | @test length(state) == 9 13 | end 14 | 15 | @testset "Greedy coloring" begin 16 | g = grid((10, 10)) 17 | coloring = greedy_coloring(g) 18 | @test is_valid_coloring(g, coloring) 19 | @test length(unique(coloring)) <= 5 20 | eparts = partite_edges(g) 21 | @test length(eparts) <= 4 22 | 23 | g = path_graph(10) 24 | eparts = partite_edges(g) 25 | @test length(eparts) <= 2 26 | end 27 | 28 | @testset "single spin dynamics" begin 29 | #s = SVector(randn(), randn(), randn()) 30 | s = SVector(1.0, 0.0, 0.0) 31 | field = SVector(randn(), randn(), randn()) 32 | op = single_spin_dynamics_operator(field) 33 | @test op * s ≈ single_spin_dynamics(field, s) 34 | end 35 | 36 | @testset "time dependent" begin 37 | J = TimeDependent(zeros(ne(grid((3, 3)))), (J, t) -> (J .= 0.0)) 38 | h = TimeDependent(fill(SVector(0.0, 0.0, 0.0), nv(grid((3, 3)))), (h, t) -> (h .= Ref(t * SVector(0.0, 0.0, 1.0)))) 39 | topology = grid((3, 3)) 40 | sys = ClassicalSpinSystem(topology, J, h) 41 | spins = [SVector(0.0, 0.0, 
1.0) for _ in 1:nv(sys.topology)] 42 | E0 = energy(SpinDynamics.instantiate(sys, 0.0), spins) 43 | @test E0 ≈ 0.0 atol=1e-6 44 | state, history = simulate!(spins, sys; nsteps=100, dt=0.1, checkpoint_steps=10, algorithm=TrotterSuzuki{2}(topology)) 45 | E1 = energy(SpinDynamics.instantiate(sys, 10.0), state) 46 | @test E1 ≈ 90.0 atol=1e-6 47 | 48 | 49 | h = TimeDependent(fill(SVector(0.0, 0.0, 0.0), nv(grid((3, 3)))), (h, t) -> (h .= Ref(SVector(t * -1.0/10, 0.0, -1.0/10 * (10 - t))))) 50 | sys = ClassicalSpinSystem(topology, J, h) 51 | spins = [SVector(0.0, 0.0, 1.0) for _ in 1:nv(sys.topology)] 52 | E0 = energy(SpinDynamics.instantiate(sys, 0.0), spins) 53 | E0b = energy(SpinDynamics.instantiate(sys, 10.0), spins) 54 | @test E0 ≈ -9.0 atol=1e-6 55 | @test E0b ≈ 0.0 atol=1e-6 56 | state, history = simulate!(spins, sys; nsteps=100, dt=0.1, checkpoint_steps=10, algorithm=TrotterSuzuki{2}(topology)) 57 | @test energy(SpinDynamics.instantiate(sys, 10.0), state) ≈ -9.0 atol=1e-6 58 | @test energy(SpinDynamics.instantiate(sys, 0.0), state) ≈ 0.0 atol=1e-2 59 | end -------------------------------------------------------------------------------- /Spinglass/Project.toml: -------------------------------------------------------------------------------- 1 | name = "Spinglass" 2 | uuid = "0a303999-d7af-4ce6-be35-aa14885e5515" 3 | authors = ["GiggleLiu"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" 8 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 9 | ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416" 10 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 11 | 12 | [weakdeps] 13 | CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" 14 | 15 | [extensions] 16 | CUDAExt = "CUDA" 17 | 18 | [compat] 19 | CUDA = "5.7" 20 | DelimitedFiles = "1" 21 | Graphs = "1" 22 | ProblemReductions = "0.3.1" 23 | julia = "1" 24 | 25 | [extras] 26 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 27 | 28 | [targets] 29 | test = ["Test", "CUDA"] 30 | -------------------------------------------------------------------------------- /Spinglass/README.md: -------------------------------------------------------------------------------- 1 | # Spinglass 2 | 3 | The spin-glass model is a model of a disordered magnet, where the spins are coupled to each other in a random way. 4 | Let $G=(V,E)$ be a graph, where $V$ is the set of vertices and $E$ is the set of edges. The Hamiltonian of the model is given by: 5 | 6 | ```math 7 | H = \sum_{(i,j)\in E}J_{ij}\sigma_i\sigma_j + \sum_{i\in V}h_i\sigma_i, 8 | ``` 9 | where $J_{ij}$ and $h_i$ are the coupling constants and the external fields, respectively, $\sigma_i\in\{-1,1\}$ are the spins, and the sum is over all edges of the graph. 10 | 11 | ## Contents 12 | 13 | This repository focuses on solving the ground state of the spin-glass model on a 3-regular graph. We provide the following methods: 14 | 15 | 1. Reducing a circuit satisfiability problem to a spin-glass problem.[^Nguyen2023][^Glover2019] 16 | 2. 
Simulated annealing for solving a spin-glass ground state problem.[^Cain2023][^SSSS] 17 | 18 | ## To run 19 | 20 | Clone the repository to your local machine and install the required packages (in a terminal): 21 | 22 | ```bash 23 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 24 | $ cd ScientificComputingDemos 25 | $ make init-Spinglass # initialize the environment in Spinglass and Spinglass/examples 26 | $ make example-Spinglass # run the script Spinglass/examples/main.jl 27 | ``` 28 | 29 | ## To run the CUDA example 30 | 31 | ```bash 32 | $ julia --project=Spinglass/examples Spinglass/examples/cuda.jl 33 | ``` 34 | 35 | ## References 36 | [^SSSS]: Deep Learning and Quantum Programming: A Spring School, https://github.com/QuantumBFS/SSSS 37 | [^Cain2023]: Cain, M., et al. "Quantum speedup for combinatorial optimization with flat energy landscapes (2023)." arXiv preprint arXiv:2306.13123. 38 | [^Nguyen2023]: Nguyen, Minh-Thi, et al. "Quantum optimization with arbitrary connectivity using Rydberg atom arrays." PRX Quantum 4.1 (2023): 010316. 39 | [^Glover2019]: Glover, Fred, Gary Kochenberger, and Yu Du. "Quantum Bridge Analytics I: a tutorial on formulating and using QUBO models." 4or 17.4 (2019): 335-371. 40 | -------------------------------------------------------------------------------- /Spinglass/examples/2_spin_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/Spinglass/examples/2_spin_plot.png -------------------------------------------------------------------------------- /Spinglass/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" 3 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 4 | GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" 5 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 6 | ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416" 7 | Spinglass = "0a303999-d7af-4ce6-be35-aa14885e5515" 8 | -------------------------------------------------------------------------------- /Spinglass/examples/cuda.jl: -------------------------------------------------------------------------------- 1 | using Spinglass, Test 2 | using CUDA 3 | 4 | @info "#### Example: GPU-accelerated Simulated Annealing with CUDA ####" 5 | 6 | # Load a spin glass problem from file 7 | filename = pkgdir(Spinglass, "data", "example.txt") 8 | sap = load_spinglass(filename) 9 | @info "Loaded spinglass from: $filename, number of spins = $(Spinglass.num_variables(sap))" 10 | 11 | # Define temperature schedule for annealing 12 | # Starting at 10 and decreasing by 0.15 for each of the 64 steps 13 | tempscales = 10 .- (1:64 .- 1) .* 0.15 |> collect 14 | 15 | # Convert the spin glass problem to run on CUDA GPU 16 | cusap = SpinGlassSA(sap) |> CUDA.cu 17 | @info "Transferred problem to GPU for acceleration" 18 | 19 | # Configure annealing parameters 20 | nrun = 30 # Number of independent runs 21 | nupdate_each_temperature = 4000 # Updates per temperature 22 | 23 | @info """Start GPU annealing: 24 | - Temperatures: from $(tempscales[1]) to $(tempscales[end]) 25 | - Number of updates each temperature: $nupdate_each_temperature 26 | - Number of runs: $nrun 27 | """ 28 | 29 | # Run simulated annealing with: 30 | # - 30 independent runs 31 | # - Temperature schedule on GPU 32 | # - 4000 updates per temperature step 33 
| opt_cost, opt_config = anneal(nrun, cusap, CUDA.CuVector(tempscales), nupdate_each_temperature) 34 | 35 | @info """Annealing results: 36 | - Optimal cost: $opt_cost (known optimal: -3858) 37 | - Optimal configuration found: $(opt_config) 38 | """ 39 | 40 | # Verify we found the known optimal solution 41 | @test opt_cost == -3858 42 | -------------------------------------------------------------------------------- /Spinglass/examples/main.jl: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Example 1: Reduction to Circuit Satisfiability 3 | ##################################################### 4 | @info "#### Example 1: Reduction to Circuit Satisfiability ####" 5 | using Spinglass, Graphs, ProblemReductions, GenericTensorNetworks 6 | 7 | # Demonstrate logic gates as spin glass models 8 | # NOT gate 9 | gadget_not = ProblemReductions.spinglass_gadget(Val(:¬)) 10 | @info "Logic NOT gate is represented by the spinglass model:\n$gadget_not" 11 | gs_not = ProblemReductions.findbest(gadget_not.problem, BruteForce()) 12 | @info "Ground state energy and ground states of the NOT gate:\n$gs_not" 13 | 14 | # AND gate 15 | gadget_and = ProblemReductions.spinglass_gadget(Val(:∧)) 16 | @info "Logic AND gate is represented by the spinglass model:\n$gadget_and" 17 | gs_and = ProblemReductions.findbest(gadget_and.problem, BruteForce()) 18 | @info "Ground state energy and ground states of the AND gate:\n$gs_and" 19 | 20 | # OR gate 21 | gadget_or = ProblemReductions.spinglass_gadget(Val(:∨)) 22 | @info "Logic OR gate is represented by the spinglass model:\n$gadget_or" 23 | gs_or = ProblemReductions.findbest(gadget_or.problem, BruteForce()) 24 | @info "Ground state energy and ground states of the OR gate:\n$gs_or" 25 | 26 | # Compose a 2-bit x 2-bit multiplier 27 | @info "Composing a 2-bit x 2-bit multiplier..." 
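# Pipeline: (1) pose factoring 4 as a 2-bit x 2-bit `Factoring` problem, (2) reduce it to a
# `SpinGlass` instance via `reduction_paths`/`reduceto`, (3) solve the spin glass with the
# generic tensor-network solver, and (4) map the ground state back with `extract_solution`.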
28 | fact = ProblemReductions.Factoring(2, 2, 4) 29 | paths = ProblemReductions.reduction_paths(Factoring, SpinGlass) 30 | mapres = ProblemReductions.reduceto(paths[1], fact) 31 | 32 | solution = ProblemReductions.findbest(target_problem(mapres), GenericTensorNetworks.GTNSolver()) 33 | @assert length(solution) == 1 # the solution is unique in this case 34 | extracted = ProblemReductions.extract_solution(mapres, solution[1]) 35 | @info "Multiplier of 2 bits x 2 bits is represented by the spinglass model:\n$mapres" 36 | @info "The extracted solution is: $extracted, decoded as $(ProblemReductions.read_solution(fact, extracted)), the multiplication of which should be $(fact.input)" 37 | 38 | ##################################################### 39 | # Example 2: Simulated Annealing Method (approximate) 40 | ##################################################### 41 | @info "#### Example 2: Simulated Annealing for Spinglass solving ####" 42 | # Load a spin glass problem from file 43 | filename = pkgdir(Spinglass, "data", "example.txt") 44 | sap = load_spinglass(filename) 45 | @info "Loaded spinglass from: $filename, number of spins = $(ProblemReductions.num_variables(sap))" 46 | 47 | # Configure annealing parameters 48 | tempscales = 10 .- (1:64 .- 1) .* 0.15 # Temperature schedule 49 | nupdate_each_temperature = 4000 # Updates per temperature 50 | nrun = 30 # Number of independent runs 51 | 52 | @info """Start annealing: 53 | - Temperatures: $(tempscales) 54 | - Number of updates each temperature: $nupdate_each_temperature 55 | - Number of runs: $nrun 56 | """ 57 | 58 | # Run simulated annealing 59 | opt_cost, opt_config = anneal(nrun, sap, collect(tempscales), nupdate_each_temperature) 60 | 61 | @info """Annealing results: 62 | - Optimal cost: $opt_cost (known optimal: -3858) 63 | - Optimal configuration: $opt_config 64 | """ -------------------------------------------------------------------------------- /Spinglass/examples/mis.jl: -------------------------------------------------------------------------------- 1 | using DelimitedFiles # for file reading 2 | 3 | # load graph 4 | graphsize = 8 5 | graph_index = 100 6 | graph_mask = reshape(Bool.(readdlm("mis_degeneracy_L$(graphsize).dat")[graph_index+1, 4:end]), (graphsize, graphsize)) 7 | graph = unit_disk_grid_graph(graph_mask) 8 | 9 | # load the MIS size 10 | MIS_size = Int(readdlm("mis_degeneracy_L$(graphsize).dat")[graph_index+1, 1]) 11 | 12 | # run!
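# anneal towards the known optimum: `-(MIS_size - 1.0)` is the target energy and 20000 the
# step budget; see `src/mis_sa.jl` for the definition of `track_equilibration!`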
13 | track_equilibration!(SimulatedAnnealingMIS(graph), -(MIS_size - 1.0), 20000) -------------------------------------------------------------------------------- /Spinglass/examples/solutionspace.jl: -------------------------------------------------------------------------------- 1 | using Spinglass 2 | 3 | using Random 4 | Random.seed!(2) 5 | tempscales = 10 .- ((1:64) .- 1) .* 0.15 |> collect 6 | sap = load_spinglass(pkgdir(Spinglass, "data", "example.txt")) 7 | 8 | @time anneal(30, sap, tempscales, 4000) 9 | 10 | using LuxorGraphPlot 11 | function multipartite_layout(graph, sets; C=2.0) 12 | locs = Vector{Tuple{Float64, Float64}}[] 13 | for (meanloc, set) in sets 14 | gi, = Graphs.induced_subgraph(graph, set) 15 | xs, ys = LuxorGraphPlot.spring_layout(gi; C) 16 | f = nv(gi)^1.5/1000 17 | push!(locs, map(xs, ys) do x, y 18 | (f * x + meanloc[1], f * y + meanloc[2]) 19 | end) 20 | end 21 | locs 22 | end 23 | 24 | function zstack_layout(graph, sets; C=2.0, xyratio=3, deltaz=10.0) 25 | @show [(0, -(k-1)*deltaz)=>s for (k, s) in enumerate(sets)] 26 | locs = multipartite_layout(graph, [(0, (k-1)*deltaz)=>s for (k, s) in enumerate(sets)]; C) 27 | return vcat(map(loc->map(x->(x[1] * xyratio, x[2]), loc), locs)...) 28 | end 29 | 30 | configs = solve() 31 | cgraph = connect_by_hamming_distance(vcat(configs.coeffs[3].data, configs.coeffs[1].data)) 32 | nc1, nc2 = length(configs.coeffs[3].data), length(configs.coeffs[1].data) 33 | locs_zstack = zstack_layout(cgraph, [1:nc1, nc1+1:nc1+nc2]; deltaz=3) 34 | LuxorGraphPlot.show_graph(cgraph; locs=locs_zstack, vertex_color="red", vertex_size=0.1) -------------------------------------------------------------------------------- /Spinglass/examples/tropical_tensor_network.jl: -------------------------------------------------------------------------------- 1 | using GenericTensorNetworks, GenericTensorNetworks.Graphs, GenericTensorNetworks.OMEinsum 2 | using GenericTensorNetworks.LuxorGraphPlot 3 | using Random; Random.seed!(42) 4 | 5 | # The 3-regular graph 6 | n = parse(Int, get(ENV, "NV", "100")) 7 | graph = Graphs.random_regular_graph(n, 3) 8 | @info "We use a 3-regular graph as a demo, which has $(nv(graph)) vertices and $(ne(graph)) edges." 9 | 10 | # Visualize the 3-regular graph 11 | locs = render_locs(graph, Layout(:spring; optimal_distance=100)) 12 | show_graph(graph, locs; format=:png, filename="regular-$n.png") 13 | @info "The demo graph is saved as `regular-$n.png`, optimizing the contraction order..." 14 | 15 | # An anti-ferromagnetic spin glass problem 16 | jsonfile = "regular-$n.json" 17 | J = fill(1, ne(graph)) 18 | if !isfile(jsonfile) 19 | problem = GenericTensorNetwork(SpinGlass(graph, J); optimizer=TreeSA()) 20 | # save the tensor network as a JSON file 21 | writejson(jsonfile, problem.code) 22 | @info "The tensor network topology is saved as `$jsonfile`" 23 | else 24 | @info "loading optimized contraction order from $jsonfile" 25 | code = readjson(jsonfile) 26 | h = ZeroWeight() 27 | problem = GenericTensorNetwork(SpinGlass(graph, J, h), code, Dict{Int, Int}()) 28 | end 29 | 30 | @info "We consider an anti-ferromagnetic spin glass problem on the 3-regular graph. Its coupling constants are J = $(problem.problem.weights[1:ne(graph)])." 31 | # The output is a tensor network with optimized contraction order.
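# `contraction_complexity` below reports the time/space cost of this order on a log2 scale.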
32 | @info "The contraction order is $(problem.code)" 33 | @info "The contraction complexity is $(contraction_complexity(problem))" 34 | 35 | show_einsum(problem.code; optimal_distance=100, format=:png, filename="regular-$(n)_einsum.png") 36 | @info "The tensor network diagram is saved as `regular-$(n)_einsum.png`" 37 | 38 | # The lowest energy of the spin glass problem. 39 | Emin = solve(problem, SizeMin())[] 40 | @info "The ground state energy is: $Emin" 41 | 42 | # the tensor labels are: 43 | labels = OMEinsum.getixsv(problem.code) 44 | 45 | # the ground state configuration 46 | ground_state = solve(problem, SingleConfigMin(; bounded=true))[].c.data 47 | Emin_verify = spinglass_energy(graph, ground_state; J) 48 | @info "The ground state is $ground_state and the energy is $Emin_verify" 49 | 50 | show_graph(graph, locs; 51 | vertex_colors=[iszero(ground_state[i]) ? "white" : "red" for i=1:nv(graph)], 52 | format=:png, filename="regular-$(n)_ground_state.png") 53 | @info "The ground state is saved as `regular-$(n)_ground_state.png`" 54 | -------------------------------------------------------------------------------- /Spinglass/ext/CUDAExt.jl: -------------------------------------------------------------------------------- 1 | module CUDAExt 2 | 3 | using CUDA, Spinglass 4 | using Spinglass: SpinGlassSA 5 | using CUDA.GPUArrays: @kernel, get_backend, @index 6 | 7 | # upload the coupling matrix to the GPU 8 | CUDA.cu(sa::SpinGlassSA) = SpinGlassSA(CUDA.CuArray(sa.coupling)) 9 | cpu(sa::SpinGlassSA) = SpinGlassSA(Matrix(sa.coupling)) 10 | 11 | struct BatchedSpinConfig{T1, T2, MT1<:AbstractMatrix{T1}, MT2<:AbstractMatrix{T2}} 12 | config::MT1 13 | field::MT2 14 | end 15 | 16 | function Spinglass.anneal(nrun::Int, prob::SpinGlassSA{TF, <:CuMatrix{TF}}, tempscales::CuVector{TF}, num_update_each_temp::Int) where {TF} 17 | initial_config = [random_config(cpu(prob)) for _ in 1:nrun] 18 | batch_config = BatchedSpinConfig(CUDA.CuArray(hcat(getfield.(initial_config, :config)...)), CUDA.CuArray(hcat(getfield.(initial_config, :field)...))) 19 | anneal_run!(batch_config, prob, tempscales, num_update_each_temp) 20 | cpu_config = BatchedSpinConfig(Matrix(batch_config.config), Matrix(batch_config.field)) 21 | eng, idx = findmin(i -> Spinglass.energy(SpinConfig(cpu_config.config[:, i], cpu_config.field[:, i]), cpu(prob)), 1:nrun) 22 | return eng, SpinConfig(cpu_config.config[:, idx], cpu_config.field[:, idx]) 23 | end 24 | 25 | function anneal_run!(config::BatchedSpinConfig{TI, TF, <:CuMatrix{TI}, <:CuMatrix{TF}}, prob::SpinGlassSA{TF, <:CuMatrix{TF}}, tempscales::CuVector{TF}, num_update_each_temp::Int) where {TI, TF} 26 | @kernel function kernel(config, field, coupling) 27 | ibatch = @index(Global, Linear) 28 | for temp in tempscales 29 | beta = inv(temp) 30 | for _ = 1:num_update_each_temp # single instruction multiple data, see the Julia performance tips. 31 | proposal, ΔE = propose(config, field, coupling, ibatch) 32 | if exp(-beta*ΔE) > CUDA.Random.rand() #accept 33 | flip!(config, field, proposal, coupling, ibatch) 34 | end 35 | end 36 | end 37 | end 38 | # we only parallelize over the batch size, not the spin number. 39 | kernel(get_backend(config.config))(config.config, config.field, prob.coupling; ndrange=size(config.config, 2)) 40 | end 41 | 42 | 43 | @inline function propose(config, field, coupling, ibatch::Int) # omit the argument name, since it is not used.
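    # `field[i, ibatch]` caches the local field sum_j coupling[i, j] * config[j, ibatch], so the
    # energy change of a single-spin flip costs O(1); `flip!` keeps this cache consistent.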
44 | ispin = CUDA.Random.rand(1:size(coupling, 1)) 45 | ΔE = -field[ispin, ibatch] * config[ispin, ibatch] * 4 # 2 for spin change, 2 for mutual energy. 46 | ispin, ΔE 47 | end 48 | 49 | @inline function flip!(config, field, ispin::Int, coupling, ibatch::Int) 50 | @inbounds config[ispin, ibatch] = -config[ispin, ibatch] # @inbounds can remove boundary check, and improve performance 51 | # update the field 52 | for i=1:size(coupling, 1) 53 | @inbounds field[i, ibatch] += 2 * config[ispin, ibatch] * coupling[i,ispin] 54 | end 55 | end 56 | 57 | @info "`CUDAExt` (for `Spinglass`) is loaded successfully." 58 | 59 | end 60 | -------------------------------------------------------------------------------- /Spinglass/src/Spinglass.jl: -------------------------------------------------------------------------------- 1 | module Spinglass 2 | 3 | using Random 4 | using DelimitedFiles, Graphs, ProblemReductions 5 | 6 | export load_spinglass, random_config, anneal 7 | export SpinConfig, SpinglassModel, SpinGlassSA 8 | 9 | include("simulated_annealing.jl") 10 | include("mis_sa.jl") 11 | include("dynamics.jl") 12 | 13 | end 14 | -------------------------------------------------------------------------------- /Spinglass/test/CUDAExt.jl: -------------------------------------------------------------------------------- 1 | using Spinglass, Test, CUDA 2 | 3 | @testset "anneal" begin 4 | sap = load_spinglass(pkgdir(Spinglass, "data", "example.txt")) 5 | tempscales = 10 .- (1:64 .- 1) .* 0.15 |> collect 6 | cusap = SpinGlassSA(sap) |> CUDA.cu 7 | opt_cost, opt_config = anneal(30, cusap, CUDA.CuVector(tempscales), 4000) 8 | @test opt_cost == -3858 9 | end -------------------------------------------------------------------------------- /Spinglass/test/dynamics.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using Spinglass: Transverse, iterate_T 3 | 4 | @testset "Transverse Model Tests" begin 5 | # Create a simple test matrix 6 | J = [0.0 1.0; 1.0 0.0] 7 | 8 | # Set up parameters 9 | n_step = 100 10 | trials = 1 11 | beta = exp.(range(log(0.1), log(10), length=n_step)) 12 | 13 | # Test 1: Gradient descent only (g=1, gama=0) 14 | @testset "Gradient Descent Configuration" begin 15 | model_gd = Transverse( 16 | J, beta, trials, 17 | gama=0.0, g=1.0, a_set=2.0, 18 | Delta_t=0.3, c0=0.2, 19 | seed=42, track_energy=true 20 | ) 21 | energy_gd, track_gd = iterate_T(model_gd) 22 | 23 | @test size(energy_gd) == (trials, n_step) 24 | @test size(track_gd) == (n_step+1, size(J, 1)) 25 | @test all(track_gd .>= -1.0) && all(track_gd .<= 1.0) 26 | end 27 | 28 | # Test 2: Momentum only (g=0, gama=1) 29 | @testset "Momentum Configuration" begin 30 | model_mom = Transverse( 31 | J, beta, trials, 32 | gama=1.0, g=0.0, a_set=2.0, 33 | Delta_t=0.3, c0=0.2, 34 | seed=42, track_energy=true 35 | ) 36 | energy_mom, track_mom = iterate_T(model_mom) 37 | 38 | @test size(energy_mom) == (trials, n_step) 39 | @test size(track_mom) == (n_step+1, size(J, 1)) 40 | @test all(track_mom .>= -1.0) && all(track_mom .<= 1.0) 41 | end 42 | 43 | # Test 3: Combined approach (g=0.07, gama=1) 44 | @testset "Combined Approach" begin 45 | model_comb = Transverse( 46 | J, beta, trials, 47 | gama=1.0, g=0.07, a_set=2.0, 48 | Delta_t=0.3, c0=0.2, 49 | seed=42, track_energy=true 50 | ) 51 | energy_comb, track_comb = iterate_T(model_comb) 52 | 53 | @test size(energy_comb) == (trials, n_step) 54 | @test size(track_comb) == (n_step+1, size(J, 1)) 55 | @test all(track_comb .>= -1.0) && all(track_comb .<= 1.0) 56 
| end 57 | 58 | # Test 4: Without energy tracking 59 | @testset "Without Energy Tracking" begin 60 | model_no_track = Transverse( 61 | J, beta, trials, 62 | gama=1.0, g=0.07, a_set=2.0, 63 | Delta_t=0.3, c0=0.2, 64 | seed=42, track_energy=false 65 | ) 66 | final_energy = iterate_T(model_no_track) 67 | 68 | @test size(final_energy) == (1, 1) 69 | @test typeof(final_energy) <: AbstractArray 70 | end 71 | 72 | # Test 5: Different trial counts 73 | @testset "Multiple Trials" begin 74 | multi_trials = 5 75 | model_multi = Transverse( 76 | J, beta, multi_trials, 77 | gama=1.0, g=0.07, a_set=2.0, 78 | Delta_t=0.3, c0=0.2, 79 | seed=42, track_energy=true 80 | ) 81 | energy_multi, _ = iterate_T(model_multi) 82 | 83 | @test size(energy_multi) == (multi_trials, n_step) 84 | @test length(unique(energy_multi[1,:])) > 1 # Energy should change over time 85 | end 86 | end -------------------------------------------------------------------------------- /Spinglass/test/logic_gates.jl: -------------------------------------------------------------------------------- 1 | using Test, Spinglass 2 | using ProblemReductions: findbest, BruteForce, spinglass_gadget, truth_table, @bit_str, Factoring, reduction_paths, reduceto, extract_solution, target_problem, SpinGlass 3 | 4 | @testset "gates" begin 5 | or_gadget = spinglass_gadget(Val(:∨)) 6 | and_gadget = spinglass_gadget(Val(:∧)) 7 | not_gadget = spinglass_gadget(Val(:¬)) 8 | 9 | res = findbest(and_gadget.problem, BruteForce()) 10 | @test sort(res) == [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]] 11 | tt = truth_table(and_gadget) 12 | @test length(tt) == 4 13 | @test tt[bit"00"] == tt[bit"01"] == tt[bit"10"] == bit"0" 14 | @test tt[bit"11"] == bit"1" 15 | 16 | res = findbest(or_gadget.problem, BruteForce()) 17 | @test sort(res) == [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]] 18 | 19 | res = findbest(not_gadget.problem, BruteForce()) 20 | @test sort(res) == [[0, 1], [1, 0]] 21 | end 22 | 23 | @testset "arraymul" begin 24 | arr = spinglass_gadget(Val(:arraymul)) 25 | tt = truth_table(arr) 26 | @test length(tt) == 16 27 | @test tt[bit"0000"] == tt[bit"0001"] == tt[bit"0010"] == bit"00" 28 | @test tt[bit"0100"] == tt[bit"0101"] == tt[bit"0110"] == tt[bit"0011"] == 29 | tt[bit"1000"] == tt[bit"1001"] == tt[bit"1010"] == bit"10" 30 | @test tt[bit"0111"] == tt[bit"1011"] == 31 | tt[bit"1101"] == tt[bit"1110"] == tt[bit"1100"] == bit"01" 32 | @test tt[bit"1111"] == bit"11" 33 | 34 | fact = Factoring(2, 3, 15) 35 | path = reduction_paths(Factoring, SpinGlass) 36 | spin_glass = reduceto(path[1], fact) |> target_problem 37 | @test nv(spin_glass.graph) == 63 38 | end -------------------------------------------------------------------------------- /Spinglass/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using Spinglass 2 | using Test, CUDA 3 | 4 | @testset "simulated_annealing" begin 5 | include("simulated_annealing.jl") 6 | end 7 | 8 | @testset "logic_gates" begin 9 | include("logic_gates.jl") 10 | end 11 | 12 | @testset "dynamics" begin 13 | include("dynamics.jl") 14 | end 15 | 16 | if CUDA.functional() 17 | @testset "CUDAExt" begin 18 | include("CUDAExt.jl") 19 | end 20 | end -------------------------------------------------------------------------------- /Spinglass/test/simulated_annealing.jl: -------------------------------------------------------------------------------- 1 | using Spinglass, Test, Random, Graphs 2 | 3 | @testset "loading" begin 4 | sap = load_spinglass(pkgdir(Spinglass, "data", "example.txt")) 5 | 
@test nv(sap.graph) == 300 6 | sapsa = Spinglass.SpinGlassSA(sap) 7 | @test size(sapsa.coupling) == (300, 300) 8 | end 9 | 10 | @testset "random config" begin 11 | sap = Spinglass.SpinGlassSA(load_spinglass(pkgdir(Spinglass, "data", "example.txt"))) 12 | initial_config = random_config(sap) 13 | @test initial_config.config |> length == 300 14 | @test eltype(initial_config.config) == Int 15 | end 16 | 17 | @testset "anneal" begin 18 | sap = load_spinglass(pkgdir(Spinglass, "data", "example.txt")) 19 | tempscales = 10 .- (1:64 .- 1) .* 0.15 |> collect 20 | opt_cost, opt_config = anneal(30, sap, tempscales, 4000) 21 | @test anneal(30, sap, tempscales, 4000)[1] == -3858 22 | end -------------------------------------------------------------------------------- /SpringSystem/Project.toml: -------------------------------------------------------------------------------- 1 | name = "SpringSystem" 2 | uuid = "5ba23fd3-a36f-499d-8664-5298adf72e9b" 3 | authors = ["GiggleLiu and contributors"] 4 | version = "1.0.0-DEV" 5 | 6 | [deps] 7 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | 10 | [compat] 11 | Graphs = "1" 12 | LinearAlgebra = "1" 13 | julia = "1" 14 | 15 | [extras] 16 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 17 | 18 | [targets] 19 | test = ["Test"] 20 | -------------------------------------------------------------------------------- /SpringSystem/README.md: -------------------------------------------------------------------------------- 1 | # SpringSystem 2 | 3 | This demo implements the physics simulation of spring-mass systems and planet orbits. The main tool we are using is the geometric numerical integration method. The main reference is the book by Hairer[^Hairer2006]. 4 | 5 | ## Contents 6 | - Spring-mass system ([examples/main.jl](examples/main.jl)), a comparative study between the leap-frog based simulation and the exact solution with eigenmodes. 7 | 8 | ## To run 9 | 10 | Clone the repository to your local machine and install the required packages (in a terminal): 11 | 12 | ```bash 13 | $ git clone https://github.com/GiggleLiu/ScientificComputingDemos.git 14 | $ cd ScientificComputingDemos 15 | $ make init-SpringSystem # initialize the environment in SpringSystem and SpringSystem/examples 16 | $ make example-SpringSystem # run the script SpringSystem/examples/main.jl 17 | ``` 18 | 19 | ## References 20 | [^Hairer2006]: Hairer, Ernst, et al. "Geometric numerical integration." Oberwolfach Reports 3.1 (2006): 805-882. 
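As a quick reference, the exported API can be exercised as in the sketch below (it mirrors `test/chain.jl`; the chain length, stiffness, and mass are illustrative):

```julia
using SpringSystem

# a periodic chain of 10 unit masses with small random displacements
c = spring_chain(randn(10) .* 0.1, 1.0, 1.0; periodic=true)  # (displacements, stiffness, mass)
states = leapfrog_simulation(c; dt=0.1, nsteps=100)          # symplectic leapfrog integration
final_positions = coordinate(states[end].sys)                # positions after the last step
```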
21 | -------------------------------------------------------------------------------- /SpringSystem/examples/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 3 | SpringSystem = "5ba23fd3-a36f-499d-8664-5298adf72e9b" 4 | -------------------------------------------------------------------------------- /SpringSystem/examples/spring_sample.jl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GiggleLiu/ScientificComputingDemos/c0ad0c00e5d0bf90ed1167d27d4dffe70c34dea7/SpringSystem/examples/spring_sample.jl -------------------------------------------------------------------------------- /SpringSystem/src/SpringSystem.jl: -------------------------------------------------------------------------------- 1 | module SpringSystem 2 | 3 | using LinearAlgebra, Graphs 4 | 5 | export Point, Point2D, Point3D 6 | export spring_chain, SpringModel, coordinate, velocity, waveat 7 | export leapfrog_simulation, LeapFrogSystem 8 | 9 | include("point.jl") 10 | include("chain.jl") 11 | include("leapfrog.jl") 12 | 13 | end 14 | -------------------------------------------------------------------------------- /SpringSystem/src/leapfrog.jl: -------------------------------------------------------------------------------- 1 | """ 2 | LeapFrogSystem{T, D, SYS<:AbstractHamiltonianSystem{D}} 3 | 4 | The leapfrog system is a symplectic integrator for the Hamiltonian system. 5 | 6 | ### Fields 7 | - `sys` is the Hamiltonian system 8 | - `a` is the acceleration of the system 9 | """ 10 | struct LeapFrogSystem{T, D, SYS<:AbstractHamiltonianSystem{D}} 11 | sys::SYS 12 | a::Vector{Point{D, T}} 13 | function LeapFrogSystem(bds::AbstractHamiltonianSystem, a::Vector{Point{D, T}}) where {T, D} 14 | @assert length(bds) == length(a) 15 | new{T, D, typeof(bds)}(bds, a) 16 | end 17 | end 18 | function LeapFrogSystem(bds::AbstractHamiltonianSystem) 19 | LeapFrogSystem(bds, zero(coordinate(bds))) 20 | end 21 | 22 | # evolve the Hamiltonian system with the leapfrog method for a time step dt 23 | function step!(bdsc::LeapFrogSystem{T}, dt) where T 24 | sys, a = bdsc.sys, bdsc.a 25 | @inbounds for j = 1:length(sys) 26 | drj = dt / 2 * velocity(sys, j) 27 | offset_coordinate!(sys, j, drj) 28 | end 29 | update_acceleration!(a, sys) 30 | @inbounds for j = 1:length(sys) 31 | dvj = dt * a[j] 32 | offset_velocity!(sys, j, dvj) 33 | drj = dt / 2 * velocity(sys, j) 34 | offset_coordinate!(sys, j, drj) 35 | end 36 | return bdsc 37 | end 38 | 39 | # evolve the Hamiltonian system with the leapfrog method for nsteps * dt time, and return the states 40 | function leapfrog_simulation(sys::AbstractHamiltonianSystem; dt, nsteps) 41 | cached_system = LeapFrogSystem(deepcopy(sys)) 42 | states = [deepcopy(cached_system)] 43 | for i=1:nsteps 44 | cached_system = step!(cached_system, dt) 45 | push!(states, deepcopy(cached_system)) 46 | end 47 | return states 48 | end -------------------------------------------------------------------------------- /SpringSystem/src/point.jl: -------------------------------------------------------------------------------- 1 | """ 2 | Point{D, T} 3 | 4 | A point in D-dimensional space, with coordinates of type T. 
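Elementwise `+` and `-`, scalar `*` and `/`, `dot`, `zero`, indexing, and iteration are defined, so a `Point` behaves like a small fixed-size vector.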
5 | 6 | # Examples 7 | ```jldoctest 8 | julia> p1 = Point(1.0, 2.0) 9 | Point{2, Float64}((1.0, 2.0)) 10 | 11 | julia> p2 = Point(3.0, 4.0) 12 | Point{2, Float64}((3.0, 4.0)) 13 | 14 | julia> p1 + p2 15 | Point{2, Float64}((4.0, 6.0)) 16 | ``` 17 | """ 18 | struct Point{D, T <: Real} 19 | data::NTuple{D, T} 20 | end 21 | const Point2D{T} = Point{2, T} 22 | const Point3D{T} = Point{3, T} 23 | Point(x::Real...) = Point((x...,)) 24 | LinearAlgebra.dot(x::Point, y::Point) = mapreduce(*, +, x.data .* y.data) 25 | Base.:*(x::Real, y::Point) = Point(x .* y.data) 26 | Base.:*(x::Point, y::Real) = Point(x.data .* y) 27 | Base.:/(y::Point, x::Real) = Point(y.data ./ x) 28 | Base.:+(x::Point, y::Point) = Point(x.data .+ y.data) 29 | Base.:-(x::Point, y::Point) = Point(x.data .- y.data) 30 | Base.isapprox(x::Point, y::Point; kwargs...) = all(isapprox.(x.data, y.data; kwargs...)) 31 | Base.getindex(p::Point, i::Int) = p.data[i] 32 | Base.broadcastable(p::Point) = p.data 33 | Base.iterate(p::Point, args...) = iterate(p.data, args...) 34 | Base.zero(::Type{Point{D, T}}) where {D, T} = Point(ntuple(i->zero(T), D)) 35 | Base.zero(::Point{D, T}) where {D, T} = Point(ntuple(i->zero(T), D)) 36 | Base.length(p::Point) = length(p.data) 37 | distance(p::Point, q::Point) = sqrt(sum((p - q) .^ 2)) -------------------------------------------------------------------------------- /SpringSystem/test/chain.jl: -------------------------------------------------------------------------------- 1 | using Test, SpringSystem 2 | 3 | @testset "chain dynamics" begin 4 | c = spring_chain(randn(10) .* 0.1, 1.0, 1.0; periodic=true) 5 | @test c isa SpringModel 6 | end 7 | 8 | @testset "leapfrog" begin 9 | c = spring_chain(randn(10) .* 0.1, 1.0, 1.0; periodic=true) 10 | cached = SpringSystem.LeapFrogSystem(c) 11 | newcache = SpringSystem.step!(cached, 0.1) 12 | @test newcache isa SpringSystem.LeapFrogSystem 13 | end 14 | 15 | @testset "eigenmodes" begin 16 | L = 10 17 | C = 4.0 # stiffness 18 | M = 2.0 # mass 19 | c = spring_chain(randn(L) * 0.1, C, M; periodic=true) 20 | sys = SpringSystem.eigensystem(c) 21 | modes = SpringSystem.eigenmodes(sys) 22 | 23 | ks_expected = [n * 2π / L for n in 0:L-1] 24 | omega_expected = sqrt(4C / M) .* sin.(abs.(ks_expected) ./ 2) 25 | @test isapprox(modes.frequency, sort(omega_expected), atol=1e-5) 26 | 27 | # wave function 28 | t = 5.0 29 | # method 1: solve with leapfrog method 30 | idx = 2 31 | c = spring_chain(waveat(modes, idx, 0.0), C, M; periodic=true) 32 | lf = SpringSystem.LeapFrogSystem(c) 33 | for i=1:500 34 | SpringSystem.step!(lf, 0.01) 35 | end 36 | ut_lf = first.(coordinate(c)) 37 | 38 | # method 2: solve with eigenmodes 39 | ut_expected = (0:L-1) .+ waveat(modes, idx, t) 40 | @test isapprox(ut_lf, ut_expected; rtol=1e-4) 41 | 42 | ### more complex example: random wave 43 | wave = randn(L) * 0.2 44 | c = spring_chain(wave, C, M; periodic=true) 45 | lf = LeapFrogSystem(c) 46 | for i=1:500 47 | SpringSystem.step!(lf, 0.01) 48 | end 49 | ut_lf = first.(coordinate(c)) 50 | ut_expected = (0:L-1) .+ waveat(modes, wave, [t])[] 51 | @test isapprox(ut_lf, ut_expected; rtol=1e-4) 52 | end -------------------------------------------------------------------------------- /SpringSystem/test/leapfrog.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using SpringSystem 3 | using LinearAlgebra 4 | 5 | # Simple harmonic oscillator as a test system 6 | struct SimpleHarmonianOscillator{D} <: SpringSystem.AbstractHamiltonianSystem{D} 7 | 
position::Vector{Point{D, Float64}} 8 | velocity::Vector{Point{D, Float64}} 9 | k::Float64 # spring constant 10 | m::Float64 # mass 11 | end 12 | 13 | function SimpleHarmonicOscillator(; k=1.0, m=1.0) 14 | # Single particle in 1D 15 | pos = [Point(1.0)] # Initial position x=1 16 | vel = [Point(0.0)] # Initial velocity v=0 17 | SimpleHarmonicOscillator{1}(pos, vel, k, m) 18 | end 19 | 20 | Base.length(sys::SimpleHarmonicOscillator) = length(sys.position) 21 | SpringSystem.coordinate(sys::SimpleHarmonicOscillator) = sys.position 22 | SpringSystem.velocity(sys::SimpleHarmonicOscillator, i::Int) = sys.velocity[i] 23 | SpringSystem.offset_coordinate!(sys::SimpleHarmonicOscillator, i::Int, dr) = sys.position[i] += dr 24 | SpringSystem.offset_velocity!(sys::SimpleHarmonicOscillator, i::Int, dv) = sys.velocity[i] += dv 25 | 26 | function SpringSystem.update_acceleration!(a::Vector{<:Point}, sys::SimpleHarmonicOscillator) 27 | # F = -kx for simple harmonic oscillator 28 | for i in 1:length(sys) 29 | a[i] = Point(-sys.k/sys.m * sys.position[i][1]) 30 | end 31 | end 32 | 33 | @testset "LeapFrog Integration" begin 34 | # Test system construction 35 | sys = SimpleHarmonicOscillator() 36 | leapfrog_sys = LeapFrogSystem(sys) 37 | @test length(leapfrog_sys.a) == length(sys) 38 | @test leapfrog_sys.sys === sys 39 | 40 | # Test single step 41 | dt = 0.1 42 | stepped_sys = SpringSystem.step!(leapfrog_sys, dt) 43 | @test stepped_sys === leapfrog_sys # Should modify in-place 44 | @test stepped_sys.sys.position[1][1] != 1.0 # Position should change 45 | 46 | # Test simulation 47 | dt = 0.1 48 | nsteps = 100 49 | states = leapfrog_simulation(sys; dt=dt, nsteps=nsteps) 50 | @test length(states) == nsteps + 1 51 | 52 | # Test energy conservation (approximately) 53 | function total_energy(state::LeapFrogSystem) 54 | sys = state.sys 55 | # E = 1/2 mv² + 1/2 kx² 56 | kinetic = 0.5 * sys.m * sum(v[1]^2 for v in sys.velocity) 57 | potential = 0.5 * sys.k * sum(x[1]^2 for x in sys.position) 58 | return kinetic + potential 59 | end 60 | 61 | initial_energy = total_energy(states[1]) 62 | for state in states[2:end] 63 | current_energy = total_energy(state) 64 | # Check energy conservation with some tolerance 65 | @test isapprox(current_energy, initial_energy, rtol=1e-2) 66 | end 67 | 68 | # Test periodicity (approximately) 69 | # For simple harmonic oscillator, period T = 2π√(m/k) 70 | period = 2π * sqrt(sys.m/sys.k) 71 | n_period_steps = round(Int, period/dt) 72 | 73 | if n_period_steps <= nsteps 74 | initial_pos = states[1].sys.position[1][1] 75 | period_pos = states[n_period_steps+1].sys.position[1][1] 76 | @test isapprox(initial_pos, period_pos, rtol=1e-1) 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /SpringSystem/test/point.jl: -------------------------------------------------------------------------------- 1 | using Test, SpringSystem, LinearAlgebra 2 | 3 | @testset "Point Construction" begin 4 | # Basic construction 5 | p = Point(1.0, 2.0) 6 | @test p isa Point{2, Float64} 7 | @test p.data == (1.0, 2.0) 8 | 9 | # Type specific construction 10 | p_int = Point(1, 2) 11 | @test p_int isa Point{2, Int} 12 | 13 | # Convenience types 14 | p2d = Point2D{Float64}((1.0, 2.0)) 15 | p3d = Point3D{Float64}((1.0, 2.0, 3.0)) 16 | @test p2d isa Point2D{Float64} 17 | @test p3d isa Point3D{Float64} 18 | end 19 | 20 | @testset "Point Arithmetic" begin 21 | p1 = Point(1.0, 2.0) 22 | p2 = Point(3.0, 4.0) 23 | 24 | # Addition 25 | @test p1 + p2 == Point(4.0, 6.0) 26 | 
27 | # Subtraction 28 | @test p2 - p1 == Point(2.0, 2.0) 29 | 30 | # Scalar multiplication 31 | @test 2 * p1 == Point(2.0, 4.0) 32 | @test p1 * 2 == Point(2.0, 4.0) 33 | 34 | # Division 35 | @test p1 / 2 == Point(0.5, 1.0) 36 | end 37 | 38 | @testset "Point Operations" begin 39 | p1 = Point(1.0, 2.0) 40 | p2 = Point(3.0, 4.0) 41 | 42 | # Dot product 43 | @test dot(p1, p2) ≈ 11.0 44 | 45 | # Distance 46 | @test SpringSystem.distance(p1, p2) ≈ sqrt(8.0) 47 | 48 | # Zero 49 | @test zero(Point{2, Float64}) == Point(0.0, 0.0) 50 | @test zero(p1) == Point(0.0, 0.0) 51 | end 52 | 53 | @testset "Point Utilities" begin 54 | p = Point(1.0, 2.0) 55 | @test length(p) == 2 56 | 57 | # Indexing 58 | @test p[1] == 1.0 59 | @test p[2] == 2.0 60 | 61 | # Iteration 62 | collected = collect(p) 63 | @test collected == [1.0, 2.0] 64 | 65 | # Approximate equality 66 | p1 = Point(1.0, 2.0) 67 | p2 = Point(1.0 + 1e-10, 2.0 - 1e-10) 68 | @test isapprox(p1, p2, atol=1e-9) 69 | end 70 | -------------------------------------------------------------------------------- /SpringSystem/test/runtests.jl: -------------------------------------------------------------------------------- 1 | using SpringSystem 2 | using Test 3 | 4 | @testset "point" begin 5 | include("point.jl") 6 | end 7 | 8 | @testset "leapfrog" begin 9 | include("leapfrog.jl") 10 | end 11 | 12 | @testset "chain" begin 13 | include("chain.jl") 14 | end 15 | 16 | --------------------------------------------------------------------------------
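A minimal end-to-end usage sketch for the SpringSystem package (not a file in the repository): it strings together the exported API using only the call signatures exercised in SpringSystem/test/chain.jl, and assumes SpringSystem has been developed into the active Julia environment.

```julia
using SpringSystem

# Build a periodic chain of 10 unit-mass, unit-stiffness springs with small
# random initial displacements, as in test/chain.jl.
chain = spring_chain(randn(10) .* 0.1, 1.0, 1.0; periodic=true)

# Integrate with the symplectic leapfrog scheme from src/leapfrog.jl.
# `states` stores a deep copy of the LeapFrogSystem after every step,
# so it contains nsteps + 1 snapshots.
states = leapfrog_simulation(chain; dt=0.01, nsteps=500)

# Read off the x-coordinate of every mass at the final time.
final_positions = first.(coordinate(states[end].sys))
```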