├── src
│   ├── quine.jl
│   ├── bogo_sort!_one_liner.jl
│   ├── linear_regression_one_liner.jl
│   ├── neural_network_one_liner.jl
│   ├── bogo_sort!.jl
│   ├── nearest_neighbor.jl
│   ├── linear_regression.jl
│   ├── basis_regression.jl
│   ├── k_nearest_neighbors.jl
│   ├── ridge_regression.jl
│   ├── hailstone.jl
│   ├── neural_network.jl
│   ├── distance_functions.jl
│   ├── bubble_sort!.jl
│   ├── newtons_method.jl
│   ├── multi_layer_neural_network.jl
│   ├── insertion_sort!.jl
│   ├── radial_basis_regression.jl
│   ├── gradient_descent.jl
│   ├── finite_difference_methods.jl
│   ├── thompson_sampling.jl
│   ├── cross_entropy_method.jl
│   ├── stochastic_gradient_descent.jl
│   ├── logistic_regression.jl
│   ├── particle_filter.jl
│   ├── huffman_coding.jl
│   ├── gaussian_process.jl
│   ├── simulated_annealing.jl
│   ├── merge_sort.jl
│   ├── k_means_clustering.jl
│   ├── gaussian_process_kernels.jl
│   ├── branch_and_bound.jl
│   ├── value_iteration.jl
│   ├── twiddle.jl
│   ├── loss_functions.jl
│   ├── em_algorithm.jl
│   ├── monte_carlo_tree_search.jl
│   └── BeautifulAlgorithms.jl
├── img
│   └── png
│       ├── quine.png
│       ├── twiddle.png
│       ├── bogo_sort!.png
│       ├── hailstone.png
│       ├── merge_sort.png
│       ├── bubble_sort!.png
│       ├── em_algorithm.png
│       ├── huffman_coding.png
│       ├── insertion_sort!.png
│       ├── loss_functions.png
│       ├── neural_network.png
│       ├── newtons_method.png
│       ├── particle_filter.png
│       ├── value_iteration.png
│       ├── basis_regression.png
│       ├── branch_and_bound.png
│       ├── distance_functions.png
│       ├── gaussian_process.png
│       ├── gradient_descent.png
│       ├── k_means_clustering.png
│       ├── linear_regression.png
│       ├── nearest_neighbor.png
│       ├── ridge_regression.png
│       ├── thompson_sampling.png
│       ├── bogo_sort!_one_liner.png
│       ├── cross_entropy_method.png
│       ├── k_nearest_neighbors.png
│       ├── logistic_regression.png
│       ├── simulated_annealing.png
│       ├── monte_carlo_tree_search.png
│       ├── radial_basis_regression.png
│       ├── finite_difference_methods.png
│       ├── gaussian_process_kernels.png
│       ├── neural_network_one_liner.png
│       ├── linear_regression_one_liner.png
│       ├── multi_layer_neural_network.png
│       └── stochastic_gradient_descent.png
├── test
│   ├── test_bubble_sort!.jl
│   ├── test_merge_sort.jl
│   ├── test_newtons_method.jl
│   ├── test_basis_regression.jl
│   ├── test_distance_functions.jl
│   ├── test_insertion_sort!.jl
│   ├── test_thompson_sampling.jl
│   ├── test_radial_basis_regression.jl
│   ├── test_gaussian_process.jl
│   ├── test_finite_difference_methods.jl
│   ├── test_bogo_sort!.jl
│   ├── test_ridge_regression.jl
│   ├── test_simulated_annealing.jl
│   ├── test_cross_entropy_method.jl
│   ├── test_value_iteration.jl
│   ├── test_logistic_regression.jl
│   ├── test_twiddle.jl
│   ├── test_gaussian_process_kernels.jl
│   ├── test_quine.jl
│   ├── test_branch_and_bound.jl
│   ├── test_k_means_clustering.jl
│   ├── test_monte_carlo_tree_search.jl
│   ├── test_hailstone.jl
│   ├── test_stochastic_gradient_descent.jl
│   ├── test_linear_regression.jl
│   ├── test_nearest_neighbor.jl
│   ├── test_neural_network.jl
│   ├── test_k_nearest_neighbors.jl
│   ├── test_particle_filter.jl
│   ├── test_multi_layer_neural_network.jl
│   ├── test_loss_functions.jl
│   ├── test_huffman_coding.jl
│   ├── runtests.jl
│   ├── test_em_algorithm.jl
│   └── test_gradient_descent.jl
├── Project.toml
├── .github
│   └── workflows
│       └── CI.yml
└── README.md
/src/quine.jl: -------------------------------------------------------------------------------- 1 | s = "@show s; print(s)" 2 | @show s; print(s) -------------------------------------------------------------------------------- /src/bogo_sort!_one_liner.jl: -------------------------------------------------------------------------------- 1 | bogo_sort!(X) = while !issorted(X) shuffle!(X) end 
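A minimal usage sketch for the `bogo_sort!` one-liner above: it assumes `using Random` for `shuffle!` (as src/bogo_sort!.jl does) and sorts the array in place.

using Random
bogo_sort!(X) = while !issorted(X) shuffle!(X) end  # the one-liner from above

X = rand(1:100, 5)   # keep the input tiny: expected runtime grows factorially with length
bogo_sort!(X)
@assert issorted(X)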
-------------------------------------------------------------------------------- /img/png/quine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/quine.png -------------------------------------------------------------------------------- /src/linear_regression_one_liner.jl: -------------------------------------------------------------------------------- 1 | linear_regression(X, y, 𝐗=[ones(size(y)) X], θ=𝐗\y) = x -> [1;x]'θ -------------------------------------------------------------------------------- /src/neural_network_one_liner.jl: -------------------------------------------------------------------------------- 1 | neural_network(x, 𝐕, 𝐰, φ, g) = 𝐰 ⋅ map(𝐯ⱼ -> g(𝐯ⱼ ⋅ φ(x)), 𝐕) -------------------------------------------------------------------------------- /img/png/twiddle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/twiddle.png -------------------------------------------------------------------------------- /img/png/bogo_sort!.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/bogo_sort!.png -------------------------------------------------------------------------------- /img/png/hailstone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/hailstone.png -------------------------------------------------------------------------------- /img/png/merge_sort.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/merge_sort.png -------------------------------------------------------------------------------- /img/png/bubble_sort!.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/bubble_sort!.png -------------------------------------------------------------------------------- /img/png/em_algorithm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/em_algorithm.png -------------------------------------------------------------------------------- /img/png/huffman_coding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/huffman_coding.png -------------------------------------------------------------------------------- /img/png/insertion_sort!.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/insertion_sort!.png -------------------------------------------------------------------------------- /img/png/loss_functions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/loss_functions.png -------------------------------------------------------------------------------- /img/png/neural_network.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/neural_network.png -------------------------------------------------------------------------------- /img/png/newtons_method.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/newtons_method.png -------------------------------------------------------------------------------- /img/png/particle_filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/particle_filter.png -------------------------------------------------------------------------------- /img/png/value_iteration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/value_iteration.png -------------------------------------------------------------------------------- /img/png/basis_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/basis_regression.png -------------------------------------------------------------------------------- /img/png/branch_and_bound.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/branch_and_bound.png -------------------------------------------------------------------------------- /img/png/distance_functions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/distance_functions.png -------------------------------------------------------------------------------- /img/png/gaussian_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/gaussian_process.png -------------------------------------------------------------------------------- /img/png/gradient_descent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/gradient_descent.png -------------------------------------------------------------------------------- /img/png/k_means_clustering.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/k_means_clustering.png -------------------------------------------------------------------------------- /img/png/linear_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/linear_regression.png -------------------------------------------------------------------------------- /img/png/nearest_neighbor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/nearest_neighbor.png -------------------------------------------------------------------------------- /img/png/ridge_regression.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/ridge_regression.png -------------------------------------------------------------------------------- /img/png/thompson_sampling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/thompson_sampling.png -------------------------------------------------------------------------------- /img/png/bogo_sort!_one_liner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/bogo_sort!_one_liner.png -------------------------------------------------------------------------------- /img/png/cross_entropy_method.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/cross_entropy_method.png -------------------------------------------------------------------------------- /img/png/k_nearest_neighbors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/k_nearest_neighbors.png -------------------------------------------------------------------------------- /img/png/logistic_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/logistic_regression.png -------------------------------------------------------------------------------- /img/png/simulated_annealing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/simulated_annealing.png -------------------------------------------------------------------------------- /src/bogo_sort!.jl: -------------------------------------------------------------------------------- 1 | using Random 2 | 3 | function bogo_sort!(X) 4 | while !issorted(X) 5 | shuffle!(X) 6 | end 7 | end -------------------------------------------------------------------------------- /img/png/monte_carlo_tree_search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/monte_carlo_tree_search.png -------------------------------------------------------------------------------- /img/png/radial_basis_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/radial_basis_regression.png -------------------------------------------------------------------------------- /src/nearest_neighbor.jl: -------------------------------------------------------------------------------- 1 | function nearest_neighbor(x′, φ, 𝒟, dist) 2 | 𝒟[argmin([dist(φ(x), φ(x′)) for (x,y) in 𝒟])][end] 3 | end -------------------------------------------------------------------------------- /img/png/finite_difference_methods.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/finite_difference_methods.png 
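A short usage sketch for the `nearest_neighbor` implementation above (src/nearest_neighbor.jl): 𝒟 is a vector of (input, label) pairs and the call returns the label of the closest point. The metric is assumed from src/distance_functions.jl, and the values mirror test/test_nearest_neighbor.jl.

using LinearAlgebra
dist_manhattan(𝐯, 𝐯′) = norm(𝐯 - 𝐯′, 1)  # from src/distance_functions.jl

𝒟 = [([5,9],6), ([5,5],7), ([7,5],8), ([9,9],10)]
φ = x -> x                                # identity feature map
nearest_neighbor([6.1,6.5], φ, 𝒟, dist_manhattan)  # returns 8, the label of [7,5]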
-------------------------------------------------------------------------------- /img/png/gaussian_process_kernels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/gaussian_process_kernels.png -------------------------------------------------------------------------------- /img/png/neural_network_one_liner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/neural_network_one_liner.png -------------------------------------------------------------------------------- /src/linear_regression.jl: -------------------------------------------------------------------------------- 1 | function linear_regression(X, y) 2 | 𝐗 = [ones(size(y)) X] 3 | θ = 𝐗\y 4 | return x -> [1;x]'θ 5 | end -------------------------------------------------------------------------------- /img/png/linear_regression_one_liner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/linear_regression_one_liner.png -------------------------------------------------------------------------------- /img/png/multi_layer_neural_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/multi_layer_neural_network.png -------------------------------------------------------------------------------- /img/png/stochastic_gradient_descent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wikunia/BeautifulAlgorithms.jl/master/img/png/stochastic_gradient_descent.png -------------------------------------------------------------------------------- /test/test_bubble_sort!.jl: -------------------------------------------------------------------------------- 1 | @testset "Bubble sort" begin 2 | X = rand(1:1000, 100) 3 | bubble_sort!(X) 4 | @test issorted(X) 5 | end 6 | -------------------------------------------------------------------------------- /src/basis_regression.jl: -------------------------------------------------------------------------------- 1 | function basis_regression(X, y, φ) 2 | B = mapreduce(x->φ(x)', vcat, X) 3 | θ = B\y 4 | return x -> φ(x)'θ 5 | end -------------------------------------------------------------------------------- /test/test_merge_sort.jl: -------------------------------------------------------------------------------- 1 | @testset "Merge sort" begin 2 | X = rand(1:100, rand(1:100)) 3 | X = merge_sort(X) 4 | @test issorted(X) 5 | end 6 | -------------------------------------------------------------------------------- /src/k_nearest_neighbors.jl: -------------------------------------------------------------------------------- 1 | function k_nearest_neighbors(x′, φ, 𝒟, dist, k) 2 | last.(𝒟[partialsortperm([dist(φ(x), φ(x′)) for (x,y) in 𝒟], 1:k)]) 3 | end -------------------------------------------------------------------------------- /src/ridge_regression.jl: -------------------------------------------------------------------------------- 1 | function ridge_regression(X, y, k) 2 | 𝐗 = mapreduce(x->[1;x]', vcat, X) 3 | θ = (𝐗'𝐗 + k*I)\𝐗'y 4 | return x -> [1;x]'θ 5 | end -------------------------------------------------------------------------------- /src/hailstone.jl: 
-------------------------------------------------------------------------------- 1 | function hailstone(n::Int) 2 | N = [n] 3 | while n > 1 4 | n = iseven(n) ? n÷2 : 3n+1 5 | push!(N, n) 6 | end 7 | return N 8 | end -------------------------------------------------------------------------------- /test/test_newtons_method.jl: -------------------------------------------------------------------------------- 1 | @testset "Newton's method" begin 2 | f = x->x^2 - 4 3 | @test newtons_method(f) == 2.0 4 | @test newtons_method(f; x=-rand()) == -2.0 5 | end 6 | -------------------------------------------------------------------------------- /src/neural_network.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | ReLU(z) = max(z, 0) 4 | 5 | function neural_network(x, 𝐕, 𝐰, φ, g=ReLU) 6 | 𝐡 = map(𝐯ⱼ -> g(𝐯ⱼ ⋅ φ(x)), 𝐕) 7 | 𝐰 ⋅ 𝐡 8 | end -------------------------------------------------------------------------------- /src/distance_functions.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | dist_manhattan(𝐯, 𝐯′) = norm(𝐯 - 𝐯′, 1) 4 | dist_euclidean(𝐯, 𝐯′) = norm(𝐯 - 𝐯′, 2) 5 | dist_supremum(𝐯, 𝐯′) = norm(𝐯 - 𝐯′, Inf) -------------------------------------------------------------------------------- /src/bubble_sort!.jl: -------------------------------------------------------------------------------- 1 | function bubble_sort!(X) 2 | for i in 1:length(X), j in 1:length(X)-i 3 | if X[j] > X[j+1] 4 | (X[j+1], X[j]) = (X[j], X[j+1]) 5 | end 6 | end 7 | end -------------------------------------------------------------------------------- /src/newtons_method.jl: -------------------------------------------------------------------------------- 1 | using Zygote 2 | 3 | function newtons_method(f; x=rand(), tol=eps(Float64)) 4 | while !isapprox(f(x), 0, atol=tol) 5 | x = x - f(x)/f'(x) 6 | end 7 | return x 8 | end -------------------------------------------------------------------------------- /test/test_basis_regression.jl: -------------------------------------------------------------------------------- 1 | @testset "Basis regression" begin 2 | X = collect(1:5) 3 | y = [3, 4, 3, 6, -1] 4 | ϕ(x) = [1, x, x^2, x^3] 5 | 6 | f = basis_regression(X, y, ϕ) 7 | @test f(1) ≈ 3.285714285714497 8 | end 9 | -------------------------------------------------------------------------------- /test/test_distance_functions.jl: -------------------------------------------------------------------------------- 1 | @testset "Distance functions" begin 2 | @test dist_manhattan([0,0], [3,3]) == 6.0 3 | @test dist_euclidean([0,0], [3,3]) ≈ 4.242640687119285 4 | @test dist_supremum([0,0], [3,3]) == 3.0 5 | end 6 | -------------------------------------------------------------------------------- /test/test_insertion_sort!.jl: -------------------------------------------------------------------------------- 1 | @testset "Insertion sort" begin 2 | X = rand(1:100, rand(1:100)) 3 | copyX = copy(X) 4 | insertion_sort!(X) 5 | @test issorted(X) 6 | @test X == sort(copyX) # checks for cᵥ necessity 7 | end 8 | -------------------------------------------------------------------------------- /src/multi_layer_neural_network.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | function multi_layer_neural_network(x, 𝐖, φ, 𝐠) 4 | 𝐡ᵢ = φ(x) 5 | for (i,g) in enumerate(𝐠) 6 | 𝐡ᵢ = map(𝐰ⱼ -> g(𝐰ⱼ ⋅ 𝐡ᵢ), 𝐖[i]) 7 | end 8 | 𝐡ᵢ ⋅ last(𝐖) 9 | end 
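A usage sketch for `multi_layer_neural_network` above; the feature map and weights mirror test/test_multi_layer_neural_network.jl, and `ReLU` is assumed from src/neural_network.jl. With a single activation in 𝐠, this reduces to the two-layer network.

using LinearAlgebra
ReLU(z) = max(z, 0)              # from src/neural_network.jl

x = 2
φ = x -> [x, x^2, sqrt(abs(x))]  # feature map
𝐕 = [[2,-1,3], [3,0,1]]          # hidden-layer weight vectors
𝐰 = [+1, -1]                     # output weights
𝐖 = [𝐕, 𝐰]
multi_layer_neural_network(x, 𝐖, φ, [ReLU])  # ≈ -3.1715728752538093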
-------------------------------------------------------------------------------- /test/test_thompson_sampling.jl: -------------------------------------------------------------------------------- 1 | @testset "Thompson sampling" begin 2 | using Random; Random.seed!(0) 3 | apply(xₜ) = [0.1, 0.5, 0.9][xₜ] 4 | estimates = thompson_sampling(ones(3), ones(3), apply) 5 | @test argmax(mean.(estimates)) == 3 6 | end 7 | -------------------------------------------------------------------------------- /test/test_radial_basis_regression.jl: -------------------------------------------------------------------------------- 1 | @testset "Radial basis regression" begin 2 | D = [(1,1), (2,1), (3,3), (4,3)] 3 | X = map(first, D) 4 | y = map(last, D) 5 | f = radial_basis_regression(X, y) 6 | @test f(2.5) ≈ 1.9304121024661014 7 | end 8 | -------------------------------------------------------------------------------- /src/insertion_sort!.jl: -------------------------------------------------------------------------------- 1 | function insertion_sort!(X) 2 | for i in 1:length(X) 3 | cᵥ = X[i] 4 | j = i - 1 5 | while j ≥ 1 && X[j] > cᵥ 6 | X[j+1] = X[j] 7 | j -= 1 8 | end 9 | X[j+1] = cᵥ 10 | end 11 | end -------------------------------------------------------------------------------- /src/radial_basis_regression.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | function radial_basis_regression(X, y; σ=1, p=2) 4 | ψ = r->exp(-r^2 / (2σ^2)) 5 | φ(x) = [ψ(norm(x - c, p)) for c in X] 6 | B = mapreduce(x->φ(x)', vcat, X) 7 | θ = B\y 8 | return x -> φ(x)'θ 9 | end -------------------------------------------------------------------------------- /src/gradient_descent.jl: -------------------------------------------------------------------------------- 1 | using Statistics, LinearAlgebra 2 | 3 | function gradient_descent(𝒟train, φ, ∇loss; η=0.1, T=100) 4 | 𝐰 = zeros(length(φ(𝒟train[1][1]))) 5 | for t in 1:T 6 | 𝐰 = 𝐰 .- η*mean(∇loss(x, y, 𝐰, φ) for (x,y) ∈ 𝒟train) 7 | end 8 | return 𝐰 9 | end -------------------------------------------------------------------------------- /test/test_gaussian_process.jl: -------------------------------------------------------------------------------- 1 | @testset "Gaussian process" begin 2 | import Random: seed! 
3 | seed!(0) 4 | 5 | 𝒢 = GaussianProcess(m_zero, k_brownian) 6 | X = [0, 1] 7 | Y = rand(𝒢, X) 8 | 9 | @test Y ≈ [0.0006791074260357777, 0.8284138971066739] 10 | end 11 | -------------------------------------------------------------------------------- /test/test_finite_difference_methods.jl: -------------------------------------------------------------------------------- 1 | @testset "Finite difference methods" begin 2 | f = x->x^2 3 | x, df = 11, 22 4 | for diff_method in [forward_difference, central_difference, backward_difference, complex_difference] 5 | @test isapprox(diff_method(f, x), df) 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /src/finite_difference_methods.jl: -------------------------------------------------------------------------------- 1 | forward_difference(f, x; h=sqrt(eps())) = (f(x .+ h) .- f(x))/h 2 | central_difference(f, x; h=cbrt(eps())) = (f(x .+ h/2) .- f(x .- h/2))/h 3 | backward_difference(f, x; h=sqrt(eps())) = (f(x) .- f(x .- h))/h 4 | complex_difference(f, x; h=1e-20) = imag(f(x .+ h*im))/h -------------------------------------------------------------------------------- /src/thompson_sampling.jl: -------------------------------------------------------------------------------- 1 | using Distributions 2 | 3 | function thompson_sampling(𝛂, 𝛃, apply; T=100) 4 | for t in 1:T 5 | 𝛉 = rand.(Beta.(𝛂, 𝛃)) 6 | x = argmax(𝛉) 7 | r = apply(x) 8 | 𝛂[x], 𝛃[x] = (𝛂[x] + r, 𝛃[x] + 1 - r) 9 | end 10 | return Beta.(𝛂, 𝛃) 11 | end -------------------------------------------------------------------------------- /src/cross_entropy_method.jl: -------------------------------------------------------------------------------- 1 | using Distributions 2 | 3 | function cross_entropy_method(f, P, k_max, m=100, m_elite=10) 4 | for k in 1:k_max 5 | samples = rand(P, m) 6 | order = sortperm([f(samples[:,i]) for i in 1:m]) 7 | P = fit(typeof(P), samples[:,order[1:m_elite]]) 8 | end 9 | return P 10 | end -------------------------------------------------------------------------------- /src/stochastic_gradient_descent.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | function stochastic_gradient_descent(𝒟train, φ, ∇loss; η=0.1, T=100) 4 | 𝐰 = zeros(length(φ(𝒟train[1][1]))) 5 | for t in 1:T 6 | for (x, y) ∈ 𝒟train 7 | 𝐰 = 𝐰 .- η*∇loss(x, y, 𝐰, φ) 8 | end 9 | end 10 | return 𝐰 11 | end -------------------------------------------------------------------------------- /src/logistic_regression.jl: -------------------------------------------------------------------------------- 1 | σ(X, θ) = 1 ./ (1 .+ exp.(-X*θ)) 2 | 3 | predict(f, x) = f(x) > 0.5 4 | 5 | function logistic_regression(X, y; T=1000, α=1e-4) 6 | 𝐗 = mapreduce(x->[1;x]', vcat, X) 7 | θ = zeros(2) 8 | for iteration in 1:T 9 | θ = θ + α * 𝐗' * (y - σ(𝐗,θ)) 10 | end 11 | return x -> σ([1;x]', θ) 12 | end -------------------------------------------------------------------------------- /src/particle_filter.jl: -------------------------------------------------------------------------------- 1 | using Distributions, LinearAlgebra 2 | 3 | struct POMDP γ; 𝒮; 𝒜; 𝒪; T; R; O end 4 | 5 | function particle_filter(𝐬::Vector, 𝒫::POMDP, a, o) 6 | (T, O) = (𝒫.T, 𝒫.O) 7 | 𝐬′ = rand.(T.(𝐬, a)) 8 | 𝐰 = O.(a, 𝐬′, o) 9 | D = Categorical(normalize(𝐰, 1)) 10 | return 𝐬′[rand(D, length(𝐬′))] 11 | end -------------------------------------------------------------------------------- /test/test_bogo_sort!.jl: 
-------------------------------------------------------------------------------- 1 | @testset "Bogo sort" begin 2 | X = rand(1:100, 5) 3 | bogo_sort!(X) 4 | @test issorted(X) 5 | end 6 | 7 | @testset "Bogo sort (one-liner)" begin 8 | import BeautifulAlgorithms.bogo_sort! 9 | include(joinpath(@__DIR__, "..", "src", "bogo_sort!_one_liner.jl")) 10 | X = rand(1:100, 5) 11 | bogo_sort!(X) 12 | @test issorted(X) 13 | end -------------------------------------------------------------------------------- /test/test_ridge_regression.jl: -------------------------------------------------------------------------------- 1 | @testset "Ridge regression" begin 2 | X = collect(1:5) 3 | y = [3, 4, 3, 6, -1] 4 | k = 0.77 5 | 6 | f = ridge_regression(X, y, k) 7 | # produces a "straight" horizontal line 8 | # so check that the swept values of x are all really close in difference 9 | @test all(isapprox.(diff(map(x->f(x), 1:10)), 0.00030994, atol=2)) 10 | end 11 | -------------------------------------------------------------------------------- /test/test_simulated_annealing.jl: -------------------------------------------------------------------------------- 1 | @testset "Simulated annealing" begin 2 | import Random: seed! 3 | seed!(0) 4 | 5 | f = x -> x^2 6 | k_max = 100 7 | t = k -> k_max/k - 1 8 | T = Normal(0, 1) 9 | x₀ = -10 10 | x_best = simulated_annealing(f, x₀, T, t, k_max) 11 | 12 | @test x_best ≈ 0.03216891139764366 13 | @test f(x_best) ≈ 0.0010348388605094484 14 | end 15 | -------------------------------------------------------------------------------- /src/huffman_coding.jl: -------------------------------------------------------------------------------- 1 | function huffman_coding(C::Vector{Char}, F::Vector{<:Real}) 2 | N = Dict{Any,Any}(Pair.(C, F)) 3 | while length(N) > 1 4 | X = findmin(N); delete!(N, last(X)) 5 | Y = findmin(N); delete!(N, last(Y)) 6 | children = (left=last(X), right=last(Y)) 7 | Z = Pair(children, first(X)+first(Y)) 8 | push!(N, Z) 9 | end 10 | return first(first(N)) 11 | end -------------------------------------------------------------------------------- /test/test_cross_entropy_method.jl: -------------------------------------------------------------------------------- 1 | @testset "Cross-entropy method" begin 2 | using Distributions 3 | import Random: seed! 4 | import LinearAlgebra: norm 5 | seed!(0) 6 | f = x->norm(x) 7 | μ = [0.5, 1.5] 8 | Σ = [1.0 0.2; 0.2 2.0] 9 | P = MvNormal(μ, Σ) 10 | k_max = 10 11 | P = cross_entropy_method(f, P, k_max) 12 | 13 | @test isapprox(P.μ, [-6.13623e-7, -1.37216e-6], atol=1e-5) 14 | end 15 | -------------------------------------------------------------------------------- /src/gaussian_process.jl: -------------------------------------------------------------------------------- 1 | using Distributions 2 | using LinearAlgebra 3 | 4 | struct GaussianProcess 5 | m::Function # mean function 6 | k::Function # covariance function 7 | end 8 | 9 | 𝛍(X, m) = [m(𝐱) for 𝐱 in X] 10 | 𝚺(X, k) = [k(𝐱,𝐱′) for 𝐱 in X, 𝐱′ in X] 11 | 12 | function Base.rand(𝒢::GaussianProcess, X, inflation=1e-6) 13 | 𝒩 = MvNormal(𝛍(X, 𝒢.m), 𝚺(X, 𝒢.k) + inflation*I) 14 | return rand(𝒩) 15 | end -------------------------------------------------------------------------------- /test/test_value_iteration.jl: -------------------------------------------------------------------------------- 1 | @testset "Value iteration" begin 2 | P = MDP(0.95, [1:100;], [+1, -1], (s,a,s′)->s + a == s′ ? 0.7 : 0, (s,a)->s == 50 ? 
1 : 0) 3 | 4 | U = value_iteration(P, 100) 5 | 6 | reshape(U, 10, 10) 7 | 8 | # Optimal policy (𝒜 = [+1, -1]) 9 | @test all(policy.(1:50, 𝒫=P, U=U) .== 1) # go forwards toward 50 10 | @test all(policy.(51:100, 𝒫=P, U=U) .== 2) # go backwards toward 50 11 | end 12 | -------------------------------------------------------------------------------- /src/simulated_annealing.jl: -------------------------------------------------------------------------------- 1 | function simulated_annealing(f, x, P, t::Function, k_max) 2 | y = f(x) 3 | x_best, y_best = x, y 4 | for k in 1:k_max 5 | x′ = x + rand(P) 6 | y′ = f(x′) 7 | Δy = y′ - y 8 | if Δy ≤ 0 || rand() < exp(-Δy/t(k)) 9 | x, y = x′, y′ 10 | end 11 | if y′ < y_best 12 | x_best, y_best = x′, y′ 13 | end 14 | end 15 | return x_best 16 | end -------------------------------------------------------------------------------- /test/test_logistic_regression.jl: -------------------------------------------------------------------------------- 1 | using Distributions 2 | using Random 3 | 4 | @testset "Logistic regression" begin 5 | Random.seed!(0) 6 | X = collect(-50:50) 7 | y = vcat(rand(Bernoulli(0.1), 50), rand(Bernoulli(0.9), 51)); # target binary class 8 | 9 | f = logistic_regression(X,y) 10 | 11 | @test predict(f, -5) == false 12 | @test predict(f, 0) == true 13 | @test predict(f, 5) == true 14 | @test isapprox(f(0), 0.54, atol=1e-2) 15 | end 16 | -------------------------------------------------------------------------------- /test/test_twiddle.jl: -------------------------------------------------------------------------------- 1 | @testset "Twiddle" begin 2 | f = X -> X[1]^2 + X[2]^2 3 | g = X -> X[1]*exp(-(X[1]^2 + X[2]^2)) 4 | 5 | # Maximization. 6 | pf = twiddle(f, ones(2)) 7 | @test f(pf) == Inf 8 | pg = twiddle(g, ones(2)) 9 | @test isapprox(g(pg), 0.42888, atol=1e-5) 10 | 11 | # Minimization. 12 | pf = twiddle(X->-f(X), ones(2)) 13 | @test f(pf) == 0.0 14 | pg = twiddle(X->-g(X), ones(2)) 15 | @test isapprox(g(pg), -0.42888, atol=1e-5) 16 | end 17 | -------------------------------------------------------------------------------- /test/test_gaussian_process_kernels.jl: -------------------------------------------------------------------------------- 1 | @testset "Gaussian process kernels" begin 2 | import Random: seed! 3 | seed!(0) 4 | 5 | # Coverage 6 | X = [0, 1] 7 | rand(GaussianProcess(m_zero, k_sqr_exp), X) 8 | rand(GaussianProcess(m_zero, k_exp), X) 9 | rand(GaussianProcess(m_zero, k_gamma_exp), X) 10 | rand(GaussianProcess(m_zero, k_rat_quad), X) 11 | rand(GaussianProcess(m_zero, k_nn), X) 12 | rand(GaussianProcess(m_zero, k_brownian), X) 13 | 14 | @test true 15 | end 16 | -------------------------------------------------------------------------------- /test/test_quine.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | @testset "Quine" begin 3 | original_stdout = stdout 4 | (rd, wr) = redirect_stdout() 5 | quine_file = joinpath(@__DIR__, "../src/quine.jl") 6 | if Sys.iswindows() 7 | # if `dos2unix` unavailable, comment it out locally. 
8 | run(`dos2unix $quine_file`) 9 | end 10 | include(quine_file) 11 | redirect_stdout(original_stdout) 12 | close(wr) 13 | quine = read(rd, String) 14 | @test quine == read(quine_file, String) 15 | end 16 | -------------------------------------------------------------------------------- /src/merge_sort.jl: -------------------------------------------------------------------------------- 1 | function merge_sort(X) 2 | n = length(X) 3 | if n ≤ 1 4 | return X 5 | end 6 | L = merge_sort(X[1:n÷2]) 7 | R = merge_sort(X[n÷2+1:end]) 8 | return merge(L, R) 9 | end 10 | 11 | function Base.merge(L, R) 12 | nₗ, nᵣ, i, j = length(L), length(R), 1, 1 13 | combined = [] 14 | for k in 1:(nₗ + nᵣ) 15 | l = i ≤ nₗ ? L[i] : Inf 16 | r = j ≤ nᵣ ? R[j] : Inf 17 | push!(combined, l < r ? l : r) 18 | l < r ? i+=1 : j+=1 19 | end 20 | return combined 21 | end -------------------------------------------------------------------------------- /test/test_branch_and_bound.jl: -------------------------------------------------------------------------------- 1 | @testset "Branch and bound" begin 2 | γ = 0.95 3 | 𝒮 = 1:10 4 | 𝒜 = [+1, -1] 5 | T = (s,a,s′)->s + a == s′ ? 0.7 : 0 6 | R = (s,a)->s == 5 ? 100 : 0 7 | 𝒫 = BranchAndBound.MDP(γ, 𝒮, 𝒜, T, R) 8 | 9 | d = 4 # depth 10 | 𝑈₋ = s->0 # lower bound on value function at depth d 11 | 𝑄⁻ = (s,a)->100 # upper bound on action-value function 12 | π = s -> branch_and_bound(𝒫, s, d, 𝑈₋, 𝑄⁻).a 13 | 14 | for s in 𝒮 15 | a = s ≤ 5 || s ∈ [9, 10] ? +1 : -1 16 | @test π(s) == a 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /src/k_means_clustering.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | using Statistics 3 | 4 | function k_means_clustering(φ, 𝒟, K; T=100) 5 | local z 6 | μ = rand(first.(𝒟), K) 7 | for t in 1:T 8 | z = Dict(map(k->Pair(k, []), 1:K)) 9 | for i in 1:length(𝒟) 10 | xᵢ = first(𝒟[i]) 11 | push!(z[argmin([norm(φ(xᵢ) - μₖ)^2 for μₖ in μ])], i) 12 | end 13 | for k in 1:length(μ) 14 | if !isempty(z[k]) 15 | μ[k] = mean(φ(first(𝒟[i])) for i in z[k]) 16 | end 17 | end 18 | end 19 | return (z, μ) 20 | end -------------------------------------------------------------------------------- /test/test_k_means_clustering.jl: -------------------------------------------------------------------------------- 1 | using Random 2 | 3 | @testset "K-means clustering" begin 4 | Random.seed!(1) 5 | function test_k_means_clustering() 6 | 𝒟 = [([5.0, 9.0], 6), 7 | ([5.0, 5.0], 7), 8 | ([7.0, 5.0], 8), 9 | ([9.0, 9.0], 10)] 10 | φ = x->x 11 | 12 | (z1, μ1) = k_means_clustering(φ, 𝒟, 2) 13 | @test sort(μ1) ≈ [[6,5], [7,9]] 14 | 15 | (z2, μ2) = k_means_clustering(x->x, [0.0, 2.0, 10.0, 12.0], 2) 16 | @test sort(μ2) ≈ [1, 11] 17 | end 18 | 19 | test_k_means_clustering() 20 | end 21 | -------------------------------------------------------------------------------- /src/gaussian_process_kernels.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | k_sqr_exp(x, x′; ℓ=1) = exp( -(x - x′)^2 / 2ℓ^2 ) 4 | k_exp(x, x′; ℓ=1) = exp( -(abs(x - x′)/ℓ) ) 5 | k_gamma_exp(x, x′; ℓ=1, γ=0.5) = exp( -(abs(x - x′)/ℓ)^γ ) 6 | k_rat_quad(x, x′; ℓ=1, α=0.5) = (1 + (x - x′)^2 / 2α*ℓ^2)^-α 7 | k_nn(x, x′; Σ=I, 𝐱=[1,x...], 𝐱′=[1,x′...]) = asin(2𝐱'Σ*𝐱′/sqrt((1 + 2𝐱'Σ*𝐱)*(1 + 2𝐱′'Σ*𝐱′))) 8 | k_brownian(x, x′; H=0.9) = 1/2 * (abs(x)^(2H) + abs(x′)^(2H) - abs(x - x′)^(2H)) 9 | m_zero(x) = zero(x) 
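A sampling sketch for the kernels above, paired with the `GaussianProcess` type from src/gaussian_process.jl (as in test/test_gaussian_process_kernels.jl). Each kernel defines the covariance of a zero-mean prior over functions; `rand` draws one realization at the chosen input points (the points below are an arbitrary choice for illustration).

using Distributions, LinearAlgebra

X = [0.0, 0.5, 1.0]                      # evaluation points (assumed for illustration)
𝒢 = GaussianProcess(m_zero, k_sqr_exp)   # zero mean, squared-exponential covariance
Y = rand(𝒢, X)                           # one sampled function, evaluated at X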
-------------------------------------------------------------------------------- /src/branch_and_bound.jl: -------------------------------------------------------------------------------- 1 | struct MDP γ; 𝒮; 𝒜; T; R end 2 | 3 | function branch_and_bound(𝒫::MDP, s, d, 𝑈₋, 𝑄⁻) 4 | (𝒮, T, R, γ) = (𝒫.𝒮, 𝒫.T, 𝒫.R, 𝒫.γ) 5 | if d ≤ 0 6 | return (a=nothing, u=𝑈₋(s)) 7 | end 8 | best = (a=nothing, u=-Inf) 9 | 𝑈′ = s -> branch_and_bound(𝒫, s, d-1, 𝑈₋, 𝑄⁻).u 10 | for a in 𝒫.𝒜 11 | if 𝑄⁻(s,a) < best.u 12 | return best # prune 13 | end 14 | u = R(s,a) + γ*sum(T(s,a,s′)*𝑈′(s′) for s′ in 𝒮) 15 | if u > best.u 16 | best = (a=a, u=u) 17 | end 18 | end 19 | return best 20 | end -------------------------------------------------------------------------------- /src/value_iteration.jl: -------------------------------------------------------------------------------- 1 | struct MDP γ; 𝒮; 𝒜; T; R end 2 | 3 | function lookahead(𝒫::MDP, U::Vector, s, a) 4 | (𝒮, T, R, γ) = (𝒫.𝒮, 𝒫.T, 𝒫.R, 𝒫.γ) 5 | return R(s,a) + γ*sum(T(s,a,s′)*U[i] for (i,s′) in enumerate(𝒮)) 6 | end 7 | 8 | function value_iteration(𝒫::MDP, k_max) 9 | (𝒮, 𝒜, T, R, γ) = (𝒫.𝒮, 𝒫.𝒜, 𝒫.T, 𝒫.R, 𝒫.γ) 10 | U = [0.0 for s in 𝒮] 11 | for k = 1:k_max 12 | U′ = [maximum(lookahead(𝒫, U, s, a) for a in 𝒜) for s in 𝒮] 13 | U = U′ 14 | end 15 | return U 16 | end 17 | 18 | policy(s; 𝒫, U) = findmax([lookahead(𝒫, U, s, a) for a in 𝒫.𝒜])[end] -------------------------------------------------------------------------------- /test/test_monte_carlo_tree_search.jl: -------------------------------------------------------------------------------- 1 | @testset "Monte Carlo tree search" begin 2 | import Random: seed! 3 | seed!(2) 4 | 5 | 𝒮 = [1:100;] 6 | 𝒜 = [+1, -1] 7 | T = (s,a,s′) -> s + a == s′ ? 0.7 : 0 8 | R = (s,a) -> s == 50 ? 
1 : 0 9 | G = (s,a) -> begin 10 | s′ = rand([s, s+a, s-a]) 11 | r = R(s, a) 12 | return (s′, r) 13 | end 14 | 𝒫 = MDPᴳ(0.95, 𝒮, 𝒜, T, R, G) 15 | 16 | mcts = MonteCarloTreeSearch(𝒫, Dict(), Dict(), 50, 1000, 1, s->rand(map(a->s+a, 𝒜))) 17 | 18 | @test mcts(1) == 1 19 | @test mcts(55) == 2 20 | @test mcts(100) == 1 21 | end 22 | -------------------------------------------------------------------------------- /src/twiddle.jl: -------------------------------------------------------------------------------- 1 | function twiddle(f, Δ; τ=1e-3) 2 | p = zeros(length(Δ)) 3 | ϵˢ = f(p) 4 | while sum(Δ) > τ 5 | for i in 1:length(p) 6 | p[i] += Δ[i] 7 | ϵ = f(p) 8 | if ϵ > ϵˢ 9 | ϵˢ = ϵ 10 | Δ[i] *= 1.1 11 | else 12 | p[i] -= 2Δ[i] 13 | ϵ = f(p) 14 | if ϵ > ϵˢ 15 | ϵˢ = ϵ 16 | Δ[i] *= 1.1 17 | else 18 | p[i] += Δ[i] 19 | Δ[i] *= 0.9 20 | end 21 | end 22 | end 23 | end 24 | return p 25 | end -------------------------------------------------------------------------------- /test/test_hailstone.jl: -------------------------------------------------------------------------------- 1 | @testset "Hailstone" begin 2 | sequence12 = hailstone(12) 3 | sequence27 = hailstone(27) 4 | 5 | @test sequence12 == [12, 6, 3, 10, 5, 16, 8, 4, 2, 1] 6 | @test sequence27 == [27, 82, 41, 124, 62, 31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137, 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593, 1780, 890, 445, 1336, 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425, 1276, 638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619, 4858, 2429, 7288, 3644, 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, 2308, 1154, 577, 1732, 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, 53, 160, 80, 40, 20, 10, 5, 16, 8, 4, 2, 1] 7 | end 8 | -------------------------------------------------------------------------------- /test/test_stochastic_gradient_descent.jl: -------------------------------------------------------------------------------- 1 | @testset "Stochastic gradient descent" begin 2 | Base.:*(δη::Decay, x) = x/sqrt(δη.i+=1) 3 | 4 | loss_squared(x, y, 𝐰, φ) = (𝐰⋅φ(x) - y)^2 5 | mean_loss(𝐰, 𝒟train, φ, loss) = mean(loss(x, y, 𝐰, φ) for (x,y) ∈ 𝒟train) 6 | 7 | function test_stochastic_gradient_descent() 8 | 𝒟train = [([3,0.7],4), ([-1,0.3],3), ([-1,-3],0)] 9 | 𝐰_opt = stochastic_gradient_descent(𝒟train, x->x, ∇loss_squared; η=0.01) 10 | y_opt = mean_loss(𝐰_opt, 𝒟train, x->x, loss_squared) 11 | return (𝐰_opt, y_opt) 12 | end 13 | 14 | 𝐰, y = test_stochastic_gradient_descent() 15 | 16 | @test 𝐰 ≈ [0.8286227687981166, -0.07376395387093937] 17 | @test y ≈ 5.882922020275335 18 | end 19 | -------------------------------------------------------------------------------- /test/test_linear_regression.jl: -------------------------------------------------------------------------------- 1 | @testset "Linear regression 1D" begin 2 | 𝒟 = [(1,1), (2,3), (3,3), (4,4)] 3 | X = first.(𝒟) 4 | y = last.(𝒟) 5 | 6 | f = linear_regression(X,y) 7 | @test f(3) ≈ 3.2 8 | end 9 | 10 | @testset "Linear regression 2D" begin 11 | 𝒟 = [([1,1],1), ([2,2],3), ([3,3],3), ([4,4],4)] 12 | X = first.(𝒟) 13 | X = hcat(X...)' # Turn into matrix 14 | y = last.(𝒟) 15 | 16 | f = linear_regression(X,y) 17 | @test f([3,4]) ≈ 3.65 18 | end 19 | 20 | @testset "Linear regression (one-liner)" begin 21 | import BeautifulAlgorithms.linear_regression 22 | include(joinpath(@__DIR__, "..", "src", "linear_regression_one_liner.jl")) 23 | 𝒟 = [(1,1), (2,3), (3,3), (4,4)] 24 | X = 
first.(𝒟) 25 | y = last.(𝒟) 26 | 27 | f = linear_regression(X,y) 28 | @test f(3) ≈ 3.2 29 | end 30 | -------------------------------------------------------------------------------- /test/test_nearest_neighbor.jl: -------------------------------------------------------------------------------- 1 | @testset "Nearest neighbor" begin 2 | function test_nearest_neighbor() 3 | 𝒟 = [([5,9],6), 4 | ([5,5],7), 5 | ([7,5],8), 6 | ([9,9],10)] 7 | φ = x->x 8 | 9 | points = [[6.1,6.5], [9,6.5]] 10 | 11 | neighbor_manhattan = [nearest_neighbor(p, φ, 𝒟, dist_manhattan) for p in points] 12 | @test neighbor_manhattan == [8, 10] 13 | 14 | neighbor_euclidean = [nearest_neighbor(p, φ, 𝒟, dist_euclidean) for p in points] 15 | @test neighbor_euclidean == [8, 8] 16 | 17 | neighbor_supremum = [nearest_neighbor(p, φ, 𝒟, dist_supremum) for p in points] 18 | @test neighbor_supremum == [7, 8] 19 | 20 | @test nearest_neighbor(0, x->x, [(0,0)], dist_manhattan) == 0 21 | end 22 | 23 | test_nearest_neighbor() 24 | end 25 | -------------------------------------------------------------------------------- /test/test_neural_network.jl: -------------------------------------------------------------------------------- 1 | @testset "Two-layer neural network" begin 2 | function test_neural_network(g=σ) 3 | x = 2 4 | φ = x -> [x, x^2, sqrt(abs(x))] 5 | 𝐕 = [[2,-1,3], [3,0,1]] 6 | 𝐰 = [+1, -1] 7 | neural_network(x, 𝐕, 𝐰, φ, g) 8 | end 9 | 10 | @test test_neural_network(σ) ≈ -0.013563772681566943 11 | @test test_neural_network(ReLU) ≈ -3.1715728752538093 12 | 13 | @test σ(0) == 0.5 14 | @test ReLU(1) == 1 15 | @test ReLU(-1) == 0 16 | end 17 | 18 | @testset "Neural network (one-liner)" begin 19 | import BeautifulAlgorithms.neural_network 20 | include(joinpath(@__DIR__, "..", "src", "neural_network_one_liner.jl")) 21 | x = 2 22 | φ = x -> [x, x^2, sqrt(abs(x))] 23 | 𝐕 = [[2,-1,3], [3,0,1]] 24 | 𝐰 = [+1, -1] 25 | @test neural_network(x, 𝐕, 𝐰, φ, ReLU) ≈ -3.1715728752538093 26 | end -------------------------------------------------------------------------------- /Project.toml: -------------------------------------------------------------------------------- 1 | name = "BeautifulAlgorithms" 2 | uuid = "00354967-3942-5f87-bd19-a45bc2403154" 3 | authors = ["Robert Moss "] 4 | version = "0.1.0" 5 | 6 | [deps] 7 | Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 10 | Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 11 | Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" 12 | 13 | [compat] 14 | Distributions = "0.17,0.18,0.19,0.20,0.21,0.22,0.23" 15 | julia = "1" 16 | 17 | [extras] 18 | Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" 19 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 20 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 21 | Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" 22 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 23 | 24 | [targets] 25 | test = ["Distributions", "LinearAlgebra", "Random", "Statistics", "Test"] 26 | -------------------------------------------------------------------------------- /test/test_k_nearest_neighbors.jl: -------------------------------------------------------------------------------- 1 | @testset "K-nearest neighbors" begin 2 | function test_k_nearest_neighbors() 3 | 𝒟 = [([5,9],6), 4 | ([5,5],7), 5 | ([7,5],8), 6 | ([9,9],10)] 7 | φ = x->x 8 | 9 | points = [[6.1,6.5], [9,6.5]] 10 | 11 | neighbor_euclidean_k1 = [k_nearest_neighbors(p, φ, 𝒟, dist_euclidean, 1) 
for p in points] 12 | @test neighbor_euclidean_k1 == [[8], [8]] 13 | 14 | neighbor_euclidean_k2 = [k_nearest_neighbors(p, φ, 𝒟, dist_euclidean, 2) for p in points] 15 | @test neighbor_euclidean_k2 == [[8,7], [8,10]] 16 | 17 | neighbor_euclidean_k3 = [k_nearest_neighbors(p, φ, 𝒟, dist_euclidean, 3) for p in points] 18 | @test neighbor_euclidean_k3 == [[8,7,6], [8,10,7]] 19 | 20 | neighbor_euclidean_k4 = [k_nearest_neighbors(p, φ, 𝒟, dist_euclidean, 4) for p in points] 21 | @test neighbor_euclidean_k4 == [[8,7,6,10], [8,10,7,6]] 22 | end 23 | 24 | test_k_nearest_neighbors() 25 | end 26 | -------------------------------------------------------------------------------- /test/test_particle_filter.jl: -------------------------------------------------------------------------------- 1 | @testset "Particle filter" begin 2 | using Random 3 | Random.seed!(228) 4 | 5 | # POMDP setup 6 | γ = 0.95 7 | 𝒮 = -10:10 8 | 𝒜 = Normal(0, 1) 9 | 𝒪 = Uniform(-10, 10) 10 | transition = (s,a) -> clamp(s+a, minimum(𝒮), maximum(𝒮)) 11 | T = (s,a) -> Normal(transition(s,a), abs(a)) 12 | R = (s,a) -> 4 < s < 6 ? 100 : 0 13 | observation = (s′,a) -> Normal(s′, abs(a)) 14 | O = (a,s′,o) -> pdf(observation(s′,a), o) 15 | 𝒫 = POMDP(γ, 𝒮, 𝒜, 𝒪, T, R, O) 16 | 17 | # Particle filter updating with random 1D walking agent 18 | belief = rand(𝒮, 1000) 19 | o = rand(𝒪) 20 | s = o 21 | for i in 1:2000 22 | a = rand(𝒜) 23 | s = transition(s,a) 24 | o = rand(observation(s,a)) 25 | belief = particle_filter(belief, 𝒫, a, o) 26 | μ_b = mean(belief) 27 | σ_b = std(belief) 28 | belief_error = abs(μ_b - s) 29 | @test (μ_b-3σ_b ≤ s ≤ μ_b+3σ_b) || belief_error ≤ 1.0 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /src/loss_functions.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra 2 | 3 | 𝕀(b) = b ? 1 : 0 4 | σ(z) = 1/(1 + exp(-z)) 5 | 6 | ŷ(x, 𝐰, φ, g=𝐱->𝐱) = g(𝐰⋅φ(x)) 7 | margin(x, y, 𝐰, φ, g=𝐱->𝐱) = ŷ(x, 𝐰, φ, g)*y 8 | residual(x, y, 𝐰, φ, g=𝐱->𝐱) = ŷ(x, 𝐰, φ, g) - y 9 | 10 | loss_01(x, y, 𝐰, φ) = 𝕀(margin(x, y, 𝐰, φ) ≤ 0) 11 | loss_absdev(x, y, 𝐰, φ) = abs(residual(x, y, 𝐰, φ)) 12 | loss_squared(x, y, 𝐰, φ) = residual(x, y, 𝐰, φ)^2 13 | loss_hinge(x, y, 𝐰, φ) = max(1 - margin(x, y, 𝐰, φ), 0) 14 | loss_logistic(x, y, 𝐰, φ) = log(1 + exp(-margin(x, y, 𝐰, φ))) 15 | loss_cross_entropy(x, y, 𝐰, φ) = -(y*log(ŷ(x, 𝐰, φ, σ)) + (1-y)*log(1-ŷ(x, 𝐰, φ, σ))) 16 | 17 | ∇loss_absdev(x, y, 𝐰, φ) = φ(x)*residual(x, y, 𝐰, φ) / abs(residual(x, y, 𝐰, φ)) 18 | ∇loss_squared(x, y, 𝐰, φ) = 2residual(x, y, 𝐰, φ)*φ(x) 19 | ∇loss_hinge(x, y, 𝐰, φ) = margin(x, y, 𝐰, φ) < 1 ? 
-φ(x)*y : 0 20 | ∇loss_logistic(x, y, 𝐰, φ) = -φ(x)*y / (exp(margin(x, y, 𝐰, φ)) + 1) 21 | ∇loss_cross_entropy(x, y, 𝐰, φ) = φ(x) .⋅ residual(x, y, 𝐰, φ, σ)' -------------------------------------------------------------------------------- /test/test_multi_layer_neural_network.jl: -------------------------------------------------------------------------------- 1 | include("../src/multi_layer_neural_network.jl") 2 | using BeautifulAlgorithms 3 | using Test 4 | @testset "Multi-layer neural network" begin 5 | function test_two_layer_neural_network(𝐠=[σ]) 6 | x = 2 7 | φ = x -> [x, x^2, sqrt(abs(x))] 8 | 𝐕 = [[2,-1,3], [3,0,1]] 9 | 𝐰 = [+1, -1] 10 | 𝐖 = [𝐕, 𝐰] 11 | multi_layer_neural_network(x, 𝐖, φ, 𝐠) 12 | end 13 | 14 | function test_five_layer_neural_network(𝐠) 15 | x = 2 16 | φ = x -> [x, x^2, sqrt(abs(x))] 17 | 𝐕₁ = [[2,-1,3], [3,0,1], [7,5,3]] 18 | 𝐕₂ = [[6,5,9], [3,3,3]] 19 | 𝐕₃ = [[6,5], [3,3], [3,3], [3,3], [3,3]] 20 | 𝐕₄ = [[1,2,3,4,5], [6,7,8,9,0]] 21 | 𝐰 = [+1, -1] 22 | 𝐖 = [𝐕₁, 𝐕₂, 𝐕₃, 𝐕₄, 𝐰] 23 | multi_layer_neural_network(x, 𝐖, φ, 𝐠) 24 | end 25 | 26 | @test test_two_layer_neural_network([σ]) ≈ -0.013563772681566943 27 | @test test_two_layer_neural_network([ReLU]) ≈ -3.1715728752538093 28 | @test test_five_layer_neural_network([σ,σ,σ,σ]) ≈ -3.1668639943749355e-7 29 | end 30 | -------------------------------------------------------------------------------- /src/em_algorithm.jl: -------------------------------------------------------------------------------- 1 | using Distributions, LinearAlgebra 2 | 3 | function e_step(θ, x) 4 | φ, μ, Σ = θ.φ, θ.μ, θ.Σ 5 | n, k = length(x), length(φ) 6 | w = Matrix{Real}(undef, n, k) 7 | for i in 1:n 8 | for j in 1:k 9 | normalization = sum(pdf(MvNormal(μ[l], Σ[l]), x[i]) * φ[l] for l in 1:k) 10 | w[i,j] = (pdf(MvNormal(μ[j], Σ[j]), x[i]) * φ[j]) / normalization 11 | end 12 | end 13 | return w 14 | end 15 | 16 | function m_step!(θ, w, x) 17 | φ, μ, Σ = θ.φ, θ.μ, θ.Σ 18 | n, k = length(x), length(φ) 19 | for j in 1:k 20 | sum_w = sum(w[i,j] for i in 1:n) 21 | φ[j] = 1/n * sum_w 22 | μ[j] = sum(w[i,j]*x[i] for i in 1:n) / sum_w 23 | Σ[j] = sum(w[i,j]*(x[i]-μ[j])*(x[i]-μ[j])' for i in 1:n) / sum_w |> Hermitian 24 | end 25 | return θ 26 | end 27 | 28 | function em_algorithm!(x, θ; tol=eps(Float32)) 29 | while true 30 | θ₋₁ = deepcopy(θ) 31 | w = e_step(θ, x) 32 | m_step!(θ, w, x) 33 | all([norm(θ₋₁.μ - θ.μ), norm(θ₋₁.Σ - θ.Σ), norm(θ₋₁.φ - θ.φ)] .< tol) && break 34 | end 35 | return θ 36 | end 37 | 38 | classify(xᵢ, θ) = argmax([pdf(MvNormal(θ.μ[j], θ.Σ[j]), xᵢ) for j in 1:length(θ.μ)]) -------------------------------------------------------------------------------- /test/test_loss_functions.jl: -------------------------------------------------------------------------------- 1 | @testset "Loss functions" begin 2 | x_vec = [2.3, 1.2] 3 | 𝐰_vec = [0.8, 1.1] 4 | x_scalar = 2.3 5 | 𝐰_scalar = 3 6 | y = 0.5 7 | φ = x->x 8 | 9 | @test loss_01(x_scalar, y, 𝐰_scalar, φ) == 0 10 | 11 | for (x, 𝐰) in zip([x_vec, x_scalar], [𝐰_vec, 𝐰_scalar]) 12 | for diff_method in [forward_difference, central_difference, backward_difference, complex_difference] 13 | iscomplex::Bool = diff_method == complex_difference 14 | @test iscomplex || isapprox(abs(sum(∇loss_absdev(x, y, 𝐰, φ))), abs(diff_method(𝐰ᵢ -> loss_absdev(x, y, 𝐰ᵢ, φ), 𝐰)), atol=1e-2) 15 | @test isapprox(abs(sum(∇loss_squared(x, y, 𝐰, φ))), abs(diff_method(𝐰ᵢ -> loss_squared(x, y, 𝐰ᵢ, φ), 𝐰)), atol=1e-2) 16 | @test iscomplex || isapprox(abs(sum(∇loss_hinge(x, y, 𝐰, φ))), abs(diff_method(𝐰ᵢ -> loss_hinge(x, y, 𝐰ᵢ, 
φ), 𝐰)), atol=1e-2) 17 | @test isapprox(abs(sum(∇loss_logistic(x, y, 𝐰, φ))), abs(diff_method(𝐰ᵢ -> loss_logistic(x, y, 𝐰ᵢ, φ), 𝐰)), atol=1e-2) 18 | @test isapprox(abs(sum(∇loss_cross_entropy(x, y, 𝐰, φ))), abs(diff_method(𝐰ᵢ -> loss_cross_entropy(x, y, 𝐰ᵢ, φ), 𝐰)), atol=1e-2) 19 | end 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /test/test_huffman_coding.jl: -------------------------------------------------------------------------------- 1 | @testset "Huffman coding" begin 2 | # Huffman encoding 3 | encode(s::String) = join(map(c->encode(c, N), collect(s))) 4 | encode(c::Char, N, v="") = encode(c, N.left, v*'0') * encode(c, N.right, v*'1') 5 | encode(c::Char, N::Char, v) = N == c ? v : "" 6 | 7 | # Huffman decoding 8 | function decode(s::String, N, v="", next=N) 9 | for c in s 10 | next = (next isa Char) ? N : next 11 | next = (c == '0') ? next.left : next.right 12 | v *= (next isa Char) ? next : "" 13 | end 14 | return v 15 | end 16 | 17 | C = collect('a':'f') 18 | F = [0.45, 0.13, 0.12, 0.16, 0.09, 0.05] 19 | @assert sum(F) == 1.0 20 | N = huffman_coding(C, F) 21 | 22 | # 'a' => "0" 23 | # 'b' => "101" 24 | # 'c' => "100" 25 | # 'd' => "111" 26 | # 'e' => "1101" 27 | # 'f' => "1100" 28 | 29 | @test encode("aaaadb") == "0000111101" 30 | @test decode(encode("abcdef"), N) == "abcdef" 31 | @test decode("0", N) == "a" 32 | @test decode("101", N) == "b" 33 | @test decode("100", N) == "c" 34 | @test decode("111", N) == "d" 35 | @test decode("1101", N) == "e" 36 | @test decode("1100", N) == "f" 37 | @test decode("110", N) == "" 38 | end -------------------------------------------------------------------------------- /test/runtests.jl: -------------------------------------------------------------------------------- 1 | using Test 2 | using BeautifulAlgorithms 3 | 4 | include("test_gradient_descent.jl") 5 | include("test_stochastic_gradient_descent.jl") 6 | include("test_neural_network.jl") 7 | include("test_multi_layer_neural_network.jl") 8 | include("test_loss_functions.jl") 9 | include("test_distance_functions.jl") 10 | include("test_nearest_neighbor.jl") 11 | include("test_k_nearest_neighbors.jl") 12 | include("test_k_means_clustering.jl") 13 | include("test_em_algorithm.jl") 14 | include("test_linear_regression.jl") 15 | include("test_ridge_regression.jl") 16 | include("test_basis_regression.jl") 17 | include("test_radial_basis_regression.jl") 18 | include("test_logistic_regression.jl") 19 | include("test_cross_entropy_method.jl") 20 | include("test_finite_difference_methods.jl") 21 | include("test_simulated_annealing.jl") 22 | include("test_twiddle.jl") 23 | include("test_newtons_method.jl") 24 | include("test_gaussian_process.jl") 25 | include("test_gaussian_process_kernels.jl") 26 | include("test_thompson_sampling.jl") 27 | include("test_particle_filter.jl") 28 | include("test_value_iteration.jl") 29 | include("test_branch_and_bound.jl") 30 | include("test_monte_carlo_tree_search.jl") 31 | include("test_huffman_coding.jl") 32 | include("test_hailstone.jl") 33 | include("test_bubble_sort!.jl") 34 | include("test_merge_sort.jl") 35 | include("test_insertion_sort!.jl") 36 | include("test_bogo_sort!.jl") 37 | include("test_quine.jl") 38 | -------------------------------------------------------------------------------- /src/monte_carlo_tree_search.jl: -------------------------------------------------------------------------------- 1 | struct MDPᴳ γ; 𝒮; 𝒜; T; R; G end 2 | 3 | struct MonteCarloTreeSearch 4 | 𝒫::MDPᴳ # problem with 
generative model 5 | N # visit counts 6 | Q # action value estimates 7 | d # depth 8 | k_max # number of simulations 9 | c # exploration constant 10 | π # rollout policy 11 | end 12 | 13 | function (π::MonteCarloTreeSearch)(s) 14 | for k in 1:π.k_max 15 | simulate!(π, s) 16 | end 17 | return argmax([π.Q[(s,a)] for a in π.𝒫.𝒜]) 18 | end 19 | 20 | function simulate!(π::MonteCarloTreeSearch, s, d=π.d) 21 | if d ≤ 0 22 | return 0.0 23 | end 24 | (𝒫, N, Q, c) = (π.𝒫, π.N, π.Q, π.c) 25 | (𝒜, G, γ) = (𝒫.𝒜, 𝒫.G, 𝒫.γ) 26 | if !haskey(N, (s, first(𝒜))) 27 | for a in 𝒜 28 | N[(s,a)] = 0 29 | Q[(s,a)] = 0.0 30 | end 31 | return rollout(𝒫, s, π.π, d) 32 | end 33 | a = explore(π, s) 34 | s′, r = G(s, a) 35 | q = r + γ*simulate!(π, s′, d-1) 36 | N[(s,a)] += 1 37 | Q[(s,a)] += (q-Q[(s,a)])/N[(s,a)] 38 | return q 39 | end 40 | 41 | function explore(π::MonteCarloTreeSearch, s) 42 | (𝒜, N, Q, c) = (π.𝒫.𝒜, π.N, π.Q, π.c) 43 | Ns = sum(N[(s,a)] for a in 𝒜) 44 | Ns = (Ns == 0) ? Inf : Ns 45 | return 𝒜[argmax([Q[(s,a)] + c*sqrt(log(Ns)/N[(s,a)]) for a in 𝒜])] 46 | end 47 | 48 | function rollout(𝒫, s, π, d) 49 | if d ≤ 0 50 | return 0.0 51 | end 52 | a = π(s) 53 | s′, r = 𝒫.G(s, a) 54 | return r + 𝒫.γ*rollout(𝒫, s′, π, d-1) 55 | end -------------------------------------------------------------------------------- /test/test_em_algorithm.jl: -------------------------------------------------------------------------------- 1 | using Random 2 | using Distributions 3 | 4 | @testset "The EM algorithm" begin 5 | Random.seed!(1) 6 | 7 | φ = [0.2, 0.8] # ф (see: https://en.wikipedia.org/wiki/Phi) 8 | z = Multinomial(1, φ) 9 | x_z₁ = MvNormal([1, 1], [2 0; 0 2]) 10 | x_z₂ = MvNormal([4, 4], [1 0.5; 0.5 1]) 11 | n = 1000 12 | 13 | x₁_samples = [] 14 | x₂_samples = [] 15 | for i in 1:n 16 | # Sample from Multinomial to determine which Gaussian to pick 17 | if rand(z)[1] == 1 18 | push!(x₁_samples, rand(x_z₁)) # Sample from (x | z = 1) 19 | else 20 | push!(x₂_samples, rand(x_z₂)) # Sample from (x | z = 2) 21 | end 22 | end 23 | 24 | permutation = randperm(n) 25 | x = vcat(x₁_samples, x₂_samples)[permutation] 26 | y = vcat(ones(length(x₁_samples)), 2ones(length(x₂_samples)))[permutation] 27 | 28 | # Implied k=2 29 | φ̂ = [0.5, 0.5] 30 | μ̂ = [randn(2), randn(2)] 31 | Σ̂ = [randn(2,2), randn(2,2)] 32 | 33 | # Ensure symmetric, PSD, and Hermitian 34 | Σ̂[1] = Hermitian(Σ̂[1]'Σ̂[1] + I) 35 | Σ̂[2] = Hermitian(Σ̂[2]'Σ̂[2] + I) 36 | 37 | # Full parameters 38 | θ = (φ=φ̂, μ=μ̂, Σ=Σ̂) 39 | 40 | # Run EM algorithm 41 | θ = em_algorithm!(x, θ) 42 | ŷ = map(xᵢ -> classify(xᵢ, θ), x) 43 | accuracy = sum(y .== ŷ) / length(y) 44 | 45 | @test abs(θ.φ[1] - φ[1]) < 1e-2 46 | @test abs(θ.μ[2][1] - x_z₂.μ[1]) < 1e-1 47 | @test abs(θ.Σ[2][1] - x_z₂.Σ[1]) < 1e-1 48 | @test accuracy == 0.952 49 | end 50 | -------------------------------------------------------------------------------- /test/test_gradient_descent.jl: -------------------------------------------------------------------------------- 1 | @testset "Gradient descent" begin 2 | using Statistics 3 | using LinearAlgebra 4 | 5 | mutable struct Decay i end 6 | Base.:*(δη::Decay, x) = x/sqrt(δη.i+=1) 7 | 8 | loss_squared(x, y, 𝐰, φ) = (𝐰⋅φ(x) - y)^2 9 | mean_loss(𝐰, 𝒟train, φ, loss) = mean(loss(x, y, 𝐰, φ) for (x,y) ∈ 𝒟train) 10 | 11 | """ 12 | Single-dimensional training input data. 
13 | """ 14 | function test_gradient_descent() 15 | 𝒟train = [(3,4), (-1,3), (-1,0)] 16 | 𝐰_opt = gradient_descent(𝒟train, x->x, ∇loss_squared) 17 | y_opt = mean_loss(𝐰_opt, 𝒟train, x->x, loss_squared) 18 | return (𝐰_opt, y_opt) 19 | end 20 | 21 | """ 22 | Decay learning rate η. 23 | """ 24 | function test_gradient_descent_decay(T) 25 | 𝒟train = [(3,4), (-1,3), (-1,0)] 26 | 𝐰_opt = gradient_descent(𝒟train, x->x, ∇loss_squared; η=Decay(0), T=T) 27 | y_opt = mean_loss(𝐰_opt, 𝒟train, x->x, loss_squared) 28 | return (𝐰_opt, y_opt) 29 | end 30 | 31 | """ 32 | Multi-dimensional training data input. 33 | """ 34 | function test_gradient_descent_multi() 35 | 𝒟train = [([3,0.7],4), ([-1,0.3],3), ([-1,-3],0)] 36 | 𝐰_opt = gradient_descent(𝒟train, x->x, ∇loss_squared) 37 | y_opt = mean_loss(𝐰_opt, 𝒟train, x->x, loss_squared) 38 | return (𝐰_opt, y_opt) 39 | end 40 | 41 | 𝐰, y = test_gradient_descent() 42 | @test 𝐰 ≈ [0.8181818181818182] 43 | @test y ≈ 5.878787878787879 44 | 45 | 𝐰, y = test_gradient_descent_decay(30) 46 | @test 𝐰 ≈ [0.41794205540127405] 47 | @test y ≈ 6.466158060393507 48 | 49 | 𝐰, y = test_gradient_descent_multi() 50 | @test 𝐰 ≈ [0.8314306533883896, -0.03036191401505953] 51 | @test y ≈ 5.876487733786738 52 | end 53 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | # Run on master, tags, or any pull request 4 | on: 5 | push: 6 | branches: [master] 7 | tags: ["*"] 8 | pull_request: 9 | 10 | jobs: 11 | 12 | # unit tests with coverage 13 | test: 14 | name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} 15 | runs-on: ${{ matrix.os }} 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | version: 20 | - "1.6" 21 | os: 22 | - windows-latest 23 | - ubuntu-latest 24 | - macOS-latest 25 | arch: 26 | - x64 27 | steps: 28 | 29 | # check out the project and install Julia 30 | - uses: actions/checkout@v2 31 | - uses: julia-actions/setup-julia@v1 32 | with: 33 | version: ${{ matrix.version }} 34 | arch: ${{ matrix.arch }} 35 | 36 | # using a cache can speed up execution times 37 | - uses: actions/cache@v2 38 | env: 39 | cache-name: cache-artifacts 40 | with: 41 | path: ~/.julia/artifacts 42 | key: ${{ runner.os }}-${{ matrix.arch }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} 43 | restore-keys: | 44 | ${{ runner.os }}-${{ matrix.arch }}-test-${{ env.cache-name }}- 45 | ${{ runner.os }}-${{ matrix.arch }}-test- 46 | ${{ runner.os }}-${{ matrix.arch }}- 47 | ${{ runner.os }}- 48 | 49 | # build the dependencies, run the tests, and upload coverage results 50 | - uses: julia-actions/julia-buildpkg@latest 51 | - run: | 52 | git config --global user.name Tester 53 | git config --global user.email te@st.er 54 | - name: Run Tests 55 | uses: julia-actions/julia-runtest@latest 56 | with: 57 | inline: 'no' 58 | - uses: julia-actions/julia-processcoverage@v1 59 | - uses: codecov/codecov-action@v1 60 | with: 61 | file: ./lcov.info 62 | flags: unittests 63 | name: codecov-umbrella -------------------------------------------------------------------------------- /src/BeautifulAlgorithms.jl: -------------------------------------------------------------------------------- 1 | module BeautifulAlgorithms 2 | 3 | export gradient_descent 4 | include("gradient_descent.jl") 5 | 6 | export stochastic_gradient_descent 7 | include("stochastic_gradient_descent.jl") 8 | 9 | export neural_network, ReLU 10 | 
include("neural_network.jl") 11 | 12 | export multi_layer_neural_network 13 | include("multi_layer_neural_network.jl") 14 | 15 | export 𝕀, σ, ŷ, margin, residual, loss_01, loss_absdev, loss_squared, loss_hinge, loss_logistic, loss_cross_entropy, ∇loss_absdev, ∇loss_squared, ∇loss_hinge, ∇loss_logistic, ∇loss_cross_entropy 16 | include("loss_functions.jl") 17 | 18 | export dist_manhattan, dist_euclidean, dist_supremum 19 | include("distance_functions.jl") 20 | 21 | export nearest_neighbor, dist_manhattan, dist_euclidean, dist_supremum 22 | include("nearest_neighbor.jl") 23 | 24 | export k_nearest_neighbors 25 | include("k_nearest_neighbors.jl") 26 | 27 | export k_means_clustering 28 | include("k_means_clustering.jl") 29 | 30 | export e_step, m_step!, em_algorithm!, classify 31 | include("em_algorithm.jl") 32 | 33 | export linear_regression 34 | include("linear_regression.jl") 35 | 36 | export ridge_regression 37 | include("ridge_regression.jl") 38 | 39 | export basis_regression 40 | include("basis_regression.jl") 41 | 42 | export radial_basis_regression 43 | include("radial_basis_regression.jl") 44 | 45 | export logistic_regression, predict 46 | include("logistic_regression.jl") 47 | 48 | export cross_entropy_method 49 | include("cross_entropy_method.jl") 50 | 51 | export forward_difference, central_difference, backward_difference, complex_difference 52 | include("finite_difference_methods.jl") 53 | 54 | export simulated_annealing 55 | include("simulated_annealing.jl") 56 | 57 | export twiddle 58 | include("twiddle.jl") 59 | 60 | export newtons_method 61 | include("newtons_method.jl") 62 | 63 | export k_sqr_exp, k_exp, k_gamma_exp, k_rat_quad, k_nn, k_brownian, m_zero 64 | include("gaussian_process_kernels.jl") 65 | 66 | export GaussianProcess 67 | include("gaussian_process.jl") 68 | 69 | export thompson_sampling 70 | include("thompson_sampling.jl") 71 | 72 | export particle_filter, POMDP 73 | include("particle_filter.jl") 74 | 75 | export value_iteration, policy, lookahead, MDP 76 | include("value_iteration.jl") 77 | 78 | # suppress MDP redefinition 79 | module BranchAndBound include("branch_and_bound.jl") end 80 | import .BranchAndBound: branch_and_bound 81 | export branch_and_bound, BranchAndBound 82 | 83 | export MonteCarloTreeSearch, MDPᴳ, simulate!, explore, rollout 84 | include("monte_carlo_tree_search.jl") 85 | 86 | export huffman_coding 87 | include("huffman_coding.jl") 88 | 89 | export hailstone 90 | include("hailstone.jl") 91 | 92 | export bubble_sort! 93 | include("bubble_sort!.jl") 94 | 95 | export merge_sort, merge 96 | include("merge_sort.jl") 97 | 98 | export insertion_sort! 99 | include("insertion_sort!.jl") 100 | 101 | export bogo_sort! 102 | include("bogo_sort!.jl") 103 | 104 | end # module -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BeautifulAlgorithms.jl 2 | [![Build Status](https://github.com/mossr/BeautifulAlgorithms.jl/workflows/CI/badge.svg)](https://github.com/mossr/BeautifulAlgorithms.jl/actions) 3 | [![codecov](https://codecov.io/github/mossr/BeautifulAlgorithms.jl/coverage.svg?branch=master)](https://codecov.io/gh/mossr/BeautifulAlgorithms.jl) 4 | 5 | Concise algorithms written in Julia and formatted with [Carbon](https://carbon.now.sh/). 6 | 7 | Algorithms for machine learning, optimization, reinforcement learning, online planning, decision making under uncertainty, and sorting. 
All implementations are working and self-contained; refer to the [test cases](./test/). 8 | 9 | > _Note, these are primarily for academic purposes and are not designed for real-world usage. There are many other Julia packages that implement more sound versions of these algorithms._ 10 | 11 | ```julia 12 | ] add https://github.com/mossr/BeautifulAlgorithms.jl 13 | ``` 14 | 15 | - [Gradient descent](#gradient-descent) 16 | - [Stochastic gradient descent](#stochastic-gradient-descent) 17 | - [Two-layer neural network](#two-layer-neural-network) 18 | - [Two-layer neural network (one-liner)](#two-layer-neural-network-one-liner) 19 | - [Multi-layer neural network](#multi-layer-neural-network) 20 | - [Loss functions](#loss-functions) 21 | - [Distance functions](#distance-functions) 22 | - [Nearest neighbor](#nearest-neighbor) 23 | - [K-nearest neighbors](#k-nearest-neighbors) 24 | - [K-means clustering](#k-means-clustering) 25 | - [The EM algorithm](#the-em-algorithm) 26 | - [Linear regression](#linear-regression) 27 | - [Linear regression (one-liner)](#linear-regression-one-liner) 28 | - [Ridge regression](#ridge-regression) 29 | - [Basis regression](#basis-regression) 30 | - [Radial basis regression](#radial-basis-regression) 31 | - [Logistic regression](#logistic-regression) 32 | - [Cross-entropy method](#cross-entropy-method) 33 | - [Finite difference methods](#finite-difference-methods) 34 | - [Simulated annealing](#simulated-annealing) 35 | - [Twiddle](#twiddle) 36 | - [Newton's method](#newtons-method) 37 | - [Gaussian process](#gaussian-process) 38 | - [Gaussian process kernels](#gaussian-process-kernels) 39 | - [Thompson sampling](#thompson-sampling) 40 | - [Particle filter](#particle-filter) 41 | - [Value iteration](#value-iteration) 42 | - [Branch and bound](#branch-and-bound) 43 | - [Monte Carlo tree search](#monte-carlo-tree-search) 44 | - [Huffman coding](#huffman-coding) 45 | - [Hailstone sequence (Collatz conjecture)](#hailstone-sequence-collatz-conjecture) 46 | - [Bubble sort](#bubble-sort) 47 | - [Merge sort](#merge-sort) 48 | - [Insertion sort](#insertion-sort) 49 | - [Bogo sort](#bogo-sort) 50 | - [Bogo sort (one-liner)](#bogo-sort-one-liner) 51 | - [Quine](#quine) 52 | 53 | *Note: Algorithms are modified from their original sources.* 54 | 55 | ## Gradient descent 56 | Percy Liang and Dorsa Sadigh, *Artificial Intelligence: Principles and Techniques*, Stanford University, 2019. 57 |
<p align="center"><img src="./img/png/gradient_descent.png"></p>
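For a text-searchable version of the idea, here is a minimal sketch consistent with the call signature exercised in [`test/test_gradient_descent.jl`](./test/test_gradient_descent.jl); the zero initialization and the defaults `η=0.1` and `T=100` are assumptions, not necessarily the repo's exact choices:

```julia
using Statistics: mean

# Batch gradient descent: repeatedly step the weights 𝐰 against the average
# loss gradient over the training set 𝒟train, under feature map φ.
# NOTE: sketch — initialization and defaults are assumed, not the repo's.
function gradient_descent(𝒟train, φ, ∇loss; η=0.1, T=100)
    𝐰 = zeros(length(φ(first(𝒟train)[1])))   # one weight per feature
    for _ in 1:T
        𝐰 = 𝐰 .- η*mean(∇loss(x, y, 𝐰, φ) for (x, y) in 𝒟train)
    end
    return 𝐰
end
```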
58 | 59 | 60 | ## Stochastic gradient descent 61 | Percy Liang and Dorsa Sadigh, *Artificial Intelligence: Principles and Techniques*, Stanford University, 2019. 62 |
<p align="center"><img src="./img/png/stochastic_gradient_descent.png"></p>
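A hedged sketch of the stochastic variant: the same loop, but updating `𝐰` after every training example rather than averaging the gradient over the whole set (the per-epoch shuffling and the defaults are assumptions):

```julia
using Random: shuffle

# Stochastic gradient descent: one shuffled pass over 𝒟train per epoch,
# stepping the weights on each individual example's loss gradient.
# NOTE: sketch — shuffling and defaults are assumed, not the repo's.
function stochastic_gradient_descent(𝒟train, φ, ∇loss; η=0.1, T=100)
    𝐰 = zeros(length(φ(first(𝒟train)[1])))
    for _ in 1:T
        for (x, y) in shuffle(𝒟train)
            𝐰 = 𝐰 .- η*∇loss(x, y, 𝐰, φ)
        end
    end
    return 𝐰
end
```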
63 | 64 | 65 | ## Two-layer neural network 66 |
<p align="center"><img src="./img/png/neural_network.png"></p>
67 | 68 | ##### Two-layer neural network (one-liner) 69 |
<p align="center"><img src="./img/png/neural_network_one_liner.png"></p>
70 | 71 | 72 | ## Multi-layer neural network 73 |
<p align="center"><img src="./img/png/multi_layer_neural_network.png"></p>
74 | 75 | 76 | ## Loss functions 77 |
<p align="center"><img src="./img/png/loss_functions.png"></p>
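`test/test_gradient_descent.jl` defines `loss_squared(x, y, 𝐰, φ) = (𝐰⋅φ(x) - y)^2`; the other exported losses follow the standard textbook forms for a linear predictor `𝐰⋅φ(x)`, sketched here (cosmetic details may differ from the repo):

```julia
using LinearAlgebra: ⋅

# Regression losses in terms of the residual 𝐰⋅φ(x) − y.
residual(x, y, 𝐰, φ)     = 𝐰⋅φ(x) - y
loss_squared(x, y, 𝐰, φ) = residual(x, y, 𝐰, φ)^2
loss_absdev(x, y, 𝐰, φ)  = abs(residual(x, y, 𝐰, φ))

# Classification losses in terms of the margin (𝐰⋅φ(x))y, for y ∈ {−1, +1}.
margin(x, y, 𝐰, φ)        = (𝐰⋅φ(x))*y
loss_01(x, y, 𝐰, φ)       = margin(x, y, 𝐰, φ) ≤ 0 ? 1 : 0   # misclassification indicator
loss_hinge(x, y, 𝐰, φ)    = max(1 - margin(x, y, 𝐰, φ), 0)
loss_logistic(x, y, 𝐰, φ) = log(1 + exp(-margin(x, y, 𝐰, φ)))
```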
78 | 79 | 80 | ## Distance functions 81 |
<p align="center"><img src="./img/png/distance_functions.png"></p>
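The three exported distances are the usual Minkowski-family metrics; a sketch (argument names assumed):

```julia
# ℓ₁, ℓ₂, and ℓ∞ distances between feature vectors.
# NOTE: sketch — names match the module's exports, argument names assumed.
dist_manhattan(𝐱, 𝐱′) = sum(abs.(𝐱 .- 𝐱′))
dist_euclidean(𝐱, 𝐱′) = sqrt(sum((𝐱 .- 𝐱′).^2))
dist_supremum(𝐱, 𝐱′)  = maximum(abs.(𝐱 .- 𝐱′))
```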
82 | 83 | 84 | ## Nearest neighbor 85 |
<p align="center"><img src="./img/png/nearest_neighbor.png"></p>
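Nearest neighbor classification reduces to an `argmin` over training-set distances; a minimal sketch (the argument order is an assumption):

```julia
# Predict the label of the training example whose features lie closest
# to the query x′ under the distance function dist.
# NOTE: sketch — the signature is assumed, not the repo's.
function nearest_neighbor(x′, φ, 𝒟train, dist)
    distances = [dist(φ(x), φ(x′)) for (x, y) in 𝒟train]
    return 𝒟train[argmin(distances)][end]   # that example's label
end
```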
86 | 87 | 88 | ## K-nearest neighbors 89 |
<p align="center"><img src="./img/png/k_nearest_neighbors.png"></p>
90 | 91 | 92 | ## K-means clustering 93 | Percy Liang and Dorsa Sadigh, *Artificial Intelligence: Principles and Techniques*, Stanford University, 2019. 94 |
<p align="center"><img src="./img/png/k_means_clustering.png"></p>
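A sketch of Lloyd's algorithm, the classic alternation behind k-means (the random initialization, iteration cap, and non-empty clusters are all assumptions):

```julia
using Statistics: mean

# Alternate between assigning each point to its nearest centroid and
# moving each centroid to the mean of its assigned points.
# NOTE: sketch — assumes every cluster stays non-empty.
function k_means_clustering(𝐗, k; T=100)
    μ = 𝐗[rand(1:length(𝐗), k)]      # random initial centroids
    z = ones(Int, length(𝐗))         # cluster assignments
    for _ in 1:T
        z = [argmin([sum((x .- μⱼ).^2) for μⱼ in μ]) for x in 𝐗]  # assign
        μ = [mean(𝐗[z .== j]) for j in 1:k]                       # update
    end
    return μ, z
end
```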
95 | 96 | 97 | ## The EM algorithm 98 | Andrew Ng, *Mixtures of Gaussians and the EM algorithm*, Stanford University, 2020.<sup>1</sup> 99 |
<p align="center"><img src="./img/png/em_algorithm.png"></p>
100 | 101 | 102 | ## Linear regression 103 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. (Credit [@HenriDeh](https://github.com/HenriDeh) for use of `ones`) 104 |
<p align="center"><img src="./img/png/linear_regression.png"></p>
105 | 106 | 107 | ##### Linear regression (one-liner) 108 |
<p align="center"><img src="./img/png/linear_regression_one_liner.png"></p>
109 | 110 | 111 | ## Ridge regression 112 |
<p align="center"><img src="./img/png/ridge_regression.png"></p>
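Ridge regression is linear regression with an ℓ₂ penalty `λ` on the weights, which turns the normal equations into `(𝐗ᵀ𝐗 + λI)θ = 𝐗ᵀy`; a sketch mirroring the bias-column trick from the linear regression section:

```julia
using LinearAlgebra: I

# Regularized least squares; λ shrinks the weights toward zero.
# NOTE: sketch — the repo's signature and bias handling may differ.
function ridge_regression(X, y, λ)
    𝐗 = [ones(size(y)) X]          # prepend a column of ones for the bias
    θ = (𝐗'𝐗 + λ*I) \ (𝐗'y)        # regularized normal equations
    return x -> [1; x]'θ           # predictor for a new input x
end
```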
113 | 114 | 115 | ## Basis regression 116 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 117 |
<p align="center"><img src="./img/png/basis_regression.png"></p>
118 | 119 | 120 | ## Radial basis regression 121 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 122 |
<p align="center"><img src="./img/png/radial_basis_regression.png"></p>
123 | 124 | 125 | ## Logistic regression 126 |
<p align="center"><img src="./img/png/logistic_regression.png"></p>
127 | 128 | 129 | ## Cross-entropy method 130 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 131 |
<p align="center"><img src="./img/png/cross_entropy_method.png"></p>
132 | 133 | 134 | ## Finite difference methods 135 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 136 |
<p align="center"><img src="./img/png/finite_difference_methods.png"></p>
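The four exported approximations of `f′(x)` in their textbook forms (the default step sizes are assumptions; the complex step avoids subtractive cancellation, so it tolerates a tiny `h`):

```julia
# Finite-difference approximations of the derivative of f at scalar x.
# NOTE: sketch — default step sizes h are assumed, not the repo's.
forward_difference(f, x; h=sqrt(eps(Float64)))  = (f(x + h) - f(x))/h
central_difference(f, x; h=cbrt(eps(Float64)))  = (f(x + h/2) - f(x - h/2))/h
backward_difference(f, x; h=sqrt(eps(Float64))) = (f(x) - f(x - h))/h
complex_difference(f, x; h=1e-20)               = imag(f(x + h*im))/h
```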
137 | 138 | 139 | ## Simulated annealing 140 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 141 |
<p align="center"><img src="./img/png/simulated_annealing.png"></p>
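A Metropolis-style sketch in the spirit of the cited book: sample a random neighbor, always accept improvements, and accept uphill moves with probability `e^(−Δy/t(k))` under a cooling schedule (the argument conventions here are assumptions):

```julia
# f: objective, x: initial design, T: perturbation distribution (rand(T)),
# t: cooling schedule t(k), k_max: iteration count.
# NOTE: sketch — argument conventions assumed, not the repo's.
function simulated_annealing(f, x, T, t, k_max)
    y = f(x)
    x_best, y_best = x, y
    for k in 1:k_max
        x′ = x + rand(T)                       # random neighbor
        y′ = f(x′)
        Δy = y′ - y
        if Δy ≤ 0 || rand() < exp(-Δy/t(k))    # Metropolis acceptance
            x, y = x′, y′
        end
        if y′ < y_best
            x_best, y_best = x′, y′            # track the incumbent best
        end
    end
    return x_best
end
```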
142 | 143 | 144 | ## Twiddle 145 | Sebastian Thrun, *Artificial Intelligence for Robotics*, Udacity, 2012. 146 |
<p align="center"><img src="./img/png/twiddle.png"></p>
147 | 148 | 149 | ## Newton's method 150 | John Wallis, *A Treatise of Algebra both Historical and Practical*, 1685. 151 |
<p align="center"><img src="./img/png/newtons_method.png"></p>
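The iteration itself is one line, `x ← x − f(x)/f′(x)`; a sketch with an assumed tolerance and iteration cap:

```julia
# Newton's root-finding method: follow the tangent of f at x to its zero.
# NOTE: sketch — the ϵ and k_max defaults are assumed, not the repo's.
function newtons_method(f, f′, x; ϵ=1e-10, k_max=100)
    for _ in 1:k_max
        abs(f(x)) < ϵ && return x
        x -= f(x)/f′(x)
    end
    return x
end
```

For example, `newtons_method(x -> x^2 - 2, x -> 2x, 1.0)` converges to `√2` in a handful of iterations.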
152 | 153 | 154 | ## Gaussian process 155 | Mykel J. Kochenderfer and Tim A. Wheeler, *Algorithms for Optimization*, MIT Press, 2019. 156 |
<p align="center"><img src="./img/png/gaussian_process.png"></p>
157 | 158 | 159 | ## Gaussian process kernels 160 |
<p align="center"><img src="./img/png/gaussian_process_kernels.png"></p>
161 | 162 | 163 | ## Thompson sampling 164 | Daniel J. Russo, Benjamin Van Roy, Abbas Kazerouni, Ian Osband, and Zheng Wen, *A Tutorial on Thompson Sampling*, arXiv:1707.02038, 2020. 165 |
<p align="center"><img src="./img/png/thompson_sampling.png"></p>
166 | 167 | 168 | ## Particle filter 169 | Mykel J. Kochenderfer, Tim A. Wheeler, and Kyle H. Wray, *Algorithms for Decision Making*, Preprint. 170 |
<p align="center"><img src="./img/png/particle_filter.png"></p>
171 | 172 | 173 | ## Value iteration 174 | Mykel J. Kochenderfer, Tim A. Wheeler, and Kyle H. Wray, *Algorithms for Decision Making*, Preprint. 175 |
<p align="center"><img src="./img/png/value_iteration.png"></p>
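A sketch of the Bellman-backup loop; the `MDP` field layout mirrors `MDPᴳ` in `src/monte_carlo_tree_search.jl` minus the generative model, and `T(s, a, s′)` is assumed to return a transition probability:

```julia
# MDP with discount γ, states 𝒮, actions 𝒜, transition model T, reward R.
# NOTE: sketch — field layout and signatures assumed, not the repo's.
struct MDP γ; 𝒮; 𝒜; T; R end

# One-step lookahead value of taking action a in state s under values U.
lookahead(𝒫::MDP, U, s, a) =
    𝒫.R(s, a) + 𝒫.γ*sum(𝒫.T(s, a, s′)*U[i] for (i, s′) in enumerate(𝒫.𝒮))

# Repeatedly apply the Bellman backup U(s) ← maxₐ lookahead(s, a).
function value_iteration(𝒫::MDP, k_max)
    U = [0.0 for _ in 𝒫.𝒮]
    for _ in 1:k_max
        U = [maximum(lookahead(𝒫, U, s, a) for a in 𝒫.𝒜) for s in 𝒫.𝒮]
    end
    return U
end

# Greedy policy with respect to a value function U.
policy(𝒫::MDP, U, s) = 𝒫.𝒜[argmax([lookahead(𝒫, U, s, a) for a in 𝒫.𝒜])]
```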
176 | 177 | 178 | ## Branch and bound 179 | Mykel J. Kochenderfer, Tim A. Wheeler, and Kyle H. Wray, *Algorithms for Decision Making*, Preprint. 180 |
<p align="center"><img src="./img/png/branch_and_bound.png"></p>
181 | 182 | 183 | ## Monte Carlo tree search 184 | Mykel J. Kochenderfer, Tim A. Wheeler, and Kyle H. Wray, *Algorithms for Decision Making*, Preprint. 185 |
<p align="center"><img src="./img/png/monte_carlo_tree_search.png"></p>
186 | 187 | 188 | ## Huffman coding 189 | David A. Huffman, *A Method for the Construction of Minimum-Redundancy Codes*, Proceedings of the IRE, 1952. 190 |
<p align="center"><img src="./img/png/huffman_coding.png"></p>
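The test suite decodes with a tree whose internal nodes expose `.left`/`.right` and whose leaves are plain `Char`s; a greedy sketch that builds such a tree (the `Node` name is an assumption) and reproduces the test's codebook, e.g. `'a' ⇒ "0"`:

```julia
# Greedily merge the two least-frequent subtrees until one tree remains.
# NOTE: sketch — Node is an assumed name; leaves are Chars as in the tests.
struct Node left; right end

function huffman_coding(C, F)
    queue = Any[zip(C, F)...]               # (subtree, frequency) pairs
    while length(queue) > 1
        sort!(queue, by=last)               # two rarest at the front
        (a, fa), (b, fb) = queue[1], queue[2]
        push!(queue, (Node(a, b), fa + fb)) # merge them under a new node
        deleteat!(queue, 1:2)
    end
    return first(queue[1])                  # root of the Huffman tree
end
```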
191 | 192 | 193 | ## Hailstone sequence (Collatz conjecture) 194 |
<p align="center"><img src="./img/png/hailstone.png"></p>
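The hailstone rule halves even numbers and sends odd `n` to `3n + 1`, stopping at 1 (the Collatz conjecture is that it always gets there); a sketch:

```julia
# Collect the hailstone (Collatz) sequence starting from n.
function hailstone(n)
    sequence = [n]
    while n != 1
        n = iseven(n) ? n ÷ 2 : 3n + 1
        push!(sequence, n)
    end
    return sequence
end
```

For example, `hailstone(7)` climbs as high as 52 before falling to 1.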
195 | 196 | 197 | ## Bubble sort 198 | Karey Shi, *Design and Analysis of Algorithms*, Stanford University, 2020. 199 |
<p align="center"><img src="./img/png/bubble_sort!.png"></p>
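A sketch of the in-place version: each pass bubbles the largest remaining element to the end by swapping adjacent out-of-order pairs, for O(n²) comparisons overall:

```julia
# In-place bubble sort; pass i leaves the last i elements in final position.
function bubble_sort!(X)
    for i in 1:length(X), j in 1:length(X)-i
        if X[j] > X[j+1]
            X[j], X[j+1] = X[j+1], X[j]    # swap the out-of-order pair
        end
    end
    return X
end
```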
200 | 201 | 202 | ## Merge sort 203 | Karey Shi, *Design and Analysis of Algorithms*, Stanford University, 2020. 204 |
<p align="center"><img src="./img/png/merge_sort.png"></p>
205 | 206 | 207 | ## Insertion sort 208 | Karey Shi, *Design and Analysis of Algorithms*, Stanford University, 2020. 209 |
<p align="center"><img src="./img/png/insertion_sort!.png"></p>
210 | 211 | 212 | ## Bogo sort 213 |
<p align="center"><img src="./img/png/bogo_sort!.png"></p>
214 | 215 | ##### Bogo sort (one-liner) 216 |
<p align="center"><img src="./img/png/bogo_sort!_one_liner.png"></p>
217 | 218 | 219 | ## Quine 220 | Nathan Daly, *Julia Discord*, 2019.<sup>2</sup> 221 |
<p align="center"><img src="./img/png/quine.png"></p>
222 | 223 | --- 224 | 225 | Written by [Robert Moss](https://github.com/mossr). 226 | --------------------------------------------------------------------------------