├── BierEtAl.lhs ├── BierEtAlChart.hs ├── BierEtAlMain.hs ├── Blog ├── About.htm ├── EstimatingChaotic.htm ├── Previous.htm ├── TestOnly.htm └── diagrams │ ├── sanzio_01_plato_aristotle.jpg │ ├── xpath.svg │ ├── xpath4.svg │ ├── xpath5.svg │ └── xpath7.svg ├── Brownian.lhs ├── BrownianChart.hs ├── Chapter1.lhs ├── Chapter1a.lhs ├── Chapter2.lhs ├── Chart.hs ├── Diagrams.hs ├── DynSys.bib ├── Estimating Parameters1.R ├── Estimating Parameters1.ipynb ├── Estimating Parameters1.md ├── Filtering.lhs ├── FilteringMain.hs ├── ForTrevor.hs ├── ForTrevor.jl ├── Frames ├── ChangeLog.md ├── LICENSE ├── ProductionData.cabal ├── Setup.hs ├── Text.hs └── stack.yaml ├── FunWithKalmanPart1.lhs ├── FunWithKalmanPart1a.lhs ├── HaskellX ├── ChangeLog.md ├── HaskellX.cabal ├── HaskellXchange.tex ├── LICENSE ├── Main.lhs ├── Setup.hs ├── Test.lhs ├── default.nix └── stack.yaml ├── Importance.lhs ├── Insurely.org ├── InsurelyII.org ├── JuliaCPU.jl ├── Kalman.bib ├── KalmanChart.hs ├── LorenzGenerate.bi ├── LorenzState.bi ├── Prediction ├── CHANGELOG.md ├── CustomSundials │ └── default.nix ├── Heat2D.hs ├── LICENSE ├── One Dimensional Heat Equation.html ├── One Dimensional Heat Equation.ipynb ├── Prediction.cabal ├── Prediction.lhs ├── Setup.hs ├── TBA.nix ├── Two Dimensional Heat Equation.hs ├── Two Dimensional Heat Equation.ipynb ├── ark_heat1D.c ├── ark_heat1D.lhs ├── default.nix ├── index.html ├── plot_heat1D.py ├── plot_heat1E.py ├── release.nix ├── shell.nix └── test.nix ├── Preface.org ├── README.md ├── Resampling.lhs ├── ResamplingChart.hs ├── ResamplingMain.hs ├── RunAccGPU.hs ├── Symplectic.lhs ├── SymplecticMain.hs ├── TSP.hs ├── Tribbles.lhs ├── TribblesMain.hs ├── build.hs ├── ghc-fabs-tex.tex ├── ghc-hacking.lhs ├── symplectic-integrators ├── Notes.lhs └── shell.nix ├── variational ├── ChangeLog.md ├── LICENSE ├── OldFaithful.R ├── Setup.hs ├── VBEMGMM │ ├── MyEllipse.m │ ├── README.txt │ ├── Test.m │ ├── Test.py │ ├── gmmVBEM.m │ ├── gmmVBEMdemo.m │ ├── gmmVBEMorig.m │ └── logsumexp.m ├── default.nix ├── src │ ├── Main.hs │ ├── Naperian.Orig.hs │ ├── Naperian.hs │ ├── OldFaithful.hs │ └── Variational.lhs └── variational.cabal └── weno ├── LV.Unscented.lhs ├── LV.lhs ├── Roman.hs ├── SparseSundials └── default.nix ├── WENO.lhs ├── minimal.nix └── shell.nix /BierEtAl.lhs: --------------------------------------------------------------------------------
1 | % A Dynamical System Example
2 | % Dominic Steinitz
3 | % 12th December 2015
4 |
5 | ---
6 | bibliography: Kalman.bib
7 | ---
8 |
9 | Introduction
10 | ============
11 |
12 | Apparently, under special conditions, the concentrations of the
13 | metabolites involved in yeast glycolysis can oscillate: see
14 | @Bier2000, which gives the following dynamical system.
15 |
16 | $$
17 | \begin{aligned}
18 | \frac{\mathrm{d}[G]}{\mathrm{d}t} &= V_{\mathrm{in}} - k_1 [G] [ATP] \\
19 | \frac{\mathrm{d}[ATP]}{\mathrm{d}t} &= 2 k_1 [G] [ATP] - \frac{k_p [ATP]}{[ATP] + K_m}
20 | \end{aligned}
21 | $$
22 |
23 | where $V_{\mathrm{in}}$ represents the rate at which glucose enters
24 | the system, $k_1$ represents the rate at which glucose is converted
25 | into [ATP](https://en.wikipedia.org/wiki/Adenosine_triphosphate), and
$k_p$ and $K_m$ are the Michaelis-Menten parameters governing the rate
at which ATP is consumed.
26 |
27 | > module BierEtAl where
28 |
29 | > import Numeric.GSL.ODE
30 | > import Numeric.LinearAlgebra
31 |
32 | > xdot t [x,v] = [v, -0.95*x - 0.1*v]
33 |
34 | > ts = linspace 100 (0,20 :: Double)
35 |
36 | > sol = odeSolve xdot [10,0] ts
37 |
38 | main = mplot (ts : toColumns sol)
39 |
40 | Bier, Bakker, & Westerhoff published this very simple model of the oscillations
(Biophys. J.
78:1087-1093, 2000) 42 | 43 | > vIn' = 0.10 44 | > k1' = 0.02 45 | > kp' = 6 46 | > bigKm' = 12.0 47 | > atpInit = 4.0 48 | > bigGInit = 3.0 49 | 50 | > deltaT = 0.1 51 | > totTime = 1000.0 52 | > bigN = floor $ totTime / deltaT 53 | 54 | > data Bier = Bier { vin :: Double 55 | > , k1 :: Double 56 | > , kp :: Double 57 | > , km :: Double 58 | > } 59 | > deriving Show 60 | 61 | > bier vIn k1 kp bigKm = ydot 62 | > where 63 | > ydot t [atp, g] = [ 2 * k1 * g * atp - (kp * atp) / (atp + bigKm) 64 | > , vIn - k1 * g * atp] 65 | 66 | 67 | > us = linspace bigN (0, 500 :: Double) 68 | > us' = toList us 69 | 70 | > tol :: Bier -> [[Double]] 71 | > tol b = 72 | > map toList $ 73 | > toColumns $ 74 | > odeSolve (bier (vin b) (k1 b) (kp b) (km b)) [atpInit, bigGInit] us 75 | 76 | > bier11 vin = tol (Bier {vin = vin, k1 = k1', kp = kp', km = bigKm'}) 77 | > 78 | 79 | *Main> let mins = map (map maximum . map (drop 9000) . bier11) [0.1,0.2..1.6] 80 | *Main> let mins = map (map minimum . map (drop 9000) . bier11) [0.1,0.2..1.6] 81 | *Main> let maxs = map (map maximum . map (drop 9000) . bier11) [0.1,0.2..1.6] 82 | *Main> zipWith (zipWith (-)) maxs mins 83 | [[10.203632755679338,4.317902636079657], 84 | [17.41769751835152,20.356676523447213], 85 | [0.3447276080418492,14.50788840287894], 86 | [16.544243323228525,17.543667352329248], 87 | [15.737773849466983,15.272180092281396], 88 | [14.480447001826633,14.1415645039917], 89 | [12.600308792865956,11.916503279523486], 90 | [9.749314488605487,8.985392272576505], 91 | [5.300678173001263,4.7422363608151565], 92 | [1.06848710408807,0.8628621763411157], 93 | [7.199939417051748e-2,5.381937464709985e-2], 94 | [1.6850502574889958e-3,1.3224805287066488e-3], 95 | [1.9259713305075365e-5,1.2806362158279683e-5], 96 | [9.038267378969067e-8,6.782357075962864e-8], 97 | [1.7469403701397823e-10,6.895017889974042e-11], 98 | [0.0,0.0]] -------------------------------------------------------------------------------- /BierEtAlChart.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module BierEtAlChart ( 9 | diag 10 | , diagSmooth 11 | , diagFitted 12 | ) where 13 | 14 | import Graphics.Rendering.Chart 15 | import Graphics.Rendering.Chart.Backend.Diagrams 16 | import Diagrams.Backend.Cairo.CmdLine 17 | import Diagrams.Prelude hiding ( render, Renderable ) 18 | 19 | import System.IO.Unsafe 20 | 21 | 22 | denv :: DEnv Double 23 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 600 500 24 | 25 | diag :: String -> 26 | [(Double, Double)] -> 27 | [(Double, Double)] -> 28 | Diagram Cairo 29 | diag t l xs = 30 | fst $ runBackend denv (render (chart t l xs) (600, 500)) 31 | 32 | chart :: String -> 33 | [(Double, Double)] -> 34 | [(Double, Double)] -> 35 | Renderable () 36 | chart t l obs = toRenderable layout 37 | where 38 | 39 | boundry = plot_lines_values .~ [l] 40 | $ plot_lines_style . line_color .~ opaque red 41 | $ plot_lines_title .~ "ATP" 42 | $ plot_lines_style . line_width .~ 1.0 43 | $ def 44 | 45 | actuals = plot_points_values .~ obs 46 | $ plot_points_style . point_color .~ opaque blue 47 | $ plot_points_title .~ "Glucose" 48 | $ def 49 | 50 | layout = layout_title .~ t 51 | $ layout_plots .~ [toPlot actuals, toPlot boundry] 52 | $ layout_y_axis . 
laxis_title .~ "y co-ordinate" 53 | $ layout_y_axis . laxis_override .~ axisGridHide 54 | $ layout_x_axis . laxis_title .~ "x co-ordindate" 55 | $ layout_x_axis . laxis_override .~ axisGridHide 56 | $ def 57 | 58 | diagFitted :: String -> 59 | [(Double, Double)] -> 60 | [(Double, Double)] -> 61 | [(Double, Double)] -> 62 | Diagram Cairo 63 | diagFitted t l xs es = 64 | fst $ runBackend denv (render (chartFitted t l xs es) (600, 500)) 65 | 66 | chartFitted :: String -> 67 | [(Double, Double)] -> 68 | [(Double, Double)] -> 69 | [(Double, Double)] -> 70 | Renderable () 71 | chartFitted t l obs ests = toRenderable layout 72 | where 73 | 74 | boundry = plot_lines_values .~ [l] 75 | $ plot_lines_style . line_color .~ opaque red 76 | $ plot_lines_title .~ "Actual Trajectory" 77 | $ plot_lines_style . line_width .~ 1.0 78 | $ def 79 | 80 | estimas = plot_lines_values .~ [ests] 81 | $ plot_lines_style . line_color .~ opaque green 82 | $ plot_lines_title .~ "Inferred Trajectory" 83 | $ plot_lines_style . line_width .~ 1.0 84 | $ def 85 | 86 | actuals = plot_points_values .~ obs 87 | $ plot_points_style . point_color .~ opaque blue 88 | $ plot_points_title .~ "Measurements" 89 | $ def 90 | 91 | layout = layout_title .~ t 92 | $ layout_plots .~ [toPlot actuals, toPlot boundry, toPlot estimas] 93 | $ layout_y_axis . laxis_title .~ "y co-ordinate" 94 | $ layout_y_axis . laxis_override .~ axisGridHide 95 | $ layout_x_axis . laxis_title .~ "x co-ordindate" 96 | $ layout_x_axis . laxis_override .~ axisGridHide 97 | $ def 98 | 99 | chartSmooth :: String -> 100 | [(Double, Double)] -> 101 | [[(Double, Double)]] -> 102 | Renderable () 103 | chartSmooth t l obss = toRenderable layout 104 | where 105 | 106 | boundry = plot_points_values .~ l 107 | $ plot_points_title .~ "Actual" 108 | $ plot_points_style .~ filledCircles 4 (red `withOpacity` 0.25) 109 | $ def 110 | 111 | actuals obs = plot_points_values .~ obs 112 | $ plot_points_style .~ filledCircles 2 (blue `withOpacity` 0.25) 113 | $ def 114 | 115 | paths = plot_lines_values .~ obss 116 | $ plot_lines_style . line_color .~ (green `withOpacity` 0.1) 117 | $ plot_lines_title .~ "Particle Trajectories" 118 | -- $ plot_lines_style . line_width .~ 1.0 119 | $ def 120 | 121 | layout = layout_title .~ t 122 | $ layout_plots .~ (toPlot paths) : (toPlot boundry) : (map (toPlot . actuals) obss) 123 | $ layout_y_axis . laxis_title .~ "Position" 124 | $ layout_y_axis . laxis_override .~ axisGridHide 125 | $ layout_x_axis . laxis_title .~ "Time" 126 | $ layout_x_axis . 
laxis_override .~ axisGridHide 127 | $ def 128 | 129 | diagSmooth :: String -> 130 | [(Double, Double)] -> 131 | [[(Double, Double)]] -> 132 | Diagram Cairo 133 | diagSmooth t l xss = 134 | fst $ runBackend denv (render (chartSmooth t l xss) (600, 500)) 135 | -------------------------------------------------------------------------------- /BierEtAlMain.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module Main ( 9 | main 10 | ) where 11 | 12 | import Diagrams.Prelude 13 | import Diagrams.Backend.CmdLine 14 | import Diagrams.Backend.Cairo.CmdLine 15 | 16 | import BierEtAl 17 | import BierEtAlChart 18 | 19 | displayHeader :: FilePath -> Diagram B -> IO () 20 | displayHeader fn = 21 | mainRender ( DiagramOpts (Just 900) (Just 700) fn 22 | , DiagramLoopOpts False Nothing 0 23 | ) 24 | 25 | 26 | -- x1 = Bier {vin =0.36, k1 =0.02, kp =6, km =15} 27 | -- x2 = Bier {vin =0.36, k1 =0.02, kp =5, km =5} 28 | -- x3 = Bier {vin =0.36, k1 =0.02, kp =6, km =10} 29 | -- x4 = Bier {vin =0.2, k1 =0.02, kp =5, km =13} 30 | 31 | -- x1 = Bier {vin =0.36, k1 =0.02, kp =4, km =15} 32 | -- x2 = Bier {vin =0.36, k1 =0.02, kp =6, km = 7} 33 | -- x3 = Bier {vin =0.2, k1 =0.02, kp =5, km =13} 34 | -- x4 = Bier {vin =0.1, k1 =0.02, kp =6, km =13} 35 | 36 | x1 = Bier {vin =0.36, k1 =0.01, kp =6, km =13} 37 | x2 = Bier {vin =0.30, k1 =0.02, kp =6, km =18} 38 | x3 = Bier {vin =0.50, k1 =0.02, kp =6, km =12} 39 | x4 = Bier {vin =0.36, k1 =0.01, kp =7, km =13} 40 | 41 | w1 = Bier {vin =1.10, k1 =0.02, kp =6, km =13} 42 | w2 = Bier {vin =1.00, k1 =0.02, kp =6, km =13} 43 | w3 = Bier {vin =0.95, k1 =0.02, kp =6, km =13} 44 | w4 = Bier {vin =0.90, k1 =0.02, kp =6, km =13} 45 | 46 | -- w1 = Bier {vin =0.36, k1 =0.02, kp =6, km=17} 47 | -- w2 = Bier {vin =0.36, k1 =0.02, kp =6, km=18} 48 | -- w3 = Bier {vin =0.36, k1 =0.02, kp =6, km=19} 49 | -- w4 = Bier {vin =0.36, k1 =0.02, kp =6, km=20} 50 | 51 | -- w1 = Bier {vin =0.36, k1 =0.02, kp =6, km=17} 52 | -- w2 = Bier {vin =0.36, k1 =0.02, kp =6, km=18} 53 | -- w3 = Bier {vin =0.36, k1 =0.02, kp =6, km=19} 54 | -- w4 = Bier {vin =0.36, k1 =0.02, kp =6, km=20} 55 | 56 | main :: IO () 57 | main = do 58 | let alt suffix xs = displayHeader ("diagrams/BierEtAl" ++ suffix ++ ".png") 59 | (((diag ("BierEtAl" ++ suffix ++ "a") 60 | (zip us' ((tol (xs!!0))!!0)) 61 | (zip us' ((tol (xs!!0))!!1))) 62 | === 63 | (diag ("BierEtAl" ++ suffix ++ "b") 64 | (zip us' ((tol (xs!!1))!!0)) 65 | (zip us' ((tol (xs!!1))!!1)))) 66 | ||| 67 | ((diag ("BierEtAl" ++ suffix ++ "c") 68 | (zip us' ((tol (xs!!2))!!0)) 69 | (zip us' ((tol (xs!!2))!!1))) 70 | === 71 | (diag ("BierEtAl" ++ suffix ++ "d") 72 | (zip us' ((tol (xs!!3))!!0)) 73 | (zip us' ((tol (xs!!3))!!1))))) 74 | alt "11" [w1,w2,w3,w4] 75 | putStrLn "hello" 76 | -------------------------------------------------------------------------------- /Blog/diagrams/sanzio_01_plato_aristotle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/idontgetoutmuch/NumMethHaskell/ed62bb0c4b9cb42d2ae9b7c29a2cc3251867d9fd/Blog/diagrams/sanzio_01_plato_aristotle.jpg -------------------------------------------------------------------------------- /Brownian.lhs: 
--------------------------------------------------------------------------------
1 | % The Existence of Brownian Motion
2 | % Dominic Steinitz
3 | % 24th September 2014
4 |
5 | ---
6 | bibliography: Kalman.bib
7 | ---
8 |
9 | In 1905, Einstein published five remarkable papers including, "Über
10 | die von der molekularkinetischen Theorie der Wärme geforderte Bewegung
11 | von in ruhenden Flüssigkeiten suspendierten Teilchen." which roughly
12 | translates as "On the movement of small particles suspended in a
13 | stationary liquid demanded by the molecular-kinetic theory of heat."
14 | giving the first explanation of the phenomenon observed by Robert
15 | Brown in 1827 of small particles of pollen moving at random when
16 | suspended in water.
17 |
18 | The eponymously named Brownian motion is a stochastic process
19 | $\big(W_t\big)_{0 \le t \le 1}$ (presumably $W$ for Wiener) such that
20 |
21 | 1. $W_0(\omega) = 0$ for all $\omega$
22 |
23 | 2. For all $0 \le t_1 \le t_2 \le \ldots \le t_n \le 1$, the increments $W_{t_1},
24 | W_{t_2} - W_{t_1}, \ldots, W_{t_n} - W_{t_{n-1}}$ are independent.
25 |
26 | 3. $W_{t+h} - W_t \sim {\cal{N}}(0,h)$.
27 |
28 | 4. The map $t \mapsto W_t(\omega)$ is a continuous function of $t$ for
29 | all $\omega$.
30 |
31 | The
32 | [Kolmogorov-Daniell](http://www.hss.caltech.edu/~kcb/Notes/Kolmogorov.pdf)
33 | theorem guarantees that a stochastic process satisfying the first two
34 | conditions exists but does not tell us that the paths are
35 | continuous.
36 |
37 | For example, suppose that we have constructed Brownian motion as
38 | defined by the above conditions then take a random variable $U \sim
39 | {\cal{U}}[0,1]$, independent of $W$. Define a new process
40 |
41 | $$
42 | \tilde{W}_t =
43 | \begin{cases}
44 | W_t & \text{if } t \neq U \\
45 | 0 & \text{if } t = U
46 | \end{cases}
47 | $$
48 |
49 | This has the same finite dimensional distributions as $W_t$ since
50 | $(W_{t_1}, W_{t_2}, W_{t_3}, \ldots W_{t_{n-1}}, W_{t_n})$ and
51 | $(\tilde{W}_{t_1}, \tilde{W}_{t_2}, \tilde{W}_{t_3}, \ldots
52 | \tilde{W}_{t_{n-1}}, \tilde{W}_{t_n})$ are equal unless $U \in \{t_1,
53 | \ldots, t_n\}$ and this set has measure 0. This process satisfies
54 | conditions 1--3 but is not continuous, since
55 |
56 | $$
57 | \lim_{t \uparrow U} \tilde{W}_t = \lim_{t \uparrow U} W_t = W_U
58 | $$
59 |
60 | and
61 |
62 | $$
63 | \mathbb{P}(W_U = 0) = \int_0^1 \mathbb{P}(W_t = 0)\,\mathrm{d}t = 0
64 | $$
65 |
so that, almost surely, $\tilde{W}_U = 0 \neq W_U$; that is, the path of
$\tilde{W}$ has a discontinuity at $U$.
66 | Further, this theorem is not constructive, relying as it does on the axiom of
67 | choice. Most proofs of existence follow [@Ciesielski61]. Instead let
68 | us follow [@liggett2010continuous].
69 |
70 | It is tempting to think of Brownian motion as the limit in some sense
71 | of a random walk
72 |
73 | $$
74 | x_t^{(N)} = \frac{1}{\sqrt{N}}\sum_{i=1}^{\lfloor Nt \rfloor}\xi_i
75 | $$
76 |
77 | where $0 \le t \le 1$ and the $\xi_i$ are independent with zero mean
and unit variance. However, the processes $x_t^{(1)}, x_t^{(2)},
78 | \ldots$ are discontinuous and it is not clear how one would prove that
79 | the limit of discontinuous processes is in fact continuous.
80 |
81 | Let $\{\phi_i\}$ be a complete orthonormal basis for $L^2[0,1]$.
That is, for any $f$ for which $\int_0^1 f^2 \,\mathrm{d}\mu$ exists, we have
83 |
84 | $$
85 | f = \sum_{i=1}^\infty \langle f, \phi_i\rangle \phi_i
86 | $$
87 |
88 | where $\mu$ is Lebesgue measure and $\langle\ldots,\ldots\rangle$ is
89 | the inner product for $L^2$ defined as usual by
90 |
91 | $$
92 | \langle f, g\rangle = \int_0^1 fg\,\mathrm{d}\mu
93 | $$
94 |
95 | We know such bases exist, for example, the [Fourier
96 | expansion](http://en.wikipedia.org/wiki/Fourier_series) and [Legendre
97 | polynomials](http://en.wikipedia.org/wiki/Legendre_polynomials).
98 |
99 | We define the so-called Haar wavelets for $n = 1, 2, \ldots$ and $k = 1, 3, 5, \ldots, 2^n - 1$ by
100 |
101 | $$
102 | H_{n,k}(t) =
\begin{cases}
+2^{(n-1)/2} & \text{if } (k - 1)2^{-n} < t \le k2^{-n} \\
103 | -2^{(n-1)/2} & \text{if } k2^{-n} < t \le (k + 1)2^{-n} \\
0 & \text{otherwise}
\end{cases}
104 | $$
105 |
106 | Using Haskell's capabilities for dependently typed programming we can
107 | express these as shown below.
108 |
109 | > {-# OPTIONS_GHC -Wall #-}
110 | > {-# OPTIONS_GHC -fno-warn-name-shadowing #-}
111 | > {-# OPTIONS_GHC -fno-warn-type-defaults #-}
112 | > {-# OPTIONS_GHC -fno-warn-unused-do-bind #-}
113 | > {-# OPTIONS_GHC -fno-warn-missing-methods #-}
114 | > {-# OPTIONS_GHC -fno-warn-orphans #-}
115 |
116 | > {-# LANGUAGE DataKinds #-}
117 | > {-# LANGUAGE TypeOperators #-}
118 | > {-# LANGUAGE KindSignatures #-}
119 | > {-# LANGUAGE TypeFamilies #-}
120 | > {-# LANGUAGE TypeOperators #-}
121 | > {-# LANGUAGE Rank2Types #-}
122 | > {-# LANGUAGE ScopedTypeVariables #-}
123 | > {-# LANGUAGE PolyKinds #-}
124 |
125 | > module Brownian where
126 |
127 | > import GHC.TypeLits
128 | > import Data.Proxy
129 |
130 | > import Numeric.Integration.TanhSinh
131 |
132 | > data Haar (a :: Nat) (b :: Nat) = Haar { unHaar :: Double -> Double }
133 |
134 | > haar :: forall n k .
135 | >         (KnownNat n, KnownNat k, (2 * k - 1 <=? 2^n - 1) ~ 'True) =>
136 | >         Haar n (2 * k - 1)
137 | > haar = Haar g where
138 | >   g t | (k' - 1) * 2 ** (-n') < t && t <= k' * 2 ** (-n') =  2 ** ((n' - 1) / 2)
139 | >       | k' * 2 ** (-n') < t && t <= (k' + 1) * 2 ** (-n') = -2 ** ((n' - 1) / 2)
140 | >       | otherwise                                         =  0
141 | >     where
142 | >       n' = fromIntegral (natVal (Proxy :: Proxy n))
143 | >       k' = 2 * (fromIntegral (natVal (Proxy :: Proxy k))) - 1
144 |
145 | Now for example we can evaluate
146 |
147 | > haar11 :: Double -> Double
148 | > haar11 = unHaar (haar :: Haar 1 1)
149 |
150 | [ghci]
151 | haar11 0.75
152 |
153 | but if we try to evaluate *haar :: Haar 1 2* we get a type error.
154 |
155 | > type family ZipWith (f :: a -> b -> c) (as :: [a]) (bs :: [b]) :: [c] where
156 | >   ZipWith f (a ': as) (b ': bs) = (f a b) ': (ZipWith f as bs)
157 | >   ZipWith f as bs = '[]
158 |
159 | Rather than go too far into dependently typed programming, which would
160 | distract us from the existence proof, let us re-express this function
161 | in a more traditional way (which will only give us errors at runtime).
162 |
163 | > haarEtouffee :: Int -> Int -> Double -> Double
164 | > haarEtouffee n k t
165 | >   | n <= 0               = error "n must be >= 1"
166 | >   | k `mod` 2 == 0       = error "k must be odd"
167 | >   | k < 0 || k > 2^n - 1 = error "k must be >=0 and <= 2^n -1"
168 | >   | (k' - 1) * 2 ** (-n') < t && t <= k' * 2 ** (-n') =  2 ** ((n' - 1) / 2)
169 | >   | k' * 2 ** (-n') < t && t <= (k' + 1) * 2 ** (-n') = -2 ** ((n' - 1) / 2)
170 | >   | otherwise                                         =  0
171 | >   where
172 | >     k' = fromIntegral k
173 | >     n' = fromIntegral n
174 |
175 | Before plotting the first few Haar wavelets, we can sanity check the *haarEtouffee* implementation numerically.
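
Each $H_{n,k}$ takes the values $\pm 2^{(n-1)/2}$ on two intervals whose
lengths sum to $2^{-(n-1)}$, so its $L^2$ norm should be exactly 1. The
helper below is a minimal sketch using the integration routines imported
above (*haarNormSq* is a name introduced just for this check and is not
used elsewhere).

> -- Squared L^2 norm of H_{n,k}, computed numerically; it should be
> -- (approximately) 1 for every valid n and k. Illustrative helper only.
> haarNormSq :: Int -> Int -> Double
> haarNormSq n k =
>   result (absolute 1e-6 (parSimpson (\t -> (haarEtouffee n k t)^2) 0 1))

For example, *haarNormSq 1 1*, *haarNormSq 2 3* and *haarNormSq 3 5*
should all evaluate to approximately 1. Here are the first few Haar
wavelets.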
176 | 177 | ```{.dia height='300'} 178 | import Brownian 179 | import BrownianChart 180 | 181 | dia = diag 2 "n = 1, k = 1" (xss!!0) 182 | ```` 183 | 184 | ```{.dia height='300'} 185 | import Brownian 186 | import BrownianChart 187 | 188 | dia = (diag 2 "n = 2, k = 1" (xss!!1) ||| 189 | diag 2 "n = 2, k = 3" (xss!!2)) 190 | ```` 191 | 192 | ```{.dia height='300'} 193 | import Brownian 194 | import BrownianChart 195 | 196 | dia = ((diag 2 "n = 3, k = 1" (xss!!3) ||| 197 | diag 2 "n = 3, k = 3" (xss!!4) ||| 198 | diag 2 "n = 3, k = 5" (xss!!5) ||| 199 | diag 2 "n = 3, k = 7" (xss!!6))) 200 | ```` 201 | 202 | > schauderEtouffee :: Int -> Int -> Double -> Double 203 | > schauderEtouffee n k t = result (absolute 1e-6 (parSimpson (haarEtouffee n k) 0 t)) 204 | 205 | > n :: Int 206 | > n = 1000 207 | 208 | > xss :: [[(Double, Double)]] 209 | > xss = map (\(m, k) -> map (\i -> let x = fromIntegral i / fromIntegral n in (x, haarEtouffee m k x)) [0..n - 1]) [(1,1), (2,1), (2,3), (3,1), (3,3), (3,5), (3,7)] 210 | 211 | > yss :: [[(Double, Double)]] 212 | > yss = map (\(m, k) -> map (\i -> let x = fromIntegral i / fromIntegral n in (x, schauderEtouffee m k x)) [0..n - 1]) [(1,1), (2,1), (2,3), (3,1), (3,3), (3,5), (3,7)] 213 | 214 | ```{.dia height='300'} 215 | import Brownian 216 | import BrownianChart 217 | 218 | dia = diag 0.5 "Foo" (yss!!0) 219 | ```` 220 | 221 | ```{.dia height='300'} 222 | import Brownian 223 | import BrownianChart 224 | 225 | dia = (diag 0.5 "Foo" (yss!!1) ||| 226 | diag 0.5 "Baz" (yss!!2)) 227 | ```` 228 | 229 | ```{.dia height='300'} 230 | import Brownian 231 | import BrownianChart 232 | 233 | dia = ((diag 0.5 "Foo" (yss!!3) ||| 234 | diag 0.5 "Baz" (yss!!4) ||| 235 | diag 0.5 "Baz" (yss!!5) ||| 236 | diag 0.5 "Baz" (yss!!6))) 237 | ```` 238 | 239 | Bibliography and Resources 240 | -------------------------- 241 | -------------------------------------------------------------------------------- /BrownianChart.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module BrownianChart ( 9 | diag 10 | ) where 11 | 12 | import Control.Lens hiding ( (#) ) 13 | import Graphics.Rendering.Chart 14 | import Graphics.Rendering.Chart.Backend.Diagrams 15 | import Diagrams.Backend.Cairo.CmdLine 16 | import Diagrams.Prelude hiding ( render, Renderable ) 17 | import Data.Default.Class 18 | 19 | import System.IO.Unsafe 20 | 21 | denv :: DEnv 22 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 500 500 23 | 24 | diag :: Double -> String -> 25 | [(Double, Double)] -> 26 | Diagram Cairo R2 27 | diag yHt t ls = 28 | fst $ runBackend denv (render (chart yHt t ls) (500, 500)) 29 | 30 | chart :: Double -> String -> 31 | [(Double, Double)] -> 32 | Renderable () 33 | chart yHt t lineVals = toRenderable layout 34 | where 35 | 36 | actuals = plot_lines_values .~ [lineVals] 37 | $ plot_lines_style . line_color .~ opaque blue 38 | $ plot_lines_style . line_width .~ 5.0 39 | $ def 40 | 41 | layout = layout_title .~ t 42 | $ layout_plots .~ [toPlot actuals] 43 | $ layout_y_axis . 
laxis_generate .~ scaledAxis def (-yHt, yHt)
44 |            $ def
45 |
-------------------------------------------------------------------------------- /Chapter1a.lhs: --------------------------------------------------------------------------------
1 |
2 | Introduction
3 | ============
4 |
5 | The intended audience of this blog post (which will be a chapter or
6 | part of a chapter in the book) is applied mathematicians,
7 | computational physicists, numerical analysts and anyone who has the
8 | need to compute numerical solutions, who have a limited acquaintance
9 | with Haskell and who do not wish to learn about parsing, searching and
10 | sorting, compiler design and other such techniques that are often used
11 | as examples in pedagogic Haskell material. Instead we take the example
12 | of multiplying matrices in the hope that it will provide a more
13 | motivating example. This initial version is based on a fascinating
14 | discussion the author had with two (applied?) mathematicians. The
15 | exposition is more detailed than other blog posts although more words
16 | do not necessarily mean greater perspicuity. All feedback is
17 | gratefully received.
18 |
19 | Implementations
20 | ===============
21 |
22 | We assume our matrix is dense. There are many ways to implement matrix
23 | multiplication in Haskell. We start with the bookies' favourite.
24 |
25 | Lists
26 | -----
27 |
28 | We represent vectors as lists and matrices as lists of vectors, that
29 | is lists of lists. We can capture this using type synonyms.
30 |
31 | > type Vector a = [a]
32 | > type Matrix a = [Vector a]
33 |
34 | First let us multiply a vector by a matrix. Let us ignore the
35 | implementation to start with and just focus on the type
36 | signature.
37 |
38 | First we note that Haskell can infer this type; we do not
39 | actually need to give it, although this is a good check that the
40 | implementation in some sense satisfies its specification.
41 |
42 | Here is the function without a type signature. Ignore the
43 | implementation and note below that we can ask ghci to tell us the
44 | type.
45 |
46 | > matTimesVecNoSig m v = result
47 | >   where
48 | >     lrs = map length m
49 | >     l   = length v
50 | >     result = if all (== l) lrs
51 | >              then map (\r -> sum $ zipWith (*) r v) m
52 | >              else error $ "Matrix has rows of length " ++ show lrs ++
53 | >                           " but vector is of length " ++ show l
54 |
55 | [ghci]
56 | :t matTimesVecNoSig
57 |
58 | Notice that ghci does not know to use the type synonyms; we have
59 | introduced them purely as an aid for the reader.
60 |
61 | > matTimesVec :: Num a => Matrix a -> (Vector a -> Vector a)
62 | > matTimesVec m v = result
63 | >   where
64 | >     lrs = map length m
65 | >     l   = length v
66 | >     result = if all (== l) lrs
67 | >              then map (\r -> sum $ zipWith (*) r v) m
68 | >              else error $ "Matrix has rows of length " ++ show lrs ++
69 | >                           " but vector is of length " ++ show l
70 |
71 | First, notice that we have made the implicit brackets in the type
72 | signature explicit. Ignoring *Num a =>* for the time being, we see
73 | that *matTimesVec* takes a matrix and returns a function which itself
74 | takes a vector and returns a vector.
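
Partial application also gives us matrix--matrix multiplication almost
for free: applying *matTimesVec m* to each column of a second matrix
yields the columns of the product. The following is a minimal sketch
(the names *matTimesMat* and *columns* are introduced here purely for
illustration); it uses only Prelude functions and assumes a non-empty,
rectangular matrix.

> matTimesMat :: Num a => Matrix a -> Matrix a -> Matrix a
> matTimesMat m = columns . map (matTimesVec m) . columns
>   where
>     -- Transpose a (non-empty, rectangular) list-of-rows matrix by
>     -- consing the corresponding element of every row onto each column.
>     columns :: [[b]] -> [[b]]
>     columns = foldr (zipWith (:)) (repeat [])

For example, *matTimesMat [[1,2],[3,4]] [[5,6],[7,8]]* should evaluate
to *[[19,22],[43,50]]*.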
75 | -------------------------------------------------------------------------------- /Chapter2.lhs: -------------------------------------------------------------------------------- 1 | Application to the Travelling Salesman 2 | ====================================== 3 | 4 | Applications to Physics 5 | ======================= 6 | 7 | Markov Process Theory 8 | ===================== 9 | -------------------------------------------------------------------------------- /Chart.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module Chart ( 9 | diagLine 10 | , diagPoint 11 | ) where 12 | 13 | import Control.Lens hiding ( (#) ) 14 | import Graphics.Rendering.Chart 15 | import Graphics.Rendering.Chart.Backend.Diagrams 16 | import Diagrams.Backend.Cairo.CmdLine 17 | import Diagrams.Prelude hiding ( render, Renderable ) 18 | import Data.Default.Class 19 | 20 | import System.IO.Unsafe 21 | 22 | 23 | denv :: DEnv 24 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 500 500 25 | 26 | diagLine :: [(Double, Double)] -> Int -> QDiagram Cairo R2 Any 27 | diagLine ls n = 28 | fst $ runBackend denv (render (chartLine ls n) (500, 500)) 29 | 30 | chartLine :: [(Double, Double)] -> Int -> Renderable () 31 | chartLine lineVals n = toRenderable layout 32 | where 33 | 34 | fitted = plot_lines_values .~ [lineVals] 35 | $ plot_lines_style . line_color .~ opaque blue 36 | $ plot_lines_title .~ "Trajectory" 37 | $ def 38 | 39 | layout = layout_title .~ "Gibbs Sampling Bivariate Normal (" ++ (show n) ++ " samples)" 40 | $ layout_y_axis . laxis_generate .~ scaledAxis def (-3,3) 41 | $ layout_x_axis . laxis_generate .~ scaledAxis def (-3,3) 42 | 43 | $ layout_plots .~ [toPlot fitted] 44 | $ def 45 | 46 | diagPoint :: [(Double, Double)] -> Int -> QDiagram Cairo R2 Any 47 | diagPoint ls n = 48 | fst $ runBackend denv (render (chartPoint ls n) (500, 500)) 49 | 50 | chartPoint :: [(Double, Double)] -> Int -> Renderable () 51 | chartPoint pointVals n = toRenderable layout 52 | where 53 | 54 | fitted = plot_points_values .~ pointVals 55 | $ plot_points_style . point_color .~ opaque red 56 | $ plot_points_title .~ "Sample" 57 | $ def 58 | 59 | layout = layout_title .~ "Gibbs Sampling Bivariate Normal (" ++ (show n) ++ " samples)" 60 | $ layout_y_axis . laxis_generate .~ scaledAxis def (-3,3) 61 | $ layout_x_axis . 
laxis_generate .~ scaledAxis def (-3,3) 62 | 63 | $ layout_plots .~ [toPlot fitted] 64 | $ def 65 | -------------------------------------------------------------------------------- /Diagrams.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | {-# LANGUAGE NoMonomorphismRestriction #-} 9 | {-# LANGUAGE FlexibleContexts #-} 10 | {-# LANGUAGE TypeFamilies #-} 11 | 12 | module Diagrams ( example 13 | , clockSelf 14 | , self 15 | ) where 16 | 17 | import Diagrams.Prelude 18 | import Diagrams.TwoD.Text 19 | 20 | state :: Renderable (Path R2) b => Diagram b R2 21 | state = circle 1 # lw 0.05 # fc silver 22 | 23 | fState :: Renderable (Path R2) b => Diagram b R2 24 | fState = circle 0.85 # lw 0.05 # fc lightblue <> state 25 | 26 | clockPoints :: Int -> [(Double, Double)] 27 | clockPoints n = [ (7 + 6*cos x, 6 + 6*sin x) 28 | | m <- [0..n-1] 29 | , let x = 2 * pi * fromIntegral m / fromIntegral n 30 | ] 31 | 32 | clockTurns :: Integral a => a -> [Angle] 33 | clockTurns n = [ x @@ turn 34 | | m <- [0..n-1] 35 | , let x = fromIntegral m / fromIntegral n 36 | ] 37 | 38 | points :: Int -> [P2] 39 | points n = map p2 $ map (\m -> (clockPoints n)!!m) [0..n-1] 40 | 41 | ds :: (Renderable Text b, Renderable (Path R2) b) => Int -> [Diagram b R2] 42 | ds m = map (\n -> (text (show (m - n + 1)) <> fState) # named (show n)) [1..m] 43 | 44 | states :: (Renderable Text b, Renderable (Path R2) b) => Int -> Diagram b R2 45 | states clockSize = position (zip (points clockSize) (ds clockSize)) 46 | 47 | line :: Trail R2 48 | line = trailFromOffsets [unitX] 49 | 50 | arrowStyle2 :: ArrowOpts 51 | arrowStyle2 = (with & arrowHead .~ noHead & tailSize .~ 0.3 52 | & arrowShaft .~ shaft' & arrowTail .~ spike' 53 | & shaftStyle %~ lw 0.1 54 | & tailColor .~ black) 55 | 56 | shaft' :: (Transformable t, TrailLike t, V t ~ R2) => t 57 | shaft' = arc (1/2 @@ turn) (0 @@ turn) # scaleX 0.33 58 | 59 | arrowStyle3 :: ArrowOpts 60 | arrowStyle3 = (with & arrowHead .~ noHead & tailSize .~ 0.3 61 | & arrowShaft .~ line & arrowTail .~ spike' 62 | & shaftStyle %~ lw 0.1 63 | & tailColor .~ black) 64 | 65 | clockConnectWiddershins :: Renderable (Path R2) b => 66 | Int -> Int -> Diagram b R2 -> Diagram b R2 67 | clockConnectWiddershins n m = connectPerim' arrowStyle3 68 | (show (1 + ((n + 1) `mod` m))) (show (1 + (n `mod` m))) 69 | ((clockTurns m)!!(n `mod` m)) 70 | ((clockTurns m)!!((n + 1) `mod` m)) 71 | clockConnectClockwise :: Renderable (Path R2) b => 72 | Int -> Int -> Diagram b R2 -> Diagram b R2 73 | clockConnectClockwise n m = connectPerim' arrowStyle3 74 | (show (1 + (n `mod` m))) (show (1 + ((n + 1) `mod` m))) 75 | ((clockTurns m)!!((n + 2) `mod` m)) 76 | ((clockTurns m)!!((n - 1) `mod` m)) 77 | 78 | clockConnectSelf :: Renderable (Path R2) b => 79 | Int -> Int -> Diagram b R2 -> Diagram b R2 80 | clockConnectSelf n m = connectPerim' arrowStyle2 81 | (show (1 + (n `mod` m))) (show (1 + (n `mod` m))) 82 | ((clockTurns m)!!((n - 1) `mod` m)) 83 | ((clockTurns m)!!((n + 1) `mod` m)) 84 | 85 | oneWay :: Renderable (Path R2) b => 86 | Int -> Diagram b R2 -> Diagram b R2 87 | oneWay clockSize = foldr (.) 
id 88 | (map (flip clockConnectWiddershins clockSize) [0..clockSize - 1]) 89 | 90 | otherWay :: Renderable (Path R2) b => 91 | Int -> Diagram b R2 -> Diagram b R2 92 | otherWay clockSize = foldr (.) (oneWay clockSize) 93 | (map (flip clockConnectClockwise clockSize) [0..clockSize - 1]) 94 | 95 | self :: Renderable (Path R2) b => 96 | Int -> Diagram b R2 -> Diagram b R2 97 | self clockSize = foldr (.) (otherWay clockSize) 98 | (map (flip clockConnectSelf clockSize) [0..clockSize - 1]) 99 | 100 | example :: (Renderable Text b, Renderable (Path R2) b) => 101 | Int -> Diagram b R2 102 | example clockSize = (otherWay clockSize) $ states clockSize 103 | 104 | clockSelf :: (Renderable Text b, Renderable (Path R2) b) => 105 | Int -> Diagram b R2 106 | clockSelf clockSize = (self clockSize) $ states clockSize 107 | 108 | -------------------------------------------------------------------------------- /Estimating Parameters1.R: -------------------------------------------------------------------------------- 1 | 2 | library(readr) 3 | model_file_name <- "LorenzGenerate.bi" 4 | writeLines(read_file(model_file_name)) 5 | 6 | library('rbi') 7 | library(ggplot2) 8 | 9 | Lorenz <- bi_model(model_file_name) 10 | 11 | T <- 10.0 12 | nObs <- 100 13 | init_parameters <- list(X = 1, Y = 1, Z = 1) 14 | 15 | synthetic_dataset <- bi_generate_dataset(end_time=T, model=Lorenz, 16 | init=init_parameters, 17 | noutputs = nObs) 18 | 19 | synthetic_data <- bi_read(synthetic_dataset) 20 | synthetic_df <- as.data.frame(synthetic_data) 21 | tail(synthetic_df) 22 | 23 | p <- ggplot(synthetic_df, aes(X.time)) + 24 | geom_path(aes(y = X.value, colour="alpha 16.0")) + 25 | theme(legend.position="bottom") + 26 | ggtitle("Lorenz") + 27 | theme(plot.title = element_text(hjust = 0.5)) + 28 | xlab("Time") + 29 | ylab("X Value") 30 | ggsave(filename = "diagrams/xpath.svg", plot = p) 31 | 32 | path0 <- ggplot() + 33 | theme(legend.position="bottom") + 34 | ggtitle("Lorenz") + 35 | theme(plot.title = element_text(hjust = 0.5)) + 36 | xlab("Time") + 37 | ylab("Value") 38 | 39 | 40 | set.seed(42) 41 | 42 | T <- 20.0 43 | 44 | for (i in c("red", "blue", "green")) { 45 | init_parameters <- list(X = 1 + rnorm(1,0.0,0.01), 46 | Y = 1 + rnorm(1,0.0,0.01), 47 | Z = 1 + rnorm(1,0.0,0.01)) 48 | 49 | synthetic_dataset <- bi_generate_dataset(end_time=T, model=Lorenz, 50 | init=init_parameters, 51 | noutputs = nObs) 52 | 53 | synthetic_data <- bi_read(synthetic_dataset) 54 | synthetic_df <- as.data.frame(synthetic_data) 55 | 56 | path0 <- path0 + 57 | geom_line(data = synthetic_df, aes(x = X.time, y = X.value), color = i) 58 | } 59 | 60 | ggsave(filename = "diagrams/xpath4.svg", plot = path0) 61 | 62 | model_file_name <- "LorenzState.bi" 63 | writeLines(read_file(model_file_name)) 64 | 65 | LorenzState <- bi_model(model_file_name) 66 | 67 | bi_state_model <- libbi(model=LorenzState) 68 | bi_state <- filter(bi_state_model, 69 | nparticles = 8192, 70 | nthreads = 1, 71 | end_time = T, 72 | obs = synthetic_dataset, 73 | init = init_parameters, 74 | ess_rel = 1, 75 | sample_obs = TRUE) 76 | 77 | bi_file_summary(bi_state$output_file_name) 78 | bi_state 79 | summary(bi_state) 80 | 81 | output <- bi_read(bi_state) 82 | logw <- xtabs(value ~ time + np, data = output$logweight, addNA = TRUE) 83 | X <- output$X$value 84 | Y <- output$Y$value 85 | Z <- output$Z$value 86 | A <- output$ln_alpha$value 87 | 88 | log2normw <- function(lw){ 89 | w <- exp(lw - max(lw)) 90 | return(w / sum(w)) 91 | } 92 | 93 | w = t(apply(X=logw, MARGIN=1, FUN=log2normw)) 94 | 
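# w now holds, for each time step, the particle weights normalised to
# sum to one (log2normw subtracts the maximum log-weight before
# exponentiating, the usual log-sum-exp trick to avoid overflow). The
# weighted sums below are then the particle estimates of the posterior
# means of X, Y, Z and ln_alpha at each time point.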
Xmeans = apply(X = X*w, MARGIN=1, FUN=sum)
95 | Ymeans = apply(X = Y*w, MARGIN=1, FUN=sum)
96 | Zmeans = apply(X = Z*w, MARGIN=1, FUN=sum)
97 | Ameans = apply(X = A*w, MARGIN=1, FUN=sum)
98 |
99 |
100 | synthetic_data <- bi_read(synthetic_dataset)
101 | X_original <- synthetic_data$X$value
102 | Y_original <- synthetic_data$Y$value
103 | Z_original <- synthetic_data$Z$value
104 |
105 |
106 | synthetic_df <- as.data.frame(synthetic_data)
107 | synthetic_df$Xmeans <- Xmeans
108 | synthetic_df$Ymeans <- Ymeans
109 | synthetic_df$Zmeans <- Zmeans
110 | synthetic_df$Ameans <- Ameans
111 |
112 | pAmeans <- ggplot(synthetic_df, aes(X.time)) +
113 |     geom_path(aes(y = exp(Ameans), colour="Ameans")) +
114 |     theme(legend.position="bottom") +
115 |     ggtitle("Lorenz") +
116 |     theme(plot.title = element_text(hjust = 0.5)) +
117 |     ylim(0.0, max(exp(Ameans))) +
118 |     xlab("Time") +
119 |     ylab("Value")
120 |
121 |
122 | ggsave(filename = "diagrams/xpath5.svg", plot = pAmeans)
123 |
124 | dataset_list <- list()
125 | parameters_list <- list()
126 |
127 | for (i in c(1,2,3)) {
128 |     init_parameters <- list(X = 1 + rnorm(1,0.0,0.01),
129 |                             Y = 1 + rnorm(1,0.0,0.01),
130 |                             Z = 1 + rnorm(1,0.0,0.01))
131 |
132 |     parameters_list[[i]] <- init_parameters
133 |     synthetic_dataset <- bi_generate_dataset(end_time=T, model=Lorenz,
134 |                                              init=init_parameters,
135 |                                              noutputs = nObs)
136 |
137 |     dataset_list[[i]] <- synthetic_dataset
138 | }
139 |
140 | X_list <- list()
141 | Y_list <- list()
142 | Z_list <- list()
143 | A_list <- list()
144 |
145 | for (i in c(1,2,3)) {
146 |     bi_state <- filter(bi_state_model, nparticles = 8192, nthreads = 1, end_time = T, obs = dataset_list[[i]], init = parameters_list[[i]], ess_rel = 1, sample_obs = TRUE)
147 |     output <- bi_read(bi_state)
148 |     logw <- xtabs(value ~ time + np, data = output$logweight, addNA = TRUE)
149 |     w = t(apply(X=logw, MARGIN=1, FUN=log2normw))
150 |     X <- output$X$value
151 |     Y <- output$Y$value
152 |     Z <- output$Z$value
153 |     A <- output$ln_alpha$value
154 |     X_list[[i]] = apply(X = X*w, MARGIN=1, FUN=sum)
155 |     Y_list[[i]] = apply(X = Y*w, MARGIN=1, FUN=sum)
156 |     Z_list[[i]] = apply(X = Z*w, MARGIN=1, FUN=sum)
157 |     A_list[[i]] = apply(X = A*w, MARGIN=1, FUN=sum)
158 | }
159 |
160 |
161 | path2 <- ggplot() +
162 |     theme(legend.position="bottom") +
163 |     ggtitle("Lorenz") +
164 |     theme(plot.title = element_text(hjust = 0.5)) +
165 |     xlab("Time") +
166 |     ylab("Value")
167 |
168 |
169 | for (i in c(1,2,3)) {
170 |     synthetic_data <- bi_read(dataset_list[[i]])
171 |     synthetic_df <- as.data.frame(synthetic_data)
172 |     synthetic_df$Ameans <- exp(A_list[[i]])
173 |     path2 <- path2 + geom_line(data = synthetic_df,
174 |                                aes(x = X.time, y = Ameans), color = "blue")
175 | }
176 |
177 | ggsave(filename = "diagrams/xpath7.svg", plot = path2)
178 |
179 | x <- list()
180 |
181 | for (i in c(1:3)) {
182 |     x[[i]] <- tail(exp(A_list[[i]]), n = 50)
183 | }
184 |
185 | for (i in 1:3) print(mean(x[[i]]))
186 |
187 | for (i in 1:3) print(sd(x[[i]]))
188 |
-------------------------------------------------------------------------------- /FilteringMain.hs: --------------------------------------------------------------------------------
1 | {-# OPTIONS_GHC -Wall #-}
2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-}
3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-}
4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-}
5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-}
6 | {-# OPTIONS_GHC -fno-warn-orphans #-}
7 |
8 | import KalmanChart
9 | import Filtering
10 |
11 | import Diagrams.Prelude hiding (
render, Renderable ) 12 | 13 | import Diagrams.Backend.CmdLine 14 | import Diagrams.Backend.Cairo.CmdLine 15 | 16 | 17 | dia :: Diagram B R2 18 | dia = diagPartFilter (zip [1..1000](snd runPF)) 3 19 | 20 | displayHeader :: FilePath -> Diagram B R2 -> IO () 21 | displayHeader fn = 22 | mainRender ( DiagramOpts (Just 900) (Just 700) fn 23 | , DiagramLoopOpts False Nothing 0 24 | ) 25 | 26 | main :: IO () 27 | main = do 28 | displayHeader "diagrams/SingleRvNoisy.png" dia 29 | 30 | 31 | -------------------------------------------------------------------------------- /ForTrevor.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 3 | 4 | {-# LANGUAGE FlexibleContexts #-} 5 | {-# LANGUAGE TypeOperators #-} 6 | {-# LANGUAGE TypeFamilies #-} 7 | 8 | import Prelude as P 9 | import Data.Array.Accelerate as A hiding ((^)) 10 | import Data.Array.Accelerate.LLVM.Native as CPU 11 | import Data.Array.Accelerate.LLVM.PTX as GPU 12 | import Data.Array.Accelerate.Linear hiding (trace) 13 | import Data.Array.Accelerate.Control.Lens 14 | import qualified Linear as L 15 | 16 | e, q10, q20, p10, p20 :: Double 17 | e = 0.6 18 | q10 = 1 - e 19 | q20 = 0.0 20 | p10 = 0.0 21 | p20 = sqrt ((1 + e) / (1 - e)) 22 | 23 | h :: Double 24 | h = 0.01 25 | 26 | oneStep2 :: Double -> Exp (V2 Double, V2 Double) -> Exp (V2 Double, V2 Double) 27 | oneStep2 hh prev = lift (qNew, pNew) 28 | where 29 | h2 = hh / 2 30 | hhs = lift ((pure hh) :: V2 Double) 31 | hh2s = (lift ((pure h2) :: V2 Double)) 32 | pp2 = psPrev - hh2s * nablaQ' qsPrev 33 | qNew = qsPrev + hhs * nablaP' pp2 34 | pNew = pp2 - hh2s * nablaQ' qNew 35 | qsPrev = A.fst prev 36 | psPrev = A.snd prev 37 | nablaQ' :: Exp (V2 Double) -> Exp (V2 Double) 38 | nablaQ' qs = lift (V2 (qq1 / r) (qq2 / r)) 39 | where 40 | qq1 = qs ^. _x 41 | qq2 = qs ^. _y 42 | r = (qq1 ^ 2 + qq2 ^ 2) ** (3/2) 43 | nablaP' :: Exp (V2 Double) -> Exp (V2 Double) 44 | nablaP' ps = ps 45 | 46 | oneStepH98 :: Double -> V2 (V2 Double) -> V2 (V2 Double) 47 | oneStepH98 hh prev = V2 qNew pNew 48 | where 49 | h2 = hh / 2 50 | hhs = V2 hh hh 51 | hh2s = V2 h2 h2 52 | pp2 = psPrev - hh2s * nablaQ' qsPrev 53 | qNew = qsPrev + hhs * nablaP' pp2 54 | pNew = pp2 - hh2s * nablaQ' qNew 55 | qsPrev = prev ^. L._x 56 | psPrev = prev ^. L._y 57 | nablaQ' qs = V2 (qq1 / r) (qq2 / r) 58 | where 59 | qq1 = qs ^. L._x 60 | qq2 = qs ^. L._y 61 | r = (qq1 ^ 2 + qq2 ^ 2) ** (3/2) 62 | nablaP' ps = ps 63 | 64 | -- oneStep :: Double -> Exp (V2 (V2 Double)) -> Exp (V2 (V2 Double)) 65 | -- oneStep h prev = lift $ V2 qNew pNew 66 | -- where 67 | -- hs = lift (V2 h h) 68 | -- h2s = lift (V2 h2 h2) where h2 = h / 2 69 | -- qsPrev = prev ^. _x 70 | -- psPrev = prev ^. _y 71 | -- nablaQ qs = lift (V2 (qq1 / r) (qq2 / r)) 72 | -- where 73 | -- qq1 = qs ^. _x 74 | -- qq2 = qs ^. _y 75 | -- r = (qq1 ^ 2 + qq2 ^ 2) ** (3/2) 76 | -- nablaP ps = ps 77 | -- p2 = psPrev - h2s * nablaQ qsPrev 78 | -- qNew = qsPrev + hs * nablaP p2 79 | -- pNew = p2 - h2s * nablaQ qNew 80 | 81 | -- nSteps :: Int 82 | -- nSteps = 100 83 | 84 | dummyStart :: Exp (V2 Double, V2 Double) 85 | dummyStart = lift (V2 q10 q20, V2 p10 p20) 86 | 87 | dummyStart98 :: V2 (V2 Double) 88 | dummyStart98 = V2 (V2 q10 q20) (V2 p10 p20) 89 | 90 | -- dummyInputs :: Acc (Array DIM1 (V2 Double, V2 Double)) 91 | -- dummyInputs = A.use $ A.fromList (Z :. 
nSteps) $ 92 | -- P.replicate nSteps (pure 0.0 :: V2 Double, pure 0.0 :: V2 Double) 93 | 94 | -- runSteps :: Acc (Array DIM1 (V2 Double, V2 Double)) 95 | -- runSteps = A.scanl (\s _x -> (oneStep2 h s)) dummyStart dummyInputs 96 | 97 | nSteps :: Int 98 | nSteps = 80000000 -- 100000000 99 | 100 | runSteps' :: Exp (V2 Double, V2 Double) -> Exp (V2 Double, V2 Double) 101 | runSteps' = A.iterate (lift nSteps) (oneStep2 h) 102 | 103 | myIterate :: Int -> (a -> a) -> a -> a 104 | myIterate n f x | n P.<= 0 = x 105 | | otherwise = myIterate (n-1) f $! f x 106 | -- myIterate 0 _ a = a 107 | -- myIterate n f a = myIterate (n-1) f (f a) 108 | -- myIterate n f a = P.last $ P.take n $ P.iterate f a 109 | 110 | runSteps98' :: V2 (V2 Double) -> V2 (V2 Double) 111 | runSteps98' = myIterate nSteps (oneStepH98 h) 112 | 113 | reallyRunSteps' :: (Array DIM1 (V2 Double, V2 Double)) 114 | reallyRunSteps' = CPU.run $ 115 | A.scanl (\s _x -> runSteps' s) dummyStart 116 | (A.use $ A.fromList (Z :. 1) [(V2 0.0 0.0, V2 0.0 0.0)]) 117 | 118 | main :: IO () 119 | main = do 120 | putStrLn $ show $ reallyRunSteps' 121 | -- putStrLn $ show $ runSteps98' dummyStart98 122 | 123 | -- Onesteph98 :: Double -> (V2 Double, V2 Double) -> (V2 Double, V2 Double) 124 | -- oneStepH98 h prev = (qNew, pNew) 125 | -- where 126 | -- h2 = h / 2 127 | -- hhs = pure h 128 | -- hs = pure h2 129 | -- p2 = psPrev - hs * nablaQ qsPrev 130 | -- qNew = qsPrev + hhs * nablaP p2 131 | -- pNew = p2 - hs * nablaQ qNew 132 | -- qsPrev = P.fst prev 133 | -- psPrev = P.snd prev 134 | -- nablaQ qs = V2 (q1 / r) (q2 / r) 135 | -- where 136 | -- q1 = qs ^. L._x 137 | -- q2 = qs ^. L._y 138 | -- r = (q1 ^ 2 + q2 ^ 2) ** (3/2) 139 | -- nablaP ps = ps 140 | 141 | -- bigH2BodyH98 :: (V2 Double, V2 Double) -> Double 142 | -- bigH2BodyH98 x = ke + pe 143 | -- where 144 | -- pe = let V2 q1 q2 = P.fst x in negate $ recip (sqrt (q1^2 + q2^2)) 145 | -- ke = let V2 p1 p2 = P.snd x in 0.5 * (p1^2 + p2^2) 146 | -------------------------------------------------------------------------------- /ForTrevor.jl: -------------------------------------------------------------------------------- 1 | using StaticArrays 2 | 3 | e = 0.6 4 | q10 = 1 - e 5 | q20 = 0.0 6 | p10 = 0.0 7 | p20 = sqrt((1 + e) / (1 - e)) 8 | 9 | h = 0.01 10 | 11 | x1 = SVector{2,Float64}(q10, q20) 12 | x2 = SVector{2,Float64}(p10, p20) 13 | x3 = SVector{2,SVector{2,Float64}}(x1,x2) 14 | 15 | @inline function oneStep(h, prev) 16 | h2 = h / 2 17 | @inbounds qsPrev = prev[1] 18 | @inbounds psPrev = prev[2] 19 | function nablaQQ(qs) 20 | @inbounds q1 = qs[1] 21 | @inbounds q2 = qs[2] 22 | r = abs(q1^2 + q2^2) ^ (3/2) 23 | return SVector{2,Float64}(q1 / r, q2 / r) 24 | end 25 | function nablaPP(ps) 26 | return ps 27 | end 28 | p2 = psPrev - h2 * nablaQQ(qsPrev) 29 | qNew = qsPrev + h * nablaPP(p2) 30 | pNew = p2 - h2 * nablaQQ(qNew) 31 | return SVector{2,SVector{2,Float64}}(qNew, pNew) 32 | end 33 | 34 | function manyStepsFinal(n,h,prev) 35 | for i in 1:n 36 | prev = oneStep(h,prev) 37 | end 38 | return prev 39 | end 40 | 41 | final = manyStepsFinal(8,h,x3) 42 | print(final) 43 | 44 | # function manySteps(h, states) 45 | # n = length(states) 46 | # for i in 1:(n-1) 47 | # states[i+1] = oneStep2(h,states[i]) 48 | # end 49 | # return states 50 | # end 51 | 52 | 53 | # function bigH(x) 54 | # q1 = x[1][1] 55 | # q2 = x[1][2] 56 | # p1 = x[2][1] 57 | # p2 = x[2][2] 58 | # pe = - 1 / (sqrt(q1^2 + q2^2)) 59 | # ke = 0.5 * (p1^2 + p2^2) 60 | # return (ke + pe) 61 | # end 62 | 63 | # print(bigH(final)) 64 | 65 | # 
nSteps = 1000000 66 | # states = Array{SVector{2,SVector{2,Float64}}}(nSteps) 67 | # states[1] = x3 68 | 69 | # states = manySteps(h, states) 70 | 71 | # test = map(bigH, states[1:100]) 72 | 73 | -------------------------------------------------------------------------------- /Frames/ChangeLog.md: -------------------------------------------------------------------------------- 1 | # Revision history for ProductionData 2 | 3 | ## 0.1.0.0 -- YYYY-mm-dd 4 | 5 | * First version. Released on an unsuspecting world. 6 | -------------------------------------------------------------------------------- /Frames/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, Dominic Steinitz 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Dominic Steinitz nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /Frames/ProductionData.cabal: -------------------------------------------------------------------------------- 1 | -- Initial ProductionData.cabal generated by cabal init. 
For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | name: ProductionData 5 | version: 0.1.0.0 6 | -- synopsis: 7 | -- description: 8 | license: BSD3 9 | license-file: LICENSE 10 | author: Dominic Steinitz 11 | maintainer: dominic@steinitz.org 12 | -- copyright: 13 | -- category: 14 | build-type: Simple 15 | extra-source-files: ChangeLog.md 16 | cabal-version: >=1.10 17 | 18 | executable ProductionData 19 | main-is: Text.hs 20 | -- other-modules: 21 | other-extensions: TemplateHaskell, DataKinds, FlexibleContexts, TypeOperators 22 | build-depends: attoparsec, 23 | base >=4.10 && <4.11, 24 | foldl, 25 | Frames, 26 | lens, 27 | pipes, 28 | template-haskell, 29 | text, 30 | vinyl 31 | -- hs-source-dirs: 32 | default-language: Haskell2010 33 | -------------------------------------------------------------------------------- /Frames/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /Frames/Text.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE FlexibleContexts #-} 4 | {-# LANGUAGE TypeOperators #-} 5 | {-# LANGUAGE OverloadedStrings #-} 6 | {-# LANGUAGE RankNTypes #-} 7 | 8 | import Frames hiding ((:&)) 9 | import qualified Control.Foldl as Foldl 10 | import Control.Lens 11 | import Pipes.Prelude (fold) 12 | import Pipes 13 | import qualified Pipes.Prelude as P 14 | import qualified Pipes.Core as PC 15 | import qualified Data.Foldable as F 16 | 17 | import Foo 18 | 19 | import Data.Void 20 | 21 | import qualified Data.Vinyl as V 22 | import Data.Vinyl (Rec(..)) 23 | 24 | import Data.Text hiding (map) 25 | import Data.Attoparsec.Text 26 | import Control.Applicative 27 | 28 | 29 | 30 | tableTypesText "ProductionData" "/Users/dom/Downloads/2017_prod_reports_from_excel.csv" 31 | 32 | declareColumn "oil_vol_double" ''Double 33 | 34 | getOilVol :: ProductionData -> Record '[OilVolDouble] 35 | getOilVol x = pure (Col ((f $ (parseOnly parseDouble) (x ^. oilVol)))) :& Nil 36 | where 37 | f (Left _) = 0.0 / 0.0 38 | f (Right y) = y 39 | 40 | cleanData :: ProductionData -> Record '[ReportMonth, ReportYear, OilVolDouble] 41 | cleanData x = pure (Col (x ^. reportMonth)) :& pure (Col (x ^. reportYear)) :& getOilVol x 42 | 43 | readCleanData :: MonadSafe m => Producer (Record '[ReportMonth, ReportYear, OilVolDouble]) m () 44 | readCleanData = (readTable "/Users/dom/Downloads/2017_prod_reports_from_excel.csv") >-> 45 | (P.map cleanData) 46 | 47 | readOilVol :: MonadSafe m => Producer (Record '[OilVolDouble]) m () 48 | readOilVol = (readTable "/Users/dom/Downloads/2017_prod_reports_from_excel.csv") >-> 49 | (P.map getOilVol) 50 | 51 | oilVolLength :: Foldl.Fold (Record '[OilVolDouble]) Int 52 | oilVolLength = Foldl.length 53 | 54 | totalOilVol :: Foldl.Fold (Record '[OilVolDouble]) Double 55 | totalOilVol = (Foldl.handles oilVolDouble) Foldl.sum 56 | 57 | oilVolTotalAndLength :: Foldl.Fold (Record '[OilVolDouble]) (Double, Int) 58 | oilVolTotalAndLength = (,) <$> totalOilVol <*> oilVolLength 59 | 60 | oilVolFiltered :: Text -> Foldl.Fold (Record '[ReportMonth, ReportYear, OilVolDouble]) Double 61 | oilVolFiltered m = (Foldl.handles (filtered (\x -> x ^. reportMonth == m)) . 
62 | Foldl.handles oilVolDouble) Foldl.sum 63 | 64 | allMonths :: Foldl.Fold (Record '[ReportMonth, ReportYear, OilVolDouble]) [Double] 65 | allMonths = sequenceA $ map oilVolFiltered $ map pack $ map show [1..12] 66 | 67 | parseDouble :: Parser Double 68 | parseDouble = 69 | do d <- double 70 | return d 71 | <|> 72 | do _ <- string "" 73 | return 0.0 74 | 75 | main = do 76 | (t, l) <- runSafeT $ 77 | Foldl.purely fold oilVolTotalAndLength readOilVol 78 | putStrLn $ show l ++ " records totalling " ++ show t 79 | f <- runSafeT $ 80 | Foldl.purely fold allMonths readCleanData 81 | putStrLn $ show f 82 | 83 | -------------------------------------------------------------------------------- /Frames/stack.yaml: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by 'stack init' 2 | # 3 | # Some commonly used options have been documented as comments in this file. 4 | # For advanced use and comprehensive documentation of the format, please see: 5 | # https://docs.haskellstack.org/en/stable/yaml_configuration/ 6 | 7 | # Resolver to choose a 'specific' stackage snapshot or a compiler version. 8 | # A snapshot resolver dictates the compiler version and the set of packages 9 | # to be used for project dependencies. For example: 10 | # 11 | # resolver: lts-3.5 12 | # resolver: nightly-2015-09-21 13 | # resolver: ghc-7.10.2 14 | # resolver: ghcjs-0.1.0_ghc-7.10.2 15 | # resolver: 16 | # name: custom-snapshot 17 | # location: "./custom-snapshot.yaml" 18 | resolver: lts-11.18 19 | 20 | # User packages to be built. 21 | # Various formats can be used as shown in the example below. 22 | # 23 | # packages: 24 | # - some-directory 25 | # - https://example.com/foo/bar/baz-0.0.2.tar.gz 26 | # - location: 27 | # git: https://github.com/commercialhaskell/stack.git 28 | # commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a 29 | # - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a 30 | # extra-dep: true 31 | # subdirs: 32 | # - auto-update 33 | # - wai 34 | # 35 | # A package marked 'extra-dep: true' will only be built if demanded by a 36 | # non-dependency (i.e. a user package), and its test suites and benchmarks 37 | # will not be run. This is useful for tweaking upstream packages. 38 | packages: 39 | - . 
40 | # Dependency packages to be pulled from upstream that are not in the resolver
41 | # (e.g., acme-missiles-0.3)
42 | # extra-deps: []
43 |
44 | # Override default flag values for local packages and extra-deps
45 | # flags: {}
46 |
47 | # Extra package databases containing global packages
48 | # extra-package-dbs: []
49 |
50 | # Control whether we use the GHC we find on the path
51 | # system-ghc: true
52 | #
53 | # Require a specific version of stack, using version ranges
54 | # require-stack-version: -any # Default
55 | # require-stack-version: ">=1.6"
56 | #
57 | # Override the architecture used by stack, especially useful on Windows
58 | # arch: i386
59 | # arch: x86_64
60 | #
61 | # Extra directories used by stack for building
62 | # extra-include-dirs: [/path/to/dir]
63 | # extra-lib-dirs: [/path/to/dir]
64 | #
65 | # Allow a newer minor version of GHC than the snapshot specifies
66 | # compiler-check: newer-minor
-------------------------------------------------------------------------------- /FunWithKalmanPart1.lhs: --------------------------------------------------------------------------------
1 | % Fun with (Kalman) Filters Part I
2 | % Dominic Steinitz
3 | % 3rd July 2014
4 |
5 | ---
6 | bibliography: Kalman.bib
7 | ---
8 |
9 | \newcommand{\condprob} [3] {#1 \left( #2 \,\vert\, #3 \right)}
10 |
11 | ```{.dia height='300'}
12 | import FunWithKalmanPart1
13 | import KalmanChart
14 | dia = diagEsts (zip (map fromIntegral [0..]) (map fst estimates))
15 |                (zip (map fromIntegral [0..]) uppers)
16 |                (zip (map fromIntegral [0..]) lowers)
17 |                (zip (map fromIntegral [0..]) (replicate nObs (fst obs)))
18 |
19 | ```
20 |
21 | Suppose we wish to estimate the mean of a sample drawn from a normal
22 | distribution. In the Bayesian approach, we know the prior distribution
23 | for the mean (it could be a non-informative prior) and then we update
24 | this with our observations to create the posterior, the latter giving
25 | us improved information about the distribution of the mean. In symbols
26 |
27 | $$
28 | p(\theta \,\vert\, x) \propto p(x \,\vert\, \theta)p(\theta)
29 | $$
30 |
31 | Typically, the samples are chosen to be independent, and all of the
32 | data is used to perform the update but, given independence, there is
33 | no particular reason to do that: updates can be performed one at a time
34 | and the result is the same; nor is the order of update
35 | important. Being a bit imprecise, we have
36 |
37 | $$
38 | p(z \,\vert\, x, y) = \frac{p(z, x, y)}{p(x, y)} = \frac{p(z, x, y)}{p(x)p(y)} =
39 | p((z \,\vert\, x) \,\vert\, y) =
40 | p((z \,\vert\, y) \,\vert\, x)
41 | $$
42 |
43 | The standard notation in Bayesian statistics is to denote the
44 | parameters of interest as $\theta \in \mathbb{R}^p$ and the
45 | observations as $x \in \mathbb{R}^n$. For reasons that will become
46 | apparent in later blog posts, let us change notation and label the
47 | parameters as $x$ and the observations as $y$.
48 |
49 |
50 | Let us take a very simple example of a prior $X \sim {\cal{N}}(0,
51 | \sigma^2)$ where $\sigma^2$ is known and then sample from a normal
52 | distribution with mean $x$ and variance for the $i$-th sample $c_i^2$
53 | where $c_i$ is known (normally we would not know the variance but
54 | adding this generality would only clutter the exposition
55 | unnecessarily).
56 | 57 | $$ 58 | p(y_i \,\vert\, x) = \frac{1}{\sqrt{2\pi c_i^2}}\exp\bigg(-\frac{(y_i - x)^2}{2c_i^2}\bigg) 59 | $$ 60 | 61 | The likelihood is then 62 | 63 | $$ 64 | p(\boldsymbol{y} \,\vert\, x) = \prod_{i=1}^n \frac{1}{\sqrt{2\pi c_i^2}}\exp\bigg(-\frac{(y_i - x)^2}{2c_i^2}\bigg) 65 | $$ 66 | 67 | As we have already noted, instead of using this with the prior to 68 | calculate the posterior, we can update the prior with each observation 69 | separately. Suppose that we have obtained the posterior given $i - 1$ 70 | samples (we do not know this is normally distributed yet but we soon 71 | will): 72 | 73 | $$ 74 | p(x \,\vert\, y_1,\ldots,y_{i-1}) = {\cal{N}}(\hat{x}_{i-1}, \hat{\sigma}^2_{i-1}) 75 | $$ 76 | 77 | Then we have 78 | 79 | $$ 80 | \begin{aligned} 81 | p(x \,\vert\, y_1,\ldots,y_{i}) &\propto p(y_i \,\vert\, x)p(x \,\vert\, y_1,\ldots,y_{i-1}) \\ 82 | &\propto \exp-\bigg(\frac{(y_i - x)^2}{2c_i^2}\bigg) \exp-\bigg(\frac{(x - \hat{x}_{i-1})^2}{2\hat{\sigma}_{i-1}^2}\bigg) \\ 83 | &\propto \exp-\Bigg(\frac{x^2}{c_i^2} - \frac{2xy_i}{c_i^2} + \frac{x^2}{\hat{\sigma}_{i-1}^2} - \frac{2x\hat{x}_{i-1}}{\hat{\sigma}_{i-1}^2}\Bigg) \\ 84 | &\propto \exp-\Bigg( x^2\Bigg(\frac{1}{c_i^2} + \frac{1}{\hat{\sigma}_{i-1}^2}\Bigg) - 2x\Bigg(\frac{y_i}{c_i^2} + \frac{\hat{x}_{i-1}}{\hat{\sigma}_{i-1}^2}\Bigg)\Bigg) 85 | \end{aligned} 86 | $$ 87 | 88 | Writing 89 | 90 | $$ 91 | \frac{1}{\hat{\sigma}_{i}^2} \triangleq \frac{1}{c_i^2} + \frac{1}{\hat{\sigma}_{i-1}^2} 92 | $$ 93 | 94 | and then completing the square we also obtain 95 | 96 | $$ 97 | \frac{\hat{x}_{i}}{\hat{\sigma}_{i}^2} \triangleq \frac{y_i}{c_i^2} + \frac{\hat{x}_{i-1}}{\hat{\sigma}_{i-1}^2} 98 | $$ 99 | 100 | More Formally 101 | ============= 102 | 103 | Now let's be a bit more formal about conditional probability and use 104 | the notation of $\sigma$-algebras to define ${\cal{F}}_i = 105 | \sigma\{Y_1,\ldots, Y_i\}$ and $M_i \triangleq \mathbb{E}(X \,\vert\, 106 | {\cal{F}}_i)$ where $Y_i = X + \epsilon_i$, $X$ is as before and 107 | $\epsilon_i \sim {\cal{N}}(0, c_i^2)$. We have previously calculated 108 | that $M_i = \hat{x}_i$ and that ${\cal{E}}((X - M_i)^2 \,\vert\, Y_1, 109 | \ldots Y_i) = \hat{\sigma}_{i}^2$ and the tower law for conditional 110 | probabilities then allows us to conclude ${\cal{E}}((X - M_i)^2) = 111 | \hat{\sigma}_{i}^2$. By [Jensen's 112 | inequality](http://en.wikipedia.org/wiki/Jensen%27s_inequality), we have 113 | 114 | $$ 115 | {\cal{E}}(M_i^2) = {\cal{E}}({\cal{E}}(X \,\vert\, {\cal{F}}_i)^2) \leq 116 | {\cal{E}}({\cal{E}}(X^2 \,\vert\, {\cal{F}}_i)) = 117 | {\cal{E}}(X^2) = \sigma^2 118 | $$ 119 | 120 | Hence $M$ is bounded in $L^2$ and therefore converges in $L^2$ and 121 | almost surely to $M_\infty \triangleq {\cal{E}}(X \,\vert\, 122 | {\cal{F}}_\infty)$. The noteworthy point is that $M_\infty = X$ if 123 | and only if $\hat{\sigma}_i$ converges to 0. Explicitly we have 124 | 125 | $$ 126 | \frac{1}{\hat{\sigma}_i^2} = \frac{1}{\sigma^2} + \sum_{k=1}^i\frac{1}{c_k^2} 127 | $$ 128 | 129 | which explains why we took the observations to have varying and known 130 | variances. You can read more in Williams' book [@williams]. 131 | 132 | A Quick Check 133 | ============= 134 | 135 | We have reformulated our estimation problem as a very simple version 136 | of the celebrated [Kalman 137 | filter](http://en.wikipedia.org/wiki/Kalman_filter). Of course, there 138 | are much more interesting applications of this but for now let us try 139 | "tracking" the sample from the random variable.
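Before diving into the code, here is a tiny, self-contained sketch of the update equations derived above applied to some made-up numbers; the function name `step` and the figures are purely illustrative and are not part of the module that follows.

```haskell
-- Combine a posterior N(m, v) with a single observation y of known
-- variance c2, using the precision and mean updates derived above.
step :: (Double, Double) -> (Double, Double) -> (Double, Double)
step (m, v) (y, c2) = (vNew * (y / c2 + m / v), vNew)
  where
    vNew = recip (recip v + recip c2)
```

Starting from a prior ${\cal{N}}(0, 1)$ and observing $y = 1$ with $c^2 = 1$, `step (0.0, 1.0) (1.0, 1.0)` evaluates to `(0.5, 0.5)`: the mean moves halfway towards the observation and the variance halves, just as the formulas say it should.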
140 | 141 | > {-# OPTIONS_GHC -Wall #-} 142 | > {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 143 | > {-# OPTIONS_GHC -fno-warn-type-defaults #-} 144 | > {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 145 | > {-# OPTIONS_GHC -fno-warn-missing-methods #-} 146 | > {-# OPTIONS_GHC -fno-warn-orphans #-} 147 | 148 | > module FunWithKalmanPart1 ( 149 | > obs 150 | > , nObs 151 | > , estimates 152 | > , uppers 153 | > , lowers 154 | > ) where 155 | > 156 | > import Data.Random.Source.PureMT 157 | > import Data.Random 158 | > import Control.Monad.State 159 | 160 | 161 | > var, cSquared :: Double 162 | > var = 1.0 163 | > cSquared = 1.0 164 | > 165 | > nObs :: Int 166 | > nObs = 100 167 | 168 | > createObs :: RVar (Double, [Double]) 169 | > createObs = do 170 | > x <- rvar (Normal 0.0 var) 171 | > ys <- replicateM nObs $ rvar (Normal x cSquared) 172 | > return (x, ys) 173 | > 174 | > obs :: (Double, [Double]) 175 | > obs = evalState (sample createObs) (pureMT 2) 176 | > 177 | > updateEstimate :: (Double, Double) -> (Double, Double) -> (Double, Double) 178 | > updateEstimate (xHatPrev, varPrev) (y, cSquared) = (xHatNew, varNew) 179 | > where 180 | > varNew = recip (recip varPrev + recip cSquared) 181 | > xHatNew = varNew * (y / cSquared + xHatPrev / varPrev) 182 | > 183 | > estimates :: [(Double, Double)] 184 | > estimates = scanl updateEstimate (y, cSquared) (zip ys (repeat cSquared)) 185 | > where 186 | > y = head $ snd obs 187 | > ys = tail $ snd obs 188 | > 189 | > uppers :: [Double] 190 | > uppers = map (\(x, y) -> x + 3 * (sqrt y)) estimates 191 | > 192 | > lowers :: [Double] 193 | > lowers = map (\(x, y) -> x - 3 * (sqrt y)) estimates 194 | 195 | ```{.dia width='800'} 196 | import FunWithKalmanPart1 197 | import KalmanChart 198 | dia = diagEsts (zip (map fromIntegral [0..]) (map fst estimates)) 199 | (zip (map fromIntegral [0..]) uppers) 200 | (zip (map fromIntegral [0..]) lowers) 201 | (zip (map fromIntegral [0..]) (replicate nObs (fst obs))) 202 | 203 | ``` 204 | 205 | Bibliography 206 | ============ -------------------------------------------------------------------------------- /HaskellX/ChangeLog.md: -------------------------------------------------------------------------------- 1 | # Revision history for HaskellX 2 | 3 | ## 0.1.0.0 -- YYYY-mm-dd 4 | 5 | * First version. Released on an unsuspecting world. 6 | -------------------------------------------------------------------------------- /HaskellX/HaskellX.cabal: -------------------------------------------------------------------------------- 1 | -- Initial HaskellX.cabal generated by cabal init. For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | -- The name of the package. 5 | name: HaskellX 6 | 7 | -- The package version. See the Haskell package versioning policy (PVP) 8 | -- for standards guiding when and how versions should be incremented. 9 | -- https://wiki.haskell.org/Package_versioning_policy 10 | -- PVP summary: +-+------- breaking API changes 11 | -- | | +----- non-breaking API additions 12 | -- | | | +--- code changes with no API change 13 | version: 0.1.0.0 14 | 15 | -- A short (one-line) description of the package. 16 | -- synopsis: 17 | 18 | -- A longer description of the package. 19 | -- description: 20 | 21 | -- The license under which the package is released. 22 | license: BSD3 23 | 24 | -- The file containing the license text. 25 | license-file: LICENSE 26 | 27 | -- The package author(s). 
28 | author: Dominic Steinitz 29 | 30 | -- An email address to which users can send suggestions, bug reports, and 31 | -- patches. 32 | maintainer: dominic@steinitz.org 33 | 34 | -- A copyright notice. 35 | -- copyright: 36 | 37 | -- category: 38 | 39 | build-type: Simple 40 | 41 | -- Extra files to be distributed with the package, such as examples or a 42 | -- README. 43 | extra-source-files: ChangeLog.md 44 | 45 | -- Constraint on the version of Cabal needed to build this package. 46 | cabal-version: >=1.10 47 | 48 | 49 | executable HaskellX 50 | -- .hs or .lhs file containing the Main module. 51 | main-is: Xhaskell.lhs 52 | 53 | -- Modules included in this executable, other than Main. 54 | -- other-modules: 55 | 56 | -- LANGUAGE extensions used by modules in this package. 57 | -- other-extensions: 58 | 59 | -- Other library packages from which modules are imported. 60 | build-depends: base >=4.9 && <4.10, 61 | mtl >=2.2 && <2.3, 62 | monad-loops >=0.4 && <0.5, 63 | vector >=0.12 && <0.13, 64 | random-fu, 65 | histogram-fill, 66 | random-source, 67 | foldl, 68 | plots, 69 | diagrams-lib, 70 | diagrams-rasterific, 71 | cassava, 72 | bytestring, 73 | hmatrix, 74 | random-fu-multivariate, 75 | ad 76 | 77 | -- Directories containing source files. 78 | -- hs-source-dirs: 79 | 80 | -- Base language which the package is written in. 81 | default-language: Haskell2010 82 | 83 | -------------------------------------------------------------------------------- /HaskellX/HaskellXchange.tex: -------------------------------------------------------------------------------- 1 | \documentclass[presentation]{beamer} 2 | \usepackage[utf8]{inputenc} 3 | \usepackage[T1]{fontenc} 4 | \usepackage{fixltx2e} 5 | \usepackage{graphicx} 6 | \usepackage{grffile} 7 | \usepackage{longtable} 8 | \usepackage{wrapfig} 9 | \usepackage{rotating} 10 | \usepackage[normalem]{ulem} 11 | \usepackage{amsmath} 12 | \usepackage{textcomp} 13 | \usepackage{amssymb} 14 | \usepackage{capt-of} 15 | \usepackage{hyperref} 16 | \usepackage{listings} 17 | \usepackage{color} 18 | \usepackage{verse} 19 | \RequirePackage{fancyvrb} 20 | \DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\scriptsize} 21 | \usepackage[style=alphabetic]{biblatex} 22 | \usetheme{Frankfurt} 23 | \author{Dominic Steinitz} 24 | \date{Wednesday 27 September 17} 25 | \title{Hacking on GHC: A Worm's Eye View} 26 | \hypersetup{ 27 | pdfauthor={Dominic Steinitz}, 28 | pdfkeywords={}, 29 | pdflang={English}} 30 | 31 | \usepackage{dramatist} 32 | 33 | \begin{document} 34 | 35 | \maketitle 36 | \begin{frame}{Outline} 37 | \tableofcontents 38 | \end{frame} 39 | 40 | \section{Introduction} 41 | 42 | \begin{frame}{Apollo 8 launched on December 21, 1968} 43 | 44 | \StageDir{03:17:45:17 (Dec. 25, 1968, 6:36 a.m. UTC)} 45 | 46 | \begin{drama} 47 | \Character{Jim Lovell (Commander Module Pilot)}{jim} 48 | \Character{Ken Mattingly (CAPCOM)}{ken} 49 | 50 | \jimspeaks: Roger. Do you wish me to reinitialize the W-matrix at this time? 51 | \end{drama} 52 | 53 | \StageDir{03:17:45:26} 54 | 55 | \begin{drama} 56 | \Character{Jim Lovell (Commander Module Pilot)}{jim} 57 | \Character{Ken Mattingly (CAPCOM)}{ken} 58 | 59 | \kenspeaks: Affirmative, Apollo 8 60 | \end{drama} 61 | 62 | \section{Introducing the Reverend Bayes} 63 | \end{frame} 64 | 65 | \begin{frame}{Game} 66 | 67 | \begin{block}{Game} 68 | \begin{itemize} 69 | \item I select a number at random from a normal distribution. 70 | \item At time 1 I give you some information: the number with added noise. 
71 | \item At time 2 I give you more information: the same number but with different added noise. 72 | \item And so on $\ldots$ 73 | \end{itemize} 74 | \end{block} 75 | 76 | \end{frame} 77 | 78 | \begin{frame}{Bayes' Theorem} 79 | 80 | $$ 81 | \mathbb{P}(A \,|\, B) \triangleq \frac{\mathbb{P}(A \cap B)}{\mathbb{P}(B)} 82 | $$ 83 | 84 | Also 85 | 86 | $$ 87 | \mathbb{P}(B \,|\, A) \triangleq \frac{\mathbb{P}(A \cap B)}{\mathbb{P}(A)} 88 | $$ 89 | 90 | Thus 91 | 92 | $$ 93 | \mathbb{P}(A \,|\, B) \propto {\mathbb{P}(B \,|\, A)}{\mathbb{P}(A)} 94 | $$ 95 | 96 | \end{frame} 97 | 98 | \begin{frame}{Take a Step Back} 99 | \begin{center} 100 | \includegraphics[width=0.95\textwidth]{./diagrams/prior.png} 101 | \end{center} 102 | \end{frame} 103 | 104 | \begin{frame}{Take a Step Back} 105 | \begin{center} 106 | \includegraphics[width=0.95\textwidth]{./diagrams/post1.png} 107 | \end{center} 108 | \end{frame} 109 | 110 | \begin{frame}{Take a Step Back} 111 | \begin{center} 112 | \includegraphics[width=0.95\textwidth]{./diagrams/postN.png} 113 | \end{center} 114 | \end{frame} 115 | 116 | \section{Introducing the Reverend Brown} 117 | 118 | \begin{frame}{Robert Brown (1827)} 119 | 120 | \begin{itemize} 121 | \item You wish to emulate the famous botanist but with a difference. 122 | \item You have a camera which gives approximate co-ordinates of the 123 | pollen particle on the slide. 124 | \item You have a motor which can drive the slide in horizontal and 125 | vertical planes. 126 | \item How to track the camera to minimize the particle's distance 127 | from the centre of the microscope's field of vision? 128 | \end{itemize} 129 | 130 | \end{frame} 131 | 132 | \begin{frame}{Mathematical Model} 133 | We can model the pollen's motion as 134 | 135 | \begin{block}{Equations of Motion} 136 | $$ 137 | \begin{aligned} 138 | \frac{\mathrm{d}^2 x_1}{\mathrm{d}t^2} &= \omega_1(t) \\ 139 | \frac{\mathrm{d}^2 x_2}{\mathrm{d}t^2} &= \omega_2(t) 140 | \end{aligned} 141 | $$ 142 | \end{block} 143 | 144 | Writing $x_3 = \mathrm{d}x_1 / \mathrm{d}t$ and 145 | $x_4 = \mathrm{d}x_2 / \mathrm{d}t$ this becomes 146 | 147 | \begin{block}{Matrix Form} 148 | $$ 149 | \frac{\mathrm{d}}{\mathrm{d}t}\begin{bmatrix}x_1 \\ x_2 \\ x_3 \\ x_4\end{bmatrix} = 150 | \begin{bmatrix} 151 | 0 & 0 & 1 & 0 \\ 152 | 0 & 0 & 0 & 1 \\ 153 | 0 & 0 & 0 & 0 \\ 154 | 0 & 0 & 0 & 0 155 | \end{bmatrix} 156 | \begin{bmatrix}x_1 \\ x_2 \\ x_3 \\ x_4\end{bmatrix} + 157 | \begin{bmatrix} 158 | 0 & 0 \\ 159 | 0 & 0 \\ 160 | 1 & 0 \\ 161 | 0 & 1 162 | \end{bmatrix} 163 | \begin{bmatrix}\omega_1 \\ \omega_2\end{bmatrix} 164 | $$ 165 | \end{block} 166 | 167 | \end{frame} 168 | 169 | \begin{frame}{} 170 | 171 | \begin{block}{Discretizing at $0, \Delta t, 2\Delta t, \ldots $} 172 | $$ 173 | \begin{bmatrix}x^{(k)}_1 \\ x^{(k)}_2 \\ x^{(k)}_3 \\ x^{(k)}_4\end{bmatrix} = 174 | \begin{bmatrix} 175 | 1 & 0 & \Delta t & 0 \\ 176 | 0 & 1 & 0 & \Delta t \\ 177 | 0 & 0 & 1 & 0 \\ 178 | 0 & 0 & 0 & 1 179 | \end{bmatrix} 180 | \begin{bmatrix}x^{(k-1)}_1 \\ x^{(k-1)}_2 \\ x^{(k-1)}_3 \\ x^{(k-1)}_4\end{bmatrix} + 181 | \mathbf{q}_k 182 | $$ 183 | \end{block} 184 | 185 | \begin{block}{In vector notation} 186 | $$ 187 | \mathbf{x}_k = \mathbf{A} \mathbf{x}_{k_1} + \mathbf{q}_k 188 | $$ 189 | \end{block} 190 | 191 | \end{frame} 192 | 193 | \section{Bayes for Pollen} 194 | 195 | \begin{frame}{} 196 | 197 | A similar but lengthier derivation gives us the following algorithm 198 | 199 | \end{frame} 200 | 201 | \end{document} 202 | 203 | 
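% The closing frame above refers to "the following algorithm" but the frame
% containing it does not appear in this source. Purely as a sketch for the
% reader (standard results; see e.g. Sarkka, "Bayesian Filtering and
% Smoothing", cited in Kalman.bib), the Kalman filter recursion for the
% linear-Gaussian model x_k = A x_{k-1} + q_k, y_k = H x_k + r_k with
% q_k ~ N(0, Q) and r_k ~ N(0, R) is:
%
% Predict:  m_k^- = A m_{k-1}
%           P_k^- = A P_{k-1} A^T + Q
%
% Update:   v_k = y_k - H m_k^-
%           S_k = H P_k^- H^T + R
%           K_k = P_k^- H^T S_k^{-1}
%           m_k = m_k^- + K_k v_k
%           P_k = (I - K_k H) P_k^-
%
% The derivation in FunWithKalmanPart1.lhs is the special case
% A = H = 1, Q = 0, R = c_k^2.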
-------------------------------------------------------------------------------- /HaskellX/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, Dominic Steinitz 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Dominic Steinitz nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /HaskellX/Main.lhs: -------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | main :: IO () 4 | main = putStrLn "Hello, Haskell!" 5 | -------------------------------------------------------------------------------- /HaskellX/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /HaskellX/default.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? import {}, compiler ? "default" }: 2 | 3 | let 4 | 5 | inherit (nixpkgs) pkgs; 6 | 7 | f = { mkDerivation, base, monad-loops, mtl, stdenv, vector }: 8 | mkDerivation { 9 | pname = "HaskellX"; 10 | version = "0.1.0.0"; 11 | src = ./.; 12 | isLibrary = false; 13 | isExecutable = true; 14 | executableHaskellDepends = [ base monad-loops mtl vector ]; 15 | license = stdenv.lib.licenses.bsd3; 16 | }; 17 | 18 | haskellPackages = if compiler == "default" 19 | then pkgs.haskellPackages 20 | else pkgs.haskell.packages.${compiler}; 21 | 22 | drv = haskellPackages.callPackage f {}; 23 | 24 | in 25 | 26 | if pkgs.lib.inNixShell then drv.env else drv 27 | -------------------------------------------------------------------------------- /HaskellX/stack.yaml: -------------------------------------------------------------------------------- 1 | flags: {} 2 | extra-package-dbs: [] 3 | packages: 4 | - '.' 
5 | extra-deps: 6 | - plots-0.1.0.2 7 | - random-fu-multivariate-0.1.2.0 8 | resolver: lts-9.6 9 | -------------------------------------------------------------------------------- /Importance.lhs: -------------------------------------------------------------------------------- 1 | % Importance Sampling 2 | % Dominic Steinitz 3 | % 14th August 2014 4 | 5 | --- 6 | bibliography: Kalman.bib 7 | --- 8 | 9 | \newcommand{\condprob} [3] {#1\left(#2 \,\vert\, #3\right)} 10 | 11 | Importance Sampling 12 | =================== 13 | 14 | Suppose we have an random variable $X$ with pdf $1/2\exp{-\lvert 15 | x\rvert}$ and we wish to find its second moment numerically. However, 16 | the [random-fu](https://hackage.haskell.org/package/random-fu) package 17 | does not support sampling from such as distribution. We notice that 18 | 19 | $$ 20 | \int_{-\infty}^\infty x^2 \frac{1}{2} \exp{-\lvert x\rvert} \mathrm{d}x = 21 | \int_{-\infty}^\infty x^2 \frac{\frac{1}{2} \exp{-\lvert x\rvert}} 22 | {\frac{1}{\sqrt{8\pi}}{\exp{-x^2/8}}} 23 | \frac{1}{\sqrt{8\pi}}{\exp{-x^2/8}} 24 | \,\mathrm{d}x 25 | $$ 26 | 27 | So we can sample from ${\cal{N}}(0, 4)$ and evaluate 28 | 29 | $$ 30 | x^2 \frac{\frac{1}{2} \exp{-\lvert x\rvert}} 31 | {\frac{1}{\sqrt{8\pi}}{\exp{-x^2/8}}} 32 | $$ 33 | 34 | > {-# OPTIONS_GHC -Wall #-} 35 | > {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 36 | > {-# OPTIONS_GHC -fno-warn-type-defaults #-} 37 | > {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 38 | > {-# OPTIONS_GHC -fno-warn-missing-methods #-} 39 | > {-# OPTIONS_GHC -fno-warn-orphans #-} 40 | 41 | > module Importance where 42 | 43 | > import Control.Monad 44 | > import Data.Random.Source.PureMT 45 | > import Data.Random 46 | > import Data.Random.Distribution.Binomial 47 | > import Data.Random.Distribution.Beta 48 | > import Control.Monad.State 49 | > import qualified Control.Monad.Writer as W 50 | 51 | 52 | > sampleImportance :: RVarT (W.Writer [Double]) () 53 | > sampleImportance = do 54 | > x <- rvarT $ Normal 0.0 2.0 55 | > let x2 = x^2 56 | > u = x2 * 0.5 * exp (-(abs x)) 57 | > v = (exp ((-x2)/8)) * (recip (sqrt (8*pi))) 58 | > w = u / v 59 | > lift $ W.tell [w] 60 | > return () 61 | 62 | > runImportance :: Int -> [Double] 63 | > runImportance n = 64 | > snd $ 65 | > W.runWriter $ 66 | > evalStateT (sample (replicateM n sampleImportance)) 67 | > (pureMT 2) 68 | 69 | We can run this 10,000 times to get an estimate. 70 | 71 | [ghci] 72 | import Formatting 73 | format (fixed 2) (sum (runImportance 10000) / 10000) 74 | 75 | Since we know that the $n$-th moment of the exponential distribution 76 | is $n! / \lambda^n$ where $\lambda$ is the rate (1 in this example), 77 | the exact answer is 2 which is not too far from our estimate using 78 | importance sampling. 79 | 80 | The value of 81 | 82 | $$ 83 | w(x) = \frac{1}{N}\frac{\frac{1}{2} \exp{-\lvert x\rvert}} 84 | {\frac{1}{\sqrt{8\pi}}{\exp{-x^2/8}}} 85 | = \frac{p(x)}{\pi(x)} 86 | $$ 87 | 88 | is called the weight, $p$ is the pdf from which we wish to sample and 89 | $\pi$ is the pdf of the importance distribution. 90 | 91 | Importance Sampling Approximation of the Posterior 92 | ================================================== 93 | 94 | Suppose that the posterior distribution of a model in which we are 95 | interested has a complicated functional form and that we therefore 96 | wish to approximate it in some way. First assume that we wish to 97 | calculate the expectation of some arbitrary function $f$ of the 98 | parameters. 
99 | 100 | $$ 101 | {\mathbb{E}}(f({x}) \,\vert\, y_1, \ldots y_T) = 102 | \int_\Omega f({x}) p({x} \, \vert \, y_1, \ldots y_T) \,\mathrm{d}{x} 103 | $$ 104 | 105 | Using Bayes 106 | 107 | $$ 108 | \int_\Omega f({x}) \condprob{p}{x}{y_1, \ldots y_T} \,\mathrm{d}{x} = 109 | \frac{1}{Z}\int_\Omega f({x}) \condprob{p}{y_1, \ldots y_T}{x}p(x) \,\mathrm{d}{x} 110 | $$ 111 | 112 | where $Z$ is some normalizing constant. 113 | 114 | As before we can re-write this using a proposal distribution $\pi(x)$ 115 | 116 | $$ 117 | \frac{1}{Z}\int_\Omega f({x}) \condprob{p}{y_1, \ldots y_T}{x}p(x) \,\mathrm{d}{x} = 118 | \frac{1}{Z}\int_\Omega \frac{f({x}) \condprob{p}{y_1, \ldots y_T}{x}p(x)}{\pi(x)}\pi(x) \,\mathrm{d}{x} 119 | $$ 120 | 121 | We can now sample $X^{(i)} \sim \pi({x})$ repeatedly to obtain 122 | 123 | $$ 124 | {\mathbb{E}}(f({x}) \,\vert\, y_1, \ldots y_T) \approx \frac{1}{ZN}\sum_1^N 125 | f({X^{(i)}}) \frac{p(y_1, \ldots y_T \, \vert \, {X^{(i)}})p({X^{(i)}})} 126 | {\pi({X^{(i)}})} = 127 | \sum_1^N w_if({X^{(i)}}) 128 | $$ 129 | 130 | where the weights $w_i$ are defined as before by 131 | 132 | $$ 133 | w_i = \frac{1}{ZN} \frac{p(y_1, \ldots y_T \, \vert \, {X^{(i)}})p({X^{(i)}})} 134 | {\pi({X^{(i)}})} 135 | $$ 136 | 137 | We follow [Alex 138 | Cook](http://blog.nus.edu.sg/alexcook/teaching/sph6004/) and use the 139 | example from [@citeulike:5986027]. We take the prior as $\sim 140 | {\cal{Be}}(1,1)$ and use ${\cal{U}}(0.0,1.0)$ as the proposal 141 | distribution. In this case the proposal and the prior are identical 142 | just expressed differently and therefore cancel. 143 | 144 | Note that we use the log of the pdf in our calculations otherwise we 145 | suffer from (silent) underflow, e.g., 146 | 147 | [ghci] 148 | pdf (Binomial nv (0.4 :: Double)) xv 149 | 150 | On the other hand if we use the log pdf form 151 | 152 | [ghci] 153 | logPdf (Binomial nv (0.4 :: Double)) xv 154 | 155 | > xv, nv :: Int 156 | > xv = 51 157 | > nv = 8197 158 | 159 | > sampleUniform :: RVarT (W.Writer [Double]) () 160 | > sampleUniform = do 161 | > x <- rvarT StdUniform 162 | > lift $ W.tell [x] 163 | > return () 164 | 165 | > runSampler :: RVarT (W.Writer [Double]) () -> 166 | > Int -> Int -> [Double] 167 | > runSampler sampler seed n = 168 | > snd $ 169 | > W.runWriter $ 170 | > evalStateT (sample (replicateM n sampler)) 171 | > (pureMT (fromIntegral seed)) 172 | 173 | > sampleSize :: Int 174 | > sampleSize = 1000 175 | 176 | > pv :: [Double] 177 | > pv = runSampler sampleUniform 2 sampleSize 178 | 179 | > logWeightsRaw :: [Double] 180 | > logWeightsRaw = map (\p -> logPdf (Beta 1.0 1.0) p + 181 | > logPdf (Binomial nv p) xv - 182 | > logPdf StdUniform p) pv 183 | 184 | > logWeightsMax :: Double 185 | > logWeightsMax = maximum logWeightsRaw 186 | > 187 | > weightsRaw :: [Double] 188 | > weightsRaw = map (\w -> exp (w - logWeightsMax)) logWeightsRaw 189 | 190 | > weightsSum :: Double 191 | > weightsSum = sum weightsRaw 192 | 193 | > weights :: [Double] 194 | > weights = map (/ weightsSum) weightsRaw 195 | 196 | > meanPv :: Double 197 | > meanPv = sum $ zipWith (*) pv weights 198 | > 199 | > meanPv2 :: Double 200 | > meanPv2 = sum $ zipWith (\p w -> p * p * w) pv weights 201 | > 202 | > varPv :: Double 203 | > varPv = meanPv2 - meanPv * meanPv 204 | 205 | We get the answer 206 | 207 | [ghci] 208 | meanPv 209 | 210 | But if we look at the size of the weights and the effective sample size 211 | 212 | [ghci] 213 | length $ filter (>= 1e-6) weights 214 | (sum weights)^2 / (sum $ map (^2) weights) 215 | 216 | 
so we may not be getting a very good estimate. Let's try 217 | 218 | > sampleNormal :: RVarT (W.Writer [Double]) () 219 | > sampleNormal = do 220 | > x <- rvarT $ Normal meanPv (sqrt varPv) 221 | > lift $ W.tell [x] 222 | > return () 223 | 224 | > pvC :: [Double] 225 | > pvC = runSampler sampleNormal 3 sampleSize 226 | 227 | > logWeightsRawC :: [Double] 228 | > logWeightsRawC = map (\p -> logPdf (Beta 1.0 1.0) p + 229 | > logPdf (Binomial nv p) xv - 230 | > logPdf (Normal meanPv (sqrt varPv)) p) pvC 231 | 232 | > logWeightsMaxC :: Double 233 | > logWeightsMaxC = maximum logWeightsRawC 234 | > 235 | > weightsRawC :: [Double] 236 | > weightsRawC = map (\w -> exp (w - logWeightsMaxC)) logWeightsRawC 237 | 238 | > weightsSumC :: Double 239 | > weightsSumC = sum weightsRawC 240 | 241 | > weightsC :: [Double] 242 | > weightsC = map (/ weightsSumC) weightsRawC 243 | 244 | > meanPvC :: Double 245 | > meanPvC = sum $ zipWith (*) pvC weightsC 246 | 247 | > meanPvC2 :: Double 248 | > meanPvC2 = sum $ zipWith (\p w -> p * p * w) pvC weightsC 249 | > 250 | > varPvC :: Double 251 | > varPvC = meanPvC2 - meanPvC * meanPvC 252 | 253 | Now the weights and the effective size are more re-assuring 254 | 255 | [ghci] 256 | length $ filter (>= 1e-6) weightsC 257 | (sum weightsC)^2 / (sum $ map (^2) weightsC) 258 | 259 | And we can take more confidence in the estimate 260 | 261 | [ghci] 262 | meanPvC 263 | 264 | Bibliography 265 | ============ -------------------------------------------------------------------------------- /InsurelyII.org: -------------------------------------------------------------------------------- 1 | #+OPTIONS: d:(not "BLOG") 2 | 3 | #+BEGIN_SRC emacs-lisp :exports none 4 | ;; make org mode allow eval of some langs 5 | (org-babel-do-load-languages 6 | 'org-babel-load-languages 7 | '((emacs-lisp . t) 8 | (python . t) 9 | (haskell . t) 10 | (R . t))) 11 | #+END_SRC 12 | 13 | #+RESULTS: 14 | : ((emacs-lisp . t) (python . t) (haskell . t) (R . t)) 15 | 16 | #+TITLE: Bayesian Change Point Detection 17 | #+AUTHOR: Dominic Steinitz 18 | #+EMAIL: dominic@steinitz.org 19 | #+DATE: [2017-07-17 Mon] 20 | #+DESCRIPTION: Bayesian change point analysis of UK / South Korea trade statistics 21 | #+LANGUAGE: en 22 | #+BEAMER_THEME: Frankfurt [height=20pt] 23 | #+OPTIONS: H:3 24 | #+LATEX_HEADER: \RequirePackage{fancyvrb} 25 | #+LATEX_HEADER: \DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\scriptsize} 26 | #+LATEX_HEADER: \usepackage[style=alphabetic]{biblatex} 27 | 28 | * Framework for Exotic Derivatives :presentation: 29 | 30 | ** Section 31 | 32 | *** What is an Option 33 | 34 | An option or derivative is a contract giving the owner the right, but 35 | not the obligation, to buy (call) or sell (put) an underlying asset at 36 | a specified price (aka the strike), on or before a specified date. 
37 | 38 | **** Mathematically 39 | $$ 40 | c = (x - k)^+ 41 | p = (k - x)^+ 42 | $$ 43 | 44 | **** In Haskell 45 | #+BEGIN_SRC haskell :export code :session hask 46 | call k x = max (x - k) 0 47 | put k x = max (k - x) 0 48 | #+END_SRC 49 | 50 | #+RESULTS: 51 | 52 | *** Call Chart 53 | 54 | #+BEGIN_SRC R :exports none :session R-II-session 55 | library(ggplot2) 56 | 57 | x <- seq(0.0, 5, 0.1) 58 | fun.1 <- function(x){return(max(x - 2.5, 0))} 59 | y <- unlist(lapply(x,fun.1)) 60 | df <- data.frame(x) 61 | df$y <- y 62 | 63 | ggplot(df, aes(x=x, y=y)) + geom_line() + 64 | ggsave("diagrams/call.png") 65 | #+END_SRC 66 | 67 | #+RESULTS: 68 | 69 | #+BEGIN_center 70 | #+ATTR_LATEX: :height 0.85\textheight 71 | [[./diagrams/call.png]] 72 | #+END_center 73 | 74 | *** Put Chart 75 | 76 | #+BEGIN_SRC R :exports none :session R-II-session 77 | x <- seq(0.0, 5, 0.1) 78 | fun.2 <- function(x){return(max(2.5 - x, 0))} 79 | y <- unlist(lapply(x,fun.2)) 80 | df <- data.frame(x) 81 | df$y <- y 82 | 83 | ggplot(df, aes(x=x, y=y)) + geom_line() + 84 | ggsave("diagrams/put.png") 85 | #+END_SRC 86 | 87 | #+RESULTS: 88 | 89 | #+BEGIN_center 90 | #+ATTR_LATEX: :height 0.85\textheight 91 | [[./diagrams/put.png]] 92 | #+END_center 93 | 94 | 95 | *** Exotic 96 | 97 | * Baskets 98 | - an option on a portfolio of underlyings 99 | * Compound options 100 | - Options on other options, e.g. a call on a call 101 | * Path dependent options 102 | - barrier options–payout locked-in when underlying hits trigger 103 | - lookback options–payout based on highest or lowest price during 104 | the lookback period 105 | - Asian options–payout derived from average value of underlying 106 | over a specified window 107 | - Autocallables–will early redeem if a particular barrier condition 108 | is met 109 | - knock-in put 110 | 111 | *** Trade Lifecycle 112 | 113 | * Sales interact with the customers 114 | * Structurers create new products, often on customer request 115 | * Quants provide mathematical models and formal description of trades 116 | (payout functions) 117 | * Risk management validate and sign-off the payout functions 118 | * Traders derive the final price, manage the trade over its lifetime 119 | and analyse upcoming events 120 | * Payments systems handle payment events throughout the lifetime of 121 | the trade 122 | 123 | *** Functional Payout Framework 124 | 125 | * \cite{Jones_2000} \citeauthor{Jones_2000} \citetitle{Jones_2000} 126 | * Barclays 2006 127 | * A standardized representation for describing payoffs 128 | * A common suite of tools for trades which use this representation 129 | - Pricing via C / Monte Carlo 130 | - Mathematical / \LaTeX representation / Mathematica for risk management 131 | - pricing and risk management 132 | - barrier analysis 133 | - payments and other lifecycle events 134 | 135 | *** Functional Payout Framework 136 | 137 | **** Specifying a Trade 138 | 139 | * Trade type is Haskell script 140 | * Trade parameters e.g. start date, strike, expiration date, barrier 141 | levels, etc 142 | * Fixings e.g. 
prices on Asian in 143 | 144 | **** Backends 145 | 146 | * Pricing via MC or PDE 147 | * \LaTeX 148 | * Payments 149 | * Barriers 150 | * Mathematica 151 | 152 | *** Some Examples 153 | 154 | #+BEGIN_SRC haskell :export code :session haskII 155 | perf :: Date -> Date -> Asset -> Double 156 | perf t1 t2 asset = 157 | observe asset t2 / observe asset t1 - 1 158 | 159 | bestOf :: (List Asset, Date, Date) -> Double 160 | bestOf (assets', startDate', endDate') = 161 | foldl1 max perfs where 162 | assets = name "Assets" assets' 163 | startDate = name "Starting date" startDate' 164 | endDate = name "End date" endDate' 165 | perfs = map (perf startDate endDate) assets 166 | #+END_SRC 167 | 168 | *** Some Examples 169 | 170 | #+BEGIN_SRC haskell :export code :session haskII 171 | cliquetDemo_v2 172 | ( name "Asset" -> asset 173 | , name "Global floor" -> gf 174 | , name "Global cap" -> gc 175 | , name "Local floor" -> lf 176 | , name "Local cap" -> lc 177 | , name "Initial date" -> inDate 178 | , name "Dates" -> dates 179 | , name "Payment date" -> payDate 180 | ) 181 | = max gf $ min gc $ sum perfs 182 | where 183 | cliquet d d' = (d', max lf $ min lc $ perf d d' asset) 184 | (_, perfs) = mapAccumL cliquet inDate dates 185 | #+END_SRC 186 | 187 | # As far as I can tell we need this for reftex but not for actual 188 | # citation production as long as we have an ok .bbl file. 189 | 190 | 191 | -------------------------------------------------------------------------------- /JuliaCPU.jl: -------------------------------------------------------------------------------- 1 | using StaticArrays 2 | 3 | e = 0.6 4 | q10 = 1 - e 5 | q20 = 0.0 6 | p10 = 0.0 7 | p20 = sqrt((1 + e) / (1 - e)) 8 | 9 | h = 0.01 10 | 11 | x1 = SVector{2,Float64}(q10, q20) 12 | x2 = SVector{2,Float64}(p10, p20) 13 | x3 = SVector{2,SVector{2,Float64}}(x1,x2) 14 | 15 | @inline function oneStep(h, prev) 16 | h2 = h / 2 17 | @inbounds qsPrev = prev[1] 18 | @inbounds psPrev = prev[2] 19 | @inline function nablaQQ(qs) 20 | @inbounds q1 = qs[1] 21 | @inbounds q2 = qs[2] 22 | r = (q1^2 + q2^2) ^ (3/2) 23 | return SVector{2,Float64}(q1 / r, q2 / r) 24 | end 25 | @inline function nablaPP(ps) 26 | return ps 27 | end 28 | p2 = psPrev - h2 * nablaQQ(qsPrev) 29 | qNew = qsPrev + h * nablaPP(p2) 30 | pNew = p2 - h2 * nablaQQ(qNew) 31 | return SVector{2,SVector{2,Float64}}(qNew, pNew) 32 | end 33 | 34 | function manyStepsFinal(n,h,prev) 35 | for i in 1:n 36 | prev = oneStep(h,prev) 37 | end 38 | return prev 39 | end 40 | 41 | final = manyStepsFinal(100000000,h,x3) 42 | print(final) 43 | -------------------------------------------------------------------------------- /Kalman.bib: -------------------------------------------------------------------------------- 1 | @article{citeulike:5986027, 2 | abstract = {{In the late 1980s in Thailand, there was a dramatic increase in the prevalence of infection with the human immunodeficiency virus type 1 (HIV-1) in sentinel surveillance cohorts.1?3 Initially, these groups consisted of injection-drug users and commercial sex workers; they were subsequently expanded to include persons in the general population. By 1995, the overall seroprevalence of HIV-1 reached a peak of 3.7\% among conscripts in the Royal Thai Army and of 12.5\% among conscripts from Northern Thailand.2,4,5 The Thai Ministry of Public Health responded with an effective HIV-prevention campaign, and the number of new HIV-1 infections per . . 
.}}, 3 | author = {Rerks-Ngarm, Supachai and Pitisuttithum, Punnee and Nitayaphan, Sorachai and Kaewkungwal, Jaranit and Chiu, Joseph and Paris, Robert and Premsri, Nakorn and Namwat, Chawetsan and de Souza, Mark and Adams, Elizabeth and Benenson, Michael and Gurunathan, Sanjay and Tartaglia, Jim and McNeil, John G. and Francis, Donald P. and Stablein, Donald and Birx, Deborah L. and Chunsuttiwat, Supamit and Khamboonruang, Chirasak and Thongcharoen, Prasert and Robb, Merlin L. and Michael, Nelson L. and Kunasol, Prayura and Kim, Jerome H.}, 4 | booktitle = {New England Journal of Medicine}, 5 | citeulike-article-id = {5986027}, 6 | citeulike-linkout-0 = {http://dx.doi.org/10.1056/nejmoa0908492}, 7 | citeulike-linkout-1 = {http://content.nejm.org/content/NEJMoa0908492v3/.abstract}, 8 | citeulike-linkout-2 = {http://content.nejm.org/content/NEJMoa0908492v3/.full.pdf}, 9 | citeulike-linkout-3 = {http://www.nejm.org/doi/abs/10.1056/NEJMoa0908492}, 10 | citeulike-linkout-4 = {http://view.ncbi.nlm.nih.gov/pubmed/19843557}, 11 | citeulike-linkout-5 = {http://www.hubmed.org/display.cgi?uids=19843557}, 12 | day = {3}, 13 | doi = {10.1056/nejmoa0908492}, 14 | issn = {1533-4406}, 15 | journal = {N Engl J Med}, 16 | keywords = {hiv-1, thai-trial}, 17 | month = dec, 18 | number = {23}, 19 | pages = {2209--2220}, 20 | pmid = {19843557}, 21 | posted-at = {2011-09-22 23:48:21}, 22 | priority = {2}, 23 | publisher = {Massachusetts Medical Society}, 24 | title = {{Vaccination with ALVAC and AIDSVAX to Prevent HIV-1 Infection in Thailand}}, 25 | url = {http://dx.doi.org/10.1056/nejmoa0908492}, 26 | volume = {361}, 27 | year = {2009} 28 | } 29 | 30 | @book{MacKay:itp, 31 | author = "David J. C. MacKay", 32 | title = "Information Theory, Inference, and Learning Algorithms", 33 | note = "Available from {\tt{http://www.inference.phy.cam.ac.uk/mackay/itila/}}", 34 | publisher={Cambridge University Press}, 35 | url="http://www.cambridge.org/0521642981", 36 | year = {2003} 37 | } 38 | 39 | @Book{williams, 40 | author = {David Williams}, 41 | title = {Probability With Martingales}, 42 | publisher = {Cambridge University Press}, 43 | year = {1991} 44 | } 45 | 46 | @misc{Ng:cs229:Online, 47 | author = {Ng, Andrew}, 48 | title = {CS229 Machine Learning}, 49 | year = {2012}, 50 | url = {http://cs229.stanford.edu} 51 | } 52 | 53 | @misc{Boyd:EE363:Online, 54 | author = {Boyd, Stephen}, 55 | title = {EE363 Linear Dynamical Systems}, 56 | year = {2008}, 57 | url = {http://stanford.edu/class/ee363} 58 | } 59 | 60 | @inproceedings{kleeman1996understanding, 61 | title={Understanding and applying Kalman filtering}, 62 | author={Kleeman, Lindsay}, 63 | booktitle={Proceedings of the Second Workshop on Perceptive Systems, Curtin University of Technology, Perth Western Australia (25-26 January 1996)}, 64 | year={1996} 65 | } 66 | 67 | @book{sarkka2013bayesian, 68 | title={Bayesian filtering and smoothing}, 69 | author={S{\"a}rkk{\"a}, Simo}, 70 | volume={3}, 71 | year={2013}, 72 | publisher={Cambridge University Press} 73 | } 74 | 75 | @article{DBLP:journals_tsp_NemethFM14, 76 | author = {Christopher Nemeth and 77 | Paul Fearnhead and 78 | Lyudmila Mihaylova}, 79 | title = {Sequential Monte Carlo Methods for State and Parameter Estimation 80 | in Abruptly Changing Environments}, 81 | journal = {IEEE Transactions on Signal Processing}, 82 | volume = {62}, 83 | number = {5}, 84 | year = {2014}, 85 | pages = {1245-1255}, 86 | ee = {http://dx.doi.org/10.1109/TSP.2013.2296278}, 87 | bibsource = {DBLP, http://dblp.uni-trier.de} 88 | } 
89 | 90 | @article{journals/tse/Vose91, 91 | added-at = {2011-11-07T00:00:00.000+0100}, 92 | author = {Vose, Michael D.}, 93 | biburl = {http://www.bibsonomy.org/bibtex/234864fed2dfb5fa5ed609cd927d2d047/dblp}, 94 | ee = {http://doi.ieeecomputersociety.org/10.1109/32.92917}, 95 | interhash = {d2d9cfa8da5f1c0c9fa210bb6042c0cb}, 96 | intrahash = {34864fed2dfb5fa5ed609cd927d2d047}, 97 | journal = {IEEE Trans. Software Eng.}, 98 | keywords = {dblp}, 99 | number = 9, 100 | pages = {972-975}, 101 | timestamp = {2011-11-07T00:00:00.000+0100}, 102 | title = {A Linear Algorithm For Generating Random Numbers With a Given Distribution.}, 103 | url = {http://dblp.uni-trier.de/db/journals/tse/tse17.html#Vose91}, 104 | volume = 17, 105 | year = 1991 106 | } 107 | 108 | @article{Ciesielski61, 109 | author = {Z. Ciesielski}, 110 | title = {Holder condition for realization of {G}aussian processes}, 111 | journal = tams, 112 | volume={99}, 113 | year = 1961, 114 | pages={403--413} 115 | } 116 | 117 | @book{liggett2010continuous, 118 | title={Continuous Time Markov Processes: An Introduction}, 119 | author={Liggett, T.M.}, 120 | isbn={9780821884195}, 121 | series={Graduate studies in mathematics}, 122 | url={http://books.google.co.uk/books?id=ClW\_cKYSON0C}, 123 | year={2010}, 124 | publisher={American Mathematical Soc.} 125 | } 126 | 127 | @article{Bier2000, 128 | abstract = {Of all the lifeforms that obtain their energy from glycolysis, yeast cells are among the most basic. Under certain conditions the concentrations of the glycolytic intermediates in yeast cells can oscillate. Individual yeast cells in a suspension can synchronize their oscillations to get in phase with each other. Although the glycolytic oscillations originate in the upper part of the glycolytic chain, the signaling agent in this synchronization appears to be acetaldehyde, a membrane-permeating metabolite at the bottom of the anaerobic part of the glycolytic chain. Here we address the issue of how a metabolite remote from the pacemaking origin of the oscillation may nevertheless control the synchronization. We present a quantitative model for glycolytic oscillations and their synchronization in terms of chemical kinetics. 
We show that, in essence, the common acetaldehyde concentration can be modeled as a small perturbation on the "pacemaker" whose effect on the period of the oscillations of cells in the same suspension is indeed such that a synchronization develops.}, 129 | author = {Bier, M and Bakker, B M and Westerhoff, H V}, 130 | doi = {10.1016/S0006-3495(00)76667-7}, 131 | isbn = {0006-3495}, 132 | issn = {00063495}, 133 | journal = {Biophysical journal}, 134 | number = {3}, 135 | pages = {1087--1093}, 136 | pmid = {10692299}, 137 | title = {{How yeast cells synchronize their glycolytic oscillations: a perturbation analytic treatment.}}, 138 | volume = {78}, 139 | year = {2000} 140 | } 141 | -------------------------------------------------------------------------------- /KalmanChart.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module KalmanChart ( 9 | diagPartFilter 10 | , diagEsts 11 | ) where 12 | 13 | import Control.Lens hiding ( (#) ) 14 | import Graphics.Rendering.Chart 15 | import Graphics.Rendering.Chart.Backend.Diagrams 16 | import Diagrams.Backend.Cairo.CmdLine 17 | import Diagrams.Prelude hiding ( render, Renderable ) 18 | import Data.Default.Class 19 | 20 | import System.IO.Unsafe 21 | 22 | 23 | denv :: DEnv 24 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 500 500 25 | 26 | diagPartFilter :: Double -> [(Double, Double)] -> Int -> QDiagram Cairo R2 Any 27 | diagPartFilter x ls n = 28 | fst $ runBackend denv (render (chartPartFilter x ls n) (500, 500)) 29 | 30 | chartPartFilter :: Double -> [(Double, Double)] -> Int -> Renderable () 31 | chartPartFilter x lineVals n = toRenderable layout 32 | where 33 | 34 | fitted = plot_lines_values .~ [lineVals] 35 | $ plot_lines_style . line_color .~ opaque blue 36 | $ plot_lines_title .~ "Trajectory" 37 | $ def 38 | 39 | actual = plot_lines_values .~ [zip (map fst lineVals) (repeat x)] 40 | $ plot_lines_style . line_color .~ opaque red 41 | $ plot_lines_title .~ "Actual" 42 | $ def 43 | 44 | layout = layout_title .~ "Particle Filtering" 45 | $ layout_y_axis . laxis_generate .~ scaledAxis def (-3,3) 46 | $ layout_x_axis . laxis_generate .~ scaledAxis def (0,(fromIntegral n)) 47 | 48 | $ layout_plots .~ [ toPlot fitted 49 | , toPlot actual 50 | ] 51 | $ def 52 | 53 | diagEsts :: [(Double, Double)] -> 54 | [(Double, Double)] -> 55 | [(Double, Double)] -> 56 | [(Double, Double)] -> 57 | QDiagram Cairo R2 Any 58 | diagEsts ls us ks ps = 59 | fst $ runBackend denv (render (chartEsts ls us ks ps) (500, 500)) 60 | 61 | chartEsts :: [(Double, Double)] -> 62 | [(Double, Double)] -> 63 | [(Double, Double)] -> 64 | [(Double, Double)] -> 65 | Renderable () 66 | chartEsts lineVals upperVals lowerVals pointVals = toRenderable layout 67 | where 68 | 69 | fitted = plot_lines_values .~ [lineVals] 70 | $ plot_lines_style . line_color .~ opaque blue 71 | $ plot_lines_title .~ "Estimate" 72 | $ def 73 | 74 | upper = plot_lines_values .~ [upperVals] 75 | $ plot_lines_style . line_color .~ opaque green 76 | $ plot_lines_title .~ "Upper error" 77 | $ def 78 | 79 | lower = plot_lines_values .~ [lowerVals] 80 | $ plot_lines_style . 
line_color .~ opaque green 81 | $ plot_lines_title .~ "Lower error" 82 | $ def 83 | 84 | dataPts = plot_points_style .~ filledCircles 2 (opaque red) 85 | $ plot_points_values .~ pointVals 86 | $ plot_points_title .~ "Samples" 87 | $ def 88 | 89 | layout = layout_title .~ "Estimation by Kalman Filtering" 90 | $ layout_plots .~ [toPlot fitted, 91 | toPlot upper, 92 | toPlot lower, 93 | toPlot dataPts] 94 | $ def 95 | -------------------------------------------------------------------------------- /LorenzGenerate.bi: -------------------------------------------------------------------------------- 1 | model Lorenz { 2 | const rho = 45.92 3 | const beta = 4.0 4 | const alpha = 16.0 5 | 6 | state X, Y, Z 7 | obs X_obs 8 | 9 | sub initial { 10 | X ~ log_normal(log(1.0), 0.00002) 11 | Y ~ log_normal(log(1.0), 0.00002) 12 | Z ~ log_normal(log(1.0), 0.00002) 13 | } 14 | 15 | sub transition(delta = 0.0001) { 16 | ode { 17 | dX/dt = alpha * (Y - X) 18 | dY/dt = X * (rho - Z) - Y 19 | dZ/dt = X * Y - beta * Z 20 | } 21 | } 22 | 23 | sub observation { 24 | X_obs ~ normal(X, 0.2) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /LorenzState.bi: -------------------------------------------------------------------------------- 1 | model LorenzState { 2 | const rho = 45.92 3 | const beta = 4 4 | 5 | const h = 0.1; // time step 6 | const delta_abs = 1.0e-3; // absolute error tolerance 7 | const delta_rel = 1.0e-6; // relative error tolerance 8 | 9 | state X, Y, Z 10 | state ln_alpha 11 | 12 | param mu, sigma 13 | 14 | noise w 15 | 16 | obs X_obs 17 | 18 | sub parameter { 19 | mu ~ uniform(12.0, 20.0) 20 | sigma ~ uniform(0.0, 0.5) 21 | } 22 | 23 | sub proposal_parameter { 24 | mu ~ truncated_gaussian(mu, 0.02, 12.0, 20.0); 25 | sigma ~ truncated_gaussian(sigma, 0.01, 0.0, 0.5); 26 | } 27 | 28 | sub initial { 29 | X ~ log_normal(log(1.0), 0.2) 30 | Y ~ log_normal(log(1.0), 0.2) 31 | Z ~ log_normal(log(1.0), 0.2) 32 | ln_alpha ~ gaussian(log(mu), sigma) 33 | } 34 | 35 | sub transition(delta = h) { 36 | w ~ normal (0.0, sqrt(h)) 37 | ode(h = h, atoler = delta_abs, rtoler = delta_rel, alg = 'RK4(3)') { 38 | dX/dt = exp(ln_alpha) * (Y - X) 39 | dY/dt = X * (rho - Z) - Y 40 | dZ/dt = X * Y - beta * Z 41 | dln_alpha/dt = -sigma * sigma / 2 - sigma * w / h 42 | } 43 | } 44 | 45 | sub observation { 46 | X_obs ~ normal(X, 0.2) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /Prediction/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Revision history for Prediction 2 | 3 | ## 0.1.0.0 -- YYYY-mm-dd 4 | 5 | * First version. Released on an unsuspecting world. 6 | -------------------------------------------------------------------------------- /Prediction/CustomSundials/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , cmake 3 | , fetchurl 4 | , openmpi 5 | , llvmPackages 6 | , python 7 | , liblapack 8 | , gfortran 9 | , lapackSupport ? 
true }: 10 | 11 | let liblapackShared = liblapack.override { 12 | shared = true; 13 | }; 14 | openmp = llvmPackages.openmp; 15 | 16 | in stdenv.mkDerivation rec { 17 | pname = "sundials"; 18 | version = "5.0.0"; 19 | 20 | buildInputs = [ python openmpi openmp ] ++ stdenv.lib.optionals (lapackSupport) [ gfortran ]; 21 | nativeBuildInputs = [ cmake openmpi openmp ]; 22 | 23 | src = fetchurl { 24 | url = "https://computing.llnl.gov/projects/${pname}/download/${pname}-${version}.tar.gz"; 25 | sha256 = "1lvx5pddjxgyr8kqlira36kxckz7nxwc8xilzfyx0hf607n42l9l"; 26 | }; 27 | 28 | patches = [ 29 | (fetchurl { 30 | # https://github.com/LLNL/sundials/pull/19 31 | url = "https://github.com/LLNL/sundials/commit/1350421eab6c5ab479de5eccf6af2dcad1eddf30.patch"; 32 | sha256 = "0g67lixp9m85fqpb9rzz1hl1z8ibdg0ldwq5z6flj5zl8a7cw52l"; 33 | }) 34 | (fetchurl { 35 | # https://github.com/LLNL/sundials/pull/20 36 | url = "https://github.com/LLNL/sundials/pull/20/commits/2d951bbe1ff7842fcd0dafa28c61b0aa94015f66.patch"; 37 | sha256 = "0lcr6m4lk14yqrxah4rdscpczny5l7m1zpfsjh8bgspadfsgk512"; 38 | }) 39 | ]; 40 | 41 | cmakeFlags = [ 42 | "-DEXAMPLES_INSTALL_PATH=${placeholder "out"}/share/examples" 43 | ] ++ stdenv.lib.optionals (lapackSupport) [ 44 | 45 | "-DSUNDIALS_INDEX_SIZE=64" 46 | "-DMPI_ENABLE=ON" 47 | "-DMPI_C_COMPILER=${openmpi}/bin/mpicc" 48 | "-DMPI_CXX_COMPILER=${openmpi}/bin/mpicxx" 49 | "-DMPIEXEC_EXECUTABLE=${openmpi}/bin/mpirun" 50 | 51 | "-DOPENMP_ENABLE=ON" 52 | 53 | "-DBUILD_SHARED_LIBS=OFF" 54 | "-DBUILD_STATIC_LIBS=ON" 55 | 56 | "-DBUILD_CVODE=ON" 57 | "-DBUILD_CVODES=OFF" 58 | "-DBUILD_IDA=OFF" 59 | "-DBUILD_IDAS=OFF" 60 | "-DBUILD_ARKODE=ON" 61 | "-DBUILD_KINSOL=OFF" 62 | "-DBUILD_TESTING=ON" 63 | "-DSUNDIALS_DEVTESTS=ON" 64 | "-DEXAMPLES_ENABLE_CXX=ON" 65 | 66 | "-DLAPACK_ENABLE=OFF" 67 | "-DLAPACK_LIBRARIES=${liblapackShared}/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary};${liblapackShared}/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}" 68 | ]; 69 | 70 | doCheck = false; 71 | preCheck = '' 72 | export OMP_NUM_THREADS=8 73 | ''; 74 | checkPhase = "make test"; 75 | 76 | meta = with stdenv.lib; { 77 | description = "Suite of nonlinear differential/algebraic equation solvers"; 78 | homepage = https://computation.llnl.gov/projects/sundials; 79 | platforms = platforms.all; 80 | maintainers = with maintainers; [ flokli idontgetoutmuch ]; 81 | license = licenses.bsd3; 82 | }; 83 | } 84 | -------------------------------------------------------------------------------- /Prediction/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019, Dominic Steinitz 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Dominic Steinitz nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 
19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /Prediction/Prediction.cabal: -------------------------------------------------------------------------------- 1 | cabal-version: 2.4 2 | 3 | name: Prediction 4 | version: 0.1.0.0 5 | license: BSD-3-Clause 6 | license-file: LICENSE 7 | author: Dominic Steinitz 8 | maintainer: dominic@steinitz.org 9 | category: Math 10 | extra-source-files: CHANGELOG.md 11 | 12 | executable Prediction 13 | main-is: Prediction.lhs 14 | build-depends: base ^>=4.12.0.0, 15 | hmatrix-sundials, 16 | hmatrix, 17 | diagrams-cairo, 18 | diagrams-lib, 19 | diagrams-rasterific, 20 | Naperian, 21 | Chart, 22 | Chart-diagrams, 23 | cassava 24 | default-language: Haskell2010 25 | -------------------------------------------------------------------------------- /Prediction/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /Prediction/TBA.nix: -------------------------------------------------------------------------------- 1 | let 2 | pkgs = { 3 | ihaskell = builtins.fetchTarball { 4 | url = "https://github.com/gibiansky/IHaskell/tarball/bb2500c448c35ca79bddaac30b799d42947e8774"; 5 | sha256 = "1n4yqxaf2xcnjfq0r1v7mzjhrizx7z5b2n6gj1kdk2yi37z672py"; 6 | }; 7 | # nixpkgs = builtins.fetchTarball { 8 | # url = "https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09.tar.gz"; 9 | # sha256 = "16wdsazc7g09ibcxlqsa3kblzhbbpdpb6s29llliybw73cp37b9s"; 10 | # }; 11 | nixpkgs = builtins.fetchTarball { 12 | url = "https://github.com/NixOS/nixpkgs-channels/tarball/49dc8087a20e0d742d38be5f13333a03d171006a"; 13 | sha256 = "1fdnqm4vyj50jb2ydcc0nldxwn6wm7qakxfhmpf72pz2y2ld55i6"; 14 | }; 15 | }; 16 | 17 | rOverlay = rself: rsuper: { 18 | myR = rsuper.rWrapper.override { 19 | packages = with rsuper.rPackages; [ ggplot2 dplyr xts purrr cmaes cubature ]; 20 | }; 21 | }; 22 | 23 | hmatrix-sundials = nixpkgs.haskell.lib.dontCheck ( 24 | nixpkgs.haskellPackages.callCabal2nix "hmatrix-sundials" (builtins.fetchGit { 25 | url = "https://github.com/haskell-numerics/hmatrix-sundials.git"; 26 | rev = "9b6ec2b5fc509f74c5e61657dfc638a2c7ebced0"; 27 | }) { sundials_arkode = nixpkgs.sundials; sundials_cvode = nixpkgs.sundials; }); 28 | 29 | haskellOverlay = self: super: { 30 | haskell = super.haskell // { packageOverrides = 31 | hself: hsuper: { 32 | my-random-fu-multivariate = hself.callPackage /Users/dom/nix-config/pkgs/random-fu-multivariate { }; 33 | my-hmatrix-sundials = hmatrix-sundials; 34 | 35 | Chart = super.haskell.lib.dontCheck ( 36 | hself.callCabal2nixWithOptions "Chart" 
(builtins.fetchGit { 37 | url = "https://github.com/timbod7/haskell-chart"; 38 | rev = "65606988864938f71ea79e3bc09872e7bab54f19"; 39 | }) "--subpath chart" { }); 40 | 41 | Chart-cairo = super.haskell.lib.dontCheck ( 42 | hself.callCabal2nixWithOptions "Chart-cairo" (builtins.fetchGit { 43 | url = "https://github.com/timbod7/haskell-chart"; 44 | rev = "65606988864938f71ea79e3bc09872e7bab54f19"; 45 | }) "--subpath chart-cairo" { }); 46 | 47 | Chart-diagrams = super.haskell.lib.dontCheck ( 48 | hself.callCabal2nixWithOptions "Chart-diagrams" (builtins.fetchGit { 49 | url = "https://github.com/timbod7/haskell-chart"; 50 | rev = "65606988864938f71ea79e3bc09872e7bab54f19"; 51 | }) "--subpath chart-diagrams" { }); 52 | 53 | Naperian = hself.callCabal2nix "Naperian" (builtins.fetchGit { 54 | url = "https://github.com/idontgetoutmuch/Naperian.git"; 55 | rev = "54d873ffe99de865ca34e6bb3b92736e29e01619"; 56 | }) { }; 57 | }; 58 | }; 59 | }; 60 | 61 | nixpkgs = import pkgs.nixpkgs { overlays = [ rOverlay haskellOverlay ]; config.allowBroken = true; }; 62 | 63 | r-libs-site = nixpkgs.runCommand "r-libs-site" { 64 | buildInputs = with nixpkgs; [ R rPackages.ggplot2 rPackages.dplyr rPackages.xts rPackages.purrr rPackages.cmaes rPackages.cubature ]; 65 | } ''echo $R_LIBS_SITE > $out''; 66 | 67 | ihaskellEnv = (import "${pkgs.ihaskell}/release.nix" { 68 | compiler = "ghc864"; 69 | nixpkgs = nixpkgs; 70 | packages = self: [ 71 | self.inline-r 72 | self.hmatrix 73 | self.my-hmatrix-sundials 74 | self.random-fu 75 | self.my-random-fu-multivariate 76 | self.cassava 77 | self.diagrams 78 | self.ihaskell-diagrams 79 | self.ihaskell-charts 80 | self.Chart 81 | self.Chart-diagrams 82 | self.Chart-cairo 83 | self.Naperian 84 | self.numbers 85 | ]; 86 | }).passthru.ihaskellEnv; 87 | 88 | systemPackages = self: [ self.myR ]; 89 | 90 | jupyterlab = nixpkgs.python3.withPackages (ps: [ ps.jupyterlab ]); 91 | 92 | rtsopts = "-M3g -N2"; 93 | 94 | ihaskellJupyterCmdSh = cmd: extraArgs: nixpkgs.writeScriptBin "ihaskell-${cmd}" '' 95 | #! 
${nixpkgs.stdenv.shell} 96 | export GHC_PACKAGE_PATH="$(echo ${ihaskellEnv}/lib/*/package.conf.d| tr ' ' ':'):$GHC_PACKAGE_PATH" 97 | export R_LIBS_SITE=${builtins.readFile r-libs-site} 98 | export PATH="${nixpkgs.stdenv.lib.makeBinPath ([ ihaskellEnv jupyterlab ] ++ systemPackages nixpkgs)}''${PATH:+:}$PATH" 99 | ${ihaskellEnv}/bin/ihaskell install \ 100 | -l $(${ihaskellEnv}/bin/ghc --print-libdir) \ 101 | --use-rtsopts="${rtsopts}" \ 102 | && ${jupyterlab}/bin/jupyter ${cmd} ${extraArgs} "$@" 103 | ''; 104 | in 105 | nixpkgs.buildEnv { 106 | name = "ihaskell-with-packages"; 107 | buildInputs = [ nixpkgs.makeWrapper ]; 108 | paths = [ ihaskellEnv jupyterlab ]; 109 | postBuild = '' 110 | ln -s ${ihaskellJupyterCmdSh "lab" ""}/bin/ihaskell-lab $out/bin/ 111 | ln -s ${ihaskellJupyterCmdSh "notebook" ""}/bin/ihaskell-notebook $out/bin/ 112 | ln -s ${ihaskellJupyterCmdSh "nbconvert" ""}/bin/ihaskell-nbconvert $out/bin/ 113 | ln -s ${ihaskellJupyterCmdSh "console" "--kernel=haskell"}/bin/ihaskell-console $out/bin/ 114 | for prg in $out/bin"/"*;do 115 | if [[ -f $prg && -x $prg ]]; then 116 | wrapProgram $prg --set PYTHONPATH "$(echo ${jupyterlab}/lib/*/site-packages)" 117 | fi 118 | done 119 | ''; 120 | } 121 | -------------------------------------------------------------------------------- /Prediction/Two Dimensional Heat Equation.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE OverloadedLists #-} 4 | {-# LANGUAGE ScopedTypeVariables #-} 5 | {-# LANGUAGE FlexibleContexts #-} 6 | {-# LANGUAGE FlexibleInstances #-} 7 | {-# LANGUAGE MultiParamTypeClasses #-} 8 | {-# LANGUAGE GADTs #-} 9 | {-# LANGUAGE TypeApplications #-} 10 | {-# LANGUAGE TypeOperators #-} 11 | 12 | import Data.Maybe 13 | import Data.Number.Symbolic 14 | import qualified Data.Number.Symbolic as Sym 15 | import Data.Proxy 16 | 17 | import qualified Naperian as N 18 | import qualified Data.Foldable as F 19 | import Control.Applicative ( liftA2 ) 20 | import qualified GHC.TypeLits as M 21 | import Data.Functor 22 | import Data.List.Split 23 | 24 | import Numeric.Sundials.ARKode.ODE 25 | import Numeric.LinearAlgebra 26 | 27 | kx, ky :: Floating a => a 28 | kx = 0.5 29 | ky = 0.75 30 | 31 | -- spatial mesh size 32 | nx, ny :: Int 33 | nx = 30 34 | ny = 60 35 | 36 | -- x mesh spacing 37 | -- y mesh spacing 38 | dx :: Floating a => a 39 | dx = 1 / (fromIntegral nx - 1) 40 | 41 | dy :: Floating a => a 42 | dy = 1 / (fromIntegral ny - 1) 43 | 44 | c1, c2 :: Floating a => a 45 | c1 = kx/dx/dx 46 | c2 = ky/dy/dy 47 | 48 | cc4' :: forall b m n . (M.KnownNat m, M.KnownNat n, Num b) => 49 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] b 50 | cc4' = N.Prism $ N.Prism $ N.Prism $ N.Prism $ N.Scalar $ 51 | N.viota @m <&> (\(N.Fin x) -> 52 | N.viota @n <&> (\(N.Fin w) -> 53 | N.viota @m <&> (\(N.Fin v) -> 54 | N.viota @n <&> (\(N.Fin u) -> 55 | (f m n x w v u))))) 56 | where 57 | m = fromIntegral $ M.natVal (undefined :: Proxy m) 58 | n = fromIntegral $ M.natVal (undefined :: Proxy n) 59 | f m n i j k l | i == 0 = 0 60 | | j == 0 = 0 61 | | i == n - 1 = 0 62 | | j == m - 1 = 0 63 | | k == i - 1 && l == j = 1 64 | | k == i && l == j = -2 65 | | k == i + 1 && l == j = 1 66 | | otherwise = 0 67 | 68 | cc5' :: forall a m n . (M.KnownNat m, M.KnownNat n, Floating a) => 69 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] a 70 | cc5' = N.binary (*) (N.Scalar c2) cc4' 71 | 72 | cc5Sym' :: forall a m n . 
(M.KnownNat m, M.KnownNat n, Floating a, Eq a) => 73 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] (Sym a) 74 | cc5Sym' = N.binary (*) (N.Scalar $ var "c2") cc4' 75 | 76 | yy4' :: forall b m n . (M.KnownNat m, M.KnownNat n, Num b) => 77 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] b 78 | yy4' = N.Prism $ N.Prism $ N.Prism $ N.Prism $ N.Scalar $ 79 | N.viota @m <&> (\(N.Fin x) -> 80 | N.viota @n <&> (\(N.Fin w) -> 81 | N.viota @m <&> (\(N.Fin v) -> 82 | N.viota @n <&> (\(N.Fin u) -> 83 | (f m n x w v u))))) 84 | where 85 | m = fromIntegral $ M.natVal (undefined :: Proxy m) 86 | n = fromIntegral $ M.natVal (undefined :: Proxy n) 87 | f :: Int -> Int -> Int -> Int -> Int -> Int -> b 88 | f m n i j k l | i == 0 = 0 89 | | j == 0 = 0 90 | | i == n - 1 = 0 91 | | j == m - 1 = 0 92 | | k == i && l == j - 1 = 1 93 | | k == i && l == j = -2 94 | | k == i && l == j + 1 = 1 95 | | otherwise = 0 96 | 97 | yy5' :: forall a m n . (M.KnownNat m, M.KnownNat n, Floating a) => 98 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] a 99 | yy5' = N.binary (*) (N.Scalar c1) yy4' 100 | 101 | yy5Sym' :: forall a m n . (M.KnownNat m, M.KnownNat n, Floating a, Eq a) => 102 | N.Hyper '[N.Vector n, N.Vector m, N.Vector n, N.Vector m] (Sym a) 103 | yy5Sym' = N.binary (*) (N.Scalar $ var "c1") yy4' 104 | 105 | ccSym5 = cc5Sym' @Double @4 @5 106 | yy5Sym = yy5Sym' @Double @4 @5 107 | 108 | ccSym5 109 | 110 | yy5Sym 111 | 112 | fmap (N.elements . N.Prism . N.Prism . N.Scalar) $ N.elements $ N.crystal $ N.crystal $ N.binary (+) cc5Sym yy5Sym 113 | 114 | {-# LANGUAGE DataKinds #-} 115 | {-# LANGUAGE OverloadedLists #-} 116 | {-# LANGUAGE ScopedTypeVariables #-} 117 | {-# LANGUAGE FlexibleContexts #-} 118 | {-# LANGUAGE GADTs #-} 119 | 120 | import Data.Maybe 121 | import Data.Number.Symbolic 122 | import Data.Proxy 123 | 124 | import qualified Naperian as N 125 | import qualified Data.Foldable as F 126 | import Control.Applicative ( liftA2 ) 127 | import qualified GHC.TypeLits as M 128 | 129 | import Numeric.Sundials.ARKode.ODE 130 | import Numeric.LinearAlgebra 131 | 132 | x1, a, x2 :: Double 133 | x1 = 0 134 | a = 1.0 135 | x2 = a 136 | 137 | y1, y2 :: Double 138 | y1 = 0.0 139 | y2 = 1.0 140 | 141 | bigT :: Double 142 | bigT = 1000.0 143 | 144 | n :: Int 145 | n = 2 146 | 147 | dx :: Double 148 | dx = a / (fromIntegral n + 1) 149 | 150 | dy :: Double 151 | dy = a / (fromIntegral n + 1) 152 | 153 | beta, s :: Double 154 | beta = 1.0e-5 155 | s = beta / (dx * dx) 156 | 157 | kx, ky :: Double 158 | kx = 0.5 159 | ky = 0.75 160 | 161 | c1, c2 :: Double 162 | c1 = kx/dx/dx 163 | c2 = ky/dy/dy 164 | 165 | bigAA1 :: Matrix Double 166 | bigAA1 = assoc (n * n, n * n) 0.0 [((i, j), f (i, j)) | i <- [0 .. n * n - 1] 167 | , j <- [i - n, i, i + n] 168 | , j `elem` [0 .. n * n -1]] 169 | where 170 | f (i, j) | i == j = (-2.0) * c1 171 | | i - n == j = 1.0 * c1 172 | | i + n == j = 1.0 * c1 173 | | otherwise = error $ show (i, j) 174 | 175 | bigAA2 :: Matrix Double 176 | bigAA2 = diagBlock (replicate n bigA) 177 | where 178 | bigA :: Matrix Double 179 | bigA = assoc (n, n) 0.0 [((i, j), f (i, j)) | i <- [0 .. n - 1] 180 | , j <- [i-1..i+1] 181 | , j `elem` [0..n-1]] 182 | where 183 | f (i, j) | i == j = (-2.0) * c2 184 | | i - 1 == j = 1.0 * c2 185 | | i + 1 == j = 1.0 * c2 186 | 187 | bigAA :: Matrix Double 188 | bigAA = bigAA1 + bigAA2 189 | 190 | bigAA1 191 | 192 | bigAA2 193 | 194 | n 195 | 196 | bigZZ1 :: Matrix Double 197 | bigZZ1 = assoc (m * m, m * m) 0.0 [((i, j), f (i, j)) | i <- [0 .. 
m * m - 1] 198 | , j <- [0 .. m * m - 1]] 199 | where 200 | m = n + 2 201 | f (i, j) | i == 0 = 0.0 202 | | j == 0 = 0.0 203 | | i == j = (-2.0) * c1 204 | | i - n == j = 1.0 * c1 205 | | i + n == j = 1.0 * c1 206 | | i == n + 1 = 0.0 207 | | j == n + 1 = 0.0 208 | | otherwise = 0.0 209 | 210 | 211 | bigZZ1 212 | 213 | x :: forall m n . (M.KnownNat m, M.KnownNat n) => N.Vector n (N.Vector m (Sym Int)) 214 | x = (fromJust . N.fromList) $ 215 | map (fromJust . N.fromList) ([[var $ (\(x,y) -> "A" ++ show x ++ "," ++ show y) (x,y) | y <- [1..m]] | x <- [1..n]] :: [[Sym Int]]) 216 | where 217 | m = M.natVal (undefined :: Proxy m) 218 | n = M.natVal (undefined :: Proxy n) 219 | 220 | u1 :: N.Hyper '[N.Vector 3, N.Vector 2] (Sym Int) 221 | u1 = N.Prism $ N.Prism (N.Scalar x) 222 | 223 | u1 224 | 225 | y :: forall n . M.KnownNat n => N.Vector n (Sym Int) 226 | y = (fromJust . N.fromList) $ 227 | (map (var . ("v" ++) . show) [1..n ] :: [Sym Int]) 228 | where 229 | n = M.natVal (undefined :: Proxy n) 230 | 231 | u2 :: N.Hyper '[N.Vector 3] (Sym Int) 232 | u2 = N.Prism (N.Scalar y) 233 | 234 | N.innerH u1 u2 235 | -------------------------------------------------------------------------------- /Prediction/ark_heat1D.lhs: -------------------------------------------------------------------------------- 1 | % Haskell for Numerics? 2 | % Dominic Steinitz 3 | % 2nd June 2017 4 | 5 | > {-# OPTIONS_GHC -Wall #-} 6 | 7 | > import Numeric.Sundials.ARKode.ODE 8 | > import Numeric.LinearAlgebra 9 | > import Data.Csv 10 | > import Data.Char 11 | > import Data.ByteString.Lazy (putStr, writeFile) 12 | > import Prelude hiding (putStr, writeFile) 13 | 14 | With one spatial dimension we have: 15 | 16 | $$ 17 | u_{t}=k u_{x x} + f 18 | $$ 19 | 20 | initial condition $u(0, x)=0$ 21 | 22 | Dirichlet boundary conditions: the endpoints are held at their initial value of zero, which we can express as 23 | 24 | $$ 25 | \frac{\partial u}{\partial t}(t, 0)=\frac{\partial u}{\partial t}(t, 1)=0 26 | $$ 27 | 28 | $$ 29 | f(t, x)=\left\{\begin{array}{ll}{1} & {\text { if } x=1 / 2} \\ {0} & {\text { otherwise }}\end{array}\right. 30 | $$ 31 | 32 | and we can discretize over this spatial dimension using: 33 | 34 | $$ 35 | u_{x x}=\frac{u_{j+1}-2 u_{j}+u_{j-1}}{\Delta x^{2}} 36 | $$ 37 | 38 | where 39 | 40 | $$ 41 | u_{j}(t) \triangleq u\left(t, x_{j}\right), \quad x_{j} \triangleq j \Delta x, \quad 0 \leq j \leq n+1 42 | $$ 43 | 44 | $$ 45 | \dot{u}_i = \sum_{j=0}^{n+1} A_{i\,j} u_j + B_i, \quad 0 \leq i \leq n+1 46 | $$ 47 | 48 | where 49 | 50 | $$ 51 | \begin{aligned} 52 | A_{0\,j} = 0, & \quad 0 \leq j \leq n+1, & \text{boundary condition} \\ 53 | A_{i\,i-1} = 1 & & \\ 54 | A_{i\,i} = -2 & & \\ 55 | A_{i\,i+1} = 1 & & \\ 56 | A_{{n+1}\,j} = 0, & \quad 0 \leq j \leq n+1, & \text{boundary condition} \\ 57 | A_{i\,j} = 0 & \quad \text{otherwise} & \\ 58 | \end{aligned} 59 | $$ 60 | 61 | Converting this to a system of ODEs is straightforward: 62 | 63 | $$ 64 | \begin{bmatrix} 65 | \dot{u_0} \\ 66 | \dot{u_1} \\ 67 | \dot{u_2} \\ 68 | \dot{u_3} \\ 69 | \dot{u_4} 70 | \end{bmatrix} 71 | = 72 | \begin{bmatrix} 73 | 0 & 0 & 0 & 0 & 0 \\ 74 | 1 & -2 & 1 & 0 & 0 \\ 75 | 0 & 1 & -2 & 1 & 0 \\ 76 | 0 & 0 & 1 & -2 & 1 \\ 77 | 0 & 0 & 0 & 0 & 0 78 | \end{bmatrix} 79 | \begin{bmatrix} 80 | u_0 \\ 81 | u_1 \\ 82 | u_2 \\ 83 | u_3 \\ 84 | u_4 85 | \end{bmatrix} 86 | + 87 | \begin{bmatrix} 88 | f_0 \\ 89 | f_1 \\ 90 | f_2 \\ 91 | f_3 \\ 92 | f_4 93 | \end{bmatrix} 94 | $$ 95 | 96 | where $f_j \triangleq f(t, x_j)$.
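As a quick, purely illustrative check (the name `smallA` is ours; it is not part of the program that follows), the small matrix displayed above can be built with hmatrix's `assoc`. The full-size matrix below is constructed in exactly the same way, with the interior entries scaled by $k / \Delta x^2$ via the constants `c1` and `c2`.

```
smallA :: Matrix Double
smallA = assoc (5, 5) 0.0 [ ((i, j), f (i, j)) | i <- [0 .. 4], j <- [0 .. 4] ]
  where
    f (i, j) | i == 0 || i == 4 = 0.0   -- boundary rows
             | i == j           = -2.0
             | abs (i - j) == 1 =  1.0
             | otherwise        =  0.0

-- disp 2 smallA prints the matrix written out above.
```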
97 | 98 | spatial mesh size 99 | 100 | > bigN :: Int 101 | > bigN = 201 102 | 103 | heat conductivity 104 | 105 | > k :: Double 106 | > k = 0.5 107 | 108 | mesh spacing 109 | 110 | > deltaX :: Double 111 | > deltaX = 1.0 / (fromIntegral bigN - 1) 112 | > c1, c2 :: Double 113 | > c1 = k / deltaX / deltaX 114 | > c2 = (-2.0) * k / deltaX / deltaX 115 | 116 | initial time 117 | 118 | > t0 :: Double 119 | > t0 = 0.0 120 | 121 | final time 122 | 123 | > tf :: Double 124 | > tf =1.0 125 | 126 | total number of output times 127 | 128 | > bigNt :: Int 129 | > bigNt = 10 130 | 131 | relative tolerance 132 | 133 | > rtol :: Double 134 | > rtol = 1.0e-6 135 | 136 | absolute tolerance 137 | 138 | > atol :: Double 139 | > atol = 1.0e-10 140 | 141 | > bigA :: Matrix Double 142 | > bigA = assoc (bigN, bigN) 0.0 [ ((i, j), f (i, j)) | i <- [0 .. bigN - 1] 143 | > , j <- [0 .. bigN - 1] 144 | > ] 145 | > where 146 | > f (i, j) | i == 0 = 0.0 -- left boundary condition 147 | > | i == bigN - 1 = 0.0 -- right boundary condition 148 | > | i == j = c2 149 | > | i - 1 == j = c1 150 | > | i + 1 == j = c1 151 | > | otherwise = 0.0 152 | 153 | > b :: Vector Double 154 | > b = assoc bigN 0.0 [ (iSource, 0.01 / deltaX) ] 155 | > where 156 | > iSource = bigN `div` 2 157 | 158 | > bigU0 :: Vector Double 159 | > bigU0 = assoc bigN 0.0 [] 160 | > 161 | > deltaT :: Double 162 | > deltaT = (tf - t0) / (fromIntegral bigNt) 163 | 164 | > sol :: Matrix Double 165 | > sol = odeSolveV SDIRK_5_3_4' Nothing rtol atol (const bigU') bigU0 (vector (map (deltaT *) [0 .. 10])) 166 | > where 167 | > bigU' bigU = bigA #> bigU + b 168 | 169 | > myOptions :: EncodeOptions 170 | > myOptions = defaultEncodeOptions { 171 | > encDelimiter = fromIntegral (ord ' ') 172 | > } 173 | 174 | > main :: IO () 175 | > main = do 176 | > writeFile "heat1E.txt" $ encodeWith myOptions $ map toList $ toRows sol 177 | 178 | -------------------------------------------------------------------------------- /Prediction/default.nix: -------------------------------------------------------------------------------- 1 | { mkDerivation, base, cassava, Chart, Chart-diagrams 2 | , diagrams-cairo, diagrams-lib, diagrams-rasterific, hmatrix 3 | , hmatrix-sundials, Naperian, stdenv 4 | }: 5 | mkDerivation { 6 | pname = "Prediction"; 7 | version = "0.1.0.0"; 8 | src = ./.; 9 | isLibrary = false; 10 | isExecutable = true; 11 | executableHaskellDepends = [ 12 | base cassava Chart Chart-diagrams diagrams-cairo diagrams-lib 13 | diagrams-rasterific hmatrix hmatrix-sundials Naperian 14 | ]; 15 | license = stdenv.lib.licenses.bsd3; 16 | } 17 | -------------------------------------------------------------------------------- /Prediction/index.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/idontgetoutmuch/NumMethHaskell/ed62bb0c4b9cb42d2ae9b7c29a2cc3251867d9fd/Prediction/index.html -------------------------------------------------------------------------------- /Prediction/plot_heat1D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ---------------------------------------------------------------- 3 | # Programmer(s): Daniel R. Reynolds @ SMU 4 | # ---------------------------------------------------------------- 5 | # SUNDIALS Copyright Start 6 | # Copyright (c) 2002-2019, Lawrence Livermore National Security 7 | # and Southern Methodist University. 8 | # All rights reserved. 9 | # 10 | # See the top-level LICENSE and NOTICE files for details. 
11 | # 12 | # SPDX-License-Identifier: BSD-3-Clause 13 | # SUNDIALS Copyright End 14 | # ---------------------------------------------------------------- 15 | # matplotlib-based plotting script for heat1D.c example 16 | 17 | # imports 18 | import sys 19 | import pylab as plt 20 | import numpy as np 21 | 22 | # load mesh data file 23 | mesh = np.loadtxt('heat_mesh.txt', dtype=np.double) 24 | 25 | # load solution data file 26 | data = np.loadtxt('heat1D.txt', dtype=np.double) 27 | 28 | # determine number of time steps, mesh size 29 | nt,nx = np.shape(data) 30 | 31 | # determine maximum temperature 32 | maxtemp = 1.1*data.max() 33 | 34 | # generate plots of results 35 | for tstep in range(nt): 36 | 37 | # set string constants for output plots, current time, mesh size 38 | pname = 'heat1d.' + repr(tstep).zfill(3) + '.png' 39 | tstr = repr(tstep) 40 | nxstr = repr(nx) 41 | 42 | # plot current solution and save to disk 43 | plt.figure(1) 44 | plt.plot(mesh,data[tstep,:]) 45 | plt.xlabel('x') 46 | plt.ylabel('solution') 47 | plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) 48 | plt.axis((0.0, 1.0, 0.0, maxtemp)) 49 | plt.grid() 50 | plt.savefig(pname) 51 | plt.close() 52 | 53 | 54 | ##### end of script ##### 55 | -------------------------------------------------------------------------------- /Prediction/plot_heat1E.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ---------------------------------------------------------------- 3 | # Programmer(s): Daniel R. Reynolds @ SMU 4 | # ---------------------------------------------------------------- 5 | # SUNDIALS Copyright Start 6 | # Copyright (c) 2002-2019, Lawrence Livermore National Security 7 | # and Southern Methodist University. 8 | # All rights reserved. 9 | # 10 | # See the top-level LICENSE and NOTICE files for details. 11 | # 12 | # SPDX-License-Identifier: BSD-3-Clause 13 | # SUNDIALS Copyright End 14 | # ---------------------------------------------------------------- 15 | # matplotlib-based plotting script for heat1D.c example 16 | 17 | # imports 18 | import sys 19 | import pylab as plt 20 | import numpy as np 21 | 22 | # load mesh data file 23 | mesh = np.loadtxt('heat_mesh.txt', dtype=np.double) 24 | 25 | # load solution data file 26 | data = np.loadtxt('heat1E.txt', dtype=np.double) 27 | 28 | # determine number of time steps, mesh size 29 | nt,nx = np.shape(data) 30 | 31 | # determine maximum temperature 32 | maxtemp = 1.1*data.max() 33 | 34 | # generate plots of results 35 | for tstep in range(nt): 36 | 37 | # set string constants for output plots, current time, mesh size 38 | pname = 'heat1e.' 
+ repr(tstep).zfill(3) + '.png' 39 | tstr = repr(tstep) 40 | nxstr = repr(nx) 41 | 42 | # plot current solution and save to disk 43 | plt.figure(1) 44 | plt.plot(mesh,data[tstep,:]) 45 | plt.xlabel('x') 46 | plt.ylabel('solution') 47 | plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) 48 | plt.axis((0.0, 1.0, 0.0, maxtemp)) 49 | plt.grid() 50 | plt.savefig(pname) 51 | plt.close() 52 | 53 | 54 | ##### end of script ##### 55 | -------------------------------------------------------------------------------- /Prediction/release.nix: -------------------------------------------------------------------------------- 1 | with import <nixpkgs> {}; 2 | 3 | let 4 | myHaskellPackages = 5 | haskellPackages.override { 6 | overrides = self: super: { 7 | hmatrix-sundials = self.callCabal2nix "hmatrix-sundials" (builtins.fetchGit { 8 | url = "https://github.com/haskell-numerics/hmatrix-sundials.git"; 9 | rev = "9b6ec2b5fc509f74c5e61657dfc638a2c7ebced0"; 10 | }) { sundials_arkode = sundials; sundials_cvode = sundials; }; 11 | }; 12 | }; 13 | 14 | in 15 | 16 | myHaskellPackages.callPackage ./default.nix { } 17 | -------------------------------------------------------------------------------- /Prediction/shell.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import <nixpkgs> { overlays = [ ]; }, doBenchmark ? false }: 2 | 3 | let 4 | 5 | hmatrix-sundials = pkgs.haskellPackages.callCabal2nix "hmatrix-sundials" (builtins.fetchGit { 6 | url = "https://github.com/haskell-numerics/hmatrix-sundials.git"; 7 | rev = "9b6ec2b5fc509f74c5e61657dfc638a2c7ebced0"; 8 | }) { sundials_arkode = pkgs.sundials; sundials_cvode = pkgs.sundials; }; 9 | 10 | Naperian = pkgs.haskellPackages.callCabal2nix "Naperian" (builtins.fetchGit { 11 | url = "https://github.com/idontgetoutmuch/Naperian.git"; 12 | rev = "54d873ffe99de865ca34e6bb3b92736e29e01619"; 13 | }) { }; 14 | 15 | haskellDeps = ps: with ps; [ 16 | hmatrix 17 | hmatrix-sundials 18 | Naperian 19 | numbers 20 | ]; 21 | 22 | in 23 | 24 | pkgs.stdenv.mkDerivation { 25 | name = "env"; 26 | buildInputs = [ 27 | (pkgs.haskellPackages.ghcWithPackages haskellDeps) 28 | ]; 29 | } 30 | -------------------------------------------------------------------------------- /Prediction/test.nix: -------------------------------------------------------------------------------- 1 | let overlay1 = self: super: 2 | { 3 | sundials = self.callPackage ./CustomSundials { }; 4 | }; 5 | 6 | in 7 | 8 | { nixpkgs ? import <nixpkgs> { overlays = [ overlay1 ]; } }: 9 | 10 | nixpkgs.stdenv.mkDerivation { 11 | name = "env"; 12 | buildInputs = [ 13 | nixpkgs.openmpi 14 | nixpkgs.openssh 15 | nixpkgs.sundials 16 | ]; 17 | } 18 | -------------------------------------------------------------------------------- /Preface.org: -------------------------------------------------------------------------------- 1 | Many users of numerical methods, including physicists, financial 2 | engineers and machine learning practitioners, have dipped their toes 3 | in the functional waters only to find, to mix metaphors, that it is 4 | not to their taste. In the functional community, with some notable 5 | exceptions, numerical computations are not given a high profile and 6 | few worked examples are available with the result that, when a user of 7 | numerical methods implements something, it is often done using lists, 8 | almost guaranteeing poor performance and resultant distaste.
9 | 10 | Functional programmers on the other hand are also reluctant to dip 11 | their toes in the numerical methods waters, even though there are many 12 | features of Haskell which make it an excellent vehicle for such 13 | adventures; individuals who have mastered category theory should have 14 | no fear of other fields of mathematics! For example, implementing 15 | neural networks is significantly easier using automatic 16 | differentiation in Haskell than using the traditional method of 17 | backpropagation; solving partial differential equations using 18 | computational stencils can be done just by writing down the stencil in 19 | a very transparent way; and power series approximations such as 20 | Legendre polynomials are straightforward using Haskell's lazy 21 | evaluation. 22 | 23 | This book is intended to show that one can have one's cake and eat it 24 | in the sense that one can obtain all the benefits of functional 25 | programming applied to numerical problems without sacrificing much in 26 | the way of performance. Hopefully, this will appeal to anyone who 27 | falls into either of the above categories. Through an extended series 28 | of worked examples, it shows that it is possible to have a 29 | representation in code of a problem that is close to the way one would 30 | express it mathematically without sacrificing performance. In 31 | particular, some of the examples can be made to run on multiple 32 | processors in parallel with little or no change. In each of the 33 | examples, an exposition is given of the theory behind it; this is 34 | certainly not a pure numerical methods cookbook. In addition, each 35 | example is coded in Haskell, the code is explained, including the main 36 | Haskell numerical libraries, and the results are 37 | presented together with performance measurements. 38 | 39 | There are many ways in which this book could be organised: by Haskell 40 | library, by numerical technique or by discipline to which the 41 | techniques and libraries can be applied. In the end, we opted to 42 | create three sections: finite difference methods, Monte Carlo methods 43 | and other methods. Within these sections, we intermingle specific 44 | methods and their theory with applications of Haskell libraries to 45 | specific examples which motivate the methods. Hopefully the reader 46 | will find (at least some of) the applications as fascinating and as 47 | practical as we have and leave with an appreciation of how a 48 | particular technique and Haskell library can be applied to a problem 49 | they have at hand. 50 | 51 | A word is in order on what constitutes a functional 52 | language. Currently, many languages claim they are functional, by which 53 | they mean that functions are first class. A more demanding definition would 54 | include static typing and no mutability or side effects, which then 55 | allows reasoning about programs in a straightforward way. For us that 56 | means Haskell. 57 | 58 | After 25 years of development, it might be supposed that Haskell is a 59 | mature language. In many senses it is, but the language, the principal 60 | compiler ghc and its libraries continue to develop at a rapid 61 | rate. Even a few years ago, one could not have hoped to write a book 62 | such as this and many pundits maintained that functional programming 63 | and Haskell in particular would never be used for real 64 | applications. There is still a long way to go but it seems, for now at 65 | any rate, the future is functional.
Given this rapid rate of progress, 66 | it is to be hoped that even better techniques for numerical methods in 67 | Haskell will appear in the near future. 68 | 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | NumMethHaskell 2 | ============== 3 | 4 | Numerical Methods in Haskell with Applications to Data Analysis, Finance and Physics 5 | -------------------------------------------------------------------------------- /Resampling.lhs: -------------------------------------------------------------------------------- 1 | \documentclass[12pt]{book} 2 | %include polycode.fmt 3 | %options ghci -fglasgow-exts 4 | 5 | \setlength{\parskip}{\medskipamount} 6 | \setlength{\parindent}{0pt} 7 | 8 | \long\def\ignore#1{} 9 | 10 | \begin{document} 11 | 12 | We previously considered the model for population growth, the logistic equation 13 | 14 | \begin{eqnarray} 15 | \dot{p} & = & rp\Big(1 - \frac{p}{k}\Big) 16 | \end{eqnarray} 17 | 18 | We take the parameter $k$ to be fixed and known and $r_0$ to be 19 | sampled from some prior 20 | 21 | \begin{eqnarray} 22 | R_0 & \sim & {\cal{N}}(\mu_r, \sigma_r^2) \label{eq:r_prior} 23 | \end{eqnarray} 24 | 25 | As before we know the solution to the logistic equation 26 | 27 | \begin{eqnarray} 28 | p = \frac{kp_0\exp rt}{k + p_0(\exp rt - 1)} 29 | \end{eqnarray} 30 | 31 | where $p_0$ is the size of the population at time $t=0$. 32 | 33 | We observe a noisy value of population at regular time intervals 34 | 35 | \begin{eqnarray} 36 | x_i &=& \frac{kp_0\exp r\Delta T i}{k + p_0(\exp r\Delta T i - 1)} \\ 37 | y_i &=& x_i + \upsilon_i 38 | \end{eqnarray} 39 | 40 | \ignore{ 41 | \begin{code} 42 | {-# OPTIONS_GHC -Wall #-} 43 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 44 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 45 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 46 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 47 | {-# OPTIONS_GHC -fno-warn-orphans #-} 48 | \end{code} 49 | } 50 | 51 | \ignore{ 52 | \begin{code} 53 | {-# LANGUAGE BangPatterns #-} 54 | \end{code} 55 | } 56 | 57 | We represent the prior at time $i-1$ by 58 | 59 | \begin{eqnarray} 60 | \hat{\Phi}_{i-1}(r) = \frac{1}{Z_{i-1}}\sum_{k=1}^N w^{(k)} {\cal{N}}(r; \mu^{(k)}_r, \sigma) 61 | \end{eqnarray} 62 | 63 | where the $N$ samples $\mu^{(k)}_r$ are independent samples of the 64 | prior density $\Phi_{i-1}(r)$ and $Z_{i-1}$ is a normalizing constant. 65 | 66 | To sample from this Kernel Density Estimate we can pick $j$ with 67 | probability $\frac{w^{(j)}}{Z_{i-1}}$ and then sample 68 | from ${\cal{N}}(\mu^{(j)}_r, \sigma)$. 
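A minimal sketch of this sampling step (illustrative only, with names of our own choosing; it is not part of the module that follows) takes the normalised weights, the centres $\mu^{(k)}_r$ and pre-drawn uniform and standard normal variates:

\begin{verbatim}
-- Illustration only: draw one sample from the kernel density estimate.
sampleKde :: U.Vector Double  -- normalised weights w^(k) / Z
          -> U.Vector Double  -- centres mu_r^(k)
          -> Double           -- kernel width sigma
          -> Double           -- u ~ U(0,1)
          -> Double           -- z ~ N(0,1)
          -> Double
sampleKde ws mus sigma u z = mus U.! j + sigma * z
  where
    cums = U.scanl1 (+) ws
    j    = maybe (U.length ws - 1) id (U.findIndex (>= u) cums)
\end{verbatim}

The code below does the same thing for a whole population of particles: it forms the cumulative weights and inverts them with a binary search (\texttt{myIndices}) before perturbing the chosen centres.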
69 | 70 | 71 | \begin{code} 72 | module Resampling where 73 | 74 | import qualified Data.Vector.Unboxed as U 75 | 76 | import Data.Bits 77 | 78 | import Control.Monad.ST 79 | import Control.Monad.Loops 80 | import Control.Monad.State 81 | import qualified Control.Monad.Writer as W 82 | 83 | import Data.Random hiding ( gamma ) 84 | import Data.Random.Source.PureMT 85 | import System.Random.MWC 86 | 87 | k :: Double 88 | k = 1.0 89 | 90 | muPriorR, sigmaPriorR :: Double 91 | muPriorR = 5.0 92 | sigmaPriorR = 1e1 93 | 94 | logit :: Double -> Double -> Double -> Double -> Double 95 | logit p0 k r t = k * p0 * (exp (r * t)) / (k + p0 * (exp (r * t) - 1)) 96 | 97 | p0 :: Double 98 | p0 = 0.1 * k 99 | 100 | deltaT :: Double 101 | deltaT = 0.001 102 | 103 | sigma :: Double 104 | sigma = 1e-1 105 | 106 | initPopSir :: Int -> 107 | RVarT (W.Writer [(Double, U.Vector Double)]) 108 | (Double, U.Vector Double) 109 | 110 | initPopSir n = do 111 | rs <- U.replicateM n (rvarT $ Normal muPriorR sigmaPriorR) 112 | return (p0, rs) 113 | \end{code} 114 | 115 | We create a function to perform one step of the Bayesian update. 116 | 117 | \begin{code} 118 | popSir :: Int -> (Double, U.Vector Double) -> Double -> 119 | RVarT (W.Writer [(Double, U.Vector Double)]) 120 | (Double, U.Vector Double) 121 | popSir n (p0Prev, rsPrev) z = do 122 | 123 | let ps = U.map (\r -> logit p0Prev k r deltaT) rsPrev 124 | 125 | let rBar = U.sum rsPrev / (fromIntegral n) 126 | pBar = U.sum ps / (fromIntegral n) 127 | 128 | lift $ W.tell [(rBar, U.take 100 rsPrev)] 129 | 130 | let wsRaw = U.map (\p -> pdf (Normal p sigma) z) ps 131 | sumWs = U.sum wsRaw 132 | ws = U.map (/ sumWs) wsRaw 133 | 134 | let vs :: U.Vector Double 135 | vs = runST (create >>= (asGenST $ \gen -> uniformVector gen n)) 136 | 137 | cumSumWs = U.scanl (+) 0 ws 138 | 139 | js :: U.Vector Int 140 | js = myIndices (U.tail cumSumWs) vs 141 | 142 | rsTilde = U.map (rsPrev U.!) js 143 | 144 | rsNew <- U.mapM (\mu -> rvarT (Normal mu sigma)) rsTilde 145 | 146 | return (pBar, rsNew) 147 | \end{code} 148 | 149 | \begin{code} 150 | binarySearch :: (U.Unbox a, Ord a) => 151 | a -> U.Vector a -> Int 152 | binarySearch x vec = loop 0 (U.length vec - 1) 153 | where 154 | loop !l !u 155 | | u <= l = l 156 | | otherwise = let e = vec U.! 
k in if x <= e then loop l k else loop (k+1) u 157 | where k = (u + l) `shiftR` 1 158 | 159 | myIndices :: U.Vector Double -> U.Vector Double -> U.Vector Int 160 | myIndices bs xs = U.map (flip binarySearch bs) xs 161 | \end{code} 162 | 163 | 164 | \begin{code} 165 | nParticles :: Int 166 | nParticles = 10000 167 | 168 | nObs :: Int 169 | nObs = 1000 170 | 171 | obsVariance :: Double 172 | obsVariance = 1e-2 173 | 174 | r :: Double 175 | r = 6.0 176 | 177 | singleSample :: Double -> RVarT (W.Writer [Double]) Double 178 | singleSample p0 = do 179 | upsilon <- rvarT (Normal 0.0 obsVariance) 180 | let p1 = logit p0 k r deltaT 181 | lift $ W.tell [p1 + upsilon] 182 | return p1 183 | 184 | streamSample :: RVarT (W.Writer [Double]) Double 185 | streamSample = iterateM_ singleSample p0 186 | 187 | samples :: [Double] 188 | samples = take nObs $ snd $ W.runWriter (evalStateT (sample streamSample) (pureMT 2)) 189 | 190 | testPopSir :: RVarT (W.Writer [(Double, U.Vector Double)]) (Double, U.Vector Double) 191 | testPopSir = do 192 | (p0, rsInit) <- initPopSir nParticles 193 | foldM (popSir nParticles) (p0, rsInit) samples 194 | 195 | runPopSir :: ((Double, U.Vector Double), [(Double, U.Vector Double)]) 196 | runPopSir = W.runWriter (evalStateT (sample testPopSir) (pureMT 7)) 197 | \end{code} 198 | 199 | \end{document} -------------------------------------------------------------------------------- /ResamplingChart.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module ResamplingChart ( 9 | diagSamps 10 | , diagPartDist2 11 | ) where 12 | 13 | import Control.Lens hiding ( (#) ) 14 | import Graphics.Rendering.Chart 15 | import Graphics.Rendering.Chart.Backend.Diagrams 16 | import Diagrams.Backend.Cairo.CmdLine 17 | import Diagrams.Prelude hiding ( render, Renderable ) 18 | import Data.Default.Class 19 | 20 | import System.IO.Unsafe 21 | 22 | denv :: DEnv 23 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 500 500 24 | 25 | diagSamps :: [(Double, Double)] -> 26 | [(Double, Double)] -> 27 | QDiagram Cairo R2 Any 28 | diagSamps xsActual xsEstimated = 29 | fst $ runBackend denv (render (chartSamps xsActual xsEstimated) (500, 500)) 30 | 31 | chartSamps :: [(Double, Double)] -> 32 | [(Double, Double)] -> 33 | Renderable () 34 | chartSamps as es = toRenderable layout 35 | where 36 | 37 | actual = plot_lines_values .~ [as] 38 | $ plot_lines_style . line_color .~ opaque blue 39 | $ plot_lines_title .~ "Actual" 40 | $ def 41 | 42 | est = plot_lines_values .~ [es] 43 | $ plot_lines_style . line_color .~ opaque green 44 | $ plot_lines_title .~ "Sampled" 45 | $ def 46 | 47 | layout = layout_title .~ "Noisy Population Growth" 48 | $ layout_plots .~ [ toPlot actual 49 | , toPlot est 50 | ] 51 | $ def 52 | 53 | chartPartDist2 :: String -> 54 | AlphaColour Double -> AlphaColour Double -> 55 | [(Double, Double)] -> [(Double, Double)] -> 56 | Graphics.Rendering.Chart.Renderable () 57 | chartPartDist2 t c1 c2 prices1 prices2 = toRenderable layout 58 | where 59 | 60 | price1 = plot_points_style . 
point_color .~ c1 61 | $ plot_points_values .~ prices1 62 | $ plot_points_title .~ "Sample Particles Certain" 63 | $ def 64 | 65 | price2 = plot_points_style .~ filledCircles 1 c2 66 | $ plot_points_values .~ prices2 67 | $ plot_points_title .~ "Sample Particles Doubtful" 68 | $ def 69 | 70 | layout = layout_title .~ t 71 | $ layout_y_axis . laxis_title .~ "Particle Value" 72 | $ layout_y_axis . laxis_override .~ axisGridHide 73 | $ layout_x_axis . laxis_title .~ "Time" 74 | $ layout_x_axis . laxis_override .~ axisGridHide 75 | $ layout_plots .~ [ toPlot price1 76 | , toPlot price2 77 | ] 78 | $ layout_grid_last .~ False 79 | $ def 80 | 81 | diagPartDist2 :: String -> 82 | AlphaColour Double -> AlphaColour Double -> 83 | [(Double, Double)] -> [(Double, Double)] -> 84 | QDiagram Cairo R2 Any 85 | diagPartDist2 t c1 c2 prices1 prices2 = 86 | fst $ runBackend denv (render (chartPartDist2 t c1 c2 prices1 prices2) (500, 500)) 87 | -------------------------------------------------------------------------------- /ResamplingMain.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | {-# LANGUAGE NoMonomorphismRestriction #-} 9 | {-# LANGUAGE RankNTypes #-} 10 | {-# LANGUAGE QuasiQuotes #-} 11 | {-# LANGUAGE LambdaCase #-} 12 | {-# LANGUAGE GADTs #-} 13 | {-# LANGUAGE ViewPatterns #-} 14 | 15 | 16 | module Main ( main ) where 17 | 18 | import Resampling 19 | import ResamplingChart 20 | 21 | import Diagrams.Prelude 22 | import Diagrams.Backend.CmdLine 23 | import Diagrams.Backend.Cairo.CmdLine 24 | 25 | import qualified Data.Vector.Unboxed as U 26 | 27 | 28 | displayHeader :: FilePath -> Diagram B R2 -> IO () 29 | displayHeader fn = 30 | mainRender ( DiagramOpts (Just 900) (Just 700) fn 31 | , DiagramLoopOpts False Nothing 0 32 | ) 33 | 34 | diaNoisyData :: Diagram B R2 35 | diaNoisyData = 36 | diagSamps (zip (map fromIntegral [0..]) (take nObs ys)) 37 | (zip (map fromIntegral [0..]) (take nObs ys)) 38 | where ys = samples 39 | 40 | diaPartEvolve2 :: Diagram B R2 41 | diaPartEvolve2 = diagPartDist2 "Distribution of Particles Over Time" 42 | (blue `withOpacity` 0.2) 43 | (red `withOpacity` 0.5) 44 | (concat (zipWith zip 45 | (map repeat [0..]) 46 | xss)) 47 | (concat (zipWith zip 48 | (map repeat [0..]) 49 | xss)) 50 | where 51 | xss = map U.toList $ map snd $ snd runPopSir 52 | 53 | main :: IO () 54 | main = do 55 | displayHeader "diagrams/ResamplingPopGrowth.png" diaNoisyData 56 | putStrLn "Hello" -------------------------------------------------------------------------------- /RunAccGPU.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | import Symplectic 9 | 10 | main :: IO () 11 | main = do 12 | putStrLn $ show $ reallyRunSteps' 10 13 | -------------------------------------------------------------------------------- /SymplecticMain.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 
3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | import Graphics.Rendering.Chart 9 | import Graphics.Rendering.Chart.Backend.Diagrams 10 | import Diagrams.Backend.Cairo.CmdLine 11 | import Diagrams.Prelude hiding ( render, Renderable, (*~), Time ) 12 | import Diagrams.Backend.CmdLine 13 | 14 | import System.IO.Unsafe 15 | 16 | import Symplectic 17 | 18 | import qualified Data.Array.Accelerate as A hiding ((^)) 19 | import qualified Linear as L 20 | 21 | import Text.Printf 22 | 23 | 24 | denv :: DEnv Double 25 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 600 500 26 | 27 | displayHeader :: FilePath -> Diagram B -> IO () 28 | displayHeader fn = 29 | mainRender ( DiagramOpts (Just 900) (Just 700) fn 30 | , DiagramLoopOpts False Nothing 0 31 | ) 32 | 33 | chart :: String -> 34 | String -> 35 | [[(Double, Double)]] -> 36 | Renderable () 37 | chart t l obss = toRenderable layout 38 | where 39 | 40 | actual x l c = plot_lines_values .~ [x] 41 | $ plot_lines_style . line_color .~ opaque c 42 | -- $ plot_lines_title .~ l 43 | $ plot_lines_style . line_width .~ 1.0 44 | $ def 45 | 46 | ls = map (\n -> "Path " ++ show n) [1..] 47 | cs = cycle [blue, green, red, brown, crimson] 48 | 49 | actuals' :: [PlotLines Double Double] 50 | actuals' = zipWith3 actual obss ls cs 51 | 52 | layout = layout_title .~ t 53 | $ layout_plots .~ (map toPlot actuals') 54 | $ layout_y_axis . laxis_title .~ l 55 | $ layout_y_axis . laxis_override .~ axisGridHide 56 | $ layout_x_axis . laxis_title .~ "Positions" 57 | $ layout_x_axis . laxis_override .~ axisGridHide 58 | $ def 59 | 60 | 61 | diagrm :: String -> String -> [[(Double, Double)]] -> Diagram Cairo 62 | diagrm t l xss = fst $ runBackendR denv (chart t l xss) 63 | 64 | diagrmM :: String -> String -> [[(Double, Double)]] -> IO (Diagram Cairo) 65 | diagrmM t l xss = do 66 | denv <- defaultEnv vectorAlignmentFns 600 500 67 | return $ fst $ runBackendR denv (chart t l xss) 68 | 69 | main = do 70 | let pqs = A.toList runSteps 71 | pq1s = map (^. L._x) pqs 72 | pq2s = map (^. L._y) pqs 73 | p1s = map (^. L._x) pq1s 74 | q1s = map (^. L._y) pq1s 75 | p2s = map (^. L._x) pq2s 76 | q2s = map (^. L._y) pq2s 77 | d <- diagrmM "Orbits" "Momenta" [zip q1s p1s, zip q2s p2s] 78 | displayHeader "diagrams/symplectic.png" d 79 | 80 | 81 | -------------------------------------------------------------------------------- /TSP.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module TSP ( test ) where 9 | 10 | import Prelude hiding ( zipWith, sum, foldl, foldr, scanl, length, null, tail, zip, map, reverse, concat, take ) 11 | import qualified Prelude as P 12 | import Data.Vector.Unboxed hiding ( replicateM ) 13 | import Data.Random.Source.PureMT 14 | import Data.Random 15 | import Control.Monad.State hiding ( modify ) 16 | 17 | import Debug.Trace 18 | 19 | nNodes :: Int 20 | nNodes = 6 21 | 22 | xs, ys :: Vector Float 23 | xs = generate nNodes ((*1.0) . fromIntegral) 24 | ys = generate nNodes ((*0.0) . 
fromIntegral) 25 | 26 | distance :: Int -> Int -> Float 27 | distance i j = sqrt $ (xs!i - xs!j)^2 + (ys!i - ys!j)^2 28 | 29 | totalDistance :: Vector Int -> Float 30 | totalDistance v = sum $ map (uncurry distance) ms 31 | where 32 | ms = zip v (tail v) 33 | 34 | reverseBetweenPair :: Int -> Int -> Vector Int -> Vector Int 35 | reverseBetweenPair i j v = concat [beginning, reverse middle, end] 36 | where 37 | k = length v 38 | 39 | beginning = slice 0 i v 40 | middle = slice i (j - i + 1) v 41 | end = slice (j + 1) (k - j - 1) v 42 | 43 | incDistance :: Vector Int -> Int -> Int -> Float 44 | incDistance v i j = d + c - b - a 45 | where 46 | a = distance (v!(i - 1)) (v!i) 47 | b = distance (v!j) (v!((j + 1) `mod` nNodes)) 48 | c = distance (v!(i - 1)) (v!j) 49 | d = distance (v!i) (v!((j + 1) `mod` nNodes)) 50 | 51 | expDv :: Int -> Int -> Vector Int -> Float -> Float -> Float -> Float 52 | expDv i1 i2 v kB j t = exp(-j * (incDistance v i1 i2) / (kB * t)) 53 | 54 | randomUpdates :: Int -> Vector (Int, Int, Float) 55 | randomUpdates m = 56 | fromList $ 57 | evalState (replicateM m x) 58 | (pureMT 1) 59 | where 60 | x = do r <- sample (uniform (1 :: Int) (nNodes - 2)) 61 | c <- sample (uniform (r + 1) (nNodes - 1)) 62 | v <- sample (uniform (0 :: Float) 1.0) 63 | return (r, c, v) 64 | 65 | kB, couplingConstant :: Float 66 | kB = 1.0 67 | couplingConstant = 1.0 68 | 69 | data McState = McState { 70 | mcRoute :: !(Vector Int) 71 | } 72 | deriving Show 73 | 74 | initMcState :: McState 75 | initMcState = McState { 76 | mcRoute = fromList [0,4,2,3,1,5,0] 77 | } 78 | 79 | singleUpdate :: Float -> McState -> (Int, Int, Float) -> McState 80 | singleUpdate t u (i, j, r) = 81 | trace (show i P.++ show j P.++ ": " P.++ 82 | show (totalDistance v) P.++ ", " P.++ 83 | show (totalDistance (reverseBetweenPair i j v)) P.++ ", " P.++ 84 | show (incDistance v i j) P.++ ", " P.++ 85 | show p P.++ ", " P.++ 86 | show r) $ if incDistance v i j <= 0 || p > r 87 | then 88 | McState { mcRoute = reverseBetweenPair i j v } 89 | else 90 | u 91 | where 92 | v = mcRoute u 93 | p = expDv i j v kB couplingConstant t 94 | 95 | test :: McState 96 | test = foldl (singleUpdate 1.0) initMcState (randomUpdates 100) -------------------------------------------------------------------------------- /TribblesMain.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | {-# OPTIONS_GHC -fno-warn-name-shadowing #-} 3 | {-# OPTIONS_GHC -fno-warn-type-defaults #-} 4 | {-# OPTIONS_GHC -fno-warn-unused-do-bind #-} 5 | {-# OPTIONS_GHC -fno-warn-missing-methods #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | import Graphics.Rendering.Chart 9 | import Graphics.Rendering.Chart.Backend.Diagrams 10 | import Diagrams.Backend.Cairo.CmdLine 11 | import Diagrams.Prelude hiding ( render, Renderable, (*~), Time ) 12 | import Diagrams.Backend.CmdLine 13 | 14 | import System.IO.Unsafe 15 | 16 | import Tribbles 17 | 18 | import qualified Prelude as P 19 | import Numeric.Units.Dimensional.Prelude hiding (Unit) 20 | 21 | import Text.Printf 22 | 23 | 24 | denv :: DEnv Double 25 | denv = unsafePerformIO $ defaultEnv vectorAlignmentFns 600 500 26 | 27 | displayHeader :: FilePath -> Diagram B -> IO () 28 | displayHeader fn = 29 | mainRender ( DiagramOpts (Just 900) (Just 700) fn 30 | , DiagramLoopOpts False Nothing 0 31 | ) 32 | 33 | chart :: String -> 34 | String -> 35 | [[(Double, Double)]] -> 36 | Renderable () 37 | chart t l obss = toRenderable layout 38 | where 39 | 40 | actual x l c = 
plot_lines_values .~ [x] 41 | $ plot_lines_style . line_color .~ opaque c 42 | -- $ plot_lines_title .~ l 43 | $ plot_lines_style . line_width .~ 1.0 44 | $ def 45 | 46 | ls = map (\n -> "Path " ++ show n) [1..] 47 | cs = cycle [blue, green, red, brown, crimson] 48 | 49 | actuals' :: [PlotLines Double Double] 50 | actuals' = zipWith3 actual obss ls cs 51 | 52 | layout = layout_title .~ t 53 | $ layout_plots .~ (map toPlot actuals') 54 | $ layout_y_axis . laxis_title .~ l 55 | $ layout_y_axis . laxis_override .~ axisGridHide 56 | $ layout_x_axis . laxis_title .~ "Time" 57 | $ layout_x_axis . laxis_override .~ axisGridHide 58 | $ def 59 | 60 | mus :: [Double] 61 | mus = map (/~ day ) $ iterate (+ deltaMu) (0.0 *~ day) 62 | 63 | nus :: [Double] 64 | nus = map (/~ day ) $ iterate (+ deltaNu) (0.0 *~ day) 65 | 66 | selNth :: Int -> [a] -> [a] 67 | selNth n xs = map snd $ filter (\z -> fst z `mod` n == 0) (zip [0..] xs) 68 | 69 | ps :: [Double] 70 | ps = map (\(p, _, _) -> p /~ (mole / kilo gram)) ys 71 | 72 | ms :: [Double] 73 | ms = map (\(_, m, _) -> m /~ (mole / kilo gram)) ys 74 | 75 | es :: [Double] 76 | es = map (\(_, _, e) -> e /~ muPerMl) ys 77 | 78 | diagrm :: String -> String -> [[(Double, Double)]] -> Diagram Cairo 79 | diagrm t l xss = fst $ runBackendR denv (chart t l xss) 80 | 81 | diagrmM :: String -> String -> [[(Double, Double)]] -> IO (Diagram Cairo) 82 | diagrmM t l xss = do 83 | denv <- defaultEnv vectorAlignmentFns 600 500 84 | return $ fst $ runBackendR denv (chart t l xss) 85 | 86 | main :: IO () 87 | main = do 88 | mapM_ (\(x, y, z) -> printf "%4.3e, %4.3e, %4.3e\n" 89 | (x /~ (mole / kilo gram)) 90 | (y /~ (mole / kilo gram)) 91 | (z /~ muPerMl)) 92 | ys 93 | displayHeader "diagrams/EPO.png" 94 | (diagrm "EPO" "mUI / mL" [zip (map (/~ day) ts) es]) 95 | displayHeader "diagrams/Precursors.png" 96 | (diagrm "Precursors" "Cells / Kilogram " [zip (map (/~ day) ts) ps]) 97 | displayHeader "diagrams/Matures.png" 98 | (diagrm "Mature Erythrocytes" "Cells / Kilogram x 1e11" [zip (map (/~ day) ts) (map (P.* 1e-11) ms)]) 99 | 100 | -------------------------------------------------------------------------------- /build.hs: -------------------------------------------------------------------------------- 1 | {-# OPTIONS_GHC -Wall #-} 2 | 3 | {-# LANGUAGE ScopedTypeVariables #-} 4 | 5 | import Development.Shake 6 | import Development.Shake.FilePath 7 | 8 | main :: IO () 9 | main = shakeArgs shakeOptions{shakeFiles="_build"} $ do 10 | want [ "diagrams" "symplectic" <.> "png" 11 | , "RunAccGPU" <.> "ll" 12 | , "TimeAccGPU" <.> "txt" 13 | , "TimeJulGPU" <.> "txt" 14 | , "RunJulGPU" <.> "ll" 15 | ] 16 | 17 | let compile file = do 18 | need [file] 19 | cmd "ghc --make -O2" file 20 | 21 | "diagrams" "symplectic" <.> "png" %> \_out -> do 22 | need ["SymplecticMain"] 23 | cmd "./SymplecticMain" 24 | 25 | "SymplecticMain" %> \out -> do 26 | compile (out -<.> "hs") 27 | 28 | "RunAccGPU" <.> "ll" %> \out -> do 29 | need ["RunAccGPU"] 30 | (Exit _code, Stdout (_stdout :: String), Stderr (stderr :: String)) <- 31 | cmd "./RunAccGPU +ACC -ddump-cc -dverbose -ACC" 32 | writeFileChanged out stderr 33 | 34 | "TimeAccGPU" <.> "txt" %> \out -> do 35 | need ["RunAccGPU"] 36 | (Exit _code, Stdout (_stdout :: String), Stderr (stderr :: String)) <- 37 | cmd "./RunAccGPU +RTS -s" 38 | writeFileChanged out stderr 39 | 40 | "RunAccGPU" %> \out -> do 41 | compile (out -<.> "hs") 42 | 43 | "TimeJulGPU" <.> "txt" %> \out -> do 44 | need ["JuliaCPU.jl"] 45 | (Exit _code, Stdout (_stdout :: String), Stderr (stderr 
:: String)) <- 46 | cmd "time /Applications/Julia-0.5.app/Contents/Resources/julia/bin/julia JuliaCPU.jl" 47 | writeFileChanged out stderr 48 | 49 | "RunJulGPU" <.> "ll" %> \out -> do 50 | need ["JuliaLLVM.jl"] 51 | (Exit _code, Stdout (stdout :: String), Stderr (_stderr :: String)) <- 52 | cmd "/Applications/Julia-0.5.app/Contents/Resources/julia/bin/julia JuliaLLVM.jl" 53 | writeFileChanged out stdout 54 | -------------------------------------------------------------------------------- /ghc-hacking.lhs: -------------------------------------------------------------------------------- 1 | % Hacking on GHC 2 | % Dominic Steinitz 3 | % 14th February 2017 4 | 5 | One of my colleagues pointed out that the code generated by ghc for 6 | floating point *abs* was about 20 instructions when targetting the X86 7 | instruction set and that there was a single instruction available on 8 | the floating point co-processor. 9 | 10 | In fact floating point *abs* was discussed sometime ago on the 11 | ghc-dev(?) mailing list. The original Haskell code which handled 12 | floating point *abs* did not handle negative zero (-0.0) correctly. It 13 | was noted here that more efficient code could be generated but as so 14 | often with pious hopes this was never implemented. 15 | 16 | I wanted to capture the wrong paths I took as well as the final result 17 | as I learnt as much from my failures as from my ultimate success. 18 | 19 | * C code to see what gets generated 20 | 21 | * clang and gcc 22 | 23 | * same as sqrt presumably carried out on co-processor 24 | 25 | * want to drop X87 support 26 | 27 | * X86 and llvm but what about the others? cmm 28 | 29 | * Getting the benefit of e.g. polly is going to be very difficult but 30 | could use EDSL like accelerate. See the links in Trevor's emails maybe? 31 | 32 | The Haskell: 33 | 34 | ~~~~ {include="HaskellFunction.hs"} 35 | ~~~~ 36 | 37 | The .cabal: 38 | 39 | ~~~~ {include="../test-via-c.cabal"} 40 | ~~~~ 41 | 42 | On my computer running 43 | 44 | cabal install 45 | 46 | places the library in 47 | 48 | ~/Library/Haskell/ghc-8.0.1/lib/test-via-c-0.1.0.0/bin 49 | 50 | The C: 51 | 52 | ~~~~ {include="MainC.c"} 53 | ~~~~ 54 | 55 | On my computer this can be compiled with 56 | 57 | gcc-6 Bar.c 58 | ~/Library/Haskell/ghc-8.0.1/lib/test-via-c-0.1.0.0/bin/Foo.dylib 59 | -I/Library/Frameworks/GHC.framework/Versions/8.0.1-x86_64/usr/lib/ghc-8.0.1/include 60 | -L/Library/Frameworks/GHC.framework/Versions/8.0.1-x86_64/usr/lib/ghc-8.0.1/rts 61 | -lHSrts-ghc8.0.1 62 | 63 | and can be run with 64 | 65 | DYLD_LIBRARY_PATH= 66 | ~/Library/Haskell/ghc-8.0.1/lib/test-via-c-0.1.0.0/bin: 67 | /Library/Frameworks/GHC.framework/Versions/8.0.1-x86_64/usr/lib/ghc-8.0.1/rts 68 | 69 | -------------------------------------------------------------------------------- /symplectic-integrators/Notes.lhs: -------------------------------------------------------------------------------- 1 | % Data Sources 2 | % Dominic Steinitz 3 | % 18th February 2018 4 | 5 | 6 | Introduction 7 | ============ 8 | 9 | These are some very hasty notes on Runge-Kutta methods and IRK2 in 10 | particular. I make no apologies for missing lots of details. 
11 | 12 | Some Uncomprehensive Theory 13 | =========================== 14 | 15 | In general, an implicit Runge-Kutta method is given by 16 | 17 | $$ 18 | y_{n+1} = y_n + h \sum_{i=1}^s b_i k_i 19 | $$ 20 | 21 | where 22 | 23 | $$ 24 | k_i = f\left( t_n + c_i h,\ y_{n} + h \sum_{j=1}^s a_{ij} k_j \right), \quad i = 1, \ldots, s 25 | $$ 26 | 27 | and 28 | 29 | $$ 30 | \sum_{j=1}^{s} a_{ij} = c_i \text{ for } i=1, \ldots, s 31 | $$ 32 | 33 | Traditionally this is written as a Butcher tableau: 34 | 35 | $$ 36 | \begin{array}{c|cccc} 37 | c_1 & a_{11} & a_{12}& \dots & a_{1s}\\ 38 | c_2 & a_{21} & a_{22}& \dots & a_{2s}\\ 39 | \vdots & \vdots & \vdots& \ddots& \vdots\\ 40 | c_s & a_{s1} & a_{s2}& \dots & a_{ss} \\ 41 | \hline 42 | & b_1 & b_2 & \dots & b_s\\ 43 | & b^*_1 & b^*_2 & \dots & b^*_s\\ 44 | \end{array} 45 | $$ 46 | 47 | and even more succinctly as 48 | 49 | $$ 50 | \begin{array}{c|c} 51 | \mathbf{c}& A\\ 52 | \hline 53 | & \mathbf{b^T} \\ 54 | \end{array} 55 | $$ 56 | 57 | For a Gauß-Legendre method we set the values of the $c_i$ to the zeros 58 | of the shifted Legendre polynomial $\tilde{P}_s$. 59 | 60 | An explicit expression for the shifted Legendre polynomials is given by 61 | 62 | $$ 63 | \tilde{P}_n(x) = (-1)^n \sum_{k=0}^n \binom{n}{k} \binom{n+k}{k} (-x)^k 64 | $$ 65 | 66 | The first few shifted Legendre polynomials are: 67 | 68 | $$ 69 | \begin{array}{r|r} 70 | n & \tilde{P}_n(x) \\ 71 | \hline 72 | 0 & 1 \\ 73 | 1 & 2x-1 \\ 74 | 2 & 6x^2-6x+1 \\ 75 | 3 & 20x^3-30x^2+12x-1 \\ 76 | 4 & 70x^4-140x^3+90x^2-20x+1 \\ 77 | 5 & 252x^5 -630x^4 +560x^3 - 210 x^2 + 30 x -1 78 | \end{array} 79 | $$ 80 | 81 | Setting 82 | 83 | $$ q(t) \triangleq \prod_{j=1}^s (t - c_j) \quad \text{and} \quad q_l(t) 84 | \triangleq \frac{q(t)}{t - c_l}, \, l = 1,2, \ldots, s $$ 85 | 86 | then the collocation method gives 87 | 88 | $$ 89 | a_{j,i} \triangleq \int_0^{c_j} \frac{q_i(\tau)}{q_i(c_i)}\,{\mathrm d}\tau 90 | $$ 91 | 92 | $$ 93 | b_{j} \triangleq \int_0^{1} \frac{q_j(\tau)}{q_j(c_j)}\,{\mathrm d}\tau 94 | $$ 95 | 96 | For $s = 1$ we have $2x - 1 = 0$ and thus $c_1 = 1/2$ and the Butcher tableau is 97 | 98 | $$ 99 | \begin{array}{c|c} 100 | 1/2 & 1/2\\ 101 | \hline 102 | & 1 \\ 103 | \end{array} 104 | $$ 105 | 106 | that is, the implicit RK2 method aka the mid-point method. 107 | 108 | For $s = 2$ we have $6x^2-6x+1 = 0$ and thus $c_1 = \frac{1}{2} - 109 | \frac{1}{2\sqrt{3}}$ and $c_2 = \frac{1}{2} + \frac{1}{2\sqrt{3}}$ and 110 | the Butcher tableau is 111 | 112 | $$ 113 | \begin{array}{c|cc} 114 | \frac{1}{2} - \frac{1}{2\sqrt{3}} & \frac{1}{4} & \frac{1}{4} - \frac{1}{2\sqrt{3}}\\ 115 | \frac{1}{2} + \frac{1}{2\sqrt{3}} & \frac{1}{4} + \frac{1}{2\sqrt{3}} & \frac{1}{4}\\ 116 | \hline 117 | & \frac{1}{2} & \frac{1}{2}\\ 118 | \end{array} 119 | $$ 120 | 121 | that is, the implicit RK4 method. 122 | 123 | 124 | Implicit RK2 125 | ============ 126 | 127 | Explicitly 128 | 129 | $$ 130 | y_{n+1} = y_n + h b_1 k_1 131 | $$ 132 | 133 | where 134 | 135 | $$ 136 | k_1 = f\left( t_n + c_1 h,\ y_{n} + h a_{11} k_1 \right) 137 | $$ 138 | 139 | Substituting in the values from the tableau, we have 140 | 141 | $$ 142 | y_{n+1} = y_n + h k_1 143 | $$ 144 | 145 | where 146 | 147 | $$ 148 | k_1 = f\left( t_n + \frac{1}{2} h,\ y_{n} + h \frac{1}{2} k_1 \right) 149 | $$ 150 | 151 | and further inlining and substitution gives 152 | 153 | $$ 154 | y_{n+1}=y_n+hf(t_n+\frac{h}{2},\frac{1}{2}(y_n+y_{n+1})) 155 | $$ 156 | 157 | which can be recognised as the mid-point method.
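Before turning to gsl, here is a small, purely illustrative sketch (not part of the module further down, and with the number of fixed-point iterations chosen arbitrarily) of how one such implicit step can be solved by simple Picard iteration for a scalar ODE $\dot{y} = f(t, y)$; the co-recursive list version later in these notes does essentially the same thing for the Van der Pol system.

```
-- Illustration only: one implicit midpoint step, solving the implicit
-- equation by a fixed number of fixed-point (Picard) iterations.
midpointStep :: (Double -> Double -> Double) -> Double -> Double -> Double -> Double
midpointStep f h tn yn = iterate go yn !! 10
  where
    go y = yn + h * f (tn + h / 2) ((yn + y) / 2)
```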
158 | 159 | Implementation 160 | ============== 161 | 162 | A common package for solving ODEs is 163 | [gsl](https://www.gnu.org/software/gsl/) which Haskell interfaces via 164 | the [hmatrix-gsl](https://hackage.haskell.org/package/hmatrix-gsl) 165 | package. Some of gsl is coded with the conditional compilation flag 166 | *DEBUG* e.g. in 167 | [msbdf.c](https://github.com/idontgetoutmuch/gsl/blob/953e81673caa21d690e5d72594bc4cd2a60ba311/ode-initval2/msbdf.c#L581) 168 | but sadly not in the simpler methods maybe because they were made part 169 | of the package some years earlier. We can add our own of course; 170 | here's a 171 | [link](https://github.com/idontgetoutmuch/gsl/commit/c2035977d65cd804169ff3370da6723cf879be75) 172 | for reference. 173 | 174 | Let's see how the Implicit Runge-Kutta Order 2 method does on the 175 | following system taken from the [gsl 176 | documenation](https://www.gnu.org/software/gsl/manual/html_node/ODE-Example-programs.html) 177 | 178 | $$ 179 | \frac{{\mathrm d}^2u}{{\mathrm d} t^2} + \mu \frac{{\mathrm d}^u}{{\mathrm d} t} (u^2 - 1) + u = 0 180 | $$ 181 | 182 | which can be re-written as 183 | 184 | $$ 185 | \begin{aligned} 186 | \frac{{\mathrm d}y_0}{{\mathrm d} t} &= y_1 \\ 187 | \frac{{\mathrm d}y_1}{{\mathrm d} t} &= -y_0 - \mu y_1 (y_0 y_0 - 1) 188 | \end{aligned} 189 | $$ 190 | 191 | but replacing *gsl_odeiv2_step_rk8pd* with *gsl_odeiv2_step_rk2imp*. 192 | 193 | Here's the first few steps 194 | 195 | 196 | ``` 197 | rk2imp_apply: t=0.00000e+00, h=1.00000e-06, y:1.00000e+00 0.00000e+00 198 | -- evaluate jacobian 199 | ( 2, 2)[0,0]: 0 200 | ( 2, 2)[0,1]: 1 201 | ( 2, 2)[1,0]: -1 202 | ( 2, 2)[1,1]: -0 203 | YZ:1.00000e+00 -5.00000e-07 204 | -- error estimates: 0.00000e+00 8.35739e-20 205 | rk2imp_apply: t=1.00000e-06, h=5.00000e-06, y:1.00000e+00 -1.00000e-06 206 | -- evaluate jacobian 207 | ( 2, 2)[0,0]: 0 208 | ( 2, 2)[0,1]: 1 209 | ( 2, 2)[1,0]: -0.99997999999999998 210 | ( 2, 2)[1,1]: 1.00008890058234101e-11 211 | YZ:1.00000e+00 -3.50000e-06 212 | -- error estimates: 1.48030e-16 1.04162e-17 213 | rk2imp_apply: t=6.00000e-06, h=2.50000e-05, y:1.00000e+00 -6.00000e-06 214 | -- evaluate jacobian 215 | ( 2, 2)[0,0]: 0 216 | ( 2, 2)[0,1]: 1 217 | ( 2, 2)[1,0]: -0.999880000000002878 218 | ( 2, 2)[1,1]: 3.59998697518904009e-10 219 | YZ:1.00000e+00 -1.85000e-05 220 | -- error estimates: 1.48030e-16 1.30208e-15 221 | rk2imp_apply: t=3.10000e-05, h=1.25000e-04, y:1.00000e+00 -3.10000e-05 222 | -- evaluate jacobian 223 | ( 2, 2)[0,0]: 0 224 | ( 2, 2)[0,1]: 1 225 | ( 2, 2)[1,0]: -0.999380000000403723 226 | ( 2, 2)[1,1]: 9.6099972424212865e-09 227 | YZ:1.00000e+00 -9.35000e-05 228 | -- error estimates: 0.00000e+00 1.62760e-13 229 | rk2imp_apply: t=1.56000e-04, h=6.25000e-04, y:1.00000e+00 -1.56000e-04 230 | -- evaluate jacobian 231 | ( 2, 2)[0,0]: 0 232 | ( 2, 2)[0,1]: 1 233 | ( 2, 2)[1,0]: -0.996880000051409643 234 | ( 2, 2)[1,1]: 2.4335999548874554e-07 235 | YZ:1.00000e+00 -4.68500e-04 236 | -- error estimates: 1.55431e-14 2.03450e-11 237 | ``` 238 | 239 | Let's see if we can reproduce this in a fast and loose way in Haskell 240 | 241 | To make our code easier to write and read let's lift some arithmetic 242 | operators to act on lists (we should really use the 243 | [Naperian](https://hackage.haskell.org/package/Naperian) package). 
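For example, once the instance below is in scope, addition and multiplication act elementwise and `repeat h` plays the role of a scalar; a throwaway check (again, not part of the module) might look like

```
-- Assumes the Num [a] instance defined below is in scope.
checkLifting :: Bool
checkLifting = ([1, 2, 3] + [10, 20, 30] == [11, 22, 33])
            && (repeat 2 * [1, 2, 3] == [2, 4, 6])   -- zipWith stops at the shorter list
```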
244 | 245 | > module RK2Imp where 246 | 247 | > instance Num a => Num [a] where 248 | > (+) = zipWith (+) 249 | > (*) = zipWith (*) 250 | 251 | The actual algorithm is almost trivial 252 | 253 | > rk2Step :: Fractional a => a -> a -> [a] -> [a] -> [a] 254 | > rk2Step h t y_n0 y_n1 = y_n0 + (repeat h) * f (t + 0.5 * h) ((repeat 0.5) * (y_n0 + y_n1)) 255 | 256 | The example Van der Pol oscillator with the same parameter and initial 257 | conditions as in the gsl example. 258 | 259 | > f :: Fractional b => a -> [b] -> [b] 260 | > f t [y0, y1] = [y1, -y0 - mu * y1 * (y0 * y0 - 1)] 261 | > where 262 | > mu = 10.0 263 | 264 | > y_init :: [Double] 265 | > y_init = [1.0, 0.0] 266 | 267 | We can use co-recursion to find the fixed point of the Runge-Kutta 268 | equation arbitrarily choosing the 10th iteration! 269 | 270 | > y_1s, y_2s, y_3s :: [[Double]] 271 | > y_1s = y_init : map (rk2Step 1.0e-6 0.0 y_init) y_1s 272 | 273 | > nC :: Int 274 | > nC = 10 275 | 276 | > y_1, y_2, y_3 :: [Double] 277 | > y_1 = y_1s !! nC 278 | 279 | This gives 280 | 281 | [ghci] 282 | y_1 283 | 284 | which is not too far from the value given by gsl. 285 | 286 | ``` 287 | y:1.00000e+00 -1.00000e-06 288 | ``` 289 | 290 | Getting the next time and step size from the gsl *DEBUG* information 291 | we can continue 292 | 293 | > y_2s = y_1 : map (rk2Step 5.0e-6 1.0e-6 y_1) y_2s 294 | > 295 | > y_2 = y_2s !! nC 296 | 297 | [ghci] 298 | y_2 299 | 300 | gsl gives 301 | 302 | ``` 303 | y:1.00000e+00 -6.00000e-06 304 | ``` 305 | 306 | > y_3s = y_2 : map (rk2Step 2.5e-5 6.0e-6 y_2) y_3s 307 | > y_3 = y_3s !! nC 308 | 309 | One more time 310 | 311 | [ghci] 312 | y_3 313 | 314 | And gsl gives 315 | 316 | ``` 317 | y:1.00000e+00 -3.10000e-05 318 | ``` 319 | -------------------------------------------------------------------------------- /symplectic-integrators/shell.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? import 2 | {} 3 | , compiler ? "ghc822" 4 | , doBenchmark ? 
false }: 5 | 6 | let 7 | 8 | inherit (nixpkgs) pkgs; 9 | 10 | f = { mkDerivation, ad, base, diagrams-lib, diagrams-rasterific 11 | , hmatrix, hmatrix-gsl, inline-r, julia, plots, R, stdenv, vector }: 12 | 13 | mkDerivation { 14 | pname = "symplectic-integrators"; 15 | version = "0.1.0.0"; 16 | src = ./.; 17 | isLibrary = false; 18 | isExecutable = true; 19 | executableHaskellDepends = [ 20 | ad 21 | base 22 | diagrams-lib 23 | diagrams-rasterific 24 | hmatrix 25 | hmatrix-gsl 26 | inline-r 27 | plots 28 | vector 29 | ]; 30 | executableSystemDepends = [ 31 | julia 32 | patched-gsl 33 | R 34 | pkgs.rPackages.anytime 35 | pkgs.rPackages.ggplot2 36 | pkgs.rPackages.maptools 37 | pkgs.rPackages.reshape2 38 | pkgs.rPackages.rgeos 39 | pkgs.rPackages.rgdal 40 | pkgs.rPackages.rstan ]; 41 | license = stdenv.lib.licenses.bsd3; 42 | }; 43 | 44 | haskellPackages = if compiler == "default" 45 | then pkgs.haskellPackages 46 | else pkgs.haskell.packages.${compiler}; 47 | 48 | variant = if doBenchmark then pkgs.haskell.lib.doBenchmark else pkgs.lib.id; 49 | 50 | patched-hmatrix = pkgs.haskellPackages.hmatrix.overrideAttrs (oldAttrs: rec { 51 | src = nixpkgs.fetchgit { 52 | url = git://github.com/albertoruiz/hmatrix; 53 | rev = "d83b17190029c11e3ab8b504e5cdc917f5863120"; 54 | sha256 = "11wr59wg21rky59j3kkd3ba6aqns9gkh0r1fnhwhn3fp7zfhanqn"; 55 | }; 56 | postUnpack = '' 57 | sourceRoot=''${sourceRoot}/packages/base 58 | echo Source root reset to ''${sourceRoot} 59 | ''; 60 | }); 61 | 62 | patched-hmatrix-gsl = pkgs.haskellPackages.hmatrix-gsl.overrideAttrs (oldAttrs: rec { 63 | src = nixpkgs.fetchgit { 64 | url = git://github.com/albertoruiz/hmatrix; 65 | rev = "d83b17190029c11e3ab8b504e5cdc917f5863120"; 66 | sha256 = "11wr59wg21rky59j3kkd3ba6aqns9gkh0r1fnhwhn3fp7zfhanqn"; 67 | }; 68 | postUnpack = '' 69 | sourceRoot=''${sourceRoot}/packages/gsl 70 | echo Source root reset to ''${sourceRoot} 71 | ''; 72 | }); 73 | 74 | patched-gsl = pkgs.gsl.overrideAttrs (oldAttrs: rec { 75 | src = nixpkgs.fetchgit { 76 | url = git://github.com/idontgetoutmuch/gsl; 77 | rev = "c2035977d65cd804169ff3370da6723cf879be75"; 78 | sha256 = "1fqp77gp9nl3av1z58cwg8fivik4rff394wgjzc76ayd04y0d1k7"; 79 | }; 80 | version = "2.5"; 81 | name = "gsl-${version}"; 82 | doCheck = false; 83 | CFLAGS = "-DDEBUG"; 84 | }); 85 | 86 | drv = variant (haskellPackages.callPackage f { 87 | hmatrix = patched-hmatrix; 88 | hmatrix-gsl = patched-hmatrix-gsl; 89 | }); 90 | 91 | in 92 | 93 | if pkgs.lib.inNixShell then drv.env else drv 94 | -------------------------------------------------------------------------------- /variational/ChangeLog.md: -------------------------------------------------------------------------------- 1 | # Revision history for variational 2 | 3 | ## 0.1.0.0 -- YYYY-mm-dd 4 | 5 | * First version. Released on an unsuspecting world. 6 | -------------------------------------------------------------------------------- /variational/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, Dominic Steinitz 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 
10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Dominic Steinitz nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /variational/OldFaithful.R: -------------------------------------------------------------------------------- 1 | head(faithful) 2 | -------------------------------------------------------------------------------- /variational/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /variational/VBEMGMM/MyEllipse.m: -------------------------------------------------------------------------------- 1 | function h=MyEllipse(varargin) 2 | % ERROR_ELLIPSE - plot an error ellipse, or ellipsoid, defining confidence region 3 | % ERROR_ELLIPSE(C22) - Given a 2x2 covariance matrix, plot the 4 | % associated error ellipse, at the origin. It returns a graphics handle 5 | % of the ellipse that was drawn. 6 | % 7 | % ERROR_ELLIPSE(C33) - Given a 3x3 covariance matrix, plot the 8 | % associated error ellipsoid, at the origin, as well as its projections 9 | % onto the three axes. Returns a vector of 4 graphics handles, for the 10 | % three ellipses (in the X-Y, Y-Z, and Z-X planes, respectively) and for 11 | % the ellipsoid. 12 | % 13 | % ERROR_ELLIPSE(C,MU) - Plot the ellipse, or ellipsoid, centered at MU, 14 | % a vector whose length should match that of C (which is 2x2 or 3x3). 15 | % 16 | % ERROR_ELLIPSE(...,'Property1',Value1,'Name2',Value2,...) sets the 17 | % values of specified properties, including: 18 | % 'C' - Alternate method of specifying the covariance matrix 19 | % 'mu' - Alternate method of specifying the ellipse (-oid) center 20 | % 'conf' - A value betwen 0 and 1 specifying the confidence interval. 21 | % the default is 0.5 which is the 50% error ellipse. 22 | % 'scale' - Allow the plot the be scaled to difference units. 23 | % 'style' - A plotting style used to format ellipses. 24 | % 'clip' - specifies a clipping radius. Portions of the ellipse, -oid, 25 | % outside the radius will not be shown. 26 | % 27 | % NOTES: C must be positive definite for this function to work properly. 28 | % Written by somebody anonymous + Modified by Emtiyaz 29 | 30 | default_properties = struct(... 31 | 'C', [], ... 
% The covaraince matrix (required) 32 | 'mu', [], ... % Center of ellipse (optional) 33 | 'conf', 0.5, ... % Percent confidence/100 34 | 'scale', 1, ... % Scale factor, e.g. 1e-3 to plot m as km 35 | 'style', '', ... % Plot style 36 | 'clip', inf, ... 37 | 'intensity',1, ... 38 | 'facefill',1); % Clipping radius 39 | 40 | if length(varargin) >= 1 & isnumeric(varargin{1}) 41 | default_properties.C = varargin{1}; 42 | varargin(1) = []; 43 | end 44 | 45 | if length(varargin) >= 1 & isnumeric(varargin{1}) 46 | default_properties.mu = varargin{1}; 47 | varargin(1) = []; 48 | end 49 | 50 | if length(varargin) >= 1 & isnumeric(varargin{1}) 51 | default_properties.conf = varargin{1}; 52 | varargin(1) = []; 53 | end 54 | 55 | if length(varargin) >= 1 & isnumeric(varargin{1}) 56 | default_properties.scale = varargin{1}; 57 | varargin(1) = []; 58 | end 59 | 60 | if length(varargin) >= 1 & ~ischar(varargin{1}) 61 | error('Invalid parameter/value pair arguments.') 62 | end 63 | 64 | prop = getopt(default_properties, varargin{:}); 65 | C = prop.C; 66 | 67 | if isempty(prop.mu) 68 | mu = zeros(length(C),1); 69 | else 70 | mu = prop.mu; 71 | end 72 | 73 | conf = prop.conf; 74 | scale = prop.scale; 75 | style = prop.style; 76 | intensity = prop.intensity; 77 | facefill = prop.facefill; 78 | 79 | 80 | if conf <= 0 | conf >= 1 81 | error('conf parameter must be in range 0 to 1, exclusive') 82 | end 83 | 84 | [r,c] = size(C); 85 | if r ~= c | (r ~= 2 & r ~= 3) 86 | error(['Don''t know what to do with ',num2str(r),'x',num2str(c),' matrix']) 87 | end 88 | 89 | x0=mu(1); 90 | y0=mu(2); 91 | 92 | % Compute quantile for the desired percentile 93 | k = sqrt(qchisq(conf,r)); % r is the number of dimensions (degrees of freedom) 94 | 95 | hold_state = get(gca,'nextplot'); 96 | 97 | if r==3 & c==3 98 | z0=mu(3); 99 | 100 | % Make the matrix has positive eigenvalues - else it's not a valid covariance matrix! 101 | if any(eig(C) <=0) 102 | error('The covariance matrix must be positive definite (it has non-positive eigenvalues)') 103 | end 104 | 105 | % C is 3x3; extract the 2x2 matricies, and plot the associated error 106 | % ellipses. They are drawn in space, around the ellipsoid; it may be 107 | % preferable to draw them on the axes. 108 | Cxy = C(1:2,1:2); 109 | Cyz = C(2:3,2:3); 110 | Czx = C([3 1],[3 1]); 111 | 112 | [x,y,z] = getpoints(Cxy,prop.clip); 113 | h1=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on 114 | [y,z,x] = getpoints(Cyz,prop.clip); 115 | h2=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on 116 | [z,x,y] = getpoints(Czx,prop.clip); 117 | h3=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on 118 | 119 | 120 | [eigvec,eigval] = eig(C); 121 | 122 | [X,Y,Z] = ellipsoid(0,0,0,1,1,1); 123 | XYZ = [X(:),Y(:),Z(:)]*sqrt(eigval)*eigvec'; 124 | 125 | X(:) = scale*(k*XYZ(:,1)+x0); 126 | Y(:) = scale*(k*XYZ(:,2)+y0); 127 | Z(:) = scale*(k*XYZ(:,3)+z0); 128 | h4=surf(X,Y,Z); 129 | colormap gray 130 | alpha(0.3) 131 | camlight 132 | if nargout 133 | h=[h1 h2 h3 h4]; 134 | end 135 | elseif r==2 & c==2 136 | % Make the matrix has positive eigenvalues - else it's not a valid covariance matrix! 
137 | if any(eig(C) <=0) 138 | error('The covariance matrix must be positive definite (it has non-positive eigenvalues)') 139 | end 140 | 141 | [x,y,z] = getpoints(C,prop.clip); 142 | h1=plot(scale*(x0+k*x),scale*(y0+k*y),prop.style); 143 | set(h1,'zdata',z+1) 144 | fill(scale*(x0+k*x),scale*(y0+k*y),(1-intensity).*[1 1 1],'FaceAlpha',facefill); 145 | if nargout 146 | h=h1; 147 | end 148 | else 149 | error('C (covaraince matrix) must be specified as a 2x2 or 3x3 matrix)') 150 | end 151 | %axis equal 152 | 153 | set(gca,'nextplot',hold_state); 154 | 155 | %--------------------------------------------------------------- 156 | % getpoints - Generate x and y points that define an ellipse, given a 2x2 157 | % covariance matrix, C. z, if requested, is all zeros with same shape as 158 | % x and y. 159 | function [x,y,z] = getpoints(C,clipping_radius) 160 | 161 | n=100; % Number of points around ellipse 162 | p=0:pi/n:2*pi; % angles around a circle 163 | 164 | [eigvec,eigval] = eig(C); % Compute eigen-stuff 165 | xy = [cos(p'),sin(p')] * sqrt(eigval) * eigvec'; % Transformation 166 | x = xy(:,1); 167 | y = xy(:,2); 168 | z = zeros(size(x)); 169 | 170 | % Clip data to a bounding radius 171 | if nargin >= 2 172 | r = sqrt(sum(xy.^2,2)); % Euclidian distance (distance from center) 173 | x(r > clipping_radius) = nan; 174 | y(r > clipping_radius) = nan; 175 | z(r > clipping_radius) = nan; 176 | end 177 | 178 | %--------------------------------------------------------------- 179 | function x=qchisq(P,n) 180 | % QCHISQ(P,N) - quantile of the chi-square distribution. 181 | if nargin<2 182 | n=1; 183 | end 184 | 185 | s0 = P==0; 186 | s1 = P==1; 187 | s = P>0 & P<1; 188 | x = 0.5*ones(size(P)); 189 | x(s0) = -inf; 190 | x(s1) = inf; 191 | x(~(s0|s1|s))=nan; 192 | 193 | for ii=1:14 194 | dx = -(pchisq(x(s),n)-P(s))./dchisq(x(s),n); 195 | x(s) = x(s)+dx; 196 | if all(abs(dx) < 1e-6) 197 | break; 198 | end 199 | end 200 | 201 | %--------------------------------------------------------------- 202 | function F=pchisq(x,n) 203 | % PCHISQ(X,N) - Probability function of the chi-square distribution. 204 | if nargin<2 205 | n=1; 206 | end 207 | F=zeros(size(x)); 208 | 209 | if rem(n,2) == 0 210 | s = x>0; 211 | k = 0; 212 | for jj = 0:n/2-1; 213 | k = k + (x(s)/2).^jj/factorial(jj); 214 | end 215 | F(s) = 1-exp(-x(s)/2).*k; 216 | else 217 | for ii=1:numel(x) 218 | if x(ii) > 0 219 | F(ii) = quadl(@dchisq,0,x(ii),1e-6,0,n); 220 | else 221 | F(ii) = 0; 222 | end 223 | end 224 | end 225 | 226 | %--------------------------------------------------------------- 227 | function f=dchisq(x,n) 228 | % DCHISQ(X,N) - Density function of the chi-square distribution. 229 | if nargin<2 230 | n=1; 231 | end 232 | f=zeros(size(x)); 233 | s = x>=0; 234 | f(s) = x(s).^(n/2-1).*exp(-x(s)/2)./(2^(n/2)*gamma(n/2)); 235 | 236 | %--------------------------------------------------------------- 237 | function properties = getopt(properties,varargin) 238 | %GETOPT - Process paired optional arguments as 'prop1',val1,'prop2',val2,... 239 | % 240 | % getopt(properties,varargin) returns a modified properties structure, 241 | % given an initial properties structure, and a list of paired arguments. 242 | % Each argumnet pair should be of the form property_name,val where 243 | % property_name is the name of one of the field in properties, and val is 244 | % the value to be assigned to that structure field. 245 | % 246 | % No validation of the values is performed. 
247 | % 248 | % EXAMPLE: 249 | % properties = struct('zoom',1.0,'aspect',1.0,'gamma',1.0,'file',[],'bg',[]); 250 | % properties = getopt(properties,'aspect',0.76,'file','mydata.dat') 251 | % would return: 252 | % properties = 253 | % zoom: 1 254 | % aspect: 0.7600 255 | % gamma: 1 256 | % file: 'mydata.dat' 257 | % bg: [] 258 | % 259 | % Typical usage in a function: 260 | % properties = getopt(properties,varargin{:}) 261 | 262 | % Process the properties (optional input arguments) 263 | prop_names = fieldnames(properties); 264 | TargetField = []; 265 | for ii=1:length(varargin) 266 | arg = varargin{ii}; 267 | if isempty(TargetField) 268 | if ~ischar(arg) 269 | error('Propery names must be character strings'); 270 | end 271 | f = find(strcmp(prop_names, arg)); 272 | if length(f) == 0 273 | error('%s ',['invalid property ''',arg,'''; must be one of:'],prop_names{:}); 274 | end 275 | TargetField = arg; 276 | else 277 | % properties.(TargetField) = arg; % Ver 6.5 and later only 278 | properties = setfield(properties, TargetField, arg); % Ver 6.1 friendly 279 | TargetField = ''; 280 | end 281 | end 282 | if ~isempty(TargetField) 283 | error('Property names and values must be specified in pairs.'); 284 | end 285 | -------------------------------------------------------------------------------- /variational/VBEMGMM/README.txt: -------------------------------------------------------------------------------- 1 | 22 Oct 2008 2 | gmmVBEM.m is the main file for VBEM 3 | 4 | Main file needs 5 | MyEllipse.m for plotting 6 | Netlab gmmem for initialization 7 | Following example file illustrates the usage 8 | exampleVBEM.m example file for VBEM 9 | faithful.txt Dataset for the example 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /variational/VBEMGMM/Test.m: -------------------------------------------------------------------------------- 1 | ## Read data 2 | 3 | id = fopen('faithful.txt','r') 4 | x = fscanf(fid,'%f',[2,272]); 5 | fclose(fid); 6 | train_data = x; 7 | 8 | ## Standardize the data 9 | 10 | train_data = train_data - repmat(mean(train_data,2),1,size(train_data,2)); 11 | train_data = train_data./repmat(var(train_data,[],2),1,size(train_data,2)); 12 | [dim N] = size(train_data); 13 | 14 | ## Intialize the priors 15 | 16 | ncentres = 15 17 | PriorPar.alpha = .001; 18 | PriorPar.mu = zeros(dim,1); 19 | PriorPar.beta = 1; 20 | PriorPar.W = 200*eye(dim); 21 | PriorPar.v = 20; 22 | 23 | ## The first iteration 24 | 25 | x = train_data; 26 | [D N] = size(x); 27 | 28 | ## Initialize variables 29 | 30 | K = ncentres; 31 | alpha0 = PriorPar.alpha; 32 | m0 = PriorPar.mu; 33 | beta0 = PriorPar.beta; 34 | W0 = PriorPar.W; 35 | 36 | 37 | 38 | ncentres = 15; 39 | mix = gmm(2, ncentres, 'full'); 40 | options = foptions; 41 | options(14) = 10; 42 | mix = gmminit(mix, train_data', options); 43 | maxIter = 30; options(3) = 0.1; options(14) = maxIter; 44 | [mix, options, errlog] = gmmem(mix, train_data', options); 45 | % intialize the priors 46 | PriorPar.alpha = .001; 47 | PriorPar.mu = zeros(dim,1); 48 | PriorPar.beta = 1; 49 | PriorPar.W = 200*eye(dim); 50 | PriorPar.v = 20; 51 | -------------------------------------------------------------------------------- /variational/VBEMGMM/Test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pandas 3 | 4 | K = 6 5 | 6 | of_df = pandas.read_csv("/Users/dom/Dropbox/Private/NumMethHaskell/variational/VBEMGMM/faithful.txt", sep = ' ', 
header=None) 7 | 8 | N = of_df.shape[0] 9 | 10 | np.random.seed(42) 11 | Z = np.array([np.random.dirichlet(np.ones(K)) for _ in range(N)]) 12 | 13 | Y = pandas.DataFrame(Z) 14 | 15 | Y.to_csv("/Users/dom/Dropbox/Private/NumMethHaskell/variational/VBEMGMM/faithZ.txt") 16 | 17 | YHask = pandas.read_csv("/Users/dom/Dropbox/Private/NumMethHaskell/variational/bigR.csv", sep = ',', header=None) 18 | 19 | NK = Z.sum(axis=0) 20 | 21 | ZHask = pandas.DataFrame.as_matrix(YHask) 22 | 23 | NKHask = ZHask.sum(axis=0) 24 | 25 | def calcXd(Z,X): 26 | #weighted means (by component responsibilites) 27 | (N,XDim) = np.shape(X) 28 | (N1,K) = np.shape(Z) 29 | NK = Z.sum(axis=0) 30 | assert N==N1 31 | xd = np.zeros((K,XDim)) 32 | for n in range(N): 33 | for k in range(K): 34 | xd[k,:] += Z[n,k]*X[n,:] 35 | #safe divide: 36 | for k in range(K): 37 | if NK[k]>0: xd[k,:] = xd[k,:]/NK[k] 38 | 39 | return xd 40 | 41 | X = of_df.as_matrix() 42 | 43 | xd = calcXd(ZHask,X) 44 | 45 | def calcS(Z,X,xd,NK): 46 | (N,K)=np.shape(Z) 47 | (N1,XDim)=np.shape(X) 48 | assert N==N1 49 | 50 | S = [np.zeros((XDim,XDim)) for _ in range(K)] 51 | for n in range(N): 52 | for k in range(K): 53 | B0 = np.reshape(X[n,:]-xd[k,:], (XDim,1)) 54 | L = np.dot(B0,B0.T) 55 | assert np.shape(L)==np.shape(S[k]),np.shape(L) 56 | S[k] += Z[n,k]*L 57 | #safe divide: 58 | for k in range(K): 59 | if NK[k]>0: S[k] = S[k]/NK[k] 60 | return S 61 | 62 | S = calcS(Z,X,xd,NK) 63 | -------------------------------------------------------------------------------- /variational/VBEMGMM/gmmVBEM.m: -------------------------------------------------------------------------------- 1 | function [out] = gmmVBEM3(x, mix, PriorPar, options) 2 | 3 | % Variational Bayes EM algorithm for Gaussian Mixture Model 4 | % This implementation is based on Bishop's Book 5 | % Refer to Bishop's book for notation and details 6 | % @book{bishop2006pra, 7 | % title={{Pattern recognition and machine learning}}, 8 | % author={Bishop, C.M.}, 9 | % year={2006}, 10 | % publisher={Springer} 11 | % } 12 | % Function uses inputs similar to Netlab 13 | %%%%%%%%%%%%%%%% 14 | % INPUT 15 | % D is the dimension, N is the number of Data points 16 | % x - training data of size DxN 17 | % mix - gmm model initialize with netlab's gmmem function 18 | % PriorPar - structure containing priors 19 | % options - options for maxIter, threshold etc. etc. 20 | % FOR DETAILS OF ABOVE VARIABLE, REFER TO EXAMPLE FILE 21 | % out is the output vector (see the end of this file for details) 22 | %%%%%%%%%%%%%%%% 23 | % Written by Emtiyaz, CS, UBC 24 | % June, 2007 25 | 26 | % initialize variables 27 | [D N] = size(x); 28 | K = mix.ncentres; 29 | likIncr = options.threshold + eps; 30 | logLambdaTilde = zeros(1,K); 31 | E = zeros(N,K); 32 | trSW = zeros(1,K); 33 | xbarWxbar = zeros(1,K); 34 | mWm = zeros(1,K); 35 | trW0invW = zeros(1,K); 36 | 37 | % priors 38 | % prior over the mixing coefficients - CB 10.39 39 | alpha0 = PriorPar.alpha; 40 | % prior over gaussian means - CB 10. 40 41 | m0 = PriorPar.mu; 42 | % prior over the gaussian variance - CB 10.40 43 | beta0 = PriorPar.beta; 44 | % wishart prior variables - CB 10.40 45 | W0 = PriorPar.W; 46 | W0inv = inv(W0); 47 | v0 = PriorPar.v; 48 | 49 | % Use 'responsibilities' from initialization to set sufficient statistics - 50 | % CB 10.51-10.53. 51 | Nk = N*mix.priors'; 52 | xbar = mix.centres'; 53 | S = mix.covars; 54 | 55 | % Use above sufficient statistics for M step update equations - CB 10.58, 56 | % 10.60-10.63. 
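% alpha, beta and v simply add the effective counts Nk to their priors,
% while m and W blend the prior (m0, W0inv) with the sufficient statistics
% xbar and S set above; together these are the Gaussian-Wishart and
% Dirichlet posterior hyperparameters of the variational distribution.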
57 | alpha = alpha0 + Nk; 58 | beta = beta0 + Nk; 59 | v = v0 + Nk; 60 | m = ((beta0*m0)*ones(1,K) + (ones(D,1)*Nk').*xbar)./(ones(D,1)*beta'); 61 | W = zeros(D,D,K); 62 | for k = 1:K 63 | mult1 = beta0.*Nk(k)/(beta0 + Nk(k)); 64 | diff3 = xbar(:,k) - m0; 65 | W(:,:,k) = inv(W0inv + Nk(k)*S(:,:,k) + mult1*diff3*diff3'); 66 | end 67 | 68 | 69 | % Main loop of algorithm 70 | for iter = 1:options.maxIter 71 | % Calculate r 72 | psiAlphaHat = psi(0,sum(alpha)); 73 | logPiTilde = psi(0,alpha) - psiAlphaHat; 74 | const = D*log(2); 75 | for k = 1:K 76 | t1 = psi(0, 0.5*repmat(v(k)+1,D,1) - 0.5*[1:D]'); 77 | logLambdaTilde(k) = sum(t1) + const + log(det(W(:,:,k))); 78 | for n = 1:N 79 | % Calculate E 80 | diff = x(:,n) - m(:,k); 81 | E(n,k) = D/beta(k) + v(k)*diff'*W(:,:,k)*diff; 82 | end 83 | end 84 | logRho = repmat(logPiTilde' + 0.5*logLambdaTilde, N,1) - 0.5*E; 85 | logSumRho = logsumexp(logRho,2); 86 | logr = logRho - repmat(logSumRho, 1,K); 87 | r = exp(logr); 88 | 89 | % compute N(k) 90 | Nk = exp(logsumexp(logr,1))'; 91 | % add a non-zero term for the components with zero responsibilities 92 | Nk = Nk + 1e-10; 93 | % compute xbar(k), S(k) 94 | for k=1:K 95 | xbar(:,k) = sum(repmat(r(:,k)',D,1).*x,2)/Nk(k); 96 | diff1 = x - repmat(xbar(:,k),1,N); 97 | diff2 = repmat(r(:,k)',D,1).*diff1; 98 | S(:,:,k) = (diff2*diff1')./Nk(k); 99 | end 100 | 101 | 102 | % compute Lower bound (refer to Bishop for these terms) 103 | % C(alpha0) 104 | logCalpha0 = gammaln(K*alpha0) - K*gammaln(alpha0); 105 | % B(lambda0) 106 | logB0 = (v0/2)*log(det(W0inv)) - (v0*D/2)*log(2) ... 107 | - (D*(D-1)/4)*log(pi) - sum(gammaln(0.5*(v0+1 -[1:D]))); 108 | % log(C(alpha)) 109 | logCalpha = gammaln(sum(alpha)) - sum(gammaln(alpha)); 110 | % Various other parameters for different terms 111 | H =0; 112 | for k = 1:K 113 | % sum(H(q(Lamba(k)))) 114 | logBk = -(v(k)/2)*log(det(W(:,:,k))) - (v(k)*D/2)*log(2)... 115 | - (D*(D-1)/4)*log(pi) - sum(gammaln(0.5*(v(k) + 1 - [1:D])));; 116 | H = H -logBk - 0.5*(v(k) -D-1)*logLambdaTilde(k) + 0.5*v(k)*D; 117 | % for Lt1 - third term 118 | trSW(k) = trace(v(k)*S(:,:,k)*W(:,:,k)); 119 | diff = xbar(:,k) - m(:,k); 120 | xbarWxbar(k) = diff'*W(:,:,k)*diff; 121 | % for Lt4 - Fourth term 122 | diff = m(:,k) - m0; 123 | mWm(k) = diff'*W(:,:,k)*diff; 124 | trW0invW(k) = trace(W0inv*W(:,:,k)); 125 | end 126 | 127 | Lt1 = 0.5*sum(Nk.*(logLambdaTilde' - D./beta... 
128 | - trSW' - v.*xbarWxbar' - D*log(2*pi))); 129 | Lt2 = sum(Nk.*logPiTilde); 130 | Lt3 = logCalpha0 + (alpha0 -1)*sum(logPiTilde); 131 | Lt41 = 0.5*sum(D*log(beta0/(2*pi)) + logLambdaTilde' - D*beta0./beta - beta0.*v.*mWm'); 132 | Lt42 = K*logB0 + 0.5*(v0-D-1)*sum(logLambdaTilde) - 0.5*sum(v.*trW0invW'); 133 | Lt4 = Lt41+Lt42; 134 | Lt5 = sum(sum(r.*logr)); 135 | Lt6 = sum((alpha - 1).*logPiTilde) + logCalpha; 136 | Lt7 = 0.5*sum(logLambdaTilde' + D.*log(beta/(2*pi))) - 0.5*D*K - H; 137 | 138 | %Bishop's Lower Bound 139 | L(iter) = Lt1 + Lt2 + Lt3 + Lt4 - Lt5 - Lt6 - Lt7; 140 | 141 | % warning if lower bound decreses 142 | if iter>2 && L(iter)1 179 | likIncr = abs((L(iter)-L(iter-1))/L(iter-1)); 180 | end 181 | if likIncr < options.threshold 182 | break; 183 | end 184 | end 185 | 186 | out.alpha = alpha; 187 | out.beta = beta; 188 | out.m = m; 189 | out.W = W; 190 | out.v = v; 191 | out.L = L; 192 | 193 | 194 | -------------------------------------------------------------------------------- /variational/VBEMGMM/gmmVBEMdemo.m: -------------------------------------------------------------------------------- 1 | % This file illustrates the usage of the function 'gmmVBEM.m' 2 | % Refer to Bishop's book for notation and details 3 | % @book{bishop2006pra, 4 | % title={{Pattern recognition and machine learning}}, 5 | % author={Bishop, C.M.}, 6 | % year={2006}, 7 | % publisher={Springer} 8 | % } 9 | % This function needs faithful.txt, gmmVEBM.m, MyEllipse.m 10 | % Written by Emtiyaz, CS, UBC 11 | % June, 2007 12 | 13 | %read data 14 | fid = fopen('faithful.txt','r'); 15 | x = fscanf(fid,'%f',[2,272]); 16 | fclose(fid); 17 | train_data = x; 18 | %standardize the data 19 | train_data = train_data - repmat(mean(train_data,2),1,size(train_data,2)); 20 | train_data = train_data./repmat(var(train_data,[],2),1,size(train_data,2)); 21 | [dim N] = size(train_data); 22 | 23 | %%%%%%%%%%%%%%%%%%%% 24 | % GMM VBEM clustering 25 | %%%%%%%%%%%%%%%%%%%% 26 | % initialize with EM algorithm 27 | ncentres = 15; 28 | mix = gmm(2, ncentres, 'full'); 29 | options = foptions; 30 | options(14) = 10; 31 | mix = gmminit(mix, train_data', options); 32 | maxIter = 30; options(3) = 0.1; options(14) = maxIter; 33 | [mix, options, errlog] = gmmem(mix, train_data', options); 34 | % intialize the priors 35 | PriorPar.alpha = .001; 36 | PriorPar.mu = zeros(dim,1); 37 | PriorPar.beta = 1; 38 | PriorPar.W = 200*eye(dim); 39 | PriorPar.v = 20; 40 | 41 | % set the options for VBEM 42 | clear options; 43 | options.maxIter = 100; 44 | options.threshold = 1e-5; 45 | options.displayFig = 1; 46 | options.displayIter = 1; 47 | 48 | % Call the function 49 | [out] = gmmVBEM(train_data, mix, PriorPar, options); 50 | %plot lower bound 51 | figure 52 | plot(out.L) 53 | 54 | 55 | -------------------------------------------------------------------------------- /variational/VBEMGMM/gmmVBEMorig.m: -------------------------------------------------------------------------------- 1 | function [out] = gmmVBEM1(x, mix, PriorPar, options) 2 | 3 | % Variational Bayes EM algorithm for Gaussian Mixture Model 4 | % This implementation is based on Bishop's Book 5 | % Refer to Bishop's book for notation and details 6 | % @book{bishop2006pra, 7 | % title={{Pattern recognition and machine learning}}, 8 | % author={Bishop, C.M.}, 9 | % year={2006}, 10 | % publisher={Springer} 11 | % } 12 | % Function uses inputs similar to Netlab 13 | %%%%%%%%%%%%%%%% 14 | % INPUT 15 | % D is the dimension, N is the number of Data points 16 | % x - training data of size DxN 17 
| % mix - gmm model initialize with netlab's gmmem function 18 | % PriorPar - structure containing priors 19 | % options - options for maxIter, threshold etc. etc. 20 | % FOR DETAILS OF ABOVE VARIABLE, REFER TO EXAMPLE FILE 21 | % out is the output vector (see the end of this file for details) 22 | %%%%%%%%%%%%%%%% 23 | % Written by Emtiyaz, CS, UBC 24 | % June, 2007 25 | 26 | K = mix.ncentres; 27 | 28 | % priors 29 | alpha0 = PriorPar.alpha; 30 | m0 = PriorPar.mu; 31 | beta0 = PriorPar.beta; 32 | W0 = PriorPar.W; 33 | v0 = PriorPar.v; 34 | 35 | W0inv = inv(W0); 36 | [D N] = size(x); 37 | 38 | %initialize parameters 39 | alpha = mix.priors'; 40 | m = mix.centres'; 41 | beta = repmat(beta0,K,1); 42 | W = mix.covars; 43 | v = repmat(v0,K,1); 44 | %initialization of some auxiliary variables 45 | E = zeros(N,K); 46 | xbar = zeros(D,K); 47 | S = zeros(D,D,K); 48 | Nk = zeros(K,1); 49 | likIncr = options.threshold + eps; 50 | 51 | % Main loop of algorithm 52 | for iter = 1:options.maxIter 53 | % Calculate r 54 | psiAlphaHat = psi(0,sum(alpha)); 55 | logPiTilde = psi(0,alpha) - psiAlphaHat; 56 | const = D*log(2); 57 | for k = 1:K 58 | t1 = psi(0, 0.5*repmat(v(k)+1,D,1) - [1:D]'); 59 | logLambdaTilde(k) = sum(t1) + const + log(det(W(:,:,k))); 60 | for n = 1:N 61 | % Calculate E 62 | diff = x(:,n) - m(:,k); 63 | E(n,k) = D/beta(k) + v(k)*diff'*W(:,:,k)*diff; 64 | end 65 | end 66 | logRho = repmat(logPiTilde' + 0.5*logLambdaTilde, N,1) - 0.5*E; 67 | logSumRho = logsumexp(logRho,2); 68 | logr = logRho - repmat(logSumRho, 1,K); 69 | r = exp(logr); 70 | 71 | % compute Lower bound (refer to Bishop for these terms) 72 | % C(alpha0) 73 | logCalpha0 = gammaln(K*alpha0) - K*gammaln(alpha0); 74 | % B(lambda0) 75 | logB0 = (v0/2)*det(W0inv) - (v0*D/2)*log(2) ... 76 | - (D*(D-1)/4)*log(pi) - sum(gammaln(0.5*(v0+1 -[1:D]))); 77 | % log(C(alpha)) 78 | logCalpha = gammaln(sum(alpha)) - sum(gammaln(alpha)); 79 | % Various other parameters for different terms 80 | H =0; 81 | for k = 1:K 82 | % sum(H(q(Lamba(k)))) 83 | logBk = -(v(k)/2)*log(det(W(:,:,k))) - (v(k)*D/2)*log(2)... 84 | - (D*(D-1)/4)*log(pi) - sum(gammaln(0.5*(v(k) + 1 - [1:D])));; 85 | H = H -logBk - 0.5*(v(k) -D-1)*logLambdaTilde(k) + 0.5*v(k)*D; 86 | % for Lt1 - third term 87 | trSW(k) = trace(v(k)*S(:,:,k)*W(:,:,k)); 88 | diff = xbar(:,k) - m(:,k); 89 | xbarWxbar(k) = diff'*W(:,:,k)*diff; 90 | % for Lt4 - Fourth term 91 | diff = m(:,k) - m0; 92 | mWm(k) = diff'*W(:,:,k)*diff; 93 | trW0invW(k) = trace(W0inv*W(:,:,k)); 94 | end 95 | 96 | Lt1 = 0.5*sum(Nk.*(logLambdaTilde' - D./beta... 
97 | - trSW' - v.*xbarWxbar' - D*log(2*pi))); 98 | Lt2 = sum(Nk.*logPiTilde); 99 | Lt3 = logCalpha0 + (alpha0 -1)*sum(logPiTilde); 100 | Lt41 = 0.5*sum(D*log(beta0/(2*pi)) + logLambdaTilde' - D*beta0./beta - beta0.*v.*mWm'); 101 | Lt42 = K*logB0 + 0.5*(v0-D-1)*sum(logLambdaTilde) - 0.5*sum(v.*trW0invW'); 102 | Lt4 = Lt41+Lt42; 103 | Lt5 = sum(sum(r.*logr)); 104 | Lt6 = sum((alpha - 1).*logPiTilde) + logCalpha; 105 | Lt7 = 0.5*sum(logLambdaTilde' + D.*log(beta/(2*pi))) - 0.5*D*K - H; 106 | 107 | %Bishop's Lower Bound 108 | L(iter) = Lt1 + Lt2 + Lt3 + Lt4 - Lt5 - Lt6 - Lt7; 109 | 110 | % warning if lowe bound decreses 111 | if iter>2 & L(iter)1 158 | likIncr = abs((L(iter)-L(iter-1))/L(iter-1)) 159 | end 160 | if likIncr < options.threshold 161 | break; 162 | end 163 | end 164 | 165 | out.alpha = alpha; 166 | out.beta = beta; 167 | out.m = m; 168 | out.W = W; 169 | out.v = v; 170 | out.L = L; 171 | 172 | 173 | -------------------------------------------------------------------------------- /variational/VBEMGMM/logsumexp.m: -------------------------------------------------------------------------------- 1 | function s = logsumexp(b, dim) 2 | % s = logsumexp(b) by Tom Minka 3 | % Returns s(i) = log(sum(exp(b(:,i)))) while avoiding numerical underflow. 4 | % s = logsumexp(b, dim) sums over dimension 'dim' instead of summing over rows 5 | 6 | if nargin < 2 % if 2nd argument is missing 7 | dim = 1; 8 | end 9 | 10 | [B, junk] = max(b,[],dim); 11 | dims = ones(1,ndims(b)); 12 | dims(dim) = size(b,dim); 13 | b = b - repmat(B, dims); 14 | s = B + log(sum(exp(b),dim)); 15 | i = find(~isfinite(B)); 16 | if ~isempty(i) 17 | s(i) = B(i); 18 | end 19 | -------------------------------------------------------------------------------- /variational/default.nix: -------------------------------------------------------------------------------- 1 | { nixpkgs ? import {}, compiler ? 
"default" }: 2 | 3 | let 4 | 5 | inherit (nixpkgs) pkgs; 6 | 7 | f = { mkDerivation, base, datasets, hmatrix, mtl, random-fu 8 | , random-source, stdenv, typelits-witnesses, containers 9 | , ghc-prim, vector, cassava, bytestring 10 | }: 11 | mkDerivation { 12 | pname = "variational"; 13 | version = "0.1.0.0"; 14 | src = ./.; 15 | isLibrary = false; 16 | isExecutable = true; 17 | executableHaskellDepends = [ 18 | base datasets hmatrix mtl random-fu random-source 19 | typelits-witnesses containers ghc-prim vector cassava bytestring 20 | ]; 21 | license = stdenv.lib.licenses.bsd3; 22 | }; 23 | 24 | haskellPackages = if compiler == "default" 25 | then pkgs.haskellPackages 26 | else pkgs.haskell.packages.${compiler}; 27 | 28 | drv = haskellPackages.callPackage f {}; 29 | 30 | in 31 | 32 | if pkgs.lib.inNixShell then drv.env else drv 33 | -------------------------------------------------------------------------------- /variational/src/OldFaithful.hs: -------------------------------------------------------------------------------- 1 | module OldFaithful where 2 | 3 | data OldFaithful = OF 4 | { eruptions :: Double -- ^ duration of eruption in minutes 5 | , waiting :: Double -- ^ waiting time until next eruption 6 | } deriving Show 7 | 8 | oldFaithful :: [OldFaithful] 9 | oldFaithful = 10 | [ OF 3.600 79, 11 | OF 1.800 54, 12 | OF 3.333 74, 13 | OF 2.283 62, 14 | OF 4.533 85, 15 | OF 2.883 55, 16 | OF 4.700 88, 17 | OF 3.600 85, 18 | OF 1.950 51, 19 | OF 4.350 85, 20 | OF 1.833 54, 21 | OF 3.917 84, 22 | OF 4.200 78, 23 | OF 1.750 47, 24 | OF 4.700 83, 25 | OF 2.167 52, 26 | OF 1.750 62, 27 | OF 4.800 84, 28 | OF 1.600 52, 29 | OF 4.250 79, 30 | OF 1.800 51, 31 | OF 1.750 47, 32 | OF 3.450 78, 33 | OF 3.067 69, 34 | OF 4.533 74, 35 | OF 3.600 83, 36 | OF 1.967 55, 37 | OF 4.083 76, 38 | OF 3.850 78, 39 | OF 4.433 79, 40 | OF 4.300 73, 41 | OF 4.467 77, 42 | OF 3.367 66, 43 | OF 4.033 80, 44 | OF 3.833 74, 45 | OF 2.017 52, 46 | OF 1.867 48, 47 | OF 4.833 80, 48 | OF 1.833 59, 49 | OF 4.783 90, 50 | OF 4.350 80, 51 | OF 1.883 58, 52 | OF 4.567 84, 53 | OF 1.750 58, 54 | OF 4.533 73, 55 | OF 3.317 83, 56 | OF 3.833 64, 57 | OF 2.100 53, 58 | OF 4.633 82, 59 | OF 2.000 59, 60 | OF 4.800 75, 61 | OF 4.716 90, 62 | OF 1.833 54, 63 | OF 4.833 80, 64 | OF 1.733 54, 65 | OF 4.883 83, 66 | OF 3.717 71, 67 | OF 1.667 64, 68 | OF 4.567 77, 69 | OF 4.317 81, 70 | OF 2.233 59, 71 | OF 4.500 84, 72 | OF 1.750 48, 73 | OF 4.800 82, 74 | OF 1.817 60, 75 | OF 4.400 92, 76 | OF 4.167 78, 77 | OF 4.700 78, 78 | OF 2.067 65, 79 | OF 4.700 73, 80 | OF 4.033 82, 81 | OF 1.967 56, 82 | OF 4.500 79, 83 | OF 4.000 71, 84 | OF 1.983 62, 85 | OF 5.067 76, 86 | OF 2.017 60, 87 | OF 4.567 78, 88 | OF 3.883 76, 89 | OF 3.600 83, 90 | OF 4.133 75, 91 | OF 4.333 82, 92 | OF 4.100 70, 93 | OF 2.633 65, 94 | OF 4.067 73, 95 | OF 4.933 88, 96 | OF 3.950 76, 97 | OF 4.517 80, 98 | OF 2.167 48, 99 | OF 4.000 86, 100 | OF 2.200 60, 101 | OF 4.333 90, 102 | OF 1.867 50, 103 | OF 4.817 78, 104 | OF 1.833 63, 105 | OF 4.300 72, 106 | OF 4.667 84, 107 | OF 3.750 75, 108 | OF 1.867 51, 109 | OF 4.900 82, 110 | OF 2.483 62, 111 | OF 4.367 88, 112 | OF 2.100 49, 113 | OF 4.500 83, 114 | OF 4.050 81, 115 | OF 1.867 47, 116 | OF 4.700 84, 117 | OF 1.783 52, 118 | OF 4.850 86, 119 | OF 3.683 81, 120 | OF 4.733 75, 121 | OF 2.300 59, 122 | OF 4.900 89, 123 | OF 4.417 79, 124 | OF 1.700 59, 125 | OF 4.633 81, 126 | OF 2.317 50, 127 | OF 4.600 85, 128 | OF 1.817 59, 129 | OF 4.417 87, 130 | OF 2.617 53, 131 | OF 4.067 69, 132 | OF 4.250 77, 133 | 
OF 1.967 56, 134 | OF 4.600 88, 135 | OF 3.767 81, 136 | OF 1.917 45, 137 | OF 4.500 82, 138 | OF 2.267 55, 139 | OF 4.650 90, 140 | OF 1.867 45, 141 | OF 4.167 83, 142 | OF 2.800 56, 143 | OF 4.333 89, 144 | OF 1.833 46, 145 | OF 4.383 82, 146 | OF 1.883 51, 147 | OF 4.933 86, 148 | OF 2.033 53, 149 | OF 3.733 79, 150 | OF 4.233 81, 151 | OF 2.233 60, 152 | OF 4.533 82, 153 | OF 4.817 77, 154 | OF 4.333 76, 155 | OF 1.983 59, 156 | OF 4.633 80, 157 | OF 2.017 49, 158 | OF 5.100 96, 159 | OF 1.800 53, 160 | OF 5.033 77, 161 | OF 4.000 77, 162 | OF 2.400 65, 163 | OF 4.600 81, 164 | OF 3.567 71, 165 | OF 4.000 70, 166 | OF 4.500 81, 167 | OF 4.083 93, 168 | OF 1.800 53, 169 | OF 3.967 89, 170 | OF 2.200 45, 171 | OF 4.150 86, 172 | OF 2.000 58, 173 | OF 3.833 78, 174 | OF 3.500 66, 175 | OF 4.583 76, 176 | OF 2.367 63, 177 | OF 5.000 88, 178 | OF 1.933 52, 179 | OF 4.617 93, 180 | OF 1.917 49, 181 | OF 2.083 57, 182 | OF 4.583 77, 183 | OF 3.333 68, 184 | OF 4.167 81, 185 | OF 4.333 81, 186 | OF 4.500 73, 187 | OF 2.417 50, 188 | OF 4.000 85, 189 | OF 4.167 74, 190 | OF 1.883 55, 191 | OF 4.583 77, 192 | OF 4.250 83, 193 | OF 3.767 83, 194 | OF 2.033 51, 195 | OF 4.433 78, 196 | OF 4.083 84, 197 | OF 1.833 46, 198 | OF 4.417 83, 199 | OF 2.183 55, 200 | OF 4.800 81, 201 | OF 1.833 57, 202 | OF 4.800 76, 203 | OF 4.100 84, 204 | OF 3.966 77, 205 | OF 4.233 81, 206 | OF 3.500 87, 207 | OF 4.366 77, 208 | OF 2.250 51, 209 | OF 4.667 78, 210 | OF 2.100 60, 211 | OF 4.350 82, 212 | OF 4.133 91, 213 | OF 1.867 53, 214 | OF 4.600 78, 215 | OF 1.783 46, 216 | OF 4.367 77, 217 | OF 3.850 84, 218 | OF 1.933 49, 219 | OF 4.500 83, 220 | OF 2.383 71, 221 | OF 4.700 80, 222 | OF 1.867 49, 223 | OF 3.833 75, 224 | OF 3.417 64, 225 | OF 4.233 76, 226 | OF 2.400 53, 227 | OF 4.800 94, 228 | OF 2.000 55, 229 | OF 4.150 76, 230 | OF 1.867 50, 231 | OF 4.267 82, 232 | OF 1.750 54, 233 | OF 4.483 75, 234 | OF 4.000 78, 235 | OF 4.117 79, 236 | OF 4.083 78, 237 | OF 4.267 78, 238 | OF 3.917 70, 239 | OF 4.550 79, 240 | OF 4.083 70, 241 | OF 2.417 54, 242 | OF 4.183 86, 243 | OF 2.217 50, 244 | OF 4.450 90, 245 | OF 1.883 54, 246 | OF 1.850 54, 247 | OF 4.283 77, 248 | OF 3.950 79, 249 | OF 2.333 64, 250 | OF 4.150 75, 251 | OF 2.350 47, 252 | OF 4.933 86, 253 | OF 2.900 63, 254 | OF 4.583 85, 255 | OF 3.833 82, 256 | OF 2.083 57, 257 | OF 4.367 82, 258 | OF 2.133 67, 259 | OF 4.350 74, 260 | OF 2.200 54, 261 | OF 4.450 83, 262 | OF 3.567 73, 263 | OF 4.500 73, 264 | OF 4.150 88, 265 | OF 3.817 80, 266 | OF 3.917 71, 267 | OF 4.450 83, 268 | OF 2.000 56, 269 | OF 4.283 79, 270 | OF 4.767 78, 271 | OF 4.533 84, 272 | OF 1.850 58, 273 | OF 4.250 83, 274 | OF 1.983 43, 275 | OF 2.250 60, 276 | OF 4.750 75, 277 | OF 4.117 81, 278 | OF 2.150 46, 279 | OF 4.417 90, 280 | OF 1.817 46, 281 | OF 4.467 74 ] 282 | 283 | -------------------------------------------------------------------------------- /variational/variational.cabal: -------------------------------------------------------------------------------- 1 | -- Initial variational.cabal generated by cabal init. 
For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | name: variational 5 | version: 0.1.0.0 6 | -- synopsis: 7 | -- description: 8 | license: BSD3 9 | license-file: LICENSE 10 | author: Dominic Steinitz 11 | maintainer: dominic@steinitz.org 12 | -- copyright: 13 | category: Math 14 | build-type: Simple 15 | extra-source-files: ChangeLog.md 16 | cabal-version: >=1.10 17 | 18 | executable variational 19 | main-is: Main.hs 20 | -- other-modules: 21 | -- other-extensions: 22 | build-depends: base >=4.9 && <4.10, 23 | datasets, 24 | random-fu, 25 | random-source, 26 | hmatrix, 27 | typelits-witnesses, 28 | mtl, 29 | containers, 30 | ghc-prim, 31 | vector, 32 | array, 33 | cassava, 34 | bytestring 35 | hs-source-dirs: src 36 | default-language: Haskell2010 37 | -------------------------------------------------------------------------------- /weno/SparseSundials/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , cmake 3 | , fetchurl 4 | , python 5 | , blas 6 | , liblapack 7 | , gfortran 8 | , suitesparse 9 | , lapackSupport ? true 10 | , kluSupport ? true }: 11 | 12 | # assert (!blas.isILP64) && (!lapack.isILP64); 13 | 14 | let liblapackShared = liblapack.override { 15 | shared = true; 16 | }; 17 | 18 | in 19 | 20 | stdenv.mkDerivation rec { 21 | pname = "sundials"; 22 | version = "5.3.0"; 23 | 24 | buildInputs = [ 25 | python 26 | ] ++ stdenv.lib.optionals (lapackSupport) [ 27 | gfortran 28 | blas 29 | liblapack 30 | ] 31 | # KLU support is based on Suitesparse. 32 | # It is tested upstream according to the section 1.1.4 of 33 | # [INSTALL_GUIDE.pdf](https://raw.githubusercontent.com/LLNL/sundials/master/INSTALL_GUIDE.pdf) 34 | ++ stdenv.lib.optionals (kluSupport) [ 35 | suitesparse 36 | ]; 37 | 38 | nativeBuildInputs = [ cmake ]; 39 | 40 | src = fetchurl { 41 | url = "https://computation.llnl.gov/projects/${pname}/download/${pname}-${version}.tar.gz"; 42 | sha256 = "19xwi7pz35s2nqgldm6r0jl2k0bs36zhbpnmmzc56s1n3bhzgpw8"; 43 | }; 44 | 45 | patches = [ 46 | (fetchurl { 47 | # https://github.com/LLNL/sundials/pull/19 48 | url = "https://github.com/LLNL/sundials/commit/1350421eab6c5ab479de5eccf6af2dcad1eddf30.patch"; 49 | sha256 = "0g67lixp9m85fqpb9rzz1hl1z8ibdg0ldwq5z6flj5zl8a7cw52l"; 50 | }) 51 | ]; 52 | 53 | cmakeFlags = [ 54 | "-DEXAMPLES_INSTALL_PATH=${placeholder "out"}/share/examples" 55 | ] ++ stdenv.lib.optionals (lapackSupport) [ 56 | "-DSUNDIALS_INDEX_TYPE=int32_t" 57 | "-DLAPACK_ENABLE=ON" 58 | # "-DLAPACK_LIBRARIES=${lapack}/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary}" 59 | "-DLAPACK_LIBRARIES=${liblapackShared}/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary};${liblapackShared}/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}" 60 | ] ++ stdenv.lib.optionals (kluSupport) [ 61 | "-DKLU_ENABLE=ON" 62 | "-DKLU_INCLUDE_DIR=${suitesparse}/include" 63 | "-DKLU_LIBRARY_DIR=${suitesparse}/lib" 64 | ]; 65 | 66 | doCheck = true; 67 | checkPhase = "make test"; 68 | 69 | meta = with stdenv.lib; { 70 | description = "Suite of nonlinear differential/algebraic equation solvers"; 71 | homepage = "https://computation.llnl.gov/projects/sundials"; 72 | platforms = platforms.all; 73 | maintainers = with maintainers; [ flokli idontgetoutmuch ]; 74 | license = licenses.bsd3; 75 | }; 76 | } 77 | -------------------------------------------------------------------------------- /weno/minimal.nix: -------------------------------------------------------------------------------- 1 | # 
nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-20.03.tar.gz minimal.nix 2 | 3 | let 4 | 5 | # nixpkgsRev = "bc260badaebf67442befe20fb443034d3a91f2b3"; # 20.09-beta 6 | # nixpkgsSha256 = "1iysc4xyk88ngkfb403xfq5bs3zy29zfs83pn99kchxd45nbpb5q"; 7 | 8 | # mixpkgs = fetchTarball { 9 | # url = "https://github.com/nixos/nixpkgs/archive/${nixpkgsRev}.tar.gz"; 10 | # sha256 = nixpkgsSha256; 11 | # }; 12 | 13 | # mixpkgs = fetchTarball { 14 | # url = "https://github.com/NixOS/nixpkgs-channels/archive/nixos-20.03.tar.gz"; 15 | # sha256 = "1qbs7p0mmcmpg70ibd437hl57byqx5q0pc61p1dckrkazj7kq0pc"; 16 | # }; 17 | 18 | overlay1 = self: super: 19 | { 20 | sundials1 = self.callPackage ./SparseSundials { }; 21 | }; 22 | 23 | myHaskellPackageOverlay = self: super: { 24 | myHaskellPackages = super.haskellPackages.override { 25 | overrides = hself: hsuper: rec { 26 | 27 | tasty-golden = 28 | let newTastyGoldenSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/tasty-golden-2.3.3/tasty-golden-2.3.3.tar.gz"; 29 | sha256 = "0wgcs4pqr30bp801cyrg6g551i7q0vjjmd9gmk5jy44fgdhb7kkl"; 30 | }; 31 | tg = hself.callCabal2nix "tasty-golden" newTastyGoldenSrc {}; 32 | in 33 | super.haskell.lib.dontCheck tg; 34 | 35 | Frames = 36 | let newFramesSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/Frames-0.7.0/Frames-0.7.0.tar.gz"; 37 | sha256 = "1dxdwcz21rk83rp8b9l1jahdicmhywkpaa1n70fp7ihc07jghxmh"; 38 | }; 39 | f = hself.callCabal2nix "Frames" newFramesSrc {}; 40 | in 41 | super.haskell.lib.dontCheck f; 42 | 43 | vinyl = 44 | let newVinylSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/vinyl-0.13.0/vinyl-0.13.0.tar.gz"; 45 | sha256 = "05kj1ld70yrxfff3zlc0vr8b6r8pawikcmiyvv1cc8c40jypaji4"; 46 | }; 47 | v = hself.callCabal2nix "vinyl" newVinylSrc {}; 48 | in 49 | super.haskell.lib.dontCheck v; 50 | 51 | # inline-r = 52 | # let newInlineRSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/inline-r-0.10.3/inline-r-0.10.3.tar.gz"; 53 | # sha256 = "10v4d6ka27pxi54zcq1c3ic140v9dx6lb62l5dmxjmlb6522n3g7"; 54 | # }; 55 | # ir = hself.callCabal2nix "inline-r" newInlineRSrc {}; 56 | # in 57 | # super.haskell.lib.dontCheck ir; 58 | 59 | kalman1 = super.haskell.lib.dontCheck ( 60 | hself.callCabal2nix "kalman" (builtins.fetchGit { 61 | url = "file:///Users/dom/Kalman"; 62 | rev = "2314229a14ae25143f675a9eb08d8767a9c1ac56"; 63 | }) { }); 64 | 65 | hmatrix-sundials1 = super.haskell.lib.dontCheck ( 66 | hself.callCabal2nix "hmatrix-sundials" (builtins.fetchGit { 67 | url = "file:///Users/dom/hmatrix-sundials"; 68 | rev = "70db2c221ce2bb7f5a26a6689178ea3e1121b86b"; 69 | }) { sundials_arkode = self.sundials1; 70 | sundials_cvode = self.sundials1; 71 | klu = self.suitesparse; 72 | suitesparseconfig = self.suitesparse; 73 | sundials_sunlinsolklu = self.sundials1; 74 | sundials_sunmatrixsparse = self.sundials1; 75 | }); 76 | }; 77 | }; 78 | }; 79 | 80 | in 81 | 82 | { nixpkgs ? 
import { overlays = [ overlay1 myHaskellPackageOverlay ]; } }: 83 | 84 | let 85 | 86 | inherit (nixpkgs) pkgs; 87 | inherit (pkgs) myHaskellPackages; 88 | 89 | haskellDeps = ps: with ps; [ 90 | base 91 | cassava 92 | Frames 93 | hmatrix-sundials1 94 | kalman1 95 | lens 96 | (nixpkgs.haskell.lib.dontCheck inline-r) 97 | ]; 98 | 99 | ghc = myHaskellPackages.ghcWithPackages haskellDeps; 100 | 101 | nixPackages = [ 102 | ghc 103 | myHaskellPackages.cabal-install 104 | myHaskellPackages.stack 105 | myHaskellPackages.lhs2tex 106 | pkgs.R 107 | pkgs.rPackages.ggplot2 108 | ]; 109 | 110 | in 111 | 112 | pkgs.mkShell { 113 | buildInputs = [ 114 | nixPackages 115 | ]; 116 | } 117 | -------------------------------------------------------------------------------- /weno/shell.nix: -------------------------------------------------------------------------------- 1 | let overlay1 = self: super: 2 | { 3 | sundials1 = self.callPackage ./SparseSundials { }; 4 | }; 5 | 6 | myHaskellPackageOverlay = self: super: { 7 | myHaskellPackages = super.haskellPackages.override { 8 | overrides = hself: hsuper: rec { 9 | 10 | tasty-golden = 11 | let newTastyGoldenSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/tasty-golden-2.3.3/tasty-golden-2.3.3.tar.gz"; 12 | sha256 = "0wgcs4pqr30bp801cyrg6g551i7q0vjjmd9gmk5jy44fgdhb7kkl"; 13 | }; 14 | tg = hself.callCabal2nix "tasty-golden" newTastyGoldenSrc {}; 15 | in 16 | super.haskell.lib.dontCheck tg; 17 | 18 | BlogLiterately1 = 19 | let newBlogLiteratelySrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/BlogLiterately-0.8.6.3/BlogLiterately-0.8.6.3.tar.gz"; 20 | sha256 = "1z0c4lj4z1j138vh505bvsg5c5v557b7bxlpipjrlc14mvcm7irw"; 21 | }; 22 | bl = hself.callCabal2nix "BlogLiterately" newBlogLiteratelySrc {}; 23 | in 24 | super.haskell.lib.doJailbreak (super.haskell.lib.dontCheck bl); 25 | 26 | haxr = 27 | let newHaxrSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/haxr-3000.11.4.1/haxr-3000.11.4.1.tar.gz"; 28 | sha256 = "1mm83k75zdnbx440zczk60ii4nkzll6dyhl3fzj1c4idb37r801r"; 29 | }; 30 | hr = hself.callCabal2nix "haxr" newHaxrSrc {}; 31 | in 32 | super.haskell.lib.dontCheck hr; 33 | 34 | http-streams = 35 | let newHttpStreamSrc = builtins.fetchTarball { url = "https://hackage.haskell.org/package/http-streams-0.8.7.2/http-streams-0.8.7.2.tar.gz"; 36 | sha256 = "1kz1rs89ii6mzb63h55fj3d7k7zwxi5b1ks04kak2gs9s50xykqh"; 37 | }; 38 | hs = hself.callCabal2nix "http-streams" newHttpStreamSrc {}; 39 | in 40 | super.haskell.lib.dontCheck hs; 41 | 42 | hmatrix-sundials1 = super.haskell.lib.dontCheck ( 43 | hself.callCabal2nix "hmatrix-sundials" (builtins.fetchGit { 44 | url = "file:///Users/dom/hmatrix-sundials"; 45 | rev = "8333309c82b737247d3c14e66ae245febb1a6ab0"; 46 | }) { sundials_arkode = self.sundials1; 47 | sundials_cvode = self.sundials1; 48 | klu = self.suitesparse; 49 | suitesparseconfig = self.suitesparse; 50 | sundials_sunlinsolklu = self.sundials1; 51 | sundials_sunmatrixsparse = self.sundials1; 52 | }); 53 | }; 54 | }; 55 | }; 56 | 57 | in 58 | 59 | { nixpkgs ? 
import <nixpkgs> { overlays = [ overlay1 myHaskellPackageOverlay ]; } }: 60 | 61 | let 62 | 63 | inherit (nixpkgs) pkgs; 64 | inherit (pkgs) myHaskellPackages; 65 | 66 | haskellDeps = ps: with ps; [ 67 | base 68 | cassava 69 | dimensional 70 | integration 71 | monad-loops 72 | hmatrix-sundials1 73 | numbers 74 | my-inline-r 75 | # Naperian 76 | ]; 77 | 78 | ghc = myHaskellPackages.ghcWithPackages haskellDeps; 79 | 80 | my-python-packages = python-packages: with python-packages; [ 81 | numpy 82 | matplotlib 83 | ]; 84 | 85 | python-with-my-packages = pkgs.python3.withPackages my-python-packages; 86 | 87 | nixPackages = [ 88 | ghc 89 | myHaskellPackages.cabal-install 90 | myHaskellPackages.stack 91 | myHaskellPackages.lhs2tex 92 | # myHaskellPackages.BlogLiterately1 93 | pkgs.R 94 | pkgs.rPackages.ggplot2 95 | ]; 96 | 97 | in 98 | 99 | pkgs.mkShell { 100 | buildInputs = [ 101 | nixPackages 102 | python-with-my-packages 103 | # pkgs.sage 104 | ]; 105 | MYVARIABLE = "hi"; 106 | } 107 | --------------------------------------------------------------------------------
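A note on the fixed-point iteration in the RK2Imp listing above: `rk2Step` encodes the implicit midpoint (RK2) rule

$$
y_{n+1} = y_n + h\,f\!\left(t_n + \tfrac{h}{2}, \tfrac{1}{2}\left(y_n + y_{n+1}\right)\right)
$$

and the listing solves it by taking the 10th iterate of the corresponding fixed-point map. The sketch below is not part of the repository; the module name `RK2ImpConverged` and the function names are illustrative only. It iterates the same map until successive iterates agree to within a tolerance instead of stopping at a fixed count, and it avoids the partial `Num [a]` instance by using `zipWith` directly.

```
module RK2ImpConverged where

-- Van der Pol vector field with mu = 10.0, as in RK2Imp above.
vanDerPol :: Double -> [Double] -> [Double]
vanDerPol _t [y0, y1] = [y1, -y0 - mu * y1 * (y0 * y0 - 1)]
  where
    mu = 10.0
vanDerPol _ _ = error "vanDerPol: expected a two-component state"

-- One application of the fixed-point map
--   y  |->  y_n + h * f (t + h/2) ((y_n + y) / 2)
-- whose fixed point is the implicit midpoint step from y_n.
midpointMap :: Double -> Double -> [Double] -> [Double] -> [Double]
midpointMap h t yN y =
  zipWith (+) yN
              (map (h *) (vanDerPol (t + 0.5 * h)
                                    (zipWith (\a b -> 0.5 * (a + b)) yN y)))

-- Iterate until successive iterates agree componentwise to within tol,
-- rather than stopping at an arbitrary iteration number.
implicitMidpointStep :: Double -> Double -> Double -> [Double] -> [Double]
implicitMidpointStep tol h t yN = go yN
  where
    go y
      | maximum (map abs (zipWith (-) y' y)) < tol = y'
      | otherwise                                  = go y'
      where
        y' = midpointMap h t yN y

main :: IO ()
main = print (implicitMidpointStep 1.0e-12 1.0e-6 0.0 [1.0, 0.0])
```

With `h = 1.0e-6` starting from `[1.0, 0.0]` the iteration should settle after a handful of passes to approximately `[1.0, -1.0e-6]`, consistent with the first gsl value quoted in the RK2Imp listing.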