├── .gitignore ├── AI ├── DemoLayer.hs ├── DemoNeuron.hs ├── Layer.hs ├── Network.hs ├── Network │ ├── FeedForwardNetwork.hs │ └── SOM.hs ├── Neuron.hs ├── Trainer.hs ├── Trainer │ └── BackpropTrainer.hs └── Visualizations.hs ├── Changelog ├── LICENSE ├── LambdaNet.cabal ├── README.md ├── Setup.hs ├── docs ├── Makefile ├── _build │ ├── dirhtml │ │ ├── .buildinfo │ │ ├── _sources │ │ │ └── index.txt │ │ ├── _static │ │ │ ├── ajax-loader.gif │ │ │ ├── basic.css │ │ │ ├── default.css │ │ │ ├── doctools.js │ │ │ ├── jquery.js │ │ │ ├── pygments.css │ │ │ ├── searchtools.js │ │ │ ├── sidebar.js │ │ │ ├── underscore.js │ │ │ └── websupport.js │ │ ├── genindex │ │ │ └── index.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── search │ │ │ └── index.html │ │ └── searchindex.js │ ├── doctrees │ │ ├── environment.pickle │ │ └── index.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _sources │ │ └── index.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── default.css │ │ ├── doctools.js │ │ ├── jquery.js │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sidebar.js │ │ ├── underscore.js │ │ └── websupport.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── search.html │ │ └── searchindex.js ├── analysis.py ├── conf.py ├── docs.hs ├── images │ ├── bounded_uniform.png │ ├── derivative_reclu.png │ ├── derivative_sigmoid.png │ ├── derivative_tanh.png │ ├── normal.png │ ├── reclu.png │ ├── sigmoid.png │ ├── tanh.png │ └── uniform.png ├── index.rst └── make.bat ├── examples ├── Convolutional.hs ├── SOM.hs └── XOR.hs ├── stack.yaml └── test ├── Main.hs ├── TestDemoNeuron.hs ├── TestLayer.hs └── TestNeuron.hs /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | *.ann 4 | *.png 5 | 6 | .DS_Store 7 | docs/*.txt 8 | dist 9 | 10 | .hc 11 | .o 12 | lein 13 | -------------------------------------------------------------------------------- /AI/DemoLayer.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleContexts #-} 2 | {-# LANGUAGE RecordWildCards #-} 3 | 4 | module AI.DemoLayer 5 | ( LayerDefinition(..) 6 | , Layer(..) 7 | 8 | , Weights 9 | , Biases 10 | , LayerWidth 11 | , Connectivity 12 | 13 | --, createLayer 14 | ) where 15 | 16 | import AI.DemoNeuron 17 | 18 | import Numeric.LinearAlgebra 19 | import Numeric.LinearAlgebra.Data 20 | import Data.Binary 21 | import Data.Binary.Put 22 | import qualified Data.ByteString.Lazy as B 23 | import System.Random 24 | 25 | data LayerDefinition a = LayerDefinition { neuronType :: a 26 | , width :: LayerWidth 27 | , connectivity :: Connectivity 28 | } 29 | 30 | data Layer a = Layer { neuron :: a 31 | , weights :: Weights 32 | , biases :: Biases 33 | } deriving (Show) 34 | 35 | type Weights = Matrix Double 36 | type Biases = Vector Double 37 | type LayerWidth = Int 38 | type Connectivity = LayerWidth -> LayerWidth -> Weights 39 | 40 | instance (Neuron a) => Binary (Layer a) where 41 | put Layer{..} = do put weights; put biases 42 | get = do weights <- get; biases <- get; return Layer{..} 43 | 44 | -- createLayer :: (Neuron a, RandomGen g) 45 | -- => RandomTransform -> g -> LayerDefinition a 46 | -- -> LayerDefinition a -> Layer 47 | -- createLayer t g li lj = Layer (neuron lj) 48 | -------------------------------------------------------------------------------- /AI/DemoNeuron.hs: -------------------------------------------------------------------------------- 1 | module AI.DemoNeuron 2 | ( Neuron(..) 
3 | , L2Neuron(..) 4 | , ReducedNeuron(..) 5 | 6 | , NeuronWeights 7 | , Values 8 | , ActivationFunction 9 | , ActivationFunction' 10 | 11 | , sigmoidNeuron 12 | , tanhNeuron 13 | , recluNeuron 14 | 15 | , sigmoid, sigmoid' 16 | , tanh, tanh' 17 | , reclu, reclu' 18 | , l1Norm, l2Norm 19 | ) where 20 | 21 | import Numeric.LinearAlgebra 22 | import Numeric.LinearAlgebra.Data 23 | 24 | type ActivationFunction = Double -> Double 25 | type ActivationFunction' = Double -> Double 26 | 27 | data L2Neuron = L2Neuron deriving (Show) 28 | data ReducedNeuron = ReducedNeuron { activation :: ActivationFunction 29 | , activation' :: ActivationFunction' 30 | , description :: String 31 | } 32 | 33 | type NeuronWeights = Vector Double 34 | type Values = Vector Double 35 | type Activation = Double 36 | 37 | -- | A Neuron type has two functions -- evaluate and evaluate', 38 | -- both of which are functions from NeuronWeights to input values 39 | -- to doubles. 40 | class (Show a) => Neuron a where 41 | evaluate :: a -> NeuronWeights -> Values -> Activation 42 | evaluate' :: a -> NeuronWeights -> Values -> Activation 43 | 44 | instance Show ReducedNeuron where 45 | show = description 46 | 47 | instance Neuron ReducedNeuron where 48 | evaluate n weights values = f $ dot weights values 49 | where f = activation n 50 | 51 | evaluate' n weights values = f' $ dot weights values 52 | where f' = activation' n 53 | 54 | instance Neuron L2Neuron where 55 | evaluate n = l2Norm 56 | evaluate' n = l1Norm 57 | 58 | -- | Our provided neuron types: sigmoid, tanh, reclu 59 | sigmoidNeuron :: ReducedNeuron 60 | sigmoidNeuron = ReducedNeuron sigmoid sigmoid' "sigmoid" 61 | 62 | tanhNeuron :: ReducedNeuron 63 | tanhNeuron = ReducedNeuron tanh tanh' "tanh" 64 | 65 | recluNeuron :: ReducedNeuron 66 | recluNeuron = ReducedNeuron reclu reclu' "reclu" 67 | 68 | -- | Compute a dot product, but ensure that the dimensions of both 69 | -- vectors are the same size. 70 | l1Norm :: NeuronWeights -> Values -> Double 71 | l1Norm w v = if size w /= size v 72 | then error "Neuron NeuronWeights and values don't align" 73 | else dot w v 74 | 75 | -- | The sigmoid activation function, a standard activation function defined 76 | -- on the range (0, 1). 77 | sigmoid :: Double -> Activation 78 | sigmoid t = 1 / (1 + exp (-1 * t)) 79 | 80 | -- | The derivative of the sigmoid function conveniently can be computed in 81 | -- terms of the sigmoid function. 82 | sigmoid' :: Double -> Activation 83 | sigmoid' t = s * (1 - s) 84 | where s = sigmoid t 85 | 86 | -- | The hyperbolic tangent activation function is provided in Prelude. Here 87 | -- we provide the derivative. As with the sigmoid function, the derivative 88 | -- of tanh can be computed in terms of tanh. 89 | tanh' :: Double -> Activation 90 | tanh' t = 1 - s ^ 2 91 | where s = tanh t 92 | 93 | -- | The rectified linear activation function. This is a more "biologically 94 | -- accurate" activation function that still retains differentiability. 95 | reclu :: Double -> Activation 96 | reclu t = log (1 + exp t) 97 | 98 | -- | The derivative of the rectified linear activation function is just the 99 | -- sigmoid. 
100 | reclu' :: Double -> Activation
101 | reclu' = sigmoid
102 | 
103 | -- | Calculate the distance between a SOM neuron and an input
104 | l2Norm :: NeuronWeights -> Values -> Activation
105 | l2Norm a b = sqrt $ sum $ map (^2) $ zipWith (-) (toList a) (toList b)
106 | 
--------------------------------------------------------------------------------
/AI/Layer.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE RecordWildCards #-}
3 | 
4 | module AI.Layer
5 | ( LayerDefinition(..)
6 | , Layer(..)
7 | , Connectivity
8 | , RandomTransform
9 | , Randomization
10 | 
11 | , showableToLayer
12 | 
13 | , createLayer
14 | , scaleLayer
15 | , randomizeFully
16 | , randomizeLocally
17 | , connectFully
18 | , connectLocally
19 | 
20 | , randomList
21 | , boxMuller
22 | , normals
23 | , uniforms
24 | , boundedUniforms
25 | ) where
26 | 
27 | import Data.Binary (Binary (..), decode, encode)
28 | import AI.Neuron
29 | import Numeric.LinearAlgebra
30 | import System.Random
31 | 
32 | -- | The LayerDefinition type is an intermediate type initialized by the
33 | -- library user to define the different layers of the network.
34 | data LayerDefinition g = LayerDefinition { neuronDef :: Neuron
35 | , neuronCount :: Int
36 | , connect :: Connectivity
37 | , randomize :: Randomization g
38 | }
39 | 
40 | -- | The Layer type, which stores the weight matrix, the bias vector, and
41 | -- a neuron type.
42 | data Layer = Layer { weightMatrix :: Matrix Double
43 | , biasVector :: Vector Double
44 | , neuron :: Neuron
45 | } deriving Show
46 | 
47 | instance Binary Layer where
48 | put Layer{..} = do put weightMatrix; put biasVector
49 | get = do weightMatrix <- get; biasVector <- get; return Layer{..}
50 | 
51 | -- | Connectivity is the type alias for a function that defines the connective
52 | -- matrix for two layers (fully connected, convolutionally connected, etc.)
53 | -- and takes in the number of output and input neurons.
54 | type Connectivity = Int -> Int -> Matrix Double
55 | 
56 | -- | Randomization is the type alias for a function that defines
57 | -- the initial random values for the weight matrix and bias vector
58 | -- for two layers and takes in a random transformation on an infinite
59 | -- stream of uniformly generated numbers, a source of entropy,
60 | -- the number of output neurons, and the number of input neurons.
61 | type Randomization g = g -> RandomTransform -> Int -> Int -> (Matrix Double, Vector Double)
62 | 
63 | -- | ConvolutionalSettings is a type alias for the receptive field size,
64 | -- stride, zero-padding, the number of filters, the number of dimensions,
65 | -- and the width and height of the input and output fields.
66 | type ConvolutionalSettings = Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int
67 | 
68 | -- | A random transformation type alias. It is a transformation defined on an
69 | -- infinite list of uniformly distributed random numbers, and returns a list
70 | -- distributed on the transforming distribution.
71 | type RandomTransform = [Double] -> [Double]
72 | 
73 | -- | The createLayer function takes in a random transformation on an infinite
74 | -- stream of uniformly generated numbers, a source of entropy, and two
75 | -- layer definitions, one for the previous layer and one for the next layer.
76 | -- It returns a layer defined by the Layer type -- a weight matrix, a bias
77 | -- vector, and a neuron type.
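-- A hypothetical usage sketch (the definitions below are illustrative, not
-- part of the library): connecting a two-neuron layer to a one-neuron layer
-- with normally distributed starting weights yields a 1x2 weight matrix
-- (outputs x inputs):
--
-- > let defIn  = LayerDefinition sigmoidNeuron 2 connectFully randomizeFully
-- > let defOut = LayerDefinition sigmoidNeuron 1 connectFully randomizeFully
-- > let layer  = createLayer normals (mkStdGen 0) defIn defOut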
78 | createLayer :: (RandomGen g) 79 | => RandomTransform -> g -> LayerDefinition g -> LayerDefinition g -> Layer 80 | createLayer t g layerDef layerDef' = 81 | Layer (randomMatrix * connectivity i j) 82 | (randomVector * bias) 83 | (neuronDef layerDef) 84 | where (randomMatrix, randomVector) = randomize layerDef' g t i j 85 | i = neuronCount layerDef' 86 | j = neuronCount layerDef 87 | connectivity = connect layerDef' 88 | bias = i |> repeat 1 -- bias connectivity (full) 89 | 90 | scaleLayer :: Double -> Layer -> Layer 91 | scaleLayer factor l = 92 | Layer (factor `scale` weightMatrix l) (factor `scale` biasVector l) (neuron l) 93 | 94 | -- | The randomizeFully function takes in a source of entropy, the number of output 95 | -- neurons, and the number of input neurons, and returns a tuple of the 96 | -- a fully random matrix, and a fully random vector 97 | randomizeFully :: (RandomGen g) => Randomization g 98 | randomizeFully g t i j = (randomMatrix, randomVector) 99 | where randomMatrix = (i >< j) (randomList t g') 100 | randomVector = i |> randomList t g'' 101 | (g', g'') = split g 102 | 103 | -- | The randomizeLocally function takes in ConvolutionalSettings a source 104 | -- of entropy, a random transform, the number of output and input neurons. 105 | -- It returns a tuple with a random matrix and a random vector 106 | randomizeLocally :: (RandomGen g) => Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Randomization g 107 | randomizeLocally f s p k d w1 h1 w2 h2 g t i j = (randomMatrix, randomVector) 108 | where randomMatrix = fromLists (locallyRandomList f s p k d w1 h1 w2 h2 g' t i j) :: Matrix Double 109 | randomVector = i |> randomList t g'' 110 | (g', g'') = split g 111 | 112 | -- | The locallyRandomList function takes in ConvolutionalSettings a source 113 | -- of entropy, a random transform, the number of output and input neurons. 114 | -- It returns a list of lists of locally random numbers 115 | locallyRandomList :: (RandomGen g) => Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> g -> RandomTransform -> Int -> Int -> [[Double]] 116 | locallyRandomList f s p k d w1 h1 w2 h2 g t i j = 117 | if k == 0 then [] 118 | else filterValues ++ nextFilterValues 119 | where filterValues = [replicate (rowZeroOffset + colZeroOffset) 0 120 | ++ take (j - rowZeroOffset - colZeroOffset) (randomList t g') 121 | | n <- [0..div i k-1], 122 | let rowSize = w1 + 2 * p, 123 | let postsynPerFilter = rem n (div i k), 124 | let rowZeroOffset = (1 + s) * quot postsynPerFilter w2 * rowSize, 125 | let colZeroOffset = (1 + s) * mod n w2] 126 | nextFilterValues = locallyRandomList f s p (k - 1) k w1 h1 w2 h2 g'' t (i - div i k) j 127 | (g', g'') = split g 128 | 129 | -- | The connectFully function takes the number of input neurons for a layer, i, 130 | -- and the number of output neurons of a layer, j, and returns an i x j 131 | -- connectivity matrix for a fully connected network. 132 | connectFully :: Connectivity 133 | connectFully i j = (i >< j) (repeat 1) 134 | 135 | -- | The connectLocally function takes in ConvolutionalSettings and the number 136 | -- of output and input neurons 137 | connectLocally :: Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Connectivity 138 | connectLocally f s p k d w1 h1 w2 h2 i j = 139 | repmat (fromLists conn :: Matrix Double) k d 140 | where conn = [replicate rowZeroOffset 0 141 | ++ take (f * rowSize) (cycle fieldArea) 142 | ++ replicate (rowSize * colSize - rowSize * f - rowZeroOffset) 0 143 | | n <- [0.. 
quot i k-1],
144 | let rowSize = w1 + 2 * p,
145 | let colSize = h1 + 2 * p,
146 | let rowZeroOffset = (1 + s) * quot n w2 * rowSize,
147 | let fieldAreaZeroOffset = (1 + s) * mod n w2,
148 | let fieldArea = replicate fieldAreaZeroOffset 0
149 | ++ replicate f 1
150 | ++ replicate (rowSize - f - fieldAreaZeroOffset) 0]
151 | 
152 | -- | To go from a showable to a layer, we also need a neuron type,
153 | -- which is an unfortunate restriction owed to Haskell's inability to
154 | -- serialize functions.
155 | showableToLayer :: (RandomGen g) => (Layer, LayerDefinition g) -> Layer
156 | showableToLayer (s, d) = Layer (weightMatrix s) (biasVector s) (neuronDef d)
157 | 
158 | -- | Initialize an infinite random list given a random transform and a source
159 | -- of entropy.
160 | randomList :: RandomGen g => RandomTransform -> g -> [Double]
161 | randomList transform = transform . randoms
162 | 
163 | -- | Define a transformation on the uniform distribution to generate
164 | -- normally distributed numbers in Haskell (the Box-Muller transform)
165 | boxMuller :: Double -> Double -> (Double, Double)
166 | boxMuller x1 x2 = (z1, z2)
167 | where z1 = sqrt ((-2) * log x1) * cos (2 * pi * x2)
168 | z2 = sqrt ((-2) * log x1) * sin (2 * pi * x2)
169 | 
170 | -- | This is a function of type RandomTransform that transforms a list of
171 | -- uniformly distributed numbers to a list of normally distributed numbers.
172 | normals :: RandomTransform
173 | normals (x1:x2:xs) = z1:z2:normals xs
174 | where (z1, z2) = boxMuller x1 x2
175 | normals _ = []
176 | 
177 | -- | A non-transformation to return a list of uniformly distributed numbers
178 | -- from a list of uniformly distributed numbers. It's really a matter of
179 | -- naming consistency. It generates numbers on the range (0, 1]
180 | uniforms :: RandomTransform
181 | uniforms xs = xs
182 | 
183 | -- | An affine transformation to return a list of uniforms on the range
184 | -- (a, b]
185 | boundedUniforms :: (Double, Double) -> [Double] -> [Double]
186 | boundedUniforms (lower, upper) = map affine
187 | where affine x = lower + x * (upper - lower)
188 | 
--------------------------------------------------------------------------------
/AI/Network.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE InstanceSigs #-}
3 | {-# LANGUAGE TypeFamilies #-}
4 | {-# LANGUAGE UndecidableInstances #-}
5 | 
6 | module AI.Network
7 | ( Network(..) ) where
8 | 
9 | import AI.Layer
10 | import Numeric.LinearAlgebra
11 | import System.Random
12 | 
13 | -- | A network is anything that can make a prediction from an input vector and be built from a random transform, a source of entropy, and a network-specific parameter type.
14 | class Network a where
15 | type Parameters a g :: *
16 | 
17 | predict :: Vector Double -> a -> Vector Double
18 | createNetwork :: (RandomGen g) => RandomTransform -> g -> Parameters a g -> a
--------------------------------------------------------------------------------
/AI/Network/FeedForwardNetwork.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE TypeFamilies #-}
3 | {-# LANGUAGE UndecidableInstances #-}
4 | 
5 | module AI.Network.FeedForwardNetwork
6 | ( FeedForwardNetwork(..)
7 | 8 | , emptyFeedForwardNetwork 9 | , isEmptyFeedForwardNetwork 10 | , addFeedForwardNetworks 11 | 12 | , loadFeedForwardNetwork 13 | , saveFeedForwardNetwork 14 | 15 | , apply 16 | ) where 17 | 18 | import AI.Layer 19 | import AI.Network 20 | import AI.Neuron 21 | 22 | import Data.Binary (Binary (..), decode, encode) 23 | import qualified Data.ByteString.Lazy as B 24 | import Data.Monoid (Monoid (..)) 25 | import Numeric.LinearAlgebra 26 | import Numeric.LinearAlgebra.Data (cmap) 27 | import System.IO 28 | import System.Random 29 | 30 | -- | Networks are constructed front to back. Start by adding an input layer, 31 | -- then each hidden layer, and finally an output layer. 32 | data FeedForwardNetwork = FeedForwardNetwork { layers :: [Layer] } deriving Show 33 | 34 | -- | We gain the ability to combine two networks of the same proportions 35 | -- by abstracting a network as a monoid. This is useful in backpropagation 36 | -- for batch training 37 | instance Monoid FeedForwardNetwork where 38 | mempty = emptyFeedForwardNetwork 39 | mappend = addFeedForwardNetworks 40 | 41 | -- | A tuple of (input, expected output) 42 | type TrainingData = (Vector Double, Vector Double) 43 | 44 | -- | Our Unit, an empty network with no layers 45 | emptyFeedForwardNetwork :: FeedForwardNetwork 46 | emptyFeedForwardNetwork = FeedForwardNetwork [] 47 | 48 | -- | A boolean to check if the network is the unit network or not 49 | isEmptyFeedForwardNetwork :: FeedForwardNetwork -> Bool 50 | isEmptyFeedForwardNetwork n = null $ layers n 51 | 52 | -- | A function to combine two networks 53 | addFeedForwardNetworks :: FeedForwardNetwork -> FeedForwardNetwork -> FeedForwardNetwork 54 | addFeedForwardNetworks n1 n2 55 | | isEmptyFeedForwardNetwork n1 = n2 56 | | isEmptyFeedForwardNetwork n2 = n1 57 | | otherwise = 58 | FeedForwardNetwork $ zipWith combineLayers (layers n1) (layers n2) 59 | where combineLayers l1 l2 60 | = Layer (weightMatrix l1 + weightMatrix l2) 61 | (biasVector l1 + biasVector l2) 62 | (neuron l1) 63 | 64 | instance Network FeedForwardNetwork where 65 | type Parameters FeedForwardNetwork g = [LayerDefinition g] 66 | 67 | -- | Predict folds over each layer of the network using the input vector as the 68 | -- first value of the accumulator. It operates on whatever network you pass in. 69 | predict input network = foldl apply input (layers network) 70 | 71 | -- | The createNetwork function takes in a random transform used for weight 72 | -- initialization, a source of entropy, and a list of layer definitions, 73 | -- and returns a network with the weights initialized per the random transform. 74 | createNetwork t g (layerDef : layerDef' : otherLayerDefs) = 75 | FeedForwardNetwork (layer : layers restOfNetwork) 76 | where layer = createLayer t g' layerDef layerDef' 77 | restOfNetwork = createNetwork t g'' (layerDef' : otherLayerDefs) 78 | (g', g'') = split g 79 | createNetwork _ _ _ = emptyFeedForwardNetwork 80 | 81 | -- | A function used in the fold in predict that applies the activation 82 | -- function and pushes the input through a layer of the network. 83 | apply :: Vector Double -> Layer -> Vector Double 84 | apply vector layer = cmap sigma (weights #> vector + bias) 85 | where sigma = activation (neuron layer) 86 | weights = weightMatrix layer 87 | bias = biasVector layer 88 | 89 | -- | Given a filename and a network, we want to save the weights and biases 90 | -- of the network to the file for later use. 
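-- A hypothetical round trip (the file name and layer definitions here are
-- illustrative):
--
-- > saveFeedForwardNetwork "xor.ann" trained
-- > restored <- loadFeedForwardNetwork "xor.ann" [l, l', l'']
--
-- Only the weights and biases are serialized, so loading needs the same
-- layer definitions that built the original network in order to restore the
-- neuron types.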
91 | saveFeedForwardNetwork :: (Binary Layer) => FilePath -> FeedForwardNetwork -> IO ()
92 | saveFeedForwardNetwork file n = B.writeFile file (encode (layers n))
93 | 
94 | -- | Given a filename and a list of layer definitions, we want to re-expand
95 | -- the saved data back into a network.
96 | loadFeedForwardNetwork :: (Binary Layer, RandomGen g) => FilePath -> [LayerDefinition g] -> IO FeedForwardNetwork
97 | loadFeedForwardNetwork file defs = B.readFile file >>= \sls ->
98 | return . FeedForwardNetwork $ zipWith (curry showableToLayer) (decode sls) defs
99 | 
--------------------------------------------------------------------------------
/AI/Network/SOM.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE InstanceSigs #-}
3 | {-# LANGUAGE TypeFamilies #-}
4 | {-# LANGUAGE UndecidableInstances #-}
5 | 
6 | module AI.Network.SOM
7 | ( SOM(..)
8 | , MapDefinition(..)
9 | 
10 | , randomNeuron
11 | , makeVectors
12 | , reshapeList
13 | , distance
14 | ) where
15 | 
16 | import AI.Layer
17 | import AI.Network
18 | import AI.Neuron
19 | 
20 | import Numeric.LinearAlgebra
21 | import System.Random
22 | 
23 | -- | The SOM definition is simple: it only contains a 2-dimensional list of weights
24 | data SOM = SOM { neuronMap :: [[Vector Double]] }
25 | 
26 | -- | The definition type for a SOM: it contains the dimensions of the map (x, y)
27 | -- and the dimension of the input vector (inputDim)
28 | data MapDefinition = MapDefinition { x :: Int
29 | , y :: Int
30 | , inputDim :: Int
31 | }
32 | 
33 | instance Network SOM where
34 | type Parameters SOM g = MapDefinition
35 | 
36 | predict :: Vector Double -> SOM -> Vector Double
37 | predict inputs network = inputs -- prediction is not yet implemented; the input passes through unchanged
38 | 
39 | -- | Create a SOM and initialize it with given weights
40 | createNetwork :: (RandomGen g) => RandomTransform -> g -> Parameters SOM g -> SOM
41 | createNetwork transformation g def = SOM randomVectors
42 | where randomVectors = reshapeList (x def) $
43 | makeVectors transformation g (inputDim def)
44 | (x def * y def)
45 | 
46 | -- | A helper function to reshape a 1D list into a 2D list
47 | reshapeList :: Int -> [a] -> [[a]]
48 | reshapeList x [] = [] -- an empty input yields no rows (avoids a trailing empty row)
49 | reshapeList x lst = h : reshapeList x t
50 | where (h, t) = splitAt x lst
51 | 
52 | -- | Create a random set of weights for a given neuron
53 | randomNeuron :: (RandomGen g) => RandomTransform -> g -> Int -> Vector Double
54 | randomNeuron transform g inputDim = inputDim |> randomList transform g
55 | 
56 | -- | Make a 1D list of vectors to be used by the SOM in creating a map of weights
57 | makeVectors :: (RandomGen g) => RandomTransform -> g -> Int -> Int -> [Vector Double]
58 | makeVectors transform g inputDim 0 = []
59 | makeVectors transform g inputDim num = randomNeuron transform g' inputDim :
60 | makeVectors transform g'' inputDim (num - 1)
61 | where (g', g'') = split g
62 | 
63 | -- | Calculate the distance between a SOM neuron and an input
64 | distance :: Vector Double -> Vector Double -> Double
65 | distance a b = sqrt $ sum $ map (^2) $ zipWith (-) (toList a) (toList b)
66 | 
--------------------------------------------------------------------------------
/AI/Neuron.hs:
--------------------------------------------------------------------------------
1 | module AI.Neuron
2 | ( Neuron(..)
3 | 4 | , ActivationFunction 5 | , ActivationFunction' 6 | , sigmoidNeuron 7 | , tanhNeuron 8 | , recluNeuron 9 | , l2Neuron 10 | 11 | , sigmoid 12 | , sigmoid' 13 | , tanh 14 | , tanh' 15 | , reclu 16 | , reclu' 17 | ) where 18 | 19 | -- | Using this structure allows users of the library to create their own 20 | -- neurons by creating two functions - an activation function and its 21 | -- derivative - and packaging them up into a neuron type. 22 | data Neuron = Neuron { activation :: ActivationFunction 23 | , activation' :: ActivationFunction' 24 | , description :: String 25 | } 26 | 27 | instance Show Neuron where 28 | show = description 29 | 30 | type ActivationFunction = Double -> Double 31 | type ActivationFunction' = Double -> Double 32 | 33 | -- | Our provided neuron types: sigmoid, tanh, reclu 34 | sigmoidNeuron :: Neuron 35 | sigmoidNeuron = Neuron sigmoid sigmoid' "sigmoid" 36 | 37 | tanhNeuron :: Neuron 38 | tanhNeuron = Neuron tanh tanh' "tanh" 39 | 40 | recluNeuron :: Neuron 41 | recluNeuron = Neuron reclu reclu' "reclu" 42 | 43 | l2Neuron :: Neuron 44 | l2Neuron = Neuron (^2) id "L2" 45 | 46 | -- | The sigmoid activation function, a standard activation function defined 47 | -- on the range (0, 1). 48 | sigmoid :: Double -> Double 49 | sigmoid t = 1 / (1 + exp (-1 * t)) 50 | 51 | -- | The derivative of the sigmoid function conveniently can be computed in 52 | -- terms of the sigmoid function. 53 | sigmoid' :: Double -> Double 54 | sigmoid' t = s * (1 - s) 55 | where s = sigmoid t 56 | 57 | -- | The hyperbolic tangent activation function is provided in Prelude. Here 58 | -- we provide the derivative. As with the sigmoid function, the derivative 59 | -- of tanh can be computed in terms of tanh. 60 | tanh' :: Double -> Double 61 | tanh' t = 1 - s ^ 2 62 | where s = tanh t 63 | 64 | -- | The rectified linear activation function. This is a more "biologically 65 | -- accurate" activation function that still retains differentiability. 66 | reclu :: Double -> Double 67 | reclu t = log (1 + exp t) 68 | 69 | -- | The derivative of the rectified linear activation function is just the 70 | -- sigmoid. 71 | reclu' :: Double -> Double 72 | reclu' = sigmoid 73 | -------------------------------------------------------------------------------- /AI/Trainer.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleContexts #-} 2 | {-# LANGUAGE MultiParamTypeClasses #-} 3 | 4 | module AI.Trainer 5 | ( Trainer(..) 6 | , CostFunction 7 | , CostFunction' 8 | , TrainingData 9 | , Selection 10 | , StopCondition 11 | 12 | , quadraticCost 13 | , quadraticCost' 14 | , softmax 15 | , softmaxCost 16 | , softmaxCost' 17 | , minibatch 18 | , online 19 | 20 | , trainNTimes 21 | , trainUntilErrorLessThan 22 | , trainUntil 23 | ) where 24 | 25 | import AI.Layer 26 | import AI.Network 27 | import AI.Neuron 28 | 29 | import Data.List.Split (chunksOf) 30 | import Numeric.LinearAlgebra 31 | import Numeric.LinearAlgebra.Data (size) 32 | import System.Random 33 | import System.Random.Shuffle (shuffle') 34 | 35 | -- | Trainer is a typeclass for all trainer types - a trainer will take in 36 | -- an instance of itself, a network, a list of training data, and return a 37 | -- new network trained on the data. 
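-- The provided instance (defined in AI.Trainer.BackpropTrainer) is
-- essentially:
--
-- > instance Trainer BackpropTrainer FeedForwardNetwork where
-- >   fit s t n examples = foldl (backprop t) n (s examples)
-- >   evaluate t n example = cost t (snd example) (predict (fst example) n)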
38 | class (Network n) => Trainer a n where
39 | fit :: Selection -> a -> n -> [TrainingData] -> n
40 | evaluate :: a -> n -> TrainingData -> Double
41 | 
42 | -- | A CostFunction is used for evaluating a network's performance on a given
43 | -- input
44 | type CostFunction = Vector Double -> Vector Double -> Double
45 | 
46 | -- | A CostFunction' (derivative) is used in backpropagation
47 | type CostFunction' = Vector Double -> Vector Double -> Vector Double
48 | 
49 | -- | A tuple of (input, expected output)
50 | type TrainingData = (Vector Double, Vector Double)
51 | 
52 | -- | A selection function for performing gradient descent
53 | type Selection = [TrainingData] -> [[TrainingData]]
54 | 
55 | -- | A predicate (given a network, trainer, a list of training
56 | -- data, and the number of [fit]s performed) that
57 | -- tells the trainer to stop training
58 | type StopCondition t n = n -> t -> [TrainingData] -> Int -> Bool
59 | 
60 | -- | The quadratic cost function: (1/2) * sum (y - a) ^ 2
61 | quadraticCost :: Vector Double -> Vector Double -> Double
62 | quadraticCost y a = sumElements $ 0.5 * (a - y) ** 2
63 | 
64 | -- | The derivative of the quadratic cost function: a - y
65 | quadraticCost' :: Vector Double -> Vector Double -> Vector Double
66 | quadraticCost' y a = a - y
67 | 
68 | -- | The softmax function: exp a / sum (exp a)
69 | -- Subtracts the maximum element before computing 'exp' to avoid numerical issues.
70 | softmax :: Vector Double -> Vector Double
71 | softmax a = (1 / sumElements a') `scale` a'
72 | where a' = cmap (\ x -> exp $ x - maxElement a) a
73 | 
74 | -- | The softmax cost function: - y <.> log p / size y, where p = softmax a
75 | softmaxCost :: Vector Double -> Vector Double -> Double
76 | softmaxCost y a = - (y <.> log (softmax a)) / fromIntegral (size y)
77 | 
78 | -- | The derivative of the softmax cost function: a - y
79 | softmaxCost' :: Vector Double -> Vector Double -> Vector Double
80 | softmaxCost' y a = (1.0 / fromIntegral (size y)) `scale` (a - y)
81 | 
82 | -- | The minibatch function becomes a Selection when partially applied
83 | -- with the minibatch size
84 | minibatch :: Int -> [TrainingData] -> [[TrainingData]]
85 | minibatch = chunksOf
86 | 
87 | -- | The online selection function: train on a single example at a time
88 | online :: [TrainingData] -> [[TrainingData]]
89 | online = minibatch 1
90 | 
91 | -- | This function returns true if the error of the network is less than
92 | -- a given error value, given a network, a trainer, a list of
93 | -- training data, and a counter (should start with 0)
94 | -- Note: Is there a way to have a counter with a recursive function
95 | -- without providing 0?
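-- For example, partially applying an error bound turns this into a
-- StopCondition (this is exactly what trainUntilErrorLessThan does below):
--
-- > trainUntil g network trainer online dat (networkErrorLessThan 0.01) 0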
96 | networkErrorLessThan :: (Trainer t n) => Double -> n -> t -> [TrainingData] -> Int -> Bool
97 | networkErrorLessThan err network trainer dat _ = meanError < err
98 | where meanError = sum errors / fromIntegral (length errors)
99 | errors = map (evaluate trainer network) dat
100 | 
101 | -- | Given a network, a trainer, a list of training data,
102 | -- and N, this function trains the network with the list of
103 | -- training data N times
104 | trainNTimes :: (Trainer t n, RandomGen g) => g -> n -> t -> Selection -> [TrainingData] -> Int -> n
105 | trainNTimes g network trainer s dat n =
106 | trainUntil g network trainer s dat completion 0
107 | where completion _ _ _ n' = n == n'
108 | 
109 | -- | Given a network, a trainer, a list of training data,
110 | -- and an error value, this function trains the network with the list of
111 | -- training data until the error of the network (calculated
112 | -- by averaging the errors of each training data) is less than
113 | -- the given error value
114 | trainUntilErrorLessThan :: (Trainer t n, RandomGen g) => g -> n -> t -> Selection -> [TrainingData] -> Double -> n
115 | trainUntilErrorLessThan g network trainer s dat err =
116 | trainUntil g network trainer s dat (networkErrorLessThan err) 0
117 | 
118 | -- | This function trains a network until a given StopCondition
119 | -- is satisfied.
120 | trainUntil :: (Trainer t n, RandomGen g) => g -> n -> t -> Selection -> [TrainingData] -> StopCondition t n -> Int -> n
121 | trainUntil g network trainer s dat completion n =
122 | if completion network trainer dat n
123 | then network
124 | else trainUntil g' network' trainer s (shuffle' dat (length dat) g'') completion (n+1)
125 | where network' = fit s trainer network dat
126 | (g', g'') = split g
127 | 
--------------------------------------------------------------------------------
/AI/Trainer/BackpropTrainer.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | {-# LANGUAGE MultiParamTypeClasses #-}
3 | module AI.Trainer.BackpropTrainer
4 | ( BackpropTrainer(..)
5 | 
6 | , backprop
7 | , inputs
8 | , outputs
9 | , deltas
10 | , hiddenDeltas
11 | , calculateNablas
12 | , fit
13 | , evaluate
14 | ) where
15 | 
16 | import AI.Layer
17 | import AI.Network
18 | import AI.Network.FeedForwardNetwork
19 | import AI.Neuron
20 | import AI.Trainer
21 | 
22 | import Numeric.LinearAlgebra
23 | 
24 | -- | A BackpropTrainer performs simple backpropagation on a neural network.
25 | -- It can be used as the basis for more complex trainers.
26 | data BackpropTrainer = BackpropTrainer { eta :: Double
27 | , cost :: CostFunction
28 | , cost' :: CostFunction'
29 | }
30 | 
31 | -- | Declare the BackpropTrainer to be an instance of Trainer.
32 | instance Trainer BackpropTrainer FeedForwardNetwork where
33 | fit s t n examples = foldl (backprop t) n $ s examples
34 | -- | Use the cost function to determine the error of a network
35 | evaluate t n example = cost t (snd example) (predict (fst example) n)
36 | 
37 | -- | Perform backpropagation on a minibatch of training data.
38 | backprop :: BackpropTrainer -> FeedForwardNetwork -> [TrainingData] -> FeedForwardNetwork
39 | backprop t n es =
40 | updateNetwork (length es) t (foldl (calculateNablas t n) emptyFeedForwardNetwork es) n
41 | 
42 | -- | Given the size of the minibatch, the trainer, the nablas for each layer, given
43 | -- as a network, and the network itself, return a network with updated weights.
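-- In symbols, this is standard minibatch gradient descent: for a minibatch
-- of size m, every weight w and bias b is updated as
--
-- > w <- w - (eta / m) * nabla_w
-- > b <- b - (eta / m) * nabla_b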
44 | updateNetwork :: Int -> BackpropTrainer -> FeedForwardNetwork -> FeedForwardNetwork -> FeedForwardNetwork
45 | updateNetwork mag t nablas n = addFeedForwardNetworks n
46 | (FeedForwardNetwork $ map (scaleLayer $ -1 * eta t / fromIntegral mag) (layers nablas))
47 | 
48 | -- | Calculate the nablas for a minibatch and return them as a network (so each
49 | -- weight and bias gets its own nabla).
50 | calculateNablas :: BackpropTrainer -> FeedForwardNetwork -> FeedForwardNetwork -> TrainingData -> FeedForwardNetwork
51 | calculateNablas t n nablas e = addFeedForwardNetworks nablas
52 | (FeedForwardNetwork $ map (updateLayer t) (zip3 (layers n) ds os))
53 | where ds = deltas t n e
54 | os = outputs (fst e) n
55 | 
56 | -- | The mapped function that computes the weight and bias nablas for a single layer
57 | updateLayer :: BackpropTrainer -> (Layer, Vector Double, Vector Double) -> Layer
58 | updateLayer t (l, delta, output) = Layer newWeight newBias n
59 | where n = neuron l
60 | newWeight = delta `outer` output -- outer product: the nabla for weight ij is delta_i * output_j
61 | newBias = delta
62 | 
63 | -- | The outputs function scans over each layer of the network and stores the
64 | -- activated results
65 | outputs :: Vector Double -> FeedForwardNetwork -> [Vector Double]
66 | outputs input network = scanl apply input (layers network)
67 | 
68 | -- | The inputs function performs a similar task to outputs, but returns a list
69 | -- of vectors of unactivated inputs
70 | inputs :: Vector Double -> FeedForwardNetwork -> [Vector Double]
71 | inputs input network = if null (layers network) then []
72 | else unactivated : inputs activated (FeedForwardNetwork (tail $ layers network))
73 | where unactivated = weightMatrix layer #> input + biasVector layer
74 | layer = head $ layers network
75 | activated = cmap (activation (neuron layer)) unactivated
76 | 
77 | -- | The deltas function returns a list of layer deltas.
78 | deltas :: BackpropTrainer -> FeedForwardNetwork -> TrainingData -> [Vector Double]
79 | deltas t n example = reverse (hiddenDeltas
80 | (FeedForwardNetwork (reverse (layers n))) outputDelta (tail $ reverse is)) ++ [outputDelta]
81 | where outputDelta = costd (snd example) output *
82 | cmap activationd lastInput
83 | costd = cost' t
84 | activationd = activation' (neuron (last (layers n)))
85 | output = last os
86 | lastInput = last is
87 | is = inputs (fst example) n
88 | os = outputs (fst example) n
89 | 
90 | -- | Compute the hidden layer deltas
91 | hiddenDeltas :: FeedForwardNetwork -> Vector Double -> [Vector Double] -> [Vector Double]
92 | hiddenDeltas n prevDelta is = if length (layers n) <= 1 then []
93 | else delta : hiddenDeltas rest delta (tail is)
94 | where rest = FeedForwardNetwork (tail $ layers n)
95 | delta = tr w #> prevDelta * spv
96 | w = weightMatrix (head $ layers n)
97 | spv = cmap (activation' (neuron (head $ layers n))) (head is)
--------------------------------------------------------------------------------
/AI/Visualizations.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE FlexibleContexts #-}
2 | 
3 | module AI.Visualizations
4 | ( networkHistogram
5 | , weightList
6 | , biasList
7 | ) where
8 | 
9 | import AI.Layer
10 | import AI.Network
11 | import AI.Network.FeedForwardNetwork
12 | import AI.Neuron
13 | import Numeric.LinearAlgebra
14 | 
15 | import Control.Monad ((<=<))
16 | import GHC.Float
17 | import Graphics.Histogram
18 | import AI.Trainer
19 | 
20 | weightList :: FeedForwardNetwork -> [Double]
21 | weightList = toList . flatten .
weightMatrix <=< layers 22 | 23 | biasList :: FeedForwardNetwork -> [Double] 24 | biasList = toList . biasVector <=< layers 25 | 26 | networkHistogram :: FilePath -> (FeedForwardNetwork -> [Double]) -> FeedForwardNetwork -> IO () 27 | networkHistogram filename listFunction n = do 28 | let hist = histogram binSturges (listFunction n) 29 | plot filename hist 30 | return () 31 | -------------------------------------------------------------------------------- /Changelog: -------------------------------------------------------------------------------- 1 | 0.2.0.0 - Introduced selection functions and a training function with a 2 | stop condition. 3 | 4 | 0.1.0.1 - Updated the Cabal file to fix failing builds 5 | 6 | 0.1.0.0 - Initial build 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Brent Baumgartner, Harang Ju, Alex Thomas, Joseph Barrow 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LambdaNet.cabal: -------------------------------------------------------------------------------- 1 | -- Initial LambdaNet.cabal generated by cabal init. For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | name: LambdaNet 5 | version: 0.2.0.0 6 | synopsis: A configurable and extensible neural network library 7 | description: { 8 | LambdaNet is an artificial neural network library that allows 9 | users to compose their own networks from function primitives. 10 | . 11 | Documentation and nightly builds for LambdaNet can be found 12 | at (). 
13 | } 14 | license: MIT 15 | license-file: LICENSE 16 | author: Brent Baumgartner, Alex Thomas, Harang Ju, Joseph Barrow 17 | maintainer: Joseph Barrow 18 | copyright: 2014 19 | category: Machine Learning 20 | build-type: Simple 21 | cabal-version: >=1.8 22 | extra-source-files: README.md 23 | Changelog 24 | 25 | flag examples 26 | Description: Enable examples 27 | Default: False 28 | 29 | library 30 | exposed-modules: 31 | AI.Network, 32 | AI.Neuron, 33 | AI.Layer, 34 | AI.Trainer, 35 | AI.Trainer.BackpropTrainer, 36 | AI.Network.FeedForwardNetwork, 37 | AI.Network.SOM, 38 | AI.DemoNeuron, 39 | AI.DemoLayer 40 | -- other-modules: 41 | build-depends: 42 | base >= 4 && < 5, 43 | hmatrix >= 0.17.0.1, 44 | random, 45 | random-shuffle >= 0.0.4, 46 | split, 47 | binary, 48 | bytestring, 49 | Histogram, 50 | hspec 51 | 52 | executable convolutional 53 | main-is: Convolutional.hs 54 | hs-source-dirs: examples 55 | build-depends: 56 | base >= 4 && < 5, 57 | LambdaNet, 58 | hmatrix >= 0.17.0.1, 59 | random 60 | if flag(examples) 61 | buildable: True 62 | else 63 | buildable: False 64 | 65 | executable som 66 | main-is: SOM.hs 67 | hs-source-dirs: examples 68 | build-depends: 69 | base >= 4 && < 5, 70 | LambdaNet, 71 | hmatrix >= 0.17.0.1, 72 | random 73 | if flag(examples) 74 | buildable: True 75 | else 76 | buildable: False 77 | 78 | executable xor 79 | main-is: XOR.hs 80 | hs-source-dirs: examples 81 | build-depends: 82 | base >= 4 && < 5, 83 | LambdaNet, 84 | hmatrix >= 0.17.0.1, 85 | random 86 | if flag(examples) 87 | buildable: True 88 | else 89 | buildable: False 90 | 91 | test-suite test-all 92 | type: exitcode-stdio-1.0 93 | main-is: Main.hs 94 | hs-source-dirs: test 95 | build-depends: 96 | base >= 4 && < 5, 97 | LambdaNet, 98 | hspec, 99 | QuickCheck 100 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | LambdaNet 2 | ===== 3 | 4 | LambdaNet is an artificial neural network library written in Haskell 5 | that abstracts network creation, training, and use as higher order 6 | functions. The benefit of this approach is that it provides a framework 7 | in which users can: 8 | - quickly iterate through network designs by using different functional components 9 | - experiment by writing small functional components to extend the library 10 | 11 | The library comes with a pre-defined set of functions that can be composed 12 | in many ways to operate on real-world data. These will be enumerated later 13 | in the documentation. 14 | 15 | ## Current Release 16 | 17 | The code from this repo doesn't reflect the current release of LambdaNet. The README 18 | for the current release on Hackage [can be found here](https://github.com/jbarrow/LambdaNet/tree/8bb914712794cc87dd442a6577b34cdb72ad059b). 19 | 20 | ## Installation 21 | 22 | The first step is to follow the [HMatrix installation instructions](https://github.com/albertoruiz/hmatrix/blob/master/INSTALL.md). 23 | After that, LambdaNet can be installed through Cabal: 24 | 25 | ``` 26 | cabal update 27 | cabal install LambdaNet 28 | ``` 29 | 30 | ### Installing the Most Recent Build 31 | 32 | Alternatively, you can use the nightly. The API may be different than what 33 | is covered in the README, but the `examples/` folder will always contain 34 | a working file using all the features of the current commit. 
35 | 
36 | To install the nightly build, simply run:
37 | 
38 | ```
39 | git clone https://github.com/jbarrow/LambdaNet.git && cd LambdaNet
40 | cabal install
41 | ```
42 | 
43 | ## Using LambdaNet
44 | 
45 | Using LambdaNet to rapidly prototype networks using built-in functions
46 | requires only a minimal level of Haskell knowledge (although getting
47 | the data into the right form may be more difficult). However, extending
48 | the library may require a more in-depth knowledge of Haskell and
49 | functional programming techniques.
50 | 
51 | You can find a quick example of using the network in `XOR.hs`. Once LambdaNet
52 | is installed, download `XOR.hs` and run it with `runhaskell` to
53 | see the results:
54 | 
55 | ```
56 | runhaskell examples/XOR.hs
57 | ```
58 | 
59 | The rest of this section dissects the XOR network in order to talk about
60 | the design of LambdaNet.
61 | 
62 | ### Training Data
63 | 
64 | Before you can train or use a network, you must have training data. Each
65 | training example is a tuple of vectors, the first value being the input
66 | to the network and the second value being the expected output.
67 | 
68 | For the XOR network, the data is easily hardcoded:
69 | 
70 | ```
71 | let trainData = [
72 | (fromList [0.0, 0.0], fromList [0.0]),
73 | (fromList [0.0, 1.0], fromList [1.0]),
74 | (fromList [1.0, 0.0], fromList [1.0]),
75 | (fromList [1.0, 1.0], fromList [0.0])
76 | ]
77 | ```
78 | 
79 | However, for any non-trivial application the most difficult work will be
80 | getting the data in this form. Unfortunately, LambdaNet does not currently
81 | have tools to support data handling.
82 | 
83 | ### Layer Definitions
84 | 
85 | The first step in creating a network is to define a list of layer
86 | definitions. The LayerDefinition type takes a neuron type, a count of
87 | neurons in the layer, and a connectivity function.
88 | 
89 | Creating the layer definitions for a three-layer XOR network, with
90 | 2 neurons in the input layer, 2 hidden neurons, and 1 output neuron
91 | can be done as:
92 | 
93 | ```
94 | let l = LayerDefinition sigmoidNeuron 2 connectFully
95 | let l' = LayerDefinition sigmoidNeuron 2 connectFully
96 | let l'' = LayerDefinition sigmoidNeuron 1 connectFully
97 | ```
98 | 
99 | #### Neuron Types
100 | 
101 | A neuron is simply defined as an activation function and its derivative,
102 | and the LambdaNet library provides three built-in neuron types:
103 | - `sigmoidNeuron` - A neuron with a sigmoid activation function
104 | - `tanhNeuron` - A neuron with a hyperbolic tangent activation function
105 | - `recluNeuron` - A neuron with a rectified linear activation function
106 | 
107 | By passing one of these functions into a LayerDefinition, you can
108 | create a layer with neurons of that type.
109 | 
110 | #### Connectivity
111 | 
112 | A connectivity function is a bit more opaque. Currently, the library
113 | only provides `connectFully`, a function which creates a fully
114 | connected feed-forward network.
115 | 
116 | Put simply, a connectivity function takes in the number of neurons in layer l
117 | and the number of neurons in layer l + 1, and returns a matrix of 0s and 1s
118 | that represents the connectivity graph of the layers: a 0 means two neurons
119 | are not connected, and a 1 means they are. The starting weights are
120 | defined later.
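For reference, the built-in `connectFully` (defined in `AI/Layer.hs`) simply
builds an i x j matrix of ones, connecting every neuron in one layer to every
neuron in the next:

```
connectFully :: Connectivity
connectFully i j = (i >< j) (repeat 1)
```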
121 | 
122 | ### Creating the Network
123 | 
124 | The `createNetwork` function takes in a random transform, an entropy
125 | generator, and a list of layer definitions, and returns a network.
126 | 
127 | For the XOR network, the createNetwork function is:
128 | 
129 | ```
130 | let n = createNetwork normals (mkStdGen 4) [l, l', l'']
131 | ```
132 | 
133 | Our source of entropy is the not-particularly-random `mkStdGen 4`, which will
134 | always produce the same generator, and thus reproducible starting weights.
135 | 
136 | #### Random Transforms
137 | 
138 | The random transform function is a transform that operates on a
139 | stream of uniformly distributed random numbers and returns a stream
140 | of floating point numbers.
141 | 
142 | Currently, the two defined distributions are:
143 | - `uniforms` - A trivial function that returns a stream of uniformly distributed random numbers
144 | - `normals` - A slightly less-trivial function that uses the Box-Muller transform to create a stream of numbers ~ N(0, 1)
145 | 
146 | Work is being done to offer a student t-distribution, which would require
147 | support for a chi-squared distribution transformation.
148 | 
149 | ### Training the Network
150 | 
151 | In order to train a network, you must create a new trainer:
152 | 
153 | ```
154 | let t = BackpropTrainer (3 :: Float) quadraticCost quadraticCost'
155 | ```
156 | 
157 | The BackpropTrainer type takes in a learning rate, a cost function, and
158 | its derivative.
159 | 
160 | The actual training of the network is done by the `fit` function, which takes
161 | the trainer, a network, and the training data, and returns a new, trained network.
162 | For the XOR network, this is:
163 | 
164 | ```
165 | let n' = trainUntilErrorLessThan n t online trainData 0.01
166 | ```
167 | 
168 | LambdaNet provides three training methods:
169 | - `trainUntil`
170 | - `trainUntilErrorLessThan`
171 | - `trainNTimes`
172 | 
173 | The `trainUntil` function takes a StopCondition (check Network/Trainer.hs
174 | for more information), and the last two are simply wrappers around it that
175 | provide specific predicates.
176 | 
177 | The calculated error is the value returned by the cost function.
178 | 
179 | #### Cost Functions
180 | 
181 | Currently, the only provided cost function is the quadratic error cost function,
182 | `quadraticCost` and its derivative, `quadraticCost'`. I am about to add the
183 | cross-entropy cost function.
184 | 
185 | #### Selection Functions
186 | 
187 | Selection functions break up a dataset for each round of training. The currently provided
188 | selection functions are:
189 | - `minibatch n` - You must provide an n and partially apply it to minibatch to get a valid selection function. This function updates the network after every n training examples.
190 | - `online` - Using this function means that the network updates after every training example.
191 | 
192 | For small data sets, it's better to use online, while for larger data sets, the training
193 | can occur much faster if you use a reasonably sized minibatch.
194 | 
195 | ### Using the Network
196 | 
197 | Once the network is trained, you can use it with your test data or
198 | production data:
199 | 
200 | ```
201 | predict (fromList [1, 0]) n'
202 | ```
203 | 
204 | LambdaNet at least attempts to follow a Scikit-Learn style naming scheme
205 | with `fit` and `predict` functions.
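As a quick sanity check (a sketch, assuming training converged), you can map
`predict` over all four XOR inputs and confirm that each output is close to
its target:

```
mapM_ (\(input, _) -> print (predict input n')) trainData
```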
206 | 207 | ### Storing and Loading 208 | 209 | Once a network has been trained, the weights and biases can be stored in 210 | a file: 211 | 212 | ``` 213 | saveNetwork "xor.ann" n' 214 | ``` 215 | 216 | By calling `saveNetwork` with a file path, you can save the state of the 217 | network. 218 | 219 | Loading a network requires passing in a list of layer definitions 220 | for the original network, but will load all the weights and biases of the 221 | saved network: 222 | 223 | ``` 224 | n'' <- loadNetwork "xor.ann" [l, l', l''] 225 | ``` 226 | 227 | Note that the loadNetwork function returns an IO (Network), you can't simply 228 | call predict or train on the object returned by loadNetwork. Using the 229 | approach in XOR.hs should allow you to work with the returned object. 230 | 231 | ## Currently Under Development 232 | 233 | What has been outlined above is only the first stages of LambdaNet. I intend 234 | to support some additional features, such as: 235 | - Unit testing 236 | - Self-organizing maps 237 | - Regularization functions 238 | - Additional trainer types (RProp, RMSProp) 239 | - Additional cost functions 240 | 241 | ### Unit Testing 242 | 243 | In order to develop more complex network architectures, it is important 244 | to ensure that all of the basics are working -- especially as the API 245 | undergoes changes. To run the unit tests: 246 | 247 | ``` 248 | git clone https://github.com/jbarrow/LambdaNet.git && cd LambdaNet 249 | cabal install 250 | cd test 251 | runhaskell Main.hs 252 | ``` 253 | 254 | This will download the most recent version of LambdaNet and run all the 255 | unit tests. 256 | 257 | ### Self-Organizing Maps (SOMs, or Kohonen Maps) 258 | 259 | SOMs were chosen as the next architecture to develop because they make 260 | different assumptions than FeedForward networks. This allows us to see 261 | how the current library handles building out new architectures. Already 262 | this has forced a change in the Neuron model and spurred the development 263 | of a visualizations package (in order to usefully understand the outputs 264 | of the SOMs). 265 | 266 | ### Regularization Functions and Momentum 267 | 268 | Standard backprop training is subject to overfitting and falling into local 269 | minima. By providing support for regularization and momentum, LambdaNet 270 | will be able to provide more extensible and robust training. 271 | 272 | ## Future Goals 273 | 274 | The future goals are: 275 | - Convolutional Networks 276 | - Data handling for Neural Networks 277 | 278 | ## Generating the Documentation Images 279 | 280 | All the documentation for the network was generated in the following manner. In the docs folder, run: 281 | 282 | ``` 283 | runhaskell docs.hs 284 | python analysis.py 285 | ``` 286 | 287 | Note that I am currently working on removing the Python image analysis 288 | from the library, and switching it with Haskell and gnuplot. I'm also 289 | working on using the generated images in network documentation. 290 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/LambdaNet.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/LambdaNet.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/LambdaNet" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/LambdaNet" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. 
The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/_build/dirhtml/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: fdf8d04b85fd910e66fe9fd2a37bb129 4 | tags: d77d1c0d9ca2f4c8421862c7c5a0d620 5 | -------------------------------------------------------------------------------- /docs/_build/dirhtml/_sources/index.txt: -------------------------------------------------------------------------------- 1 | .. LambdaNet documentation master file, created by 2 | sphinx-quickstart on Thu May 14 16:26:41 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | LambdaNet 7 | ========= 8 | 9 | LambdaNet is a functional neural network library built in Haskell that allows rapid 10 | prototyping and evaluation of neural architectures. 11 | 12 | Contents: 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | 17 | 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | 26 | -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jbarrow/LambdaNet/fbdb2b9e75aaa88ea43d2f9e7b9f94ebc849301e/docs/_build/dirhtml/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/basic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * basic.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- basic theme. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /* -- main layout ----------------------------------------------------------- */ 13 | 14 | div.clearer { 15 | clear: both; 16 | } 17 | 18 | /* -- relbar ---------------------------------------------------------------- */ 19 | 20 | div.related { 21 | width: 100%; 22 | font-size: 90%; 23 | } 24 | 25 | div.related h3 { 26 | display: none; 27 | } 28 | 29 | div.related ul { 30 | margin: 0; 31 | padding: 0 0 0 10px; 32 | list-style: none; 33 | } 34 | 35 | div.related li { 36 | display: inline; 37 | } 38 | 39 | div.related li.right { 40 | float: right; 41 | margin-right: 5px; 42 | } 43 | 44 | /* -- sidebar --------------------------------------------------------------- */ 45 | 46 | div.sphinxsidebarwrapper { 47 | padding: 10px 5px 0 10px; 48 | } 49 | 50 | div.sphinxsidebar { 51 | float: left; 52 | width: 230px; 53 | margin-left: -100%; 54 | font-size: 90%; 55 | } 56 | 57 | div.sphinxsidebar ul { 58 | list-style: none; 59 | } 60 | 61 | div.sphinxsidebar ul ul, 62 | div.sphinxsidebar ul.want-points { 63 | margin-left: 20px; 64 | list-style: square; 65 | } 66 | 67 | div.sphinxsidebar ul ul { 68 | margin-top: 0; 69 | margin-bottom: 0; 70 | } 71 | 72 | div.sphinxsidebar form { 73 | margin-top: 10px; 74 | } 75 | 76 | div.sphinxsidebar input { 77 | border: 1px solid #98dbcc; 78 | font-family: sans-serif; 79 | font-size: 1em; 80 | } 81 | 82 | div.sphinxsidebar #searchbox input[type="text"] { 83 | width: 170px; 84 | } 85 | 86 | div.sphinxsidebar #searchbox input[type="submit"] { 87 | width: 30px; 88 | } 89 | 90 | img { 91 | border: 0; 92 | max-width: 100%; 93 | } 94 | 95 | /* -- search page ----------------------------------------------------------- */ 96 | 97 | ul.search { 98 | margin: 10px 0 0 20px; 99 | padding: 0; 100 | } 101 | 102 | ul.search li { 103 | padding: 5px 0 5px 20px; 104 | background-image: url(file.png); 105 | background-repeat: no-repeat; 106 | background-position: 0 7px; 107 | } 108 | 109 | ul.search li a { 110 | font-weight: bold; 111 | } 112 | 113 | ul.search li div.context { 114 | color: #888; 115 | margin: 2px 0 0 30px; 116 | text-align: left; 117 | } 118 | 119 | ul.keywordmatches li.goodmatch a { 120 | font-weight: bold; 121 | } 122 | 123 | /* -- index page ------------------------------------------------------------ */ 124 | 125 | table.contentstable { 126 | width: 90%; 127 | } 128 | 129 | table.contentstable p.biglink { 130 | line-height: 150%; 131 | } 132 | 133 | a.biglink { 134 | font-size: 1.3em; 135 | } 136 | 137 | span.linkdescr { 138 | font-style: italic; 139 | padding-top: 5px; 140 | font-size: 90%; 141 | } 142 | 143 | /* -- general index --------------------------------------------------------- */ 144 | 145 | table.indextable { 146 | width: 100%; 147 | } 148 | 149 | table.indextable td { 150 | text-align: left; 151 | vertical-align: top; 152 | } 153 | 154 | table.indextable dl, table.indextable dd { 155 | margin-top: 0; 156 | margin-bottom: 0; 157 | } 158 | 159 | table.indextable tr.pcap { 160 | height: 10px; 161 | } 162 | 163 | table.indextable tr.cap { 164 | margin-top: 10px; 165 | background-color: #f2f2f2; 166 | } 167 | 168 | img.toggler { 169 | margin-right: 3px; 170 | margin-top: 3px; 171 | cursor: pointer; 172 | } 173 | 174 | div.modindex-jumpbox { 175 | border-top: 1px solid #ddd; 176 | border-bottom: 1px solid #ddd; 177 | margin: 1em 0 1em 0; 178 | padding: 0.4em; 179 | } 180 | 181 | div.genindex-jumpbox { 182 | border-top: 1px solid #ddd; 183 | border-bottom: 1px solid #ddd; 184 | margin: 1em 0 1em 0; 185 | padding: 0.4em; 186 | } 187 | 
188 | /* -- general body styles --------------------------------------------------- */ 189 | 190 | a.headerlink { 191 | visibility: hidden; 192 | } 193 | 194 | h1:hover > a.headerlink, 195 | h2:hover > a.headerlink, 196 | h3:hover > a.headerlink, 197 | h4:hover > a.headerlink, 198 | h5:hover > a.headerlink, 199 | h6:hover > a.headerlink, 200 | dt:hover > a.headerlink { 201 | visibility: visible; 202 | } 203 | 204 | div.body p.caption { 205 | text-align: inherit; 206 | } 207 | 208 | div.body td { 209 | text-align: left; 210 | } 211 | 212 | .field-list ul { 213 | padding-left: 1em; 214 | } 215 | 216 | .first { 217 | margin-top: 0 !important; 218 | } 219 | 220 | p.rubric { 221 | margin-top: 30px; 222 | font-weight: bold; 223 | } 224 | 225 | img.align-left, .figure.align-left, object.align-left { 226 | clear: left; 227 | float: left; 228 | margin-right: 1em; 229 | } 230 | 231 | img.align-right, .figure.align-right, object.align-right { 232 | clear: right; 233 | float: right; 234 | margin-left: 1em; 235 | } 236 | 237 | img.align-center, .figure.align-center, object.align-center { 238 | display: block; 239 | margin-left: auto; 240 | margin-right: auto; 241 | } 242 | 243 | .align-left { 244 | text-align: left; 245 | } 246 | 247 | .align-center { 248 | text-align: center; 249 | } 250 | 251 | .align-right { 252 | text-align: right; 253 | } 254 | 255 | /* -- sidebars -------------------------------------------------------------- */ 256 | 257 | div.sidebar { 258 | margin: 0 0 0.5em 1em; 259 | border: 1px solid #ddb; 260 | padding: 7px 7px 0 7px; 261 | background-color: #ffe; 262 | width: 40%; 263 | float: right; 264 | } 265 | 266 | p.sidebar-title { 267 | font-weight: bold; 268 | } 269 | 270 | /* -- topics ---------------------------------------------------------------- */ 271 | 272 | div.topic { 273 | border: 1px solid #ccc; 274 | padding: 7px 7px 0 7px; 275 | margin: 10px 0 10px 0; 276 | } 277 | 278 | p.topic-title { 279 | font-size: 1.1em; 280 | font-weight: bold; 281 | margin-top: 10px; 282 | } 283 | 284 | /* -- admonitions ----------------------------------------------------------- */ 285 | 286 | div.admonition { 287 | margin-top: 10px; 288 | margin-bottom: 10px; 289 | padding: 7px; 290 | } 291 | 292 | div.admonition dt { 293 | font-weight: bold; 294 | } 295 | 296 | div.admonition dl { 297 | margin-bottom: 0; 298 | } 299 | 300 | p.admonition-title { 301 | margin: 0px 10px 5px 0px; 302 | font-weight: bold; 303 | } 304 | 305 | div.body p.centered { 306 | text-align: center; 307 | margin-top: 25px; 308 | } 309 | 310 | /* -- tables ---------------------------------------------------------------- */ 311 | 312 | table.docutils { 313 | border: 0; 314 | border-collapse: collapse; 315 | } 316 | 317 | table.docutils td, table.docutils th { 318 | padding: 1px 8px 1px 5px; 319 | border-top: 0; 320 | border-left: 0; 321 | border-right: 0; 322 | border-bottom: 1px solid #aaa; 323 | } 324 | 325 | table.field-list td, table.field-list th { 326 | border: 0 !important; 327 | } 328 | 329 | table.footnote td, table.footnote th { 330 | border: 0 !important; 331 | } 332 | 333 | th { 334 | text-align: left; 335 | padding-right: 5px; 336 | } 337 | 338 | table.citation { 339 | border-left: solid 1px gray; 340 | margin-left: 1px; 341 | } 342 | 343 | table.citation td { 344 | border-bottom: none; 345 | } 346 | 347 | /* -- other body styles ----------------------------------------------------- */ 348 | 349 | ol.arabic { 350 | list-style: decimal; 351 | } 352 | 353 | ol.loweralpha { 354 | list-style: lower-alpha; 355 | } 
356 | 357 | ol.upperalpha { 358 | list-style: upper-alpha; 359 | } 360 | 361 | ol.lowerroman { 362 | list-style: lower-roman; 363 | } 364 | 365 | ol.upperroman { 366 | list-style: upper-roman; 367 | } 368 | 369 | dl { 370 | margin-bottom: 15px; 371 | } 372 | 373 | dd p { 374 | margin-top: 0px; 375 | } 376 | 377 | dd ul, dd table { 378 | margin-bottom: 10px; 379 | } 380 | 381 | dd { 382 | margin-top: 3px; 383 | margin-bottom: 10px; 384 | margin-left: 30px; 385 | } 386 | 387 | dt:target, .highlighted { 388 | background-color: #fbe54e; 389 | } 390 | 391 | dl.glossary dt { 392 | font-weight: bold; 393 | font-size: 1.1em; 394 | } 395 | 396 | .field-list ul { 397 | margin: 0; 398 | padding-left: 1em; 399 | } 400 | 401 | .field-list p { 402 | margin: 0; 403 | } 404 | 405 | .optional { 406 | font-size: 1.3em; 407 | } 408 | 409 | .versionmodified { 410 | font-style: italic; 411 | } 412 | 413 | .system-message { 414 | background-color: #fda; 415 | padding: 5px; 416 | border: 3px solid red; 417 | } 418 | 419 | .footnote:target { 420 | background-color: #ffa; 421 | } 422 | 423 | .line-block { 424 | display: block; 425 | margin-top: 1em; 426 | margin-bottom: 1em; 427 | } 428 | 429 | .line-block .line-block { 430 | margin-top: 0; 431 | margin-bottom: 0; 432 | margin-left: 1.5em; 433 | } 434 | 435 | .guilabel, .menuselection { 436 | font-family: sans-serif; 437 | } 438 | 439 | .accelerator { 440 | text-decoration: underline; 441 | } 442 | 443 | .classifier { 444 | font-style: oblique; 445 | } 446 | 447 | abbr, acronym { 448 | border-bottom: dotted 1px; 449 | cursor: help; 450 | } 451 | 452 | /* -- code displays --------------------------------------------------------- */ 453 | 454 | pre { 455 | overflow: auto; 456 | overflow-y: hidden; /* fixes display issues on Chrome browsers */ 457 | } 458 | 459 | td.linenos pre { 460 | padding: 5px 0px; 461 | border: 0; 462 | background-color: transparent; 463 | color: #aaa; 464 | } 465 | 466 | table.highlighttable { 467 | margin-left: 0.5em; 468 | } 469 | 470 | table.highlighttable td { 471 | padding: 0 0.5em 0 0.5em; 472 | } 473 | 474 | tt.descname { 475 | background-color: transparent; 476 | font-weight: bold; 477 | font-size: 1.2em; 478 | } 479 | 480 | tt.descclassname { 481 | background-color: transparent; 482 | } 483 | 484 | tt.xref, a tt { 485 | background-color: transparent; 486 | font-weight: bold; 487 | } 488 | 489 | h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { 490 | background-color: transparent; 491 | } 492 | 493 | .viewcode-link { 494 | float: right; 495 | } 496 | 497 | .viewcode-back { 498 | float: right; 499 | font-family: sans-serif; 500 | } 501 | 502 | div.viewcode-block:target { 503 | margin: -1px -10px; 504 | padding: 0 10px; 505 | } 506 | 507 | /* -- math display ---------------------------------------------------------- */ 508 | 509 | img.math { 510 | vertical-align: middle; 511 | } 512 | 513 | div.body div.math p { 514 | text-align: center; 515 | } 516 | 517 | span.eqno { 518 | float: right; 519 | } 520 | 521 | /* -- printout stylesheet --------------------------------------------------- */ 522 | 523 | @media print { 524 | div.document, 525 | div.documentwrapper, 526 | div.bodywrapper { 527 | margin: 0 !important; 528 | width: 100%; 529 | } 530 | 531 | div.sphinxsidebar, 532 | div.related, 533 | div.footer, 534 | #top-link { 535 | display: none; 536 | } 537 | } -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/default.css: 
-------------------------------------------------------------------------------- 1 | /* 2 | * default.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- default theme. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: sans-serif; 18 | font-size: 100%; 19 | background-color: #11303d; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | background-color: #1c4e63; 27 | } 28 | 29 | div.documentwrapper { 30 | float: left; 31 | width: 100%; 32 | } 33 | 34 | div.bodywrapper { 35 | margin: 0 0 0 230px; 36 | } 37 | 38 | div.body { 39 | background-color: #ffffff; 40 | color: #000000; 41 | padding: 0 20px 30px 20px; 42 | } 43 | 44 | div.footer { 45 | color: #ffffff; 46 | width: 100%; 47 | padding: 9px 0 9px 0; 48 | text-align: center; 49 | font-size: 75%; 50 | } 51 | 52 | div.footer a { 53 | color: #ffffff; 54 | text-decoration: underline; 55 | } 56 | 57 | div.related { 58 | background-color: #133f52; 59 | line-height: 30px; 60 | color: #ffffff; 61 | } 62 | 63 | div.related a { 64 | color: #ffffff; 65 | } 66 | 67 | div.sphinxsidebar { 68 | } 69 | 70 | div.sphinxsidebar h3 { 71 | font-family: 'Trebuchet MS', sans-serif; 72 | color: #ffffff; 73 | font-size: 1.4em; 74 | font-weight: normal; 75 | margin: 0; 76 | padding: 0; 77 | } 78 | 79 | div.sphinxsidebar h3 a { 80 | color: #ffffff; 81 | } 82 | 83 | div.sphinxsidebar h4 { 84 | font-family: 'Trebuchet MS', sans-serif; 85 | color: #ffffff; 86 | font-size: 1.3em; 87 | font-weight: normal; 88 | margin: 5px 0 0 0; 89 | padding: 0; 90 | } 91 | 92 | div.sphinxsidebar p { 93 | color: #ffffff; 94 | } 95 | 96 | div.sphinxsidebar p.topless { 97 | margin: 5px 10px 10px 10px; 98 | } 99 | 100 | div.sphinxsidebar ul { 101 | margin: 10px; 102 | padding: 0; 103 | color: #ffffff; 104 | } 105 | 106 | div.sphinxsidebar a { 107 | color: #98dbcc; 108 | } 109 | 110 | div.sphinxsidebar input { 111 | border: 1px solid #98dbcc; 112 | font-family: sans-serif; 113 | font-size: 1em; 114 | } 115 | 116 | 117 | 118 | /* -- hyperlink styles ------------------------------------------------------ */ 119 | 120 | a { 121 | color: #355f7c; 122 | text-decoration: none; 123 | } 124 | 125 | a:visited { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:hover { 131 | text-decoration: underline; 132 | } 133 | 134 | 135 | 136 | /* -- body styles ----------------------------------------------------------- */ 137 | 138 | div.body h1, 139 | div.body h2, 140 | div.body h3, 141 | div.body h4, 142 | div.body h5, 143 | div.body h6 { 144 | font-family: 'Trebuchet MS', sans-serif; 145 | background-color: #f2f2f2; 146 | font-weight: normal; 147 | color: #20435c; 148 | border-bottom: 1px solid #ccc; 149 | margin: 20px -20px 10px -20px; 150 | padding: 3px 0 3px 10px; 151 | } 152 | 153 | div.body h1 { margin-top: 0; font-size: 200%; } 154 | div.body h2 { font-size: 160%; } 155 | div.body h3 { font-size: 140%; } 156 | div.body h4 { font-size: 120%; } 157 | div.body h5 { font-size: 110%; } 158 | div.body h6 { font-size: 100%; } 159 | 160 | a.headerlink { 161 | color: #c60f0f; 162 | font-size: 0.8em; 163 | padding: 0 4px 0 4px; 164 | text-decoration: none; 165 | } 166 | 167 | a.headerlink:hover { 168 | background-color: #c60f0f; 169 | color: white; 170 | } 171 | 172 | div.body p, div.body dd, div.body 
li { 173 | text-align: justify; 174 | line-height: 130%; 175 | } 176 | 177 | div.admonition p.admonition-title + p { 178 | display: inline; 179 | } 180 | 181 | div.admonition p { 182 | margin-bottom: 5px; 183 | } 184 | 185 | div.admonition pre { 186 | margin-bottom: 5px; 187 | } 188 | 189 | div.admonition ul, div.admonition ol { 190 | margin-bottom: 5px; 191 | } 192 | 193 | div.note { 194 | background-color: #eee; 195 | border: 1px solid #ccc; 196 | } 197 | 198 | div.seealso { 199 | background-color: #ffc; 200 | border: 1px solid #ff6; 201 | } 202 | 203 | div.topic { 204 | background-color: #eee; 205 | } 206 | 207 | div.warning { 208 | background-color: #ffe4e4; 209 | border: 1px solid #f66; 210 | } 211 | 212 | p.admonition-title { 213 | display: inline; 214 | } 215 | 216 | p.admonition-title:after { 217 | content: ":"; 218 | } 219 | 220 | pre { 221 | padding: 5px; 222 | background-color: #eeffcc; 223 | color: #333333; 224 | line-height: 120%; 225 | border: 1px solid #ac9; 226 | border-left: none; 227 | border-right: none; 228 | } 229 | 230 | tt { 231 | background-color: #ecf0f3; 232 | padding: 0 1px 0 1px; 233 | font-size: 0.95em; 234 | } 235 | 236 | th { 237 | background-color: #ede; 238 | } 239 | 240 | .warning tt { 241 | background: #efc2c2; 242 | } 243 | 244 | .note tt { 245 | background: #d6d6d6; 246 | } 247 | 248 | .viewcode-back { 249 | font-family: sans-serif; 250 | } 251 | 252 | div.viewcode-block:target { 253 | background-color: #f4debf; 254 | border-top: 1px solid #ac9; 255 | border-bottom: 1px solid #ac9; 256 | } -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 
46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s == 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node) { 70 | if (node.nodeType == 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { 74 | var span = document.createElement("span"); 75 | span.className = className; 76 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 77 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 78 | document.createTextNode(val.substr(pos + text.length)), 79 | node.nextSibling)); 80 | node.nodeValue = val.substr(0, pos); 81 | } 82 | } 83 | else if (!jQuery(node).is("button, select, textarea")) { 84 | jQuery.each(node.childNodes, function() { 85 | highlight(this); 86 | }); 87 | } 88 | } 89 | return this.each(function() { 90 | highlight(this); 91 | }); 92 | }; 93 | 94 | /** 95 | * Small JavaScript module for the documentation. 96 | */ 97 | var Documentation = { 98 | 99 | init : function() { 100 | this.fixFirefoxAnchorBug(); 101 | this.highlightSearchWords(); 102 | this.initIndexTable(); 103 | }, 104 | 105 | /** 106 | * i18n support 107 | */ 108 | TRANSLATIONS : {}, 109 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, 110 | LOCALE : 'unknown', 111 | 112 | // gettext and ngettext don't access this so that the functions 113 | // can safely bound to a different name (_ = Documentation.gettext) 114 | gettext : function(string) { 115 | var translated = Documentation.TRANSLATIONS[string]; 116 | if (typeof translated == 'undefined') 117 | return string; 118 | return (typeof translated == 'string') ? translated : translated[0]; 119 | }, 120 | 121 | ngettext : function(singular, plural, n) { 122 | var translated = Documentation.TRANSLATIONS[singular]; 123 | if (typeof translated == 'undefined') 124 | return (n == 1) ? singular : plural; 125 | return translated[Documentation.PLURAL_EXPR(n)]; 126 | }, 127 | 128 | addTranslations : function(catalog) { 129 | for (var key in catalog.messages) 130 | this.TRANSLATIONS[key] = catalog.messages[key]; 131 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 132 | this.LOCALE = catalog.locale; 133 | }, 134 | 135 | /** 136 | * add context elements like header anchor links 137 | */ 138 | addContextElements : function() { 139 | $('div[id] > :header:first').each(function() { 140 | $('<a class="headerlink">\u00B6</a>'). 141 | attr('href', '#' + this.id). 142 | attr('title', _('Permalink to this headline')). 143 | appendTo(this); 144 | }); 145 | $('dt[id]').each(function() { 146 | $('<a class="headerlink">\u00B6</a>'). 147 | attr('href', '#' + this.id). 148 | attr('title', _('Permalink to this definition')).
149 | appendTo(this); 150 | }); 151 | }, 152 | 153 | /** 154 | * workaround a firefox stupidity 155 | */ 156 | fixFirefoxAnchorBug : function() { 157 | if (document.location.hash && $.browser.mozilla) 158 | window.setTimeout(function() { 159 | document.location.href += ''; 160 | }, 10); 161 | }, 162 | 163 | /** 164 | * highlight the search words provided in the url in the text 165 | */ 166 | highlightSearchWords : function() { 167 | var params = $.getQueryParameters(); 168 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; 169 | if (terms.length) { 170 | var body = $('div.body'); 171 | if (!body.length) { 172 | body = $('body'); 173 | } 174 | window.setTimeout(function() { 175 | $.each(terms, function() { 176 | body.highlightText(this.toLowerCase(), 'highlighted'); 177 | }); 178 | }, 10); 179 | $('<p class="highlight-link"><a href="javascript:Documentation.' + 180 | 'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>') 181 | .appendTo($('#searchbox')); 182 | } 183 | }, 184 | 185 | /** 186 | * init the domain index toggle buttons 187 | */ 188 | initIndexTable : function() { 189 | var togglers = $('img.toggler').click(function() { 190 | var src = $(this).attr('src'); 191 | var idnum = $(this).attr('id').substr(7); 192 | $('tr.cg-' + idnum).toggle(); 193 | if (src.substr(-9) == 'minus.png') 194 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 195 | else 196 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 197 | }).css('display', ''); 198 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 199 | togglers.click(); 200 | } 201 | }, 202 | 203 | /** 204 | * helper function to hide the search marks again 205 | */ 206 | hideSearchWords : function() { 207 | $('#searchbox .highlight-link').fadeOut(300); 208 | $('span.highlighted').removeClass('highlighted'); 209 | }, 210 | 211 | /** 212 | * make the url absolute 213 | */ 214 | makeURL : function(relativeURL) { 215 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 216 | }, 217 | 218 | /** 219 | * get the current relative url 220 | */ 221 | getCurrentURL : function() { 222 | var path = document.location.pathname; 223 | var parts = path.split(/\//); 224 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 225 | if (this == '..') 226 | parts.pop(); 227 | }); 228 | var url = parts.join('/'); 229 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 230 | } 231 | }; 232 | 233 | // quick alias for translations 234 | _ = Documentation.gettext; 235 | 236 | $(document).ready(function() { 237 | Documentation.init(); 238 | }); 239 | -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 8 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 9 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 10 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 11 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 12 | .highlight .ge { font-style: italic } /* Generic.Emph */ 13 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 14 | .highlight .gh { color: #000080;
font-weight: bold } /* Generic.Heading */ 15 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 16 | .highlight .go { color: #333333 } /* Generic.Output */ 17 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 18 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 19 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 20 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 21 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 22 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 23 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 24 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 25 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 26 | .highlight .kt { color: #902000 } /* Keyword.Type */ 27 | .highlight .m { color: #208050 } /* Literal.Number */ 28 | .highlight .s { color: #4070a0 } /* Literal.String */ 29 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 30 | .highlight .nb { color: #007020 } /* Name.Builtin */ 31 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 32 | .highlight .no { color: #60add5 } /* Name.Constant */ 33 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 34 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 35 | .highlight .ne { color: #007020 } /* Name.Exception */ 36 | .highlight .nf { color: #06287e } /* Name.Function */ 37 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 38 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 39 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 40 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 41 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 42 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 43 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 44 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 45 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 46 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 47 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 48 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 49 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 50 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 51 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 52 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 53 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 54 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 55 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 56 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 57 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 58 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 59 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 60 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 61 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 62 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 63 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/_build/dirhtml/_static/searchtools.js: 
-------------------------------------------------------------------------------- 1 | /* 2 | * searchtools.js_t 3 | * ~~~~~~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilties for the full-text search. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | 13 | /** 14 | * Porter Stemmer 15 | */ 16 | var Stemmer = function() { 17 | 18 | var step2list = { 19 | ational: 'ate', 20 | tional: 'tion', 21 | enci: 'ence', 22 | anci: 'ance', 23 | izer: 'ize', 24 | bli: 'ble', 25 | alli: 'al', 26 | entli: 'ent', 27 | eli: 'e', 28 | ousli: 'ous', 29 | ization: 'ize', 30 | ation: 'ate', 31 | ator: 'ate', 32 | alism: 'al', 33 | iveness: 'ive', 34 | fulness: 'ful', 35 | ousness: 'ous', 36 | aliti: 'al', 37 | iviti: 'ive', 38 | biliti: 'ble', 39 | logi: 'log' 40 | }; 41 | 42 | var step3list = { 43 | icate: 'ic', 44 | ative: '', 45 | alize: 'al', 46 | iciti: 'ic', 47 | ical: 'ic', 48 | ful: '', 49 | ness: '' 50 | }; 51 | 52 | var c = "[^aeiou]"; // consonant 53 | var v = "[aeiouy]"; // vowel 54 | var C = c + "[^aeiouy]*"; // consonant sequence 55 | var V = v + "[aeiou]*"; // vowel sequence 56 | 57 | var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 58 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 59 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 60 | var s_v = "^(" + C + ")?" + v; // vowel in stem 61 | 62 | this.stemWord = function (w) { 63 | var stem; 64 | var suffix; 65 | var firstch; 66 | var origword = w; 67 | 68 | if (w.length < 3) 69 | return w; 70 | 71 | var re; 72 | var re2; 73 | var re3; 74 | var re4; 75 | 76 | firstch = w.substr(0,1); 77 | if (firstch == "y") 78 | w = firstch.toUpperCase() + w.substr(1); 79 | 80 | // Step 1a 81 | re = /^(.+?)(ss|i)es$/; 82 | re2 = /^(.+?)([^s])s$/; 83 | 84 | if (re.test(w)) 85 | w = w.replace(re,"$1$2"); 86 | else if (re2.test(w)) 87 | w = w.replace(re2,"$1$2"); 88 | 89 | // Step 1b 90 | re = /^(.+?)eed$/; 91 | re2 = /^(.+?)(ed|ing)$/; 92 | if (re.test(w)) { 93 | var fp = re.exec(w); 94 | re = new RegExp(mgr0); 95 | if (re.test(fp[1])) { 96 | re = /.$/; 97 | w = w.replace(re,""); 98 | } 99 | } 100 | else if (re2.test(w)) { 101 | var fp = re2.exec(w); 102 | stem = fp[1]; 103 | re2 = new RegExp(s_v); 104 | if (re2.test(stem)) { 105 | w = stem; 106 | re2 = /(at|bl|iz)$/; 107 | re3 = new RegExp("([^aeiouylsz])\\1$"); 108 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 109 | if (re2.test(w)) 110 | w = w + "e"; 111 | else if (re3.test(w)) { 112 | re = /.$/; 113 | w = w.replace(re,""); 114 | } 115 | else if (re4.test(w)) 116 | w = w + "e"; 117 | } 118 | } 119 | 120 | // Step 1c 121 | re = /^(.+?)y$/; 122 | if (re.test(w)) { 123 | var fp = re.exec(w); 124 | stem = fp[1]; 125 | re = new RegExp(s_v); 126 | if (re.test(stem)) 127 | w = stem + "i"; 128 | } 129 | 130 | // Step 2 131 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 132 | if (re.test(w)) { 133 | var fp = re.exec(w); 134 | stem = fp[1]; 135 | suffix = fp[2]; 136 | re = new RegExp(mgr0); 137 | if (re.test(stem)) 138 | w = stem + step2list[suffix]; 139 | } 140 | 141 | // Step 3 142 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 143 | if (re.test(w)) { 144 | var fp = re.exec(w); 145 | stem = fp[1]; 146 | suffix = fp[2]; 147 | re = new RegExp(mgr0); 148 | if (re.test(stem)) 149 | w = stem + step3list[suffix]; 150 | } 151 | 152 | // Step 4 153 | re = 
/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 154 | re2 = /^(.+?)(s|t)(ion)$/; 155 | if (re.test(w)) { 156 | var fp = re.exec(w); 157 | stem = fp[1]; 158 | re = new RegExp(mgr1); 159 | if (re.test(stem)) 160 | w = stem; 161 | } 162 | else if (re2.test(w)) { 163 | var fp = re2.exec(w); 164 | stem = fp[1] + fp[2]; 165 | re2 = new RegExp(mgr1); 166 | if (re2.test(stem)) 167 | w = stem; 168 | } 169 | 170 | // Step 5 171 | re = /^(.+?)e$/; 172 | if (re.test(w)) { 173 | var fp = re.exec(w); 174 | stem = fp[1]; 175 | re = new RegExp(mgr1); 176 | re2 = new RegExp(meq1); 177 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 178 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 179 | w = stem; 180 | } 181 | re = /ll$/; 182 | re2 = new RegExp(mgr1); 183 | if (re.test(w) && re2.test(w)) { 184 | re = /.$/; 185 | w = w.replace(re,""); 186 | } 187 | 188 | // and turn initial Y back to y 189 | if (firstch == "y") 190 | w = firstch.toLowerCase() + w.substr(1); 191 | return w; 192 | } 193 | } 194 | 195 | 196 | 197 | /** 198 | * Simple result scoring code. 199 | */ 200 | var Scorer = { 201 | // Implement the following function to further tweak the score for each result 202 | // The function takes a result array [filename, title, anchor, descr, score] 203 | // and returns the new score. 204 | /* 205 | score: function(result) { 206 | return result[4]; 207 | }, 208 | */ 209 | 210 | // query matches the full name of an object 211 | objNameMatch: 11, 212 | // or matches in the last dotted part of the object name 213 | objPartialMatch: 6, 214 | // Additive scores depending on the priority of the object 215 | objPrio: {0: 15, // used to be importantResults 216 | 1: 5, // used to be objectResults 217 | 2: -5}, // used to be unimportantResults 218 | // Used when the priority is not in the mapping. 
219 | objPrioDefault: 0, 220 | 221 | // query found in title 222 | title: 15, 223 | // query found in terms 224 | term: 5 225 | }; 226 | 227 | 228 | /** 229 | * Search Module 230 | */ 231 | var Search = { 232 | 233 | _index : null, 234 | _queued_query : null, 235 | _pulse_status : -1, 236 | 237 | init : function() { 238 | var params = $.getQueryParameters(); 239 | if (params.q) { 240 | var query = params.q[0]; 241 | $('input[name="q"]')[0].value = query; 242 | this.performSearch(query); 243 | } 244 | }, 245 | 246 | loadIndex : function(url) { 247 | $.ajax({type: "GET", url: url, data: null, 248 | dataType: "script", cache: true, 249 | complete: function(jqxhr, textstatus) { 250 | if (textstatus != "success") { 251 | document.getElementById("searchindexloader").src = url; 252 | } 253 | }}); 254 | }, 255 | 256 | setIndex : function(index) { 257 | var q; 258 | this._index = index; 259 | if ((q = this._queued_query) !== null) { 260 | this._queued_query = null; 261 | Search.query(q); 262 | } 263 | }, 264 | 265 | hasIndex : function() { 266 | return this._index !== null; 267 | }, 268 | 269 | deferQuery : function(query) { 270 | this._queued_query = query; 271 | }, 272 | 273 | stopPulse : function() { 274 | this._pulse_status = 0; 275 | }, 276 | 277 | startPulse : function() { 278 | if (this._pulse_status >= 0) 279 | return; 280 | function pulse() { 281 | var i; 282 | Search._pulse_status = (Search._pulse_status + 1) % 4; 283 | var dotString = ''; 284 | for (i = 0; i < Search._pulse_status; i++) 285 | dotString += '.'; 286 | Search.dots.text(dotString); 287 | if (Search._pulse_status > -1) 288 | window.setTimeout(pulse, 500); 289 | } 290 | pulse(); 291 | }, 292 | 293 | /** 294 | * perform a search for something (or wait until index is loaded) 295 | */ 296 | performSearch : function(query) { 297 | // create the required interface elements 298 | this.out = $('#search-results'); 299 | this.title = $('
<h2>' + _('Searching') + '</h2>').appendTo(this.out); 300 | this.dots = $('<span></span>').appendTo(this.title); 301 | this.status = $('<p style="display: none"></p>').appendTo(this.out); 302 | this.output = $('