├── .gitignore ├── CompositionalDeepLearning.cabal ├── LICENSE ├── README.md ├── Setup.hs ├── app └── Main.hs ├── package.yaml ├── src ├── Autodiff │ ├── Additive.hs │ ├── Cont.hs │ ├── D.hs │ ├── Dual.hs │ └── GAD.hs ├── CategoricDefinitions.hs ├── Examples.hs ├── OnesLike.hs ├── Ops.hs ├── Para.hs ├── TensorUtils.hs └── TrainUtils.hs ├── stack.yaml └── test └── Spec.hs /.gitignore: -------------------------------------------------------------------------------- 1 | dist 2 | dist-* 3 | cabal-dev 4 | *.o 5 | *.hi 6 | *.chi 7 | *.chs.h 8 | *.dyn_o 9 | *.dyn_hi 10 | .hpc 11 | .hsenv 12 | .cabal-sandbox/ 13 | cabal.sandbox.config 14 | *.prof 15 | *.aux 16 | *.hp 17 | *.eventlog 18 | .stack-work/ 19 | cabal.project.local 20 | .HTF/ -------------------------------------------------------------------------------- /CompositionalDeepLearning.cabal: -------------------------------------------------------------------------------- 1 | -- This file has been generated from package.yaml by hpack version 0.28.2. 
2 | -- 3 | -- see: https://github.com/sol/hpack 4 | -- 5 | -- hash: 62c5b14b1e57ec33a34c8024e7f4126e3fb6db5917f12ac293f3b0e9c7f55c3c 6 | 7 | name: CompositionalDeepLearning 8 | version: 0.1.0.0 9 | description: Please see the README on GitHub at 10 | homepage: https://github.com/githubuser/CompositionalDeepLearning#readme 11 | bug-reports: https://github.com/githubuser/CompositionalDeepLearning/issues 12 | author: Bruno Gavranović 13 | maintainer: bgavran3@gmail.com 14 | copyright: 2018 Bruno Gavranović 15 | license: BSD3 16 | license-file: LICENSE 17 | build-type: Simple 18 | cabal-version: >= 1.10 19 | extra-source-files: 20 | README.md 21 | 22 | source-repository head 23 | type: git 24 | location: https://github.com/githubuser/CompositionalDeepLearning 25 | 26 | library 27 | exposed-modules: 28 | Autodiff.Additive 29 | Autodiff.Cont 30 | Autodiff.D 31 | Autodiff.Dual 32 | Autodiff.GAD 33 | CategoricDefinitions 34 | Ops 35 | Para 36 | TrainUtils 37 | TensorUtils 38 | Examples 39 | AsymmetricLens 40 | Test 41 | other-modules: 42 | Paths_CompositionalDeepLearning 43 | hs-source-dirs: 44 | src 45 | build-depends: 46 | base >=4.7 && <5, 47 | lens, 48 | random, 49 | hTensor, 50 | text, 51 | NumInstances 52 | default-language: Haskell2010 53 | default-extensions: EmptyCase 54 | , FlexibleContexts 55 | , FlexibleInstances 56 | , InstanceSigs 57 | , MultiParamTypeClasses 58 | , PartialTypeSignatures 59 | , LambdaCase 60 | , MultiWayIf 61 | , NamedFieldPuns 62 | , TupleSections 63 | , DeriveFunctor 64 | , TypeOperators 65 | , ScopedTypeVariables 66 | , ConstraintKinds 67 | , RankNTypes 68 | , NoMonomorphismRestriction 69 | , TypeFamilies 70 | , UndecidableInstances 71 | , GeneralizedNewtypeDeriving 72 | , TypeOperators 73 | , TemplateHaskell 74 | , AllowAmbiguousTypes 75 | , TypeApplications 76 | , DataKinds 77 | , OverloadedLists 78 | , QuantifiedConstraints 79 | ghc-options: -Wno-partial-type-signatures 80 | 81 | executable CompositionalDeepLearning-exe 82 | main-is: 
Main.hs 83 | other-modules: 84 | Paths_CompositionalDeepLearning 85 | hs-source-dirs: 86 | app 87 | ghc-options: -threaded -rtsopts -with-rtsopts=-N 88 | build-depends: 89 | CompositionalDeepLearning 90 | , base >=4.7 && <5 91 | default-language: Haskell2010 92 | 93 | test-suite CompositionalDeepLearning-test 94 | type: exitcode-stdio-1.0 95 | main-is: Spec.hs 96 | other-modules: 97 | Paths_CompositionalDeepLearning 98 | hs-source-dirs: 99 | test 100 | ghc-options: -threaded -rtsopts -with-rtsopts=-N 101 | build-depends: 102 | CompositionalDeepLearning 103 | , base >=4.7 && <5 104 | default-language: Haskell2010 105 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Bruno Gavranović 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Compositional Deep Learning 2 | 3 | **Update May 2020:** It's important to note that while eventual implementation of neural networks from first principles is my main goal, I've currently shifted to studying them from a category-theoretic perspective. This is being done in collaboration with some other people and is mostly theory right now. This is why for a while there hasn't been any developments in this repo, although I plan on getting back to it. 4 | 5 | This is a long-term project of understanding and reimplementing neural networks from first principles using the language of category theory. 6 | It's a compositional approach to organizing and layering different abstractions found in neural networks: 7 | * Differentiation of composition of functions (forward mode, reverse mode) 8 | * Notion of composition of *parametrized* functions 9 | * Notion of a cost function (fixed or adversarial) 10 | * Notion of an update rule 11 | * Notion of meta learning 12 | * Notion of multi-agent neural networks 13 | 14 | As more and more components of our deep learning systems stop being fixed throughout training, there is an increasingly larger need for more precise formal specification of the things that _do_ stay fixed. 15 | Standard methods don't seem to be as effective: the invariants across all these networks seem to be rather abstract and hard to describe. This repository explores the speculation that the language of category theory could be well suited to describe and quantify these structures. 16 | 17 | Focus is currently not on reimplementation of popular new network architectures, but rather on principled, structured design of both neural networks and the way they are trained. 
18 | 19 | This is research level stuff and at the moment not really usable for anything other than playing around. 20 | 21 | Related work: 22 | 23 | * [The simple essence of automatic differentiation (SimpleAD)](http://conal.net/papers/essence-of-ad/) 24 | * [Backprop as Functor](https://arxiv.org/abs/1711.10455) 25 | * [Lenses and Learners](https://arxiv.org/abs/1903.03671) 26 | 27 | ---- 28 | 29 | A tiny example of simple linear regression can be found in Examples.hs 30 | 31 | ---- 32 | 33 | Progress so far: 34 | * Main parts of SimpleAD paper are fully implemented 35 | * Generalized, mode-independent automatic differentiation with rudimentary functions 36 | * Backpropagation (which is *just a specialization of GAD to the dual category of additive functions*) 37 | * Implemented category **Para** from BackpropFunctor (in a slightly different way than in the paper, such that the _request function_ in the specified form isn't really needed) 38 | 39 | 40 | **TODO**: 41 | * Find a suitable tensor library with the following features: 42 | * Static tensor shapes, known and type-checked at compile time 43 | * Some variant of Einstein summation notation for handling of tensors of arbitrary rank 44 | I'm working on one such library in Idris, which implements [Dependently Typed Einstein Summation](https://github.com/bgavran/Dependently_Typed_Einsum). 45 | * Provide working examples of training simple neural networks 46 | * Find a way to graphically show composition of **GAD**, **Para** and **Learners** 47 | * Explore using effects for data loading 48 | 49 | 50 | This is a heavy work in progress, but feel free to drop me a message with any questions! 
51 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /app/Main.hs: -------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | import Prelude hiding (id, (.)) 4 | import Numeric.LinearAlgebra.Array 5 | import Numeric.LinearAlgebra.Array.Util 6 | import System.Random 7 | import Control.Monad 8 | import Control.Lens hiding ((#), para) 9 | 10 | import CategoricDefinitions 11 | import Autodiff.D 12 | import Para 13 | import Examples 14 | 15 | main :: IO () 16 | main = run 17 | -------------------------------------------------------------------------------- /package.yaml: -------------------------------------------------------------------------------- 1 | name: CompositionalDeepLearning 2 | version: 0.1.0.0 3 | github: "githubuser/CompositionalDeepLearning" 4 | license: BSD3 5 | author: "Bruno Gavranović" 6 | maintainer: "bgavran3@gmail.com" 7 | copyright: "2018 Bruno Gavranović" 8 | 9 | extra-source-files: 10 | - README.md 11 | 12 | # Metadata used when publishing your package 13 | # synopsis: Short description of your package 14 | # category: Web 15 | 16 | # To avoid duplicated efforts in documentation and dealing with the 17 | # complications of embedding Haddock markup inside cabal files, it is 18 | # common to point users to the README.md file. 
19 | description: Please see the README on GitHub at 20 | 21 | dependencies: 22 | - base >= 4.7 && < 5 23 | 24 | library: 25 | source-dirs: src 26 | 27 | executables: 28 | CompositionalDeepLearning-exe: 29 | main: Main.hs 30 | source-dirs: app 31 | ghc-options: 32 | - -threaded 33 | - -rtsopts 34 | - -with-rtsopts=-N 35 | dependencies: 36 | - CompositionalDeepLearning 37 | 38 | tests: 39 | CompositionalDeepLearning-test: 40 | main: Spec.hs 41 | source-dirs: test 42 | ghc-options: 43 | - -threaded 44 | - -rtsopts 45 | - -with-rtsopts=-N 46 | dependencies: 47 | - CompositionalDeepLearning 48 | -------------------------------------------------------------------------------- /src/Autodiff/Additive.hs: -------------------------------------------------------------------------------- 1 | module Autodiff.Additive where 2 | 3 | import Prelude hiding (id, (.)) 4 | import Control.Lens hiding ((#), para) 5 | import qualified Prelude as P 6 | import GHC.Exts (Constraint) 7 | 8 | import CategoricDefinitions 9 | import OnesLike 10 | 11 | {- 12 | The sole purpose of this is to wrap around (->) and provide a Cocartesian instance (which (->) doesn't have) 13 | -} 14 | newtype a ->+ b = AddFun { 15 | _evalAF :: a -> b 16 | } 17 | makeLenses ''(->+) 18 | 19 | instance Category (->+) where 20 | type Allowed (->+) a = Additive a 21 | id = AddFun id 22 | AddFun g . AddFun f = AddFun (g . 
f) 23 | 24 | instance Monoidal (->+) where 25 | AddFun f `x` AddFun g = AddFun (f `x` g) 26 | assocL = AddFun assocL 27 | assocR = AddFun assocR 28 | unitorL = AddFun unitorL 29 | unitorL' = AddFun unitorL' 30 | swap = AddFun swap 31 | 32 | instance Cartesian (->+) where 33 | exl = AddFun exl 34 | exr = AddFun exr 35 | dup = AddFun dup 36 | counit = AddFun counit 37 | 38 | instance Cocartesian (->+) where 39 | inl = AddFun inlF 40 | inr = AddFun inrF 41 | jam = AddFun jamF 42 | unit = AddFun unitF 43 | 44 | instance Num a => Scalable (->+) a where 45 | scale a = AddFun (*a) 46 | 47 | instance Num a => NumCat (->+) a where 48 | negateC = AddFun negateC 49 | addC = AddFun addC 50 | mulC = AddFun mulC 51 | increaseC a = AddFun (increaseC a) 52 | 53 | instance Floating a => FloatCat (->+) a where 54 | expC = AddFun expC 55 | 56 | instance Fractional a => FractCat (->+) a where 57 | recipC = AddFun recipC 58 | 59 | ----------------------------- 60 | 61 | inlF :: Additive b => a -> (a, b) 62 | inrF :: Additive a => b -> (a, b) 63 | jamF :: Additive a => (a, a) -> a 64 | unitF :: Additive a => () -> a 65 | 66 | inlF = \a -> (a, zero) 67 | inrF = \b -> (zero, b) 68 | jamF = \(a, b) -> a ^+ b 69 | unitF = \_ -> zero -- this should probably be onesLike? 70 | 71 | type family AllAdditive xs :: Constraint where 72 | AllAdditive '[] = () 73 | AllAdditive (x : xs) = (Additive x, AllAdditive xs) 74 | -------------------------------------------------------------------------------- /src/Autodiff/Cont.hs: -------------------------------------------------------------------------------- 1 | module Autodiff.Cont where 2 | 3 | import Prelude hiding (id, (.)) 4 | 5 | import CategoricDefinitions 6 | 7 | -- TODO OUTDATED; I need to update this! 8 | newtype ContType k r a b = Cont ( (b `k` r) -> (a `k` r)) -- a -> b -> r 9 | 10 | cont :: (Category k, AllAllowed k [a, b, r]) => (a `k` b) -> ContType k r a b 11 | cont f = Cont (. 
f) 12 | 13 | instance Category k => Category (ContType k r) where 14 | type Allowed (ContType k r) a = Allowed k a 15 | id = Cont id 16 | Cont g . Cont f = Cont (f . g) 17 | 18 | --instance Monoidal k => Monoidal (ContType k r) where 19 | -- (Cont f) `x` (Cont g) = Cont $ join . (f `x` g) . unjoin 20 | -- 21 | --instance Cartesian k => Cartesian (ContType k r) where 22 | -- type AllowedCarEx (ContType k r) a b = () 23 | -- type AllowedCarDup (ContType k r) a = (AllowedSeq k a (a, a) r, 24 | -- Allowed k a, 25 | -- AllowedCoCarIn k a a, 26 | -- Cocartesian k 27 | -- ) 28 | -- 29 | -- exl = Cont $ undefined 30 | -- exr = Cont $ undefined 31 | -- dup = Cont $ undefined 32 | -- 33 | --instance Cocartesian k => Cocartesian (ContType k r) where 34 | -- type AllowedCoCarIn (ContType k r) a b = () 35 | -- type AllowedCoCarJam (ContType k r) a = (AllowedSeq k (a, a) (r, r) r, 36 | -- Allowed k r, 37 | -- AllowedMon k a a r r, 38 | -- Allowed k a, 39 | -- AllowedCoCarJam k r, 40 | -- Monoidal k) 41 | -- inl = Cont $ undefined 42 | -- inr = Cont $ undefined 43 | -- jam = Cont $ join . dup 44 | -------------------------------------------------------------------------------- /src/Autodiff/D.hs: -------------------------------------------------------------------------------- 1 | module Autodiff.D where 2 | 3 | import Prelude hiding (id, (.)) 4 | import Control.Lens hiding ((#), para) 5 | import Control.Arrow 6 | 7 | import CategoricDefinitions 8 | import Autodiff.GAD 9 | import Autodiff.Dual 10 | import Autodiff.Additive 11 | import OnesLike 12 | 13 | --------------------------- 14 | 15 | 16 | newtype DType a b = D { 17 | _evalDType :: GADType (DualType (->+)) a b 18 | } deriving (Category, Monoidal, Cocartesian) 19 | 20 | makeLenses ''DType 21 | 22 | 23 | {- 24 | Manually deriving the Cartesian instance because we need to manually derive counit. 
25 | We need to manually derive counit because the only way to propagate 26 | tensor shape to the gradient computation is during runtime (see onesLike function definition). 27 | This is because we don't have statically known tensor dimensions 28 | -} 29 | instance Cartesian DType where 30 | type AllowedCar DType a = (AllowedCar (GADType (DualType (->+))) a, OnesLike a) 31 | exl = D exl 32 | exr = D exr 33 | dup = D dup 34 | counit = D $ GAD $ \a -> ((), Dual $ AddFun $ const (onesLike a)) 35 | 36 | instance Num s => NumCat DType s where 37 | negateC = D negateC 38 | addC = D addC 39 | mulC = D mulC 40 | increaseC a = D $ increaseC a 41 | 42 | instance Floating s => FloatCat DType s where 43 | expC = D expC 44 | 45 | instance Fractional s => FractCat DType s where 46 | recipC = D recipC 47 | 48 | --------------------------- 49 | 50 | grad' :: DType a b -> a -> b -> a 51 | grad' d a = (d ^. evalDType.evalGAD) a ^. (_2.evalDual.evalAF) 52 | 53 | -- just normal output of DType 54 | f :: DType a b -> a -> b 55 | f d a = (d ^. evalDType.evalGAD) a ^. _1 56 | 57 | -- grad is grad' with ones as the incoming derivative 58 | grad :: _ => DType a b -> a -> a 59 | grad dt' t = grad' (counit . dt') t () 60 | 61 | partiallyApply :: AllAdditive [a, b, c] => DType (a, b) c -> a -> DType b c 62 | partiallyApply dt a = D $ GAD $ \b -> second (.inr) $ (dt ^. 
evalDType.evalGAD) (a, b) 63 | 64 | 65 | --------------------------- 66 | 67 | p1 :: Num a => DType (a, a) a 68 | p1 = mulC 69 | 70 | p2 :: Num a => DType (a, a) a 71 | p2 = addC 72 | 73 | p3 :: (OnesLike a, Additive a, Num a) => DType ((a, a), a) a 74 | p3 = p2 .-- p1 -- composing two 'parametrized' functions 75 | 76 | output :: Double 77 | output = f p3 ((3, 5), 7) 78 | 79 | derivativeOutput :: ((Double, Double), Double) 80 | derivativeOutput = grad p3 ((3, 5), 7) 81 | -------------------------------------------------------------------------------- /src/Autodiff/Dual.hs: -------------------------------------------------------------------------------- 1 | module Autodiff.Dual where 2 | 3 | import Prelude hiding (id, (.), curry, uncurry) 4 | import Control.Lens hiding ((#), para) 5 | import Data.Kind (Type) 6 | 7 | import qualified Prelude as P 8 | 9 | import CategoricDefinitions 10 | 11 | newtype DualType (k :: Type -> Type -> Type) a b = Dual { 12 | _evalDual :: b `k` a 13 | } 14 | makeLenses ''DualType 15 | 16 | instance Category k => Category (DualType k) where 17 | type Allowed (DualType k) a = Allowed k a 18 | 19 | id = Dual id 20 | Dual g . Dual f = Dual (f . g) 21 | 22 | instance Monoidal k => Monoidal (DualType k) where 23 | Dual f `x` Dual g = Dual (f `x` g) 24 | assocL = Dual assocR 25 | assocR = Dual assocL 26 | unitorL = Dual unitorL' 27 | unitorL' = Dual unitorL 28 | swap = Dual swap 29 | 30 | instance (Cartesian k, Cocartesian k) => Cartesian (DualType k) where 31 | type AllowedCar (DualType k) a = AllowedCoCar k a 32 | 33 | exl = Dual inl 34 | exr = Dual inr 35 | dup = Dual jam 36 | counit = Dual unit 37 | 38 | instance Cartesian k => Cocartesian (DualType k) where 39 | type AllowedCoCar (DualType k) a = AllowedCar k a 40 | 41 | inl = Dual exl 42 | inr = Dual exr 43 | jam = Dual dup 44 | unit = Dual counit 45 | 46 | instance Scalable k a => Scalable (DualType k) a where 47 | scale s = Dual (scale s) -- is this okay? maybe scale 1/s? 
48 | 49 | instance FloatCat k s => FloatCat (DualType k) s where 50 | expC = Dual expC 51 | 52 | instance FractCat k s => FractCat (DualType k) s where 53 | recipC = Dual recipC 54 | 55 | -------------------------------------------------------------------------------- /src/Autodiff/GAD.hs: -------------------------------------------------------------------------------- 1 | module Autodiff.GAD where 2 | 3 | import Prelude hiding (id, (.)) 4 | import Data.Kind (Type) 5 | import Control.Lens hiding ((#), para) 6 | 7 | import CategoricDefinitions 8 | import Autodiff.Additive 9 | import Autodiff.Cont 10 | import Autodiff.Dual 11 | 12 | newtype GADType (k :: Type -> Type -> Type) a b = GAD { 13 | _evalGAD :: a -> (b, a `k` b) 14 | } 15 | 16 | makeLenses ''GADType 17 | 18 | linearD :: (a -> b) -> (a `k` b) -> GADType k a b 19 | linearD f f' = GAD $ \x -> (f x, f') 20 | 21 | instance Category k => Category (GADType k) where 22 | type Allowed (GADType k) a = (Additive a, Allowed k a) 23 | 24 | id = linearD id id 25 | GAD g . GAD f = GAD $ \a -> let (b, f') = f a 26 | (c, g') = g b 27 | in (c, g' . 
f') 28 | 29 | instance Monoidal k => Monoidal (GADType k) where 30 | GAD f `x` GAD g = GAD $ \(a, b) -> let (c, f') = f a 31 | (d, g') = g b 32 | in ((c, d), f' `x` g') 33 | assocL = linearD assocL assocL 34 | assocR = linearD assocR assocR 35 | unitorL = linearD unitorL unitorL 36 | unitorL' = linearD unitorL' unitorL' 37 | swap = linearD swap swap 38 | 39 | instance Cartesian k => Cartesian (GADType k) where 40 | type AllowedCar (GADType k) a = AllowedCar k a 41 | 42 | exl = linearD exl exl 43 | exr = linearD exr exr 44 | dup = linearD dup dup 45 | counit = linearD counit counit 46 | 47 | instance Cocartesian k => Cocartesian (GADType k) where 48 | type AllowedCoCar (GADType k) a = (AllowedCoCar k a, Allowed (GADType k) a) -- whatever the category k allows + additive 49 | 50 | inl = linearD inlF inl 51 | inr = linearD inrF inr 52 | jam = linearD jamF jam 53 | unit = linearD unitF unit 54 | 55 | -- Can the set of these constraints be shortened? 56 | -- negateC and addC differ from paper; 2nd argument to linearD is changed so the constraint NumCat k s isn't needed anymore 57 | instance (Num s, Scalable k s, Monoidal k, Cocartesian k, 58 | Allowed k (s, s), Allowed k s, AllowedCoCar k s) => NumCat (GADType k) s where 59 | negateC = linearD negateC (scale (-1)) -- this is where this differs from SimpleAD paper 60 | addC = linearD addC jam 61 | mulC = GAD $ \(a, b) -> (a * b, scale b \/ scale a) -- most of the instance constraints come from \/ 62 | increaseC a = linearD (increaseC a) id 63 | 64 | instance (Floating s, FloatCat k s, Scalable k s) => FloatCat (GADType k) s where 65 | expC = GAD $ \a -> let e = exp a 66 | in (e, scale e) 67 | 68 | instance (Fractional s, FractCat k s, Scalable k s) => FractCat (GADType k) s where 69 | recipC = GAD $ \a -> let r = recip a 70 | in (r, scale (-r*r)) 71 | -------------------------------------------------------------------------------- /src/CategoricDefinitions.hs: 
-------------------------------------------------------------------------------- 1 | module CategoricDefinitions where 2 | 3 | import Prelude hiding (id, (.)) 4 | import qualified Prelude as P 5 | import GHC.Exts 6 | import Data.Kind (Type) 7 | import Data.NumInstances.Tuple 8 | 9 | import Numeric.LinearAlgebra.Array 10 | import Numeric.LinearAlgebra.Array.Util 11 | 12 | {- 13 | Unfortunately, there is a known bug which prevents quantifying constraints involving type families. 14 | https://gitlab.haskell.org/ghc/ghc/issues/14860 15 | This results in all the functions below having a list of constraints AllAllowed k [...] 16 | -} 17 | 18 | -- Standard haskell way to define a category 19 | class Category (k :: Type -> Type -> Type) where 20 | type Allowed k a :: Constraint 21 | type Allowed k a = () 22 | 23 | id :: Allowed k a => a `k` a 24 | (.) :: AllAllowed k [a, b, c] => b `k` c -> a `k` b -> a `k` c 25 | 26 | -- By monoidal here we mean symmetric monoidal category 27 | class Category k => Monoidal (k :: Type -> Type -> Type) where 28 | -- unit object in haskell is () 29 | x :: AllAllowed k [a, b, c, d, (a, b), (c, d)] 30 | => (a `k` c) -> (b `k` d) -> ((a, b) `k` (c, d)) 31 | assocL :: AllAllowed k [a, b, c, (a, b), ((a, b), c), (b, c), (a, (b, c))] 32 | => ((a, b), c) `k` (a, (b, c)) 33 | assocR :: AllAllowed k [a, b, c, (a, b), ((a, b), c), (b, c), (a, (b, c))] 34 | => (a, (b, c)) `k` ((a, b), c) 35 | unitorL :: Allowed k a 36 | => ((), a) `k` a 37 | unitorL' :: Allowed k a -- inverse of unitorL 38 | => a `k` ((), a) 39 | -- could also potentially add right unitor and their inverses 40 | swap :: AllAllowed k [a, b, (a, b), (b, a)] 41 | => (a, b) `k` (b, a) 42 | 43 | class Monoidal k => Cartesian k where 44 | type AllowedCar k a :: Constraint 45 | type AllowedCar k a = () 46 | 47 | exl :: AllowedCar k b => (a, b) `k` a 48 | exr :: AllowedCar k a => (a, b) `k` b 49 | dup :: AllowedCar k a => a `k` (a, a) 50 | counit :: AllowedCar k a => a `k` () 51 | 52 | class 
Category k => Cocartesian k where 53 | type AllowedCoCar k a :: Constraint 54 | type AllowedCoCar k a = Allowed k a 55 | 56 | inl :: AllowedCoCar k b => a `k` (a, b) 57 | inr :: AllowedCoCar k a => b `k` (a, b) 58 | jam :: AllowedCoCar k a => (a, a) `k` a 59 | unit :: AllowedCoCar k a => () `k` a 60 | 61 | {- 62 | This is a hacky way of modelling a weak 2-category which is needed for Para. 63 | Notice the tick' after class name 64 | (.*) corresponds to id 65 | (.-) corresponds to . (sequential comp) 66 | (.|) corresponds to `x` (parallel comp) 67 | 68 | Ideally, we'd like to specify in code that given any symmetric monoidal category C we can construct _another_ symmetric monoidal category called Para(C), but sometimes dreams will have to be dreams. 69 | -} 70 | 71 | class Category' (k :: Type -> Type -> Type -> Type) where 72 | type Allowed' k a :: Constraint 73 | type Allowed' k a = () 74 | 75 | (.*) :: (Allowed' k a) => k () a a 76 | (.-) :: AllAllowed' k [p, q, a, b, c, (q, p), (p, q), ((p, q), a), ((q, p), a), (q, (p, a)), (q, b), (p, a)] 77 | => k q b c -> k p a b -> k (p, q) a c 78 | 79 | -- the constraints are unfortunately very ugly 80 | class Category' k => Monoidal' (k :: Type -> Type -> Type -> Type) where 81 | (.|) :: AllAllowed' k [a, b, c, d, p, q, ((p, q), (a, b)), ((p, a), (q, b)), (c, d), (p, a), (q, b), (p, (q, (a, b))), (p, ((q, a), b)), (p, ((a, q), b)), (p, (a, (q, b))), (a, (q, b)), ((a, q), b), (a, q), ((q, a), b), (q, a), (q, (a, b)), (a, b), (p, q)] 82 | => k p a c -> k q b d -> k (p, q) (a, b) (c, d) 83 | 84 | -- Sequential composition of parametrized functions 85 | (.--) :: (Monoidal k, _) 86 | => (q, b) `k` c 87 | -> (p, a) `k` b 88 | -> ((p, q), a) `k` c 89 | g .-- f = g . (id `x` f) . assocL . (swap `x` id) 90 | 91 | -- Parallel composition of parametrized functions 92 | (.||) :: (Monoidal k, _) 93 | => (p, a) `k` b 94 | -> (q, c) `k` d 95 | -> ((p, q), (a, c)) `k` (b, d) 96 | f .|| g = f `x` g . 
swapParam 97 | 98 | 99 | {- 100 | Swap map for monoidal product of parametrized functions, basically bracket bookkeeping. 101 | Read from top to bottom 102 | (a b) (c d) 103 | a (b, (c, d)) 104 | a ((b, c), d) 105 | a ((c, b), d) 106 | a (c, (b, d)) 107 | (a c) (b d) 108 | -} 109 | swapParam :: (Monoidal k, _) => ((a, b), (c, d)) `k` ((a, c), (b, d)) 110 | swapParam = assocR . (id `x` assocL) . (id `x` (swap `x` id)) . (id `x` assocR) . assocL 111 | 112 | 113 | -------------------------------------- 114 | 115 | class Additive a where 116 | zero :: a 117 | (^+) :: a -> a -> a 118 | 119 | class NumCat (k :: Type -> Type -> Type) a where 120 | negateC :: a `k` a 121 | addC :: (a, a) `k` a 122 | mulC :: (a, a) `k` a 123 | increaseC :: a -> a `k` a -- curried add, add a single number 124 | 125 | class FloatCat (k :: Type -> Type -> Type) a where 126 | expC :: a `k` a 127 | 128 | class FractCat (k :: Type -> Type -> Type) a where 129 | recipC :: a `k` a 130 | 131 | class Scalable (k :: Type -> Type -> Type) a where 132 | scale :: a -> (a `k` a) 133 | 134 | type Tensor = NArray None Double 135 | 136 | ------------------------------------- 137 | -- Instances 138 | ------------------------------------- 139 | 140 | instance Category (->) where 141 | id = \a -> a 142 | g . 
f = \a -> g (f a) 143 | 144 | instance Monoidal (->) where 145 | f `x` g = \(a, b) -> (f a, g b) 146 | assocL = \((a, b), c) -> (a, (b, c)) 147 | assocR = \(a, (b, c)) -> ((a, b), c) 148 | unitorL = \((), a) -> a 149 | unitorL' = \a -> ((), a) 150 | swap = \(a, b) -> (b, a) 151 | 152 | instance Cartesian (->) where 153 | exl = \(a, _) -> a 154 | exr = \(_, b) -> b 155 | dup = \a -> (a, a) 156 | counit = \_ -> () 157 | 158 | instance Num a => NumCat (->) a where 159 | negateC = negate 160 | addC = uncurry (+) 161 | mulC = uncurry (*) 162 | increaseC a = (+a) 163 | 164 | instance Floating a => FloatCat (->) a where 165 | expC = exp 166 | 167 | instance Fractional a => FractCat (->) a where 168 | recipC = recip 169 | 170 | ------------------------------------- 171 | 172 | instance Additive () where 173 | zero = () 174 | () ^+ () = () 175 | 176 | instance {-# OVERLAPPABLE #-} Num a => Additive a where 177 | zero = 0 178 | (^+) = (+) 179 | 180 | instance (Additive a, Additive b) => Additive (a, b) where 181 | zero = (zero, zero) 182 | (a1, b1) ^+ (a2, b2) = (a1 ^+ a2, b1 ^+ b2) 183 | 184 | 185 | ------------------------------------- 186 | 187 | (/\) :: (Cartesian k, _) => b `k` c -> b `k` d -> b `k` (c, d) 188 | f /\ g = (f `x` g) . dup 189 | 190 | (\/) :: (Monoidal k, Cocartesian k, _) => a `k` c -> b `k` c -> (a, b) `k` c 191 | f \/ g = jam . (f `x` g) 192 | 193 | fork :: (Cartesian k, _) => (b `k` c, b `k` d) -> b `k` (c, d) 194 | fork (f, g) = f /\ g 195 | 196 | unfork :: (Cartesian k, _) => b `k` (c, d) -> (b `k` c, b `k` d) 197 | unfork h = (exl . h, exr . h) 198 | 199 | join :: (Monoidal k, Cocartesian k, _) => (a `k` c, b `k` c) -> (a, b) `k` c 200 | join (f, g) = f \/ g 201 | 202 | unjoin :: (Cocartesian k, _) => (a, b) `k` c -> (a `k` c, b `k` c) 203 | unjoin h = (h . inl, h . inr) 204 | 205 | divide :: (Monoidal k, FractCat k a, _) => k (a, a) a 206 | divide = mulC . 
(id `x` recipC) 207 | ------------------------------------- 208 | 209 | type family AllAllowed k xs :: Constraint where 210 | AllAllowed k '[] = () 211 | AllAllowed k (x : xs) = (Allowed k x, AllAllowed k xs) 212 | 213 | type family AllAllowed' k xs :: Constraint where 214 | AllAllowed' k '[] = () 215 | AllAllowed' k (x : xs) = (Allowed' k x, AllAllowed' k xs) 216 | -------------------------------------------------------------------------------- /src/Examples.hs: -------------------------------------------------------------------------------- 1 | module Examples where 2 | 3 | import Control.Lens hiding (para) 4 | import Prelude hiding (id, (.)) 5 | import System.Random 6 | import Control.Monad 7 | 8 | import Numeric.LinearAlgebra.Array 9 | import Numeric.LinearAlgebra.Array.Util 10 | 11 | import CategoricDefinitions 12 | import Autodiff.GAD 13 | import Autodiff.D 14 | import Ops 15 | import Para 16 | import TrainUtils 17 | import TensorUtils 18 | import OnesLike 19 | 20 | {- 21 | f (p, q) a = (p + q*a) 22 | -} 23 | linRegFn :: (Additive a, Num a) => DType ((a, a), a) a 24 | linRegFn = addC . (id `x` mulC) . assocL 25 | 26 | -- returns (inp, out) pair we want to learn 27 | sampleData :: IO (Double, Double) 28 | sampleData = do 29 | x <- randomIO :: IO Double 30 | return (x, 3 + 7*x) 31 | 32 | run :: IO () 33 | run = do 34 | initialParams <- randomIO :: IO (Double, Double) 35 | let initialLearner = Learner initialParams (Para linRegFn) sgd 36 | sampler = zip3 [0..] (repeat sampleData) (repeat sqDiff) 37 | 38 | finalLearner <- foldM trainStepWithCost initialLearner (take 10000 sampler) 39 | putStrLn $ "Starting network parameters:\n" ++ show initialParams 40 | putStrLn $ "Final network parameters:\n" ++ show (finalLearner ^. 
p) 41 | 42 | return () 43 | 44 | 45 | sampleDataTensor :: IO (Tensor, Tensor) 46 | sampleDataTensor = do 47 | d <- randomTensor [5, 3] "bf" 48 | return (d, 3 + 7 * d) 49 | 50 | -- multiply two arrays you get and sum the "b" axis 51 | l :: _ => ParaDType (Tensor, Tensor) Tensor Tensor 52 | l = Para $ sumAxes "b" . linRegFn 53 | 54 | run1 :: IO _ 55 | run1 = do 56 | p1 <- randomTensor [3, 2] "fo" 57 | p2 <- randomTensor [2] "o" 58 | let initialLearner = Learner (p1, p2) l sgd 59 | sampler = zip3 [0..] (repeat sampleDataTensor) (repeat (sumAxes "b" . sqDiff)) 60 | 61 | 62 | finalLearner <- foldM trainStepWithCost initialLearner (take 1 sampler) 63 | putStrLn $ "Starting network parameters:\n" ++ arrShow (p1, p2) 64 | putStrLn $ "Final network parameters:\n" ++ arrShow (finalLearner ^. p) 65 | 66 | return finalLearner 67 | -------------------------------------------------------------------------------- /src/OnesLike.hs: -------------------------------------------------------------------------------- 1 | module OnesLike where 2 | 3 | import CategoricDefinitions 4 | import Numeric.LinearAlgebra.Array 5 | import Numeric.LinearAlgebra.Array.Util 6 | 7 | -- this is all done at runtime, unfortunately 8 | class OnesLike a where 9 | onesLike :: a -> a 10 | 11 | instance OnesLike () where 12 | onesLike _ = () 13 | 14 | instance {-# OVERLAPPABLE #-} Num a => OnesLike a where 15 | onesLike _ = 1 16 | 17 | instance {-# OVERLAPPING #-} OnesLike Tensor where 18 | -- Assumes single-letter index names 19 | onesLike c = let d = map iDim $ dims c 20 | ch = concatMap iName $ dims c 21 | in d # repeat 1 ! 
ch 22 | 23 | ds # cs = listArray ds cs :: Array Double 24 | -------------------------------------------------------------------------------- /src/Ops.hs: -------------------------------------------------------------------------------- 1 | module Ops where 2 | 3 | import Prelude hiding ((.), id) 4 | import Autodiff.D 5 | import Autodiff.GAD 6 | import Autodiff.Additive 7 | import Autodiff.Dual 8 | import CategoricDefinitions 9 | import OnesLike 10 | 11 | sigmoid :: (OnesLike a, Additive a, Floating a) => DType a a 12 | sigmoid = recipC . increaseC 1 . expC . negateC 13 | 14 | relu :: DType Double Double 15 | relu = D $ GAD $ \a -> let b = if a < 0 then 0 else 1 16 | in (b*a, Dual $ AddFun (*b)) 17 | 18 | sgd :: Fractional p => (p, p) -> p 19 | sgd (p, pGrad) = p - 0.1 * pGrad 20 | 21 | sqDiff :: (OnesLike a, Additive a, Num a) => DType (a, a) a 22 | sqDiff = mulC . dup . (id \/ negateC) 23 | 24 | 25 | {- 26 | Some random function: 27 | 28 | e^a + a * b 29 | | 30 | / \ jam 31 | | | 32 | | / \ exp `x` mulC 33 | | (| |) assocL 34 | (| |) | 35 | ( \/ ) | dup `x` id 36 | ( | ) | 37 | a b 38 | -} 39 | myf :: (OnesLike a, Additive a, Floating a) => DType (a, a) a 40 | myf = jam . (expC `x` mulC) . assocL . 
(dup `x` id) 41 | -------------------------------------------------------------------------------- /src/Para.hs: -------------------------------------------------------------------------------- 1 | module Para where 2 | 3 | import Prelude hiding (id, (.)) 4 | import Control.Lens hiding ((#), para) 5 | import GHC.Exts (Constraint) 6 | import Data.Kind (Type) 7 | 8 | import CategoricDefinitions 9 | import Autodiff.GAD 10 | import Autodiff.Additive 11 | import Autodiff.Dual 12 | import Autodiff.D 13 | import TensorUtils 14 | import OnesLike 15 | import AsymmetricLens 16 | 17 | ------------------------------------------------------------------- 18 | 19 | newtype ParaType (k :: Type -> Type -> Type) p a b = Para { 20 | _fn :: (p, a) `k` b 21 | } 22 | makeLenses ''ParaType 23 | 24 | instance Monoidal k => Category' (ParaType k) where 25 | type Allowed' (ParaType k) a = (Allowed k a) 26 | (.*) = Para unitorL 27 | (Para g) .- (Para f) = Para (g .-- f) 28 | 29 | instance Monoidal k => Monoidal' (ParaType k) where 30 | (Para f) .| (Para g) = Para (f .|| g) 31 | 32 | ------------------------------------------------------------------- 33 | 34 | type ParaDType = ParaType DType 35 | 36 | -- type ParaLType = ParaType (AsymmetricLens DType) 37 | 38 | data LearnerType p a b = Learner { 39 | _p :: p, 40 | _para :: ParaDType p a b, 41 | _optimizer :: (p, p) -> p 42 | } 43 | makeLenses ''LearnerType 44 | 45 | instance Category' LearnerType where 46 | type Allowed' LearnerType a = Allowed' ParaDType a 47 | 48 | (.*) = Learner () (.*) snd 49 | (Learner p2 f2 o2) .- (Learner p1 f1 o1) = 50 | Learner (p1, p2) (f2 .- f1) (o1 `x` o2 . swapParam) 51 | 52 | instance Monoidal' LearnerType where 53 | (Learner p1 f1 o1) .| (Learner p2 f2 o2) 54 | = Learner (p1, p2) (f1 .| f2) (o1 `x` o2 . 
swapParam) 55 | -------------------------------------------------------------------------------- /src/TensorUtils.hs: -------------------------------------------------------------------------------- 1 | module TensorUtils where 2 | 3 | import Prelude hiding ((.), id) 4 | import System.Random 5 | import Control.Monad 6 | 7 | import Numeric.LinearAlgebra.Array 8 | import Numeric.LinearAlgebra.Array.Util 9 | 10 | import CategoricDefinitions 11 | import Autodiff.GAD 12 | import Autodiff.D 13 | import Autodiff.Dual 14 | import Autodiff.Additive 15 | 16 | import OnesLike 17 | 18 | 19 | {- 20 | All tensor stuff is pretty much ad-hoc, a complete rewrite is eventually needed 21 | -} 22 | 23 | class ArrShow a where 24 | arrShow :: a -> String 25 | 26 | instance {-# OVERLAPPABLE #-} Show a => ArrShow a where 27 | arrShow = show 28 | 29 | instance ArrShow Tensor where 30 | arrShow = formatFixed 2 31 | 32 | instance (ArrShow a, ArrShow b) => ArrShow (a, b) where 33 | arrShow (a, b) = let ls = replicate 10 '-' ++ "\n" 34 | in ls ++ arrShow a ++ ", \n" ++ arrShow b ++ "\n" ++ ls 35 | 36 | 37 | sh :: ArrShow a => a -> IO () 38 | sh a = putStr $ arrShow a ++ "\n" 39 | 40 | infixl 8 −| 41 | --(−|) :: Name → [Array Double ] → Array Double 42 | (−|) = index 43 | 44 | 45 | axes :: Tensor -> String -> (String, [Int]) 46 | axes t axesNames = let toSum = filter (\idx -> head (iName idx) `elem` axesNames) (dims t) 47 | in (map (head . iName) toSum, map iDim toSum) 48 | 49 | 50 | class TensorContractable k where 51 | sumAxes :: String -> Tensor `k` Tensor 52 | 53 | instance TensorContractable (->) where 54 | sumAxes axesNames = \t -> let (names, lengths) = axes t axesNames 55 | u = lengths # repeat 1 ! names 56 | in t*u 57 | 58 | 59 | instance (Scalable k Tensor, TensorContractable k) 60 | => TensorContractable (GADType k) where 61 | sumAxes axesNames = GAD $ \t -> let (names, lengths) = axes t axesNames 62 | u = lengths # repeat 1 ! 
names 63 | in (t*u, scale u) 64 | 65 | instance (TensorContractable k) => TensorContractable (DualType k) where 66 | sumAxes axesNames = Dual (sumAxes axesNames) 67 | 68 | instance TensorContractable DType where 69 | sumAxes axesNames = D (sumAxes axesNames) 70 | 71 | instance TensorContractable (->+) where 72 | sumAxes axesNames = AddFun (sumAxes axesNames) 73 | 74 | --meanAxes :: _ => String -> Tensor -> Tensor 75 | --meanAxes axesNames t = let (names, lengths) = axes t axesNames 76 | -- n = product lengths 77 | -- v = scalar . fromIntegral $ n 78 | -- in divide (sumAxes axesNames t, v) 79 | 80 | -- rand array with given shape and axis names 81 | randomTensor :: [Int] -> String -> IO Tensor 82 | randomTensor xs cs = do 83 | rs <- replicateM (product xs) randomIO 84 | return $ xs # rs ! cs 85 | -------------------------------------------------------------------------------- /src/TrainUtils.hs: -------------------------------------------------------------------------------- 1 | module TrainUtils where 2 | 3 | import Control.Lens hiding (para) 4 | import System.Random 5 | import Control.Monad 6 | import Prelude hiding (id, (.)) 7 | import Numeric.LinearAlgebra.Array 8 | import Numeric.LinearAlgebra.Array.Util 9 | 10 | import CategoricDefinitions 11 | import Autodiff.GAD 12 | import Autodiff.Dual 13 | import Autodiff.Additive 14 | import Autodiff.D 15 | import Autodiff.Cont 16 | import Ops 17 | import Para 18 | import TensorUtils 19 | import OnesLike 20 | 21 | ------------------------------------------- 22 | showNNInfo :: (ArrShow p, ArrShow a, ArrShow b) 23 | => Int -> a -> b -> LearnerType p a b -> IO () 24 | showNNInfo n a b nn = do 25 | putStrLn "-------------------------" 26 | putStrLn $ "Step " ++ arrShow n 27 | putStrLn $ "p\n" ++ arrShow (nn ^. 
p) 28 | putStrLn $ "a\n" ++ arrShow a 29 | putStrLn $ "b\n" ++ arrShow b 30 | 31 | 32 | -- | Supervised learning training 33 | -- Takes in a Learner, input-output pairs and a cost function 34 | -- it partially applies the output to the cost function and composes the result inside learner 35 | trainStepWithCost :: (OnesLike c, _) 36 | => LearnerType p a b -> (Int, IO (a, b), DType (b, b) c) -> IO (LearnerType p a b) 37 | trainStepWithCost l (step, dataSampler, cost) = do 38 | (i, o) <- dataSampler 39 | when (step `mod` 100 == 0) $ showNNInfo step i o l 40 | let cost' = partiallyApply cost o 41 | (pGrad, _) = grad (cost' . (l ^. para . fn)) (l ^. p, i) 42 | return $ l & p .~ (l ^. optimizer) (l ^. p, pGrad) 43 | 44 | 45 | instance (Random a, Random b) => Random (a, b) where 46 | random gen1 = let (x, gen2) = random gen1 47 | (y, gen3) = random gen2 48 | in ((x, y), gen3) 49 | 50 | randomR ((x1, y1), (x2, y2)) gen1 = let (x, gen2) = randomR (x1, x2) gen1 51 | (y, gen3) = randomR (y1, y2) gen2 52 | in ((x, y), gen3) 53 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by 'stack init' 2 | # 3 | # Some commonly used options have been documented as comments in this file. 4 | # For advanced use and comprehensive documentation of the format, please see: 5 | # https://docs.haskellstack.org/en/stable/yaml_configuration/ 6 | 7 | # Resolver to choose a 'specific' stackage snapshot or a compiler version. 8 | # A snapshot resolver dictates the compiler version and the set of packages 9 | # to be used for project dependencies. For example: 10 | # 11 | # resolver: lts-3.5 12 | # resolver: nightly-2015-09-21 13 | # resolver: ghc-7.10.2 14 | # resolver: ghcjs-0.1.0_ghc-7.10.2 15 | # 16 | # The location of a snapshot can be provided as a file or url. 
Stack assumes 17 | # a snapshot provided as a file might change, whereas a url resource does not. 18 | # 19 | # resolver: ./custom-snapshot.yaml 20 | # resolver: https://example.com/snapshots/2018-01-01.yaml 21 | resolver: lts-13.13 22 | 23 | # User packages to be built. 24 | # Various formats can be used as shown in the example below. 25 | # 26 | # packages: 27 | # - some-directory 28 | # - https://example.com/foo/bar/baz-0.0.2.tar.gz 29 | # - location: 30 | # git: https://github.com/commercialhaskell/stack.git 31 | # commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a 32 | # - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a 33 | # subdirs: 34 | # - auto-update 35 | # - wai 36 | packages: 37 | - . 38 | # Dependency packages to be pulled from upstream that are not in the resolver 39 | # using the same syntax as the packages field. 40 | # (e.g., acme-missiles-0.3) 41 | extra-deps: 42 | - '../hTensor' 43 | - '../tensor' 44 | - tensors-0.1.0 45 | 46 | # Override default flag values for local packages and extra-deps 47 | # flags: {} 48 | 49 | # Extra package databases containing global packages 50 | # extra-package-dbs: [] 51 | 52 | # Control whether we use the GHC we find on the path 53 | # system-ghc: true 54 | # 55 | # Require a specific version of stack, using version ranges 56 | # require-stack-version: -any # Default 57 | # require-stack-version: ">=1.7" 58 | # 59 | # Override the architecture used by stack, especially useful on Windows 60 | # arch: i386 61 | # arch: x86_64 62 | # 63 | # Extra directories used by stack for building 64 | # extra-include-dirs: [/path/to/dir] 65 | # extra-lib-dirs: [/path/to/dir] 66 | # 67 | # Allow a newer minor version of GHC than the snapshot specifies 68 | # compiler-check: newer-minor 69 | -------------------------------------------------------------------------------- /test/Spec.hs: -------------------------------------------------------------------------------- 1 | main :: IO 
() 2 | main = putStrLn "Test suite not yet implemented" 3 | --------------------------------------------------------------------------------