├── src ├── par-transformers │ ├── LICENSE │ ├── tests │ │ └── Main.hs │ ├── Control │ │ └── Par │ │ │ └── ST │ │ │ ├── UVec2.hs │ │ │ ├── StorableVec2.hs │ │ │ └── Vec2.hs │ └── par-transformers.cabal ├── lvish-apps │ ├── graphs │ ├── pbbs │ │ ├── benchmarks │ │ │ └── graphs │ │ │ │ └── gen_chains_graph.hs │ │ ├── LICENSE │ │ └── pbbs-haskell.cabal │ ├── cfa │ │ ├── Makefile.temp │ │ ├── k-cfa-lvish-example.cabal │ │ └── LICENSE │ ├── Makefile │ └── run-lvish-benchmarks.cabal ├── par-collections │ ├── tests │ │ ├── TestHelpers.hs │ │ ├── Dummy.hs │ │ └── test-par-collections.cabal │ ├── Data │ │ ├── Par.hs │ │ └── Par │ │ │ ├── Set.hs │ │ │ ├── Map.hs │ │ │ └── Traversable.hs │ ├── default.nix │ ├── LICENSE │ └── par-collections.cabal ├── par-schedulers │ ├── Setup.hs │ ├── par-schedulers.cabal │ └── Control │ │ └── Par │ │ └── Scheds │ │ ├── Sparks.hs │ │ ├── Trace.hs │ │ └── Direct.hs ├── lvish-graph-algorithms │ ├── Setup.hs │ ├── lvish-graph-algorithms.cabal │ └── src │ │ └── Data │ │ └── LVar │ │ └── Graph │ │ └── MSF.hs ├── par-mergesort │ ├── cbits │ │ ├── sort_int64.c │ │ └── sort_int32.c │ ├── par-mergesort.cabal │ ├── bench │ │ └── Main.hs │ └── Control │ │ └── Par │ │ └── MergeSort.hs ├── lvish │ ├── Control │ │ └── LVish │ │ │ ├── Internal │ │ │ ├── Unsafe.hs │ │ │ └── Types.hs │ │ │ └── DeepFrz.hs │ ├── Data │ │ ├── Concurrent │ │ │ ├── Counter.hs │ │ │ ├── AlignedIORef.hs │ │ │ └── Bag.hs │ │ ├── UtilInternal.hs │ │ └── LVar │ │ │ ├── Pair.hs │ │ │ ├── Future.hs │ │ │ └── Generic.hs │ ├── extract_codedoc_examples.sh │ ├── default.nix │ ├── tests │ │ ├── Makefile │ │ ├── CtrieMapTests.hs │ │ ├── Main.hs │ │ ├── ThreadTest.hs │ │ ├── SLMapTests.hs │ │ ├── GenericTests.hs │ │ ├── LogicalTests.hs │ │ ├── CommonMapWriteTests.hs │ │ ├── PureMapTests.hs │ │ └── BulkRetryTests.hs │ ├── Util │ │ └── Makefile │ ├── LICENSE │ ├── README.md │ └── TODO.md ├── par-classes │ ├── default.nix │ ├── LICENSE │ ├── Data │ │ └── Splittable │ │ │ └── Class.hs │ └── par-classes.cabal └── lvish-extra │ ├── tests │ ├── Main.hs │ ├── PNCounterTests.hs │ ├── MaxPosIntTests.hs │ ├── SatMapTests.hs │ ├── LayeredSatMapTests.hs │ ├── CommonMapWriteTests.hs │ ├── MemoTests.hs │ └── AddRemoveSetTests.hs │ ├── stack.yaml │ ├── Data │ └── LVar │ │ ├── NatArray │ │ └── Unsafe.hs │ │ ├── MaxPosInt.hs │ │ ├── PNCounter.hs │ │ ├── AddRemoveSet.hs │ │ └── Memo.hs │ ├── LICENSE │ └── Experimental │ ├── Scrap.hs │ └── Monotonic.hs ├── archived_old └── fhpc13-lvars │ ├── benchmarks │ ├── data │ │ ├── bf_traverse_benchmark_data.png │ │ ├── README.md │ │ ├── makegraph.py │ │ └── bf_traverse_benchmark_data.csv │ ├── benchmark.sh │ ├── bf-traverse-monad-par │ │ ├── bf-traverse-monad-par.cabal │ │ └── bf-traverse-monad-par.hs │ ├── bf-traverse-LVar │ │ ├── bf-traverse-LVar.cabal │ │ └── bf-traverse-LVar.hs │ ├── bf-traverse-Strategies │ │ ├── bf-traverse-Strategies.cabal │ │ └── bf-traverse-Strategies.hs │ └── Makefile │ ├── Common.hs │ ├── Data │ └── LVar │ │ ├── PairPure.hs │ │ ├── PairIO.hs │ │ ├── PairScalable.hs │ │ ├── SetIO.hs │ │ ├── SetScalable.hs │ │ └── SetPure.hs │ └── fhpc13-lvars.cabal ├── .gitmodules ├── .gitignore ├── .travis_install.sh ├── stack.yaml ├── README.md ├── .jenkins_script.sh ├── .travis.yml ├── stack-cnf.yaml └── .run_benchmarks.sh /src/par-transformers/LICENSE: -------------------------------------------------------------------------------- 1 | ../lvish/LICENSE -------------------------------------------------------------------------------- /src/lvish-apps/graphs: 
-------------------------------------------------------------------------------- 1 | ../pbbs-haskell/benchmarks/graphs/ -------------------------------------------------------------------------------- /src/par-collections/tests/TestHelpers.hs: -------------------------------------------------------------------------------- 1 | ../../lvish/tests/TestHelpers.hs -------------------------------------------------------------------------------- /src/par-schedulers/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /src/lvish-graph-algorithms/Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /src/par-collections/tests/Dummy.hs: -------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | main :: IO () 4 | main = return () 5 | -------------------------------------------------------------------------------- /src/par-mergesort/cbits/sort_int64.c: -------------------------------------------------------------------------------- 1 | #include <stdint.h> 2 | 3 | #define ELM int64_t 4 | #define ELM_SUFFIX(tok) tok ## _int64 5 | 6 | #include "./seqcore.c" 7 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/data/bf_traverse_benchmark_data.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iu-parfunc/lvars/HEAD/archived_old/fhpc13-lvars/benchmarks/data/bf_traverse_benchmark_data.png -------------------------------------------------------------------------------- /src/par-mergesort/cbits/sort_int32.c: -------------------------------------------------------------------------------- 1 | #include <stdint.h> 2 | 3 | // typedef int32_t ELM; 4 | 5 | #define ELM int32_t 6 | #define ELM_SUFFIX(tok) tok ## _int32 7 | 8 | #include "./seqcore.c" 9 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "haskell/concurrent-skiplist"] 2 | path = deps/concurrent-skiplist 3 | url = git@github.com:rrnewton/concurrent-skiplist.git 4 | [submodule "haskell/ctrie"] 5 | path = deps/ctrie 6 | url = git@github.com:iu-parfunc/ctrie.git 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | redex/LVish/compiled/ 2 | dist 3 | cabal-dev 4 | docs_examples 5 | *.o 6 | *.hi 7 | *.chi 8 | *.chs.h 9 | .DS_Store 10 | *~ 11 | *cabal.sandbox.config 12 | .cabal-sandbox 13 | test-results.xml 14 | tags 15 | *.stack-work 16 | untracked_junk 17 | -------------------------------------------------------------------------------- /src/lvish-apps/pbbs/benchmarks/graphs/gen_chains_graph.hs: -------------------------------------------------------------------------------- 1 | 2 | import System.Environment (getArgs) 3 | import Control.LVish 4 | 5 | main = do 6 | [arg] <- getArgs 7 | let n = read arg 8 | 9 | putStrLn "AdjacencyGraph" 10 | print n 11 | print (n-1) 12 | 13 | for_ (0,n) $ \ ix -> print ix 14 | for_ (0,n-1) $ \ ix -> print (ix+1) 15 | 16 | return () 17 | 
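-- A worked example of the generated format (illustrative only, and assuming
-- that `for_` here iterates over the half-open index range [0,n)): running the
-- generator with n = 3 should print the PBBS "AdjacencyGraph" header, the
-- vertex count, the edge count, one row offset per vertex, and then one target
-- per edge, i.e. the chain 0 -> 1 -> 2:
--
--   AdjacencyGraph
--   3
--   2
--   0
--   1
--   2
--   1
--   2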
-------------------------------------------------------------------------------- /src/par-transformers/tests/Main.hs: -------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | import qualified CancelTests 4 | import qualified STTests 5 | import Test.Tasty 6 | 7 | -------------------------------------------------------------------------------- 8 | 9 | main :: IO () 10 | main = defaultMain alltests 11 | 12 | alltests :: TestTree 13 | alltests = testGroup "par-transformers tests" 14 | [ STTests.tests 15 | , CancelTests.tests 16 | ] 17 | -------------------------------------------------------------------------------- /src/lvish/Control/LVish/Internal/Unsafe.hs: -------------------------------------------------------------------------------- 1 | 2 | -- | For debugging purposes, it can be useful to lift an IO computation into an LVish @Par@ monad. 3 | -- 4 | -- This module is imported for instances only (specifically, the `MonadIO` instance). 5 | 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | 8 | module Control.LVish.Internal.Unsafe() where 9 | 10 | import Control.LVish.Internal 11 | import Control.Monad.IO.Class 12 | import qualified Control.LVish.Internal.SchedIdempotent as L 13 | 14 | instance MonadIO (Par e s) where 15 | liftIO = WrapPar . L.liftIO 16 | -------------------------------------------------------------------------------- /src/lvish/Data/Concurrent/Counter.hs: -------------------------------------------------------------------------------- 1 | -- | A simple, non-scalable counter. 2 | 3 | module Data.Concurrent.Counter(Counter, new, inc, dec, poll) where 4 | 5 | import Data.IORef 6 | 7 | type Counter = IORef Int 8 | 9 | new :: IO Counter 10 | new = newIORef 0 11 | 12 | -- TODO: at least switch to use fetch-and-add... 13 | inc :: Counter -> IO () 14 | inc c = atomicModifyIORef' c $ \n -> (n+1,()) 15 | 16 | dec :: Counter -> IO () 17 | dec c = atomicModifyIORef' c $ \n -> (n-1,()) 18 | 19 | -- | Is the counter (transiently) zero? 20 | poll :: Counter -> IO Bool 21 | poll c = do 22 | n <- readIORef c 23 | return (n == 0) 24 | -------------------------------------------------------------------------------- /src/par-collections/Data/Par.hs: -------------------------------------------------------------------------------- 1 | 2 | -- | Convenience module that reexports various parallel combinators. 3 | 4 | module Data.Par 5 | ( 6 | -- * Parallel consumption of /O(1)/-splittable data 7 | module Data.Par.Splittable, 8 | -- * Ranges of numbers, aka iteration spaces 9 | module Data.Par.Range, 10 | -- * Basic per-element parallelism for Traversables 11 | module Data.Par.Traversable 12 | ) 13 | where 14 | 15 | 16 | import Data.Par.Traversable 17 | import Data.Par.Splittable 18 | import Data.Par.Range hiding (pmapReduce, pmapReduce_) 19 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Just a utility script to handle building and running the benchmarks. 4 | 5 | # LK: TODO: figure out if HSBencher needs/uses the TRIALS env var. 
6 | 7 | set -e 8 | 9 | make rand_data 10 | 11 | if [ $(hostname) = "hive.soic.indiana.edu" ]; 12 | then 13 | lock_for_experiments 1000 14 | export TRIALS=5 15 | fi 16 | 17 | if [ $(hostname) = "lenny" ] || [ $(hostname) = "landin.local" ]; # Lindsey's machines 18 | then 19 | export TRIALS=1 20 | fi 21 | 22 | make benchmark.run 23 | ./benchmark.run 24 | 25 | if [ $(hostname) = "hive.soic.indiana.edu" ]; 26 | then 27 | unlock_for_experiments 28 | fi 29 | 30 | -------------------------------------------------------------------------------- /src/par-classes/default.nix: -------------------------------------------------------------------------------- 1 | # This file was auto-generated by cabal2nix. Please do NOT edit manually! 2 | 3 | { haskellPackages ? (import <nixpkgs> {}).haskellPackages 4 | }: 5 | 6 | with haskellPackages; 7 | cabal.mkDerivation (self: { 8 | pname = "par-classes"; 9 | version = "1.1"; 10 | src = builtins.filterSource 11 | (path: (type: baseNameOf path != ".git")) 12 | ./.; 13 | sha256 = "1yjqhym8n2ycavzhcqvywwav3r2hsjadidkwyvz4pdhn5q138aap"; 14 | buildDepends = [ deepseq ]; 15 | meta = { 16 | description = "Type classes providing a general interface to various @Par@ monads"; 17 | license = self.stdenv.lib.licenses.bsd3; 18 | platforms = self.ghc.meta.platforms; 19 | }; 20 | }) 21 | -------------------------------------------------------------------------------- /src/par-schedulers/par-schedulers.cabal: -------------------------------------------------------------------------------- 1 | name: par-schedulers 2 | version: 0.1.0.0 3 | -- synopsis: 4 | -- description: 5 | -- license: 6 | author: Ömer Sinan Ağacan 7 | maintainer: omeragacan@indiana.edu 8 | -- copyright: 9 | -- category: 10 | build-type: Simple 11 | -- extra-source-files: 12 | cabal-version: >=1.10 13 | 14 | library 15 | exposed-modules: 16 | Control.Par.Scheds.Direct 17 | Control.Par.Scheds.Sparks 18 | Control.Par.Scheds.Trace 19 | 20 | build-depends: 21 | base >=4.7 && <4.9, 22 | monad-par >=0.3 && <0.4, 23 | par-classes 24 | 25 | default-language: Haskell2010 26 | ghc-options: -Wall -O2 27 | -------------------------------------------------------------------------------- /src/lvish/Data/Concurrent/AlignedIORef.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE NamedFieldPuns #-} 2 | 3 | -- | Cacheline-aligned wrappers around IORefs. Currently doing nothing. 4 | 5 | module Data.Concurrent.AlignedIORef (AlignedIORef(), newAlignedIORef, ref) 6 | where 7 | 8 | import Data.IORef 9 | 10 | data AlignedIORef a = AlignedIORef { 11 | -- pad out to 64 bytes to avoid false sharing (assuming 4 byte words and 64 12 | -- byte cachelines) 13 | -- padding :: [IORef a], 14 | ref :: {-# UNPACK #-} !(IORef a) 15 | } 16 | 17 | newAlignedIORef :: a -> IO (AlignedIORef a) 18 | newAlignedIORef v = do 19 | ref <- newIORef v 20 | -- padding <- replicateM 15 $ newIORef v 21 | return $!
AlignedIORef { 22 | -- padding, 23 | ref 24 | } 25 | -------------------------------------------------------------------------------- /src/par-schedulers/Control/Par/Scheds/Sparks.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE KindSignatures #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | {-# LANGUAGE TypeFamilies #-} 6 | 7 | module Control.Par.Scheds.Sparks 8 | ( Par 9 | , runPar 10 | , runParPoly 11 | ) where 12 | 13 | import Control.Monad (void) 14 | import System.IO.Unsafe (unsafePerformIO) 15 | 16 | import Control.Par.Class 17 | import qualified Control.Par.Class.Unsafe as PC 18 | import Control.Par.EffectSigs 19 | 20 | newtype Par (e :: EffectSig) s a = 21 | Sparks { unwrapSparks :: S.Par a } 22 | 23 | newtype SparksFuture s a = SparksFuture { unwrapSparksFuture :: S.Future a } 24 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/Main.hs: -------------------------------------------------------------------------------- 1 | -- #!/usr/bin/env runghc -i.. 2 | 3 | -- | This module aggregates all the unit tests in this directory. 4 | 5 | module Main where 6 | 7 | import Test.Tasty (TestTree, testGroup, defaultMain) 8 | 9 | import qualified MemoTests 10 | import qualified ArrayTests 11 | import qualified SatMapTests 12 | --import qualified LayeredSatMapTests 13 | import qualified MaxPosIntTests 14 | import qualified AddRemoveSetTests 15 | 16 | main :: IO () 17 | main = defaultMain alltests 18 | 19 | --alltests :: [TestTree] 20 | alltests :: TestTree 21 | alltests = testGroup "allTests" 22 | [ ArrayTests.tests 23 | , MemoTests.tests 24 | , MaxPosIntTests.tests 25 | , SatMapTests.tests 26 | -- , LayeredSatMapTests.tests 27 | , AddRemoveSetTests.tests 28 | ] 29 | -------------------------------------------------------------------------------- /src/par-collections/default.nix: -------------------------------------------------------------------------------- 1 | # This file was auto-generated by cabal2nix. Please do NOT edit manually! 2 | 3 | { haskellPackages ? (import {}).haskellPackages 4 | # , parClasses ? import 5 | , parClasses ? (import ../par-classes {}) 6 | }: 7 | 8 | with haskellPackages; 9 | haskellPackages.cabal.mkDerivation (self: { 10 | pname = "par-collections"; 11 | version = "1.2"; 12 | sha256 = "1111111111111111111111111111111111111111111111111111"; 13 | src = ./.; 14 | buildDepends = [ 15 | cereal deepseq mtl primitive random transformers vector 16 | parClasses 17 | ]; 18 | meta = { 19 | description = "Generic parallel combinators for data and iteration spaces"; 20 | license = self.stdenv.lib.licenses.bsd3; 21 | platforms = self.ghc.meta.platforms; 22 | }; 23 | }) 24 | -------------------------------------------------------------------------------- /src/lvish/Control/LVish/Internal/Types.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DeriveDataTypeable #-} 2 | 3 | -- | A simple internal module to factor out types that are used in many places. 4 | module Control.LVish.Internal.Types 5 | ( LVishException(..) ) 6 | where 7 | 8 | import Data.Typeable (Typeable) 9 | import Control.Exception 10 | 11 | -- | All @LVar@s share a common notion of exceptions. 12 | -- The two common forms of exception currently are conflicting-put and put-after-freeze. 13 | -- There are also errors that correspond to particular invariants for particular LVars. 
14 | data LVishException = ConflictingPutExn String 15 | | PutAfterFreezeExn String 16 | | LVarSpecificExn String 17 | deriving (Show, Read, Eq, Ord, Typeable) 18 | 19 | instance Exception LVishException 20 | -------------------------------------------------------------------------------- /src/lvish-apps/cfa/Makefile.temp: -------------------------------------------------------------------------------- 1 | # Temporary shorthands 2 | 3 | all: one two 4 | 5 | one: 6 | ghc -fforce-recomp -rtsopts -threaded -O2 k-CFA-lvish.hs -o k-CFA-lvish.exe 7 | ghc -DINPLACE -fforce-recomp -rtsopts -threaded -O2 k-CFA_lvish.hs -o k-CFA-lvish_inplace.exe 8 | 9 | two: 10 | ghc -fforce-recomp -rtsopts -threaded -O2 k-CFA.hs -o k-CFA.exe 11 | 12 | 13 | prof: 14 | ghc -prof -fprof-auto -fforce-recomp -rtsopts -threaded -O2 k-CFA.hs -o k-CFA_prof.exe 15 | 16 | three: 17 | ghc -DINPLACE -DNONSCALABLE -fforce-recomp -rtsopts -threaded -O2 k-CFA-lvish.hs -o k-CFA-lvish_inplace.exe 18 | ghc -DINPLACE -DLOCKFREE -fforce-recomp -rtsopts -threaded -O2 k-CFA-lvish.hs -o k-CFA_lockfree_inplace.exe 19 | ghc -DINPLACE -DHYBRID -fforce-recomp -rtsopts -threaded -O2 k-CFA-lvish.hs -o k-CFA_hybrid_inplace.exe 20 | -------------------------------------------------------------------------------- /.travis_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Assumes that "stack" is in the path: 4 | 5 | set -xe 6 | 7 | # In this mode we just grab the latest from hackage: 8 | if [ ${STACK_RESOLVER%-*} = "default" ]; then 9 | mv stack.yaml stack-${STACK_RESOLVER}.yaml 10 | # which -a ghc 11 | # ghc --version 12 | # stack --resolver=${STACK_RESOLVER} solver --modify-stack-yaml 13 | else 14 | cat stack.yaml | grep -v resolver > stack-${STACK_RESOLVER}.yaml 15 | echo "resolver: ${STACK_RESOLVER}" >> stack-${STACK_RESOLVER}.yaml 16 | rm -f stack.yaml # Just to make sure. 17 | fi 18 | 19 | cat stack-${STACK_RESOLVER}.yaml 20 | # Sweet and simple. Install upstream dependencies, including GHC: 21 | stack setup --no-terminal 22 | # stack solver --update-config 23 | stack build 24 | stack test --only-snapshot --no-terminal 25 | -------------------------------------------------------------------------------- /src/lvish/extract_codedoc_examples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf docs_examples 4 | mkdir docs_examples 5 | 6 | set -e 7 | 8 | FILES="Control/LVish/DeepFrz.hs Control/LVish.hs" 9 | 10 | for fl in $FILES; do 11 | echo Extracting from $fl 12 | filename=`basename $fl` 13 | base="${filename%.*}" 14 | target="./docs_examples/$base.lhs" 15 | output="./docs_examples/$base.out" 16 | echo " Base file name < $base > It is your job to ensure these don't collide!" 17 | grep -E "^\w*> " $fl > $target 18 | echo " Wrote `wc -l $target | awk '{ print $1 }'` lines of output to $target" 19 | echo " Running with 'runghc', output in $output ..." 20 | runghc $target &> $output 21 | echo " Done running, `wc -l $output | awk '{ print $1 }'` lines of output." 22 | done 23 | 24 | echo "Done with all extracted code examples." 25 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-monad-par/bf-traverse-monad-par.cabal: -------------------------------------------------------------------------------- 1 | name: bf-traverse-monad-par 2 | version: 0.1.0.0 3 | synopsis: Cabal file for building the bf-traverse-monad-par benchmark executable. 
A part of the benchmarking infrastructure for the fhpc13-lvars library. 4 | author: Lindsey Kuper and Ryan Newton 5 | maintainer: lkuper@cs.indiana.edu 6 | category: Concurrency 7 | build-type: Simple 8 | cabal-version: >=1.8 9 | 10 | executable bf-traverse-monad-par 11 | main-is: bf-traverse-monad-par.hs 12 | ghc-options: -O2 -threaded -rtsopts 13 | hs-source-dirs: .., . 14 | build-depends: base ==4.6.*, split ==0.2.*, containers ==0.5.*, bytestring ==0.10.*, time==1.4.*, rdtsc ==1.3.*, vector ==0.10.*, monad-par >=0.3.4.4, monad-par-extras >=0.3.3 15 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-LVar/bf-traverse-LVar.cabal: -------------------------------------------------------------------------------- 1 | name: bf-traverse-LVar 2 | version: 0.1.0.0 3 | synopsis: Cabal file for building the bf-traverse-LVar benchmark executable. A part of the benchmarking infrastructure for the fhpc13-lvars library. 4 | author: Lindsey Kuper and Ryan Newton 5 | maintainer: lkuper@cs.indiana.edu 6 | category: Concurrency 7 | build-type: Simple 8 | cabal-version: >=1.8 9 | 10 | executable bf-traverse-LVar 11 | main-is: bf-traverse-LVar.hs 12 | hs-source-dirs: .., . 13 | ghc-options: -O2 -threaded -rtsopts 14 | build-depends: base ==4.6.*, split ==0.2.*, containers ==0.5.*, bytestring ==0.10.*, time ==1.4.*, rdtsc ==1.3.*, vector ==0.10.*, deepseq ==1.3.*, fhpc13-lvars ==0.1.0.0, monad-par >=0.3.4.4, monad-par-extras >=0.3.3 15 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-Strategies/bf-traverse-Strategies.cabal: -------------------------------------------------------------------------------- 1 | name: bf-traverse-Strategies 2 | version: 0.1.0.0 3 | synopsis: Cabal file for building the bf-traverse-Strategies benchmark executable. A part of the benchmarking infrastructure for the fhpc13-lvars library. 4 | author: Lindsey Kuper and Ryan Newton 5 | maintainer: lkuper@cs.indiana.edu 6 | category: Concurrency 7 | build-type: Simple 8 | cabal-version: >=1.8 9 | 10 | executable bf-traverse-Strategies 11 | main-is: bf-traverse-Strategies.hs 12 | ghc-options: -O2 -threaded -rtsopts 13 | hs-source-dirs: .., . 14 | -- Notes: the `deepseq` and `parallel` packages are specific to this executable. 
15 | build-depends: base ==4.6.*, split ==0.2.*, containers ==0.5.*, bytestring ==0.10.*, time==1.4.*, rdtsc ==1.3.*, vector ==0.10.*, deepseq ==1.3.*, parallel ==3.2.* 16 | -------------------------------------------------------------------------------- /src/lvish-apps/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | # fixing it to this table: 4 | TABLEID=1YxEmNpeUoGCBptDK0ddtomC_oK2IVH1f2M89IIA 5 | CREDENTIALS= --clientid=905767673358.apps.googleusercontent.com --clientsecret=2a2H57dBggubW1_rqglC7jtK 6 | 7 | # BENCHARGS= --trials=3 --runid=d005_1373532273 --skipto=106 8 | # BENCHARGS= --trials=3 --runid=d003_1373528191 --skipto=137 9 | BENCHARGS= --trials=3 10 | 11 | run: build 12 | ./run_benchmarks.exe --fusion-upload=$(TABLEID) --name=LVish $(CREDENTIALS) 13 | 14 | fullrun: build 15 | ./run_benchmarks.exe $(BENCHARGS) --fusion-upload=$(TABLEID) --name=LVish $(CREDENTIALS) CFA 16 | 17 | basic: build 18 | QUICK=1 ./run_benchmarks.exe 19 | 20 | # -threaded is important here: 21 | build: 22 | ghc -threaded run_benchmarks.hs -o run_benchmarks.exe 23 | 24 | prof: 25 | ghc -O0 -fforce-recomp -prof -fprof-auto -fprof-cafs -fprof-auto-calls -threaded run_benchmarks.hs -o run_benchmarks.exe 26 | 27 | clean: 28 | rm -f *.hi *.o *.exe 29 | 30 | distclean: clean 31 | rm -f *.dat *.log *.dat.* *.log.* 32 | 33 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | 2 | # flags: {} 3 | 4 | packages: 5 | - src/lvish/ 6 | - src/par-classes/ 7 | - src/par-collections/ 8 | - src/par-collections/tests/ 9 | - src/par-mergesort/ 10 | - src/par-transformers/ 11 | - deps/concurrent-skiplist/ 12 | - deps/ctrie 13 | 14 | # TODO: Revive: 15 | # - haskell/lvish-apps/ 16 | # - haskell/lvish-apps/cfa/ 17 | # - haskell/lvish-apps/pbbs/ 18 | # - haskell/lvish-graph-algorithms/ 19 | # - haskell/fhpc13-lvars/ 20 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-LVar/ 21 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-monad-par/ 22 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-Strategies/ 23 | 24 | # TODO: Add: 25 | # haskell/par-schedulers 26 | 27 | extra-deps: 28 | # Soon: 29 | # - tslogger-0.1.0.1 30 | - atomic-primops-0.8.0.4 31 | - chaselev-deque-0.5.0.5 32 | - bits-atomic-0.1.3 33 | - thread-local-storage-0.1.0.3 34 | - pcg-random-0.1.3.4 35 | - git: https://github.com/iu-parfunc/tslogger.git 36 | commit: bfa8d2b52f0d41439863b15863baa0ca2ef613d9 37 | 38 | resolver: lts-6.13 39 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/PNCounterTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE DataKinds #-} 3 | 4 | -- | Tests for the Data.LVar.PNCounter module. 
5 | 6 | module PNCounterTests(tests, runTests) where 7 | 8 | import Test.Tasty.HUnit 9 | import Test.Tasty (TestTree, defaultMain, testGroup) 10 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 11 | import Test.Tasty.TH (testGroupGenerator) 12 | import qualified Test.HUnit as HU 13 | import TestHelpers as T 14 | 15 | import qualified Data.Set as S 16 | 17 | import qualified Data.LVar.PNCounter as PNC 18 | 19 | import Control.LVish 20 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, Trvrsbl, runParThenFreeze, runParThenFreezeIO) 21 | 22 | -------------------------------------------------------------------------------- 23 | 24 | tests :: TestTree 25 | tests = $(testGroupGenerator) 26 | 27 | runTests :: IO () 28 | runTests = defaultMain tests 29 | 30 | -------------------------------------------------------------------------------- 31 | 32 | -- TODO: Write tests. 33 | -------------------------------------------------------------------------------- /src/lvish/default.nix: -------------------------------------------------------------------------------- 1 | # This file was auto-generated by cabal2nix. Please do NOT edit manually! 2 | 3 | { haskellPackages ? (import {}).haskellPackages 4 | , parClasses ? (import ../par-classes {}) 5 | , parCollections ? (import ../par-collections {}) 6 | }: 7 | 8 | with haskellPackages; 9 | cabal.mkDerivation (self: { 10 | pname = "lvish"; 11 | version = "2.0.2"; 12 | src = ./.; 13 | configureFlags = "--ghc-option=-j4"; 14 | noHaddock = true; 15 | sha256 = ""; 16 | # doCheck= false; 17 | buildDepends = [ 18 | async atomicPrimops bitsAtomic chaselevDeque deepseq lattices missingForeign 19 | parClasses parCollections random threadLocalStorage transformers 20 | vector 21 | ]; 22 | testDepends = [ 23 | HUnit parClasses parCollections QuickCheck random testFramework 24 | testFrameworkHunit testFrameworkQuickcheck2 testFrameworkTh text 25 | time vector 26 | ]; 27 | meta = { 28 | description = "Parallel scheduler, LVar data structures, and infrastructure to build more"; 29 | license = self.stdenv.lib.licenses.bsd3; 30 | platforms = self.ghc.meta.platforms; 31 | }; 32 | }) 33 | -------------------------------------------------------------------------------- /src/par-collections/Data/Par/Set.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE BangPatterns #-} 3 | {-# LANGUAGE TypeFamilies #-} 4 | {-# OPTIONS_GHC -fno-warn-orphans #-} 5 | 6 | -- | Provide instances for parallel handling of common, pure Haskell data structures. 
7 | 8 | module Data.Par.Set 9 | () where 10 | 11 | import Data.Splittable.Class (Split(..)) 12 | import qualified Control.Par.Class as PC 13 | import qualified Data.Set as S 14 | import qualified Data.Foldable as F 15 | 16 | -------------------------------------------------------------------------------- 17 | 18 | instance PC.Generator (S.Set a) where 19 | type ElemOf (S.Set a) = a 20 | {-# INLINE foldM #-} 21 | foldM = F.foldlM 22 | {-# INLINE fold #-} 23 | fold = F.foldl' 24 | 25 | 26 | #ifdef NEWCONTAINERS 27 | 28 | instance Eq a => Split (S.Set a) where 29 | {-# INLINE split #-} 30 | split = S.splitRoot 31 | 32 | -- TODO: Opt in to the trivial instance of ParFoldable, using Split-based mapreduce: 33 | -- instance PC.ParFoldable (M.Map k v) where 34 | -- pmapFold = Sp.pmapReduce 35 | #else 36 | -- instance PC.ParFoldable (M.Map k v) where 37 | #endif 38 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/Makefile: -------------------------------------------------------------------------------- 1 | # A Makefile for generating the input data we use for benchmarks. 2 | # It's easier to use this instead of just building everything with 3 | # cabal. 4 | 5 | VERTICES = 40000 6 | EDGES = 320000 7 | 8 | RAND_DATA = /tmp/rand 9 | 10 | PBBSDIR = ../../../pbbs/testData/graphData 11 | 12 | # Generate random local graphs with $VERTICES vertices and $EDGES 13 | # edges, and dimension 5. The graph is generated so that the 14 | # probability of each edge out of a vertex is biased to nearby 15 | # vertices. 16 | 17 | # The resulting file ends up in /tmp/rand_E_V where E is the number of 18 | # edges and V is the number of vertices. 19 | 20 | default: rand_data run_benches 21 | 22 | rand_data: 23 | (cd $(PBBSDIR); make randLocalGraph) 24 | $(PBBSDIR)/randLocalGraph -m $(EDGES) -d 5 $(VERTICES) $(RAND_DATA)_$(EDGES)_$(VERTICES) 25 | 26 | benchmark.run: benchmark.hs 27 | cabal install hsbencher 28 | ghc -threaded benchmark.hs -o benchmark.run 29 | 30 | run_benches: benchmark.run 31 | ./benchmark.run 32 | 33 | clean_data: 34 | -rm -f /tmp/rand* 35 | 36 | clean: 37 | rm -f benchmark.run *.hi *.o 38 | -------------------------------------------------------------------------------- /src/lvish-extra/stack.yaml: -------------------------------------------------------------------------------- 1 | 2 | # flags: {} 3 | 4 | packages: 5 | - . 
6 | - ../lvish/ 7 | - ../par-classes/ 8 | - ../par-collections/ 9 | # - ../par-collections/tests/ 10 | # - haskell/par-mergesort/ 11 | # - haskell/par-transformers/ 12 | - ../../deps/concurrent-skiplist 13 | - ../../deps/ctrie/ 14 | - ../../deps/concurrent-skiplist 15 | 16 | - location: 17 | git: https://github.com/iu-parfunc/tslogger.git 18 | commit: f4036354ca9c56c127bcbdeb5498db5b4893c10b 19 | 20 | # TODO: Revive: 21 | # - haskell/lvish-apps/ 22 | # - haskell/lvish-apps/cfa/ 23 | # - haskell/lvish-apps/pbbs/ 24 | # - haskell/lvish-graph-algorithms/ 25 | # - haskell/fhpc13-lvars/ 26 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-LVar/ 27 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-monad-par/ 28 | # - haskell/fhpc13-lvars/benchmarks/bf-traverse-Strategies/ 29 | 30 | # TODO: Add: 31 | # haskell/par-schedulers 32 | 33 | extra-deps: 34 | # Soon: 35 | # - tslogger-0.1.0.1 36 | - atomic-primops-0.8.0.3 37 | - chaselev-deque-0.5.0.5 38 | - bits-atomic-0.1.3 39 | - thread-local-storage-0.1.0.3 40 | - pcg-random-0.1.3.4 41 | 42 | resolver: lts-5.11 43 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/data/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarking results 2 | 3 | This directory contains raw log files from the benchmarks we ran for 4 | our FHPC 2013 submission, plus some code for visualizing the data. 5 | 6 | We compared two programs (Strategies and LVarPure versions of 7 | bf_traverse), varying the number of cores (1, 2, 3, 4), and the 8 | microseconds of work done per node (1, 2, 4, 8, 16, and 32), for a 9 | total of 2 * 4 * 6 = 48 configurations. We ran each configuration for 10 | 5 trials. 11 | 12 | The file `results_basalt.dat` contains a summary of the results: the 13 | minimum, median, and maximum running time for each trial, as well as 14 | the minimum, median, and maximum productivity for each trial. The 15 | file `bench_basalt.log` contains the complete log of results. 16 | (`basalt` is the name of the machine these tests were run on.) 17 | 18 | `bf_traverse_benchmark_data.csv` is a pared-down, easier-to-parse 19 | version of `results_basalt.dat`, and `makegraph.py` is the script that 20 | produces `bf_traverse_benchmark_data.png`. It produces four subplots, 21 | one for each number of cores. The 4-core version appears in our 22 | paper. 
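For reference, the configuration space described above can be written down directly. This is only an illustrative sketch (the `configs` name is not taken from the actual benchmark scripts):

```haskell
-- All 48 (program, cores, microseconds-of-work-per-node) configurations;
-- each one was run for 5 trials.
configs :: [(String, Int, Int)]
configs = [ (prog, cores, work)
          | prog  <- ["Strategies", "LVarPure"]
          , cores <- [1, 2, 3, 4]
          , work  <- [1, 2, 4, 8, 16, 32] ]
```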
-------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Common.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | 3 | module Common (forkWithExceptions) where 4 | 5 | import Control.Exception as E 6 | import Control.Concurrent hiding (yield) 7 | import Text.Printf 8 | 9 | -- | Exceptions that walk up the fork tree of threads: 10 | forkWithExceptions :: (IO () -> IO ThreadId) -> String -> IO () -> IO ThreadId 11 | forkWithExceptions forkit descr action = do 12 | parent <- myThreadId 13 | forkit $ do 14 | tid <- myThreadId 15 | E.catch action 16 | (\ e -> 17 | case E.fromException e of 18 | Just E.ThreadKilled -> do 19 | -- Killing worker threads is normal now when exception handling, so this chatter is restricted to debug mode: 20 | #ifdef DEBUG_LVAR 21 | printf "\nThreadKilled exception inside child thread, %s (not propagating!): %s\n" (show tid) (show descr) 22 | #endif 23 | return () 24 | _ -> do 25 | #ifdef DEBUG_LVAR 26 | printf "\nException inside child thread %s, %s: %s\n" (show descr) (show tid) (show e) 27 | #endif 28 | E.throwTo parent (e :: E.SomeException) 29 | ) 30 | 31 | -------------------------------------------------------------------------------- /src/lvish-extra/Data/LVar/NatArray/Unsafe.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE GADTs #-} 2 | 3 | -- | Unsafe operations on NatArray. NOT for end-user applications. 4 | 5 | module Data.LVar.NatArray.Unsafe 6 | ( NatArray(..), unsafePeek ) 7 | where 8 | import qualified Data.Vector.Storable.Mutable as M 9 | import Foreign.Storable (sizeOf, Storable) 10 | -- import System.IO.Unsafe (unsafeDupablePerformIO) 11 | import Control.LVish.Internal as LI 12 | 13 | ------------------------------------------------------------------------------------------ 14 | 15 | -- | An array of bit-fields with a monotonic OR operation. This can be used to model 16 | -- a set of Ints by setting the vector entries to zero or one, but it can also 17 | -- model other finite lattices for each index. 18 | -- newtype NatArray s a = NatArray (LVar s (M.IOVector a) (Int,a)) 19 | data NatArray s a = Storable a => NatArray !(LVar s (M.IOVector a) (Int,a)) 20 | 21 | unsafePeek :: (Num a, Eq a) => NatArray s a -> Int -> Par e s (Maybe a) 22 | unsafePeek (NatArray lv) ix = do 23 | peek <- LI.liftIO $ M.read (LI.state lv) ix 24 | case peek of 25 | -- TODO: generalize: 26 | 0 -> return Nothing 27 | x -> return $! Just x 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LVish library and dependencies 2 | 3 | [![Hackage page (downloads and API reference)][hackage-lvish]][hackage] 4 | 5 | 6 | Build Status: 7 | 8 | * Travis: [![Build Status](https://travis-ci.org/iu-parfunc/lvars.svg?branch=master)](https://travis-ci.org/iu-parfunc/lvars) 9 | * Jenkins: [![Build Status](http://tester-lin.soic.indiana.edu:8080/buildStatus/icon?job=LVish-implementation-2.0)](http://tester-lin.soic.indiana.edu:8080/job/LVish-implementation-2.0/) 10 | 11 | This repository is the home of the [LVish](http://hackage.haskell.org/package/lvish) Haskell library for programming with monotonically-growing concurrent data structures, also known as LVars. More information can be found along with the main library, which is found under [haskell/lvish](haskell/lvish). 
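For a quick taste of the programming model, here is a minimal sketch modeled on the package's own test suite (module names and effect-signature details vary between lvish versions, so treat this as illustrative rather than definitive):

```haskell
import Control.LVish                       -- Par, runPar, fork
import Control.LVish.DeepFrz (runParThenFreeze)
import qualified Data.LVar.PureMap as IM

-- Two concurrent, monotonic inserts into a shared map.  The map is frozen
-- (made safely readable) only after the whole parallel computation finishes,
-- so the observable result is deterministic.
example :: String
example = show $ runParThenFreeze $ do
  mp <- IM.newEmptyMap
  fork $ IM.insert "left"  (3 :: Int) mp
  fork $ IM.insert "right" (4 :: Int) mp
  return mp
```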
12 | 13 | (Looking for the data-race detector that accompanied our FHPC '13 paper? It's [here](https://github.com/lkuper/lvar-race-detector). Looking for PLT Redex models of LVar calculi? They're [here](https://github.com/lkuper/lvar-semantics).) 14 | 15 | 16 | [hackage-lvish]: http://img.shields.io/hackage/v/lvish.svg 17 | [hackage]: http://hackage.haskell.org/package/lvish 18 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/PairPure.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | 3 | module Data.LVar.PairPure 4 | ( 5 | IPair, 6 | newPair, 7 | putFst, 8 | putSnd, 9 | getFst, 10 | getSnd, 11 | ) where 12 | import LVarTracePure 13 | 14 | ------------------------------------------------------------------------------ 15 | -- IPairs implemented on top of LVars: 16 | ------------------------------------------------------------------------------ 17 | 18 | type IPair a b = LVar (IVarContents a, IVarContents b) 19 | 20 | newPair :: Par (IPair a b) 21 | newPair = newLV (IVC Nothing, 22 | IVC Nothing) 23 | 24 | putFst :: IPair a b -> a -> Par () 25 | putFst lv !elt = putLV lv (IVC (Just elt), IVC Nothing) 26 | 27 | putSnd :: IPair a b -> b -> Par () 28 | putSnd lv !elt = putLV lv (IVC Nothing, IVC (Just elt)) 29 | 30 | getFst :: IPair a b -> Par a 31 | getFst lv = getLV lv test 32 | where 33 | test (IVC (Just x),_) = Just x 34 | test (IVC Nothing,_) = Nothing 35 | 36 | getSnd :: IPair a b -> Par b 37 | getSnd lv = getLV lv test 38 | where 39 | test (_,IVC (Just x)) = Just x 40 | test (_,IVC Nothing) = Nothing 41 | 42 | -------------------------------------------------------------------------------- /src/lvish-apps/run-lvish-benchmarks.cabal: -------------------------------------------------------------------------------- 1 | -- Initial lvish-apps.cabal generated by cabal init. For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | name: lvish-apps 5 | version: 0.1.0.0 6 | -- synopsis: 7 | -- description: 8 | -- license: BSD3 9 | -- license-file: LICENSE 10 | author: Ryan Newton 11 | maintainer: rrnewton@gmail.com 12 | -- copyright: 13 | -- category: 14 | build-type: Simple 15 | -- extra-source-files: 16 | cabal-version: >=1.10 17 | 18 | executable run-lvish-benchmarks 19 | main-is: run_benchmarks.hs 20 | other-extensions: CPP, BangPatterns, DeriveGeneric 21 | build-depends: base >=4.6 && <4.7, deepseq >=1.3 && <1.4, mtl >=2.1 && <2.2, time >=1.4 && <1.5, 22 | containers >=0.5 && <0.6, GenericPretty >=1.2 && <1.3, random >=1.0 && <1.1, 23 | test-framework >=0.8 && <0.9, test-framework-hunit >=0.3 && <0.4, HUnit >=1.2 && <1.3, 24 | hsbencher, directory, 25 | lvish, pbbs-haskell 26 | -- hs-source-dirs: 27 | default-language: Haskell2010 28 | -------------------------------------------------------------------------------- /src/lvish/tests/Makefile: -------------------------------------------------------------------------------- 1 | # A simple convenience for building tests without Cabal 2 | # (and WITH debugging enabled). 
3 | #------------------------------------------------------ 4 | 5 | # LIBSRC = $(shell find ../Data/ -name "*.hs") \ 6 | # $(shell find ../Control/ -name "*.hs") 7 | LIBSRC = 8 | 9 | ARGS = -DGET_ONCE=1 -rtsopts -threaded -DDEBUG_LVAR $(GHC_ARGS) 10 | ARGS += -O2 -eventlog 11 | # -fforce-recomp 12 | 13 | ALLEXES = AddRemoveSetTests.exe ArrayTests.exe GenericTests.exe LVishAndIVar.exe LogicalTests.exe PureMapTests.exe SLMapTests.exe MaxPosIntTests.exe MemoTests.exe PNCounterTests.exe SNZITests.exe SetTests.exe SkipListTests.exe BulkRetryTests.exe Main.exe Main_wfailing.exe Current.exe 14 | 15 | .SUFFIXES: .hs .exe 16 | 17 | all: $(ALLEXES) 18 | 19 | prof: 20 | $(MAKE) MapTests.exe 21 | rm -f MapTests.exe 22 | GHC_ARGS="-prof -auto-all -osuf=po" $(MAKE) MapTests.exe 23 | 24 | .hs.exe: CommonMapTests.hs 25 | ghc $(ARGS) -i.. -main-is $(^:.hs=.runTests) $^ -o $@ 26 | 27 | main: Main.exe Main_wfailing.exe 28 | Main.exe: 29 | ghc $(ARGS) -i.. Main.hs -o Main.exe 30 | 31 | Main_wfailing.exe: 32 | ghc -DFAILING_TESTS $(ARGS) -i.. Main.hs -o Main_wfailing.exe 33 | 34 | clean: 35 | rm -f *.hi *.o $(ALLEXES) 36 | -------------------------------------------------------------------------------- /.jenkins_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NOTE: Passes through extra args to the major cabal install command. 4 | # Also uses these environment vars, if available: 5 | # * JENKINS_GHC 6 | # * STACK_FLAGS 7 | # * NOTEST 8 | # * CABAL 9 | # * EXTRAPKGS -- useful for including packages in the one-big-install 10 | 11 | set -e 12 | set -x 13 | 14 | SHOWDETAILS=streaming 15 | 16 | # Just use which stack is in scope: 17 | STACK=stack 18 | # STACK=stack-1.0.4.2 19 | which -a $STACK 20 | 21 | # Always make sure the benchmarks build, even if we don't run them: 22 | CFG=" --bench --no-run-benchmarks " 23 | 24 | if [ "$STACK_RESOLVER" != "" ] && [ "$STACK_RESOLVER" != "default" ]; then 25 | CFG+=" --resolver=$STACK_RESOLVER " 26 | fi 27 | 28 | for flg in $STACK_FLAGS; do 29 | CFG+=" --flag=*:${flg} " 30 | done 31 | 32 | if [ "$PROF" == "" ] || [ "$PROF" == "0" ]; then 33 | CFG="$CFG --no-executable-profiling --no-library-profiling $DISABLE_EXEC_PROF" 34 | else 35 | CFG="$CFG --executable-profiling --library-profiling $ENABLE_EXEC_PROF" 36 | fi 37 | 38 | if [ "$NOTEST" == "" ]; then 39 | CFG="$CFG --test " 40 | fi 41 | 42 | echo "Running stack version "`$STACK --version`" with options: $CFG" 43 | 44 | stack --no-system-ghc --install-ghc build $CFG 45 | -------------------------------------------------------------------------------- /src/lvish/Util/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | DBGOPTS= -keep-tmp-files -dsuppress-module-prefixes -ddump-to-file -ddump-core-stats -ddump-simpl-stats -dcore-lint \ 4 | -dcmm-lint -ddump-ds -ddump-simpl -ddump-stg -ddump-asm -ddump-bcos -ddump-cmm -ddump-opt-cmm -ddump-inlinings \ 5 | 6 | BASICOPTS= -rtsopts -fforce-recomp -O2 -threaded 7 | 8 | OPTS= $(DBGOPTS) $(BASICOPTS) 9 | 10 | all: 11 | rm -f *.o *.hi bugged.exe unbugged.exe 12 | rm -rf ./bugged_dumps ./unbugged_dumps/ 13 | ghc -DACTIVATE_BUG $(OPTS) PBBS.hs -main-is Util.PBBS.t4 -o bugged.exe 14 | mkdir -p ./bugged_dumps/ 15 | mv *.dump* ./bugged_dumps/ 16 | ghc $(OPTS) PBBS.hs -main-is Util.PBBS.t4 -o unbugged.exe 17 | mkdir -p ./unbugged_dumps/ 18 | mv *.dump* ./unbugged_dumps/ 19 | time ./unbugged.exe 20 | time ./bugged.exe 21 | 22 | unbugged: 23 | # ghc $(BASICOPTS) PBBS.hs 
-main-is Util.PBBS.t2 -o unbugged_mmap.exe 24 | # ghc $(BASICOPTS) PBBS.hs -main-is Util.PBBS.t4 -o unbugged_readFile.exe 25 | ghc $(BASICOPTS) PBBS.hs -main-is Util.PBBS.t5 -o unbugged_mmap.exe 26 | 27 | small: 28 | ghc $(BASICOPTS) PBBS.hs -main-is Util.PBBS.t0 -o small_mmap.exe 29 | 30 | 31 | dbg: 32 | ghc $(BASICOPTS) PBBS.hs -main-is Util.PBBS.t0 -o small_mmap.exe 33 | ghc $(BASICOPTS) -osuf=po -prof PBBS.hs -main-is Util.PBBS.t0 -o small_mmap_dbg.exe 34 | -------------------------------------------------------------------------------- /src/par-schedulers/Control/Par/Scheds/Trace.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE KindSignatures #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | {-# LANGUAGE TypeFamilies #-} 6 | 7 | module Control.Par.Scheds.Trace 8 | ( Par 9 | , runPar 10 | , runParPoly 11 | ) where 12 | 13 | import System.IO.Unsafe (unsafePerformIO) 14 | 15 | import Control.Par.Class 16 | import qualified Control.Par.Class.Unsafe as PC 17 | import Control.Par.EffectSigs 18 | 19 | import qualified Control.Monad.Par.Scheds.Trace as T 20 | 21 | newtype Par (e :: EffectSig) s a = 22 | Trace { unwrapTrace :: T.Par a } 23 | 24 | newtype TraceIVar s a = TraceIVar { unwrapTraceIVar :: T.IVar a } 25 | 26 | instance PC.ParMonad Par where 27 | pbind (Trace p1) p2 = Trace $ p1 >>= unwrapTrace . p2 28 | preturn = Trace . return 29 | fork = Trace . T.fork . unwrapTrace 30 | internalLiftIO = return . unsafePerformIO 31 | 32 | instance ParFuture Par where 33 | type Future Par = TraceIVar 34 | type FutContents Par a = () 35 | spawn_ = Trace . fmap TraceIVar . T.spawn_ . unwrapTrace 36 | get = Trace . T.get . unwrapTraceIVar 37 | 38 | runPar :: (forall s. Par ('Ef 'P 'G 'NF 'B 'NI) s a) -> a 39 | runPar (Trace p) = T.runPar p 40 | 41 | runParPoly :: Deterministic e => Par e s a -> a 42 | runParPoly (Trace p) = T.runPar p 43 | -------------------------------------------------------------------------------- /src/par-schedulers/Control/Par/Scheds/Direct.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE KindSignatures #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | {-# LANGUAGE TypeFamilies #-} 6 | 7 | module Control.Par.Scheds.Direct 8 | ( Par 9 | , runPar 10 | , runParPoly 11 | ) where 12 | 13 | import System.IO.Unsafe (unsafePerformIO) 14 | 15 | import Control.Par.Class 16 | import qualified Control.Par.Class.Unsafe as PC 17 | import Control.Par.EffectSigs 18 | 19 | import qualified Control.Monad.Par.Scheds.Direct as D 20 | 21 | newtype Par (e :: EffectSig) s a = 22 | Direct { unwrapDirect :: D.Par a } 23 | 24 | newtype DirectIVar s a = DirectIVar { unwrapDirectIVar :: D.IVar a } 25 | 26 | instance PC.ParMonad Par where 27 | pbind (Direct p1) p2 = Direct $ p1 >>= unwrapDirect . p2 28 | preturn = Direct . return 29 | fork = Direct . D.fork . unwrapDirect 30 | internalLiftIO = return . unsafePerformIO 31 | 32 | instance ParFuture Par where 33 | type Future Par = DirectIVar 34 | type FutContents Par a = () 35 | spawn_ = Direct . fmap DirectIVar . D.spawn_ . unwrapDirect 36 | get = Direct . D.get . unwrapDirectIVar 37 | 38 | runPar :: (forall s. 
Par ('Ef 'P 'G 'NF 'B 'NI) s a) -> a 39 | runPar (Direct p) = D.runPar p 40 | 41 | runParPoly :: Deterministic e => Par e s a -> a 42 | runParPoly (Direct p) = D.runPar p 43 | -------------------------------------------------------------------------------- /src/lvish/tests/CtrieMapTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE DataKinds, TypeFamilies #-} 4 | {-# LANGUAGE CPP #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | 7 | -- | Tests for the Data.LVar.PureMap and Data.LVar.SLMap modules. 8 | 9 | module CtrieMapTests where 10 | 11 | -- import qualified Data.LVar.SLSet as IS 12 | import qualified Data.LVar.CtrieMap as IM 13 | 14 | #include "CommonMapTests.hs" 15 | 16 | type TheMap k s v = IM.IMap k s v 17 | 18 | -------------------------------------------------------------------------------- 19 | 20 | tests :: TestTree 21 | tests = testGroup "" [tests_here, tests_common ] 22 | 23 | tests_here :: TestTree 24 | tests_here = $(testGroupGenerator) 25 | 26 | runTests :: IO () 27 | runTests = defaultMain tests 28 | 29 | {- 30 | 31 | ------------------------------------------------------------------------------------------ 32 | -- Show instances 33 | ------------------------------------------------------------------------------------------ 34 | 35 | -- | It happens that these come out in the opposite order from the Pure one: 36 | _case_show02 :: Assertion 37 | _case_show02 = assertEqual "show for SLMap" "{IMap: (\"key2\",44), (\"key1\",33)}" show02 38 | show02 :: String 39 | show02 = show$ runParThenFreeze $ do 40 | mp <- IM.newEmptyMap 41 | SM.insert "key1" (33::Int) mp 42 | SM.insert "key2" (44::Int) mp 43 | return mp 44 | 45 | -} 46 | -------------------------------------------------------------------------------- /src/lvish-apps/cfa/k-cfa-lvish-example.cabal: -------------------------------------------------------------------------------- 1 | name: k-cfa-lvish-example 2 | version: 0.1.0.0 3 | author: Ryan Newton 4 | maintainer: rrnewton@gmail.com 5 | build-type: Simple 6 | cabal-version: >=1.10 7 | 8 | executable cfa-sequential 9 | main-is: k-CFA.hs 10 | ghc-options: -O2 -threaded 11 | -- other-modules: 12 | other-extensions: CPP, BangPatterns, DeriveGeneric 13 | build-depends: lvish <2.0, pretty, base >=4.6 && <4.7, deepseq >=1.3 && <1.4, mtl >=2.1 && <2.2, time >=1.4 && <1.5, containers >=0.5 && <0.6, GenericPretty >=1.2 && <1.3, random >=1.0 && <1.1, test-framework >=0.8 && <0.9, test-framework-hunit >=0.3 && <0.4, HUnit >=1.2 && <1.3, 14 | parallel >= 3.2 15 | -- hs-source-dirs: 16 | default-language: Haskell2010 17 | 18 | executable cfa-lvish 19 | main-is: k-CFA-lvish.hs 20 | ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 21 | -- other-modules: 22 | other-extensions: CPP, BangPatterns, DeriveGeneric 23 | build-depends: lvish <2.0, pretty, base >=4.6 && <4.7, deepseq >=1.3 && <1.4, mtl >=2.1 && <2.2, time >=1.4 && <1.5, containers >=0.5 && <0.6, GenericPretty >=1.2 && <1.3, random >=1.0 && <1.1, test-framework >=0.8 && <0.9, test-framework-hunit >=0.3 && <0.4, HUnit >=1.2 && <1.3 24 | -- hs-source-dirs: 25 | default-language: Haskell2010 26 | -------------------------------------------------------------------------------- /src/par-collections/tests/test-par-collections.cabal: -------------------------------------------------------------------------------- 1 | Name: test-par-collections 2 | Version: 1.1 3 | Synopsis: Factored out test-suite from 
par-collections to break a cycle. 4 | 5 | Cabal-version: >=1.8 6 | Build-type: Simple 7 | 8 | -- Factoring this out breaks the cycle between the 'par-collections' and 'lvish' packages. 9 | 10 | Executable test-par-collections-dummy-executable 11 | Main-is: Dummy.hs 12 | Build-depends: base 13 | 14 | Test-suite test-par-collections 15 | Type: exitcode-stdio-1.0 16 | hs-source-dirs: ./ 17 | Main-is: Main.hs 18 | -- Build depends that are used directly by the test files, including: 19 | -- (1) Self dependency: 20 | Build-depends: par-collections 21 | -- (2) Dependencies common to test files and the library: 22 | Build-depends: base >= 4 && < 5 23 | , time >= 1.4 24 | , par-classes >= 1.1 25 | , atomic-primops >= 0.6 26 | 27 | -- (3) Additional depends to test concrete instances: 28 | Build-depends: lvish >= 2.0 29 | 30 | -- (4) Additional build depends for testing: 31 | Build-depends: HUnit 32 | , tasty >= 0.10 33 | , tasty-hunit 34 | , tasty-quickcheck 35 | , tasty-th 36 | , QuickCheck 37 | , HUnit 38 | , time 39 | , text 40 | 41 | ghc-options: -O2 -threaded -rtsopts -Wall 42 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/MaxPosIntTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE DataKinds #-} 3 | 4 | -- | Tests for the Data.LVar.MaxPosInt module. 5 | 6 | module MaxPosIntTests(tests, runTests) where 7 | 8 | import Test.Tasty.HUnit 9 | import Test.Tasty (TestTree, defaultMain, testGroup) 10 | -- import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 11 | import Test.Tasty.TH (testGroupGenerator) 12 | import qualified Test.HUnit as HU 13 | import TestHelpers as T 14 | 15 | import Control.Concurrent (killThread, myThreadId) 16 | 17 | import Data.LVar.MaxPosInt 18 | import Control.LVish hiding (put) 19 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, Trvrsbl, runParThenFreeze, runParThenFreezeIO) 20 | import qualified Control.LVish.Internal as I 21 | 22 | -------------------------------------------------------------------------------- 23 | 24 | tests :: TestTree 25 | tests = $(testGroupGenerator) 26 | 27 | runTests :: IO () 28 | runTests = defaultMain tests 29 | 30 | -------------------------------------------------------------------------------- 31 | 32 | case_mc1 :: Assertion 33 | -- Spuriously failing currently: 34 | -- case_mc1 = assertEqual "mc1" (Just ()) $ timeOutPure 0.3 $ runPar $ do 35 | case_mc1 = assertEqual "mc1" () $ runPar $ do 36 | num <- newMaxPosInt 0 37 | fork $ put num 3 38 | fork $ put num 4 39 | waitThresh num 4 40 | 41 | case_mc2 :: Assertion 42 | case_mc2 = assertEqual "mc2" () $ runPar $ do 43 | num <- newMaxPosInt 0 44 | fork $ put num 3 45 | fork $ put num 4 46 | -------------------------------------------------------------------------------- /src/lvish-apps/pbbs/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, Ryan Newton 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 
10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Ryan Newton nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/SatMapTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE DataKinds, TypeFamilies #-} 4 | {-# LANGUAGE CPP #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | 7 | -- | Tests for the Data.LVar.PureMap and Data.LVar.SLMap modules. 8 | 9 | module SatMapTests(tests, runTests, fillNFreeze) where 10 | 11 | import Data.LVar.PureSet as IS 12 | import qualified Data.LVar.SatMap as IM 13 | -- The common interface under test: 14 | (SatMap, insert, newEmptyMap, newFromList, 15 | -- Not sure yet if we will get these: 16 | -- freezeMap, unionHP, forEach, forEachHP, traverseMap, traverseMapHP 17 | ) 18 | 19 | -- TODO: Use backpack for this when it is available: 20 | #include "CommonMapWriteTests.hs" 21 | 22 | type TheMap k s v = IM.SatMap k s v 23 | 24 | -------------------------------------------------------------------------------- 25 | 26 | tests :: TestTree 27 | tests = testGroup "" [testsHere, tests_writeOnly ] 28 | 29 | testsHere :: TestTree 30 | testsHere = $(testGroupGenerator) 31 | 32 | runTests :: IO () 33 | runTests = defaultMain tests 34 | 35 | ------------------------------------------------------------------------------------------ 36 | -- Show instances 37 | ------------------------------------------------------------------------------------------ 38 | 39 | case_show03 :: Assertion 40 | case_show03 = assertEqual "show for SatMap" "{SatMap: (\"key1\",33), (\"key2\",44)}" show03 41 | show03 :: String 42 | show03 = show$ runParThenFreeze $ isDet $ do 43 | mp <- IM.newEmptyMap 44 | IM.insert "key1" (33::Int) mp 45 | IM.insert "key2" (44::Int) mp 46 | return mp 47 | -------------------------------------------------------------------------------- /src/lvish-apps/cfa/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Max Bolingbroke 2009-2010. 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Max Bolingbroke nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /src/lvish/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Lindsey Kuper, Ryan Newton, Aaron Turon 2012 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Simon Marlow nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | -------------------------------------------------------------------------------- /src/par-collections/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Ryan Newton, Simon Marlow 2011-2013 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of the authors nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /src/lvish-extra/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Lindsey Kuper, Ryan Newton, Aaron Turon 2012 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Simon Marlow nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /src/par-classes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Lindsey Kuper, Ryan Newton, Aaron Turon 2012 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Simon Marlow nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /src/lvish/Data/UtilInternal.hs: -------------------------------------------------------------------------------- 1 | 2 | -- | A module with helper functions that are used elsewhere in the LVish repository. 3 | 4 | module Data.UtilInternal 5 | ( 6 | traverseWithKey_, 7 | Traverse_(..) 8 | ) 9 | where 10 | 11 | import Control.Applicative (Const(..), Applicative, pure, (*>)) 12 | import Control.Monad (void) 13 | import Data.Monoid (Monoid(..)) 14 | import qualified Data.Map as M 15 | import Prelude ((.)) 16 | 17 | -------------------------------------------------------------------------------- 18 | -- Helper code. 19 | -------------------------------------------------------------------------------- 20 | 21 | -- Version of traverseWithKey_ from Shachaf Ben-Kiki 22 | -- (See thread on Haskell-cafe.) 23 | -- Avoids O(N) allocation when traversing for side-effect. 
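--
-- An illustrative usage sketch of 'traverseWithKey_' for pure side effects
-- (the 'printMap' helper is hypothetical and assumes a full Prelude at the
-- call site):
--
-- > printMap :: M.Map Int String -> IO ()
-- > printMap = traverseWithKey_ (\k v -> putStrLn (show k ++ " => " ++ v))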
24 | 25 | newtype Traverse_ f = Traverse_ { runTraverse_ :: f () } 26 | instance Applicative f => Monoid (Traverse_ f) where 27 | mempty = Traverse_ (pure ()) 28 | Traverse_ a `mappend` Traverse_ b = Traverse_ (a *> b) 29 | -- Since the Applicative used is Const (newtype Const m a = Const m), the 30 | -- structure is never built up. 31 | --(b) You can derive traverseWithKey_ from myfoldMapWithKey, e.g. as follows: 32 | 33 | {-# INLINE traverseWithKey_ #-} 34 | traverseWithKey_ :: Applicative f => (k -> a -> f ()) -> M.Map k a -> f () 35 | traverseWithKey_ f = runTraverse_ . 36 | myfoldMapWithKey (\k x -> Traverse_ (void (f k x))) 37 | 38 | {-# INLINE myfoldMapWithKey #-} 39 | myfoldMapWithKey :: Monoid r => (k -> a -> r) -> M.Map k a -> r 40 | myfoldMapWithKey f = getConst . M.traverseWithKey (\k x -> Const (f k x)) 41 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/LayeredSatMapTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE DataKinds, TypeFamilies #-} 4 | {-# LANGUAGE CPP #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | 7 | -- | Tests for the Data.LVar.PureMap and Data.LVar.SLMap modules. 8 | 9 | module LayeredSatMapTests(tests, runTests, fillNFreeze) where 10 | 11 | import Data.LVar.PureSet as IS 12 | import qualified Data.LVar.LayeredSatMap as IM 13 | -- The common interface under test: 14 | (LayeredSatMap, insert, newEmptyMap, newFromList, 15 | -- Not sure yet if we will get these: 16 | -- freezeMap, unionHP, forEach, forEachHP, traverseMap, traverseMapHP 17 | ) 18 | 19 | -- TODO: Use backpack for this when it is available: 20 | #include "CommonMapWriteTests.hs" 21 | 22 | type TheMap k s v = IM.LayeredSatMap k s v 23 | 24 | -------------------------------------------------------------------------------- 25 | 26 | tests :: Test 27 | tests = testGroup "" [testsHere, tests_writeOnly ] 28 | 29 | testsHere :: Test 30 | testsHere = $(testGroupGenerator) 31 | 32 | runTests :: IO () 33 | runTests = defaultMainSeqTests [tests] 34 | 35 | ------------------------------------------------------------------------------------------ 36 | -- Show instances 37 | ------------------------------------------------------------------------------------------ 38 | 39 | 40 | -- case_show03 :: Assertion 41 | -- case_show03 = assertEqual "show for LayeredSatMap" "{LayeredSatMap: (\"key1\",33), (\"key2\",44)}" show03 42 | -- show03 :: String 43 | -- show03 = show$ runParThenFreeze $ do 44 | -- mp <- IM.newEmptyMap 45 | -- IM.insert "key1" (33::Int) mp 46 | -- IM.insert "key2" (44::Int) mp 47 | -- return mp 48 | 49 | -------------------------------------------------------------------------------- /src/par-classes/Data/Splittable/Class.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE FlexibleInstances #-} 3 | {-# LANGUAGE FunctionalDependencies #-} 4 | {-# LANGUAGE MultiParamTypeClasses #-} 5 | {-# LANGUAGE TypeFamilies #-} 6 | {-| 7 | 8 | A simple type class for data that can be split into pieces for parallel operation, 9 | and then reassembled. 10 | 11 | -} 12 | 13 | module Data.Splittable.Class 14 | (Split(..)) 15 | where 16 | 17 | import qualified Data.List as L 18 | 19 | -- | Data that can be split into balanced pieces. The main application of this is 20 | -- parallel consumption of the data. 
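--
-- An illustrative sketch (the 'halves' helper is hypothetical; it relies on
-- the list instance defined at the bottom of this module):
--
-- > halves :: [Int] -> ([Int], [Int])
-- > halves xs = case splitPlease 2 xs of
-- >               [l, r] -> (l, r)
-- >               _      -> (xs, [])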
21 | class Split a where 22 | -- class Eq a => Split a where 23 | 24 | -- | Split the data value into pieces. An empty data structure may return an empty 25 | -- list. 26 | split :: a -> [a] 27 | 28 | -- | A variant of `split` that allows the user to provide a /hint/ as to how many 29 | -- pieces they would like to split into. There is no obligation for the 30 | -- implementation to follow this hint (either as an upper or lower bound). 31 | splitPlease :: Int -> a -> [a] 32 | -- The defaul implementation ignorse the hint: 33 | splitPlease _ = split 34 | 35 | -- -- | The inverse of split. 36 | -- combine :: [a] -> a 37 | 38 | -- empty :: a 39 | 40 | -- In some cases we may know exactly how many pieces the underlying data structur 41 | -- can produce efficiently. 42 | 43 | -- split2 :: a -> (a,a) 44 | -- split3 :: a -> (a,a,a) 45 | 46 | -- | WARNING: this instance is inefficient, because lists are NOT good 47 | -- splittable structures. Nevertheless, lists are ubiquitous, so it's 48 | -- better to have this than not. 49 | instance Split [a] where 50 | {-# INLINABLE split #-} 51 | split ls = 52 | let len = length ls 53 | (l,r) = L.splitAt (len `quot` 2) ls 54 | in [l,r] 55 | -------------------------------------------------------------------------------- /src/lvish/README.md: -------------------------------------------------------------------------------- 1 | # A Haskell implementation of the LVish parallel programming model, based on LVars 2 | 3 | _LVish_ is a deterministic or [quasi-deterministic] parallel programming model that 4 | extends [LVars] to incorporate _freezing_ and _event handlers_. This 5 | directory contains an implementation of LVish as a monadic Haskell 6 | library based on [monad-par]. 7 | 8 | [quasi-deterministic]: http://www.cs.indiana.edu/~lkuper/papers/2013-lvish-draft.pdf 9 | [LVars]: https://www.cs.indiana.edu/~lkuper/papers/lvars-fhpc13.pdf 10 | [monad-par]: http://hackage.haskell.org/package/monad-par 11 | 12 | ## Applications for LVish 13 | 14 | ### Existing 15 | 16 | * [Graph algorithms from PBBS](https://github.com/iu-parfunc/lvars/tree/master/pbbs-haskell/benchmarks/graphs) 17 | 18 | * [Parallel _k_-CFA](https://github.com/iu-parfunc/lvars/blob/master/apps/cfa) using LVars for sharing. 19 | 20 | ### In progress 21 | 22 | * [PhyBin](https://github.com/rrnewton/PhyBin): A tool for 23 | classifying phylogenetic trees by their topology. Uses LVars for 24 | a particular algorithm that computes the all-to-all tree edit 25 | distance among a set of phylogenetic trees. (The code that uses 26 | LVars is on a branch, waiting for improvements to LVish.) Mailing 27 | list post about it by Ryan: 28 | https://groups.google.com/forum/#!topic/lattice-variables/HD7e6NImCnA. 29 | 30 | * Lindsey wonders what the relationship is to general topological 31 | sorting and whether this algorithm could be generalized. 32 | 33 | * Parallel logic programming: Will Byrd has 34 | [a rough outline of a version of miniKanren](https://github.com/webyrd/latticeKanren) 35 | that could use an LVar-like mechanism for communication. 36 | 37 | ### Not yet started 38 | 39 | * Parallel alpha-beta search 40 | 41 | * More graph algorithms 42 | 43 | 44 | -------------------------------------------------------------------------------- /src/lvish/tests/Main.hs: -------------------------------------------------------------------------------- 1 | -- #!/usr/bin/env runghc -i.. 2 | 3 | {-# LANGUAGE CPP #-} 4 | 5 | -- | This module aggregates all the unit tests in this directory. 
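--
-- (The aggregate 'TestTree' below is run with tasty's 'defaultMain'; in CI the
-- whole suite is invoked via @stack test@, as configured in .travis.yml at the
-- repository root.)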
6 | 7 | module Main where 8 | 9 | import Test.Tasty (TestTree, testGroup, defaultMain) 10 | --import TestHelpers (defaultMainSeqTests) 11 | 12 | --import qualified MemoTests 13 | import qualified LVishAndIVar 14 | --import qualified ArrayTests 15 | import qualified LogicalTests 16 | import qualified SkipListTests 17 | --import qualified SNZITests 18 | import qualified PureMapTests 19 | import qualified SLMapTests 20 | import qualified CtrieMapTests 21 | 22 | -- import qualified SatMapTests 23 | -- import qualified LayeredSatMapTests 24 | import qualified SetTests 25 | --import qualified MaxPosIntTests 26 | --import qualified AddRemoveSetTests 27 | import qualified GenericTests 28 | 29 | main :: IO () 30 | main = defaultMain alltests 31 | 32 | --alltests :: [TestTree] 33 | alltests :: TestTree 34 | alltests = testGroup "allTests" 35 | [ LVishAndIVar.tests 36 | -- , ArrayTests.tests 37 | -- , MemoTests.tests 38 | , LogicalTests.tests 39 | -- , MaxPosIntTests.tests 40 | , SetTests.tests 41 | , PureMapTests.tests 42 | -- , LayeredSatMap.tests 43 | -- , SatMapTests.tests 44 | -- , LayeredSatMapTests.tests 45 | 46 | , CtrieMapTests.tests 47 | 48 | #ifdef FAILING_TESTS 49 | -- This was failing, but marking bringing it back online to test again [2014.10.22]: 50 | , SLMapTests.tests -- TODO: close Issue #27, #28 first. 51 | , SkipListTests.tests -- Seems to diverge on some sizes on slm2/slm3 [2013.12.07] 52 | -- , SNZITests.tests -- These have failures still [2013.10.23] 53 | 54 | , GenericTests.tests -- Divergence... debugging [2013.12.07] 55 | #endif 56 | -- , AddRemoveSetTests.tests 57 | ] 58 | -------------------------------------------------------------------------------- /src/lvish/Data/Concurrent/Bag.hs: -------------------------------------------------------------------------------- 1 | module Data.Concurrent.Bag(Bag, Token, new, put, remove, foreach) where 2 | 3 | -- import Control.Monad 4 | -- import Control.Concurrent 5 | import System.IO.Unsafe (unsafePerformIO) 6 | import Data.IORef 7 | import qualified Data.IntMap as M 8 | 9 | ------------------------------------------------------------------------------ 10 | -- A nonscalable implementation of a concurrent bag 11 | ------------------------------------------------------------------------------ 12 | 13 | type UID = Int 14 | type Token a = (Bag a, UID) 15 | type Bag a = IORef (M.IntMap a) 16 | 17 | -- Return the old value. Could replace with a true atomic op. 18 | atomicIncr :: IORef Int -> IO Int 19 | atomicIncr cntr = atomicModifyIORef' cntr (\c -> (c+1,c)) 20 | 21 | {-# NOINLINE uidCntr #-} 22 | uidCntr :: IORef UID 23 | uidCntr = unsafePerformIO (newIORef 0) 24 | 25 | getUID :: IO UID 26 | getUID = atomicIncr uidCntr 27 | 28 | -- | Create an empty bag 29 | new :: IO (Bag a) 30 | new = newIORef (M.empty) 31 | 32 | -- | Add an element to a bag, returning a token that can later be used to remove 33 | -- that element. 34 | put :: Bag a -> a -> IO (Token a) 35 | put b x = do 36 | uid <- getUID 37 | atomicModifyIORef' b $ \m -> (M.insert uid x m, ()) 38 | return (b, uid) 39 | 40 | -- | foreach b f will traverse b (concurrently with updates), applying f to each 41 | -- encountered element, together with a token that can be used to remove the 42 | -- element. 43 | foreach :: Bag a -> (a -> Token a -> IO ()) -> IO () 44 | foreach b f = do 45 | m <- readIORef b 46 | let invoke (k, a) = f a (b, k) 47 | mapM_ invoke $ M.toList m 48 | 49 | -- | Remove the element associated with a given token. Repeated removals are 50 | -- permitted. 
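--
-- An illustrative end-to-end sketch of the Bag API (the 'demo' helper is
-- hypothetical, not part of the original module):
--
-- > demo :: IO ()
-- > demo = do
-- >   b   <- new
-- >   _   <- put b "keep"
-- >   tok <- put b "drop"
-- >   remove tok                        -- "drop" is gone; removing tok again is fine
-- >   foreach b (\x _ -> putStrLn x)    -- prints only "keep"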
51 | remove :: Token a -> IO () 52 | remove (b, uid) = atomicModifyIORef' b $ \m -> (M.delete uid m, ()) 53 | -------------------------------------------------------------------------------- /src/lvish-graph-algorithms/lvish-graph-algorithms.cabal: -------------------------------------------------------------------------------- 1 | name: lvish-graph-algorithms 2 | version: 0.1.0.0 3 | synopsis: Graph algorithms in lvish 4 | -- description: 5 | -- license: 6 | -- license-file: LICENSE 7 | author: Praveen Narayanan, Ryan Newton 8 | maintainer: pravnar@indiana.edu 9 | -- copyright: 10 | category: Concurrency 11 | build-type: Simple 12 | -- extra-source-files: 13 | cabal-version: >=1.10 14 | 15 | library 16 | exposed-modules: Data.LVar.Graph.BFS, 17 | Data.LVar.Graph.MIS, 18 | Data.Graph.Adjacency 19 | 20 | -- other-modules: 21 | other-extensions: CPP, BangPatterns, OverloadedStrings, ScopedTypeVariables, NamedFieldPuns 22 | build-depends: base >=4.6, process>=1.1.0.2, directory ==1.2.1.0, 23 | vector >=0.10, time ==1.4.2, 24 | containers >=0.5, fgl >= 5.4, deepseq >= 1.3, 25 | lvish >=1.0, par-collections >= 1.0, 26 | HUnit, bytestring-mmap >= 0.2, bytestring >= 0.10 27 | -- pbbs-haskell, monad-par-extras >=0.3 28 | hs-source-dirs: src 29 | default-language: Haskell2010 30 | 31 | 32 | executable lvish-graph-benchmarks 33 | main-is: Main.hs 34 | hs-source-dirs: src 35 | ghc-options: -O2 -threaded -rtsopts 36 | build-depends: base >=4.6, process>=1.1.0.2, directory >=1.2.1.0, 37 | vector >=0.10, time >=1.4.2, 38 | containers >=0.5, 39 | fgl >= 5.4, deepseq >= 1.3, 40 | lvish >=1.0, par-collections >= 1.0, 41 | HUnit, bytestring-mmap >= 0.2, bytestring >= 0.10, 42 | rdtsc, split 43 | -- pbbs-haskell, monad-par-extras >=0.3 44 | default-language: Haskell2010 45 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # RRN: Copied from template here: https://github.com/hvr/multi-ghc-travis/blob/master/README.md#ghc-head-snapshots 2 | 3 | # NB: don't set `language: haskell` here 4 | 5 | sudo: false 6 | cache: 7 | directories: 8 | - $HOME/.stack/ 9 | 10 | notifications: 11 | email: false 12 | 13 | git: 14 | submodules: false 15 | 16 | env: 17 | - STACK_RESOLVER=default 18 | FLAGS=lvish:nonidem 19 | - STACK_RESOLVER=lts-5.11 20 | FLAGS=lvish:nonidem 21 | - STACK_RESOLVER=lts-6.10 22 | FLAGS=lvish:nonidem 23 | 24 | - STACK_RESOLVER=default 25 | FLAGS=lvish:-nonidem 26 | - STACK_RESOLVER=lts-5.11 27 | FLAGS=lvish:-nonidem 28 | - STACK_RESOLVER=lts-6.10 29 | FLAGS=lvish:-nonidem 30 | 31 | addons: {apt: {packages: [libgmp-dev]}} 32 | 33 | # allow_failures: 34 | # # We should track these failures, but they're not really our direct responsibility 35 | # - env: STACK_RESOLVER=ghc-7.8 36 | # - env: STACK_RESOLVER=ghc-7.10 37 | 38 | # This is stack-specific. Oh well. 
39 | before_install: 40 | - sed -i 's/git@github.com:/https:\/\/github.com\//' .gitmodules 41 | - git submodule update --init --recursive 42 | 43 | - mkdir -p ~/.local/bin 44 | - export PATH=~/.local/bin:$PATH 45 | - cat /proc/cpuinfo | grep name 46 | # Used by .travis_install.sh below: 47 | - export STACK_YAML=stack-${STACK_RESOLVER}.yaml 48 | 49 | - wget https://www.stackage.org/stack/linux-x86_64 -O stack.tar.gz 50 | - tar xvf stack.tar.gz 51 | - mv -f stack-*/stack ~/.local/bin/ 52 | - chmod a+x ~/.local/bin/stack 53 | 54 | install: 55 | - export PATH=/opt/ghc/$GHCVER/bin:/opt/cabal/$CABALVER/bin:$PATH 56 | # Build but do not test: 57 | - ./.travis_install.sh 58 | 59 | # Here starts the actual work to be performed for the package under test; any command which exits with a non-zero exit code causes the build to fail. 60 | script: 61 | - stack test --flag $FLAGS --no-terminal 62 | # TODO: bring back at least compiling here: 63 | # - stack build --no-terminal ./src/lvish-extras/ 64 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/PairIO.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | 3 | module Data.LVar.PairIO 4 | ( 5 | IPair, 6 | newPair, 7 | putFst, 8 | putSnd, 9 | getFst, 10 | getSnd, 11 | ) where 12 | import LVarTraceIO 13 | import Data.IORef 14 | 15 | ------------------------------------------------------------------------------ 16 | -- IPairs implemented on top of LVars: 17 | ------------------------------------------------------------------------------ 18 | 19 | type IPair a b = LVar (IORef (IVarContents a), 20 | IORef (IVarContents b)) 21 | 22 | newPair :: Par (IPair a b) 23 | newPair = newLV $ 24 | do r1 <- newIORef (IVarContents Nothing) 25 | r2 <- newIORef (IVarContents Nothing) 26 | return (r1,r2) 27 | 28 | putFst :: IPair a b -> a -> Par () 29 | putFst lv@(LVar (refFst, _) _ _) !elt = putLV lv putter 30 | where 31 | -- putter takes the whole pair as an argument, but ignore it and 32 | -- just deal with refFst 33 | putter _ = 34 | atomicModifyIORef refFst $ \x -> 35 | case fromIVarContents x of 36 | Nothing -> (IVarContents (Just elt), ()) 37 | Just _ -> error "multiple puts to first element of IPair" 38 | 39 | putSnd :: IPair a b -> b -> Par () 40 | putSnd lv@(LVar (_, refSnd) _ _) !elt = putLV lv putter 41 | where 42 | -- putter takes the whole pair as an argument, but ignore it and 43 | -- just deal with refSnd 44 | putter _ = 45 | atomicModifyIORef refSnd $ \x -> 46 | case fromIVarContents x of 47 | Nothing -> (IVarContents (Just elt), ()) 48 | Just _ -> error "multiple puts to second element of IPair" 49 | 50 | getFst :: IPair a b -> Par a 51 | getFst iv@(LVar (ref1,_) _ _) = getLV iv poll 52 | where 53 | poll = fmap fromIVarContents $ readIORef ref1 54 | 55 | getSnd :: IPair a b -> Par b 56 | getSnd iv@(LVar (_,ref2) _ _) = getLV iv poll 57 | where 58 | poll = fmap fromIVarContents $ readIORef ref2 59 | 60 | -------------------------------------------------------------------------------- /src/lvish-extra/Experimental/Scrap.hs: -------------------------------------------------------------------------------- 1 | 2 | #if 0 3 | -- This gets ugly real fast: 4 | 5 | class LVarData0 t where 6 | -- | This associated type models a picture of the "complete" contents of the data: 7 | -- e.g. a whole set instead of one element, or the full/empty information for an 8 | -- IVar, instead of just the payload. 
9 | type Snapshot0 t 10 | freeze0 :: t -> Par (Snapshot0 t) 11 | newBottom0 :: Par t 12 | 13 | instance (LVarData1 f, LVarData0 a) => LVarData0 (f a) where 14 | type Snapshot0 (f a) = Snapshot f (Snapshot0 a) 15 | freeze0 = undefined 16 | newBottom0 = undefined 17 | #endif 18 | 19 | 20 | 21 | -- instance (LVarData1 f, LVarData1 g, Traversable g) => LVarData1 (g :. f) where 22 | -- -- type Snapshot (g :. f) a = Snapshot g (Snapshot f a) 23 | -- data Snapshot (g :. f) a = ComposedSnap !(Snapshot g (Snapshot f a)) 24 | -- freeze (inp :: (g :. f) a) = 25 | -- do let inp' :: g (f a) 26 | -- inp' = unO inp 27 | -- a <- freeze inp' :: Par (Snapshot g (f a)) 28 | -- b <- traverseSnap freeze (a :: Snapshot g (f a)) 29 | -- return $ ComposedSnap (b :: Snapshot g (Snapshot f a)) 30 | -- -- Because newBottom creates an empty structure, there should be no extra work to 31 | -- -- do here. 32 | -- newBottom = newBottom 33 | -- -- let new :: Par (g a) 34 | -- -- new = newBottom 35 | -- -- in new 36 | 37 | 38 | class DeepFreeze (from :: *) (to :: *) where 39 | type DeepSnap 40 | deepFreeze :: 41 | 42 | #if 0 43 | class LVarData0 (t :: *) where 44 | -- | This associated type models a picture of the "complete" contents of the data: 45 | -- e.g. a whole set instead of one element, or the full/empty information for an 46 | -- IVar, instead of just the payload. 47 | type Snapshot0 t 48 | freeze0 :: t -> Par (Snapshot0 t) 49 | newBottom0 :: Par t 50 | 51 | instance (LVarData1 f, LVarData0 a) => LVarData0 (f a) where 52 | type Snapshot0 (f a) = Snapshot f (Snapshot0 a) 53 | freeze0 = undefined 54 | newBottom0 = undefined 55 | #endif 56 | 57 | -------------------------------------------------------------------------------- /src/par-transformers/Control/Par/ST/UVec2.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE MultiParamTypeClasses #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 4 | {-# LANGUAGE TypeFamilies #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | {-# LANGUAGE RankNTypes #-} 7 | {-# LANGUAGE FlexibleContexts #-} 8 | {-# LANGUAGE TypeSynonymInstances #-} 9 | {-# LANGUAGE FlexibleInstances #-} 10 | {-# LANGUAGE BangPatterns #-} 11 | {-# LANGUAGE GADTs #-} 12 | {-# LANGUAGE CPP #-} 13 | 14 | -- | A convenience interface -- simply a restriction of `ParST` to the case 15 | -- of a single, boxed vector as the mutable state. 16 | -- 17 | -- This library exposes simple versions of common operations from 18 | -- "Data.Vector.Mutable", which operate directly on the implicit vector state 19 | -- threaded through the monad. 
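--
-- (Note: this module is the /unboxed/ instantiation of the shared @Vec2Common@
-- interface: the implicit state is a pair of "Data.Vector.Unboxed.Mutable"
-- vectors, so element types are subject to the @MU.Unbox@ constraint selected
-- by the @CONSTRAINT@ macro below.)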
20 | 21 | module Control.Par.ST.UVec2 22 | ( -- * A type alias for parallel computations with @Vector@ state 23 | ParVec2T, 24 | runParVec2T, 25 | 26 | -- * Reexported from the generic interface 27 | forkSTSplit, liftPar, 28 | 29 | -- * Retrieving an explict pointer to the Vector 30 | reify, liftST, 31 | 32 | -- * Installing new vectors 33 | installL, installR, 34 | 35 | -- * Useful vector helpers 36 | writeL, writeR, readL, readR, lengthL, lengthR, 37 | swapL, swapR, dropL, dropR, takeL, takeR, 38 | growL, growR, setL, setR, swapState 39 | ) 40 | where 41 | 42 | import Control.Par.ST hiding (reify) 43 | import Control.Par.Class.Unsafe (ParThreadSafe) 44 | import qualified Control.Monad.Reader as R 45 | --import qualified Control.Monad.State.Strict as S 46 | import qualified Data.Vector.Unboxed.Mutable as MU 47 | import Prelude hiding (read, length, drop, take) 48 | 49 | #define CONSTRAINT MU.Unbox 50 | #define FLIPTY UVectorFlp 51 | #define FLPIT UFlp 52 | #define ARRRECIP UVectorFlpRecipe 53 | 54 | -------------------------------------------------------------------------------- 55 | 56 | #include "Vec2Common.hs" 57 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/PairScalable.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | 3 | -- UNFINISHED 4 | 5 | module Data.LVar.PairScalable 6 | ( 7 | IPair, 8 | newPair, 9 | putFst, 10 | putSnd, 11 | getFst, 12 | getSnd, 13 | ) where 14 | import LVarTraceScalable 15 | import Data.IORef 16 | 17 | ------------------------------------------------------------------------------ 18 | -- IPairs implemented on top of LVars: 19 | ------------------------------------------------------------------------------ 20 | 21 | type IPair a b = LVar (IORef (IVarContents a), 22 | IORef (IVarContents b)) 23 | 24 | newPair :: Par (IPair a b) 25 | newPair = newLV $ 26 | do r1 <- newIORef (IVarContents Nothing) 27 | r2 <- newIORef (IVarContents Nothing) 28 | return (r1,r2) 29 | 30 | putFst :: IPair a b -> a -> Par () 31 | putFst lv@(LVar (refFst, _) _) !elt = putLV lv putter 32 | where 33 | -- putter takes the whole pair as an argument, but ignore it and 34 | -- just deal with refFst 35 | putter _ = 36 | atomicModifyIORef refFst $ \x -> 37 | case fromIVarContents x of 38 | Nothing -> (IVarContents (Just elt), ()) 39 | Just _ -> error "multiple puts to first element of IPair" 40 | 41 | putSnd :: IPair a b -> b -> Par () 42 | putSnd lv@(LVar (_, refSnd) _) !elt = putLV lv putter 43 | where 44 | -- putter takes the whole pair as an argument, but ignore it and 45 | -- just deal with refSnd 46 | putter _ = 47 | atomicModifyIORef refSnd $ \x -> 48 | case fromIVarContents x of 49 | Nothing -> (IVarContents (Just elt), ()) 50 | Just _ -> error "multiple puts to second element of IPair" 51 | 52 | getFst :: IPair a b -> Par a 53 | getFst iv@(LVar (ref1,_) _) = getLV iv poll 54 | where 55 | poll = fmap fromIVarContents $ readIORef ref1 56 | 57 | getSnd :: IPair a b -> Par b 58 | getSnd iv@(LVar (_,ref2) _) = getLV iv poll 59 | where 60 | poll = fmap fromIVarContents $ readIORef ref2 61 | -------------------------------------------------------------------------------- /src/par-classes/par-classes.cabal: -------------------------------------------------------------------------------- 1 | Name: par-classes 2 | Version: 1.2 3 | Synopsis: Type classes providing a general interface to various @Par@ monads. 
4 | 5 | -- Version history: 6 | -- 1.0 : Replacing old 'abstract-par' package with this new, extended version. 7 | -- 1.0.1 : Add Data.Splittable 8 | -- 1.1 : Add EffectSigs 9 | -- 1.2 : Add Control.Par.Sparks 10 | 11 | Description: A @Par@ monad offers a parallel programming API based on 12 | dataflow programming. This package offers classes abstracting over 13 | Par monads. These classes separate different levels of @Par@ 14 | functionality. See the "Control.Par.Class" module for more details. 15 | . 16 | For instances of these parallelism classes, see the @lVish@ and 17 | @monad-par@ packages, as well as the limited reference 18 | implementation, found in `Control.Par.Sparks`. 19 | 20 | 21 | Homepage: https://github.com/iu-parfunc/lvars 22 | License: BSD3 23 | License-file: LICENSE 24 | Author: Ryan Newton 25 | Maintainer: Ryan Newton 26 | Copyright: (c) Ryan Newton 2011-2012 27 | Stability: Experimental 28 | Category: Control,Parallelism,Monads 29 | Build-type: Simple 30 | Cabal-version: >=1.8 31 | 32 | extra-source-files: 33 | 34 | Source-repository head 35 | type: git 36 | location: https://github.com/iu-parfunc/lvars 37 | subdir: haskell/par-classes 38 | 39 | 40 | Library 41 | Exposed-modules: 42 | -- Classes generalizing different levels of monad-par functionality: 43 | Control.Par.Class 44 | Control.Par.Class.Unsafe 45 | -- Control.Par.Sparks 46 | Control.Par.EffectSigs 47 | Data.Splittable.Class 48 | 49 | -- This needs to stay low-dependencies!! Interfaces only. 50 | Build-depends: base >= 4.7 && < 5 51 | , deepseq >= 1.1 52 | , ghc-prim >= 0.3 53 | , transformers 54 | , parallel 55 | 56 | ghc-options: -Wall -fno-warn-orphans 57 | -------------------------------------------------------------------------------- /src/par-transformers/Control/Par/ST/StorableVec2.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE ConstraintKinds #-} 3 | {-# LANGUAGE FlexibleContexts #-} 4 | {-# LANGUAGE FlexibleInstances #-} 5 | {-# LANGUAGE GADTs #-} 6 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 7 | {-# LANGUAGE MultiParamTypeClasses #-} 8 | {-# LANGUAGE Rank2Types #-} 9 | {-# LANGUAGE ScopedTypeVariables #-} 10 | {-# LANGUAGE TypeFamilies #-} 11 | {-# LANGUAGE TypeSynonymInstances #-} 12 | 13 | -- | A convenience interface -- simply a restriction of `ParST` to the case 14 | -- of a single, boxed vector as the mutable state. 15 | -- 16 | -- This library exposes simple versions of common operations from 17 | -- "Data.Vector.Mutable", which operate directly on the implicit vector state 18 | -- threaded through the monad. 
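--
-- This is the "Data.Vector.Storable.Mutable" instantiation of the shared
-- @Vec2Common@ interface, so elements must be 'MU.Storable'.  An illustrative
-- sketch (the 'example' helper is hypothetical, loosely adapted from the
-- par-mergesort benchmark in this repository; it assumes that benchmark's
-- imports: Control.LVish for 'runPar', Data.Int, and qualified
-- Data.Vector.Storable as SV):
--
-- > example :: SV.Vector Int32 -> SV.Vector Int32
-- > example vec = runPar $ runParVec2T (0, SV.length vec) $ do
-- >   mv <- liftST (SV.thaw vec)     -- work on a private mutable copy
-- >   installL mv                    -- make it the left component of the state
-- >   (rawL, _rawR) <- reify
-- >   liftST (SV.freeze rawL)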
19 | 20 | module Control.Par.ST.StorableVec2 21 | ( -- * A type alias for parallel computations with @Vector@ state 22 | ParVec2T, 23 | runParVec2T, 24 | 25 | -- * Reexported from the generic interface 26 | forkSTSplit, liftPar, 27 | 28 | -- * Retrieving an explict pointer to the Vector 29 | reify, liftST, 30 | 31 | -- * Installing new vectors 32 | installL, installR, 33 | 34 | -- * Useful vector helpers 35 | writeL, writeR, readL, readR, lengthL, lengthR, 36 | swapL, swapR, dropL, dropR, takeL, takeR, 37 | growL, growR, setL, setR, swapState 38 | ) 39 | where 40 | 41 | import qualified Control.Monad.Reader as R 42 | -- import qualified Control.Monad.State.Strict as S 43 | import Control.Par.Class.Unsafe (ParThreadSafe) 44 | import Control.Par.ST hiding (reify) 45 | import qualified Data.Vector.Storable.Mutable as MU 46 | import Prelude hiding (drop, length, read, take) 47 | 48 | #define CONSTRAINT MU.Storable 49 | #define FLIPTY SVectorFlp 50 | #define FLPIT SFlp 51 | #define ARRRECIP SVectorFlpRecipe 52 | 53 | -------------------------------------------------------------------------------- 54 | 55 | #include "Vec2Common.hs" 56 | -------------------------------------------------------------------------------- /src/par-transformers/Control/Par/ST/Vec2.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE ConstraintKinds #-} 3 | {-# LANGUAGE FlexibleContexts #-} 4 | {-# LANGUAGE FlexibleInstances #-} 5 | {-# LANGUAGE GADTs #-} 6 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 7 | {-# LANGUAGE MultiParamTypeClasses #-} 8 | {-# LANGUAGE Rank2Types #-} 9 | {-# LANGUAGE ScopedTypeVariables #-} 10 | {-# LANGUAGE TypeFamilies #-} 11 | {-# LANGUAGE TypeSynonymInstances #-} 12 | 13 | -- | A convenience interface -- simply a restriction of `ParST` to the case 14 | -- of a single, boxed vector as the mutable state. 15 | -- 16 | -- This library exposes simple versions of common operations from 17 | -- "Data.Vector.Mutable", which operate directly on the implicit vector state 18 | -- threaded through the monad. 
19 | 20 | module Control.Par.ST.Vec2 21 | ( -- * A type alias for parallel computations with @Vector@ state 22 | ParVec2T, 23 | runParVec2T, 24 | 25 | -- * Reexported from the generic interface 26 | forkSTSplit, liftPar, 27 | 28 | -- * Retrieving an explict pointer to the Vector 29 | reify, liftST, 30 | 31 | -- * Installing new vectors 32 | installL, installR, 33 | 34 | -- * Common vector operations 35 | writeL, writeR, readL, readR, lengthL, lengthR, 36 | swapL, swapR, dropL, dropR, takeL, takeR, 37 | growL, growR, setL, setR, swapState 38 | ) 39 | where 40 | 41 | import Control.Par.Class.Unsafe (ParThreadSafe) 42 | import Control.Par.ST hiding (reify) 43 | 44 | import qualified Control.Monad.Reader as R 45 | -- import qualified Control.Monad.State.Strict as S 46 | import qualified Data.Vector.Mutable as MU 47 | import Prelude hiding (drop, length, read, take) 48 | 49 | #define CONSTRAINT(e) 50 | #define FLIPTY MVectorFlp 51 | #define FLPIT VFlp 52 | #define ARRRECIP MVectorFlpRecipe 53 | 54 | -------------------------------------------------------------------------------- 55 | 56 | -- #include "./Control/Par/ST/Vec2Common.hs" 57 | #include "Vec2Common.hs" 58 | -------------------------------------------------------------------------------- /stack-cnf.yaml: -------------------------------------------------------------------------------- 1 | 2 | # flags: {} 3 | 4 | packages: 5 | - src/lvish/ 6 | - src/par-classes/ 7 | - src/par-collections/ 8 | - src/par-collections/tests/ 9 | - src/par-mergesort/ 10 | - src/par-transformers/ 11 | - deps/ctrie 12 | - deps/concurrent-skiplist/ 13 | 14 | # Temp: This is on hackage, but the index isn't update yet: 15 | - location: 16 | git: https://github.com/rrnewton/haskell-lockfree.git 17 | commit: 0ea12fc141c6bd4762773a1adc2f005de068369c 18 | subdirs: 19 | - atomic-primops 20 | extra-dep: true 21 | 22 | # TODO: Revive: 23 | # - src/lvish-apps/ 24 | # - src/lvish-apps/cfa/ 25 | # - src/lvish-apps/pbbs/ 26 | # - src/lvish-graph-algorithms/ 27 | # - archive_old/fhpc13-lvars/ 28 | # - archive_old/fhpc13-lvars/benchmarks/bf-traverse-LVar/ 29 | # - archive_old/fhpc13-lvars/benchmarks/bf-traverse-monad-par/ 30 | # - archive_old/fhpc13-lvars/benchmarks/bf-traverse-Strategies/ 31 | 32 | # TODO: Add: 33 | # src/par-schedulers 34 | 35 | 36 | extra-deps: 37 | # - atomic-primops-0.8.0.4 38 | - chaselev-deque-0.5.0.5 39 | - bits-atomic-0.1.3 40 | - thread-local-storage-0.1.0.3 41 | 42 | - HUnit-1.3.1.1 43 | - abstract-deque-0.3 44 | - async-2.1.0 45 | - cereal-0.5.1.0 46 | - hashable-1.2.4.0 47 | - lattices-1.5.0 48 | - missing-foreign-0.1.1 49 | - monad-par-0.3.4.7 50 | - mtl-2.2.1 51 | - primitive-0.6.1.0 52 | - random-1.1 53 | - tasty-0.11.0.3 54 | - tasty-hunit-0.9.2 55 | - vector-0.11.0.0 56 | - vector-algorithms-0.7.0.1 57 | - abstract-par-0.3.3 58 | - ansi-terminal-0.6.2.3 59 | - clock-0.7.2 60 | - monad-par-extras-0.3.3 61 | - mwc-random-0.13.4.0 62 | - optparse-applicative-0.12.1.0 63 | - parallel-3.2.1.0 64 | - regex-tdfa-1.2.2 65 | - semigroups-0.18.1 66 | - stm-2.4.4.1 67 | - tagged-0.8.4 68 | - text-1.2.2.1 69 | - unbounded-delays-0.1.0.9 70 | - universe-base-1.0.2.1 71 | - universe-reverse-instances-1.0 72 | - unordered-containers-0.2.7.0 73 | - void-0.7.1 74 | - ansi-wl-pprint-0.6.7.3 75 | - parsec-3.1.9 76 | - regex-base-0.93.2 77 | - transformers-compat-0.5.1.4 78 | - universe-instances-base-1.0 79 | 80 | 81 | # Expects 8.1.20160412 from a recent GHC-mutable-cnf build: 82 | compiler-check: newer-minor 83 | resolver: ghc-8.1 84 | 85 | # Some packages 
aren't fully GHC 8.0-ready yet: 86 | allow-newer: true 87 | -------------------------------------------------------------------------------- /src/par-collections/par-collections.cabal: -------------------------------------------------------------------------------- 1 | Name: par-collections 2 | Version: 1.2 3 | Synopsis: Generic parallel combinators for data and iteration spaces. 4 | 5 | -- Version history: 6 | -- 1.0 : This library introduced to replace the deprecated monad-par-extras. 7 | -- 1.1 : Add LVish-2.0 style effect signatures 8 | -- 1.1.1 : Add Split instance for list 9 | -- 1.2 : Remove Eq superclass of Split. Seems unjustified. 10 | 11 | Description: 12 | These modules provide means to traverse and consume 13 | common data structures, in parallel, using any Par monad. 14 | . 15 | Additionally, a variety of parallel looping constructs are 16 | provided by this package (e.g. parallel traversals where 17 | the "data structure" is merely a range / iteration space.) 18 | 19 | License: BSD3 20 | License-file: LICENSE 21 | Author: Ryan Newton 22 | Maintainer: Ryan Newton 23 | Copyright: (c) Ryan Newton 2011-2013 24 | Stability: Experimental 25 | Category: Control,Parallelism,Monads 26 | Build-type: Simple 27 | Cabal-version: >=1.8 28 | 29 | Source-repository head 30 | type: git 31 | location: https://github.com/iu-parfunc/lvars 32 | subdir: haskell/par-collections 33 | 34 | Library 35 | Exposed-modules: 36 | Data.Par 37 | Data.Par.Map 38 | Data.Par.Range 39 | Data.Par.Set 40 | Data.Par.Splittable 41 | Data.Par.Traversable 42 | 43 | Build-depends: base >= 4 && < 5 44 | -- This provides the interface which monad-par implements: 45 | , cereal >= 0.3 46 | , deepseq >= 1.3 47 | , random >= 1.0 48 | , mtl >= 2.0 49 | , transformers >= 0.2 50 | , par-classes >= 1.1 51 | 52 | -- These are so we can provide instances for existing collections: 53 | , primitive >= 0.6 && < 0.7 54 | , vector >= 0.10 && < 0.12 55 | 56 | if impl(ghc < 7.7) { 57 | build-depends: containers >= 0.5 58 | } else { 59 | cpp-options: -DNEWCONTAINERS 60 | build-depends: containers >= 0.5.4.0 61 | } 62 | ghc-options: -O2 -rtsopts -Wall 63 | Other-modules: 64 | -------------------------------------------------------------------------------- /src/lvish/Data/LVar/Pair.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE TypeFamilies #-} 3 | 4 | -- | Just for demonstration purposes. It's probably simpler to use a pair of IVars. 5 | 6 | module Data.LVar.Pair ( 7 | IPair, newPair, putFst, putSnd, getFst, getSnd 8 | ) where 9 | 10 | import Control.Exception (throw) 11 | import Control.LVish 12 | import Control.LVish.Internal 13 | import Data.IORef 14 | import Control.LVish.Internal.SchedIdempotent (getLV, newLV, putLV) 15 | 16 | ------------------------------------------------------------------------------ 17 | -- IPairs implemented on top of (the idempotent implementation of) LVars: 18 | ------------------------------------------------------------------------------ 19 | 20 | type IPair s a b = LVar s (IORef (Maybe a), IORef (Maybe b)) (Either a b) 21 | 22 | -- This can't be an intstance of LVarData1... we need LVarData2. 
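-- An illustrative sketch (the 'demo' helper is hypothetical, not part of the
-- original module):
--
-- > demo :: (HasPut e, HasGet e) => Par e s (Int, String)
-- > demo = do
-- >   p <- newPair
-- >   putFst p 3
-- >   putSnd p "hi"
-- >   a <- getFst p
-- >   b <- getSnd p
-- >   return (a, b)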
23 | 24 | newPair :: Par e s (IPair s a b) 25 | newPair = WrapPar $ fmap WrapLVar $ newLV $ do 26 | r1 <- newIORef Nothing 27 | r2 <- newIORef Nothing 28 | return (r1, r2) 29 | 30 | putFst :: HasPut e => IPair s a b -> a -> Par e s () 31 | putFst (WrapLVar lv) !elt = WrapPar $ putLV lv putter 32 | where putter (r1, _) = atomicModifyIORef' r1 update 33 | update (Just _) = throw$ ConflictingPutExn$ "Multiple puts to first element of an IPair!" 34 | update Nothing = (Just elt, Just $ Left elt) 35 | 36 | putSnd :: HasPut e => IPair s a b -> b -> Par e s () 37 | putSnd (WrapLVar lv) !elt = WrapPar $ putLV lv putter 38 | where putter (_, r2) = atomicModifyIORef' r2 update 39 | update (Just _) = throw$ ConflictingPutExn$ "Multiple puts to second element of an IPair!" 40 | update Nothing = (Just elt, Just $ Right elt) 41 | 42 | getFst :: HasGet e => IPair s a b -> Par e s a 43 | getFst (WrapLVar lv) = WrapPar $ getLV lv globalThresh deltaThresh 44 | where globalThresh (r1, _) _ = readIORef r1 45 | deltaThresh (Left x) = return $ Just x 46 | deltaThresh (Right _) = return Nothing 47 | 48 | getSnd :: HasGet e => IPair s a b -> Par e s b 49 | getSnd (WrapLVar lv) = WrapPar $ getLV lv globalThresh deltaThresh 50 | where globalThresh (_, r2) _ = readIORef r2 51 | deltaThresh (Left _) = return Nothing 52 | deltaThresh (Right x) = return $ Just x 53 | 54 | -- TODO: LVarData2 instance?? 55 | 56 | -------------------------------------------------------------------------------- /src/lvish/tests/ThreadTest.hs: -------------------------------------------------------------------------------- 1 | 2 | import Control.Exception as E 3 | import GHC.Conc 4 | import qualified System.Posix.Signals as S 5 | import System.Exit 6 | import Text.Printf 7 | 8 | t1 = do 9 | putStrLn "Thread 1 starting" 10 | t1id <- myThreadId 11 | -- forkIO (t2 t1id) 12 | forkWithExceptions forkIO "t2" (t2 t1id) 13 | putStrLn "Thread 1: done fork, now delay" 14 | threadDelay $ 100 * 1000 15 | putStrLn "Thread 1 ending.." 16 | 17 | t2 t1id = do 18 | st <- threadStatus t1id 19 | putStrLn $"Thread 2 starting; t1 stat: "++show st 20 | threadDelay $ 1 * 1000 * 1000 21 | st <- threadStatus t1id 22 | putStrLn $"Thread 2 waking to kill: t1 stat: "++show st 23 | throwTo t1id (ErrorCall "DIE t1!") 24 | st <- threadStatus t1id 25 | putStrLn$ "Thread 2 -- throwTo returned, exiting... final t1 stat: "++show st 26 | -- exitFailure -- This just throws an exception, nothing more. 27 | 28 | -- Here's a more aggressive way to kill the process: 29 | S.raiseSignal S.sigABRT 30 | error "T2 DIE too..." 31 | 32 | main = do 33 | mainid <- myThreadId 34 | putStrLn$ "Main thread ID: "++show mainid 35 | -- forkIO t1 36 | forkWithExceptions forkIO "t1" t1 37 | 38 | threadDelay $ 2000 * 1000 39 | putStrLn$ "Main thread exiting..." 40 | 41 | 42 | -- | Exceptions that walk up the fork-tree of threads. 43 | -- 44 | -- WARNING: By holding onto the ThreadId we keep the parent thread from being 45 | -- garbage collected (at least as of GHC 7.6). This means that even if it was 46 | -- complete, it will still be hanging around to accept the exception below. 
47 | forkWithExceptions :: (IO () -> IO ThreadId) -> String -> IO () -> IO ThreadId 48 | forkWithExceptions forkit descr action = do 49 | parent <- myThreadId 50 | forkit $ do 51 | tid <- myThreadId 52 | E.catch action 53 | (\ e -> 54 | case E.fromException e of 55 | Just E.ThreadKilled -> do 56 | -- Killing worker threads is normal now when exception handling, so this chatter is restricted to debug mode: 57 | printf "\nThreadKilled exception inside child thread, %s (not propagating!): %s\n" (show tid) (show descr) 58 | return () 59 | _ -> do 60 | printf "\nException inside child thread %s, %s: %s\n" (show descr) (show tid) (show e) 61 | E.throwTo parent (e :: E.SomeException) 62 | ) 63 | 64 | 65 | -------------------------------------------------------------------------------- /src/lvish/tests/SLMapTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE DataKinds, TypeFamilies #-} 4 | {-# LANGUAGE CPP #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | 7 | -- | Tests for the Data.LVar.PureMap and Data.LVar.SLMap modules. 8 | 9 | module SLMapTests(tests, runTests) where 10 | 11 | import qualified Data.LVar.SLSet as IS 12 | import qualified Data.LVar.SLMap as IM 13 | import qualified Data.Concurrent.SkipListMap as SLM 14 | 15 | import qualified Data.LVar.SLMap as SM 16 | 17 | #include "CommonMapTests.hs" 18 | 19 | type TheMap k s v = IM.IMap k s v 20 | 21 | -------------------------------------------------------------------------------- 22 | 23 | tests :: TestTree 24 | tests = testGroup "" [tests_here, tests_common ] 25 | 26 | tests_here :: TestTree 27 | tests_here = $(testGroupGenerator) 28 | 29 | runTests :: IO () 30 | runTests = defaultMain tests 31 | 32 | ------------------------------------------------------------------------------------------ 33 | -- Show instances 34 | ------------------------------------------------------------------------------------------ 35 | 36 | -- | It happens that these come out in the opposite order from the Pure one: 37 | case_show02 :: Assertion 38 | case_show02 = assertEqual "show for SLMap" "{IMap: (\"key2\",44), (\"key1\",33)}" show02 39 | show02 :: String 40 | show02 = show$ runParThenFreeze $ isDet $ do 41 | mp <- IM.newEmptyMap 42 | SM.insert "key1" (33::Int) mp 43 | SM.insert "key2" (44::Int) mp 44 | return mp 45 | 46 | -------------------------------------------------------------------------------- 47 | -- Issue related: 48 | -------------------------------------------------------------------------------- 49 | 50 | -- -- Issue #27, spurious duplication. 51 | -- case_handlrDup :: Assertion 52 | -- case_handlrDup = runParIO $ do 53 | -- ctr <- I.liftIO$ newIORef 0 54 | -- mp <- SM.newEmptyMap 55 | -- hp <- newPool 56 | -- -- Register handler FIRST.. no race. 
57 | -- SM.forEachHP (Just hp) mp $ \ (k::Int) v -> do 58 | -- logDbgLn 1 $ "[case_handlrDup] Callback executing: " ++ show (k,v) 59 | -- I.liftIO $ incr ctr 60 | -- SM.insert 2 2 mp 61 | -- SM.insert 3 3 mp 62 | -- quiesce hp 63 | -- sum <- I.liftIO $ readIORef ctr 64 | -- I.liftIO $ assertEqual "Should be no duplication in this case" 2 sum 65 | 66 | -- incr :: IORef Int -> IO () 67 | -- incr ref = atomicModifyIORef' ref (\x -> (x+1,())) 68 | -------------------------------------------------------------------------------- /src/par-mergesort/par-mergesort.cabal: -------------------------------------------------------------------------------- 1 | name: par-mergesort 2 | version: 1.0 3 | build-type: Simple 4 | cabal-version: >=1.8 5 | 6 | synopsis: Fast in-place parallel mergesort using a Par-monad. 7 | 8 | description: 9 | 10 | Out-of-place parallel sorts are not fast. Even if they bottom out 11 | to fast sequential sorts, executing an out-of-place sort at larger 12 | array sizes creates extra memory traffic exactly when array slices 13 | are larger than last-level cache. 14 | 15 | This module provides a guaranteed-deterministic parallel mergesort 16 | that uses only the safe API provided by the `ParST` monad 17 | transformer. It works with any underlying `ParMonad`. 18 | 19 | 20 | Source-repository head 21 | type: git 22 | location: https://github.com/iu-parfunc/lvars 23 | subdir: haskell/par-transformers/bench/mergesort 24 | 25 | library 26 | exposed-modules: 27 | Control.Par.MergeSort 28 | Control.Par.MergeSort.Internal 29 | 30 | build-depends: 31 | base == 4.*, 32 | lvish, 33 | monad-par, 34 | mtl, 35 | par-classes, 36 | par-transformers, 37 | vector >= 0.10 && < 0.12, 38 | vector-algorithms, 39 | primitive >= 0.6 40 | 41 | c-sources: cbits/sort_int32.c 42 | cbits/sort_int64.c 43 | -- required for REPL, not sure about performance implications 44 | cc-options: -fPIC 45 | 46 | ghc-options: -O2 -Wall 47 | 48 | test-suite test-mergesort 49 | type: exitcode-stdio-1.0 50 | hs-source-dirs: test/ 51 | main-is: Main.hs 52 | 53 | build-depends: 54 | base, 55 | lvish, 56 | mtl, 57 | par-classes, 58 | par-mergesort, 59 | par-transformers, 60 | random, 61 | vector >= 0.10, 62 | 63 | QuickCheck, 64 | tasty >= 0.10, 65 | tasty-hunit, 66 | tasty-quickcheck 67 | 68 | ghc-options: -Wall -O2 -rtsopts -threaded -eventlog 69 | 70 | benchmark bench-mergesort 71 | type: exitcode-stdio-1.0 72 | main-is: Main.hs 73 | hs-source-dirs: bench/ 74 | 75 | build-depends: 76 | base, 77 | criterion >= 1.1, 78 | lvish, 79 | par-classes, 80 | par-mergesort, 81 | par-transformers, 82 | vector 83 | 84 | ghc-options: -Wall -O2 85 | -------------------------------------------------------------------------------- /src/lvish-extra/Data/LVar/MaxPosInt.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE Trustworthy #-} 2 | 3 | {-# LANGUAGE DataKinds, BangPatterns, MagicHash #-} 4 | {-# LANGUAGE TypeSynonymInstances, FlexibleInstances, MultiParamTypeClasses, TypeFamilies #-} 5 | -- | A positive integer LVar that contains the maximum value of all `put`s. 6 | 7 | -- TODO: Add 'Min', 'Or', 'And' and other idempotent ops... 
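--
-- An illustrative sketch (the 'maxOf' helper is hypothetical; it follows the
-- runParThenFreeze/isDet pattern used in this repository's test suites, and
-- elides the exact effect-signature details):
--
-- > maxOf :: [Int] -> Int
-- > maxOf xs = fromMaxPosInt $ runParThenFreeze $ isDet $ do
-- >   m <- newMaxPosInt 0
-- >   mapM_ (fork . put m) xs
-- >   return m   -- the frozen result is the maximum of 0 and xs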
8 | 9 | module Data.LVar.MaxPosInt 10 | ( MaxPosInt, 11 | newMaxPosInt, put, waitThresh, freezeMaxPosInt, fromMaxPosInt 12 | ) where 13 | 14 | import Control.LVish hiding (freeze, put) 15 | import Control.LVish.Internal (state) 16 | import Control.LVish.DeepFrz.Internal 17 | import Data.IORef 18 | import Data.LVar.Generic 19 | import Data.LVar.Internal.Pure as P 20 | import Algebra.Lattice 21 | import System.IO.Unsafe (unsafeDupablePerformIO) 22 | import GHC.Prim (unsafeCoerce#) 23 | 24 | -------------------------------------------------------------------------------- 25 | 26 | -- | A @MaxPosInt@ is really a constant-space ongoing @fold max@ operation. 27 | -- 28 | -- A @MaxPosInt@ is an example of a `PureLVar`. It is implemented simply as a 29 | -- pure value in a mutable box. 30 | type MaxPosInt s = PureLVar s MC 31 | 32 | newtype MC = MC Int 33 | deriving (Eq, Show, Ord, Read) 34 | 35 | instance JoinSemiLattice MC where 36 | join (MC !a) (MC !b) = MC (a `max` b) 37 | 38 | instance BoundedJoinSemiLattice MC where 39 | bottom = MC minBound 40 | 41 | -- | Create a new `MaxPosInt` with the given initial value. 42 | newMaxPosInt :: Int -> Par e s (MaxPosInt s) 43 | newMaxPosInt n = newPureLVar (MC n) 44 | 45 | -- | Incorporate a new value in the max-fold. If the previous maximum is less than 46 | -- the new value, increase it. 47 | put :: HasPut e => MaxPosInt s -> Int -> Par e s () 48 | put lv n = putPureLVar lv (MC n) 49 | 50 | -- | Wait until the maximum observed value reaches some threshold, then return. 51 | waitThresh :: HasGet e => MaxPosInt s -> Int -> Par e s () 52 | waitThresh lv n = waitPureLVar lv (MC n) 53 | 54 | -- | Observe what the final value of the `MaxPosInt` was. 55 | freezeMaxPosInt :: HasFreeze e => MaxPosInt s -> Par e s Int 56 | freezeMaxPosInt lv = do 57 | MC n <- freezePureLVar lv 58 | return n 59 | 60 | -- | Once frozen, for example by `runParThenFreeze`, a `MaxPosInt` can be converted 61 | -- directly into an `Int`. 62 | fromMaxPosInt :: MaxPosInt Frzn -> Int 63 | fromMaxPosInt (PureLVar lv) = 64 | case unsafeDupablePerformIO (readIORef (state lv)) of 65 | MC n -> n 66 | 67 | instance DeepFrz MC where 68 | type FrzType MC = MC 69 | -------------------------------------------------------------------------------- /src/par-collections/Data/Par/Map.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE CPP #-} 3 | {-# LANGUAGE TypeFamilies #-} 4 | {-# OPTIONS_GHC -fno-warn-orphans #-} 5 | 6 | -- | Provide instances for parallel handling of common, pure Haskell data structures. 
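--
-- An illustrative sketch (hypothetical 'sumValues' helper; the concrete Par
-- monad and its effect constraints are left open, and the 'PC.ParFoldable'
-- instance is only available on the NEWCONTAINERS build path below):
--
-- > sumValues mp = PC.pmapFold (return . snd) (\x y -> return (x + y)) 0 mp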
7 | 8 | module Data.Par.Map 9 | () where 10 | 11 | import qualified Control.Par.Class as PC 12 | import qualified Data.Map as M 13 | import Data.Splittable.Class (Split (..)) 14 | 15 | -- import Control.Applicative 16 | -- import Data.Monoid 17 | 18 | -------------------------------------------------------------------------------- 19 | 20 | instance PC.Generator (M.Map k v) where 21 | type ElemOf (M.Map k v) = (k,v) 22 | {-# INLINE foldM #-} 23 | foldM = foldrMWithKey 24 | {-# INLINE fold #-} 25 | fold fn = M.foldlWithKey (\ !a k v -> fn a (k,v)) 26 | 27 | #ifdef NEWCONTAINERS 28 | instance (Eq k, Eq v) => Split (M.Map k v) where 29 | {-# INLINE split #-} 30 | split = M.splitRoot 31 | 32 | -- TODO: Opt in to the trivial instance of ParFoldable, using Split-based mapreduce: 33 | instance PC.ParFoldable (M.Map k v) where 34 | pmapFold f1 f2 zer col = go col 35 | where 36 | go gen = 37 | case M.size gen of 38 | 0 -> return zer 39 | -- GHC probably won't be able to optimize this recursive function: 40 | -- splitRoot should be rewritten at a different type. 41 | 1 -> f1 $! M.findMin gen 42 | _ -> do let [l,m,r] = M.splitRoot gen 43 | l' <- PC.spawn_ (go l) 44 | r' <- go r 45 | m' <- go m 46 | x <- f2 m' r' 47 | l'' <- PC.get l' 48 | f2 l'' x 49 | #else 50 | -- instance PC.ParFoldable (M.Map k v) where 51 | #endif 52 | 53 | foldrMWithKey :: Monad m => (acc -> (k, v) -> m acc) -> acc -> M.Map k v -> m acc 54 | foldrMWithKey fn zer mp = 55 | M.foldrWithKey (\ k v m -> m >>= fn2 (k,v)) (return zer) mp 56 | where 57 | fn2 !pr !a = fn a pr 58 | {-# INLINE foldrMWithKey #-} 59 | 60 | {- 61 | 62 | foldrMWithKey :: Monad m => ((k, v) -> acc -> m acc) -> acc -> M.Map k v -> m acc 63 | -- foldrMWithKey :: Applicative m => ((k, v) -> acc -> m acc) -> acc -> M.Map k v -> m acc 64 | foldrMWithKey fn zer mp = 65 | undefined 66 | -- M.foldMapWithKey (\ k v -> undefined) mp 67 | 68 | newtype FoldAction_ f acc = FoldAction_ { runFoldAction_ :: acc -> f acc } 69 | instance Applicative f => Monoid (FoldAction_ f acc) where 70 | mempty = FoldAction_ (\ x -> pure x) 71 | FoldAction_ a `mappend` FoldAction_ b = FoldAction_ (a *> b) 72 | 73 | -} 74 | -------------------------------------------------------------------------------- /src/lvish/tests/GenericTests.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE TemplateHaskell #-} 3 | {-# LANGUAGE DataKinds #-} 4 | {-# LANGUAGE TypeFamilies #-} 5 | 6 | -- | Tests for the generic Par-programming interfaces. 7 | 8 | module GenericTests (tests, runTests) where 9 | 10 | import Control.Monad 11 | import Data.Maybe (fromMaybe) 12 | import Data.Word 13 | import qualified Control.Par.Class as PC 14 | import Control.Par.Class.Unsafe (internalLiftIO) 15 | import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 16 | import Test.Tasty.TH (testGroupGenerator) 17 | import Test.Tasty (defaultMain, TestTree) 18 | import Test.Tasty.HUnit (testCase) -- For macro-expansion. 19 | 20 | import TestHelpers as T 21 | import Control.LVish -- LVarSched instances... 
22 | import Data.LVar.IVar as IV 23 | import qualified Data.LVar.SLMap as SM 24 | import qualified Control.Par.Class as PC 25 | import Data.Par.Range (zrange) 26 | import Data.Par.Splittable (pforEach) 27 | 28 | -------------------------------------------------------------------------------- 29 | 30 | 31 | case_toQPar :: Assertion 32 | case_toQPar = t1 >>= assertEqual "" "hi" 33 | 34 | t1 :: IO String 35 | t1 = runParQuasiDet $ isQD par 36 | where 37 | -- par :: QuasiDeterministic e => Par e s String 38 | par = do 39 | iv <- IV.new 40 | -- PC.toQPar $ 41 | IV.put iv "hi" 42 | IV.get iv 43 | 44 | -------------------------------------------------------------------------------- 45 | 46 | size :: Int 47 | size = fromMaybe 100 numElems 48 | 49 | expectedSum :: Word64 50 | expectedSum = (s * (s + 1)) `quot` 2 51 | where s = fromIntegral size 52 | 53 | -- ParFold instance 54 | case_pfold_imap :: Assertion 55 | case_pfold_imap = assertNoTimeOut 3.0 $ runParNonDet $ isND $ do 56 | mp <- SM.newEmptyMap 57 | -- pforEach (zrange sz) $ \ ix -> do 58 | forM_ [1..size] $ \ ix -> do 59 | SM.insert ix (fromIntegral ix::Word64) mp 60 | 61 | logDbgLn 1 $ "IMap filled up... freezing" 62 | fmp <- SM.freezeMap mp 63 | logDbgLn 3 $ "Frozen: "++show fmp 64 | let mapper (_k,x) = do 65 | logDbgLn 2 $ "Mapping in parallel: "++show x 66 | return x 67 | folder x y = do 68 | logDbgLn 2 $ "Summing in parallel "++show (x,y) 69 | return $! x+y 70 | summed <- PC.pmapFold mapper folder 0 fmp 71 | logDbgLn 1 $ "Sum of IMap values: " ++ show summed 72 | internalLiftIO$ assertEqual "Sum of IMap values" expectedSum summed 73 | return () 74 | 75 | -------------------------------------------------------------------------------- 76 | 77 | tests :: TestTree 78 | tests = $(testGroupGenerator) 79 | 80 | runTests :: IO () 81 | runTests = defaultMain tests 82 | -------------------------------------------------------------------------------- /src/lvish/Data/LVar/Future.hs: -------------------------------------------------------------------------------- 1 | 2 | {-# LANGUAGE TypeFamilies #-} 3 | {-# LANGUAGE MagicHash #-} 4 | 5 | -- | 6 | 7 | module Data.LVar.Future where 8 | 9 | import Control.DeepSeq 10 | import Control.Exception (throw) 11 | import qualified Control.LVish.Basics as LV 12 | import Control.LVish.DeepFrz.Internal 13 | import Control.LVish.Internal (LVar (WrapLVar), 14 | Par (WrapPar)) 15 | import qualified Control.LVish.Internal as I 16 | import qualified Control.LVish.Types as LV 17 | import Control.Par.EffectSigs 18 | import qualified Data.Foldable as F 19 | import Data.IORef 20 | import Data.LVar.Generic 21 | import Data.LVar.Generic.Internal (unsafeCoerceLVar) 22 | import GHC.Prim (unsafeCoerce#) 23 | import Internal.Control.LVish.SchedIdempotent (freezeLV, getLV, newLV, 24 | putLV) 25 | import qualified Internal.Control.LVish.SchedIdempotent as LI 26 | import System.IO.Unsafe (unsafeDupablePerformIO, 27 | unsafePerformIO) 28 | import System.Mem.StableName (hashStableName, 29 | makeStableName) 30 | 31 | import qualified Control.Par.Class as PC 32 | import qualified Control.Par.Class.Unsafe as PU 33 | 34 | -------------------------------------------------------------------------------- 35 | 36 | data FutStatus a = Empty 37 | | Started 38 | | Finished a 39 | 40 | newtype Future s a = Future (LVar s (IORef (FutStatus a)) a) 41 | 42 | -- instance (IdempotentParMonad (p e)) => ParFuture Par where 43 | 44 | get :: Future s a -> Par e s a 45 | get (IVar (WrapLVar iv)) = WrapPar$ getLV iv globalThresh deltaThresh 46 | where 
globalThresh ref _ = readIORef ref -- past threshold iff Just _ 47 | deltaThresh x = return $ Just x -- always past threshold 48 | 49 | instance PC.ParFuture Par where 50 | type Future Par = Future 51 | 52 | spawn_ m = do ref <- PU.internalLiftIO $ newIORef $ error "Data.LVar.Future - internal error" 53 | fork (do res <- m 54 | PU.internalLiftIO $ do writeIORef ref 55 | -- Mem barrier here 56 | ) 57 | undefined 58 | read = undefined 59 | -------------------------------------------------------------------------------- /src/par-mergesort/bench/Main.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE TypeFamilies #-} 4 | 5 | module Main where 6 | 7 | import Data.Int 8 | import qualified Data.Vector.Storable as SV 9 | import qualified Data.Vector.Storable.Mutable as SVM 10 | 11 | import Criterion 12 | import Criterion.Main 13 | 14 | import Control.LVish as LVishSched 15 | import qualified Control.Par.Class as PC 16 | import Control.Par.MergeSort.Internal 17 | import Control.Par.ST 18 | import qualified Control.Par.ST.StorableVec2 as V 19 | 20 | -- TODO: Add vector-algortihms benchmarks. 21 | 22 | main :: IO () 23 | main = defaultMain 24 | [ env (return $ SV.fromList $ reverse [0 .. 10^(6 :: Int32)]) $ \(vec :: SV.Vector Int32) -> 25 | bgroup "sorting benchmarks" $ concat $ 26 | flip map [CSort, VAMSort, VAISort] $ \ssMeth -> 27 | flip map [CMerge, HSMerge] $ \smMeth -> 28 | -- TODO(osa): Thresholds should be varying? 29 | mkBench 100 100 ssMeth smMeth vec 30 | ] 31 | 32 | mkBench :: Int -> Int -> SSort -> SMerge -> SV.Vector Int32 -> Benchmark 33 | mkBench ssThres smThres ssMeth smMeth vec = 34 | bench msg $ nf (sortPV ssThres smThres ssMeth smMeth) vec 35 | where 36 | msg = "sort" ++ " " ++ show ssThres 37 | ++ " " ++ show smThres 38 | ++ " " ++ show ssMeth 39 | ++ " " ++ show smMeth 40 | 41 | -- | Out of place sort on immutable vectors. 42 | -- 43 | -- TODO(osa): Copied from tests, maybe copy to a shared place. 44 | -- 45 | sortPV :: Int -> Int -> SSort -> SMerge -> SV.Vector Int32 -> SV.Vector Int32 46 | sortPV ssThres smThres ssMeth smMeth vec = 47 | -- TODO(osa): Maybe remove copying here by just taking mutable vec and 48 | -- returning mutable one. 49 | LVishSched.runPar $ V.runParVec2T (0, SV.length vec) $ do 50 | vec' <- liftST $ SV.thaw vec 51 | sortPV' ssThres smThres ssMeth smMeth vec' >> do 52 | (rawL, _) <- V.reify 53 | sv <- liftST $ SV.freeze rawL 54 | return $ sv 55 | 56 | -- | Sort the vector in the left component of the state. 57 | -- 58 | -- TODO(osa): Like sortPV, copied. 59 | sortPV' :: (PC.ParMonad p, PC.ParThreadSafe p, PC.ParIVar p, PC.FutContents p (), 60 | PC.ParFuture p, HasPut e, HasGet e) => 61 | Int -> Int -> SSort -> SMerge -> 62 | SVM.STVector s1 Int32 -> V.ParVec2T s1 Int32 Int32 p e s () 63 | sortPV' ssThres smThres ssMeth smMeth vec = do 64 | -- (_, right) <- V.reify 65 | -- SS.put (STTup2 (SFlp vec) (SFlp right)) 66 | V.installL vec 67 | mergeSort_int32 ssThres smThres ssMeth smMeth 68 | -------------------------------------------------------------------------------- /src/lvish/TODO.md: -------------------------------------------------------------------------------- 1 | # LVish implementation TODOs 2 | 3 | Also see issues for the initial release: 4 | https://github.com/iu-parfunc/lvars/issues?milestone=1&state=open 5 | 6 | Can we have a kind of "Relay" LVar that stores nothing but is just 7 | a place to hang handlers? 
It could not have a globalThresh and it 8 | would have to have the property that any addHandler after the first 9 | Put is an error. 10 | 11 | 12 | ## Scheduler and data-structure layer concerns 13 | 14 | * Leveraging idempotence -- use idemp-WS deque? 15 | 16 | * Turning OFF idempotence: What about BUMP / atomic counters!?!?!?!? 17 | - cheap to prevent dups of: blocked-getters 18 | - expensive to prevent dups of: launched-handlers 19 | - (CRDT people struggled with atomic counter thing.) 20 | - PhyBin is an app that absolutely DEPENDS on bump. 21 | It falls back to IO right now... 22 | 23 | * Recover non-idempotent operations with a hidden IORef... 24 | Idea is that thread dup will NOT happen between parallel API ops. 25 | Where do we manufacture such IORefs for use before the counter? 26 | Where the counter is made? 27 | - OR why not have ANOTHER MONAD... (kind of a sub-monad) 28 | In the submonad it does dedup on every parallel op. 29 | - OR link against multiple schedulers... 30 | 31 | * (Ryan and Aaron) Microbenchmark and optimize SLMap/SLSet 32 | 33 | * (Aaron Todd) Concurrent Bag? 34 | 35 | * Other Map implementations (non-skiplist) 36 | 37 | * Make freezing for SLSet O(1) 38 | 39 | * Continue expanding benchmark suite in general 40 | - PBBS outstanding TODOs: variants of IStructure/BitVector 41 | Ran into big problems re: scalability of blocked-lists. 42 | - (Boost C++ `intrusive_ptr` vs. `shared_ptr` analogy) 43 | - PBBS good performance would require better support 44 | for DETERMINISTIC RESERVATIONS! (*Bulk* handling of blocked 45 | iterations of a parallel for-loop.) 46 | 47 | * __Outstanding bugs?????__ SNZI? 48 | -- Testing framework is WEIRD... sometimes things fail in combination 49 | that do not fail individually. Strange observations that we don't 50 | understand. 51 | 52 | ## Old TODOs (circa July) 53 | 54 | * Adapt tests to LVarIdempotent. 55 | 56 | * Examples/benchmarks: 57 | - graph traversal 58 | - CFA 59 | - other PBBS graph algorithms 60 | 61 | * Scalable data structures: 62 | - Counter (use SNZI, http://dl.acm.org/citation.cfm?id=1281106) 63 | - Bags 64 | - Hashtables (=> sets) 65 | 66 | - Work-stealing deque (could use bag) 67 | 68 | * Fill out data structure interfaces: 69 | 70 | * in addition to "newEmpty*" we should have a few ways to provide 71 | elements at the outset, before it is exposed for concurrent 72 | modification. 73 | 74 | * Fixes: 75 | 76 | * modify and insert operations on the same IMap probably should be 77 | disqualified at the type level... 78 | 79 | * Optimizations: 80 | 81 | * Expose a version of addHandler that does NOT support quiesce. 
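## Sketch: the "Relay" idea from above

As a purely hypothetical sketch of the "Relay" LVar floated at the top of this file (no stored state, just a place to hang handlers), the interface might look roughly like the following. None of these names exist in the library; they only illustrate the invariant described above:

    data Relay s a                     -- carries no state, only handlers
    newRelay        :: Par e s (Relay s a)
    relayPut        :: HasPut e => Relay s a -> a -> Par e s ()
    addRelayHandler :: Relay s a -> (a -> Par e s ()) -> Par e s ()
    -- Invariant: addRelayHandler after the first relayPut is an error,
    -- which is what lets a Relay get away without any globalThresh.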
82 | -------------------------------------------------------------------------------- /src/lvish/tests/LogicalTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | 3 | module LogicalTests where 4 | 5 | import Control.LVish 6 | import Data.LVar.IVar as IV 7 | 8 | --import Test.HUnit (Assertion, assert, assertEqual, assertBool, Counts(..)) 9 | -- import Test.QuickCheck () 10 | import Test.Tasty.HUnit 11 | -- import Test.Framework.Providers.QuickCheck2 12 | import Test.Tasty (TestTree, defaultMain, testGroup) 13 | import Test.Tasty.TH (testGroupGenerator) 14 | -- import TestHelpers (defaultMainSeqTests) 15 | 16 | -------------------------------------------------------------------------------- 17 | -- TESTS: 18 | -------------------------------------------------------------------------------- 19 | 20 | case_and1 :: Assertion 21 | case_and1 = assertEqual "" False $ runPar $ do 22 | v <- IV.new 23 | asyncAnd Nothing (return True) (return False) (IV.put v) 24 | IV.get v 25 | 26 | case_and2 :: Assertion 27 | case_and2 = assertEqual "" False $ runPar $ do 28 | v <- IV.new 29 | asyncAnd Nothing (return False) (return False) (IV.put v) 30 | IV.get v 31 | 32 | case_and3 :: Assertion 33 | case_and3 = assertEqual "" True $ runPar $ do 34 | v <- IV.new 35 | asyncAnd Nothing (return True) (return True) (IV.put v) 36 | IV.get v 37 | 38 | case_and4 :: Assertion 39 | case_and4 = assertEqual "" False $ runPar $ do 40 | v <- IV.new 41 | asyncAnd Nothing (return False) (return True) (IV.put v) 42 | IV.get v 43 | 44 | case_or1 :: Assertion 45 | case_or1 = assertEqual "" True $ runPar $ do 46 | v <- IV.new 47 | asyncOr Nothing (return True) (return False) (IV.put v) 48 | IV.get v 49 | 50 | case_or2 :: Assertion 51 | case_or2 = assertEqual "" False $ runPar $ do 52 | v <- IV.new 53 | asyncOr Nothing (return False) (return False) (IV.put v) 54 | IV.get v 55 | 56 | case_or3 :: Assertion 57 | case_or3 = assertEqual "" True $ runPar $ do 58 | v <- IV.new 59 | asyncOr Nothing (return True) (return True) (IV.put v) 60 | IV.get v 61 | 62 | case_or4 :: Assertion 63 | case_or4 = assertEqual "" True $ runPar $ do 64 | v <- IV.new 65 | asyncOr Nothing (return False) (return True) (IV.put v) 66 | IV.get v 67 | 68 | case_andMap01 :: Assertion 69 | case_andMap01 = assertEqual "" False $ runPar $ 70 | andMap Nothing (return . even) [1..200::Int] 71 | 72 | case_orMap01 :: Assertion 73 | case_orMap01 = assertEqual "" True $ runPar $ 74 | orMap Nothing (return . even) [1..200::Int] 75 | 76 | -- TODO: add ones with explicit timing controls (sleep). 77 | 78 | -------------------------------------------------------------------------------- 79 | 80 | tests :: TestTree 81 | tests = $(testGroupGenerator) 82 | 83 | runTests :: IO () 84 | runTests = defaultMain tests 85 | -------------------------------------------------------------------------------- /src/lvish/tests/CommonMapWriteTests.hs: -------------------------------------------------------------------------------- 1 | 2 | -- This is NOT a full Haskell module. 3 | -- This is a slice of source code that is #included into multiple files. 4 | 5 | -- ASSUMES: module "IM" refers to the Map implementation. 
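-- Note that, besides the "IM" alias, the code below also refers to a "TheMap"
-- type constructor. As a purely hypothetical illustration (not copied from any
-- actual includer in this repo), a module that #includes this slice would set
-- up both names first, roughly along these lines:
--
--   {-# LANGUAGE CPP, TemplateHaskell #-}
--   module SomeMapWriteTests where            -- hypothetical module name
--   import qualified Data.LVar.SLMap as IM    -- the map implementation under test
--   type TheMap k s v = IM.IMap k s v         -- alias used by the slice below
--   #include "CommonMapWriteTests.hs"
--
-- i.e. the IM alias and the TheMap synonym must both be in scope before this
-- slice is spliced in.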
6 | 7 | -- This code is for testing write-only operations: 8 | 9 | import Test.Tasty.HUnit 10 | import Test.Tasty (TestTree, testGroup, defaultMain) 11 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 12 | import Test.Tasty.TH (testGroupGenerator) 13 | import qualified Test.HUnit as HU 14 | import TestHelpers2 as T 15 | import Control.Concurrent (threadDelay) 16 | import Control.Monad (forM_, forM) 17 | import Data.Traversable (traverse) 18 | import qualified Data.Foldable as F 19 | import qualified Data.Map as M 20 | import qualified Data.Set as S 21 | import qualified Data.List as L 22 | import Data.Word 23 | import Data.IORef 24 | import System.Random 25 | import Test.QuickCheck 26 | import Test.Tasty.QuickCheck (testProperty) 27 | 28 | -- Some maps need Hashable instead of Ord: 29 | import Data.Hashable 30 | 31 | import Control.LVish 32 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, NonFrzn, Trvrsbl, 33 | runParThenFreeze, runParThenFreezeIO) 34 | import qualified Control.LVish.Internal as I 35 | import qualified Control.Par.Class as PC 36 | import qualified Data.LVar.IVar as IV 37 | import Data.LVar.Generic ( LVarData1(sortFrzn) , AFoldable(..) ) 38 | import GHC.Conc (numCapabilities) 39 | 40 | -------------------------------------------------------------------------------- 41 | -- Quickcheck properties: 42 | 43 | -- Build simple properties that amount to the identity function, but perform 44 | -- conversions in the middle. 45 | mkSimpleIdentityProp :: 46 | (Ord v, Ord k, Hashable k, F.Foldable t, DeepFrz a, FrzType a ~ t v) => 47 | (TheMap k NonFrzn v -> Par ('Ef 'P 'G 'NF 'NB 'NI) NonFrzn a) -> 48 | [(k, v)] -> Bool 49 | mkSimpleIdentityProp trans prs = 50 | (L.sort$ L.nub$ map snd prs) == 51 | (L.sort$ L.nub $ F.toList $ 52 | runParThenFreeze $ isIdemD $ do 53 | mp0 <- IM.newFromList prs 54 | trans mp0) 55 | 56 | prop_tofrom :: [Int] -> Bool 57 | prop_tofrom ls = mkSimpleIdentityProp return (zip ls ls) 58 | 59 | tests_writeOnly :: TestTree 60 | tests_writeOnly = testGroup "Common" [ $(testGroupGenerator) ] 61 | 62 | -------------------------------------------------------------------------------- 63 | 64 | 65 | fillNFreezeChunks :: [(Int, Int)] -> TheMap Int Frzn Int 66 | -- fmap IM.freezeMap $ 67 | fillNFreezeChunks chunks = runParThenFreeze $ isDet $ do 68 | mp <- IM.newEmptyMap 69 | forM chunks $ \ (start,end) -> do 70 | fork $ do 71 | T.for_ (start, end)$ \n -> IM.insert n n mp 72 | return mp 73 | 74 | fillNFreeze :: Int -> TheMap Int Frzn Int 75 | fillNFreeze sz = fillNFreezeChunks (splitRange numCapabilities (0,sz-1)) 76 | 77 | case_fillFreeze1K :: Assertion 78 | case_fillFreeze1K = assertEqual "fill and then freeze" 79 | (sum [0..sz-1]) 80 | (case sortFrzn (fillNFreeze sz) of AFoldable x -> F.foldl' (+) 0 x) 81 | where sz = 1000 82 | 83 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/CommonMapWriteTests.hs: -------------------------------------------------------------------------------- 1 | 2 | -- This is NOT a full Haskell module. 3 | -- This is a slice of source code that is #included into multiple files. 4 | 5 | -- ASSUMES: module "IM" refers to the Map implementation. 
6 | 7 | -- This code is for testing write-only operations: 8 | 9 | import Test.Tasty.HUnit 10 | import Test.Tasty (TestTree, testGroup, defaultMain) 11 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 12 | import Test.Tasty.TH (testGroupGenerator) 13 | import qualified Test.HUnit as HU 14 | import TestHelpers2 as T 15 | import Control.Concurrent (threadDelay) 16 | import Control.Monad (forM_, forM) 17 | import Data.Traversable (traverse) 18 | import qualified Data.Foldable as F 19 | import qualified Data.Map as M 20 | import qualified Data.Set as S 21 | import qualified Data.List as L 22 | import Data.Word 23 | import Data.IORef 24 | import System.Random 25 | import Test.QuickCheck 26 | import Test.Tasty.QuickCheck (testProperty) 27 | 28 | -- Some maps need Hashable instead of Ord: 29 | import Data.Hashable 30 | 31 | import Control.LVish 32 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, NonFrzn, Trvrsbl, 33 | runParThenFreeze, runParThenFreezeIO) 34 | import qualified Control.LVish.Internal as I 35 | import qualified Control.Par.Class as PC 36 | import qualified Data.LVar.IVar as IV 37 | import Data.LVar.Generic ( LVarData1(sortFrzn) , AFoldable(..) ) 38 | import GHC.Conc (numCapabilities) 39 | 40 | -------------------------------------------------------------------------------- 41 | -- Quickcheck properties: 42 | 43 | -- Build simple properties that amount to the identity function, but perform 44 | -- conversions in the middle. 45 | mkSimpleIdentityProp :: 46 | (Ord v, Ord k, Hashable k, F.Foldable t, DeepFrz a, FrzType a ~ t v) => 47 | (TheMap k NonFrzn v -> Par ('Ef 'P 'G 'NF 'NB 'NI) NonFrzn a) -> 48 | [(k, v)] -> Bool 49 | mkSimpleIdentityProp trans prs = 50 | (L.sort$ L.nub$ map snd prs) == 51 | (L.sort$ L.nub $ F.toList $ 52 | runParThenFreeze $ isIdemD $ do 53 | mp0 <- IM.newFromList prs 54 | trans mp0) 55 | 56 | prop_tofrom :: [Int] -> Bool 57 | prop_tofrom ls = mkSimpleIdentityProp return (zip ls ls) 58 | 59 | tests_writeOnly :: TestTree 60 | tests_writeOnly = testGroup "Common" [ $(testGroupGenerator) ] 61 | 62 | -------------------------------------------------------------------------------- 63 | 64 | 65 | fillNFreezeChunks :: [(Int, Int)] -> TheMap Int Frzn Int 66 | -- fmap IM.freezeMap $ 67 | fillNFreezeChunks chunks = runParThenFreeze $ isDet $ do 68 | mp <- IM.newEmptyMap 69 | forM chunks $ \ (start,end) -> do 70 | fork $ do 71 | T.for_ (start, end)$ \n -> IM.insert n n mp 72 | return mp 73 | 74 | fillNFreeze :: Int -> TheMap Int Frzn Int 75 | fillNFreeze sz = fillNFreezeChunks (splitRange numCapabilities (0,sz-1)) 76 | 77 | case_fillFreeze1K :: Assertion 78 | case_fillFreeze1K = assertEqual "fill and then freeze" 79 | (sum [0..sz-1]) 80 | (case sortFrzn (fillNFreeze sz) of AFoldable x -> F.foldl' (+) 0 x) 81 | where sz = 1000 82 | 83 | -------------------------------------------------------------------------------- /src/lvish-extra/Data/LVar/PNCounter.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE TypeFamilies #-} 3 | 4 | -- LK: N.B. Once Data.LVar.Counter is done, we should just be able to 5 | -- glue two of those together for this, just as AddRemoveSet does with 6 | -- sets. 7 | 8 | {-| 9 | 10 | This module provides a /PN-Counter/, a counter that allows both 11 | increment and decrement operations. This is possible because, under 12 | the hood, it's represented with two monotonically growing counters, 13 | one for increments and one for decrements. 
The name "PN-Counter" 14 | comes from the literature on /conflict-free replicated data types/. 15 | 16 | -} 17 | module Data.LVar.PNCounter 18 | ( 19 | PNCounter, 20 | newCounter, newCounterWithValue, 21 | increment, waitForIncrements, 22 | decrement, waitForDecrements, 23 | 24 | freezeCounter 25 | 26 | ) where 27 | import Control.LVish 28 | import Control.LVish.Internal 29 | import qualified Data.Atomics.Counter as AC 30 | -- LK: FIXME: it can't be okay to use SchedIdempotent if we're using bump, can it?! 31 | -- import Internal.Control.LVish.SchedIdempotent (newLV) 32 | import Data.IORef 33 | 34 | 35 | -- | The counter datatype. 36 | 37 | -- LK: LVar around the outside, or PureLVar? What's the difference? 38 | data PNCounter s = LVar s (AC.AtomicCounter, AC.AtomicCounter) 39 | 40 | -- | Create a new `PNCounter` set to zero. 41 | newCounter :: Par e s (PNCounter s) 42 | newCounter = newCounterWithValue 0 43 | 44 | -- | Create a new `PNCounter` with the specified initial value. 45 | newCounterWithValue :: Int -> Par e s (PNCounter s) 46 | -- LK: hm, how do I create IORefs and then return a Par? I think what 47 | -- I'm supposed to be doing here is wrapping an unsafe internal Par 48 | -- computation (that's allowed to do IO) in a safe one that I return. 49 | newCounterWithValue n = undefined 50 | -- FIXME... 51 | -- do 52 | -- incs <- newIORef (Just n) 53 | -- decs <- newIORef Nothing 54 | 55 | -- | Increment the `PNCounter`. 56 | increment :: HasBump e => PNCounter s -> Par e s () 57 | increment = undefined 58 | 59 | -- | Wait for the number of increments to reach a given number. 60 | waitForIncrements :: HasGet e => Int -> PNCounter s -> Par e s () 61 | waitForIncrements = undefined 62 | 63 | -- | Decrement the `PNCounter`. 64 | decrement :: HasBump e => PNCounter s -> Par e s () 65 | decrement = undefined 66 | 67 | -- | Wait for the number of decrements to reach a given number. 68 | waitForDecrements :: HasGet e => Int -> PNCounter s -> Par e s () 69 | waitForDecrements = undefined 70 | 71 | -- | Get the exact contents of the counter. As with any 72 | -- quasi-deterministic operation, using `freezeCounter` may cause your 73 | -- program to exhibit a limited form of nondeterminism: it will never 74 | -- return the wrong answer, but it may include synchronization bugs 75 | -- that can (nondeterministically) cause exceptions. 76 | freezeCounter :: HasFreeze e => PNCounter s -> Par e s Int 77 | -- Freezing takes the difference of increments and decrements. 78 | freezeCounter = undefined 79 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-monad-par/bf-traverse-monad-par.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE DoAndIfThenElse #-} 3 | 4 | module Main where 5 | 6 | import Control.Monad (when) 7 | import qualified Data.Set as Set 8 | import qualified Data.IntSet as IS 9 | import qualified Data.Vector as V 10 | import GHC.Conc (numCapabilities) 11 | 12 | import Control.Monad.Par (Par, runParIO) 13 | import Control.Monad.Par.Combinator (parMap, parMapM, parFor, InclusiveRange(..)) 14 | import Debug.Trace (trace) 15 | 16 | import Runner 17 | 18 | prnt :: String -> Par () 19 | prnt str = trace str $ return () 20 | 21 | -- This version of bf_traverse is based on monad-par, but it doesn't 22 | -- accumulate results in an LVar while traversing the graph, as 23 | -- BF_LVar does. 
Instead, it does the whole traversal, then maps f 24 | -- over the resulting set of nodes once the traversal has finished. 25 | -- (Note that bf_traverse does not even take f as an argument, as it 26 | -- does in BFS_LVar.) 27 | 28 | bf_traverse :: Int -- iteration counter 29 | -> Graph2 -- graph 30 | -> IS.IntSet -- set of "seen" node labels, initially size 0 31 | -> IS.IntSet -- set of "new" node labels, initially size 1 32 | -> Par (IS.IntSet) 33 | bf_traverse 0 _ seen_rank new_rank = do 34 | when verbose $ prnt $ "bf_traverse finished! seen/new size: " 35 | ++ show (IS.size seen_rank, IS.size new_rank) 36 | return (IS.union seen_rank new_rank) 37 | 38 | bf_traverse k !g !seen_rank !new_rank = do 39 | when verbose $ prnt $"bf_traverse call... " 40 | ++ show k ++ " seen/new size " 41 | ++ show (IS.size seen_rank, IS.size new_rank) 42 | -- Nothing in the new_rank set means nothing left to traverse. 43 | if IS.null new_rank 44 | then return seen_rank 45 | else do 46 | -- Add new_rank stuff to the "seen" list 47 | let seen_rank' = IS.union seen_rank new_rank 48 | allNbr' = IS.fold (\i acc -> IS.union (g V.! i) acc) 49 | IS.empty new_rank 50 | new_rank' = IS.difference allNbr' seen_rank' 51 | bf_traverse (k-1) g seen_rank' new_rank' 52 | 53 | start_traverse :: Int -- iteration counter 54 | -> Graph2 -- graph 55 | -> Int -- start node 56 | -> WorkFn -- function to be applied to each node 57 | -> IO () 58 | start_traverse k !g startNode f = do 59 | runParIO $ do 60 | prnt $ " * Running on " ++ show numCapabilities ++ " parallel resources..." 61 | 62 | -- pass in { startNode } as the initial "new" set 63 | set <- bf_traverse k g IS.empty (IS.singleton startNode) 64 | 65 | prnt $ " * Done with bf_traverse..." 66 | 67 | resLs <- parMap f (IS.toList set) 68 | let set2 = Set.fromList resLs 69 | 70 | prnt $ " * Done parMap(f)..." 71 | prnt $ " * Set size: " ++ show (Set.size set2) 72 | prnt $ " * Set sum: " ++ show (Set.fold (\(x,_) y -> x+y) 0 set2) 73 | 74 | main = makeMain start_traverse 75 | -------------------------------------------------------------------------------- /src/lvish-graph-algorithms/src/Data/LVar/Graph/MSF.hs: -------------------------------------------------------------------------------- 1 | -- | Minimum spanning forests in LVish 2 | 3 | module Data.LVar.Graph.MSF where 4 | 5 | import Control.Monad 6 | import Control.LVish 7 | import qualified Data.LVar.SLMap as SLM 8 | import qualified Data.LVar.IStructure as IS 9 | import Data.Graph.Adjacency as Adj 10 | import qualified Data.Vector.Unboxed as U 11 | 12 | --- A NodeInfo is a 2-element array. 
First element is parentID, second element is rank 13 | --- Taking advantage of the fact that: type of parentID = Adj.NodeID = Int = type of rank 14 | 15 | type NodeInfo s = IS.IStructure s Int 16 | 17 | parent :: NodeInfo s -> Par e s NodeID 18 | parent info = IS.get info 0 19 | 20 | rank :: NodeInfo s -> Par e s Int 21 | rank info = IS.get info 1 22 | 23 | type DisjointSet s = SLM.IMap Int s (NodeInfo s) 24 | 25 | make_set :: NodeID -> Par e s (NodeInfo s) 26 | make_set nd = do 27 | ni <- IS.newIStructure 2 28 | IS.put ni 0 nd >> return ni 29 | 30 | 31 | -- Implements "union by rank", where the rank approximates subtree size 32 | 33 | union :: DisjointSet s -> NodeID -> NodeID -> Par e s () 34 | union ds x y = do 35 | [px,py] <- mapM (find_set ds) [x,y] 36 | rankPx <- SLM.getKey px ds >>= rank 37 | rankPy <- SLM.getKey py ds >>= rank 38 | if rankPx > rankPy 39 | then SLM.modify ds py (make_set py) $ update_parent px 40 | else SLM.modify ds px (make_set px) $ update_parent py 41 | when (rankPx == rankPy) $ do 42 | SLM.modify ds py (make_set py) $ update_rank (rankPy + 1) 43 | return () 44 | return () 45 | 46 | 47 | -- Implements path compression 48 | 49 | find_set :: DisjointSet s -> NodeID -> Par e s NodeID 50 | find_set ds nd = do 51 | info <- SLM.getKey nd ds 52 | p <- parent info 53 | when (nd /= p) $ do 54 | new_p <- find_set ds p 55 | SLM.modify ds nd (make_set nd) $ update_parent new_p 56 | return () 57 | SLM.getKey nd ds >>= parent 58 | 59 | 60 | -- Both types of updates return a new NodeInfo since multiple puts are not allowed 61 | 62 | update_parent :: NodeID -> NodeInfo s -> Par e s (NodeInfo s) 63 | update_parent pID info = do 64 | ni <- IS.newIStructure 2 65 | IS.put ni 0 pID >> rank info >>= IS.put ni 1 >> return ni 66 | 67 | update_rank :: Int -> NodeInfo s -> Par e s (NodeInfo s) 68 | update_rank r info = do 69 | ni <- IS.newIStructure 2 70 | parent info >>= IS.put ni 0 >> IS.put ni 1 r >> return ni 71 | 72 | 73 | -- Using EdgeGraph representation for this algorithm. Defining it here for now. 74 | 75 | data EdgeGraph = EdgeGraph (U.Vector (NodeID, NodeID)) 76 | 77 | type ParFor e s = (Int,Int) -> (Int -> Par e s ()) -> Par e s () 78 | 79 | -- Minimum spanning forest via Kruskal's algorithm 80 | 81 | msf_kruskal :: ParFor e s -> EdgeGraph -> Par e s (IS.IStructure s Bool) 82 | msf_kruskal parFor (EdgeGraph edges) = do 83 | msf <- IS.newIStructure (U.length edges) 84 | let maxV = U.foldl (\a (u,v) -> max a $ max u v) 0 edges 85 | vids = [0..maxV] 86 | infos <- mapM make_set vids 87 | ds <- SLM.newFromList $ zip vids infos 88 | parFor (0,U.length edges) $ \ ed -> do 89 | let (u,v) = edges U.! 
(fromIntegral ed) 90 | [uset, vset] <- mapM (find_set ds) [u,v] 91 | when (uset /= vset) $ IS.put_ msf (fromIntegral ed) True >> union ds u v 92 | return msf 93 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/data/makegraph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import numpy as np 3 | import matplotlib.pyplot as pyplot 4 | from matplotlib.mlab import csv2rec 5 | 6 | def importdata(filename): 7 | """Import CSV data into a list""" 8 | results = {} 9 | data = csv2rec(filename) 10 | 11 | # print data 12 | 13 | StrategiesData = [] 14 | LVarPureData = [] 15 | 16 | # Disregard everything but work, cores, and med running time 17 | for row in data: 18 | if row[0] == "bf_traverse_Strategies": 19 | StrategiesData.append([row[1], # work 20 | row[2], # cores 21 | row[4]]) # med running time 22 | elif row[0] == "bf_traverse_LVar": 23 | LVarPureData.append([row[1], # work 24 | row[2], # cores 25 | row[4]]) # med running time 26 | else: 27 | pass 28 | 29 | return [StrategiesData, LVarPureData] 30 | 31 | def filterdata(data, numcores): 32 | StrategiesData = data[0] 33 | LVarPureData = data[1] 34 | 35 | StrategiesTimes = [] 36 | LVarPureTimes = [] 37 | for row in StrategiesData: 38 | if (row[1] == numcores): 39 | StrategiesTimes.append(row[2]) 40 | for row in LVarPureData: 41 | if (row[1] == numcores): 42 | LVarPureTimes.append(row[2]) 43 | 44 | return [StrategiesTimes, LVarPureTimes] 45 | 46 | def plotdata(times, numcores): 47 | 48 | StrategiesTimes = times[0] 49 | LVarPureTimes = times[1] 50 | 51 | N = 6 # Number of different amounts of work (1, 2, 4, 8, 16, 32) 52 | 53 | ind = np.arange(N) # the x locations for the groups 54 | width = 0.45 # the width of the bars 55 | 56 | pyplot.subplot(("22%d" % numcores)) 57 | rects1 = pyplot.bar(ind, StrategiesTimes, width, 58 | color='b') 59 | 60 | rects2 = pyplot.bar(ind+width, LVarPureTimes, width, 61 | color='g') 62 | 63 | autolabel(rects1) 64 | autolabel(rects2) 65 | 66 | pyplot.xlabel('Work done by analyze (us)') 67 | pyplot.ylabel('Running time (s)') 68 | 69 | pyplot.title('Strategies vs. LVarPure, %d core%s' 70 | % (numcores, "" if numcores == 1 else "s")) 71 | pyplot.xticks(ind+width, ('1', '2', '4', '8', '16', '32')) 72 | 73 | pyplot.legend((rects1[0], rects2[0]), ('Strategies', 'LVarPure'), 74 | loc="upper left") 75 | 76 | def autolabel(rects): 77 | # Put some text labels at the top of columns, truncated to two 78 | # decimal places (TODO: actually round?) 79 | 80 | for rect in rects: 81 | height = rect.get_height() 82 | pyplot.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%.2f'%height, 83 | ha='center', va='bottom') 84 | 85 | if __name__ == '__main__': 86 | 87 | filename = '%s.csv' % "bf_traverse_benchmark_data" 88 | data = importdata(filename) 89 | 90 | pyplot.figure(1) 91 | plotdata(filterdata(data,1), 1) 92 | plotdata(filterdata(data,2), 2) 93 | plotdata(filterdata(data,3), 3) 94 | plotdata(filterdata(data,4), 4) 95 | 96 | pyplot.savefig('%s.png' % "bf_traverse_benchmark_data") 97 | pyplot.show() 98 | -------------------------------------------------------------------------------- /src/lvish-apps/pbbs/pbbs-haskell.cabal: -------------------------------------------------------------------------------- 1 | -- Initial pbbs-haskell.cabal generated by cabal init. 
For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | name: pbbs-haskell 5 | version: 0.1.0.0 6 | 7 | synopsis: An implementation of some of the PBBS benchmarks in Haskell. 8 | description: 9 | The Problem-Based Benchmark Suite (PBBS) contains a number of irregular, non-numeric 10 | benchmark applications, with reference implementations in C++/Cilk. 11 | 12 | license: BSD3 13 | license-file: LICENSE 14 | author: Ryan Newton 15 | maintainer: rrnewton@gmail.com 16 | -- copyright: 17 | category: Benchmarks 18 | build-type: Simple 19 | -- extra-source-files: 20 | cabal-version: >=1.10 21 | 22 | executable pbbs-haskell-bench 23 | main-is: benchmarks/graphs/bfs_lvish.hs 24 | hs-source-dirs: . 25 | ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 26 | build-depends: pbbs-haskell 27 | -- build-depends: base >=4.6 && <4.7, lvish >=1.0 && <2.0, 28 | build-depends: process==1.1.0.2, directory ==1.2.0.1, vector >=0.10 && <0.11, time ==1.4.0.1 29 | build-depends: base >=4.6, deepseq >=1.3 && <1.4, split >=0.2 && <0.3, 30 | containers >=0.5 && <0.6, 31 | bytestring >=0.10 && <0.11, 32 | time >=1.4 && <1.5, 33 | rdtsc >=1.3 && <1.4, 34 | vector >=0.10 && <0.11, monad-par >=0.3 && <0.4, 35 | abstract-par >= 0.3, monad-par-extras >=0.3 && <0.5, 36 | -- parallel >=3.2 && <3.3, 37 | lvish >=1.0 && <2.0, 38 | async >=2.0 && <2.1, 39 | bytestring-mmap >=0.2 && <0.3, 40 | HUnit >=1.2 && <1.3, test-framework-hunit >=0.3 && <0.4 41 | 42 | 43 | 44 | library 45 | exposed-modules: PBBS.FileReader, PBBS.Timing 46 | -- other-modules: 47 | 48 | -- hs-source-dirs: benchmarks/graphs 49 | 50 | other-extensions: CPP, BangPatterns, OverloadedStrings, ScopedTypeVariables, NamedFieldPuns 51 | 52 | build-depends: base >=4.6, deepseq >=1.3 && <1.4, split >=0.2 && <0.3, 53 | containers >=0.5 && <0.6, 54 | bytestring >=0.10 && <0.11, 55 | time >=1.4 && <1.5, 56 | rdtsc >=1.3 && <1.4, 57 | vector >=0.10 && <0.11, monad-par >=0.3 && <0.4, 58 | abstract-par >= 0.3, monad-par-extras >=0.3 && <0.5, 59 | -- parallel >=3.2 && <3.3, 60 | lvish >=1.0 && <2.0, 61 | async >=2.0 && <2.1, 62 | bytestring-mmap >=0.2 && <0.3, 63 | HUnit >=1.2 && <1.3, test-framework-hunit >=0.3 && <0.4 64 | 65 | default-language: Haskell2010 66 | 67 | 68 | -- executable bfs-lvish 69 | -- main-is: benchmarks/graphs/bfs_lvish.hs 70 | -- ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 71 | -- other-extensions: CPP, BangPatterns, DeriveGeneric 72 | -- build-depends: pretty, base >=4.6, 73 | -- -- deepseq >=1.3 && <1.4, 74 | -- -- mtl >=2.1 && <2.2, time >=1.4 && <1.5, containers >=0.5 && <0.6, 75 | -- -- GenericPretty >=1.2 && <1.3, random >=1.0 && <1.1, 76 | -- -- test-framework >=0.8 && <0.9, test-framework-hunit >=0.3 && <0.4, HUnit >=1.2 && <1.3, 77 | -- lvish >=1.0 && <2.0, 78 | -- -- hs-source-dirs: 79 | -- default-language: Haskell2010 80 | 81 | 82 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-Strategies/bf-traverse-Strategies.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns, OverloadedStrings #-} 2 | {-# LANGUAGE DoAndIfThenElse #-} 3 | 4 | module Main where 5 | 6 | import Control.Concurrent (getNumCapabilities) 7 | import Control.Exception (evaluate) 8 | import qualified Data.Set as Set 9 | import qualified Data.IntSet as IS 10 | import qualified Data.Vector as V 11 | 12 | import Data.Time.Clock (getCurrentTime, diffUTCTime) 13 | import qualified 
Control.Parallel.Strategies as Strat 14 | 15 | import Runner 16 | 17 | bf_pure :: Int -- iteration counter 18 | -> Graph2 -- graph 19 | -> IS.IntSet -- set of "seen" node labels, initially size 0 20 | -> IS.IntSet -- set of "new" node labels, initially size 1 21 | -> WorkFn -- function to be applied to each node 22 | -> IS.IntSet 23 | bf_pure 0 _ seen_rank new_rank _ = do 24 | -- when verbose $ prnt $ "bf_pure finished! seen/new size: " 25 | -- ++ show (IS.size seen_rank, IS.size new_rank) 26 | (IS.union seen_rank new_rank) 27 | 28 | bf_pure k !g !seen_rank !new_rank !f = do 29 | -- when verbose $ prnt $"bf_traverse call... " 30 | -- ++ show k ++ " seen/new size " 31 | -- ++ show (IS.size seen_rank, IS.size new_rank) 32 | if IS.null new_rank 33 | then seen_rank 34 | else do 35 | -- Add new_rank stuff to the "seen" list 36 | let seen_rank' = IS.union seen_rank new_rank 37 | -- TODO: parMap version 38 | -- allNbr = IS.fold IS.union 39 | allNbr' = IS.fold (\i acc -> IS.union (g V.! i) acc) 40 | IS.empty new_rank 41 | new_rank' = IS.difference allNbr' seen_rank' 42 | bf_pure (k-1) g seen_rank' new_rank' f 43 | 44 | 45 | start_traverse :: Int -- iteration counter 46 | -> Graph2 -- graph 47 | -> Int -- start node 48 | -> WorkFn -- function to be applied to each node 49 | -> IO () 50 | start_traverse k !g startNode f = do 51 | do 52 | ncap <- getNumCapabilities 53 | putStrLn $ " * Running on " ++ show ncap ++ " parallel resources..." 54 | let set = bf_pure k g IS.empty (IS.singleton startNode) f 55 | -- set2 = Set.fromList$ Strat.parMap Strat.rdeepseq f (IS.toList set) 56 | -- set2 = Set.fromList$ Strat.parMap Strat.rwhnf f (IS.toList set) 57 | 58 | set2 = Set.fromList$ 59 | Strat.withStrategy (Strat.parBuffer 16 Strat.rdeepseq) (map f (IS.toList set)) 60 | 61 | -- set2 = Set.fromList (map f (IS.toList set)) 62 | size = Set.size set2 63 | t0 <- getCurrentTime 64 | evaluate set 65 | t1 <- getCurrentTime 66 | putStrLn $ " * Bf_pure result in WHNF (done with bf_pure?) "++show(diffUTCTime t1 t0) 67 | putStr$ " * Set size: " ++ show size ++ " " 68 | t2 <- getCurrentTime 69 | print (diffUTCTime t2 t1) 70 | 71 | evaluate set2 72 | let ls = Set.toList set2 73 | putStrLn$ " * first element of result: "++show (head ls) 74 | putStrLn$ " * first 10 elements of result: "++show (take 10 ls) 75 | t3 <- getCurrentTime 76 | putStrLn$ " * Analyze function should be finished! "++show(diffUTCTime t3 t2) 77 | 78 | -- putStrLn$ " * Full set2 "++show set2 79 | putStrLn$ " * Set sum: " ++ show (Set.fold (\(_,x) y -> x+y) 0 set2) 80 | 81 | main = makeMain start_traverse 82 | -------------------------------------------------------------------------------- /src/par-collections/Data/Par/Traversable.hs: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {-# LANGUAGE BangPatterns #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | {-# LANGUAGE TypeFamilies #-} 7 | {-# LANGUAGE ScopedTypeVariables #-} 8 | 9 | {-| 10 | 11 | Parallel combinators based on top of a 'Par' monad. Specifically, this module 12 | provides higher order functions for operating on `Traversable` data structures in 13 | parallel. 14 | 15 | -} 16 | 17 | module Data.Par.Traversable 18 | ( 19 | -- * Naive parallel maps on traversable structures. 20 | 21 | -- | Because these operations assume only `Traversable`, the best they can do is 22 | -- to fork one parallel computation per element. 
23 | parMap, ptraverse, 
24 | 
25 | -- * Variants that evaluate only down to weak-head-normal-form 
26 | parMap_, ptraverse_ 
27 | ) 
28 | where 
29 | 
30 | import Control.DeepSeq 
31 | import Control.Exception (evaluate) 
32 | import Data.Traversable 
33 | import Control.Monad as M hiding (mapM, sequence, join) 
34 | import Prelude hiding (mapM, sequence, head,tail) 
35 | 
36 | import Control.Par.Class 
37 | import Control.Par.EffectSigs 
38 | import Control.Par.Class.Unsafe (internalLiftIO) 
39 | 
40 | -- ----------------------------------------------------------------------------- 
41 | -- Parallel maps over Traversable data structures 
42 | -- ----------------------------------------------------------------------------- 
43 | 
44 | -- | Applies the given function to each element of a data structure 
45 | -- in parallel (fully evaluating the results), and returns a new data 
46 | -- structure containing the results. 
47 | -- 
48 | -- > parMap f xs = mapM (spawnP . f) xs >>= mapM get 
49 | -- 
50 | -- @parMap@ is commonly used for lists, where it has this specialised type: 
51 | -- 
52 | -- > parMap :: NFData b => (a -> b) -> [a] -> Par [b] 
53 | -- 
54 | -- But note that for efficient parallelism you want balanced task trees, not "one at 
55 | -- a time" parallel tasks. Thus look at `pmapReduce` and friends. 
56 | parMap :: (Traversable t, NFData b, ParFuture p, HasPut e, HasGet e, FutContents p b) => 
57 | (a -> b) -> t a -> p e s (t b) 
58 | {-# INLINE parMap #-} 
59 | parMap f xs = mapM (spawnP . f) xs >>= mapM get 
60 | 
61 | -- | A variant of `parMap` that only evaluates to weak-head-normal-form. 
62 | parMap_ :: (Traversable t, ParFuture p, HasGet e, HasPut e, FutContents p b) => 
63 | (a -> b) -> t a -> p e s (t b) 
64 | {-# INLINE parMap_ #-} 
65 | parMap_ f xs = mapM spawnWHNF xs >>= mapM get 
66 | where 
67 | spawnWHNF x = spawn_ $ internalLiftIO $ evaluate (f x) 
68 | 
69 | -------------------------------------------------------------------------------- 
70 | 
71 | -- | Like 'parMap', but the function is a @Par@ monad operation. 
72 | -- 
73 | -- > ptraverse f xs = mapM (spawn . f) xs >>= mapM get 
74 | -- 
75 | ptraverse :: (Traversable t, NFData b, ParFuture p, HasPut e, HasGet e, FutContents p b) => 
76 | (a -> p e s b) -> t a -> p e s (t b) 
77 | {-# INLINE ptraverse #-} 
78 | ptraverse f xs = mapM (spawn . f) xs >>= mapM get 
79 | 
80 | 
81 | -- | A variant that only evaluates to weak-head-normal-form. 
82 | ptraverse_ :: (Traversable t, ParFuture p, HasPut e, HasGet e, FutContents p b) => 
83 | (a -> p e s b) -> t a -> p e s (t b) 
84 | {-# INLINE ptraverse_ #-} 
85 | ptraverse_ f xs = mapM (spawn_ . f) xs >>= mapM get 
86 | 
87 | -------------------------------------------------------------------------------- 
88 | 
89 | 
90 | -- TODO: parBuffer -- enable signaling with a counter? 
91 | -------------------------------------------------------------------------------- /src/par-mergesort/Control/Par/MergeSort.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE TypeFamilies #-} 3 | {-# LANGUAGE ScopedTypeVariables #-} 4 | 5 | -- | A fast, parallel mergesort. 6 | -- 7 | -- This module exposes good default configurations, with simple interfaces.
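--
-- A minimal usage sketch (the example vector is illustrative, but the type
-- matches the 'sort' exported below, which is currently specialized to
-- storable Int32 vectors):
--
-- > import qualified Data.Vector.Storable as S
-- > import Data.Int (Int32)
-- > import Control.Par.MergeSort (sort)
-- >
-- > sorted :: S.Vector Int32
-- > sorted = sort (S.fromList [3, 1, 2, 5, 4])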
8 | 9 | module Control.Par.MergeSort 10 | ( 11 | -- * Simple sorts 12 | 13 | -- | These are drop-in replacements for their counterparts in `vector-algorithms` 14 | sort, sortBy, Comparison, 15 | 16 | -- * Par-monad sorts 17 | sortPar 18 | 19 | -- * Unboxed and storable sorts 20 | 21 | ) 22 | where 23 | 24 | import Control.Monad.Primitive 25 | import Data.Int 26 | -- import Data.Vector.Generic as G 27 | import Data.Vector.Generic.Mutable 28 | -- import qualified Data.Vector.Mutable as MV 29 | -- import qualified Control.Par.ST as PST 30 | -- import qualified Control.Par.ST.Vec2 as V2 31 | -- import qualified Data.Vector.Storable.Mutable as SV 32 | import qualified Data.Vector.Storable as S 33 | import qualified Control.Par.ST.StorableVec2 as S2 34 | import Control.Par.MergeSort.Internal 35 | -- import Data.Vector.Par.MergeSort 36 | import Control.Par.Class (ParThreadSafe ()) 37 | import qualified Control.Par.Class as PC 38 | import Control.Par.EffectSigs 39 | import qualified Control.LVish as LV 40 | 41 | 42 | -------------------------------------------------------------------------------- 43 | 44 | type Comparison e = e -> e -> Ordering 45 | 46 | 47 | -- | Perform an out-of-place sort on a pure vector. 48 | -- 49 | sort :: S.Vector Int32 -> S.Vector Int32 50 | -- sort :: (S.Storable e, Ord e) => S.Vector e -> S.Vector e 51 | sort vec = LV.runPar (sortPar vec) 52 | 53 | -- -- The type is generic, but this should only be used for BOXED 54 | -- -- vectors. Unboxed/Storable vectors have more efficient 55 | -- -- counterparts. 56 | -- sort :: (G.Vector v e, Ord e) => v e -> v e 57 | -- sort = undefined 58 | 59 | -- | In-place version of sort that mutates the input vector. 60 | sort' :: (PrimMonad m, MVector v e, Ord e) => v (PrimState m) e -> m () 61 | sort' = undefined 62 | 63 | -- TODO: check if this can produce nondeterministic outcomes given an 64 | -- adversarial (wrong) comparison function. 65 | sortBy :: (PrimMonad m, MVector v e) => Comparison e -> v (PrimState m) e -> m () 66 | sortBy = undefined 67 | 68 | -- LV.runPar $ V.runParVec2T (0,size) $ 69 | 70 | -------------------------------------------------------------------------------- 71 | 72 | 73 | 74 | sortPar :: forall p e s . 75 | (ParThreadSafe p, PC.FutContents p (), PC.ParIVar p, 76 | PC.ParFuture p, HasGet e, HasPut e 77 | ) -- Ord elt, SV.Storable elt 78 | => S.Vector Int32 79 | -> p e s (S.Vector Int32) 80 | sortPar vec = 81 | -- Allocate the temporary buffer. But null-out the left side which 82 | -- we'll replace in a moment: 83 | S2.runParVec2T (0, S.length vec) comp 84 | where 85 | -- comp :: S2.ParVec2T s1 elt elt p e s (S.Vector elt) 86 | comp :: S2.ParVec2T s1 Int32 Int32 p e s (S.Vector Int32) 87 | comp = do vec' <- S2.liftST (S.thaw vec) 88 | S2.installL vec' 89 | -- mergeSort 2048 2048 CSort CMerge -- Breaks in ghci. 90 | -- mergeSort 2048 2048 VAMSort HSMerge -- Works 91 | -- mergeSort 2048 2048 VAMSort CMerge -- Works 92 | mergeSort_int32 2048 2048 CSort HSMerge -- Breaks in ghci. 93 | (left,_) <- S2.reify 94 | S2.liftST $ S.freeze left 95 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/fhpc13-lvars.cabal: -------------------------------------------------------------------------------- 1 | -- Initial fhpc13-lvars.cabal generated by cabal init. For further 2 | -- documentation, see http://haskell.org/cabal/users-guide/ 3 | 4 | -- The name of the package. 5 | name: fhpc13-lvars 6 | 7 | -- The package version. 
See the Haskell package versioning policy (PVP) 8 | -- for standards guiding when and how versions should be incremented. 9 | -- http://www.haskell.org/haskellwiki/Package_versioning_policy 10 | -- PVP summary: +-+------- breaking API changes 11 | -- | | +----- non-breaking API additions 12 | -- | | | +--- code changes with no API change 13 | version: 0.1.0.0 14 | 15 | -- A short (one-line) description of the package. 16 | synopsis: A prototype LVars library for Haskell 17 | 18 | -- A longer description of the package. 19 | description: A prototype LVars library for Haskell, based on the FHPC '13 paper "LVars: Lattice-based Data Structures for Deterministic Parallelism" by the authors. 20 | 21 | -- URL for the project homepage or repository. 22 | homepage: https://github.com/iu-parfunc/lvars/ 23 | 24 | -- The license under which the package is released. 25 | -- license: 26 | 27 | -- The file containing the license text. 28 | -- license-file: LICENSE 29 | 30 | -- The package author(s). 31 | author: Lindsey Kuper and Ryan Newton 32 | 33 | -- An email address to which users can send suggestions, bug reports, and 34 | -- patches. 35 | maintainer: lkuper@cs.indiana.edu 36 | 37 | -- A copyright notice. 38 | -- copyright: 39 | 40 | category: Concurrency 41 | 42 | build-type: Simple 43 | 44 | -- Constraint on the version of Cabal needed to build this package. 45 | cabal-version: >=1.8 46 | 47 | 48 | library 49 | -- Modules exported by the library. 50 | exposed-modules: LVarTraceScalable, LVarTraceIO, LVarTracePure, Common, Data.LVar.PairScalable, Data.LVar.PairIO, Data.LVar.SetScalable, Data.LVar.SetIO, Data.LVar.PairPure, Data.LVar.SetPure 51 | 52 | -- Modules included in this library but not exported. 53 | -- other-modules: 54 | 55 | -- Other library packages from which modules are imported. 
56 | build-depends: base ==4.6.*, deepseq ==1.3.*, containers ==0.5.*, async ==2.0.*, lattices ==1.2.*, split ==0.2.*, bytestring ==0.10.*, time ==1.4.*, rdtsc ==1.3.*, vector ==0.10.*, parallel ==3.2.*, abstract-par ==0.3.* 57 | 58 | -------------------------------------------------------------------------------- 59 | 60 | test-suite test-lvarpure 61 | type: exitcode-stdio-1.0 62 | main-is: test.hs 63 | ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 64 | build-depends: HUnit, test-framework, test-framework-hunit, test-framework-th, base ==4.6.*, deepseq == 1.3.*, containers ==0.5.*, abstract-par >= 0.3, lattices ==1.2.*, async ==2.0.* 65 | cpp-options: -DLVARPURE 66 | 67 | test-suite test-lvario 68 | type: exitcode-stdio-1.0 69 | main-is: test.hs 70 | ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 71 | build-depends: HUnit, test-framework, test-framework-hunit, test-framework-th, base ==4.6.*, deepseq == 1.3.*, containers ==0.5.*, abstract-par >= 0.3, lattices ==1.2.*, async ==2.0.* 72 | cpp-options: -DLVARIO 73 | 74 | test-suite test-lvarscalable 75 | type: exitcode-stdio-1.0 76 | main-is: test.hs 77 | ghc-options: -O2 -threaded -rtsopts -with-rtsopts=-N4 78 | build-depends: HUnit, test-framework, test-framework-hunit, test-framework-th, base ==4.6.*, deepseq == 1.3.*, containers ==0.5.*, abstract-par >= 0.3, lattices ==1.2.*, async ==2.0.* 79 | cpp-options: -DLVARSCALABLE 80 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/data/bf_traverse_benchmark_data.csv: -------------------------------------------------------------------------------- 1 | Version,Work (microseconds),Cores (-N),Min time (5 runs),Med time (5 runs),Max time (5 runs),Min productivity,Med productivity,Max productivity 2 | bf_traverse_Strategies,1,1,0.399957,0.428029,0.428216,87.73,87.951,87.951 3 | bf_traverse_LVar,1,1,0.547646,0.553537,0.559669,84.09,84.18,84.18 4 | bf_traverse_Strategies,1,2,0.417619,0.432155,0.445399,87.654,87.804,87.951 5 | bf_traverse_LVar,1,2,0.363343,0.368254,0.377341,83.225,82.911,83.439 6 | bf_traverse_Strategies,1,3,0.364125,0.417159,0.420144,86.928,87.421,87.421 7 | bf_traverse_LVar,1,3,0.306415,0.307567,0.309153,82.993,82.55,83.108 8 | bf_traverse_Strategies,1,4,0.708443,0.728546,0.735884,87.165,86.842,87.434 9 | bf_traverse_LVar,1,4,0.277813,0.279844,0.284328,81.69,82.394,81.944 10 | bf_traverse_Strategies,2,1,0.736828,0.749379,0.750649,89.847,89.898,89.898 11 | bf_traverse_LVar,2,1,0.865879,0.877147,0.877839,86.538,86.602,86.602 12 | bf_traverse_Strategies,2,2,0.733965,0.735196,0.759022,89.69,89.69,89.847 13 | bf_traverse_LVar,2,2,0.528774,0.530999,0.536502,84.393,84.883,84.971 14 | bf_traverse_Strategies,2,3,0.710346,0.716655,0.718268,89.473,89.473,89.417 15 | bf_traverse_LVar,2,3,0.402792,0.403765,0.409137,83.974,83.87,84.076 16 | bf_traverse_Strategies,2,4,1.104888,1.158671,1.190425,90.265,89.61,87.711 17 | bf_traverse_LVar,2,4,0.346136,0.347966,0.352164,82.993,82.876,82.55 18 | bf_traverse_Strategies,4,1,1.381751,1.385413,1.39533,92.337,92.366,92.366 19 | bf_traverse_LVar,4,1,1.511316,1.522569,1.542347,89.743,89.416,89.492 20 | bf_traverse_Strategies,4,2,1.350399,1.444148,1.469492,92.187,92.481,92.537 21 | bf_traverse_LVar,4,2,0.871776,0.944824,1.168096,86.538,85.52,86.363 22 | bf_traverse_Strategies,4,3,1.292157,1.312213,1.334656,91.902,91.935,92.063 23 | bf_traverse_LVar,4,3,0.713414,0.917507,0.972963,42.433,85.446,85.253 24 | bf_traverse_Strategies,4,4,1.567723,1.599199,1.621453,91.24,90.579,91.459 
25 | bf_traverse_LVar,4,4,0.492126,0.500189,0.862534,84.276,84.242,69.884 26 | bf_traverse_Strategies,8,1,2.633554,2.685817,2.700705,94.818,94.897,94.91 27 | bf_traverse_LVar,8,1,2.406907,2.810789,2.836356,92.265,93.052,93.086 28 | bf_traverse_Strategies,8,2,2.760833,2.911965,2.98288,94.949,95.157,95.226 29 | bf_traverse_LVar,8,2,1.44276,1.463801,1.480845,89.772,89.887,90.262 30 | bf_traverse_Strategies,8,3,2.159408,2.202598,2.281695,93.731,93.786,93.93 31 | bf_traverse_LVar,8,3,0.981804,0.995771,1.016873,88.372,88.317,88.127 32 | bf_traverse_Strategies,8,4,2.465992,2.50351,2.579312,93.15,93.442,93.6 33 | bf_traverse_LVar,8,4,0.784197,0.795847,0.805434,86.315,86.979,87.179 34 | bf_traverse_Strategies,16,1,5.13965,5.278661,5.303205,96.86,96.927,96.937 35 | bf_traverse_LVar,16,1,4.065307,5.357001,5.50737,94.507,95.585,95.69 36 | bf_traverse_Strategies,16,2,4.334589,4.512568,4.837559,96.389,96.503,96.688 37 | bf_traverse_LVar,16,2,2.94766,3.255129,3.427418,92.822,90.254,90.573 38 | bf_traverse_Strategies,16,3,2.53765,2.674014,2.764857,94.609,94.818,94.923 39 | bf_traverse_LVar,16,3,1.792135,2.455807,2.558713,91.467,90,91.005 40 | bf_traverse_Strategies,16,4,3.891406,3.960146,4.095286,94.861,95.516,95.238 41 | bf_traverse_LVar,16,4,1.348164,1.371292,1.425907,89.43,89.919,89.922 42 | bf_traverse_Strategies,32,1,10.258545,10.378604,10.463541,98.259,98.277,98.289 43 | bf_traverse_LVar,32,1,10.405857,10.508465,11.049832,97.506,97.612,97.636 44 | bf_traverse_Strategies,32,2,7.00909,7.158841,7.302561,97.56,97.607,97.649 45 | bf_traverse_LVar,32,2,4.999605,5.199159,5.717066,95.799,95.781,94.257 46 | bf_traverse_Strategies,32,3,3.95633,4.133057,4.262311,96.101,96.24,96.336 47 | bf_traverse_LVar,32,3,3.236585,3.378315,3.445084,93.018,94.481,94.36 48 | bf_traverse_Strategies,32,4,4.0708,4.091473,4.605994,95.038,95.419,96.341 49 | bf_traverse_LVar,32,4,2.404598,2.478476,2.541304,92.877,93.055,92.896 50 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/SetIO.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE NamedFieldPuns #-} 3 | {-# LANGUAGE RankNTypes #-} 4 | {-# LANGUAGE ScopedTypeVariables #-} 5 | 6 | module Data.LVar.SetIO 7 | ( 8 | ISet(), newEmptySet, newEmptySetWithCallBack, putInSet, 9 | waitForSet, waitForSetSize, consumeSet, 10 | 11 | -- For debugging only! 12 | unsafePeekSet, reallyUnsafePeekSet 13 | ) where 14 | import LVarTraceIO 15 | import Data.IORef 16 | import qualified Data.Set as S 17 | import System.IO.Unsafe (unsafePerformIO) 18 | 19 | ------------------------------------------------------------------------------ 20 | -- ISets and setmap implemented on top of LVars: 21 | ------------------------------------------------------------------------------ 22 | 23 | newtype ISet a = ISet (LVar (IORef (S.Set a))) 24 | 25 | newEmptySet :: Par (ISet a) 26 | newEmptySet = fmap ISet $ newLV$ newIORef S.empty 27 | 28 | -- | Extended lambdaLVar (callbacks). Create an empty set, but 29 | -- establish a callback that will be invoked (in parallel) on each 30 | -- element added to the set. 31 | newEmptySetWithCallBack :: forall a . 
Ord a => (a -> Par ()) -> Par (ISet a) 32 | newEmptySetWithCallBack callb = fmap ISet $ newLVWithCallback io 33 | where -- Every time the set is updated we fork callbacks: 34 | io = do 35 | alreadyCalled <- newIORef S.empty 36 | contents <- newIORef S.empty 37 | let fn :: IORef (S.Set a) -> IO Trace 38 | fn _ = do 39 | curr <- readIORef contents 40 | old <- atomicModifyIORef alreadyCalled (\set -> (curr,set)) 41 | let new = S.difference curr old 42 | -- Spawn in parallel all new callbacks: 43 | let trcs = map runCallback (S.toList new) 44 | -- Would be nice if this were a balanced tree: 45 | return (foldl Fork Done trcs) 46 | 47 | runCallback :: a -> Trace 48 | -- Run each callback with an etpmyt continuation: 49 | runCallback elem = runCont (callb elem) (\_ -> Done) 50 | 51 | return (contents, fn) 52 | 53 | -- | Put a single element in the set. (WHNF) Strict in the element being put in the 54 | -- set. 55 | putInSet :: Ord a => a -> ISet a -> Par () 56 | putInSet !elem (ISet lv) = putLV lv putter 57 | where 58 | putter ref = atomicModifyIORef ref (\set -> (S.insert elem set, ())) 59 | 60 | -- | Wait for the set to contain a specified element. 61 | waitForSet :: Ord a => a -> ISet a -> Par () 62 | waitForSet elem (ISet lv@(LVar ref _ _)) = getLV lv getter 63 | where 64 | getter = do 65 | set <- readIORef ref 66 | case S.member elem set of 67 | True -> return (Just ()) 68 | False -> return (Nothing) 69 | 70 | -- | Wait on the SIZE of the set, not its contents. 71 | waitForSetSize :: Int -> ISet a -> Par () 72 | waitForSetSize sz (ISet lv@(LVar ref _ _)) = getLV lv getter 73 | where 74 | getter = do 75 | set <- readIORef ref 76 | if S.size set >= sz 77 | then return (Just ()) 78 | else return Nothing 79 | 80 | -- | Get the exact contents of the set. Using this may cause your 81 | -- program to exhibit a limited form of nondeterminism: it will never 82 | -- return the wrong answer, but it may include synchronization bugs 83 | -- that can (nondeterministically) cause exceptions. 84 | consumeSet :: ISet a -> Par (S.Set a) 85 | consumeSet (ISet lv) = consumeLV lv readIORef 86 | 87 | unsafePeekSet :: ISet a -> Par (S.Set a) 88 | unsafePeekSet (ISet lv) = unsafePeekLV lv readIORef 89 | 90 | reallyUnsafePeekSet :: ISet a -> (S.Set a) 91 | reallyUnsafePeekSet (ISet (LVar {lvstate})) = 92 | unsafePerformIO $ do 93 | current <- readIORef lvstate 94 | return current 95 | 96 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/SetScalable.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE NamedFieldPuns #-} 3 | {-# LANGUAGE RankNTypes #-} 4 | {-# LANGUAGE ScopedTypeVariables #-} 5 | 6 | -- UNFINISHED? 7 | 8 | module Data.LVar.SetScalable 9 | ( 10 | ISet(), newEmptySet, newEmptySetWithCallBack, putInSet, 11 | waitForSet, waitForSetSize, consumeSet, 12 | 13 | -- For debugging only! 
14 | unsafePeekSet, reallyUnsafePeekSet 15 | ) where 16 | import LVarTraceScalable 17 | import Control.DeepSeq 18 | import Data.IORef 19 | import qualified Data.Set as S 20 | import System.IO.Unsafe (unsafePerformIO) 21 | 22 | ------------------------------------------------------------------------------ 23 | -- ISets and setmap implemented on top of LVars: 24 | ------------------------------------------------------------------------------ 25 | 26 | newtype ISet a = ISet (LVar (IORef (S.Set a))) 27 | 28 | newEmptySet :: Par (ISet a) 29 | newEmptySet = fmap ISet $ newLV$ newIORef S.empty 30 | 31 | -- | Extended lambdaLVar (callbacks). Create an empty set, but 32 | -- establish a callback that will be invoked (in parallel) on each 33 | -- element added to the set. 34 | newEmptySetWithCallBack :: forall a . Ord a => (a -> Par ()) -> Par (ISet a) 35 | newEmptySetWithCallBack callb = fmap ISet $ newLVWithCallback io 36 | where -- Every time the set is updated we fork callbacks: 37 | io = do 38 | alreadyCalled <- newIORef S.empty 39 | contents <- newIORef S.empty 40 | let fn :: IORef (S.Set a) -> IO Trace 41 | fn _ = do 42 | curr <- readIORef contents 43 | old <- atomicModifyIORef alreadyCalled (\set -> (curr,set)) 44 | let new = S.difference curr old 45 | -- Spawn in parallel all new callbacks: 46 | let trcs = map runCallback (S.toList new) 47 | -- Would be nice if this were a balanced tree: 48 | return (foldl Fork Done trcs) 49 | 50 | runCallback :: a -> Trace 51 | -- Run each callback with an etpmyt continuation: 52 | runCallback elem = runCont (callb elem) (\_ -> Done) 53 | 54 | return (contents, fn) 55 | 56 | -- | Put a single element in the set. (WHNF) Strict in the element being put in the 57 | -- set. 58 | putInSet :: Ord a => a -> ISet a -> Par () 59 | putInSet !elem (ISet lv) = putLV lv putter 60 | where 61 | putter ref = atomicModifyIORef ref (\set -> (S.insert elem set, ())) 62 | 63 | -- | Wait for the set to contain a specified element. 64 | waitForSet :: Ord a => a -> ISet a -> Par () 65 | waitForSet elem (ISet lv@(LVar ref _)) = getLV lv getter 66 | where 67 | getter = do 68 | set <- readIORef ref 69 | case S.member elem set of 70 | True -> return (Just ()) 71 | False -> return (Nothing) 72 | 73 | -- | Wait on the SIZE of the set, not its contents. 74 | waitForSetSize :: Int -> ISet a -> Par () 75 | waitForSetSize sz (ISet lv@(LVar ref _)) = getLV lv getter 76 | where 77 | getter = do 78 | set <- readIORef ref 79 | if S.size set >= sz 80 | then return (Just ()) 81 | else return Nothing 82 | 83 | -- | Get the exact contents of the set. Using this may cause your 84 | -- program to exhibit a limited form of nondeterminism: it will never 85 | -- return the wrong answer, but it may include synchronization bugs 86 | -- that can (nondeterministically) cause exceptions. 
87 | consumeSet :: ISet a -> Par (S.Set a) 88 | consumeSet (ISet lv) = consumeLV lv readIORef 89 | 90 | unsafePeekSet :: ISet a -> Par (S.Set a) 91 | unsafePeekSet (ISet lv) = unsafePeekLV lv readIORef 92 | 93 | reallyUnsafePeekSet :: ISet a -> (S.Set a) 94 | reallyUnsafePeekSet (ISet (LVar {lvstate})) = 95 | unsafePerformIO $ do 96 | current <- readIORef lvstate 97 | return current 98 | -------------------------------------------------------------------------------- /src/par-transformers/par-transformers.cabal: -------------------------------------------------------------------------------- 1 | Name: par-transformers 2 | Version: 1.1 3 | Synopsis: Extend Par monads with additional capabilities 4 | 5 | -- Version history: 6 | -- 1.0 : Initial release to replace deprecated monad-par-transformers. 7 | -- 1.1 : Add LVish-2.0 effect signatures 8 | 9 | Description: The modules below provide additional 10 | data structures, and other added capabilities 11 | layered on top of any valid 'Par' monad. 12 | 13 | License: BSD3 14 | License-file: LICENSE 15 | Author: Ryan Newton, Aaron Todd 16 | Maintainer: Omer Agacan 17 | Copyright: (c) Ryan Newton, Omer Agacan, Aaron Todd 2015 18 | Stability: Experimental 19 | Category: Control,Parallelism,Monads 20 | Build-type: Simple 21 | Cabal-version: >=1.8 22 | 23 | Extra-source-files: includes/Vec2Common.hs 24 | 25 | Source-repository head 26 | type: git 27 | location: https://github.com/iu-parfunc/lvars 28 | subdir: haskell/par-transformers 29 | 30 | Library 31 | Exposed-modules: 32 | -- A scheduler-transformer that adds cancellation: 33 | Control.LVish.CancelT 34 | -- Control.LVish.DeadlockT 35 | 36 | -- State on top of Par is generally useful, but experimental 37 | Control.Par.StateT 38 | 39 | Control.Par.ST 40 | Control.Par.ST.Vec 41 | 42 | Control.Par.ST.Vec2 43 | Control.Par.ST.UVec 44 | Control.Par.ST.UVec2 45 | Control.Par.ST.StorableVec2 46 | 47 | -- Deterministic RNG needs more testing. 48 | -- Control.Par.RNG 49 | -- Control.Par.Pedigree 50 | 51 | Build-depends: base >= 4 && < 5 52 | -- This provides the interface which monad-par implements: 53 | , par-classes >= 1.1 && < 2.0 54 | , cereal >= 0.3 55 | , deepseq >= 1.3 56 | , random >= 1.0 57 | , mtl >= 2.0 58 | , transformers >= 0.2 59 | , vector >= 0.9 60 | , ghc-prim 61 | , atomic-primops >= 0.6 62 | ghc-options: -O2 -Wall 63 | Include-Dirs: includes/ 64 | 65 | -- Note: This is actually broken with ghc 7.6 on linux, due to a bug 66 | -- with how ghci interacts with the atomic primops package. This 67 | -- should be fixed in ghc 7.8, but until then the tests can be run 68 | -- directly with: `cabal install; ghc -i. -i tests/CancelTests.hs -i 69 | -- tests/STTests.hs tests/Main.hs; ./tests/Main`. 
70 | 71 | Test-suite test-par-transformers 72 | Type: exitcode-stdio-1.0 73 | Hs-source-dirs: tests/ 74 | Main-is: Main.hs 75 | Other-modules: CancelTests STTests 76 | -- (1) Build depends that are used directly by the test files, including: 77 | -- (1A) Self dependency: 78 | Build-depends: par-transformers 79 | -- (1B) Dependencies common to test files and the library: 80 | Build-depends: base >= 4 && < 5 81 | , par-classes >= 1.1 && < 2.0 82 | , mtl >= 2.0 83 | , transformers >= 0.2 84 | , vector >= 0.9 85 | , ghc-prim 86 | 87 | -- (1C) Additional depends to test concrete instances: 88 | Build-depends: lvish >= 2.0 89 | 90 | -- (1C) Additional build depends for testing: 91 | Build-depends: 92 | tasty >= 0.10, 93 | tasty-hunit 94 | 95 | ghc-options: -threaded -Wall -rtsopts -with-rtsopts=-N4 -eventlog 96 | 97 | -- Atomic-primops fails when used by template-haskell/ghci on linux: 98 | if impl(ghc < 7.7) && os(linux) { 99 | buildable: False 100 | } 101 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/MemoTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell, ParallelListComp #-} 2 | 3 | -- | Test only the memoization functionality, corresponds to the @Data.LVar.Memo*@ 4 | -- modules. 5 | 6 | module MemoTests where 7 | import Control.LVish 8 | 9 | import Data.LVar.CycGraph 10 | 11 | import qualified Data.LVar.IVar as IV 12 | import Data.Set as S 13 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 14 | import Test.Tasty.TH (testGroupGenerator) 15 | import Test.Tasty (defaultMain, TestTree) 16 | import Test.Tasty.HUnit (Assertion, assertEqual, assertBool, testCase) -- For macro-expansion. 17 | -- import TestHelpers (defaultMainSeqTests) 18 | 19 | import Prelude as P 20 | 21 | -------------------------------------------------------------------------------- 22 | -- Unit Tests 23 | -------------------------------------------------------------------------------- 24 | 25 | -- This has changed a bunch, update it: [2013.10.23] 26 | {- 27 | cyc02 :: IO String 28 | cyc02 = runParIO $ exploreGraph 29 | (\33 -> return [33]) 30 | (\cyc k nbrs -> 31 | return ("key "++show k++" cyc "++show cyc++" nbrs "++show (P.map fst nbrs))) 32 | (33::Int) 33 | 34 | cyc03 :: IO String 35 | cyc03 = runParIO $ exploreGraph fn1 fn2 33 36 | where 37 | fn1 33 = return [44] 38 | fn1 44 = return [33] 39 | fn2 cyc k nbrs = return ("key "++show k++" cyc "++show cyc++" nbrs "++show (P.map fst nbrs)) 40 | 41 | cyc04 :: IO String 42 | cyc04 = runParIO $ exploreGraph fn1 hndlr 33 43 | where 44 | fn1 33 = return [44] 45 | fn1 44 = return [55] 46 | fn1 55 = return [33] 47 | 48 | hndlr True 55 nbrs = return "stop-at-55" 49 | hndlr cyc k nbrs = do 50 | vals <- mapM (IV.get . 
snd) nbrs 51 | return ("key="++show k++" cyc:"++show cyc++" nbrs:("++ 52 | concat [ show k++","++str++" " | (k,_) <- nbrs | str <- vals ] ++")") 53 | -} 54 | 55 | ----------------------------------------------- 56 | -- Test the sequential cycle-detection approach 57 | ----------------------------------------------- 58 | 59 | case_02seq :: Assertion 60 | case_02seq = assertEqual "direct, one-node cycle, DFS" "cycle-33" cyc02seq 61 | cyc02seq :: String 62 | cyc02seq = runPar $ exploreGraph_seq 63 | (\33 -> return$ Request 33 (\a -> return$ Done$ "33 finished: "++a)) 64 | (\k -> return$ "cycle-"++show k) 65 | 33 66 | 67 | case_03seq :: Assertion 68 | case_03seq = assertEqual "2-way cycle, DFS" "44 finished: cycle-33" cyc03seq 69 | cyc03seq :: String 70 | cyc03seq = runPar $ exploreGraph_seq fn (\k -> return ("cycle-"++show k)) 44 71 | where 72 | fn 33 = return (Request 44 (\a -> return (Done$ "33 finished: "++a))) 73 | fn 44 = return (Request 33 (\a -> return (Done$ "44 finished: "++a))) 74 | 75 | case_04seq :: Assertion 76 | case_04seq = assertEqual "3-way cycle, DFS" 77 | "33 complete: 44 complete: cycle-55" cyc04seq 78 | 79 | cyc04seq :: String 80 | cyc04seq = runPar $ exploreGraph_seq fn (\k -> return ("cycle-"++show k)) 33 81 | where 82 | fn 33 = return (Request 44 (\a -> return (Done$ "33 complete: "++a))) 83 | fn 44 = return (Request 55 (\a -> return (Done$ "44 complete: "++a))) 84 | fn 55 = return (Request 33 (\a -> return (Done$ "55 complete: "++a))) 85 | 86 | cyc05seq :: String 87 | cyc05seq = runPar $ exploreGraph_seq fn (\k -> return ("cycle-"++show k)) 33 88 | where 89 | fn 33 = return (Request 44 (\a -> return (Done$ "33 complete: "++a))) 90 | fn 44 = return (Request 55 (\a -> return (Done$ "44 complete: "++a))) 91 | fn 55 = return (Request 33 (\a -> return (Done$ "55 complete: "++a))) 92 | 93 | 94 | -------------------------------------------------------------------------------- 95 | 96 | tests :: TestTree 97 | tests = $(testGroupGenerator) 98 | 99 | runTests :: IO () 100 | runTests = defaultMain tests 101 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/Data/LVar/SetPure.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE NamedFieldPuns #-} 3 | {-# LANGUAGE RankNTypes #-} 4 | {-# LANGUAGE ScopedTypeVariables #-} 5 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 6 | 7 | module Data.LVar.SetPure 8 | ( 9 | ISet(), newEmptySet, newEmptySetWithCallBack, putInSet, putInSet_, 10 | waitForSet, waitForSetSize, consumeSet, 11 | 12 | -- For debugging only! 
13 | unsafePeekSet, reallyUnsafePeekSet 14 | ) where 15 | import LVarTracePure 16 | import Control.DeepSeq 17 | import Data.IORef 18 | import qualified Data.Set as S 19 | import System.IO.Unsafe (unsafePerformIO) 20 | import Algebra.Lattice (JoinSemiLattice(..)) 21 | 22 | ------------------------------------------------------------------------------ 23 | -- ISets and setmap implemented on top of LVars: 24 | ------------------------------------------------------------------------------ 25 | 26 | -- Abstract data type: 27 | -- newtype ISet a = ISet (LVar (S.Set a)) 28 | newtype ISet a = ISet (LVar (ISetContents a)) 29 | 30 | newtype ISetContents a = ISetContents { unContents :: S.Set a } 31 | deriving JoinSemiLattice 32 | 33 | instance NFData (ISetContents a) where 34 | 35 | 36 | newEmptySet :: Par (ISet a) 37 | newEmptySet = fmap (ISet) $ newLV (ISetContents S.empty) 38 | 39 | -- | Extended lambdaLVar (callbacks). Create an empty set, but establish a callback 40 | -- that will be invoked (in parallel) on each element added to the set. 41 | newEmptySetWithCallBack :: forall a . Ord a => (a -> Par ()) -> Par (ISet a) 42 | newEmptySetWithCallBack callb = fmap ISet $ newLVWithCallback (ISetContents S.empty) cb 43 | where -- Every time the set is updated we fork callbacks on new elements: 44 | cb :: ISetContents a -> ISetContents a -> Trace 45 | cb (ISetContents old) (ISetContents new) = 46 | -- Unfortunately we need to do a set diff every time. 47 | let fresh = S.difference new old 48 | -- Spawn in parallel all new callbacks: 49 | trcs = map runCallback (S.toList fresh) 50 | runCallback :: a -> Trace 51 | -- Run each callback with an empty continuation: 52 | runCallback elem = runCont (callb elem) (\_ -> Done) 53 | in 54 | -- Would be nice if this were a balanced tree: 55 | foldl Fork Done trcs 56 | 57 | -- | Put a single element in the set. (WHNF) Strict in the element being put in the 58 | -- set. 59 | putInSet_ :: Ord a => a -> ISet a -> Par () 60 | putInSet_ !elem (ISet lv) = putLV lv (ISetContents$ S.singleton elem) 61 | 62 | putInSet :: (NFData a, Ord a) => a -> ISet a -> Par () 63 | putInSet e s = deepseq e (putInSet_ e s) 64 | 65 | -- | Wait for the set to contain a specified element. 66 | waitForSet :: Ord a => a -> ISet a -> Par () 67 | waitForSet !elem (ISet lv) = getLV lv fn 68 | where 69 | fn (ISetContents set) 70 | | S.member elem set = Just () 71 | | otherwise = Nothing 72 | 73 | -- | Wait on the SIZE of the set, not its contents. 74 | waitForSetSize :: Int -> ISet a -> Par () 75 | waitForSetSize sz (ISet lv) = getLV lv fn 76 | where 77 | fn (ISetContents set) 78 | | S.size set >= sz = Just () 79 | | otherwise = Nothing 80 | 81 | -- | Get the exact contents of the set. Using this may cause your 82 | -- program to exhibit a limited form of nondeterminism: it will never 83 | -- return the wrong answer, but it may include synchronization bugs 84 | -- that can (nondeterministically) cause exceptions.
85 | consumeSet :: ISet a -> Par (S.Set a) 86 | consumeSet (ISet lv) = fmap unContents $ consumeLV lv 87 | 88 | unsafePeekSet :: ISet a -> Par (S.Set a) 89 | unsafePeekSet (ISet lv) = fmap unContents $ unsafePeekLV lv 90 | 91 | reallyUnsafePeekSet :: ISet a -> (S.Set a) 92 | reallyUnsafePeekSet (ISet (LVar {lvstate})) = 93 | unsafePerformIO $ do 94 | LVarContents {current=ISetContents x} <- readIORef lvstate 95 | return x 96 | 97 | -------------------------------------------------------------------------------- /src/lvish/tests/PureMapTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE ScopedTypeVariables #-} 3 | {-# LANGUAGE DataKinds, TypeFamilies #-} 4 | {-# LANGUAGE CPP #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | 7 | -- | Tests for the Data.LVar.PureMap and Data.LVar.SLMap modules. 8 | 9 | module PureMapTests(tests, runTests) where 10 | 11 | import Data.LVar.PureSet as IS 12 | import qualified Data.LVar.PureMap as IM 13 | -- The common interface under test: 14 | (IMap, waitSize, waitValue, getKey, insert, newEmptyMap, newFromList, 15 | freezeMap, unionHP, forEach, forEachHP, traverseMap, traverseMapHP, modify) 16 | 17 | -- TODO: Use backpack for this when it is available: 18 | #include "CommonMapTests.hs" 19 | 20 | type TheMap k s v = IM.IMap k s v 21 | 22 | -------------------------------------------------------------------------------- 23 | 24 | tests :: TestTree 25 | tests = testGroup "" [tests_here, tests_common ] 26 | 27 | tests_here :: TestTree 28 | tests_here = $(testGroupGenerator) 29 | 30 | runTests :: IO () 31 | runTests = defaultMain tests 32 | 33 | -------------------------------------------------------------------------------- 34 | 35 | -- [2013.08.05] RRN: Observing nondeterministic blocked-indefinitely 36 | -- exception here. 37 | case_i7b :: Assertion 38 | case_i7b = do 39 | allowSomeExceptions ["Multiple puts"] $ 40 | assertEqual "racing insert and modify" 41 | (M.fromList [(1,S.fromList [3.33]), 42 | (2,S.fromList [0.11,4.44])]) =<< 43 | i7b 44 | return () 45 | 46 | -- | A quasi-deterministic example. 47 | i7b :: IO (M.Map Int (S.Set Float)) 48 | -- A manual nested freeze instead of DeepFrz: 49 | i7b = runParQuasiDet $ isQD $ do 50 | mp <- IM.newEmptyMap 51 | s1 <- IS.newEmptySet 52 | s2 <- IS.newEmptySet 53 | IS.insert 0.11 s2 54 | f1 <- IV.spawn_ $ do IM.insert 1 s1 mp 55 | IM.insert 2 s2 mp 56 | f2 <- IV.spawn_ $ do s <- IM.getKey 1 mp 57 | IS.insert 3.33 s 58 | -- RACE: this modify is racing with the insert of s2: 59 | IM.modify mp 2 IS.newEmptySet (IS.insert 4.44) 60 | 61 | IV.get f1; IV.get f2 62 | mp2 <- IM.freezeMap mp 63 | traverse IS.freezeSet mp2 64 | 65 | -- | This example is valid because two modifies may race. 66 | v7c :: IO (M.Map Int (S.Set Float)) 67 | -- Do we need a "deep freeze" that freezes nested structures? 68 | v7c = runParQuasiDet $ do 69 | mp <- IM.newEmptyMap 70 | s1 <- IS.newEmptySet 71 | f1 <- IV.spawn_ $ IM.insert 1 s1 mp 72 | f2 <- IV.spawn_ $ do s <- IM.getKey 1 mp 73 | IS.insert 3.33 s 74 | IM.modify mp 2 IS.newEmptySet (IS.insert 4.44) 75 | f3 <- IV.spawn_ $ IM.modify mp 3 IS.newEmptySet (IS.insert 5.55) 76 | f4 <- IV.spawn_ $ IM.modify mp 3 IS.newEmptySet (IS.insert 6.6) 77 | -- No easy way to wait on the total size of all contained sets... 78 | -- 79 | -- Need a barrier here.. should have a monad-transformer that provides cilk "sync" 80 | -- Global quiesce is convenient too.. 
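-- (In this test the IV.get calls just below act as that barrier: a spawn_ future becomes readable only after its computation finishes, so the freezeMap that follows cannot race with these four writers.)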
81 | IV.get f1; IV.get f2; IV.get f3; IV.get f4 82 | mp2 <- IM.freezeMap mp 83 | traverse IS.freezeSet mp2 84 | 85 | case_v7c :: Assertion 86 | case_v7c = assertEqual "imap test - racing modifies" 87 | (M.fromList [(1,S.fromList [3.33]), 88 | (2,S.fromList [4.44]), 89 | (3,S.fromList [5.55,6.6])]) =<< 90 | v7c 91 | 92 | ------------------------------------------------------------------------------------------ 93 | -- Show instances 94 | ------------------------------------------------------------------------------------------ 95 | 96 | case_show03 :: Assertion 97 | case_show03 = assertEqual "show for PureMap" "{IMap: (\"key1\",33), (\"key2\",44)}" show03 98 | show03 :: String 99 | show03 = show$ runParThenFreeze $ isDet $ do 100 | mp <- IM.newEmptyMap 101 | IM.insert "key1" (33::Int) mp 102 | IM.insert "key2" (44::Int) mp 103 | return mp 104 | 105 | 106 | -------------------------------------------------------------------------------- /.run_benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script runs benchmarks and files their results away in a 4 | # database (currently, Google fusion table). 5 | 6 | echo "Running benchmarks remotely on server `hostname`" 7 | set -e 8 | set -x 9 | 10 | # The working directory is passed as the first argument. 11 | CHECKOUT=$1 12 | if [ "$CHECKOUT" == "" ]; then 13 | echo "ERROR: must pass working copy absolute path as first arg." 14 | exit 1 15 | fi 16 | cd "$CHECKOUT/" 17 | 18 | pwd -P 19 | # These are the arguments to the HSBencher harness... 20 | export BENCHARGS=$* 21 | 22 | if [ "$CABAL" == "" ]; then 23 | CABAL=cabal-1.20 24 | fi 25 | 26 | # (1) Build everything 27 | # ================================================================================ 28 | 29 | if [ "$MACHINECLASS" == cutter ]; then 30 | echo "WARNING: on cutter for some STRANGE reason, building in parallel with cabal crashes." 31 | echo " It crashes with: 'libgcc_s.so.1 must be installed for pthread_cancel to work.'" 32 | echo " Thus we build sequentially..." 
33 | PARARG="" 34 | else 35 | PARARG="-j" 36 | fi 37 | 38 | export EXTRAPKGS=" ./HSBencher/hsbencher ./HSBencher/hsbencher-fusion ./HSBencher/hsbencher-codespeed ./HSBencher/hgdata/ " 39 | NOTEST=1 ./.jenkins_script.sh $PARARG 40 | 41 | # (2) Perform micro benchmarking 42 | # ================================================================================ 43 | cd $CHECKOUT/haskell/lvish 44 | 45 | HOST=`hostname -s` 46 | REGRESS="--regress=allocated:iters --regress=bytesCopied:iters --regress=cycles:iters --regress=numGcs:iters 47 | --regress=mutatorWallSeconds:iters --regress=gcWallSeconds:iters --regress=cpuTime:iters " 48 | 49 | NAME="report_lvish_$HOST" 50 | OUTS=" --raw $NAME.criterion -o $NAME.html" 51 | 52 | # Need to reconfigure because of cabal issue #2182: 53 | $CABAL configure --enable-benchmarks 54 | $CABAL bench --benchmark-options=" $WHICHBENCH --template=./report_format.tpl $REGRESS $OUTS +RTS -T -s -RTS" 55 | # For performance debugging of benchmarks we include these: 56 | # --ghc-options="-ddump-simpl -ddump-to-file" 57 | 58 | # Use the LVish uploader project 59 | CID=820162629229-kp29aklebt6ucos5a71u8tu3hu8unres.apps.googleusercontent.com 60 | SEC=pSsMxVAJCFKyWsazuxZVRZwX 61 | # CID=905767673358.apps.googleusercontent.com 62 | # SEC=2a2H57dBggubW1_rqglC7jtK 63 | 64 | export HSBENCHER_GOOGLE_CLIENTID=$CID 65 | export HSBENCHER_GOOGLE_CLIENTSECRET=$SEC 66 | cabal exec fusion-upload-criterion -- --name=LVish_microbench_results $NAME.criterion 67 | # cabal exec fusion-upload-criterion -- --name=LVish_microbench_results --clientid=$CID --clientsecret=$SEC $NAME.criterion 68 | 69 | # Phone home and send the report 70 | function phone_home() { 71 | STAMP=`date +"%Y_%M_%d_%H:%M:%S"` 72 | mkdir -p $HOME/collected_criterion_reports 73 | # Copy locally first: 74 | cp $NAME.html $HOME/collected_criterion_reports/"$STAMP"_$NAME.html 75 | ssh parfunc@tank.soic.indiana.edu mkdir -p collected_criterion_reports 76 | scp $NAME.html parfunc@tank.soic.indiana.edu:collected_criterion_reports/"$STAMP"_$NAME.html 77 | } 78 | 79 | phone_home || echo "ok if this fails." 80 | 81 | 82 | # (3) Run larger benchmarks: 83 | # ================================================================================ 84 | 85 | cd $CHECKOUT/haskell/lvish-apps 86 | 87 | # TODO: FINISHME 88 | 89 | 90 | # Old/scrap: 91 | # ================================================================================ 92 | 93 | # NUMCPUS=`ls -d /sys/devices/system/cpu/cpu? /sys/devices/system/cpu/cpu?? 2> /dev/null | wc -l` 94 | # if [ $NUMCPUS -lt 17 ]; then 95 | # export THREADS=`seq 1 $NUMCPUS` 96 | # else 97 | # THREADS="1 " 98 | # THREADS+=`seq 2 2 $NUMCPUS` 99 | # export THREADS 100 | # fi 101 | 102 | 103 | # Enable upload of benchmarking data to a Google Fusion Table: 104 | # hsbencher --fusion-upload --name monad-par-test --clientid=$CID --clientsecret=$SEC 105 | 106 | # echo "Printing out final .dat file:" 107 | # cat result*.dat 108 | -------------------------------------------------------------------------------- /src/lvish/tests/BulkRetryTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell, DataKinds, ScopedTypeVariables #-} 2 | 3 | -- | Tests for the Data.LVar.PureSet and Data.LVar.SLSet modules. 
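-- (More precisely, these tests exercise the bulk-retry machinery in Control.LVish.BulkRetry: forSpeculative loops over a Data.LVar.NatArray.)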
4 | 5 | module BulkRetryTests(tests, runTests) where 6 | 7 | import Control.Monad 8 | import Control.Exception 9 | import qualified Data.Set as S 10 | import System.IO (stderr) 11 | import Test.Tasty.HUnit 12 | import Test.Tasty (TestTree, defaultMain, testGroup) 13 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 14 | import Test.Tasty.TH (testGroupGenerator) 15 | import qualified Test.HUnit as HU 16 | import TestHelpers as T 17 | import GHC.Conc (numCapabilities) 18 | 19 | import Control.LVish 20 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, Trvrsbl, runParThenFreeze, runParThenFreezeIO) 21 | import Control.LVish.BulkRetry 22 | import qualified Data.LVar.Generic as G 23 | import Data.LVar.PureSet as IS 24 | import qualified Data.LVar.SLSet as SS 25 | import qualified Data.LVar.IVar as IV 26 | import Data.LVar.NatArray as NA 27 | 28 | import Data.Par.Splittable (pforEach) 29 | import Data.Par.Range (range, fullpar) 30 | 31 | import qualified System.Log.TSLogger as L 32 | 33 | -------------------------------------------------------------------------------- 34 | 35 | tests :: TestTree 36 | tests = $(testGroupGenerator) 37 | 38 | runTests :: IO () 39 | runTests = defaultMain tests 40 | 41 | -------------------------------------------------------------------------------- 42 | 43 | case_br_v1 = do 44 | (logs,Right res) <- runParDetailed (DbgCfg Nothing [L.OutputTo stderr] False) numCapabilities v1 45 | assertEqual "simple par for loop" [100,101,102,103,104] res 46 | 47 | -- | In the blocking version we should only execute each iteration once, and thus see 48 | -- an exact number of log messages. 49 | v1 = do 50 | (na :: NatArray s Int) <- NA.newNatArray 5 51 | -- forSpeculative (0,5) $ \ _hub ix -> do 52 | pforEach (fullpar$ range 0 5) $ \ ix -> do 53 | logDbgLn 1 $ "ForSpeculative, START iter "++show ix 54 | case ix of 55 | 0 -> void$ NA.get na 1 56 | 1 -> void$ NA.get na 2 57 | 2 -> void$ NA.get na 4 58 | 3 -> void$ NA.get na 4 59 | 4 -> return () 60 | NA.put na ix (100 + ix) 61 | logDbgLn 1 $ "ForSpeculative, END iter "++show ix 62 | return () 63 | a <- NA.get na 0 64 | b <- NA.get na 1 65 | c <- NA.get na 2 66 | d <- NA.get na 3 67 | e <- NA.get na 4 68 | return [a,b,c,d,e] 69 | 70 | case_br_v2 = do 71 | (logs,res) <- runParDetailed (DbgCfg Nothing [L.OutputTo stderr] False) numCapabilities v2 72 | case res of 73 | Left e -> throw e 74 | Right x -> assertEqual "simple forSpeculative" [100,101,102,103,104] x 75 | 76 | -- v2 = do 77 | -- (na :: NatArray s Int) <- NA.newNatArray 5 78 | -- forSpeculative (0,5) $ \ hub ix -> do 79 | -- logDbgLn 1 $ "ForSpeculative, START iter "++show ix 80 | -- case ix of 81 | -- 0 -> void$ getNB hub na 1 82 | -- 1 -> void$ getNB hub na 2 83 | -- 2 -> void$ getNB hub na 4 84 | -- 3 -> void$ getNB hub na 4 85 | -- 4 -> return () 86 | -- NA.put na ix (100 + ix) 87 | -- logDbgLn 1 $ "ForSpeculative, END iter "++show ix 88 | -- return () 89 | -- return "hi" 90 | 91 | v2 = do 92 | (na :: NatArray s Int) <- NA.newNatArray 5 93 | forSpeculative (0,5) $ \ hub ix -> do 94 | logDbgLn 1 $ "ForSpeculative, START iter "++show ix 95 | let cont _ = do NA.put na ix (100 + ix) 96 | logDbgLn 1 $ "ForSpeculative, END iter "++show ix 97 | case ix of 98 | 0 -> getNB_cps hub na 1 cont 99 | 1 -> getNB_cps hub na 2 cont 100 | 2 -> getNB_cps hub na 4 cont 101 | 3 -> getNB_cps hub na 4 cont 102 | 4 -> cont 4 103 | return () 104 | logDbgLn 1 $ "DONE with for loop, reading final results." 
105 | a <- NA.get na 0 106 | b <- NA.get na 1 107 | c <- NA.get na 2 108 | d <- NA.get na 3 109 | e <- NA.get na 4 110 | return [a,b,c,d,e] 111 | 112 | 113 | -------------------------------------------------------------------------------- /src/lvish-extra/tests/AddRemoveSetTests.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE TemplateHaskell #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE TypeFamilies #-} 4 | 5 | -- | Tests for the Data.LVar.AddRemoveSet module. 6 | 7 | module AddRemoveSetTests(tests, runTests) where 8 | 9 | import Control.Concurrent 10 | import Test.Tasty.HUnit 11 | import Test.Tasty (TestTree, defaultMain, testGroup) 12 | --import Test.HUnit (Assertion, assertEqual, assertBool, Counts(..)) 13 | import Test.Tasty.TH (testGroupGenerator) 14 | import qualified Test.HUnit as HU 15 | import TestHelpers2 as T 16 | 17 | import qualified Data.Set as S 18 | 19 | import qualified Data.LVar.AddRemoveSet as ARS 20 | 21 | import Control.LVish 22 | import Control.LVish.DeepFrz (DeepFrz(..), Frzn, Trvrsbl, runParThenFreeze, runParThenFreezeIO) 23 | import Control.LVish.Internal (liftIO) 24 | 25 | -------------------------------------------------------------------------------- 26 | 27 | tests :: TestTree 28 | tests = $(testGroupGenerator) 29 | 30 | runTests :: IO () 31 | runTests = defaultMain tests 32 | 33 | -------------------------------------------------------------------------------- 34 | 35 | case_v1 :: Assertion 36 | case_v1 = v1 >>= assertEqual "freeze with 3 elements" 37 | (S.fromList [1..3] :: S.Set Int) 38 | 39 | -- If you have a computation that does freezing, you have to run it with 40 | -- runParQuasiDet / runParNonDet. 41 | v1 :: IO (S.Set Int) 42 | v1 = runParQuasiDet $ 43 | do s <- ARS.newEmptySet 44 | ARS.insert 1 s 45 | ARS.insert 2 s 46 | ARS.insert 3 s 47 | ARS.waitAddedSize 3 s 48 | ARS.freezeSet s 49 | 50 | case_v2 :: Assertion 51 | case_v2 = v2 >>= assertEqual "freeze with 10 elements added, asynchronously" 52 | (S.fromList [1.. v2size] :: S.Set Int) 53 | 54 | v2 :: IO (S.Set Int) 55 | v2 = runParQuasiDet $ 56 | do s <- ARS.newEmptySet 57 | mapM_ (\n -> fork $ do 58 | -- liftIO$ threadDelay 5000 59 | logDbgLn 3$ " [AR-v2] Doing one insert: "++show n 60 | ARS.insert n s) [1.. v2size] 61 | logDbgLn 3$ " [AR-v2] now waiting.." 62 | ARS.waitAddedSize v2size s 63 | logDbgLn 3$ " [AR-v2] now freezing.." 64 | ARS.freezeSet s 65 | 66 | v2size = 67 | case numElems of 68 | Just x -> x 69 | Nothing -> 10 70 | 71 | case_v3 :: Assertion 72 | case_v3 = stressTest T.stressTestReps 15 v3 (\()->True) 73 | 74 | -- "freeze with 3 elements added, asynchronously" 75 | -- If we're doing a guaranteed-deterministic computation we can't 76 | -- actually read out the contents of the set. 77 | v3 :: (HasPut e, HasGet e) => Par e s () 78 | v3 = 79 | do s <- ARS.newEmptySet 80 | mapM_ (\n -> fork $ ARS.insert n s) [1..10::Int] 81 | ARS.waitAddedSize 10 s 82 | 83 | -- Getting occasional failures here with -N2, don't know what's 84 | -- wrong. :( 85 | case_v4 :: Assertion 86 | case_v4 = stressTest T.stressTestReps 30 v4 (== (S.fromList [1..10] :: S.Set Int)) 87 | 88 | -- "additions and removals" 89 | v4 :: (HasPut e, HasGet e, HasFreeze e) => Par e s (S.Set Int) 90 | v4 = 91 | do s <- ARS.newEmptySet 92 | mapM_ (\n -> fork $ ARS.insert n s) [1..15] 93 | mapM_ (\n -> fork $ ARS.remove n s) [11..15] 94 | ARS.waitAddedSize 15 s 95 | ARS.waitRemovedSize 5 s 96 | ARS.freezeSet s 97 | 98 | -- This one is intentionally undersynchronized. 
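-- It waits for the five removals but never for the fifteen additions, so the freeze in i1 can race with still-outstanding inserts; case_i1 therefore tolerates the resulting "Attempt to change a frozen LVar" exceptions.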
99 | case_i1 :: Assertion 100 | case_i1 = do 101 | allowSomeExceptions ["Attempt to change a frozen LVar"] $ 102 | do x <- i1 103 | assertEqual "additions and removals, undersynchronized" 104 | (S.fromList [1..10] :: S.Set Int) x 105 | return () 106 | 107 | -- Unblock too early, leaving a put-after-freeze possibility. 108 | i1 :: IO (S.Set Int) 109 | i1 = runParQuasiDet $ 110 | do s <- ARS.newEmptySet 111 | mapM_ (\n -> fork $ ARS.insert n s) [1..15] 112 | mapM_ (\n -> fork $ ARS.remove n s) [11..15] 113 | -- If we don't wait for 15 additions, they might not all be 114 | -- there when we check. 115 | ARS.waitRemovedSize 5 s 116 | ARS.freezeSet s 117 | -------------------------------------------------------------------------------- /src/lvish/Control/LVish/DeepFrz.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE ConstraintKinds #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE FlexibleInstances #-} 4 | {-# LANGUAGE MagicHash #-} 5 | {-# LANGUAGE RankNTypes #-} 6 | {-# LANGUAGE Trustworthy #-} 7 | {-# LANGUAGE TypeFamilies #-} 8 | 9 | {-| 10 | 11 | The `DeepFrz` module provides a way to return arbitrarily complex data 12 | structures containing LVars from `Par` computations. 13 | 14 | The important thing to know is that to use `runParThenFreeze` to run a 15 | `Par` computation, you must make sure that all types you return from 16 | the `Par` computation have `DeepFrz` instances. This means that, if 17 | you wish to return a user-defined type, you will need to include a bit 18 | of boilerplate to give it a `DeepFrz` instance. Here is a complete 19 | example: 20 | 21 | > {-# LANGUAGE TypeFamilies #-} 22 | > import Control.LVish.DeepFrz 23 | > 24 | > data MyData = MyData Int deriving Show 25 | > 26 | > instance DeepFrz MyData where 27 | > type FrzType MyData = MyData 28 | > 29 | > main = print (runParThenFreeze (return (MyData 3))) 30 | 31 | -} 32 | 33 | -- TODO: a more detailed (recursive?) DeepFrz instance example might 34 | -- be really helpful here for people who want to implement their own 35 | -- LVar types. -- LK 36 | 37 | module Control.LVish.DeepFrz 38 | ( 39 | -- * The functions you'll want to use 40 | runParThenFreeze, 41 | runParThenFreezeIO, 42 | 43 | -- * Some supporting types 44 | DeepFrz(), FrzType, 45 | NonFrzn, Frzn, Trvrsbl, 46 | 47 | ) where 48 | 49 | -- import Control.LVish (LVarData1(..)) 50 | import Control.LVish.DeepFrz.Internal (DeepFrz (..), Frzn, NonFrzn, 51 | Trvrsbl) 52 | import Control.LVish.Internal (Par (WrapPar)) 53 | import Control.Par.EffectSigs 54 | import Control.LVish.Internal.SchedIdempotent (runPar, runParIO) 55 | -------------------------------------------------------------------------------- 56 | 57 | -- | Under normal conditions, calling a `freeze` operation inside a 58 | -- `Par` computation makes the `Par` computation quasi-deterministic. 59 | -- However, if we freeze only after all LVar operations are completed 60 | -- (after the implicit global barrier of `runPar`), then we've avoided 61 | -- all data races, and freezing is therefore safe. Running a `Par` 62 | -- computation with `runParThenFreeze` accomplishes this, without our 63 | -- having to call `freeze` explicitly. 64 | -- 65 | -- In order to use `runParThenFreeze`, the type returned from the 66 | -- `Par` computation must be a member of the `DeepFrz` class. All the 67 | -- @Data.LVar.*@ libraries should provide instances of `DeepFrz` 68 | -- already. Further, you can create additional instances for custom, 69 | -- pure datatypes. 
The result of a `runParThenFreeze` depends on the 70 | -- type-level function `FrzType`, whose only purpose is to toggle the 71 | -- `s` parameters of all IVars to the `Frzn` state. 72 | -- 73 | -- Significantly, the freeze at the end of `runParThenFreeze` has /no/ runtime cost, in 74 | -- spite of the fact that it enables a /deep/ (recursive) freeze of the value returned 75 | -- by the `Par` computation. 76 | runParThenFreeze :: (DeepFrz a, Deterministic e) => Par e NonFrzn a -> FrzType a 77 | -- runParThenFreeze :: Deterministic e => DeepFrz a => Par e NonFrzn a -> FrzType a 78 | runParThenFreeze (WrapPar p) = frz $ runPar p 79 | 80 | -- | This version works for nondeterministic computations as well. 81 | -- 82 | -- Of course, nondeterministic computations may also call `freeze` 83 | -- internally, but this function has an advantage to doing your own 84 | -- `freeze` at the end of a `runParIO`: there is an implicit barrier 85 | -- before the final freeze. Further, `DeepFrz` has no runtime 86 | -- overhead, whereas regular freezing has a cost. 87 | runParThenFreezeIO :: DeepFrz a => Par e NonFrzn a -> IO (FrzType a) 88 | runParThenFreezeIO (WrapPar p) = do 89 | x <- runParIO p 90 | return $ frz x 91 | 92 | {- 93 | -- This won't work because it conflicts with other instances such as "Either": 94 | instance (LVarData1 f, DeepFrz a) => DeepFrz (f s a) where 95 | type FrzType (f s a) = f Frzn (FrzType a) 96 | frz = unsafeCoerce# 97 | -} 98 | -------------------------------------------------------------------------------- /src/lvish/Data/LVar/Generic.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE Trustworthy #-} 2 | {-# LANGUAGE MagicHash #-} 3 | {-# LANGUAGE KindSignatures #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | {-# LANGUAGE ConstraintKinds #-} 6 | {-# LANGUAGE TypeFamilies #-} 7 | {-# LANGUAGE DataKinds #-} -- For Determinism 8 | 9 | -- | A generic interface providing operations that work on /all/ LVars. 10 | 11 | module Data.LVar.Generic 12 | ( 13 | -- * Classes containing the generic interfaces 14 | LVarData1(..), LVarWBottom(..), 15 | OrderedLVarData1(..), 16 | 17 | -- * Supporting types and utilities 18 | AFoldable(..), 19 | castFrzn, forFrzn, 20 | PartialJoinSemiLattice(..) 21 | ) 22 | where 23 | 24 | -- import Control.LVish.Internal.Types 25 | import Control.Par.EffectSigs 26 | import Control.LVish.Internal.Basics 27 | -- -- import Control.LVish.Internal (Par) 28 | import Control.LVish.DeepFrz.Internal (Frzn, Trvrsbl) 29 | import qualified Data.Foldable as F 30 | -- -- import Data.List (sort) 31 | -- -- import GHC.Prim (unsafeCoerce#) 32 | import System.IO.Unsafe (unsafeDupablePerformIO) 33 | import Data.LVar.Generic.Internal 34 | 35 | -------------------------------------------------------------------------------- 36 | 37 | -- | Some LVar datatypes are stored in an /internally/ ordered way so 38 | -- that it is then possible to take /O(1)/ frozen snapshots and consume them 39 | -- inexpensively in a deterministic order. 40 | -- 41 | -- LVars with this additional property provide this class as well as `LVarData1`. 42 | class LVarData1 f => OrderedLVarData1 (f :: * -> * -> *) where 43 | -- | Don't just freeze the LVar, but make the full contents 44 | -- completely available and `Foldable`. Guaranteed /O(1)/. 45 | snapFreeze :: HasFreeze e => f s a -> Par e s (f Trvrsbl a) 46 | 47 | {- 48 | -- | Just like LVarData1 but for type constructors of kind `*`. 
49 | class LVarData0 (t :: *) where 50 | -- | This associated type models a picture of the "complete" contents of the data: 51 | -- e.g. a whole set instead of one element, or the full/empty information for an 52 | -- IVar, instead of just the payload. 53 | type Snapshot0 t 54 | freeze0 :: HasFreeze e => t -> Par e s (Snapshot0 t) 55 | newBottom0 :: Par e s t 56 | -} 57 | 58 | -- | A partial version of "Algebra.Lattice.JoinSemiLattice", this 59 | -- could be made into a complete lattice by the addition of a top 60 | -- element. 61 | class PartialJoinSemiLattice a where 62 | joinMaybe :: a -> a -> Maybe a 63 | 64 | instance PartialJoinSemiLattice Int where 65 | joinMaybe a b 66 | | even a && even b = Just (max a b) 67 | | odd a && odd b = Just (max a b) 68 | | otherwise = Nothing 69 | 70 | ------------------------------------------------------------------------------ 71 | -- Dealing with frozen LVars. 72 | ------------------------------------------------------------------------------ 73 | 74 | -- | `Trvrsbl` is a stronger property than `Frzn`, so it is always safe to \"upcast\" to 75 | -- the weaker version. 76 | castFrzn :: LVarData1 f => f Trvrsbl a -> f Frzn a 77 | castFrzn x = unsafeCoerceLVar x 78 | 79 | -- | LVish `Par` actions must commute, therefore one safe way to consume a frozen (but 80 | -- unordered) LVar, even in another `runPar` session, is to run a `Par` computation for 81 | -- each element. 82 | forFrzn :: LVarData1 f => f Frzn a -> (a -> Par e s ()) -> Par e s () 83 | forFrzn fzn fn = 84 | F.foldrM (\ a () -> fn a) () $ 85 | unsafeDupablePerformIO $ -- ASSUME idempotence. 86 | unsafeTraversable fzn 87 | 88 | 89 | -- | For any LVar, we have a generic way to freeze it in a `runParThenFreeze`. 90 | -- instance (DeepFrz a, LVarData1 f) => DeepFrz (f s a) where 91 | -- type FrzType (f s a) = f Frzn a 92 | -- frz = unsafeCoerceLVar 93 | 94 | -- ^^^ 95 | 96 | -- Note that this doesn't work because it CONFLICTS with the other DeepFrz instances. 97 | -- There's no way that we can prove to GHC that pure data will NEVER be an instance 98 | -- of LVarData1, and therefore will never actually cause a conflict with e above 99 | -- instance. 100 | -------------------------------------------------------------------------------- /src/lvish-extra/Data/LVar/AddRemoveSet.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE TypeFamilies #-} 3 | 4 | {-| 5 | 6 | This module provides sets that allow both addition and removal of 7 | elements. This is possible because, under the hood, it's represented 8 | with two monotonically growing sets, one for additions and one for 9 | removals. It is inspired by /2P-Sets/ from the literature on 10 | /conflict-free replicated data types/. 11 | 12 | -} 13 | module Data.LVar.AddRemoveSet 14 | ( 15 | AddRemoveSet, 16 | newEmptySet, newSet, newFromList, 17 | insert, waitAddedElem, waitAddedSize, 18 | remove, waitRemovedElem, waitRemovedSize, 19 | 20 | freezeSet 21 | 22 | ) where 23 | import Control.Applicative 24 | import Control.LVish 25 | import qualified Data.LVar.PureSet as PS 26 | import qualified Data.Set as S 27 | 28 | -- | The set datatype. 29 | data AddRemoveSet s a = 30 | AddRemoveSet !(PS.ISet s a) 31 | !(PS.ISet s a) 32 | 33 | -- | Create a new, empty `AddRemoveSet`. 34 | newEmptySet :: Ord a => Par e s (AddRemoveSet s a) 35 | newEmptySet = newSet S.empty 36 | 37 | -- | Create a new `AddRemoveSet` populated with initial elements. 
38 | newSet :: Ord a => S.Set a -> Par e s (AddRemoveSet s a) 39 | -- Here we're creating two new PureSets, one from the provided initial 40 | -- elements (the "add" set) and one empty (the "remove" set), and 41 | -- then, since both of those return `Par` computations, we're using 42 | -- our friends `<$>` and `<*>`. 43 | newSet set = AddRemoveSet <$> (PS.newSet set) <*> PS.newEmptySet 44 | -- Alternate version that works if we import `Control.Monad`: 45 | -- newSet set = ap (fmap AddRemoveSet (PS.newSet set)) PS.newEmptySet 46 | 47 | -- | A simple convenience function. Create a new 'ISet' drawing 48 | -- initial elements from an existing list. 49 | newFromList :: Ord a => [a] -> Par e s (AddRemoveSet s a) 50 | newFromList ls = newSet (S.fromList ls) 51 | 52 | -- | Put a single element in the set. (WHNF) Strict in the element 53 | -- being put in the set. 54 | insert :: (HasPut e, Ord a) => a -> AddRemoveSet s a -> Par e s () 55 | -- Because the two sets inside an AddRemoveSet are already PureSets, 56 | -- we really just have to call the provided `insert` method for 57 | -- PureSet. We don't need to call `putLV` or anything like that! 58 | insert !elm (AddRemoveSet added _) = PS.insert elm added 59 | 60 | -- | Wait for the set to contain a specified element. 61 | waitAddedElem :: (HasGet e, Ord a) => a -> AddRemoveSet s a -> Par e s () 62 | -- And similarly here, we don't have to call `getLV` ourselves. 63 | waitAddedElem !elm (AddRemoveSet added _) = PS.waitElem elm added 64 | 65 | -- | Wait on the size of the set of added elements. 66 | waitAddedSize :: HasGet e => Int -> AddRemoveSet s a -> Par e s () 67 | -- You get the idea... 68 | waitAddedSize !sz (AddRemoveSet added _) = PS.waitSize sz added 69 | 70 | -- | Remove a single element from the set. 71 | remove :: (HasPut e, Ord a) => a -> AddRemoveSet s a -> Par e s () 72 | -- We remove an element by adding it to the `removed` set! 73 | remove !elm (AddRemoveSet _ removed) = PS.insert elm removed 74 | 75 | -- | Wait for a single element to be removed from the set. 76 | waitRemovedElem :: (HasGet e, Ord a) => a -> AddRemoveSet s a -> Par e s () 77 | waitRemovedElem !elm (AddRemoveSet _ removed) = PS.waitElem elm removed 78 | 79 | -- | Wait on the size of the set of removed elements. 80 | waitRemovedSize :: HasGet e => Int -> AddRemoveSet s a -> Par e s () 81 | waitRemovedSize !sz (AddRemoveSet _ removed) = do 82 | logDbgLn 2 "waitRemovedSize: about to block." 83 | PS.waitSize sz removed 84 | logDbgLn 2 "waitRemovedSize: unblocked, returning." 85 | 86 | -- | Get the exact contents of the set. As with any 87 | -- quasi-deterministic operation, using `freezeSet` may cause your 88 | -- program to exhibit a limited form of nondeterminism: it will never 89 | -- return the wrong answer, but it may include synchronization bugs 90 | -- that can (nondeterministically) cause exceptions. 91 | freezeSet :: (Ord a, HasFreeze e) => AddRemoveSet s a -> Par e s (S.Set a) 92 | -- Freezing takes the set difference of added and removed elements. 
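-- For instance (mirroring case_v4 in the test suite): after inserting 1..15 and removing 11..15, the frozen result is S.fromList [1..10].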
93 | freezeSet (AddRemoveSet added removed) = 94 | liftA2 S.difference (PS.freezeSet added) (PS.freezeSet removed) 95 | -------------------------------------------------------------------------------- /src/lvish-extra/Data/LVar/Memo.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE EmptyDataDecls #-} 3 | {-# LANGUAGE KindSignatures #-} 4 | {-# LANGUAGE ScopedTypeVariables #-} 5 | {-# LANGUAGE TypeFamilies #-} 6 | {-# OPTIONS_GHC -O2 #-} 7 | 8 | {-| 9 | 10 | This basic version of memotables is implemented on top of existing LVars without 11 | breaking any rules. 12 | 13 | The problem is that it cannot do cycle detection, because that requires tracking 14 | extra information (where we've been) which is NOT exposed to the user and NOT used 15 | 16 | -} 17 | module Data.LVar.Memo 18 | ( 19 | -- * Memo tables and defered lookups 20 | Memo, MemoFuture, makeMemo, 21 | 22 | -- * Memo table operations 23 | getLazy, getMemo, force 24 | ) where 25 | import Debug.Trace 26 | 27 | import Control.LVish 28 | import qualified Data.LVar.PureMap as IM 29 | import Data.LVar.PureSet as IS 30 | 31 | -------------------------------------------------------------------------------- 32 | -- Types 33 | -------------------------------------------------------------------------------- 34 | 35 | -- | A Memo-table that stores cached results of executing a `Par` computation. 36 | data Memo (e::EffectSig) s a b = 37 | Memo !(IS.ISet s a) 38 | !(IM.IMap a s b) 39 | 40 | -- | A result from a lookup in a Memo-table, unforced. 41 | -- The two-stage `getLazy`/`force` lookup is useful to separate 42 | -- spawning the work from demanding its result. 43 | newtype MemoFuture (e :: EffectSig) s b = MemoFuture (Par e s b) 44 | 45 | -------------------------------------------------------------------------------- 46 | 47 | -- | Reify a function in the `Par` monad as an explicit memoization table. 48 | makeMemo :: (HasPut e, Ord a, Eq b, Show a, Show b) => 49 | (a -> Par e s b) -> Par e s (Memo e s a b) 50 | makeMemo fn = do 51 | st <- newEmptySet 52 | mp <- IM.newEmptyMap 53 | IS.forEach st $ \ elm -> do 54 | res <- fn elm 55 | trace ("makeMemo, about to insert result: "++show (show elm, show res)) $ 56 | IM.insert elm res mp 57 | return $! Memo st mp 58 | -- TODO: this version may want to have access to the memo-table within the handler as 59 | -- well.... 60 | 61 | 62 | -- | Read from the memo-table. If the value must be computed, do that right away and 63 | -- block until its complete. 64 | getMemo :: (HasPut e, HasGet e, Ord a, Eq b) => Memo e s a b -> a -> Par e s b 65 | getMemo tab key = 66 | do fut <- getLazy tab key 67 | force fut 68 | 69 | -- | Begin to read from the memo-table. Initiate the computation if the key is not 70 | -- already present. Don't block on the computation being complete, rather, return a 71 | -- future. 72 | getLazy :: (HasPut e, HasGet e, Ord a, Eq b) => Memo e s a b -> a -> Par e s (MemoFuture e s b) 73 | getLazy (Memo st mp) key = do 74 | IS.insert key st 75 | return $! MemoFuture (IM.getKey key mp) 76 | 77 | 78 | -- | This will throw exceptions that were raised during the computation, INCLUDING 79 | -- multiple put. 80 | force :: MemoFuture e s b -> Par e s b 81 | force (MemoFuture pr) = pr 82 | -- FIXME!!! Where do errors in the memoized function (e.g. multiple put) surface? 83 | -- We must pick a determined, consistent place. 
84 | -- 85 | -- Multiple put errors may not be able to wait until this point to get 86 | -- thrown. Otherwise we'd have to be at least quasideterministic here. If you have 87 | -- a MemoFuture you never force, it and an outside computation may be racing to do a 88 | -- put. If the outside one wins the MemoFuture is the one that gets the exception 89 | -- (and hides it), otherwise the exception is exposed. Quasideterminism. 90 | 91 | -- It may be fair to distinguish between internal problems with the MemoFuture 92 | -- (deferred exceptions), and problematic interactions with the outside world (double 93 | -- put) which would then not be deferred. Such futures can't be canceled anyway, so 94 | -- there's really no need to defer the exceptions. 95 | 96 | 97 | 98 | {- 99 | 100 | 101 | -- | Cancel an outstanding speculative computation. This recursively attempts to 102 | -- cancel any downstream computations in this or other memo-tables that are children 103 | -- of the given `MemoFuture`. 104 | cancel :: MemoFuture Det s b -> Par Det s () 105 | -- FIXME: Det needs to be replaced here with "GetOnly". 106 | cancel fut = undefined 107 | 108 | -} 109 | -------------------------------------------------------------------------------- /src/lvish-extra/Experimental/Monotonic.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 2 | 3 | -- | EXPERIMENTAL 4 | 5 | -- | A restricted sub-language for writing ONLY monotonic computations. 6 | 7 | module Monotonic 8 | -- Control.LVish.Monotonic 9 | where 10 | 11 | import Control.DeepSeq 12 | 13 | import qualified Control.LVish as L 14 | import qualified Data.LVar.IVar as I 15 | import qualified Data.LVar.PureSet as S 16 | 17 | import Classes 18 | 19 | import Data.Word 20 | import Data.IORef 21 | import qualified Data.Set as Set 22 | -------------------------------------------------------------------------------- 23 | 24 | 25 | -- | A value used in a monotonic computation. 26 | newtype Mono a = Mono a 27 | 28 | -- | A monotonic, parallel computation with side effects. 29 | newtype MPar a = MPar { unMPar :: L.Par a } 30 | deriving (Monad ) 31 | 32 | -- We can't lift the raw LVar ops, since they are unsafe anyway! 33 | -- putLV :: LVar a d -> (Mono a -> IO (Maybe d)) -> Par () 34 | 35 | -------------------------------------------------------------------------------- 36 | -- Lifted IVar ops: 37 | 38 | -- Oops, wait, this won't work! This only makes sense for counters, not ivars... 39 | put_ :: Eq a => I.IVar a -> Mono a -> MPar () 40 | put_ i (Mono a) = MPar$ I.put_ i a 41 | 42 | get :: I.IVar a -> MPar (Mono a) 43 | get = MPar . fmap Mono . I.get 44 | 45 | ---------------------------------------- 46 | -- Lifted Set ops: 47 | 48 | putInSet :: Ord a => Mono a -> S.ISet a -> MPar () 49 | putInSet (Mono a) set = MPar $ S.insert a set 50 | 51 | -- By using monotonic callbacks we can guarantee that premature freezes can be rerun 52 | -- rather than resulting in errors. 53 | -- withCallbacksThenFreeze :: Eq b => S.ISet a -> (Mono a -> MPar ()) -> MPar b -> MPar b 54 | -- withCallbacksThenFreeze st cb initm = return undefined 55 | 56 | setForeach :: Mono (Snapshot S.ISet a) -> (Mono a -> MPar ()) -> MPar () 57 | setForeach = undefined 58 | 59 | -- Here we speculatively assume that it is safe to freeze and snapshot, but to be 60 | -- careful we only run a monotonic computation on the snapshot. Any side effects it 61 | -- have must *increase* if it is passed a larger snapshot. 
If there is a put AFTER 62 | -- this freeze, the monotonic callback can simply be rerun. 63 | speculateFrozen :: (LVishData1 f) => 64 | f a -> (Mono (Snapshot f a) -> MPar ()) -> L.Par () 65 | -- speculateFrozen :: (LVishData1 f, LVishData1 g) => 66 | -- f a -> (Mono (Snapshot f a) -> MPar (g b)) -> L.Par (g b) 67 | speculateFrozen = error "finishme -speculateFrozen" 68 | 69 | -- FIXME: What prevents bad combinations such as a monotonically shrinking set with a 70 | -- monotonically growing set? 71 | 72 | 73 | -------------------------------------------------------------------------------- 74 | -- Can we enable a limited form of monotonic math? 75 | -- 76 | -- But that assumes the normal total-ordering for builtin types like Word... 77 | 78 | add :: Mono Word -> Mono Word -> Mono Word 79 | add (Mono a) (Mono b) = Mono (a + b) 80 | 81 | mul :: Mono Word -> Mono Word -> Mono Word 82 | mul (Mono a) (Mono b) = Mono (a * b) 83 | 84 | -- This is only true for NON-negative numbers! 85 | setSum :: Mono (Snapshot S.ISet Word) -> Mono Word 86 | setSum = undefined 87 | 88 | setSize :: Mono (Snapshot S.ISet Word) -> Mono Word 89 | setSize (Mono s) = Mono $ fromIntegral $ Set.size s 90 | 91 | mconst :: Num a => a -> Mono a 92 | mconst = Mono 93 | 94 | example :: L.Par () 95 | example = do 96 | s1 <- S.newEmptySet 97 | s2 <- S.newEmptySet 98 | mapM_ (\n -> L.fork $ S.insert n s1) [1..10::Word] 99 | -- sync here if desired... 100 | speculateFrozen s1 $ \ snap -> do 101 | setForeach snap $ \ elem -> do 102 | putInSet (elem `mul` mconst 10) s2 103 | return () 104 | 105 | example2 :: L.Par Counter 106 | example2 = do 107 | s1 <- S.newEmptySet 108 | sz <- newCounter 109 | sm <- newCounter 110 | mapM_ (\n -> L.fork $ S.insert n s1) [1..10::Word] 111 | -- sync here if desired... 112 | speculateFrozen s1 $ \ snap -> do 113 | setForeach snap $ \ elem -> do 114 | setCounter sz (setSize snap) 115 | setCounter sm (setSum snap) 116 | return sm 117 | 118 | 119 | newtype Counter = Counter (L.LVar (IORef Word) ()) 120 | newCounter :: L.Par Counter 121 | newCounter = undefined 122 | 123 | setCounter :: Counter -> Mono Word -> MPar () 124 | setCounter = undefined 125 | 126 | freezeCounter :: Counter -> L.Par Word 127 | freezeCounter = undefined 128 | 129 | -------------------------------------------------------------------------------- /archived_old/fhpc13-lvars/benchmarks/bf-traverse-LVar/bf-traverse-LVar.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE DoAndIfThenElse #-} 3 | 4 | module Main where 5 | 6 | import Control.Exception (evaluate) 7 | import Control.Monad (forM_, when) 8 | import qualified Data.Set as Set 9 | import qualified Data.IntSet as IS 10 | import qualified Data.Vector as V 11 | import GHC.Conc (numCapabilities) 12 | 13 | import qualified Control.Monad.Par as Par 14 | import Control.Monad.Par.Combinator (parMap, parMapM, parFor, InclusiveRange(..)) 15 | import LVarTracePure 16 | import Data.LVar.SetPure 17 | import Debug.Trace (trace) 18 | 19 | import Runner 20 | 21 | prnt :: String -> Par () 22 | prnt str = trace str $ return () 23 | 24 | -- An LVar-based version of bf_traverse. As we traverse the graph, 25 | -- the results of applying f to each node accumulate in an LVar, where 26 | -- they are available to other computations, enabling pipelining. 
27 | 28 | bf_traverse :: Int -- iteration counter 29 | -> Graph2 -- graph 30 | -> ISet WorkRet -- LVar 31 | -> IS.IntSet -- set of "seen" node labels, initially size 0 32 | -> IS.IntSet -- set of "new" node labels, initially size 1 33 | -> WorkFn -- function to be applied to each node 34 | -> Par (IS.IntSet) 35 | bf_traverse 0 _ _ seen_rank new_rank _ = do 36 | when verbose $ prnt $ "bf_traverse finished! seen/new size: " 37 | ++ show (IS.size seen_rank, IS.size new_rank) 38 | return (IS.union seen_rank new_rank) 39 | 40 | bf_traverse k !g !l_acc !seen_rank !new_rank !f = do 41 | when verbose $ prnt $"bf_traverse call... " 42 | ++ show k ++ " seen/new size " 43 | ++ show (IS.size seen_rank, IS.size new_rank) 44 | -- Nothing in the new_rank set means nothing left to traverse. 45 | if IS.null new_rank 46 | then return seen_rank 47 | else do 48 | -- Add new_rank stuff to the "seen" list 49 | let seen_rank' = IS.union seen_rank new_rank 50 | allNbr' = IS.fold (\i acc -> IS.union (g V.! i) acc) 51 | IS.empty new_rank 52 | new_rank' = IS.difference allNbr' seen_rank' 53 | 54 | -- We COULD use callbacks here, but rather we're modeling what happens in the 55 | -- current paper: 56 | parMapM_ (\x -> fork$ do 57 | let elem = f x 58 | putInSet elem l_acc 59 | when dbg $ do 60 | st <- unsafePeekSet l_acc 61 | prnt$ " --> Called putInSet, node "++show x 62 | ++" size is "++show(Set.size st) 63 | ++" elem is "++show elem -- ++" "++show st 64 | ) 65 | (IS.toList new_rank') -- toList is HORRIBLE 66 | bf_traverse (k-1) g l_acc seen_rank' new_rank' f 67 | 68 | start_traverse :: Int -- iteration counter 69 | -> Graph2 -- graph 70 | -> Int -- start node 71 | -> WorkFn -- function to be applied to each node 72 | -> IO () 73 | start_traverse k !g startNode f = do 74 | runParIO $ do 75 | prnt $ " * Running on " ++ show numCapabilities ++ " parallel resources..." 76 | 77 | l_acc <- newEmptySet 78 | -- "manually" add startNode 79 | fork $ putInSet (f startNode) l_acc 80 | -- pass in { startNode } as the initial "new" set 81 | set <- bf_traverse k g l_acc IS.empty (IS.singleton startNode) f 82 | 83 | prnt $ " * Done with bf_traverse..." 84 | let size = IS.size set 85 | 86 | prnt$ " * Waiting on "++show size++" set results..." 87 | 88 | when dbg $ do 89 | forM_ [0..size] $ \ s -> do 90 | prnt$ " ? Blocking on "++show s++" elements to be in the set..." 91 | waitForSetSize s l_acc 92 | 93 | -- Waiting is required in any case for correctness, whether or 94 | -- not we consume the result 95 | waitForSetSize (size) l_acc -- Depends on a bunch of forked computations 96 | prnt$ " * Set results all available! (" ++ show size ++ ")" 97 | 98 | s <- consumeSet l_acc :: Par (Set.Set WorkRet) 99 | liftIO (do evaluate s; return ()) 100 | prnt $ " * Finished consumeSet:" 101 | prnt $ " * Set size: " ++ show (Set.size s) 102 | prnt $ " * Set sum: " ++ show (Set.fold (\(x,_) y -> x+y) 0 s) 103 | 104 | parMapM_ f l = 105 | do parMapM f l 106 | return () 107 | 108 | main = makeMain start_traverse 109 | 110 | --------------------------------------------------------------------------------