├── Setup.hs ├── test └── Spec.hs ├── Makefile ├── .gitignore ├── README.md ├── src ├── Deli │ ├── Printer.hs │ └── Random.hs ├── Deli.hs └── Control │ └── Monad │ └── Concurrent.hs ├── LICENSE ├── docs ├── contributing.md ├── overview.md └── user-guide.md ├── deli.cabal ├── stack.yaml ├── CODE_OF_CONDUCT.md └── app └── Tutorial.lhs /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /test/Spec.hs: -------------------------------------------------------------------------------- 1 | main :: IO () 2 | main = putStrLn "Test suite not yet implemented" 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all compile 2 | 3 | all: compile docs/user-guide.md 4 | 5 | compile: 6 | @stack build 7 | 8 | docs/user-guide.md: app/Tutorial.lhs 9 | @./build-docs.sh 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist 2 | dist-* 3 | cabal-dev 4 | *.o 5 | *.hi 6 | *.chi 7 | *.chs.h 8 | *.dyn_o 9 | *.dyn_hi 10 | .hpc 11 | .hsenv 12 | .cabal-sandbox/ 13 | cabal.sandbox.config 14 | *.prof 15 | *.aux 16 | *.hp 17 | *.eventlog 18 | .stack-work/ 19 | cabal.project.local 20 | .HTF/ 21 | .ghc.environment.* 22 | 23 | # profiling 24 | *.svg 25 | 26 | # datasets 27 | *.csv 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # deli 2 | 3 | ## What is Deli? 4 | 5 | Deli is a performance modeling tool, allowing you to understand and experiment 6 | with new designs at several orders of magnitude faster than wall-clock time. 7 | Specifically, Deli is designed to help you understand how long it takes for 8 | 'jobs' or 'requests' to complete in your system. Deli borrows concepts and 9 | terminology from [queueing 10 | theory](https://en.wikipedia.org/wiki/Queueing_theory), and is implemented as a 11 | Haskell DSL, exposing a [Go (language)](https://golang.org/) -like concurrency 12 | and message-passing API. 13 | 14 | Deli's documentation is divided into the following three sections, depending on 15 | your interest. 16 | 17 | ## First time readers 18 | 19 | If you're looking for an overview on Deli, and whether it may be appropriate 20 | for your problem, head over to our [overview documentation](docs/overview.md). 21 | 22 | ## Using Deli 23 | 24 | If you've decided you want to use Deli, or are already using it in a project, 25 | then our [user guide](docs/user-guide.md) is a great resource. 26 | 27 | ## Contributing 28 | 29 | If you'd like to contribute to Deli, start with our [contributing 30 | documentation](docs/contributing.md). 31 | 32 | ## License 33 | 34 | Deli is BSD3 licensed. More information is available in [LICENSE](LICENSE). 
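## At a glance

To get a feel for the API before diving into the docs, here is a minimal sketch of a Deli model, adapted from [app/Tutorial.lhs](app/Tutorial.lhs) (which compiles as part of this repository); the `singleWorker` name is purely illustrative:

```haskell
module Main where

import Control.Monad (forever)
import Deli (Channel, Deli, JobTiming(..))
import Deli.Printer (printResults)
import System.Random (newStdGen)
import qualified Deli

-- A single worker that drains the shared queue, running jobs in series.
singleWorker :: Channel JobTiming -> Deli JobTiming ()
singleWorker queue =
    forever $ do
        job <- Deli.readChannel queue
        Deli.runJob job

main :: IO ()
main = do
    gen <- newStdGen
    -- One job arrives each simulated second; each needs one second of work.
    let jobs = [JobTiming t 1.0 | t <- [0 .. 999]]
    printResults (Deli.simulate gen jobs singleWorker)
```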
35 | -------------------------------------------------------------------------------- /src/Deli/Printer.hs: -------------------------------------------------------------------------------- 1 | module Deli.Printer 2 | ( printResults 3 | ) where 4 | 5 | import Data.Maybe (fromJust) 6 | import Data.TDigest 7 | import Deli 8 | import Text.Printf (printf) 9 | 10 | 11 | 12 | uQuantile 13 | :: Double 14 | -> TDigest comp 15 | -> Double 16 | uQuantile q digest = 17 | 1000 * fromJust (quantile q digest) 18 | 19 | printTruncate :: String -> Double -> IO () 20 | printTruncate s d = do 21 | printf s d 22 | putStrLn "" 23 | 24 | printResults :: DeliState -> IO () 25 | printResults res = do 26 | putStrLn "Simulated wait (milliseconds):" 27 | 28 | printTruncate "simulated 99th: %.2f" (uQuantile 0.99 (_waitStatistics res)) 29 | printTruncate "simulated 95th: %.2f" (uQuantile 0.95 (_waitStatistics res)) 30 | printTruncate "simulated 75th: %.2f" (uQuantile 0.75 (_waitStatistics res)) 31 | printTruncate "simulated 50th: %.2f" (uQuantile 0.50 (_waitStatistics res)) 32 | putStrLn "" 33 | 34 | putStrLn "Simulated sojourn (milliseconds):" 35 | 36 | printTruncate "simulated 99th: %.2f" (uQuantile 0.99 (_sojournStatistics res)) 37 | printTruncate "simulated 95th: %.2f" (uQuantile 0.95 (_sojournStatistics res)) 38 | printTruncate "simulated 75th: %.2f" (uQuantile 0.75 (_sojournStatistics res)) 39 | printTruncate "simulated 50th: %.2f" (uQuantile 0.50 (_sojournStatistics res)) 40 | putStrLn "" 41 | 42 | putStrLn "Overall processing:" 43 | putStrLn $ "total number processed: " ++ show (_numProcessed res) 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019, GitHub 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | ## Contributing 2 | 3 | [fork]: https://github.com/github/deli/fork 4 | [pr]: https://github.com/github/deli/compare 5 | [code-of-conduct]: /CODE_OF_CONDUCT.md 6 | 7 | Hi there! We're thrilled that you'd like to contribute to this project. Your 8 | help is essential for keeping it great. 9 | 10 | Contributions to this project are 11 | [released](https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license) 12 | to the public under the [BSD3 license](/LICENSE). 13 | 14 | Please note that this project is released with a [Contributor Code of 15 | Conduct][code-of-conduct]. By participating in this project you agree to abide 16 | by its terms. 17 | 18 | ## Submitting a pull request 19 | 20 | 0. [Fork][fork] and clone the repository 21 | 0. Configure and install the dependencies: `stack build` 22 | 0. Make sure the tests pass on your machine: `stack test` 23 | 0. Create a new branch: `git checkout -b my-branch-name` 24 | 0. Make your change, add tests, and make sure the tests still pass 25 | 0. Push to your fork and [submit a pull request][pr] 26 | 0. Pat yourself on the back and wait for your pull request to be reviewed and merged. 27 | 28 | Here are a few things you can do that will increase the likelihood of your pull request being accepted: 29 | 30 | - Write tests. 31 | - Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. 32 | - Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
33 | 34 | ## Resources 35 | 36 | - [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) 37 | - [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) 38 | - [GitHub Help](https://help.github.com) 39 | -------------------------------------------------------------------------------- /deli.cabal: -------------------------------------------------------------------------------- 1 | name: deli 2 | version: 0.1.0.0 3 | -- synopsis: 4 | -- description: 5 | homepage: https://github.com/github/deli 6 | license: BSD3 7 | license-file: LICENSE 8 | author: Reid Draper 9 | maintainer: opensource+deli@github.com 10 | copyright: 2019 GitHub 11 | category: Simulation 12 | build-type: Simple 13 | extra-source-files: README.md 14 | cabal-version: >=1.10 15 | 16 | library 17 | hs-source-dirs: src 18 | exposed-modules: Control.Monad.Concurrent 19 | , Deli 20 | , Deli.Printer 21 | , Deli.Random 22 | build-depends: base >= 4.7 && < 5 23 | , MonadRandom 24 | , bytestring 25 | , containers 26 | , dlist 27 | , lens 28 | , mtl 29 | , pqueue 30 | , random 31 | , random-fu 32 | , random-source 33 | , tdigest 34 | , time 35 | , transformers 36 | default-language: Haskell2010 37 | ghc-options: -Wall 38 | 39 | executable tutorial 40 | hs-source-dirs: app 41 | main-is: Tutorial.lhs 42 | ghc-options: -O1 43 | build-depends: base 44 | , bytestring 45 | , containers 46 | , deli 47 | , mtl 48 | , parallel 49 | , lens 50 | , monad-loops 51 | , random 52 | , random-fu 53 | , random-source 54 | , tdigest 55 | , time 56 | default-language: Haskell2010 57 | ghc-options: -threaded -rtsopts -with-rtsopts=-N -O1 58 | 59 | test-suite deli-test 60 | type: exitcode-stdio-1.0 61 | hs-source-dirs: test 62 | main-is: Spec.hs 63 | build-depends: base 64 | , deli 65 | ghc-options: -threaded -rtsopts -with-rtsopts=-N 66 | default-language: Haskell2010 67 | 68 | source-repository head 69 | type: git 70 | location: https://github.com/github/deli 71 | -------------------------------------------------------------------------------- /docs/overview.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Deli is a Haskell DSL which allows you to model a system, and then run 4 | simulations to understand performance. Deli borrows from [queueing 5 | theory](https://en.wikipedia.org/wiki/Queueing_theory), and allows us to model 6 | and understand how work is performed with a limited set of resources. Deli can 7 | be used to model everything from elevator scheduling, thread and disk 8 | schedulers in operating systems, to how to design checkout lines at a grocery 9 | store. 10 | 11 | To model your system, Deli gives you a concurrency and message passing API 12 | similar to the Go programming language, and allows you to model input into your 13 | system either statistically, or from a production log (like a CSV file). 14 | 15 | Once you have modeled your system and its inputs, you run Deli, which runs as 16 | quickly as it can, simulating wall-clock time, and then returns with statistics 17 | about how your system performed. 18 | 19 | Before digging in further, let's start with why you *should not* use Deli. 20 | 21 | ## Why shouldn't you use Deli? 
22 | 23 | * It may take longer to learn to use Deli than to fix your problem another way 24 | * Using Deli requires a small but existent knowledge of the Haskell programming 25 | language 26 | * Using Deli doesn't obviate the need to understand basic statistics and the 27 | distribution of your input data 28 | * Deli is currently light on documentation; we hope that the community can assist here 29 | 30 | ## Why was this built? 31 | 32 | Deli was built to explore improvements to GitHub's webhook infrastructure, 33 | where the HTTP response time distribution can vary dramatically when a single 34 | popular webhook target is unavailable. 35 | 36 | The initial design goals were sketched out as follows: 37 | 38 | * Easily test new algorithms 39 | * Run several orders of magnitude faster than “real time”. Ideally constrained 40 | only by CPU, not by wall-clock time at all. 41 | * Run deterministically (given the same RNG seed/state) 42 | 43 | ## How does it work? 44 | 45 | Deli is a Haskell library and DSL, implemented as a [discrete event 46 | simulation](https://en.wikipedia.org/wiki/Discrete_event_simulation). It 47 | presents a 48 | [CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes) 49 | concurrency API which is used to model systems. 50 | 51 | ## What next? 52 | 53 | If you'd like to start using Deli, head over to our [user 54 | guide](user-guide.md). 55 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by 'stack init' 2 | # 3 | # Some commonly used options have been documented as comments in this file. 4 | # For advanced use and comprehensive documentation of the format, please see: 5 | # https://docs.haskellstack.org/en/stable/yaml_configuration/ 6 | 7 | # Resolver to choose a 'specific' stackage snapshot or a compiler version. 8 | # A snapshot resolver dictates the compiler version and the set of packages 9 | # to be used for project dependencies. For example: 10 | # 11 | # resolver: lts-3.5 12 | # resolver: nightly-2015-09-21 13 | # resolver: ghc-7.10.2 14 | # resolver: ghcjs-0.1.0_ghc-7.10.2 15 | # resolver: 16 | # name: custom-snapshot 17 | # location: "./custom-snapshot.yaml" 18 | resolver: nightly-2017-10-19 19 | 20 | # User packages to be built. 21 | # Various formats can be used as shown in the example below. 22 | # 23 | # packages: 24 | # - some-directory 25 | # - https://example.com/foo/bar/baz-0.0.2.tar.gz 26 | # - location: 27 | # git: https://github.com/commercialhaskell/stack.git 28 | # commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a 29 | # - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a 30 | # extra-dep: true 31 | # subdirs: 32 | # - auto-update 33 | # - wai 34 | # 35 | # A package marked 'extra-dep: true' will only be built if demanded by a 36 | # non-dependency (i.e. a user package), and its test suites and benchmarks 37 | # will not be run. This is useful for tweaking upstream packages. 38 | packages: 39 | - .
40 | # Dependency packages to be pulled from upstream that are not in the resolver 41 | # (e.g., acme-missiles-0.3) 42 | extra-deps: 43 | - transformers-0.5.5.0 44 | - random-fu-0.2.7.0 45 | - rvar-0.2.0.3 46 | - MonadPrompt-1.0.0.5 47 | - log-domain-0.12 48 | - random-source-0.3.0.6 49 | - flexible-defaults-0.0.1.2 50 | 51 | # Override default flag values for local packages and extra-deps 52 | flags: {} 53 | 54 | # Extra package databases containing global packages 55 | extra-package-dbs: [] 56 | 57 | # Control whether we use the GHC we find on the path 58 | # system-ghc: true 59 | # 60 | # Require a specific version of stack, using version ranges 61 | # require-stack-version: -any # Default 62 | # require-stack-version: ">=1.5" 63 | # 64 | # Override the architecture used by stack, especially useful on Windows 65 | # arch: i386 66 | # arch: x86_64 67 | # 68 | # Extra directories used by stack for building 69 | # extra-include-dirs: [/path/to/dir] 70 | # extra-lib-dirs: [/path/to/dir] 71 | # 72 | # Allow a newer minor version of GHC than the snapshot specifies 73 | # compiler-check: newer-minor 74 | -------------------------------------------------------------------------------- /src/Deli/Random.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | 3 | module Deli.Random 4 | ( distributionToJobs 5 | , distributionToList 6 | , arrivalTimePoissonDistribution 7 | , durationExponentialDistribution 8 | , durationParetoDistribution 9 | ) where 10 | 11 | import Data.Random (RVar, sampleState) 12 | import Data.Random.Distribution.Exponential (exponential) 13 | import Data.Random.Distribution.Pareto (pareto) 14 | import Data.Random.Source.PureMT (PureMT) 15 | import Data.Time.Clock (diffTimeToPicoseconds) 16 | import Deli (Time, Duration(..), JobTiming(..), microsecondsToDuration, microsecondsToTime) 17 | 18 | distributionToJobs 19 | :: RVar Time 20 | -> RVar Duration 21 | -> PureMT 22 | -> [JobTiming] 23 | distributionToJobs timing duration gen = 24 | let jobTimingR = JobTiming <$> timing <*> duration 25 | jobs = distributionToList jobTimingR gen 26 | addTimings (JobTiming a _) (JobTiming b d) = JobTiming (a + b) d 27 | in scanl1 addTimings jobs 28 | 29 | distributionToList 30 | :: RVar a 31 | -> PureMT 32 | -> [a] 33 | distributionToList dist gen = 34 | let (!val, newGen) = sampleState dist gen 35 | in (val : distributionToList dist newGen ) 36 | 37 | arrivalTimePoissonDistribution 38 | :: Double -- ^ Mean number of arrivals per second 39 | -> RVar Time 40 | arrivalTimePoissonDistribution rate = 41 | let inverseRate = 1 / rate 42 | expDist = exponential inverseRate 43 | doubleToTime d = round (d * 1000 * 1000) 44 | in microsecondsToTime . doubleToTime <$> expDist 45 | 46 | durationExponentialDistribution 47 | :: Duration -- ^ Mean service time 48 | -> RVar Duration 49 | durationExponentialDistribution (Duration diffTime) = 50 | let picosDuration = diffTimeToPicoseconds diffTime 51 | oneSecondInPicos = 1000 * 1000 * 1000 :: Double 52 | expDist = exponential (fromIntegral picosDuration / oneSecondInPicos) 53 | doubleToDuration d = round (d * 1000) 54 | in microsecondsToDuration . doubleToDuration <$> expDist 55 | 56 | -- |Create a Duration Pareto distribution from a mean service time. Note that 57 | -- this hardcodes an alpha (α) of 1.16 (log4 5), which is used for the 80-20 58 | -- "Pareto priciple" distribution. 
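--
-- The scale parameter (x_m) used below is derived from the requested mean:
-- for a Pareto distribution with shape α > 1, mean = α * x_m / (α - 1), so
-- x_m = mean * (α - 1) / α = mean * (0.16 / 1.16) ≈ mean * 0.1379, which is
-- where the 0.1379 factor in the implementation comes from.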
59 | durationParetoDistribution 60 | :: Duration -- ^ Mean service time 61 | -> RVar Duration 62 | durationParetoDistribution (Duration diffTime) = 63 | let picosDuration = diffTimeToPicoseconds diffTime 64 | picosMeanToScale = fromIntegral picosDuration * (0.1379 :: Double) 65 | oneSecondInPicos = 1000 * 1000 * 1000 :: Double 66 | paretoDist = pareto (picosMeanToScale / oneSecondInPicos) 1.16 67 | doubleToDuration d = round (d * 1000) 68 | in microsecondsToDuration . doubleToDuration <$> paretoDist 69 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at opensource@github.com. 
All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /src/Deli.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE BangPatterns #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE DeriveFunctor #-} 4 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 5 | {-# LANGUAGE TemplateHaskell #-} 6 | 7 | module Deli 8 | ( Deli 9 | , HasJobTiming(..) 10 | , JobTiming(..) 11 | , TimesliceStats(..) 12 | , DeliState(..) 13 | -- re-exported from Control.Monad.Concurrent 14 | , Concurrent.Time(..) 15 | , Concurrent.Duration(..) 16 | , Concurrent.Channel 17 | , Concurrent.ThreadId 18 | , Concurrent.addDuration 19 | , Concurrent.microsecond 20 | , Concurrent.millisecond 21 | , Concurrent.millisecondsToDuration 22 | , Concurrent.millisecondsToTime 23 | , Concurrent.microsecondsToDuration 24 | , Concurrent.microsecondsToTime 25 | , Concurrent.subtractTime 26 | , fork 27 | , threadId 28 | , sleep 29 | , now 30 | , newChannel 31 | , writeChannel 32 | , writeChannelNonblocking 33 | , readChannel 34 | , readChannelNonblocking 35 | , runDeli 36 | , runJob 37 | , priority 38 | , simulate 39 | ) where 40 | 41 | import Control.Lens (Getter, makeLenses, to, use, (%~), (+~), (.~), (^.)) 42 | import Control.Monad.Random.Strict 43 | import Control.Monad.State.Strict (State, execState, modify') 44 | import Data.Function ((&)) 45 | import Data.Map.Strict 46 | import Data.Maybe (fromJust) 47 | import Data.TDigest (TDigest, tdigest, quantile) 48 | import Data.Time 49 | import System.Random (StdGen) 50 | import qualified Control.Monad.Concurrent as Concurrent 51 | import qualified Data.TDigest as TDigest 52 | 53 | data JobTiming = JobTiming 54 | { _jobStart :: !Concurrent.Time 55 | , _jobDuration :: !Concurrent.Duration 56 | } deriving (Show, Eq, Ord) 57 | 58 | class HasJobTiming a where 59 | jobTiming :: Getter a JobTiming 60 | 61 | instance HasJobTiming JobTiming where 62 | jobTiming = to id 63 | 64 | data FinishedJob = FinishedJob 65 | { _jobFinishTime :: Concurrent.Time 66 | , _jobWait :: Concurrent.Duration 67 | } deriving (Show, Eq, Ord) 68 | 69 | data TimesliceStats = TimesliceStats 70 | { 71 | -- inclusive 72 | _sliceStart :: Concurrent.Time 73 | , _response50 :: Concurrent.Duration 74 | , _response99 :: Concurrent.Duration 75 | } deriving (Show) 76 | 77 | data DeliState = DeliState 78 | { _sojournStatistics :: !(TDigest 10) 79 | , _perfectStatistics :: !(TDigest 10) 80 | , _waitStatistics :: !(TDigest 10) 81 | , _temporalStats :: !(Map Concurrent.Time TimesliceStats) 82 | , _currentMinute :: !Concurrent.Time 83 | , 
_currentDigest :: !(TDigest 10) 84 | , _numProcessed :: !Integer 85 | } deriving (Show) 86 | 87 | makeLenses ''DeliState 88 | 89 | freshState :: DeliState 90 | freshState = 91 | DeliState 92 | { _sojournStatistics = emptyDigest 93 | , _perfectStatistics = emptyDigest 94 | , _waitStatistics = emptyDigest 95 | , _temporalStats = Data.Map.Strict.empty 96 | , _currentMinute = 0 97 | , _currentDigest = emptyDigest 98 | , _numProcessed = 0 99 | } 100 | where emptyDigest = tdigest [] 101 | 102 | newtype Deli chanState a = 103 | Deli 104 | { _getDeli :: Concurrent.ConcurrentT chanState (RandT StdGen (State DeliState)) a 105 | } deriving (Functor, Applicative, Monad) 106 | 107 | instance MonadRandom (Deli chanState) where 108 | getRandomR range = Deli $ lift (getRandomR range) 109 | 110 | getRandom = Deli $ lift getRandom 111 | 112 | getRandomRs range = Deli $ lift (getRandomRs range) 113 | 114 | getRandoms = Deli $ lift getRandoms 115 | 116 | ------------------------------------------------------------------------------ 117 | -- ## Wrappers around the Control.Monad.Concurrent API 118 | ------------------------------------------------------------------------------ 119 | 120 | fork 121 | :: Deli chanState () 122 | -> Deli chanState () 123 | fork (Deli conc) = 124 | Deli $ Concurrent.fork conc 125 | 126 | threadId 127 | :: Deli chanState Concurrent.ThreadId 128 | threadId = 129 | Deli Concurrent.threadId 130 | 131 | sleep 132 | :: Concurrent.Duration 133 | -> Deli chanState () 134 | sleep = Deli . Concurrent.sleep 135 | 136 | now 137 | :: Deli chanState Concurrent.Time 138 | now = Deli Concurrent.now 139 | 140 | newChannel 141 | :: Maybe Int 142 | -> Deli chanState (Concurrent.Channel chanState) 143 | newChannel = Deli . Concurrent.newChannel 144 | 145 | writeChannel 146 | :: Concurrent.Channel chanState 147 | -> chanState 148 | -> Deli chanState () 149 | writeChannel chan item = 150 | Deli (Concurrent.writeChannel chan item) 151 | 152 | writeChannelNonblocking 153 | :: Concurrent.Channel chanState 154 | -> chanState 155 | -> Deli chanState (Maybe chanState) 156 | writeChannelNonblocking chan item = 157 | Deli (Concurrent.writeChannelNonblocking chan item) 158 | 159 | readChannel 160 | :: Concurrent.Channel chanState 161 | -> Deli chanState chanState 162 | readChannel = Deli . Concurrent.readChannel 163 | 164 | readChannelNonblocking 165 | :: Concurrent.Channel chanState 166 | -> Deli chanState (Maybe chanState) 167 | readChannelNonblocking = Deli . Concurrent.readChannelNonblocking 168 | 169 | ------------------------------------------------------------------------------ 170 | -- ## Time Conversion 171 | ------------------------------------------------------------------------------ 172 | 173 | -- Round down a `Concurrent.Time' to the nearest minute 174 | clampMinutes 175 | :: Concurrent.Time 176 | -> Concurrent.Time 177 | clampMinutes (Concurrent.Time t) = 178 | let picosPerMinute = 60000000000000 179 | inPicos = diffTimeToPicoseconds t 180 | toMinute = inPicos `quot` picosPerMinute 181 | in Concurrent.Time (picosecondsToDiffTime (toMinute * picosPerMinute)) 182 | 183 | doubleToDuration :: Double -> Concurrent.Duration 184 | doubleToDuration = fromRational . 
toRational 185 | 186 | ------------------------------------------------------------------------------ 187 | -- ## Simulation 188 | ------------------------------------------------------------------------------ 189 | 190 | runDeli 191 | :: StdGen 192 | -> Deli chanState () 193 | -> DeliState 194 | runDeli gen (Deli conc) = 195 | let !randomAction = Concurrent.runConcurrentT conc 196 | !writerAction = evalRandT randomAction gen 197 | !res = execState writerAction freshState 198 | in res 199 | 200 | runJob 201 | :: HasJobTiming j 202 | => j 203 | -> Deli chanState () 204 | runJob j = do 205 | let (JobTiming start duration) = j ^. jobTiming 206 | beforeJob <- Deli Concurrent.now 207 | Deli (Concurrent.sleep duration) 208 | nowTime <- Deli Concurrent.now 209 | let !sojourn = Concurrent.subtractTime nowTime start 210 | !waitTime = Concurrent.subtractTime beforeJob start 211 | modifier s = s & numProcessed +~ 1 212 | & sojournStatistics %~ TDigest.insert (realToFrac sojourn) 213 | & waitStatistics %~ TDigest.insert (realToFrac waitTime) 214 | & perfectStatistics %~ TDigest.insert (realToFrac duration) 215 | Deli $ modify' modifier 216 | updateTemporalStats (FinishedJob nowTime waitTime) 217 | 218 | priority 219 | :: HasJobTiming j 220 | => Concurrent.Time 221 | -> j 222 | -> Concurrent.Duration 223 | priority time j = 224 | let (JobTiming start duration) = j ^. jobTiming 225 | numerator = Concurrent.subtractTime time start + duration 226 | denominator = duration 227 | in 228 | if denominator == 0 229 | then 0 230 | else numerator / denominator 231 | 232 | updateTemporalStats 233 | :: FinishedJob 234 | -> Deli chanState () 235 | updateTemporalStats (FinishedJob endTime sojourn) = do 236 | let clampedEnd = clampMinutes endTime 237 | currentSlice <- Deli $ use currentMinute 238 | if currentSlice == clampedEnd 239 | then do 240 | let modifier s = 241 | s & currentDigest %~ TDigest.insert (realToFrac sojourn) 242 | Deli $ modify' modifier 243 | else do 244 | let modifier s = 245 | s & currentMinute .~ clampedEnd 246 | & currentDigest .~ TDigest.singleton (realToFrac sojourn) 247 | & temporalStats %~ Data.Map.Strict.insert currentSlice (digestToTimeSlice currentSlice (s ^. currentDigest)) 248 | Deli $ modify' modifier 249 | 250 | 251 | digestToTimeSlice 252 | :: Concurrent.Time 253 | -> TDigest compression 254 | -> TimesliceStats 255 | digestToTimeSlice minute stats = 256 | TimesliceStats 257 | { _sliceStart = minute 258 | , _response50 = doubleToDuration (fromJust (quantile 0.5 stats)) 259 | , _response99 = doubleToDuration (fromJust (quantile 0.99 stats)) 260 | } 261 | 262 | simulate 263 | :: HasJobTiming j 264 | => StdGen 265 | -> [j] 266 | -> (Concurrent.Channel j -> Deli j ()) 267 | -> DeliState 268 | simulate gen jobs process = 269 | runDeli gen $ do 270 | mainChan <- Deli (Concurrent.newChannel Nothing) 271 | let insertQueue = Concurrent.writeChannel mainChan 272 | scheduled = [(_jobStart (job ^. jobTiming), insertQueue job) | job <- jobs] 273 | Deli (Concurrent.lazySchedule scheduled) 274 | process mainChan 275 | -------------------------------------------------------------------------------- /app/Tutorial.lhs: -------------------------------------------------------------------------------- 1 | Welcome to the Deli tutorial. Through a series of increasingly complex 2 | examples, this tutorial will give you an idea of the power and usage for Deli. 3 | 4 | This example is also a literate Haskell file, which means this document itself 5 | compiles and is executable. 
You can run it yourself and see the output by 6 | running: 7 | 8 | ```shell 9 | $ stack build 10 | $ stack run tutorial 11 | ``` 12 | 13 | First, let's begin with our imports: 14 | 15 | \begin{code} 16 | module Main where 17 | 18 | import Control.Lens (to) 19 | import Control.Monad (replicateM_, forever) 20 | import Data.Random.Source.PureMT (newPureMT) 21 | import Deli (Channel, Deli, JobTiming(..)) 22 | import Deli.Printer (printResults) 23 | import System.Random 24 | import qualified Deli 25 | import qualified Deli.Random 26 | \end{code} 27 | 28 | Simple Queues 29 | --- 30 | 31 | Next, let's create our first example of a single queue and worker. Work will 32 | be placed on the main queue, and our worker will read from it, and process each 33 | item in serial: 34 | 35 | \begin{code} 36 | singleQueue 37 | :: Channel JobTiming 38 | -> Deli JobTiming () 39 | singleQueue queue = 40 | forever $ do 41 | job <- Deli.readChannel queue 42 | Deli.runJob job 43 | \end{code} 44 | 45 | As you can see, describing a very simple system like this has little ceremony. 46 | Next, let's set up the rest of the simulation, and run it. 47 | 48 | \begin{code} 49 | singleQueueExample :: IO () 50 | singleQueueExample = do 51 | gen <- newStdGen 52 | let durations = cycle [0.8, 0.9, 1.0, 1.1, 1.2] 53 | times = [0,1..(100000-1)] 54 | jobs = zipWith JobTiming times durations 55 | res = Deli.simulate gen jobs singleQueue 56 | printResults res 57 | \end{code} 58 | 59 | First we've created a new random number generator (the Deli type implements 60 | `MonadRandom`, for convenient, reproducible random number generation). Next, we 61 | create a dataset of our jobs, to be simulated. In this case, jobs will take one 62 | of a set of durations (in seconds), with a mean of `1.0`. Then we set it up so 63 | that jobs will be triggered from the outside world once each second. 64 | 65 | Finally, we run the simulation, passing in our random number seed, set of jobs, 66 | and our implemented system (`singleQueue`). 67 | 68 | Running the simulation, we get two primary sets of statistics. We see the wait 69 | time (how long did our jobs have to wait in line before processing began), and 70 | their sojourn time, which is the wait time plus the processing time. 71 | 72 | In this case, we have a non-zero wait-time, which means we are sometimes at 73 | capacity, and are queueing up work. This is also reflected in the fact that the 74 | sojourn 50th percentile is greater (albeit slightly) than one second. 75 | 76 | You will see output similar to this: 77 | 78 | ```shell 79 | Simulated wait (milliseconds): 80 | simulated 99th: 294.99974998749934 81 | simulated 95th: 274.9987499374968 82 | simulated 75th: 181.24578114452865 83 | simulated 50th: 87.4934373359334 84 | 85 | Simulated sojourn (milliseconds): 86 | simulated 99th: 1295.0000000000002 87 | simulated 95th: 1275.0000000000002 88 | simulated 75th: 1181.2495312382812 89 | simulated 50th: 1087.497187429686 90 | ``` 91 | 92 | 93 | Next, let's see what happens if we add more workers: 94 | 95 | \begin{code} 96 | variableWorkers 97 | :: Deli.HasJobTiming jobType 98 | => Int 99 | -> Channel jobType 100 | -> Deli jobType () 101 | variableWorkers num queue = 102 | replicateM_ num $ 103 | Deli.fork $ forever $ do 104 | job <- Deli.readChannel queue 105 | Deli.runJob job 106 | \end{code} 107 | 108 | Here we've simply parameterized the number of workers. For each worker, we 109 | spawn a thread (using the Deli DSL), and enter an infinite loop to read work 110 | from the shared queue.
This expands our exposure to the Deli API, as we've now 111 | seen `fork`, `readChannel`, `runJob`, and `simulate`. Deli's core API exposes 112 | familiar programming concepts to create queues, read and write to them, and 113 | fork (lightweight) threads. This allows you to create a model of your system, 114 | using similar constructs to the actual version. This is core to Deli. 115 | 116 | \begin{code} 117 | twoWorkerQueueExample :: IO () 118 | twoWorkerQueueExample = do 119 | gen <- newStdGen 120 | let durations = cycle [0.8, 0.9, 1.0, 1.1, 1.2] 121 | times = [0,1..(100000-1)] 122 | jobs = zipWith JobTiming times durations 123 | res = Deli.simulate gen jobs (variableWorkers 2) 124 | printResults res 125 | \end{code} 126 | 127 | Now we can run our same example, and pass in two workers. Running this, we see 128 | that the system never reaches capacity, as the wait time is always zero. We 129 | won't be able to beat this performance. 130 | 131 | ``` 132 | Simulated wait (milliseconds): 133 | simulated 99th: 0.0 134 | simulated 95th: 0.0 135 | simulated 75th: 0.0 136 | simulated 50th: 0.0 137 | 138 | Simulated sojourn (milliseconds): 139 | simulated 99th: 1197.5 140 | simulated 95th: 1187.5 141 | simulated 75th: 1125.0 142 | simulated 50th: 1000.0 143 | ``` 144 | 145 | A more complex example 146 | --- 147 | 148 | Now, let's say we have a Pareto distribution, with some requests 149 | generally being quick, and others generally taking much longer. Let's compare 150 | two implementations, one simply with twenty workers, and another with two separate 151 | queues, partitioned by request type (still using a total of twenty workers). 152 | 153 | Now let's create our two systems whose performance we want to compare. 154 | 155 | \begin{code} 156 | twentyWorkers 157 | :: Channel JobTiming 158 | -> Deli JobTiming () 159 | twentyWorkers = variableWorkers 20 160 | 161 | partitionedQueues 162 | :: Channel JobTiming 163 | -> Deli JobTiming () 164 | partitionedQueues jobChannel = do 165 | -- We'll read work from the main queue, and then partition 166 | -- it into either the slow or fast queue. 167 | -- First, we create the two partitions, each with a buffer of 16. 168 | -- Instead, we could pass in Nothing for an unbounded queue. 169 | slowChannel <- Deli.newChannel (Just 16) 170 | fastChannel <- Deli.newChannel (Just 16) 171 | 172 | -- Each of our two worker pools will implement work stealing. The algorithm 173 | -- is as follows. First, check if your primary queue has work, if so, 174 | -- perform it. If not, check to see if the other queue has work, if so, 175 | -- perform it. If not, wait until your primary queue does have work.
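    -- One caveat worth noting: the final, blocking read in each worker loop
    -- waits only on that worker's primary queue, so a worker that has blocked
    -- on its primary queue will not steal work arriving on the other queue
    -- until it next wakes up.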
176 | 177 | -- Spawn the slow workers 178 | replicateM_ 4 $ 179 | Deli.fork $ 180 | forever $ do 181 | mSlowJob <- Deli.readChannelNonblocking slowChannel 182 | case mSlowJob of 183 | Just job -> 184 | Deli.runJob job 185 | Nothing -> do 186 | mFastJob <- Deli.readChannelNonblocking fastChannel 187 | case mFastJob of 188 | Nothing -> 189 | Deli.readChannel slowChannel >>= Deli.runJob 190 | Just fastJob -> 191 | Deli.runJob fastJob 192 | -- Spawn the fast workers 193 | replicateM_ 16 $ 194 | Deli.fork $ 195 | forever $ do 196 | mFastJob <- Deli.readChannelNonblocking fastChannel 197 | case mFastJob of 198 | Just job -> 199 | Deli.runJob job 200 | Nothing -> do 201 | mSlowJob <- Deli.readChannelNonblocking slowChannel 202 | case mSlowJob of 203 | Nothing -> 204 | Deli.readChannel fastChannel >>= Deli.runJob 205 | Just slowJob -> 206 | Deli.runJob slowJob 207 | -- Loop forever, reading items, and putting them in the 208 | -- appropriate queue 209 | forever $ do 210 | item <- Deli.readChannel jobChannel 211 | -- If a job's duration is greater than 500 milliseconds, 212 | -- put it into the slow queue. 213 | 214 | -- In the real world, you'd likely have to predict the service 215 | -- time based on the parameters of the request, and in practice, 216 | -- that technique works remarkably well. 217 | if _jobDuration item > 0.5 218 | then Deli.writeChannel slowChannel item 219 | else Deli.writeChannel fastChannel item 220 | \end{code} 221 | 222 | We've set up our two implementations, now let's generate some example requests, 223 | and compare results. 224 | 225 | Instead of using a cycled list for our input data, we'll make things a bit more 226 | realistic, and use a poisson process for arrival times, and a pareto 227 | distribution for service times. 228 | 229 | \begin{code} 230 | 231 | paretoExample :: IO () 232 | paretoExample = do 233 | simulationGen <- newStdGen 234 | inputGen <- newPureMT 235 | -- Generate a poisson process of arrivals, with a mean of 650 arrivals 236 | -- per second 237 | let arrivals = Deli.Random.arrivalTimePoissonDistribution 650 238 | -- Generate a Pareto distribution of service times, with a mean service 239 | -- time of 3 milliseconds (0.03 seconds) (alpha is set to 1.16 inside this 240 | -- function) 241 | serviceTimes = Deli.Random.durationParetoDistribution 0.03 242 | jobs = take 200000 $ Deli.Random.distributionToJobs arrivals serviceTimes inputGen 243 | twentyWorkersRes = Deli.simulate simulationGen jobs twentyWorkers 244 | partitionedRes = Deli.simulate simulationGen jobs partitionedQueues 245 | 246 | putStrLn "## Pareto example ##" 247 | putStrLn "## twentyWorkers ##" 248 | printResults twentyWorkersRes 249 | newline 250 | 251 | putStrLn "## partitionedQueues ##" 252 | printResults partitionedRes 253 | newline 254 | 255 | where newline = putStrLn "\n" 256 | \end{code} 257 | 258 | Interestingly enough, our more complex implementation is able to beat the 259 | simple twenty workers. Intuitively, this is because with a Pareto distribution, 260 | the occasional really slow job causes head of line blocking. By separating out 261 | into a slow and fast queue (with work stealing), slow items will only block 262 | other slow items, and when there are no slow items, all workers can be utilized 263 | (via work stealing) to process fast jobs. 264 | 265 | Note in particular how much better the work stealing algorithm does at the 95th 266 | and 99th percentile of sojourn time. 
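One practical note: the examples above seed their generators with `newStdGen` and `newPureMT`, so every run simulates a different workload. Deli itself is deterministic for a given generator state (that was one of its design goals), so for run-to-run comparable output you can build both generators from fixed seeds instead. Below is a minimal sketch of that idea, assuming `mkStdGen` (from `System.Random`, already imported above) and `pureMT` (from `Data.Random.Source.PureMT`, not imported above) are in scope; the seed values are arbitrary:

```haskell
seededParetoExample :: IO ()
seededParetoExample = do
    -- Fixed seeds instead of newStdGen/newPureMT, so repeated runs
    -- simulate exactly the same arrivals and service times.
    let simulationGen = mkStdGen 42
        inputGen = pureMT 42
        arrivals = Deli.Random.arrivalTimePoissonDistribution 650
        serviceTimes = Deli.Random.durationParetoDistribution 0.03
        jobs = take 200000 $ Deli.Random.distributionToJobs arrivals serviceTimes inputGen
    printResults (Deli.simulate simulationGen jobs twentyWorkers)
```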
267 | 268 | \begin{code} 269 | main :: IO () 270 | main = do 271 | putStrLn "## singleQueueExample ##" 272 | singleQueueExample 273 | newline 274 | 275 | putStrLn "## twoWorkerQueueExample ##" 276 | twoWorkerQueueExample 277 | newline 278 | 279 | paretoExample 280 | newline 281 | 282 | where newline = putStrLn "\n" 283 | \end{code} 284 | 285 | 286 | That's currently it for this tutorial, but we'll be looking to expand it in the 287 | future. 288 | -------------------------------------------------------------------------------- /docs/user-guide.md: -------------------------------------------------------------------------------- 1 | This file is generated, please edit [app/Tutorial.lhs](../app/Tutorial.lhs) instead. 2 | *** 3 | Welcome to the Deli tutorial. Through a series of increasingly complex 4 | examples, this tutorial will give you an idea of the power and usage for 5 | Deli. 6 | 7 | This example is also a literate Haskell file, which means this document 8 | itself compiles and is executable. You can run it yourself and see the 9 | output by running: 10 | 11 | ``` shell 12 | $ stack build 13 | $ stack run tutorial 14 | ``` 15 | 16 | First, let's begin with our imports: 17 | 18 | ``` haskell 19 | module Main where 20 | 21 | import Control.Lens (to) 22 | import Control.Monad (replicateM_, forever) 23 | import Data.Random.Source.PureMT (newPureMT) 24 | import Deli (Channel, Deli, JobTiming(..)) 25 | import Deli.Printer (printResults) 26 | import System.Random 27 | import qualified Deli 28 | import qualified Deli.Random 29 | ``` 30 | 31 | Simple Queues 32 | ------------- 33 | 34 | Next, let's create our first example, of a single queue and worker. Work 35 | will be placed on the main queue, and our worker will read from it, and 36 | process each item in serial: 37 | 38 | ``` haskell 39 | singleQueue 40 | :: Channel JobTiming 41 | -> Deli JobTiming () 42 | singleQueue queue = 43 | forever $ do 44 | job <- Deli.readChannel queue 45 | Deli.runJob job 46 | ``` 47 | 48 | As you can see, describing a very simple system like this has little 49 | ceremony. Next, let's set up the rest of the simulation, and run it. 50 | 51 | ``` haskell 52 | singleQueueExample :: IO () 53 | singleQueueExample = do 54 | gen <- newStdGen 55 | let durations = cycle [0.8, 0.9, 1.0, 1.1, 1.2] 56 | times = [0,1..(100000-1)] 57 | jobs = zipWith JobTiming times durations 58 | res = Deli.simulate gen jobs singleQueue 59 | printResults res 60 | ``` 61 | 62 | First we've created a new random number generator (the Deli type 63 | implements `MonadRandom`, for convenient, reproducible random number 64 | generation). Next, we create a dataset of our jobs, to be simulated. In 65 | this case, jobs will take one of a set of durations (in seconds), with a 66 | mean of `1.0`. Then we set it up so that jobs will be triggered from the 67 | outside world once each second. 68 | 69 | Finally, we run the simulation, passing in our random number seed, set 70 | of jobs, and our implemented system (`singleQueueExample`). 71 | 72 | Running the simulation, we get two primary sets of statistics. We see 73 | the wait time (how long did our jobs have to wait in line before 74 | processing begun), and their sojourn time, which is the wait time plus 75 | the processing time. 76 | 77 | In this case, we have a non-zero wait-time, which means we are sometimes 78 | at capacity, and are queueing up work. This is also reflected in the 79 | fact that the soujourn 50th percentile is greating (albeit slightly) 80 | than one-second. 
81 | 82 | You will see output similar to this: 83 | 84 | ``` shell 85 | Simulated wait (milliseconds): 86 | simulated 99th: 294.99974998749934 87 | simulated 95th: 274.9987499374968 88 | simulated 75th: 181.24578114452865 89 | simulated 50th: 87.4934373359334 90 | 91 | Simulated sojourn (milliseconds): 92 | simulated 99th: 1295.0000000000002 93 | simulated 95th: 1275.0000000000002 94 | simulated 75th: 1181.2495312382812 95 | simulated 50th: 1087.497187429686 96 | ``` 97 | 98 | Next, let's see what happens if we add more workers: 99 | 100 | ``` haskell 101 | variableWorkers 102 | :: Deli.HasJobTiming jobType 103 | => Int 104 | -> Channel jobType 105 | -> Deli jobType () 106 | variableWorkers num queue = 107 | replicateM_ num $ 108 | Deli.fork $ forever $ do 109 | job <- Deli.readChannel queue 110 | Deli.runJob job 111 | ``` 112 | 113 | Here we've simply parameterized the number of workers. For each worker, 114 | we spawn a thread (using the Deli DSL), and enter an infinite loop to 115 | read work from the shared queue. This expands our exposure to the Deli 116 | API, as we've now seen `fork`, `readChannel`, `runJob`, and `simulate`. 117 | Deli's core API exposes familiar programming concepts to create queues, 118 | read and write to them, and fork (lightweight) threads. This allows you 119 | to create a model of your system, using similar constructs to the actual 120 | version. This is core to Deli. 121 | 122 | ``` haskell 123 | twoWorkerQueueExample :: IO () 124 | twoWorkerQueueExample = do 125 | gen <- newStdGen 126 | let durations = cycle [0.8, 0.9, 1.0, 1.1, 1.2] 127 | times = [0,1..(100000-1)] 128 | jobs = zipWith JobTiming times durations 129 | res = Deli.simulate gen jobs (variableWorkers 2) 130 | printResults res 131 | ``` 132 | 133 | Now we can run our same example, and pass in two workers. Running this, 134 | we see that the system never reaches capacity, as the wait time is 135 | always zero. We won't be able to beat this performance. 136 | 137 | Simulated wait (milliseconds): 138 | simulated 99th: 0.0 139 | simulated 95th: 0.0 140 | simulated 75th: 0.0 141 | simulated 50th: 0.0 142 | 143 | Simulated sojourn (milliseconds): 144 | simulated 99th: 1197.5 145 | simulated 95th: 1187.5 146 | simulated 75th: 1125.0 147 | simulated 50th: 1000.0 148 | 149 | A more complex example 150 | ---------------------- 151 | 152 | Now, let's say we have an pareto distribution, with some requests 153 | generally being quick, and others generally taking much longer. Let's 154 | compare two implementations, one simply with twenty workers, and another 155 | with two separate queues, partitioned by request type (using a total 156 | still of twenty workers). 157 | 158 | Now let's create our two systems whose performance we want to compare. 159 | 160 | ``` haskell 161 | twentyWorkers 162 | :: Channel JobTiming 163 | -> Deli JobTiming () 164 | twentyWorkers = variableWorkers 20 165 | 166 | partitionedQueues 167 | :: Channel JobTiming 168 | -> Deli JobTiming () 169 | partitionedQueues jobChannel = do 170 | -- We'll read work from the main queue, and then partition 171 | -- it into either the slow or fast queue. 172 | -- First, we create the two partitions, each with a buffer of 16. 173 | -- Instead, we could pass in Nothing for an unbounded queue. 174 | slowChannel <- Deli.newChannel (Just 16) 175 | fastChannel <- Deli.newChannel (Just 16) 176 | 177 | -- Each of our two workers will implement work stealing. The algorithm 178 | -- is as follows. 
First, check if your primary queue has work, if so, 179 | -- perform it. If not, check to see if the other queue has work, if so, 180 | -- per form it. If not, wait until your primary queue does have work. 181 | 182 | -- Spawn the slow workers 183 | replicateM_ 4 $ 184 | Deli.fork $ 185 | forever $ do 186 | mSlowJob <- Deli.readChannelNonblocking slowChannel 187 | case mSlowJob of 188 | Just job -> 189 | Deli.runJob job 190 | Nothing -> do 191 | mFastJob <- Deli.readChannelNonblocking fastChannel 192 | case mFastJob of 193 | Nothing -> 194 | Deli.readChannel slowChannel >>= Deli.runJob 195 | Just fastJob -> 196 | Deli.runJob fastJob 197 | -- Spawn the fast workers 198 | replicateM_ 16 $ 199 | Deli.fork $ 200 | forever $ do 201 | mFastJob <- Deli.readChannelNonblocking fastChannel 202 | case mFastJob of 203 | Just job -> 204 | Deli.runJob job 205 | Nothing -> do 206 | mSlowJob <- Deli.readChannelNonblocking slowChannel 207 | case mSlowJob of 208 | Nothing -> 209 | Deli.readChannel fastChannel >>= Deli.runJob 210 | Just slowJob -> 211 | Deli.runJob slowJob 212 | -- Loop forever, reading items, and putting them in the 213 | -- appropriate queue 214 | forever $ do 215 | item <- Deli.readChannel jobChannel 216 | -- If a job's duration is greater than 500 milliseconds, 217 | -- put it into the slow queue. 218 | 219 | -- In the real world, you'd likely have to predict the service 220 | -- time based on the parameters of the request, and in practice, 221 | -- that technique works remarkably well. 222 | if _jobDuration item > 0.5 223 | then Deli.writeChannel slowChannel item 224 | else Deli.writeChannel fastChannel item 225 | ``` 226 | 227 | We've set up our two implementations, now let's generate some example 228 | requests, and compare results. 229 | 230 | Instead of using a cycled list for our input data, we'll make things a 231 | bit more realistic, and use a poisson process for arrival times, and a 232 | pareto distribution for service times. 233 | 234 | ``` haskell 235 | 236 | paretoExample :: IO () 237 | paretoExample = do 238 | simulationGen <- newStdGen 239 | inputGen <- newPureMT 240 | -- Generate a poisson process of arrivals, with a mean of 650 arrivals 241 | -- per second 242 | let arrivals = Deli.Random.arrivalTimePoissonDistribution 650 243 | -- Generate a Pareto distribution of service times, with a mean service 244 | -- time of 3 milliseconds (0.03 seconds) (alpha is set to 1.16 inside this 245 | -- function) 246 | serviceTimes = Deli.Random.durationParetoDistribution 0.03 247 | jobs = take 200000 $ Deli.Random.distributionToJobs arrivals serviceTimes inputGen 248 | twentyWorkersRes = Deli.simulate simulationGen jobs twentyWorkers 249 | partitionedRes = Deli.simulate simulationGen jobs partitionedQueues 250 | 251 | putStrLn "## Pareto example ##" 252 | putStrLn "## twentyWorkers ##" 253 | printResults twentyWorkersRes 254 | newline 255 | 256 | putStrLn "## partitionedQueues ##" 257 | printResults partitionedRes 258 | newline 259 | 260 | where newline = putStrLn "\n" 261 | ``` 262 | 263 | Interestingly enough, our more complex implementation is able to beat 264 | the simple twenty workers. Intuitively, this is because with a Pareto 265 | distribution, the occasional really slow job causes head of line 266 | blocking. By separating out into a slow and fast queue (with work 267 | stealing), slow items will only block other slow items, and when there 268 | are no slow items, all workers can be utilized (via work stealing) to 269 | process fast jobs. 
270 | 271 | Note in particular how much better the work stealing algorithm does at 272 | the 95th and 99th percentile of sojourn time. 273 | 274 | ``` haskell 275 | main :: IO () 276 | main = do 277 | putStrLn "## singleQueueExample ##" 278 | singleQueueExample 279 | newline 280 | 281 | putStrLn "## twoWorkerQueueExample ##" 282 | twoWorkerQueueExample 283 | newline 284 | 285 | paretoExample 286 | newline 287 | 288 | where newline = putStrLn "\n" 289 | ``` 290 | 291 | That's currently it for this tutorial, but we'll be looking to expand it 292 | in the future. -------------------------------------------------------------------------------- /src/Control/Monad/Concurrent.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleInstances #-} 2 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 3 | {-# LANGUAGE MultiParamTypeClasses #-} 4 | {-# LANGUAGE TemplateHaskell #-} 5 | {-# LANGUAGE UndecidableInstances #-} 6 | 7 | module Control.Monad.Concurrent 8 | ( Channel 9 | , Time(..) 10 | , Duration(..) 11 | , ThreadId 12 | , ConcurrentT 13 | , addDuration 14 | , microsecond 15 | , millisecond 16 | , millisecondsToDuration 17 | , millisecondsToTime 18 | , microsecondsToDuration 19 | , microsecondsToTime 20 | , subtractTime 21 | , fork 22 | , threadId 23 | , sleep 24 | , yield 25 | , lazySchedule 26 | , now 27 | , newChannel 28 | , writeChannel 29 | , writeChannelNonblocking 30 | , readChannel 31 | , readChannelNonblocking 32 | , runConcurrentT 33 | ) where 34 | 35 | import Control.Lens (at, ix, makeLenses, to, use, (^?), (.=), (+=), (%=), (?~)) 36 | import Control.Monad.State.Strict 37 | import Control.Monad.Reader (MonadReader, ReaderT, ask, local, runReaderT) 38 | import Control.Monad.Trans.Cont (ContT, evalContT, resetT, shiftT) 39 | import Data.Map.Strict 40 | import Data.Maybe 41 | import Data.PQueue.Min as PQueue 42 | import Data.Sequence 43 | import Data.Time.Clock (DiffTime, picosecondsToDiffTime) 44 | 45 | data Queue a = Queue 46 | { _writeEnd :: [a] 47 | , _readEnd :: [a] 48 | } 49 | 50 | emptyQueue :: Queue a 51 | emptyQueue = Queue [] [] 52 | 53 | readQueue 54 | :: Queue a 55 | -> Maybe (a, Queue a) 56 | readQueue (Queue writeEnd readEnd) = 57 | case readEnd of 58 | (h:tl) -> 59 | let newQueue = Queue writeEnd tl 60 | in Just (h, newQueue) 61 | [] -> 62 | if Prelude.null writeEnd 63 | then Nothing 64 | else readQueue (Queue [] (Prelude.reverse writeEnd)) 65 | 66 | writeQueue 67 | :: Queue a 68 | -> a 69 | -> Queue a 70 | writeQueue (Queue writeEnd readEnd) val = 71 | Queue (val:writeEnd) readEnd 72 | 73 | -- ** delimited continuations ** 74 | -- shift = escape 75 | -- reset = capture 76 | 77 | data Channel a = Channel 78 | { _chanId :: !Integer 79 | , _chanSize :: !(Maybe Int) 80 | } 81 | deriving (Eq, Ord, Show) 82 | 83 | data ChanAndWaiters chanState m = ChanAndWaiters 84 | { _contents :: !(Seq chanState) 85 | , _readers :: Queue (ThreadId, IConcurrentT chanState m ()) 86 | , _writers :: Queue (ThreadId, IConcurrentT chanState m ()) 87 | } 88 | 89 | newtype Time = Time DiffTime 90 | deriving (Show, Eq, Ord, Num, Fractional, Enum) 91 | 92 | newtype Duration = Duration DiffTime 93 | deriving (Show, Eq, Ord, Num, Fractional, Real, Enum) 94 | 95 | newtype ThreadId = ThreadId Integer 96 | deriving (Show, Eq, Ord) 97 | 98 | addDuration 99 | :: Time 100 | -> Duration 101 | -> Time 102 | addDuration (Time t) (Duration d) = 103 | Time (t + d) 104 | 105 | microsecond :: Duration 106 | microsecond = Duration (picosecondsToDiffTime 1000000) 107 | 
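-- Durations and times bottom out at DiffTime's picosecond resolution:
-- 'picosecondsToDiffTime 1000000' above is 10^6 picoseconds, i.e. one
-- microsecond, and 'millisecond' below is simply 'microsecond * 1000'.
-- The millisecondsTo*/microsecondsTo* helpers that follow perform the same
-- scaling from integral inputs (10^9 picoseconds per millisecond, 10^6 per
-- microsecond).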
108 | millisecond :: Duration 109 | millisecond = microsecond * 1000 110 | 111 | millisecondsToTime 112 | :: Integer 113 | -> Time 114 | millisecondsToTime millis = 115 | Time $ picosecondsToDiffTime (1000 * 1000 * 1000 * millis) 116 | 117 | millisecondsToDuration 118 | :: Integer 119 | -> Duration 120 | millisecondsToDuration millis = 121 | Duration $ picosecondsToDiffTime (1000 * 1000 * 1000 * millis) 122 | 123 | microsecondsToTime 124 | :: Integer 125 | -> Time 126 | microsecondsToTime micros = 127 | Time $ picosecondsToDiffTime (1000 * 1000 * micros) 128 | 129 | microsecondsToDuration 130 | :: Integer 131 | -> Duration 132 | microsecondsToDuration micros = 133 | Duration $ picosecondsToDiffTime (1000 * 1000 * micros) 134 | 135 | subtractTime 136 | :: Time 137 | -> Time 138 | -> Duration 139 | subtractTime (Time end) (Time start) = 140 | Duration (end - start) 141 | 142 | data PriorityCoroutine chanState m = PriorityCoroutine 143 | { _routine :: IConcurrentT chanState m () 144 | , _pId :: !ThreadId 145 | , _priority :: !Time 146 | } 147 | 148 | instance Eq (PriorityCoroutine chanState m) 149 | where (==) a b = (_priority a, _pId a) == (_priority b, _pId b) 150 | 151 | instance Ord (PriorityCoroutine chanState m) 152 | -- NOTE: should this incorporate the threadId? 153 | where compare a b = compare (_priority a) (_priority b) 154 | 155 | type CoroutineQueue chanState m = MinQueue (PriorityCoroutine chanState m) 156 | 157 | data ConcurrentState chanState m = ConcurrentState 158 | { _coroutines :: !(CoroutineQueue chanState m) 159 | , _scheduledRoutines :: [(Time, IConcurrentT chanState m ())] 160 | , _nextThreadIdent :: !ThreadId 161 | , _channels :: !(Map (Channel chanState) (ChanAndWaiters chanState m)) 162 | , _nextChannelIdent :: !Integer 163 | , _nowTime :: !Time 164 | } 165 | 166 | newtype IConcurrentT chanState m a = 167 | IConcurrentT 168 | { runIConcurrentT' :: ContT () (ReaderT ThreadId (StateT (ConcurrentState chanState m) m)) a 169 | } deriving (Functor, Monad, MonadIO, MonadReader ThreadId, MonadState (ConcurrentState chanState m)) 170 | 171 | instance Applicative (IConcurrentT chanState m) where 172 | pure = IConcurrentT . pure 173 | 174 | (IConcurrentT a) <*> (IConcurrentT b) = IConcurrentT (a <*> b) 175 | 176 | (IConcurrentT a) *> (IConcurrentT b) = IConcurrentT $ a >>= const b 177 | 178 | instance MonadTrans (IConcurrentT chanState) where 179 | lift = IConcurrentT . lift . lift . lift 180 | 181 | newtype ConcurrentT chanState m a = 182 | ConcurrentT 183 | { runConcurrentT' :: IConcurrentT chanState m a 184 | } deriving (Functor, Applicative, Monad, MonadIO) 185 | 186 | instance MonadState s m => MonadState s (ConcurrentT chanState m) where 187 | get = lift get 188 | 189 | put = lift . put 190 | 191 | state = lift . state 192 | 193 | instance MonadTrans (ConcurrentT chanState) where 194 | lift = ConcurrentT . IConcurrentT . lift . lift . 
lift 195 | 196 | -- For some reason I had to put these together underneath the definition 197 | -- of `IConcurrentT' 198 | makeLenses ''ConcurrentState 199 | makeLenses ''ChanAndWaiters 200 | 201 | 202 | freshState 203 | :: ConcurrentState chanState m 204 | freshState = ConcurrentState 205 | { _coroutines = PQueue.empty 206 | , _scheduledRoutines = [] 207 | -- 1 because we `runReaderT' with 0 for the main thread, 208 | -- which is already created 209 | , _nextThreadIdent = ThreadId 1 210 | , _channels = Data.Map.Strict.empty 211 | , _nextChannelIdent = 0 212 | , _nowTime = 0 213 | } 214 | 215 | register 216 | :: Monad m 217 | => (IConcurrentT chanState m () -> IConcurrentT chanState m ()) 218 | -> IConcurrentT chanState m () 219 | register callback = 220 | IConcurrentT $ shiftT $ \k -> do 221 | let routine = IConcurrentT (lift (k ())) 222 | runIConcurrentT' (callback routine) 223 | 224 | getCCs 225 | :: Monad m 226 | => IConcurrentT chanState m (CoroutineQueue chanState m) 227 | getCCs = use coroutines 228 | 229 | putCCs 230 | :: Monad m 231 | => CoroutineQueue chanState m 232 | -> IConcurrentT chanState m () 233 | putCCs queue = 234 | coroutines .= queue 235 | 236 | updateNow 237 | :: Monad m 238 | => Time 239 | -> IConcurrentT chanState m () 240 | updateNow time = 241 | nowTime .= time 242 | 243 | dequeue 244 | :: Monad m 245 | => IConcurrentT chanState m () 246 | dequeue = do 247 | queue <- getCCs 248 | scheduled <- use scheduledRoutines 249 | let mMin = PQueue.minView queue 250 | case (mMin, scheduled) of 251 | (Nothing, []) -> 252 | return () 253 | (Just (PriorityCoroutine nextCoroutine pId priority, modifiedQueue), []) -> do 254 | putCCs modifiedQueue 255 | updateNow priority 256 | IConcurrentT (resetT (runIConcurrentT' (local (const pId) nextCoroutine))) 257 | dequeue 258 | (Nothing, (priority, nextCoroutine): tl) -> do 259 | scheduledRoutines .= tl 260 | updateNow priority 261 | IConcurrentT (resetT (runIConcurrentT' nextCoroutine)) 262 | dequeue 263 | (Just (PriorityCoroutine nextCoroutineQ pId priorityQ, modifiedQueue), (priorityL, nextCoroutineL): tl) -> 264 | if priorityL <= priorityQ 265 | then do 266 | scheduledRoutines .= tl 267 | updateNow priorityL 268 | IConcurrentT (resetT (runIConcurrentT' (local (const pId) nextCoroutineL))) 269 | dequeue 270 | else do 271 | putCCs modifiedQueue 272 | updateNow priorityQ 273 | IConcurrentT (resetT (runIConcurrentT' nextCoroutineQ)) 274 | dequeue 275 | 276 | ischeduleDuration 277 | :: Monad m 278 | => Duration 279 | -> ThreadId 280 | -> IConcurrentT chanState m () 281 | -> IConcurrentT chanState m () 282 | ischeduleDuration duration pId routine = do 283 | currentNow <- inow 284 | ischedule (addDuration currentNow duration) pId routine 285 | 286 | sleep 287 | :: Monad m 288 | => Duration 289 | -> ConcurrentT chanState m () 290 | sleep = ConcurrentT . 
isleep 291 | 292 | isleep 293 | :: Monad m 294 | => Duration 295 | -> IConcurrentT chanState m () 296 | isleep duration = do 297 | myId <- ithreadId 298 | register (ischeduleDuration duration myId) 299 | 300 | yield 301 | :: Monad m 302 | => ConcurrentT chanState m () 303 | yield = ConcurrentT iyield 304 | 305 | iyield 306 | :: Monad m 307 | => IConcurrentT chanState m () 308 | iyield = 309 | -- rather than implementing a separate queue/seq for yield'ers, we actually 310 | -- do want to advance our clock as we yield, simulating CPU cycles 311 | isleep microsecond 312 | 313 | lazySchedule 314 | :: Monad m 315 | => [(Time, ConcurrentT chanState m ())] 316 | -> ConcurrentT chanState m () 317 | lazySchedule scheduled = 318 | ConcurrentT (ilazySchedule [(time, runConcurrentT' t) | (time, t) <- scheduled]) 319 | 320 | ilazySchedule 321 | :: Monad m 322 | => [(Time, IConcurrentT chanState m ())] 323 | -> IConcurrentT chanState m () 324 | ilazySchedule scheduled = 325 | scheduledRoutines .= scheduled 326 | 327 | 328 | ischedule 329 | :: Monad m 330 | => Time 331 | -> ThreadId 332 | -> IConcurrentT chanState m () 333 | -> IConcurrentT chanState m () 334 | ischedule time pId routine = do 335 | currentRoutines <- getCCs 336 | currentNow <- inow 337 | -- to prevent time from moving backward by scheduling something in the 338 | -- past, we schedule it to the `max' of the current time, or the schedule 339 | -- time. Effectively this immediately schedules the process if it were 340 | -- to otherwise have been scheduled for the past. 341 | let scheduleTime = max time currentNow 342 | newRoutines = insertBehind (PriorityCoroutine routine pId scheduleTime) currentRoutines 343 | putCCs newRoutines 344 | 345 | now 346 | :: Monad m 347 | => ConcurrentT chanState m Time 348 | now = ConcurrentT inow 349 | 350 | inow 351 | :: Monad m 352 | => IConcurrentT chanState m Time 353 | inow = use nowTime 354 | 355 | fork 356 | :: Monad m 357 | => ConcurrentT chanState m () 358 | -> ConcurrentT chanState m () 359 | fork (ConcurrentT f) = 360 | ConcurrentT (ifork f) 361 | 362 | ifork 363 | :: Monad m 364 | => IConcurrentT chanState m () 365 | -> IConcurrentT chanState m () 366 | ifork routine = do 367 | tId@(ThreadId i) <- use nextThreadIdent 368 | nextThreadIdent .= (ThreadId (i + 1)) 369 | ischeduleDuration 0 tId routine 370 | myId <- ithreadId 371 | register (ischeduleDuration 0 myId) 372 | 373 | threadId 374 | :: Monad m 375 | => ConcurrentT chanState m ThreadId 376 | threadId = ConcurrentT ithreadId 377 | 378 | ithreadId 379 | :: Monad m 380 | => IConcurrentT chanState m ThreadId 381 | ithreadId = ask 382 | 383 | newChannel 384 | :: Monad m 385 | => Maybe Int 386 | -> ConcurrentT chanState m (Channel chanState) 387 | newChannel = ConcurrentT . 
inewChannel 388 | 389 | inewChannel 390 | :: Monad m 391 | => Maybe Int 392 | -> IConcurrentT chanState m (Channel chanState) 393 | inewChannel mChanSize = do 394 | -- grab the next channel identifier and then 395 | -- immediately increment it for the next use 396 | chanIdent <- use nextChannelIdent 397 | nextChannelIdent += 1 398 | 399 | let chan = Channel chanIdent mChanSize 400 | emptySeq = Data.Sequence.empty 401 | chanAndWaiters = ChanAndWaiters emptySeq emptyQueue emptyQueue 402 | channels %= (at chan ?~ chanAndWaiters) 403 | return chan 404 | 405 | writeChannel 406 | :: Monad m 407 | => Channel chanState 408 | -> chanState 409 | -> ConcurrentT chanState m () 410 | writeChannel chan item = 411 | ConcurrentT (iwriteChannel chan item) 412 | 413 | iwriteChannel 414 | :: Monad m 415 | => Channel chanState 416 | -> chanState 417 | -> IConcurrentT chanState m () 418 | iwriteChannel chan@(Channel _ident mMaxSize) item = do 419 | chanMap <- use channels 420 | let chanContents = chanMap ^? (ix chan . contents) 421 | chanCurrentSize = maybe 0 Data.Sequence.length chanContents 422 | 423 | myId <- ithreadId 424 | 425 | -- when there's already an element, we block and wait our turn to write 426 | -- once the queue is empty/writable 427 | case mMaxSize of 428 | Just maxSize | chanCurrentSize >= maxSize -> 429 | register $ \routine -> 430 | channels . ix chan . writers %= flip writeQueue (myId, routine) 431 | _ -> 432 | return () 433 | 434 | 435 | -- now we've waited, if needed 436 | -- write the value, and then notify any readers 437 | -- our state may have changed, so get it again 438 | 439 | -- write the value to the queue 440 | channels . ix chan . contents %= (|> item) 441 | 442 | chanMap2 <- use channels 443 | let readerView = join $ (readQueue . _readers) <$> Data.Map.Strict.lookup chan chanMap2 444 | case readerView of 445 | -- there are no readers 446 | Nothing -> 447 | return () 448 | -- there is a reader, call the reader 449 | Just ((readerId, nextReader), newReaders) -> do 450 | channels . ix chan . readers .= newReaders 451 | local (const readerId) nextReader 452 | 453 | writeChannelNonblocking 454 | :: Monad m 455 | => Channel chanState 456 | -> chanState 457 | -> ConcurrentT chanState m (Maybe chanState) 458 | writeChannelNonblocking chan item = 459 | ConcurrentT (iwriteChannelNonblocking chan item) 460 | 461 | iwriteChannelNonblocking 462 | :: Monad m 463 | => Channel chanState 464 | -> chanState 465 | -> IConcurrentT chanState m (Maybe chanState) 466 | iwriteChannelNonblocking chan@(Channel _ident mMaxSize) item = do 467 | chanMap <- use channels 468 | myId <- ithreadId 469 | let chanContents = chanMap ^? (ix chan . contents) 470 | chanCurrentSize = maybe 0 Data.Sequence.length chanContents 471 | 472 | -- when there's already an element, we block and wait our turn to write 473 | -- once the queue is empty/writable 474 | case mMaxSize of 475 | Just maxSize | chanCurrentSize >= maxSize -> 476 | return Nothing 477 | _ -> do 478 | -- write the value to the queue 479 | channels . ix chan . contents %= (|> item) 480 | 481 | chanMap2 <- use channels 482 | let readerView = join $ (readQueue . _readers) <$> Data.Map.Strict.lookup chan chanMap2 483 | case readerView of 484 | -- there are no readers 485 | Nothing -> 486 | return (Just item) 487 | -- there is a reader, call the reader 488 | Just ((readerId, nextReader), newReaders) -> do 489 | channels . ix chan . 
readers .= newReaders 490 | --local (const readerId) nextReader 491 | ischeduleDuration 0 readerId nextReader 492 | register (ischeduleDuration 0 myId) 493 | return (Just item) 494 | 495 | readChannel 496 | :: Monad m 497 | => Channel chanState 498 | -> ConcurrentT chanState m chanState 499 | readChannel = ConcurrentT . ireadChannel 500 | 501 | ireadChannel 502 | :: Monad m 503 | => Channel chanState 504 | -> IConcurrentT chanState m chanState 505 | ireadChannel chan = do 506 | chanMap <- use channels 507 | let mChanContents = fromMaybe EmptyL $ chanMap ^? (ix chan . contents . to viewl) 508 | 509 | myId <- ithreadId 510 | 511 | case mChanContents of 512 | EmptyL -> do 513 | -- nothing to read, so we add ourselves to the queue 514 | register $ \routine -> 515 | channels . ix chan . readers %= flip writeQueue (myId, routine) 516 | -- we can actually just recur here to read the value, since now 517 | -- that we're running again, the queue will have a value for us to 518 | -- read 519 | ireadChannel chan 520 | val :< newSeq -> do 521 | -- write the new seq 522 | channels . ix chan . contents .= newSeq 523 | 524 | -- see if there are any writers 525 | chanMap2 <- use channels 526 | let writerView = join $ (readQueue . _writers) <$> Data.Map.Strict.lookup chan chanMap2 527 | case writerView of 528 | Nothing -> 529 | return val 530 | Just ((writerId, nextWriter), newWriters) -> do 531 | channels . ix chan . writers .= newWriters 532 | local (const writerId) nextWriter 533 | return val 534 | 535 | readChannelNonblocking 536 | :: Monad m 537 | => Channel chanState 538 | -> ConcurrentT chanState m (Maybe chanState) 539 | readChannelNonblocking = ConcurrentT . ireadChannelNonblocking 540 | 541 | ireadChannelNonblocking 542 | :: Monad m 543 | => Channel chanState 544 | -> IConcurrentT chanState m (Maybe chanState) 545 | ireadChannelNonblocking chan = do 546 | chanMap <- use channels 547 | let mChanContents = fromMaybe EmptyL $ chanMap ^? (ix chan . contents . to viewl) 548 | 549 | case mChanContents of 550 | EmptyL -> return Nothing 551 | val :< newSeq -> do 552 | -- write the new seq 553 | channels . ix chan . contents .= newSeq 554 | 555 | -- see if there are any writers 556 | chanMap2 <- use channels 557 | let writerView = join $ (readQueue . _writers) <$> Data.Map.Strict.lookup chan chanMap2 558 | case writerView of 559 | Nothing -> 560 | return (Just val) 561 | Just ((writerId, nextWriter), newWriters) -> do 562 | channels . ix chan . writers .= newWriters 563 | local (const writerId) nextWriter 564 | return (Just val) 565 | 566 | runConcurrentT 567 | :: Monad m 568 | => ConcurrentT chanState m () 569 | -> m () 570 | runConcurrentT (ConcurrentT routine) = 571 | runIConcurrentT routine 572 | 573 | runIConcurrentT 574 | :: Monad m 575 | => IConcurrentT chanState m () 576 | -> m () 577 | runIConcurrentT routine = 578 | let resetAction = do 579 | resetT (runIConcurrentT' routine) 580 | runIConcurrentT' dequeue 581 | in 582 | void $ flip evalStateT freshState $ flip runReaderT (ThreadId 0) $ evalContT resetAction 583 | 584 | --------------------------------------------------------------------------------
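To make the API above concrete, here is a small, self-contained sketch of how `Control.Monad.Concurrent` can be used on its own. This example is not part of the repository; the `Main` module, `producerConsumer`, and the message strings are purely illustrative. A forked producer writes three messages into a bounded `Channel`, the main thread reads them back, and `runConcurrentT` drives the simulation. Because `sleep` advances only simulated time, the program finishes immediately no matter how long the simulated durations are.

``` haskell
module Main where

-- Illustrative only: this module is not part of the deli package source.
import Control.Monad (forM_, replicateM_)
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Concurrent

-- One producer writes three messages over a bounded channel; the main
-- (simulated) thread reads them back and prints the simulated time of each read.
producerConsumer :: ConcurrentT String IO ()
producerConsumer = do
    chan <- newChannel (Just 1)
    fork $ forM_ [1 .. 3 :: Int] $ \i -> do
        sleep (millisecondsToDuration 10)       -- simulated work, no real waiting
        writeChannel chan ("message " ++ show i)
    replicateM_ 3 $ do
        msg <- readChannel chan                 -- suspends (in simulated time) until a write arrives
        t   <- now
        liftIO (putStrLn (show t ++ ": " ++ msg))

main :: IO ()
main = runConcurrentT producerConsumer
```

Note that blocking operations such as `readChannel` suspend only the simulated thread: the scheduler inside `dequeue` resumes whichever coroutine is due next, so no real time passes while a simulated thread waits.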