├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── Setup.hs ├── Vagrantfile ├── benchmarks └── Main.hs ├── cabal.project ├── developer_notes.md ├── flamecharts ├── baseline.svg ├── batch-100-hardcoded.svg ├── batch-20.svg ├── benchmark-notify.svg ├── benchmark.svg ├── diff.svg ├── hasql.svg ├── int4Payload.svg ├── justDequeue.svg ├── justValueDequeue.svg ├── no-create.svg ├── no-modified-timestamp.svg ├── no-trigger.svg ├── noNotify.svg ├── prepared-statement.svg ├── sixtySecondsWorth.svg └── tryDequeue.svg ├── hasql-queue-tmp-db └── Main.hs ├── hasql-queue.cabal ├── package.yaml ├── s ├── ghcid ├── ghcid-bench └── test-time ├── src └── Hasql │ └── Queue │ ├── High │ ├── AtLeastOnce.hs │ ├── AtMostOnce.hs │ └── ExactlyOnce.hs │ ├── Internal.hs │ ├── Low │ ├── AtLeastOnce.hs │ ├── AtMostOnce.hs │ └── ExactlyOnce.hs │ └── Migrate.hs ├── stack.yaml ├── stack.yaml.lock └── test ├── Hasql └── Queue │ ├── High │ ├── AtLeastOnceSpec.hs │ ├── AtMostOnceSpec.hs │ └── ExactlyOnceSpec.hs │ ├── Low │ ├── AtLeastOnceSpec.hs │ ├── AtMostOnceSpec.hs │ └── ExactlyOnceSpec.hs │ ├── MigrateSpec.hs │ └── TestUtils.hs └── Main.hs /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled source # 2 | ################### 3 | *.com 4 | *.class 5 | *.dll 6 | *.exe 7 | *.o 8 | *.so 9 | *.hi 10 | *.lib 11 | 12 | *.aes 13 | *.pem 14 | # Packages # 15 | ############ 16 | # it's better to unpack these files and commit the raw source 17 | # git has its own built in compression methods 18 | *.7z 19 | *.dmg 20 | *.gz 21 | *.iso 22 | *.jar 23 | *.rar 24 | *.tar 25 | *.zip 26 | 27 | # Logs and databases # 28 | ###################### 29 | *.log 30 | *.sql 31 | *.sqlite 32 | 33 | #keys 34 | *.pem 35 | *.cer 36 | *.pem~ 37 | *.p12 38 | 39 | # OS generated files # 40 | ###################### 41 | .DS_Store* 42 | ehthumbs.db 43 | Icon? 
44 | Thumbs.db 45 | 46 | *.p_o 47 | *.hi 48 | *.*~ 49 | dist 50 | tags 51 | *.prof 52 | 53 | .cabal-sandbox 54 | .stack-work 55 | .vagrant 56 | bin/ 57 | core/ 58 | *.s 59 | 60 | .test-cache 61 | /postgresql-simple-queue.cabal 62 | dist-newstyle 63 | perf.script 64 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # This Travis job script has been generated by a script via 2 | # 3 | # haskell-ci 'cabal.project' '--output' '.travis.yml' 4 | # 5 | # For more information, see https://github.com/haskell-CI/haskell-ci 6 | # 7 | # version: 0.9.20191213 8 | # 9 | version: ~> 1.0 10 | language: c 11 | os: linux 12 | dist: xenial 13 | git: 14 | # whether to recursively clone submodules 15 | submodules: false 16 | cache: 17 | directories: 18 | - $HOME/.cabal/packages 19 | - $HOME/.cabal/store 20 | - $HOME/.hlint 21 | before_cache: 22 | - rm -fv $CABALHOME/packages/hackage.haskell.org/build-reports.log 23 | # remove files that are regenerated by 'cabal update' 24 | - rm -fv $CABALHOME/packages/hackage.haskell.org/00-index.* 25 | - rm -fv $CABALHOME/packages/hackage.haskell.org/*.json 26 | - rm -fv $CABALHOME/packages/hackage.haskell.org/01-index.cache 27 | - rm -fv $CABALHOME/packages/hackage.haskell.org/01-index.tar 28 | - rm -fv $CABALHOME/packages/hackage.haskell.org/01-index.tar.idx 29 | - rm -rfv $CABALHOME/packages/head.hackage 30 | jobs: 31 | include: 32 | - compiler: ghc-8.8.1 33 | addons: {"apt":{"sources":[{"sourceline":"deb http://ppa.launchpad.net/hvr/ghc/ubuntu xenial main","key_url":"https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x063dab2bdc0b3f9fcebc378bff3aeacef6f88286"}],"packages":["ghc-8.8.1","cabal-install-3.0"]}} 34 | os: linux 35 | before_install: 36 | - HC=$(echo "/opt/$CC/bin/ghc" | sed 's/-/\//') 37 | - WITHCOMPILER="-w $HC" 38 | - HADDOCK=$(echo "/opt/$CC/bin/haddock" | sed 's/-/\//') 39 | - HCPKG="$HC-pkg" 40 | 
- unset CC 41 | - CABAL=/opt/ghc/bin/cabal 42 | - CABALHOME=$HOME/.cabal 43 | - export PATH="$CABALHOME/bin:$PATH" 44 | - export PATH=$PATH:/usr/lib/postgresql/10/bin/ 45 | - TOP=$(pwd) 46 | - "HCNUMVER=$(${HC} --numeric-version|perl -ne '/^(\\d+)\\.(\\d+)\\.(\\d+)(\\.(\\d+))?$/; print(10000 * $1 + 100 * $2 + ($3 == 0 ? $5 != 1 : $3))')" 47 | - echo $HCNUMVER 48 | - CABAL="$CABAL -vnormal+nowrap+markoutput" 49 | - set -o pipefail 50 | - | 51 | echo 'function blue(s) { printf "\033[0;34m" s "\033[0m " }' >> .colorful.awk 52 | echo 'BEGIN { state = "output"; }' >> .colorful.awk 53 | echo '/^-----BEGIN CABAL OUTPUT-----$/ { state = "cabal" }' >> .colorful.awk 54 | echo '/^-----END CABAL OUTPUT-----$/ { state = "output" }' >> .colorful.awk 55 | echo '!/^(-----BEGIN CABAL OUTPUT-----|-----END CABAL OUTPUT-----)/ {' >> .colorful.awk 56 | echo ' if (state == "cabal") {' >> .colorful.awk 57 | echo ' print blue($0)' >> .colorful.awk 58 | echo ' } else {' >> .colorful.awk 59 | echo ' print $0' >> .colorful.awk 60 | echo ' }' >> .colorful.awk 61 | echo '}' >> .colorful.awk 62 | - cat .colorful.awk 63 | - | 64 | color_cabal_output () { 65 | awk -f $TOP/.colorful.awk 66 | } 67 | - echo text | color_cabal_output 68 | install: 69 | - ${CABAL} --version 70 | - echo "$(${HC} --version) [$(${HC} --print-project-git-commit-id 2> /dev/null || echo '?')]" 71 | - TEST=--enable-tests 72 | - BENCH=--enable-benchmarks 73 | - HEADHACKAGE=false 74 | - rm -f $CABALHOME/config 75 | - | 76 | echo "verbose: normal +nowrap +markoutput" >> $CABALHOME/config 77 | echo "remote-build-reporting: anonymous" >> $CABALHOME/config 78 | echo "write-ghc-environment-files: always" >> $CABALHOME/config 79 | echo "remote-repo-cache: $CABALHOME/packages" >> $CABALHOME/config 80 | echo "logs-dir: $CABALHOME/logs" >> $CABALHOME/config 81 | echo "world-file: $CABALHOME/world" >> $CABALHOME/config 82 | echo "extra-prog-path: $CABALHOME/bin" >> $CABALHOME/config 83 | echo "symlink-bindir: $CABALHOME/bin" >> 
$CABALHOME/config 84 | echo "installdir: $CABALHOME/bin" >> $CABALHOME/config 85 | echo "build-summary: $CABALHOME/logs/build.log" >> $CABALHOME/config 86 | echo "store-dir: $CABALHOME/store" >> $CABALHOME/config 87 | echo "install-dirs user" >> $CABALHOME/config 88 | echo " prefix: $CABALHOME" >> $CABALHOME/config 89 | echo "repository hackage.haskell.org" >> $CABALHOME/config 90 | echo " url: http://hackage.haskell.org/" >> $CABALHOME/config 91 | echo " secure: True" >> $CABALHOME/config 92 | echo " key-threshold: 3" >> $CABALHOME/config 93 | echo " root-keys:" >> $CABALHOME/config 94 | echo " fe331502606802feac15e514d9b9ea83fee8b6ffef71335479a2e68d84adc6b0" >> $CABALHOME/config 95 | echo " 1ea9ba32c526d1cc91ab5e5bd364ec5e9e8cb67179a471872f6e26f0ae773d42" >> $CABALHOME/config 96 | echo " 2c6c3627bd6c982990239487f1abd02e08a02e6cf16edb105a8012d444d870c3" >> $CABALHOME/config 97 | echo " 0a5c7ea47cd1b15f01f5f51a33adda7e655bc0f0b0615baa8e271f4c3351e21d" >> $CABALHOME/config 98 | echo " 51f0161b906011b52c6613376b1ae937670da69322113a246a09f807c62f6921" >> $CABALHOME/config 99 | - | 100 | echo "program-default-options" >> $CABALHOME/config 101 | echo " ghc-options: $GHCJOBS +RTS -M6G -RTS" >> $CABALHOME/config 102 | - cat $CABALHOME/config 103 | - rm -fv cabal.project cabal.project.local cabal.project.freeze 104 | - travis_retry ${CABAL} v2-update -v 105 | # Generate cabal.project 106 | - rm -rf cabal.project cabal.project.local cabal.project.freeze 107 | - touch cabal.project 108 | - | 109 | echo "packages: ." >> cabal.project 110 | - "for pkg in $($HCPKG list --simple-output); do echo $pkg | sed 's/-[^-]*$//' | (grep -vE -- '^(hasql-queue)$' || true) | sed 's/^/constraints: /' | sed 's/$/ installed/' >> cabal.project.local; done" 111 | - cat cabal.project || true 112 | - cat cabal.project.local || true 113 | - if [ -f "./configure.ac" ]; then (cd "." 
&& autoreconf -i); fi 114 | - ${CABAL} v2-freeze $WITHCOMPILER ${TEST} ${BENCH} | color_cabal_output 115 | - "cat cabal.project.freeze | sed -E 's/^(constraints: *| *)//' | sed 's/any.//'" 116 | - rm cabal.project.freeze 117 | - ${CABAL} v2-build $WITHCOMPILER ${TEST} ${BENCH} --dep -j2 all | color_cabal_output 118 | - ${CABAL} v2-build $WITHCOMPILER --disable-tests --disable-benchmarks --dep -j2 all | color_cabal_output 119 | script: 120 | - DISTDIR=$(mktemp -d /tmp/dist-test.XXXX) 121 | # Packaging... 122 | - ${CABAL} v2-sdist all | color_cabal_output 123 | # Unpacking... 124 | - mv dist-newstyle/sdist/*.tar.gz ${DISTDIR}/ 125 | - cd ${DISTDIR} || false 126 | - find . -maxdepth 1 -type f -name '*.tar.gz' -exec tar -xvf '{}' \; 127 | - find . -maxdepth 1 -type f -name '*.tar.gz' -exec rm '{}' \; 128 | - PKGDIR_hasql_queue="$(find . -maxdepth 1 -type d -regex '.*/hasql-queue-[0-9.]*')" 129 | # Generate cabal.project 130 | - rm -rf cabal.project cabal.project.local cabal.project.freeze 131 | - touch cabal.project 132 | - | 133 | echo "packages: ${PKGDIR_hasql_queue}" >> cabal.project 134 | - "for pkg in $($HCPKG list --simple-output); do echo $pkg | sed 's/-[^-]*$//' | (grep -vE -- '^(hasql-queue)$' || true) | sed 's/^/constraints: /' | sed 's/$/ installed/' >> cabal.project.local; done" 135 | - cat cabal.project || true 136 | - cat cabal.project.local || true 137 | # Building... 138 | # this builds all libraries and executables (without tests/benchmarks) 139 | - ${CABAL} v2-build $WITHCOMPILER --disable-tests --disable-benchmarks all | color_cabal_output 140 | # Building with tests and benchmarks... 141 | # build & run tests, build benchmarks 142 | - ${CABAL} v2-build $WITHCOMPILER ${TEST} ${BENCH} all | color_cabal_output 143 | # Testing... 144 | - ${CABAL} v2-test $WITHCOMPILER ${TEST} ${BENCH} all | color_cabal_output 145 | # cabal check... 146 | - (cd ${PKGDIR_hasql_queue} && ${CABAL} -vnormal check) 147 | # haddock... 
148 | - ${CABAL} v2-haddock $WITHCOMPILER --with-haddock $HADDOCK ${TEST} ${BENCH} all | color_cabal_output 149 | # Building without installed constraints for packages in global-db... 150 | - rm -f cabal.project.local 151 | - ${CABAL} v2-build $WITHCOMPILER --disable-tests --disable-benchmarks all | color_cabal_output 152 | 153 | # REGENDATA ("0.9.20191213",["cabal.project","--output",".travis.yml"]) 154 | # EOF 155 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Changelog for hasql-queue 2 | - 1.2.0.2 3 | - Better partial index #104 4 | 5 | - 1.2.0.1 6 | - Update README.md 7 | 8 | -1.2.0.0 9 | - Escape notification channel #100 10 | - Remove dequeued count from benchmarks #99 11 | - Document new api #98 12 | - Create teardown for migration #96 13 | - Extend API to use schemas #95 14 | - Add `delete` to the `AtLeastOnce` APIs #92 15 | - Change `failed` function to `failures` and return all `PayloadIds` #91 16 | - Reorg API #89 17 | - Use a compound index to make the partial index work #86 18 | - Specialize single element dequeue #84 19 | - Delete instead of changing the state to dequeued #82 20 | - 21 | 22 | 23 | - 1.0.1.1 24 | - Fixed cabal meta data and copyright 25 | 26 | - 1.0.1 27 | - First release! 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Author Jonathan Fischoff (c) 2020 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 
10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of Author name here nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Travis CI Status](https://travis-ci.org/jfischoff/hasql-queue.svg?branch=master)](http://travis-ci.org/jfischoff/hasql-queue) 2 | 3 | # hasql-queue 4 | 5 | This module utilizes PostgreSQL to implement a durable queue for efficently processing payloads. 
6 | 7 | Typically a producer would enqueue a new payload as part of larger database transaction 8 | 9 | ```haskell 10 | createAccount userRecord = transaction Serializable Write $ do 11 | createUser userRecord 12 | enqueue "queue_channel" emailEncoder [makeVerificationEmail userRecord] 13 | ``` 14 | 15 | In another thread or process the consumer would drain the queue. 16 | 17 | ```haskell 18 | -- Wait for a single new record and try to send the email 5 times for giving 19 | -- up and marking the payload as failed. 20 | forever $ withDequeue "queue_channel" conn emailDecoder 5 1 $ 21 | mapM_ sendEmail 22 | ``` 23 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://vagrantcloud.com/search. 15 | config.vm.box = "bento/ubuntu-20.04" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 
20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine. 25 | # NOTE: This will enable public access to the opened port 26 | # config.vm.network "forwarded_port", guest: 80, host: 8080 27 | 28 | # Create a forwarded port mapping which allows access to a specific port 29 | # within the machine from a port on the host machine and only allow access 30 | # via 127.0.0.1 to disable public access 31 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 32 | 33 | # Create a private network, which allows host-only access to the machine 34 | # using a specific IP. 35 | # config.vm.network "private_network", ip: "192.168.33.10" 36 | 37 | # Create a public network, which generally matched to bridged network. 38 | # Bridged networks make the machine appear as another physical device on 39 | # your network. 40 | # config.vm.network "public_network" 41 | 42 | # Share an additional folder to the guest VM. The first argument is 43 | # the path on the host to the actual folder. The second argument is 44 | # the path on the guest to mount the folder. And the optional third 45 | # argument is a set of non-required options. 46 | # config.vm.synced_folder "../data", "/vagrant_data" 47 | 48 | # Provider-specific configuration so you can fine-tune various 49 | # backing providers for Vagrant. These expose provider-specific options. 50 | # Example for VirtualBox: 51 | # 52 | config.vm.provider "virtualbox" do |vb| 53 | # # Display the VirtualBox GUI when booting the machine 54 | # vb.gui = true 55 | # 56 | # # Customize the amount of memory on the VM: 57 | vb.cpus = 8 58 | vb.memory = "4096" 59 | end 60 | # 61 | # View the documentation for the provider you are using for more 62 | # information on available options. 
63 | 64 | # Enable provisioning with a shell script. Additional provisioners such as 65 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 66 | # documentation for more information about their specific syntax and use. 67 | config.vm.provision "shell", inline: <<-SHELL 68 | sudo apt update 69 | sudo apt install -y linux-tools-common linux-tools-generic linux-tools-5.4.0-29-generic 70 | 71 | sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' 72 | wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - 73 | sudo apt-get update 74 | sudo apt-get install -y postgresql-12 libpq-dev postgresql-client-12 75 | 76 | curl -sSL https://get.haskellstack.org/ | sh 77 | 78 | echo "export PATH=$PATH:/usr/lib/postgresql/12/bin/" >> /home/ubuntu/.bashrc 79 | 80 | sudo sysctl -w kernel.perf_event_paranoid=-1 81 | 82 | SHELL 83 | end 84 | -------------------------------------------------------------------------------- /benchmarks/Main.hs: -------------------------------------------------------------------------------- 1 | module Main where 2 | import System.Environment 3 | import Hasql.Queue.Migrate 4 | import Data.IORef 5 | import Control.Exception 6 | import Crypto.Hash.SHA1 (hash) 7 | import qualified Data.ByteString.Base64.URL as Base64 8 | import qualified Data.ByteString.Char8 as BSC 9 | import Data.Pool 10 | import Database.Postgres.Temp 11 | import Control.Concurrent 12 | import Control.Monad (replicateM, forever, void) 13 | import Hasql.Session 14 | import Hasql.Connection 15 | import Data.Function 16 | import qualified Hasql.Encoders as E 17 | import qualified Hasql.Decoders as D 18 | import Hasql.Statement 19 | import qualified Hasql.Queue.Internal as I 20 | import qualified Hasql.Queue.Low.AtLeastOnce as IO 21 | import qualified Hasql.Queue.High.ExactlyOnce as S 22 | import Data.Int 23 | 24 | -- TODO need to make sure the number of 
producers and consumers does not go over the number of connections 25 | 26 | withConn :: DB -> (Connection -> IO a) -> IO a 27 | withConn db f = do 28 | let connStr = toConnectionString db 29 | bracket (either (throwIO . userError . show) pure =<< acquire connStr) release f 30 | 31 | durableConfig :: Int -> Config 32 | durableConfig microseconds = defaultConfig <> mempty 33 | { postgresConfigFile = 34 | [ ("wal_level", "replica") 35 | , ("archive_mode", "on") 36 | , ("max_wal_senders", "2") 37 | , ("fsync", "on") 38 | , ("synchronous_commit", "on") 39 | , ("commit_delay", show microseconds) 40 | ] 41 | } 42 | 43 | withSetup :: Int -> Bool -> (Pool Connection -> IO ()) -> IO () 44 | withSetup microseconds durable f = do 45 | -- Helper to throw exceptions 46 | let throwE x = either throwIO pure =<< x 47 | 48 | throwE $ withDbCache $ \dbCache -> do 49 | --let combinedConfig = autoExplainConfig 15 <> cacheConfig dbCache 50 | let combinedConfig = (if durable then durableConfig microseconds else defaultConfig) <> cacheConfig dbCache 51 | migratedConfig <- throwE $ cacheAction (("~/.tmp-postgres/" <>) . BSC.unpack . Base64.encode . hash 52 | $ BSC.pack $ migrationQueryString "int4") 53 | (flip withConn $ flip migrate "int4") 54 | combinedConfig 55 | withConfig migratedConfig $ \db -> do 56 | print $ toConnectionString db 57 | 58 | f =<< createPool 59 | (either (throwIO . userError . 
show) pure =<< acquire (toConnectionString db) 60 | ) release 2 60 49 61 | 62 | payload :: Int32 63 | payload = 1 64 | 65 | main :: IO () 66 | main = do 67 | [producerCount, consumerCount, time, initialEnqueueCount, enqueueBatchCount, dequeueBatchCount, notify, durable, microseconds] 68 | <- map read <$> getArgs 69 | -- create a temporary database 70 | enqueueCounter <- newIORef (0 :: Int) 71 | dequeueCounter <- newIORef (0 :: Int) 72 | 73 | let printCounters = do 74 | finalEnqueueCount <- readIORef enqueueCounter 75 | finalDequeueCount <- readIORef dequeueCounter 76 | putStrLn $ "Time " <> show time <> " secs" 77 | putStrLn $ "Enqueue Count: " <> show finalEnqueueCount 78 | putStrLn $ "Dequeue Count: " <> show finalDequeueCount 79 | 80 | flip finally printCounters $ withSetup microseconds (1 == durable) $ \pool -> do 81 | -- enqueue the enqueueCount + dequeueCount 82 | let enqueueAction = if notify > 0 83 | then void $ withResource pool $ \conn -> IO.enqueue "channel" conn E.int4 (replicate enqueueBatchCount payload) 84 | else void $ withResource pool $ \conn -> I.runThrow (S.enqueue E.int4 (replicate enqueueBatchCount payload)) conn 85 | dequeueAction = if notify > 0 86 | then void $ withResource pool $ \conn -> 87 | IO.withDequeue "channel" conn D.int4 1 dequeueBatchCount (const $ pure ()) 88 | else void $ withResource pool $ \conn -> fix $ \next -> 89 | I.runThrow (S.dequeue D.int4 dequeueBatchCount) conn >>= \case 90 | [] -> next 91 | _ -> pure () 92 | 93 | let enqueueInsertSql = "INSERT INTO payloads (attempts, value) SELECT 0, g.value FROM generate_series(1, $1) AS g (value)" 94 | enqueueInsertStatement = 95 | statement (fromIntegral initialEnqueueCount) $ Statement enqueueInsertSql (E.param $ E.nonNullable E.int4) D.noResult False 96 | 97 | _ <- withResource pool $ run enqueueInsertStatement 98 | 99 | withResource pool $ \conn -> void $ run (sql "VACUUM FULL ANALYZE") conn 100 | putStrLn "Finished VACUUM FULL ANALYZE" 101 | 102 | let enqueueLoop = forever $ 
do 103 | enqueueAction 104 | atomicModifyIORef' enqueueCounter $ \x -> (x+1, ()) 105 | 106 | dequeueLoop = forever $ do 107 | dequeueAction 108 | atomicModifyIORef' dequeueCounter $ \x -> (x+1, ()) 109 | 110 | -- Need better exception behavior ... idk ... I'll deal with this later 111 | _enqueueThreads <- replicateM producerCount $ forkIO enqueueLoop 112 | _dequeueThreads <- replicateM consumerCount $ forkIO dequeueLoop 113 | 114 | threadDelay $ time * 1000000 115 | throwIO $ userError "Finished" 116 | -------------------------------------------------------------------------------- /cabal.project: -------------------------------------------------------------------------------- 1 | packages: ./ 2 | -------------------------------------------------------------------------------- /developer_notes.md: -------------------------------------------------------------------------------- 1 | # 6/13/20 2 | - I guess I am ready to release it now 3 | 4 | # 2/2/20 5 | - Not sure why stack is not working 6 | 7 | # 1/21/20 8 | - According to the flamecharts preparing the statements takes a third of the time. Also setting the schema takes some time. 9 | - Going to remove the schema setting first to simplify the code. Then I'll use prepared statements. 10 | - The reason I need the schema is for a prefix for the notify 11 | - One option is to remove it and the figure out how to add it back. 12 | - Yeah I'm going to do that. I would like to keep things simple as I improve perf 13 | - I think it needs to make a record of function based on the string 14 | 15 | # 1/13/20 16 | - Initial benchmark setup ... 
not sure what else to do with it at the moment 17 | - There are two types of slow queries 18 | - Update on public.payloads (cost=4.01..12.04 rows=1 width=102) (actual time=21.069..21.070 rows=1 loops=1) 19 | Output: payloads.id, payloads.value, payloads.state, payloads.attempts, payloads.created_at, payloads.modified_at 20 | Buffers: shared hit=69 21 | -> Nested Loop (cost=4.01..12.04 rows=1 width=102) (actual time=2.849..2.850 rows=1 loops=1) 22 | Output: payloads.id, payloads.value, payloads.attempts, 'dequeued'::state_t, payloads.created_at, payloads.modified_at, payloads.ctid, "ANY_subquery".* 23 | Inner Unique: true 24 | Buffers: shared hit=43 25 | -> HashAggregate (cost=3.73..3.74 rows=1 width=40) (actual time=2.835..2.836 rows=1 loops=1) 26 | Output: "ANY_subquery".*, "ANY_subquery".id 27 | Group Key: "ANY_subquery".id 28 | Buffers: shared hit=40 29 | -> Subquery Scan on "ANY_subquery" (cost=0.28..3.72 rows=1 width=40) (actual time=2.821..2.823 rows=1 loops=1) 30 | Output: "ANY_subquery".*, "ANY_subquery".id 31 | Buffers: shared hit=40 32 | -> Limit (cost=0.28..3.71 rows=1 width=22) (actual time=2.799..2.800 rows=1 loops=1) 33 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 34 | Buffers: shared hit=40 35 | -> LockRows (cost=0.28..161.58 rows=47 width=22) (actual time=2.798..2.798 rows=1 loops=1) 36 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 37 | Buffers: shared hit=40 38 | -> Index Scan using active_modified_at_idx on public.payloads payloads_1 (cost=0.28..161.11 rows=47 width=22) (actual time=1.262..1.296 rows=21 loops=1) 39 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 40 | Filter: (payloads_1.state = 'enqueued'::state_t) 41 | Buffers: shared hit=19 42 | -> Index Scan using payloads_pkey on public.payloads (cost=0.29..8.30 rows=1 width=66) (actual time=0.011..0.011 rows=1 loops=1) 43 | Output: payloads.id, payloads.value, payloads.attempts, payloads.created_at, payloads.modified_at, payloads.ctid 44 
| Index Cond: (payloads.id = "ANY_subquery".id) 45 | Buffers: shared hit=3 46 | Trigger payloads_modified: time=0.299 calls=1 47 | 48 | Where the time is attributed to nothing in particular and where it is attributed to the lock 49 | 50 | - Update on public.payloads (cost=6.54..14.56 rows=1 width=80) (actual time=23.427..23.428 rows=1 loops=1) 51 | Output: payloads.id, payloads.value, payloads.state, payloads.attempts, payloads.created_at, payloads.modified_at 52 | Buffers: shared hit=371 53 | -> Nested Loop (cost=6.54..14.56 rows=1 width=80) (actual time=23.388..23.389 rows=1 loops=1) 54 | Output: payloads.id, payloads.value, payloads.attempts, 'dequeued'::state_t, payloads.created_at, payloads.modified_at, payloads.ctid, "ANY_subquery".* 55 | Inner Unique: true 56 | Buffers: shared hit=363 57 | -> HashAggregate (cost=6.25..6.26 rows=1 width=40) (actual time=23.378..23.378 rows=1 loops=1) 58 | Output: "ANY_subquery".*, "ANY_subquery".id 59 | Group Key: "ANY_subquery".id 60 | Buffers: shared hit=360 61 | -> Subquery Scan on "ANY_subquery" (cost=0.28..6.25 rows=1 width=40) (actual time=23.374..23.375 rows=1 loops=1) 62 | Output: "ANY_subquery".*, "ANY_subquery".id 63 | Buffers: shared hit=360 64 | -> Limit (cost=0.28..6.24 rows=1 width=22) (actual time=23.370..23.370 rows=1 loops=1) 65 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 66 | Buffers: shared hit=360 67 | -> LockRows (cost=0.28..6.24 rows=1 width=22) (actual time=23.369..23.369 rows=1 loops=1) 68 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 69 | Buffers: shared hit=360 70 | -> Index Scan using active_modified_at_idx on public.payloads payloads_1 (cost=0.28..6.23 rows=1 width=22) (actual time=0.068..0.191 rows=86 loops=1) 71 | Output: payloads_1.id, payloads_1.modified_at, payloads_1.ctid 72 | Filter: (payloads_1.state = 'enqueued'::state_t) 73 | Buffers: shared hit=141 74 | -> Index Scan using payloads_pkey on public.payloads (cost=0.29..8.30 rows=1 width=44) (actual 
time=0.007..0.007 rows=1 loops=1) 75 | Output: payloads.id, payloads.value, payloads.attempts, payloads.created_at, payloads.modified_at, payloads.ctid 76 | Index Cond: (payloads.id = "ANY_subquery".id) 77 | Buffers: shared hit=3 78 | Trigger payloads_modified: time=0.013 calls=1 79 | 80 | # 1/12/20 81 | 82 | - I wish there were actual benchmarks already setup. 83 | - I want to see if the `pg_wait_sampling` provides any insight. 84 | - Not really about to see any perf issues just looking at the tests. 85 | - I need to make actual benchmarks to stress the system. 86 | - Some thoughts about benchmarks 87 | - I need to warm the caches when setting up 88 | - I should run vacuum analyze full and reindex before running. 89 | - I would like to be able to `pg_wait_sampling` as well. 90 | - I feel like I am going to want to be able replay what happens when there is contention. 91 | - I should map out what happens as the consumers and producers numbers are adjusted. 92 | - I don't think I should write a criterion benchmark for this. I think I should have a 93 | exe that that could how many queues/enqueues in some amount of time. 94 | - It should take args for 95 | - number of producers 96 | - number of consumers 97 | - number of time 98 | - initial number dequeued payloads 99 | - initial number of enqueued payloads 100 | # Decemeber 27th 2019 101 | 102 | The result of removing a postgres instance. 103 | 104 | The old times are on the left and the new (removed instance) are on the right. 105 | 106 | 1.6324 1.4624 R 107 | 1.6689 1.6946 L 108 | 1.7416 1.3665 R 109 | 1.8576 1.5354 R 110 | 1.8035 1.5383 R 111 | 1.7434 1.4234 R 112 | 1.9321 1.4897 R 113 | 1.7444 1.4729 R 114 | 1.6803 1.4600 R 115 | 1.6598 1.6410 R 116 | 117 | Removing an instance wins. 
118 | -------------------------------------------------------------------------------- /hasql-queue-tmp-db/Main.hs: -------------------------------------------------------------------------------- 1 | import Database.Postgres.Temp 2 | import Control.Concurrent 3 | import Hasql.Queue.Migrate 4 | import qualified Data.ByteString.Char8 as BSC 5 | import qualified Data.ByteString.Base64.URL as Base64 6 | import Control.Exception 7 | import Control.Monad 8 | import Crypto.Hash.SHA1 (hash) 9 | import Hasql.Connection 10 | 11 | withConn :: DB -> (Connection -> IO a) -> IO a 12 | withConn db f = do 13 | let connStr = toConnectionString db 14 | bracket (either (throwIO . userError . show) pure =<< acquire connStr) release f 15 | 16 | 17 | main :: IO () 18 | main = either throwIO pure <=< withDbCache $ \dbCache -> do 19 | migratedConfig <- either throwIO pure =<< 20 | cacheAction 21 | (("~/.tmp-postgres/" <>) . BSC.unpack . Base64.encode . hash 22 | $ BSC.pack $ migrationQueryString "int4") 23 | (flip withConn $ flip migrate "int4") 24 | (autoExplainConfig 1 <> cacheConfig dbCache) 25 | withConfig migratedConfig $ \db -> do 26 | print $ toConnectionString db 27 | forever $ threadDelay 100000000 28 | -------------------------------------------------------------------------------- /hasql-queue.cabal: -------------------------------------------------------------------------------- 1 | cabal-version: 1.12 2 | 3 | -- This file has been generated from package.yaml by hpack version 0.31.2. 4 | -- 5 | -- see: https://github.com/sol/hpack 6 | -- 7 | -- hash: 956ae93525f9dafcc0c9c8149cd2bbc8cfcfe4e63310adec92ce40f995e4cbf4 8 | 9 | name: hasql-queue 10 | version: 1.2.0.1 11 | synopsis: A PostgreSQL backed queue 12 | description: A PostgreSQL backed queue. 
Please see README.md 13 | category: Web 14 | homepage: https://github.com/jfischoff/hasql-queue#readme 15 | bug-reports: https://github.com/jfischoff/hasql-queue/issues 16 | author: Jonathan Fischoff 17 | maintainer: jonathangfischoff@gmail.com 18 | copyright: 2020 Jonathan Fischoff 19 | license: BSD3 20 | license-file: LICENSE 21 | tested-with: GHC ==8.8.1 22 | build-type: Simple 23 | extra-source-files: 24 | README.md 25 | CHANGELOG.md 26 | 27 | source-repository head 28 | type: git 29 | location: https://github.com/jfischoff/hasql-queue 30 | 31 | library 32 | exposed-modules: 33 | Hasql.Queue.High.AtLeastOnce 34 | Hasql.Queue.High.AtMostOnce 35 | Hasql.Queue.High.ExactlyOnce 36 | Hasql.Queue.Internal 37 | Hasql.Queue.Low.AtLeastOnce 38 | Hasql.Queue.Low.AtMostOnce 39 | Hasql.Queue.Low.ExactlyOnce 40 | Hasql.Queue.Migrate 41 | other-modules: 42 | Paths_hasql_queue 43 | hs-source-dirs: 44 | src 45 | default-extensions: OverloadedStrings LambdaCase RecordWildCards TupleSections GeneralizedNewtypeDeriving QuasiQuotes ScopedTypeVariables TypeApplications AllowAmbiguousTypes 46 | ghc-options: -Wall -Wno-unused-do-bind -Wno-unused-foralls 47 | build-depends: 48 | aeson 49 | , base >=4.7 && <5 50 | , bytestring 51 | , exceptions 52 | , hasql 53 | , here 54 | , monad-control 55 | , postgresql-libpq 56 | , postgresql-libpq-notify >=0.2.0.0 57 | , random 58 | , stm 59 | , text 60 | , time 61 | , transformers 62 | default-language: Haskell2010 63 | 64 | executable benchmark 65 | main-is: Main.hs 66 | other-modules: 67 | Paths_hasql_queue 68 | hs-source-dirs: 69 | benchmarks 70 | default-extensions: OverloadedStrings LambdaCase RecordWildCards TupleSections GeneralizedNewtypeDeriving QuasiQuotes ScopedTypeVariables TypeApplications AllowAmbiguousTypes 71 | ghc-options: -Wall -Wno-unused-do-bind -Wno-unused-foralls -O2 -threaded -rtsopts -with-rtsopts=-N 72 | build-depends: 73 | aeson 74 | , async 75 | , base >=4.7 && <5 76 | , base64-bytestring 77 | , bytestring 78 | , 
cryptohash-sha1 79 | , exceptions 80 | , hasql 81 | , hasql-queue 82 | , here 83 | , monad-control 84 | , postgresql-libpq 85 | , postgresql-libpq-notify >=0.2.0.0 86 | , random 87 | , resource-pool 88 | , stm 89 | , text 90 | , time 91 | , tmp-postgres 92 | , transformers 93 | default-language: Haskell2010 94 | 95 | executable hasql-queue-tmp-db 96 | main-is: Main.hs 97 | other-modules: 98 | Paths_hasql_queue 99 | hs-source-dirs: 100 | hasql-queue-tmp-db 101 | default-extensions: OverloadedStrings LambdaCase RecordWildCards TupleSections GeneralizedNewtypeDeriving QuasiQuotes ScopedTypeVariables TypeApplications AllowAmbiguousTypes 102 | ghc-options: -Wall -Wno-unused-do-bind -Wno-unused-foralls -O2 -threaded -rtsopts -with-rtsopts=-N -g2 103 | build-depends: 104 | aeson 105 | , async 106 | , base >=4.7 && <5 107 | , base64-bytestring 108 | , bytestring 109 | , cryptohash-sha1 110 | , exceptions 111 | , hasql 112 | , hasql-queue 113 | , here 114 | , monad-control 115 | , postgresql-libpq 116 | , postgresql-libpq-notify >=0.2.0.0 117 | , random 118 | , stm 119 | , text 120 | , time 121 | , tmp-postgres 122 | , transformers 123 | default-language: Haskell2010 124 | 125 | test-suite unit-tests 126 | type: exitcode-stdio-1.0 127 | main-is: Main.hs 128 | other-modules: 129 | Hasql.Queue.High.AtLeastOnceSpec 130 | Hasql.Queue.High.AtMostOnceSpec 131 | Hasql.Queue.High.ExactlyOnceSpec 132 | Hasql.Queue.Low.AtLeastOnceSpec 133 | Hasql.Queue.Low.AtMostOnceSpec 134 | Hasql.Queue.Low.ExactlyOnceSpec 135 | Hasql.Queue.MigrateSpec 136 | Hasql.Queue.TestUtils 137 | Paths_hasql_queue 138 | hs-source-dirs: 139 | test 140 | default-extensions: OverloadedStrings LambdaCase RecordWildCards TupleSections GeneralizedNewtypeDeriving QuasiQuotes ScopedTypeVariables TypeApplications AllowAmbiguousTypes 141 | ghc-options: -Wall -Wno-unused-do-bind -Wno-unused-foralls -O2 -threaded -rtsopts -with-rtsopts=-N 142 | build-depends: 143 | aeson 144 | , async 145 | , base >=4.7 && <5 146 | , 
base64-bytestring 147 | , bytestring 148 | , cryptohash-sha1 149 | , exceptions 150 | , hasql 151 | , hasql-queue 152 | , here 153 | , hspec 154 | , hspec-core 155 | , hspec-expectations-lifted 156 | , monad-control 157 | , postgresql-libpq 158 | , postgresql-libpq-notify >=0.2.0.0 159 | , random 160 | , resource-pool 161 | , split 162 | , stm 163 | , text 164 | , time 165 | , tmp-postgres 166 | , transformers 167 | default-language: Haskell2010 168 | -------------------------------------------------------------------------------- /package.yaml: -------------------------------------------------------------------------------- 1 | name: hasql-queue 2 | version: '1.2.0.2' 3 | synopsis: A PostgreSQL backed queue 4 | description: A PostgreSQL backed queue. Please see README.md 5 | category: Web 6 | author: Jonathan Fischoff 7 | maintainer: jonathangfischoff@gmail.com 8 | copyright: 2020 Jonathan Fischoff 9 | license: BSD3 10 | github: jfischoff/hasql-queue 11 | ghc-options: 12 | - -Wall 13 | - -Wno-unused-do-bind 14 | - -Wno-unused-foralls 15 | extra-source-files: 16 | - README.md 17 | - CHANGELOG.md 18 | 19 | 20 | tested-with: GHC ==8.8.1 21 | 22 | default-extensions: 23 | - OverloadedStrings 24 | - LambdaCase 25 | - RecordWildCards 26 | - TupleSections 27 | - GeneralizedNewtypeDeriving 28 | - QuasiQuotes 29 | - ScopedTypeVariables 30 | - TypeApplications 31 | - AllowAmbiguousTypes 32 | 33 | dependencies: 34 | - base >=4.7 && <5 35 | - time 36 | - transformers 37 | - random 38 | - text 39 | - monad-control 40 | - exceptions 41 | - hasql 42 | - postgresql-libpq 43 | - postgresql-libpq-notify >= 0.2.0.0 44 | - aeson 45 | - bytestring 46 | - stm 47 | - here 48 | 49 | library: 50 | source-dirs: src 51 | 52 | tests: 53 | unit-tests: 54 | main: Main.hs 55 | source-dirs: test 56 | ghc-options: 57 | - -O2 58 | - -threaded 59 | - -rtsopts 60 | - -with-rtsopts=-N 61 | dependencies: 62 | - base64-bytestring 63 | - cryptohash-sha1 64 | - hasql-queue 65 | - hspec 66 | - hspec-core 
67 | - hspec-expectations-lifted 68 | - async 69 | - split 70 | - tmp-postgres 71 | - resource-pool 72 | 73 | executables: 74 | benchmark: 75 | main: Main.hs 76 | source-dirs: benchmarks 77 | ghc-options: 78 | - -O2 79 | - -threaded 80 | - -rtsopts 81 | - -with-rtsopts=-N 82 | dependencies: 83 | - base64-bytestring 84 | - cryptohash-sha1 85 | - hasql-queue 86 | - async 87 | - tmp-postgres 88 | - resource-pool 89 | 90 | hasql-queue-tmp-db: 91 | main: Main.hs 92 | source-dirs: hasql-queue-tmp-db 93 | ghc-options: 94 | - -O2 95 | - -threaded 96 | - -rtsopts 97 | - -with-rtsopts=-N 98 | - -g2 99 | dependencies: 100 | - base64-bytestring 101 | - cryptohash-sha1 102 | - hasql-queue 103 | - async 104 | - tmp-postgres 105 | -------------------------------------------------------------------------------- /s/ghcid: -------------------------------------------------------------------------------- 1 | ghcid -c 'stack ghci --test hasql-queue:lib hasql-queue:test:unit-tests' --test=:main --restart=package.yaml --restart=stack.yaml 2 | -------------------------------------------------------------------------------- /s/ghcid-bench: -------------------------------------------------------------------------------- 1 | ghcid -c 'stack ghci hasql-queue:lib hasql-queue:exe:benchmark' --restart=package.yaml --restart=stack.yaml 2 | -------------------------------------------------------------------------------- /s/test-time: -------------------------------------------------------------------------------- 1 | stack test 2>&1 | grep Fini 2 | -------------------------------------------------------------------------------- /src/Hasql/Queue/High/AtLeastOnce.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.High.AtLeastOnce where 2 | import qualified Hasql.Queue.High.ExactlyOnce as H 3 | import qualified Hasql.Queue.Internal as I 4 | import Hasql.Connection 5 | import qualified Hasql.Encoders as E 6 | import qualified Hasql.Decoders as D 
{-|
  Wait for the next payload and process it. If the continuation throws an
  exception the payloads are put back in the queue. 'IOError' is caught
  and 'withDequeue' will retry up to the retry count. If 'withDequeue' fails
  after too many retries the final exception is rethrown. If individual
  payloads are attempted more than the retry count they are set as "failed".
  See 'failures' to receive the list of failed payloads.

  If the queue is empty 'withDequeue' returns 'Nothing'. If there are
  any entries 'withDequeue' will wrap the list in 'Just'.
-}
withDequeue :: Connection
            -- ^ Connection
            -> D.Value a
            -- ^ Payload decoder
            -> Int
            -- ^ Retry count
            -> Int
            -- ^ Element count
            -> ([a] -> IO b)
            -- ^ Continuation
            -> IO (Maybe b)
withDequeue = withDequeueWith @IOError
{-|
  A more general configurable version of 'withDequeue'. Unlike 'withDequeue'
  one can specify the exception that causes a retry.

  NOTE(review): unlike the Low-level variant of the same name, this version
  takes no 'I.WithNotifyHandlers' — there are no observable event hooks here.
-}
withDequeueWith :: forall e a b
                 . Exception e
                => Connection
                -- ^ Connection
                -> D.Value a
                -- ^ Payload decoder
                -> Int
                -- ^ Retry count
                -> Int
                -- ^ Element count
                -> ([a] -> IO b)
                -- ^ Continuation
                -> IO (Maybe b)
-- Retry loop: the counter @i@ counts whole-transaction failures of the
-- chosen exception type @e@; after @retryCount@ restarts the final
-- exception is rethrown to the caller.
withDequeueWith conn decoder retryCount count f = (fix $ \restart i -> do
    try (flip I.runThrow conn $ I.withDequeue decoder retryCount count f) >>= \case
      Right x -> pure x
      Left (e :: e) ->
        if i < retryCount then
          restart $ i + 1
        else
          throwIO e
  ) 0
{-|
  Dequeue a list of payloads.
-}
dequeue :: Connection
        -- ^ Connection
        -> D.Value a
        -- ^ Payload decoder
        -> Int
        -- ^ Element count
        -> IO [a]
-- Thin wrapper: run the High-level session dequeue against the given
-- connection, rethrowing any query error as an exception.
dequeue conn decoder count = flip I.runThrow conn $ H.dequeue decoder count
-- | A 'Payload' can exist in two states in the queue: 'Enqueued' and
-- 'Failed'. A 'Payload' starts in the 'Enqueued' state, where it can be
-- locked so some sort of processing can occur with it, usually something
-- in 'IO'. Successful processing deletes the row outright (see
-- 'dequeuePayload'); a payload whose attempts exceed the retry count is
-- moved to the terminal 'Failed' state by 'incrementAttempts'.
data State = Enqueued | Failed
  deriving (Show, Eq, Ord, Enum, Bounded)
-- | 'Payload' decoder.
--
-- Column order matches the @RETURNING id, state, attempts, modified_at, value@
-- clauses used by 'dequeuePayload' and 'getPayload'.
payloadDecoder :: D.Value a -> D.Row (Payload a)
payloadDecoder thePayloadDecoder
  = Payload
  <$> payloadIdRow
  <*> D.column (D.nonNullable stateDecoder)
  -- attempts is declared @int@ (int4) in the migration.
  <*> D.column (D.nonNullable $ fromIntegral <$> D.int4)
  -- modified_at is declared @int8@ in the migration, so decode it as
  -- int8 (previously D.int4, mismatching the schema and inconsistent
  -- with 'payloadIdDecoder' which also decodes an int8 column).
  <*> D.column (D.nonNullable $ fromIntegral <$> D.int8)
  <*> D.column (D.nonNullable thePayloadDecoder)
-- | Bump the attempt counter for the given payloads. Any payload whose
-- attempts have already reached the retry count is moved to the terminal
-- 'failed' state; otherwise it is returned to 'enqueued' so it can be
-- picked up again.
incrementAttempts :: Int -> [PayloadId] -> Session ()
incrementAttempts retryCount pids = do
  let theQuery = [here|
        UPDATE payloads
        SET state=CASE WHEN attempts >= $1 THEN 'failed' :: state_t ELSE 'enqueued' END
        , attempts=attempts+1
        WHERE id = ANY($2)
        |]
      -- Two positional params: $1 = retry count (int4), $2 = id array.
      encoder = (fst >$< E.param (E.nonNullable E.int4)) <>
                (snd >$< E.param (E.nonNullable $ E.foldableArray $ E.nonNullable payloadIdEncoder))

      theStatement = Statement theQuery encoder D.noResult True

  statement (fromIntegral retryCount, pids) theStatement
-- Block until a payload notification is fired. Fired during insertion.
-- Loops (via 'fix') discarding notifications that arrive on channels other
-- than the one we are waiting for; throws if libpq reports an error while
-- fetching the notification.
notifyPayload :: ByteString -> Connection -> IO ()
notifyPayload channel conn = fix $ \restart -> do
  PQ.Notify {..} <- either throwIO pure =<< withLibPQConnection conn getNotification
  unless (notifyRelname == channel) restart
-- | Run a 'Session' that is expected to throw 'NoRows' when there is no
-- work, blocking on a PostgreSQL notification and retrying until it
-- produces a result.
--
-- LISTEN is set up before the loop and UNLISTEN is guaranteed afterwards
-- via 'bracket_'. The 'WithNotifyHandlers' hooks fire after each attempt
-- and just before blocking, to aid observability and white box testing.
withNotifyWith :: WithNotifyHandlers
               -> Text
               -- ^ Notification channel name
               -> Connection
               -> Session a
               -> IO a
withNotifyWith WithNotifyHandlers {..} channel conn action = bracket_
  (flip runThrow conn $ statement channel $ Statement "SELECT listen_on($1)" (E.param $ E.nonNullable E.text) D.noResult True)
  (flip runThrow conn $ statement channel $ Statement "SELECT unlisten_on($1)" (E.param $ E.nonNullable E.text) D.noResult True)
  $ fix $ \restart -> do
    x <- try $ runThrow action conn
    withNotifyHandlersAfterAction
    case x of
      Left NoRows -> do
        -- TODO record the time here
        withNotifyHandlersBeforeNotification
        -- Block until the server notifies on our channel, then retry.
        notifyPayload (TE.encodeUtf8 channel) conn
        restart
      Right xs -> pure xs
-- Move to Internal
-- This should use bracketOnError
-- | Dequeue up to @count@ payloads inside a transaction and hand their
-- values to the continuation @f@. If @f@ throws, the DELETE performed by
-- 'dequeuePayload' is undone by rolling back to the savepoint, the
-- payloads' attempt counters are incremented (possibly marking them
-- 'failed'), that bookkeeping is committed, and the exception is rethrown.
-- An empty queue commits and returns 'Nothing'.
--
-- NOTE(review): an exception arriving between BEGIN and the final COMMIT
-- (outside the 'try') would leave the transaction open on this connection;
-- the bracketOnError TODO above refers to this — confirm before relying on
-- connection reuse after an async exception.
withDequeue :: D.Value a -> Int -> Int -> ([a] -> IO b) -> Session (Maybe b)
withDequeue decoder retryCount count f = do
  -- TODO turn to a save point
  sql "BEGIN;SAVEPOINT temp"
  dequeuePayload decoder count >>= \case
    [] -> Nothing <$ sql "COMMIT"
    xs -> fmap Just $ do
      liftIO (try $ f $ fmap pValue xs) >>= \case
        Left (e :: SomeException) -> do
          -- Undo the DELETE so the payloads are visible again, then record
          -- the failed attempt and commit that bookkeeping.
          sql "ROLLBACK TO SAVEPOINT temp; RELEASE SAVEPOINT temp"
          let pids = fmap pId xs
          incrementAttempts retryCount pids
          sql "COMMIT"
          liftIO (throwIO e)
        Right x -> x <$ sql "COMMIT"
-------------------------------------------------------------------------------- /src/Hasql/Queue/Low/AtLeastOnce.hs: -------------------------------------------------------------------------------- 1 | {-| 2 | A high throughput 'Session' based API for a PostgreSQL backed queue. 3 | -} 4 | module Hasql.Queue.Low.AtLeastOnce 5 | ( enqueue 6 | , withDequeue 7 | -- ** Listing API 8 | , I.PayloadId 9 | , failures 10 | , delete 11 | -- ** Advanced API 12 | , withDequeueWith 13 | , I.WithNotifyHandlers (..) 14 | ) where 15 | 16 | import qualified Hasql.Queue.Low.ExactlyOnce as E 17 | import qualified Hasql.Queue.Internal as I 18 | import Hasql.Connection 19 | import qualified Hasql.Encoders as E 20 | import qualified Hasql.Decoders as D 21 | import Control.Exception 22 | import Data.Function 23 | import Data.Text (Text) 24 | import Control.Monad.IO.Class 25 | 26 | {-|Enqueue a list of payloads. 27 | -} 28 | enqueue :: Text 29 | -- ^ Notification channel name. Any valid PostgreSQL identifier 30 | -> Connection 31 | -- ^ Connection 32 | -> E.Value a 33 | -- ^ Payload encoder 34 | -> [a] 35 | -- ^ List of payloads to enqueue 36 | -> IO () 37 | enqueue channel conn encoder xs = I.runThrow (E.enqueue channel encoder xs) conn 38 | 39 | {-| 40 | Wait for the next payload and process it. If the continuation throws an 41 | exception the payloads are put back in the queue. 'IOError' is caught 42 | and 'withDequeue' will retry up to the retry count. If 'withDequeue' fails 43 | after too many retries the final exception is rethrown. If individual payloads are 44 | are attempted more than the retry count they are set as "failed". See 'failures' 45 | to receive the list of failed payloads. 46 | 47 | If the queue is empty 'withDequeue' will block until it recieves a notification 48 | from the PostgreSQL server. 49 | -} 50 | withDequeue :: Text 51 | -- ^ Notification channel name. 
{-|
  Permanently remove a failed payload.
-}
delete :: Connection
       -> [I.PayloadId]
       -> IO ()
delete conn xs = I.runThrow (I.delete xs) conn
{-|Enqueue a payload.
-}
enqueue :: Text
        -- ^ Notification channel name. Any valid PostgreSQL identifier
        -> Connection
        -- ^ Connection
        -> E.Value a
        -- ^ Payload encoder
        -> [a]
        -- ^ List of payloads to enqueue
        -> IO ()
-- Delegate to the Low-level ExactlyOnce session (which also fires the
-- channel notification), rethrowing query errors as exceptions.
enqueue channel conn encoder payloads =
  flip I.runThrow conn $ E.enqueue channel encoder payloads
{-|Enqueue payloads and send a notification on the
   specified channel.
-}
enqueue :: Text
        -- ^ Notification channel name. Any valid PostgreSQL identifier
        -> E.Value a
        -- ^ Payload encoder
        -> [a]
        -- ^ List of payloads to enqueue
        -> Session ()
enqueue channel theEncoder values = do
  H.enqueue theEncoder values
  -- notify_on is installed by the migration; it NOTIFYs on the given channel.
  statement channel $ Statement "SELECT notify_on($1)" (E.param $ E.nonNullable E.text) D.noResult True
withDequeueWith :: I.WithNotifyHandlers
                -- ^ Event handlers for events that occur as 'withDequeueWith' loops
                -> Text
                -- ^ Notification channel name. Any valid PostgreSQL identifier
                -> Connection
                -- ^ Connection
                -> D.Value a
                -- ^ Payload decoder
                -> Int
                -- ^ Batch count
                -> (Session [a] -> Session b)
                -- ^ Transaction runner
                -> IO b
-- The runner wraps a session that throws 'I.NoRows' on an empty queue;
-- 'I.withNotifyWith' catches that, blocks on the channel notification,
-- and retries until a non-empty batch is processed.
withDequeueWith withNotifyHandlers channel conn theDecoder dequeueCount runner
  = I.withNotifyWith withNotifyHandlers channel conn
  $ runner (dequeueOrRollbackAndThrow theDecoder dequeueCount)
{-| This function creates a table and enumeration type that is
appropriate for the queue. The following sql is used.
CREATE INDEX IF NOT EXISTS active_modified_at_idx ON payloads USING btree (modified_at)
  WHERE (state = 'enqueued');
112 | -} 113 | migrate :: Connection 114 | -> String 115 | -- ^ The type of the @value@ column 116 | -> IO () 117 | migrate conn valueType = 118 | I.runThrow (sql $ fromString $ migrationQueryString valueType) conn 119 | 120 | {-| 121 | Drop everything created by 'migrate' 122 | -} 123 | teardown :: Connection -> IO () 124 | teardown conn = do 125 | let theQuery = [i| 126 | DROP TABLE IF EXISTS payloads; 127 | DROP TYPE IF EXISTS state_t; 128 | DROP SEQUENCE IF EXISTS modified_index; 129 | DROP FUNCTION IF EXISTS notify_on; 130 | DROP FUNCTION IF EXISTS listen_on; 131 | DROP FUNCTION IF EXISTS unlisten_on; 132 | |] 133 | 134 | I.runThrow (sql theQuery) conn 135 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | # This file was automatically generated by 'stack init' 2 | # 3 | # Some commonly used options have been documented as comments in this file. 4 | # For advanced use and comprehensive documentation of the format, please see: 5 | # http://docs.haskellstack.org/en/stable/yaml_configuration/ 6 | 7 | # Resolver to choose a 'specific' stackage snapshot or a compiler version. 8 | # A snapshot resolver dictates the compiler version and the set of packages 9 | # to be used for project dependencies. For example: 10 | # 11 | # resolver: lts-3.5 12 | # resolver: nightly-2015-09-21 13 | # resolver: ghc-7.10.2 14 | # resolver: ghcjs-0.1.0_ghc-7.10.2 15 | # resolver: 16 | # name: custom-snapshot 17 | # location: "./custom-snapshot.yaml" 18 | resolver: lts-16.0 19 | 20 | # User packages to be built. 21 | # Various formats can be used as shown in the example below. 
22 | # 23 | # packages: 24 | # - some-directory 25 | # - https://example.com/foo/bar/baz-0.0.2.tar.gz 26 | # - location: 27 | # git: https://github.com/commercialhaskell/stack.git 28 | # commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a 29 | # - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a 30 | # extra-dep: true 31 | # subdirs: 32 | # - auto-update 33 | # - wai 34 | # 35 | # A package marked 'extra-dep: true' will only be built if demanded by a 36 | # non-dependency (i.e. a user package), and its test suites and benchmarks 37 | # will not be run. This is useful for tweaking upstream packages. 38 | packages: 39 | - '.' 40 | 41 | build: 42 | library-stripping: false 43 | executable-stripping: false 44 | 45 | # Dependency packages to be pulled from upstream that are not in the resolver 46 | # (e.g., acme-missiles-0.3) 47 | extra-deps: 48 | - postgresql-libpq-notify-0.2.0.0 49 | # Override default flag values for local packages and extra-deps 50 | flags: {} 51 | 52 | # Extra package databases containing global packages 53 | extra-package-dbs: [] 54 | 55 | # Control whether we use the GHC we find on the path 56 | # system-ghc: true 57 | # 58 | # Require a specific version of stack, using version ranges 59 | # require-stack-version: -any # Default 60 | # require-stack-version: ">=1.4" 61 | # 62 | # Override the architecture used by stack, especially useful on Windows 63 | # arch: i386 64 | # arch: x86_64 65 | # 66 | # Extra directories used by stack for building 67 | # extra-include-dirs: [/path/to/dir] 68 | # extra-lib-dirs: [/path/to/dir] 69 | # 70 | # Allow a newer minor version of GHC than the snapshot specifies 71 | # compiler-check: newer-minor 72 | 73 | 74 | ghc-options: 75 | "$everything": -g2 -O2 76 | -------------------------------------------------------------------------------- /stack.yaml.lock: -------------------------------------------------------------------------------- 1 | # This file was 
autogenerated by Stack. 2 | # You should not edit this file by hand. 3 | # For more information, please see the documentation at: 4 | # https://docs.haskellstack.org/en/stable/lock_files 5 | 6 | packages: 7 | - completed: 8 | hackage: postgresql-libpq-notify-0.2.0.0@sha256:bbc4340400dcd788eaea238b5e7a64abe6046f84b1ba0a333384512cf081ec8c,1365 9 | pantry-tree: 10 | size: 354 11 | sha256: c3dc643ca689ec267e00d629ad527cda61a14b34dcf2490a08a9b5e250a8c95a 12 | original: 13 | hackage: postgresql-libpq-notify-0.2.0.0 14 | snapshots: 15 | - completed: 16 | size: 531237 17 | url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/0.yaml 18 | sha256: 210e15b7043e2783115afe16b0d54914b1611cdaa73f3ca3ca7f8e0847ff54e5 19 | original: lts-16.0 20 | -------------------------------------------------------------------------------- /test/Hasql/Queue/High/AtLeastOnceSpec.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.High.AtLeastOnceSpec where 2 | import Hasql.Queue.High.AtLeastOnce 3 | import qualified Hasql.Encoders as E 4 | import qualified Hasql.Decoders as D 5 | import Test.Hspec (Spec, describe, parallel, it) 6 | import Test.Hspec.Expectations.Lifted 7 | import Test.Hspec.Core.Spec (sequential) 8 | import Hasql.Queue.TestUtils 9 | import qualified Hasql.Queue.Internal as I 10 | import Control.Exception as E 11 | import Hasql.Connection 12 | import Data.Typeable 13 | import Data.IORef 14 | import Control.Monad 15 | 16 | data FailedwithDequeue = FailedwithDequeue 17 | deriving (Show, Eq, Typeable) 18 | 19 | instance Exception FailedwithDequeue 20 | 21 | getPayload :: Connection -> D.Value a -> I.PayloadId -> IO (Maybe (I.Payload a)) 22 | getPayload conn decoder payloadId = I.runThrow (I.getPayload decoder payloadId) conn 23 | 24 | spec :: Spec 25 | spec = describe "Hasql.Queue.High.AtLeastOnce" $ parallel $ do 26 | sequential $ aroundAll withSetup $ describe "enqueue/dequeue" $ do 27 | it 
"enqueue nothing gives nothing" $ withConnection $ \conn -> do 28 | enqueue conn E.int4 [] 29 | withDequeue conn D.int4 1 1 pure `shouldReturn` Nothing 30 | 31 | it "enqueue 1 gives 1" $ withConnection $ \conn -> do 32 | enqueue conn E.int4 [1] 33 | withDequeue conn D.int4 1 1 pure `shouldReturn` Just [1] 34 | 35 | it "dequeue give nothing after enqueueing everything" $ withConnection $ \conn -> do 36 | withDequeue conn D.int4 1 1 pure `shouldReturn` Nothing 37 | 38 | it "dequeueing is in FIFO order" $ withConnection $ \conn -> do 39 | enqueue conn E.int4 [1] 40 | enqueue conn E.int4 [2] 41 | withDequeue conn D.int4 1 1 pure `shouldReturn` Just [1] 42 | withDequeue conn D.int4 1 1 pure `shouldReturn` Just [2] 43 | 44 | it "dequeueing a batch of elements works" $ withConnection $ \conn -> do 45 | enqueue conn E.int4 [1, 2, 3] 46 | withDequeue conn D.int4 1 2 pure `shouldReturn` Just [1, 2] 47 | 48 | withDequeue conn D.int4 1 2 pure `shouldReturn` Just [3] 49 | 50 | it "withDequeue fails if a non IOError is thrown" $ withConnection $ \conn -> do 51 | enqueue conn E.int4 [1] 52 | handle (\FailedwithDequeue -> pure Nothing) $ 53 | withDequeue conn D.int4 2 1 $ \_ -> throwIO FailedwithDequeue 54 | 55 | failures conn D.int4 Nothing 1 `shouldReturn` [] 56 | withDequeue conn D.int4 0 1 pure `shouldReturn` Just [1] 57 | 58 | it "withDequeue fails if throws occur and retry is zero" $ withConnection $ \conn -> do 59 | enqueue conn E.int4 [1] 60 | handle (\(_ :: IOError) -> pure Nothing) $ 61 | withDequeue conn D.int4 0 1 $ \_ -> throwIO $ userError "hey" 62 | 63 | [(pId, x)] <- failures conn D.int4 Nothing 1 64 | x `shouldBe` 1 65 | delete conn [pId] 66 | 67 | it "withDequeue succeeds even if the first attempt fails" $ withConnection $ \conn -> do 68 | enqueue conn E.int4 [1] 69 | 70 | ref <- newIORef (0 :: Int) 71 | 72 | withDequeue conn D.int4 1 1 (\_ -> do 73 | count <- readIORef ref 74 | writeIORef ref $ count + 1 75 | when (count < 1) $ throwIO $ userError "hey" 76 | 
pure '!') `shouldReturn` Just '!' 77 | 78 | withDequeue conn D.int4 1 1 pure `shouldReturn` Nothing 79 | readIORef ref `shouldReturn` 2 80 | 81 | it "failures paging works" $ withConnection $ \conn -> do 82 | enqueue conn E.int4 [2] 83 | enqueue conn E.int4 [3] 84 | 85 | handle (\(_ :: IOError) -> pure Nothing) $ 86 | withDequeue conn D.int4 0 1 $ \_ -> throwIO $ userError "fds" 87 | handle (\(_ :: IOError) -> pure Nothing) $ 88 | withDequeue conn D.int4 0 1 $ \_ -> throwIO $ userError "fds" 89 | 90 | [(next, x)] <- failures conn D.int4 Nothing 1 91 | x `shouldBe` 2 92 | fmap (fmap snd) (failures conn D.int4 (Just next) 2) `shouldReturn` [3] 93 | -------------------------------------------------------------------------------- /test/Hasql/Queue/High/AtMostOnceSpec.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.High.AtMostOnceSpec where 2 | import Hasql.Queue.High.AtMostOnce 3 | import qualified Hasql.Encoders as E 4 | import qualified Hasql.Decoders as D 5 | import Test.Hspec (Spec, describe, parallel, it) 6 | import Test.Hspec.Expectations.Lifted 7 | import Test.Hspec.Core.Spec (sequential) 8 | import Hasql.Queue.TestUtils 9 | 10 | spec :: Spec 11 | spec = describe "Hasql.Queue.High.AtMostOnce" $ parallel $ do 12 | sequential $ aroundAll withSetup $ describe "enqueue/dequeue" $ do 13 | it "enqueue nothing gives nothing" $ withConnection $ \conn -> do 14 | enqueue conn E.int4 [] 15 | dequeue conn D.int4 1 `shouldReturn` [] 16 | 17 | it "enqueue 1 gives 1" $ withConnection $ \conn -> do 18 | enqueue conn E.int4 [1] 19 | dequeue conn D.int4 1 `shouldReturn` [1] 20 | 21 | it "dequeue give nothing after enqueueing everything" $ withConnection $ \conn -> do 22 | dequeue conn D.int4 1 `shouldReturn` [] 23 | 24 | it "dequeueing is in FIFO order" $ withConnection $ \conn -> do 25 | enqueue conn E.int4 [1] 26 | enqueue conn E.int4 [2] 27 | dequeue conn D.int4 1 `shouldReturn` [1] 28 | dequeue conn D.int4 1 
-- Fix this to be more of what I would expect
do 44 | enqueue E.int4 [1, 2, 3] 45 | dequeue D.int4 2 `shouldReturn` [1, 2] 46 | 47 | dequeue D.int4 2 `shouldReturn` [3] 48 | -------------------------------------------------------------------------------- /test/Hasql/Queue/Low/AtLeastOnceSpec.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE OverloadedStrings #-} 2 | {-# LANGUAGE RecordWildCards #-} 3 | {-# LANGUAGE ScopedTypeVariables #-} 4 | module Hasql.Queue.Low.AtLeastOnceSpec where 5 | import Control.Concurrent 6 | import Control.Concurrent.STM 7 | import Control.Concurrent.Async 8 | import Control.Exception as E 9 | import Control.Monad 10 | import Data.Aeson 11 | import Data.Function 12 | import Data.IORef 13 | import Data.List 14 | import Hasql.Queue.Low.AtLeastOnce 15 | import Test.Hspec (Spec, describe, it) 16 | import Test.Hspec.Expectations.Lifted 17 | import Data.List.Split 18 | import Data.Text(Text) 19 | import Hasql.Connection 20 | import qualified Hasql.Encoders as E 21 | import qualified Hasql.Decoders as D 22 | import Data.Int 23 | import Data.Typeable 24 | import qualified Hasql.Queue.Internal as I 25 | import Hasql.Queue.Internal (Payload (..)) 26 | import Hasql.Queue.TestUtils 27 | import System.Timeout 28 | 29 | getCount :: Connection -> IO Int64 30 | getCount = I.runThrow I.getCount 31 | 32 | getPayload :: Connection -> D.Value a -> I.PayloadId -> IO (Maybe (I.Payload a)) 33 | getPayload conn decoder payloadId = I.runThrow (I.getPayload decoder payloadId) conn 34 | 35 | channel :: Text 36 | channel = "hey" 37 | 38 | data FailedwithDequeue = FailedwithDequeue 39 | deriving (Show, Eq, Typeable) 40 | 41 | instance Exception FailedwithDequeue 42 | 43 | spec :: Spec 44 | spec = describe "Hasql.Queue.Low.AtLeastOnce" $ aroundAll withSetup $ describe "enqueue/withDequeue" $ do 45 | it "enqueue nothing timesout" $ withConnection $ \conn -> do 46 | enqueue channel conn E.int4 [] 47 | timeout 100000 (withDequeue channel conn D.int4 1 1 pure) 
`shouldReturn` Nothing 48 | 49 | it "enqueue 1 gives 1" $ withConnection $ \conn -> do 50 | enqueue channel conn E.int4 [1] 51 | withDequeue channel conn D.int4 1 1 pure `shouldReturn` [1] 52 | 53 | it "dequeue timesout after enqueueing everything" $ withConnection $ \conn -> do 54 | timeout 100000 (withDequeue channel conn D.int4 1 1 pure) `shouldReturn` Nothing 55 | 56 | it "dequeueing is in FIFO order" $ withConnection $ \conn -> do 57 | enqueue channel conn E.int4 [1] 58 | enqueue channel conn E.int4 [2] 59 | withDequeue channel conn D.int4 1 1 pure `shouldReturn` [1] 60 | withDequeue channel conn D.int4 1 1 pure `shouldReturn` [2] 61 | 62 | it "dequeueing a batch of elements works" $ withConnection $ \conn -> do 63 | enqueue channel conn E.int4 [1, 2, 3] 64 | withDequeue channel conn D.int4 1 2 pure `shouldReturn` [1, 2] 65 | 66 | withDequeue channel conn D.int4 1 1 pure `shouldReturn` [3] 67 | 68 | it "withDequeue blocks until something is enqueued: before" $ withConnection $ \conn -> do 69 | void $ enqueue channel conn E.int4 [1] 70 | res <- withDequeue channel conn D.int4 1 1 pure 71 | res `shouldBe` [1] 72 | 73 | it "withDequeue blocks until something is enqueued: during" $ withConnection $ \conn -> do 74 | afterActionMVar <- newEmptyMVar 75 | beforeNotifyMVar <- newEmptyMVar 76 | 77 | let handlers = I.WithNotifyHandlers 78 | { withNotifyHandlersAfterAction = putMVar afterActionMVar () 79 | , withNotifyHandlersBeforeNotification = takeMVar beforeNotifyMVar 80 | } 81 | 82 | -- This is the definition of IO.dequeue 83 | resultThread <- async $ withDequeueWith @IOError handlers channel conn D.int4 1 1 pure 84 | takeMVar afterActionMVar 85 | 86 | void $ enqueue "hey" conn E.int4 [1] 87 | 88 | putMVar beforeNotifyMVar () 89 | 90 | wait resultThread `shouldReturn` [1] 91 | 92 | it "withDequeue blocks until something is enqueued: after" $ withConnection2 $ \(conn1, conn2) -> do 93 | thread <- async $ withDequeue channel conn1 D.int4 1 1 pure 94 | timeout 100000 
(wait thread) `shouldReturn` Nothing 95 | 96 | enqueue channel conn2 E.int4 [1] 97 | 98 | wait thread `shouldReturn` [1] 99 | 100 | -- TODO redo just using failures 101 | it "withDequeue fails and sets the retries to +1" $ withConnection $ \conn -> do 102 | enqueue channel conn E.int4 [1] 103 | handle (\(_ :: IOError) -> pure ()) $ withDequeue channel conn D.int4 0 1 $ \_ -> throwIO $ userError "hey" 104 | xs <- failures conn D.int4 Nothing 1 105 | 106 | map snd xs `shouldBe` [1] 107 | 108 | it "withDequeue succeeds even if the first attempt fails" $ withConnection $ \conn -> do 109 | [payloadId] <- I.runThrow (I.enqueuePayload E.int4 [1]) conn 110 | 111 | ref <- newIORef (0 :: Int) 112 | 113 | withDequeueWith @FailedwithDequeue mempty channel conn D.int4 1 1 (\_ -> do 114 | count <- readIORef ref 115 | writeIORef ref $ count + 1 116 | when (count < 1) $ throwIO FailedwithDequeue 117 | pure '!') `shouldReturn` '!' 118 | 119 | getPayload conn D.int4 payloadId `shouldReturn` Nothing 120 | 121 | it "enqueues and dequeues concurrently withDequeue" $ \testDB -> do 122 | let withPool' = flip withConnection testDB 123 | elementCount = 1000 :: Int 124 | expected = [0 .. elementCount - 1] 125 | 126 | ref <- newTVarIO [] 127 | 128 | loopThreads <- replicateM 35 $ async $ withPool' $ \c -> fix $ \next -> do 129 | lastCount <- withDequeue channel c D.int4 1 1 $ \[x] -> do 130 | atomically $ do 131 | xs <- readTVar ref 132 | writeTVar ref $ x : xs 133 | return $ length xs + 1 134 | 135 | when (lastCount < elementCount) next 136 | 137 | forM_ (chunksOf (elementCount `div` 11) expected) $ \xs -> forkIO $ void $ withPool' $ \c -> 138 | forM_ xs $ \i -> enqueue channel c E.int4 [fromIntegral i] 139 | 140 | _ <- waitAnyCancel loopThreads 141 | xs <- atomically $ readTVar ref 142 | let Just decoded = mapM (decode . 
encode) xs 143 | sort decoded `shouldBe` sort expected 144 | 145 | it "enqueue returns a PayloadId that cooresponds to the entry it added" $ withConnection $ \conn -> do 146 | [payloadId] <- I.runThrow (I.enqueuePayload E.int4 [1]) conn 147 | Just actual <- getPayload conn D.int4 payloadId 148 | 149 | pValue actual `shouldBe` 1 150 | -------------------------------------------------------------------------------- /test/Hasql/Queue/Low/AtMostOnceSpec.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.Low.AtMostOnceSpec where 2 | import Hasql.Queue.Low.AtMostOnce 3 | import qualified Hasql.Encoders as E 4 | import qualified Hasql.Decoders as D 5 | import Test.Hspec (Spec, describe, parallel, it) 6 | import Test.Hspec.Expectations.Lifted 7 | import Test.Hspec.Core.Spec (sequential) 8 | import Hasql.Queue.TestUtils 9 | import Data.Text(Text) 10 | import System.Timeout 11 | import Control.Concurrent.Async 12 | 13 | channel :: Text 14 | channel = "channel" 15 | 16 | spec :: Spec 17 | spec = describe "Hasql.Queue.Low.AtMostOnce" $ parallel $ do 18 | sequential $ aroundAll withSetup $ describe "enqueue/dequeue" $ do 19 | it "enqueue nothing timesout" $ withConnection $ \conn -> do 20 | enqueue channel conn E.int4 [] 21 | timeout 100000 (dequeue channel conn D.int4 1) `shouldReturn` Nothing 22 | 23 | it "enqueue 1 gives 1" $ withConnection $ \conn -> do 24 | enqueue channel conn E.int4 [1] 25 | dequeue channel conn D.int4 1 `shouldReturn` [1] 26 | 27 | it "dequeue timesout after enqueueing everything" $ withConnection $ \conn -> do 28 | timeout 100000 (dequeue channel conn D.int4 1) `shouldReturn` Nothing 29 | 30 | it "dequeueing is in FIFO order" $ withConnection $ \conn -> do 31 | enqueue channel conn E.int4 [1] 32 | enqueue channel conn E.int4 [2] 33 | dequeue channel conn D.int4 1 `shouldReturn` [1] 34 | dequeue channel conn D.int4 1 `shouldReturn` [2] 35 | 36 | it "dequeueing a batch of elements works" $ 
withConnection $ \conn -> do 37 | enqueue channel conn E.int4 [1, 2, 3] 38 | dequeue channel conn D.int4 2 `shouldReturn` [1, 2] 39 | 40 | dequeue channel conn D.int4 2 `shouldReturn` [3] 41 | 42 | it "dequeueing blocks until something is enqueued" $ withConnection2 $ \(conn1, conn2) -> do 43 | thread <- async $ dequeue channel conn1 D.int4 1 44 | timeout 100000 (wait thread) `shouldReturn` Nothing 45 | 46 | enqueue channel conn2 E.int4 [1] 47 | 48 | wait thread `shouldReturn` [1] 49 | -------------------------------------------------------------------------------- /test/Hasql/Queue/Low/ExactlyOnceSpec.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.Low.ExactlyOnceSpec where 2 | import Control.Exception as E 3 | import Hasql.Queue.Low.ExactlyOnce 4 | import qualified Hasql.Queue.Internal as I 5 | import Test.Hspec (Spec, describe, it) 6 | import Test.Hspec.Expectations.Lifted 7 | import qualified Hasql.Encoders as E 8 | import qualified Hasql.Decoders as D 9 | import Data.Typeable 10 | import Data.Int 11 | import Hasql.Queue.TestUtils 12 | import System.Timeout 13 | import Control.Concurrent.Async 14 | import Hasql.Queue.Internal (runThrow) 15 | import Control.Concurrent 16 | import Control.Monad 17 | import Data.Text (Text) 18 | 19 | -- Fix this to be more of what I would expec 20 | 21 | newtype TooManyRetries = TooManyRetries Int64 22 | deriving (Show, Eq, Typeable) 23 | 24 | instance Exception TooManyRetries 25 | 26 | channel :: Text 27 | channel = "channel" 28 | 29 | spec :: Spec 30 | spec = describe "Hasql.Queue.High.ExactlyOnce" $ do 31 | aroundAll withSetup $ describe "enqueue/withDequeue" $ do 32 | it "enqueue nothing timesout" $ withConnection $ \conn -> do 33 | runThrow (enqueue channel E.int4 []) conn 34 | timeout 100000 (withDequeue channel conn D.int4 1 id) `shouldReturn` Nothing 35 | 36 | it "enqueue 1 gives 1" $ withConnection $ \conn -> do 37 | runThrow (enqueue channel E.int4 [1]) conn 38 
| withDequeue channel conn D.int4 1 id `shouldReturn` [1] 39 | 40 | it "dequeue timesout after enqueueing everything" $ withConnection $ \conn -> do 41 | timeout 100000 (withDequeue channel conn D.int4 1 id) `shouldReturn` Nothing 42 | 43 | it "dequeueing is in FIFO order" $ withConnection $ \conn -> do 44 | runThrow (enqueue channel E.int4 [1]) conn 45 | runThrow (enqueue channel E.int4 [2]) conn 46 | withDequeue channel conn D.int4 1 id `shouldReturn` [1] 47 | withDequeue channel conn D.int4 1 id `shouldReturn` [2] 48 | 49 | it "dequeueing a batch of elements works" $ withConnection $ \conn -> do 50 | runThrow (enqueue channel E.int4 [1, 2, 3]) conn 51 | withDequeue channel conn D.int4 1 id `shouldReturn` [1, 2] 52 | 53 | withDequeue channel conn D.int4 1 id `shouldReturn` [3] 54 | 55 | it "withDequeue blocks until something is enqueued: before" $ withConnection $ \conn -> do 56 | void $ runThrow (enqueue channel E.int4 [1]) conn 57 | res <- withDequeue channel conn D.int4 1 id 58 | res `shouldBe` [1] 59 | 60 | it "withDequeue blocks until something is enqueued: during" $ withConnection $ \conn -> do 61 | afterActionMVar <- newEmptyMVar 62 | beforeNotifyMVar <- newEmptyMVar 63 | 64 | let handlers = I.WithNotifyHandlers 65 | { withNotifyHandlersAfterAction = putMVar afterActionMVar () 66 | , withNotifyHandlersBeforeNotification = takeMVar beforeNotifyMVar 67 | } 68 | 69 | -- This is the definition of IO.dequeue 70 | resultThread <- async $ withDequeueWith handlers channel conn D.int4 1 id 71 | takeMVar afterActionMVar 72 | 73 | void $ runThrow (enqueue "hey" E.int4 [1]) conn 74 | 75 | putMVar beforeNotifyMVar () 76 | 77 | wait resultThread `shouldReturn` [1] 78 | 79 | it "withDequeue blocks until something is enqueued: after" $ withConnection2 $ \(conn1, conn2) -> do 80 | thread <- async $ withDequeue channel conn1 D.int4 1 id 81 | timeout 100000 (wait thread) `shouldReturn` Nothing 82 | 83 | runThrow (enqueue channel E.int4 [1]) conn2 84 | 85 | wait thread 
`shouldReturn` [1] 86 | -------------------------------------------------------------------------------- /test/Hasql/Queue/MigrateSpec.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.MigrateSpec where 2 | import Hasql.Queue.Migrate 3 | import Hasql.Queue.TestUtils 4 | import Test.Hspec (Spec, describe, parallel, it, shouldReturn) 5 | import Test.Hspec.Core.Spec (sequential) 6 | import Control.Monad.IO.Class 7 | import qualified Hasql.Decoders as D 8 | import Hasql.Session 9 | import Hasql.Statement 10 | import Data.String.Here.Uninterpolated 11 | import qualified Hasql.Queue.Internal as I 12 | 13 | 14 | spec :: Spec 15 | spec = describe "Hasql.Queue.High.ExactlyOnce" $ parallel $ do 16 | sequential $ aroundAll withSetup $ describe "basic" $ do 17 | it "is okay to migrate multiple times" $ withConnection $ \conn -> 18 | liftIO $ migrate conn "int4" 19 | 20 | it "drops all items" $ withConnection $ \conn -> do 21 | teardown conn 22 | 23 | let theQuery = [here| 24 | SELECT EXISTS ( 25 | SELECT FROM information_schema.tables 26 | WHERE table_schema = 'public' 27 | AND table_name = 'payloads' 28 | ); 29 | |] 30 | 31 | decoder = D.singleRow $ D.column $ D.nonNullable D.bool 32 | I.runThrow (statement () $ Statement theQuery mempty decoder True) conn `shouldReturn` False 33 | 34 | let theQuery' = [here| 35 | SELECT EXISTS ( 36 | SELECT FROM pg_catalog.pg_class c 37 | JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace 38 | WHERE n.nspname = 'public' 39 | AND c.relname = 'modified_index' 40 | ) 41 | |] 42 | 43 | decoder' = D.singleRow $ D.column $ D.nonNullable D.bool 44 | I.runThrow (statement () $ Statement theQuery' mempty decoder' True) conn `shouldReturn` False 45 | 46 | let theQuery'' = [here| 47 | SELECT EXISTS ( 48 | SELECT 1 49 | FROM pg_type t 50 | JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace 51 | WHERE t.typname = 'state_t' 52 | AND n.nspname = 'public' 53 | ) 54 | |] 55 | 56 | 
decoder'' = D.singleRow $ D.column $ D.nonNullable D.bool 57 | I.runThrow (statement () $ Statement theQuery'' mempty decoder'' True) conn `shouldReturn` False 58 | -------------------------------------------------------------------------------- /test/Hasql/Queue/TestUtils.hs: -------------------------------------------------------------------------------- 1 | module Hasql.Queue.TestUtils where 2 | import qualified Data.ByteString.Char8 as BSC 3 | import Control.Concurrent.Async 4 | import Database.Postgres.Temp as Temp 5 | import Test.Hspec 6 | import Control.Exception as E 7 | import Data.Pool 8 | import Hasql.Connection 9 | import Hasql.Session 10 | import qualified Data.ByteString.Base64.URL as Base64 11 | import Control.Concurrent 12 | import Data.IORef 13 | import Data.Foldable 14 | import Control.Monad ((<=<)) 15 | import Crypto.Hash.SHA1 (hash) 16 | import Hasql.Queue.Migrate 17 | 18 | aroundAll :: forall a. ((a -> IO ()) -> IO ()) -> SpecWith a -> Spec 19 | aroundAll withFunc specWith = do 20 | (var, stopper, asyncer) <- runIO $ 21 | (,,) <$> newEmptyMVar <*> newEmptyMVar <*> newIORef Nothing 22 | let theStart :: IO a 23 | theStart = do 24 | 25 | thread <- async $ do 26 | withFunc $ \x -> do 27 | putMVar var x 28 | takeMVar stopper 29 | pure $ error "Don't evaluate this" 30 | 31 | writeIORef asyncer $ Just thread 32 | 33 | either pure pure =<< (wait thread `race` takeMVar var) 34 | 35 | theStop :: a -> IO () 36 | theStop _ = do 37 | putMVar stopper () 38 | traverse_ cancel =<< readIORef asyncer 39 | 40 | beforeAll theStart $ afterAll theStop $ specWith 41 | 42 | withConn :: Temp.DB -> (Connection -> IO a) -> IO a 43 | withConn db f = do 44 | let connStr = toConnectionString db 45 | E.bracket (either (throwIO . userError . 
show) pure =<< acquire connStr) release f 46 | 47 | withSetup :: (Pool Connection -> IO ()) -> IO () 48 | withSetup f = either throwIO pure <=< withDbCache $ \dbCache -> do 49 | migratedConfig <- either throwIO pure =<< 50 | cacheAction 51 | (("~/.tmp-postgres/" <>) . BSC.unpack . Base64.encode . hash 52 | $ BSC.pack $ migrationQueryString "int4") 53 | (flip withConn $ flip migrate "int4") 54 | (verboseConfig <> cacheConfig dbCache) 55 | withConfig migratedConfig $ \db -> do 56 | f =<< createPool 57 | (either (throwIO . userError . show) pure =<< acquire (toConnectionString db)) 58 | release 59 | 2 60 | 60 61 | 50 62 | 63 | withConnection :: (Connection -> IO ()) -> Pool Connection -> IO () 64 | withConnection = flip withResource 65 | 66 | withConnection2 :: ((Connection, Connection) -> IO ()) -> Pool Connection -> IO () 67 | withConnection2 f pool = withResource pool $ \conn1 -> 68 | withResource pool $ \conn2 -> f (conn1, conn2) 69 | 70 | runImplicitTransaction :: Pool Connection -> Session a -> IO a 71 | runImplicitTransaction pool action = do 72 | let wrappedAction = do 73 | r <- action 74 | pure r 75 | withResource pool $ \conn -> 76 | either (throwIO . userError . show) pure =<< run wrappedAction conn 77 | 78 | runReadCommitted :: Pool Connection -> Session a -> IO a 79 | runReadCommitted = flip withReadCommitted 80 | 81 | withReadCommitted :: Session a -> Pool Connection -> IO a 82 | withReadCommitted action pool = do 83 | let wrappedAction = do 84 | sql "BEGIN" 85 | r <- action 86 | sql "ROLLBACK" 87 | pure r 88 | withResource pool $ \conn -> 89 | either (throwIO . userError . 
show) pure =<< run wrappedAction conn
 90 | --------------------------------------------------------------------------------
 /test/Main.hs:
 --------------------------------------------------------------------------------
 1 | import Test.Hspec
 2 | import Hasql.Queue.High.ExactlyOnceSpec as HE
 3 | import Hasql.Queue.High.AtLeastOnceSpec as HL
 4 | import Hasql.Queue.High.AtMostOnceSpec as HM
 5 | import Hasql.Queue.Low.AtLeastOnceSpec as LL
 6 | import Hasql.Queue.Low.AtMostOnceSpec as LM
 7 | -- Fixed: previously this re-imported Low.AtMostOnceSpec, so the Low.ExactlyOnce
 7 | -- suite never ran and the Low.AtMostOnce suite ran twice under two aliases.
 7 | import Hasql.Queue.Low.ExactlyOnceSpec as LE
 8 | import Hasql.Queue.MigrateSpec as M
 9 |
10 |
11 | -- | Test entry point: runs every spec suite exactly once.
11 | main :: IO ()
12 | main = hspec $ do
13 |   HE.spec
14 |   HM.spec
15 |   HL.spec
16 |   LL.spec
17 |   LM.spec
18 |   LE.spec
19 |   M.spec
20 |
--------------------------------------------------------------------------------