├── .github ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE ├── README.md ├── Setup.hs ├── example └── Main.hs ├── haskey.cabal ├── src-unix └── FileIO.hs ├── src └── Database │ └── Haskey │ ├── Alloc │ ├── Concurrent.hs │ ├── Concurrent │ │ └── Internal │ │ │ ├── Database.hs │ │ │ ├── Environment.hs │ │ │ ├── FreePages │ │ │ ├── Query.hs │ │ │ ├── Save.hs │ │ │ └── Tree.hs │ │ │ ├── Meta.hs │ │ │ ├── Monad.hs │ │ │ └── Overflow.hs │ └── Transaction.hs │ ├── Store.hs │ ├── Store │ ├── Class.hs │ ├── File.hs │ ├── InMemory.hs │ └── Page.hs │ └── Utils │ ├── IO.hs │ ├── Monad.hs │ ├── Monad │ └── Catch.hs │ ├── RLock.hs │ └── STM │ └── Map.hs ├── stack.yaml └── tests ├── Integration.hs ├── Integration ├── CreateAndOpen.hs └── WriteOpenRead │ ├── Concurrent.hs │ └── Transactions.hs ├── Properties.hs └── Properties ├── Store └── Page.hs └── Utils.hs /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - [ ] includes tests 2 | - [ ] ready for review 3 | - [ ] reviewed by @username\_here 4 | 5 | 6 | #### What does this PR do? 7 | #### Where should the reviewer start? 8 | #### How should this be manually tested? 9 | #### Any background context you want to provide? 10 | #### What are the relevant tickets? 
11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | # Trigger the workflow on push or pull request, but only for the main branch 4 | on: 5 | - push 6 | - pull_request 7 | 8 | jobs: 9 | cabal: 10 | name: cabal ${{ matrix.cabal }} / ${{ matrix.os }} / ghc ${{ matrix.ghc }} 11 | 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | os: 16 | - ubuntu-latest 17 | - macOS-latest 18 | cabal: 19 | - "3.2" 20 | ghc: 21 | - "8.6" 22 | - "8.8" 23 | - "8.10" 24 | exclude: 25 | - os: macOS-latest 26 | ghc: 8.8.4 27 | - os: macOS-latest 28 | ghc: 8.6.5 29 | 30 | runs-on: ${{ matrix.os }} 31 | 32 | steps: 33 | - uses: actions/checkout@v2 34 | 35 | - uses: haskell/actions/setup@v1.1.7 36 | id: setup-haskell-cabal 37 | name: Setup Haskell 38 | with: 39 | ghc-version: ${{ matrix.ghc }} 40 | cabal-version: ${{ matrix.cabal }} 41 | 42 | - name: Configure 43 | run: | 44 | cabal configure --enable-tests --enable-benchmarks --test-show-details=direct 45 | 46 | - uses: actions/cache@v2 47 | name: Cache ~/.cabal/store and build files 48 | with: 49 | path: | 50 | ${{ steps.setup-haskell-cabal.outputs.cabal-store }} 51 | dist-newstyle 52 | key: ${{ runner.os }}-${{ matrix.ghc }}-cabal-${{ github.sha }} 53 | restore-keys: | 54 | ${{ runner.os }}-${{ matrix.ghc }}-cabal 55 | 56 | - name: Install dependencies 57 | run: | 58 | cabal build all --only-dependencies 59 | 60 | - name: Build 61 | run: | 62 | cabal build all 63 | 64 | - name: Test 65 | run: | 66 | cabal test all 67 | 68 | stack: 69 | name: stack / ${{ matrix.os }} 70 | 71 | strategy: 72 | fail-fast: false 73 | matrix: 74 | os: 75 | - ubuntu-latest 76 | - macOS-latest 77 | 78 | runs-on: 79 | - ${{ matrix.os }} 80 | 81 | steps: 82 | - uses: actions/checkout@v2 83 | 84 | - uses: haskell/actions/setup@v1.1.7 85 | with: 86 | enable-stack: true 87 | stack-version: latest 88 | 89 | 
- uses: actions/cache@v2 90 | name: Cache ~/.stack 91 | with: 92 | path: | 93 | ~/.stack 94 | key: ${{ runner.os }}-${{ matrix.ghc }}-stack-${{ github.sha }} 95 | restore-keys: | 96 | ${{ runner.os }}-${{ matrix.ghc }}-stack- 97 | 98 | - name: Install dependencies 99 | run: | 100 | stack build --test --bench --no-run-tests --no-run-benchmarks --only-dependencies 101 | 102 | - name: Build 103 | run: | 104 | stack build --test --bench --no-run-tests --no-run-benchmarks 105 | 106 | - name: Test 107 | run: | 108 | stack test 109 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.chi 2 | *.chs.h 3 | *.dyn_hi 4 | *.dyn_o 5 | *.hi 6 | *.hp 7 | *.o 8 | *.orig 9 | *.prof 10 | *.prof.html 11 | *~ 12 | .DS_Store 13 | .cabal-sandbox/ 14 | .hpc 15 | .hsenv 16 | .virtualenv 17 | \#*# 18 | cabal-dev 19 | cabal.config 20 | cabal.sandbox.config 21 | dist 22 | dist-newstyle 23 | .stack-work 24 | .*.sw[a-z] 25 | *.tix 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, Henri Verroken, Steven Keuchel 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are 7 | met: 8 | 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above 13 | copyright notice, this list of conditions and the following 14 | disclaimer in the documentation and/or other materials provided 15 | with the distribution. 
16 | 17 | * Neither the name of Henri Verroken or Steven Keuchel nor the 18 | names of other contributors may be used to endorse or promote 19 | products derived from this software without specific prior 20 | written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | haskey 2 | ====== 3 | 4 | [![CI](https://github.com/haskell-haskey/haskey/actions/workflows/ci.yml/badge.svg)](https://github.com/haskell-haskey/haskey/actions/workflows/ci.yml) 5 | [![Hackage](https://img.shields.io/hackage/v/haskey.svg?maxAge=2592000)](https://hackage.haskell.org/package/haskey) 6 | [![Stackage Nightly](http://stackage.org/package/haskey/badge/nightly)](http://stackage.org/nightly/package/haskey) 7 | [![Stackage LTS](http://stackage.org/package/haskey/badge/lts)](http://stackage.org/lts/package/haskey) 8 | 9 | Haskey is a transactional, ACID compliant, embeddable, scalable key-value 10 | store written entirely in Haskell. It was developed as part of the [Summer of Haskell 2017][soh2017] project. 
11 | 12 | [soh2017]: https://summer.haskell.org/news/2017-05-24-accepted-projects.html 13 | 14 | ## Tutorial 15 | 16 | A full tutorial can be [found in the haskey-mtl library](https://github.com/haskell-haskey/haskey-mtl/blob/master/docs/tutorial.md), along with a [full code example](https://github.com/haskell-haskey/haskey-mtl/tree/master/example). 17 | 18 | ## Historical blog posts 19 | Some blog posts have been written on Haskey's design an internals. These give an insight in the inner workings of Haskey, but the used APIs might be a bit outdated. 20 | 21 | - An introductory blog post on Haskey can be found [here][introduction]. 22 | - An blog post on user-defined schemas and using the `HaskeyT` monad can be found [here][haskey-mtl-post] 23 | 24 | [introduction]: https://hverr.github.io/posts/2017-08-24-introducing-haskey.html 25 | [haskey-mtl-post]: https://hverr.github.io/posts/2017-09-14-haskey-user-defined-schemas-and-monad-transformers.html 26 | -------------------------------------------------------------------------------- /Setup.hs: -------------------------------------------------------------------------------- 1 | import Distribution.Simple 2 | main = defaultMain 3 | -------------------------------------------------------------------------------- /example/Main.hs: -------------------------------------------------------------------------------- 1 | module Main where 2 | 3 | import Control.Concurrent (threadDelay) 4 | import Control.Concurrent.Async (async, wait) 5 | import Control.Monad (void, replicateM) 6 | import Control.Monad.Catch (bracket_, finally) 7 | 8 | import Data.BTree.Impure (Tree) 9 | import Data.ByteString (ByteString) 10 | import Data.Int (Int32) 11 | import Data.Text.Encoding (encodeUtf8) 12 | import qualified Data.BTree.Impure as B 13 | import qualified Data.Text as Text 14 | 15 | import Database.Haskey.Alloc.Concurrent (ConcurrentDb, 16 | ConcurrentHandles, 17 | concurrentHandles, 18 | lockConcurrentDb, 19 | unlockConcurrentDb, 20 | 
openConcurrentDb, 21 | createConcurrentDb, 22 | transact_, 23 | transactReadOnly, 24 | commit_) 25 | import Database.Haskey.Store.File (FileStoreT, runFileStoreT, defFileStoreConfig) 26 | import Database.Haskey.Store.InMemory (MemoryStoreT, MemoryFiles, newEmptyMemoryStore, 27 | runMemoryStoreT, defMemoryStoreConfig) 28 | 29 | import System.Directory (removeDirectoryRecursive) 30 | import System.Random (randomIO) 31 | 32 | concurrency :: Integral a => a 33 | concurrency = 100 34 | 35 | type Root = Tree Int32 ByteString 36 | 37 | main :: IO () 38 | main = do 39 | inMemoryMain root 40 | fileMain root `finally` delRoot 41 | where 42 | root = "example-database.haskey" 43 | delRoot = removeDirectoryRecursive root 44 | 45 | inMemoryMain :: FilePath -> IO () 46 | inMemoryMain root = do 47 | store <- newEmptyMemoryStore 48 | db <- openOrCreate store 49 | 50 | writers <- mapM (async . writer store db) [1..concurrency] 51 | readers <- replicateM concurrency . async $ do 52 | delay <- randomIO 53 | reader store db (delay `rem` 5000) 54 | mapM_ wait writers 55 | mapM_ wait readers 56 | putStrLn "InMemory: done" 57 | where 58 | writer :: MemoryFiles FilePath 59 | -> ConcurrentDb Root 60 | -> Int32 61 | -> IO () 62 | writer store db i = 63 | runDatabase store $ transact_ tx db 64 | where 65 | bs = encodeUtf8 $ Text.pack (show i) 66 | 67 | tx tree = B.insert i bs tree >>= commit_ 68 | 69 | reader :: MemoryFiles FilePath 70 | -> ConcurrentDb Root 71 | -> Int 72 | -> IO () 73 | reader files db delay = void $ replicateM 10 $ do 74 | threadDelay delay 75 | runDatabase files $ transactReadOnly B.toList db 76 | 77 | openOrCreate :: MemoryFiles FilePath 78 | -> IO (ConcurrentDb Root) 79 | openOrCreate store = runDatabase store $ do 80 | maybeDb <- openConcurrentDb handles 81 | case maybeDb of 82 | Nothing -> createConcurrentDb handles B.empty 83 | Just db -> return db 84 | 85 | runDatabase :: MemoryFiles FilePath 86 | -> MemoryStoreT FilePath m a 87 | -> m a 88 | runDatabase files 
action = runMemoryStoreT action defMemoryStoreConfig files 89 | 90 | handles :: ConcurrentHandles 91 | handles = concurrentHandles root 92 | 93 | fileMain :: FilePath -> IO () 94 | fileMain root = bracket_ (runDatabase $ lockConcurrentDb handles) 95 | (runDatabase $ unlockConcurrentDb handles) $ do 96 | 97 | db <- openOrCreate 98 | writers <- mapM (async . writer db) [1..concurrency] 99 | readers <- replicateM concurrency . async $ do 100 | delay <- randomIO 101 | reader db (delay `rem` 5000) 102 | mapM_ wait writers 103 | mapM_ wait readers 104 | putStrLn "File: done" 105 | where 106 | writer :: ConcurrentDb Root 107 | -> Int32 108 | -> IO () 109 | writer db i = 110 | runDatabase $ transact_ tx db 111 | where 112 | bs = encodeUtf8 $ Text.pack (show i) 113 | 114 | tx tree = B.insert i bs tree >>= commit_ 115 | 116 | reader :: ConcurrentDb Root 117 | -> Int 118 | -> IO () 119 | reader db delay = void $ replicateM 10 $ do 120 | threadDelay delay 121 | runDatabase $ transactReadOnly B.toList db 122 | 123 | openOrCreate :: IO (ConcurrentDb Root) 124 | openOrCreate = runDatabase $ do 125 | maybeDb <- openConcurrentDb handles 126 | case maybeDb of 127 | Nothing -> createConcurrentDb handles B.empty 128 | Just db -> return db 129 | 130 | runDatabase :: Monad m 131 | => FileStoreT FilePath m a 132 | -> m a 133 | runDatabase action = runFileStoreT action defFileStoreConfig 134 | 135 | handles :: ConcurrentHandles 136 | handles = concurrentHandles root 137 | -------------------------------------------------------------------------------- /haskey.cabal: -------------------------------------------------------------------------------- 1 | name: haskey 2 | version: 0.3.1.0 3 | synopsis: A transactional, ACID compliant, embeddable key-value store. 4 | description: 5 | Haskey is a transactional, ACID compliant, embeddable, scalable key-value 6 | store written entirely in Haskell. 7 | . 
8 | For more information on how to use this package, visit 9 | 10 | homepage: https://github.com/haskell-haskey 11 | license: BSD3 12 | license-file: LICENSE 13 | author: Henri Verroken, Steven Keuchel 14 | maintainer: steven.keuchel@gmail.com 15 | copyright: Copyright (c) 2017, Henri Verroken, Steven Keuchel 16 | category: Database 17 | build-type: Simple 18 | cabal-version: >=1.10 19 | 20 | extra-source-files: README.md 21 | 22 | library 23 | exposed-modules: 24 | Database.Haskey.Alloc.Concurrent 25 | Database.Haskey.Alloc.Concurrent.Internal.Database 26 | Database.Haskey.Alloc.Concurrent.Internal.Environment 27 | Database.Haskey.Alloc.Concurrent.Internal.FreePages.Query 28 | Database.Haskey.Alloc.Concurrent.Internal.FreePages.Save 29 | Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree 30 | Database.Haskey.Alloc.Concurrent.Internal.Meta 31 | Database.Haskey.Alloc.Concurrent.Internal.Monad 32 | Database.Haskey.Alloc.Concurrent.Internal.Overflow 33 | Database.Haskey.Alloc.Transaction 34 | Database.Haskey.Store 35 | Database.Haskey.Store.Class 36 | Database.Haskey.Store.File 37 | Database.Haskey.Store.InMemory 38 | Database.Haskey.Store.Page 39 | 40 | other-modules: 41 | Database.Haskey.Utils.IO 42 | Database.Haskey.Utils.Monad 43 | Database.Haskey.Utils.Monad.Catch 44 | Database.Haskey.Utils.RLock 45 | Database.Haskey.Utils.STM.Map 46 | FileIO 47 | 48 | other-extensions: 49 | DataKinds 50 | DeriveFoldable 51 | DeriveFunctor 52 | DeriveTraversable 53 | GADTs 54 | KindSignatures 55 | MultiWayIf 56 | ScopedTypeVariables 57 | StandaloneDeriving 58 | 59 | build-depends: 60 | base >=4.7 && <5, 61 | binary >=0.6 && <0.9 || >0.9 && <1, 62 | bytestring >=0.10 && <1, 63 | containers >=0.5 && <1, 64 | directory >=1.2 && <2, 65 | exceptions >=0.8.3 && <0.11, 66 | filepath >=1.4 && <2, 67 | focus >=0.1.2 && <0.2 || >= 1.0 && < 1.1, 68 | haskey-btree >=0.3.0.0 && <0.4, 69 | list-t >=0.2 && <2, 70 | lz4 >=0.2 && <1, 71 | mtl >=2.1 && <3, 72 | semigroups >=0.12 && <1, 73 | 
stm >=2.1 && <3, 74 | stm-containers >=0.2 && <1 || >= 1.1 && < 1.2, 75 | transformers >=0.3 && <1, 76 | unix >=2.7.1.0 && <3, 77 | xxhash-ffi >=0.1.0.1 && <1, 78 | hashable 79 | 80 | default-language: Haskell2010 81 | ghc-options: -Wall 82 | hs-source-dirs: src 83 | hs-source-dirs: src-unix 84 | 85 | test-suite haskey-properties 86 | main-is: Properties.hs 87 | type: exitcode-stdio-1.0 88 | other-modules: 89 | Properties.Store.Page 90 | Properties.Utils 91 | 92 | build-depends: 93 | base >=4.7 && <5, 94 | binary >=0.6 && <0.9 || >0.9 && <1, 95 | bytestring >=0.10 && <1, 96 | containers >=0.5 && <1, 97 | vector >=0.10 && <1, 98 | 99 | HUnit >=1.3 && <2, 100 | QuickCheck >=2 && <3, 101 | test-framework >=0.8 && <1, 102 | test-framework-hunit >=0.3 && <1, 103 | test-framework-quickcheck2 >=0.3 && <1, 104 | haskey, 105 | haskey-btree >=0.2 && <1 106 | 107 | default-language: Haskell2010 108 | ghc-options: -Wall 109 | hs-source-dirs: tests 110 | 111 | test-suite haskey-integration 112 | main-is: Integration.hs 113 | type: exitcode-stdio-1.0 114 | other-modules: 115 | Integration.CreateAndOpen 116 | Integration.WriteOpenRead.Concurrent 117 | Integration.WriteOpenRead.Transactions 118 | 119 | build-depends: 120 | base >=4.7 && <5, 121 | binary >=0.6 && <0.9 || >0.9 && <1, 122 | bytestring >=0.10 && <1, 123 | containers >=0.5 && <1, 124 | directory >=1.2 && <2, 125 | exceptions >=0.8.3 && <0.11, 126 | mtl >=2.1 && <3, 127 | transformers >=0.3 && <1, 128 | temporary >=1.2 && <1.4, 129 | vector >=0.10 && <1, 130 | 131 | HUnit >=1.3 && <2, 132 | QuickCheck >=2 && <3, 133 | test-framework >=0.8 && <1, 134 | test-framework-hunit >=0.3 && <1, 135 | test-framework-quickcheck2 >=0.3 && <1, 136 | haskey, 137 | haskey-btree >=0.2 && <1 138 | 139 | default-language: Haskell2010 140 | ghc-options: -Wall 141 | hs-source-dirs: tests 142 | 143 | test-suite haskey-example 144 | type: exitcode-stdio-1.0 145 | hs-source-dirs: example 146 | main-is: Main.hs 147 | build-depends: 148 | base 
>= 4.7 && <5, 149 | haskey, 150 | haskey-btree, 151 | async >=2.1 && <3, 152 | bytestring >=0.6 && <0.9 || >0.9 && <1, 153 | directory >=1.2 && <2, 154 | exceptions >=0.8.3 && <0.11, 155 | random >=1.1 && <2, 156 | text >=1.2 && <2 157 | 158 | ghc-options: -threaded -rtsopts -with-rtsopts=-N -Wall 159 | default-language: Haskell2010 160 | 161 | source-repository head 162 | type: git 163 | location: https://github.com/haskell-haskey/haskey 164 | -------------------------------------------------------------------------------- /src-unix/FileIO.hs: -------------------------------------------------------------------------------- 1 | -- | Module exporting some low level file primitives for Unix. 2 | -- 3 | -- This source file was taken from acid-state-0.14.3, and slightly modified. 4 | module FileIO ( 5 | FHandle 6 | , openReadWrite 7 | , write 8 | , read 9 | , flush 10 | , close 11 | , seek 12 | , setFileSize 13 | , getFileSize 14 | , PrefixLock 15 | , prefixLockFromPrefix 16 | , obtainPrefixLock 17 | , releasePrefixLock 18 | ) where 19 | 20 | import Prelude hiding (read) 21 | import qualified Prelude as P 22 | 23 | import Control.Applicative ((<$>)) 24 | import Control.Exception (SomeException(..), throw, try) 25 | import Control.Monad (void) 26 | 27 | import Data.Maybe (listToMaybe) 28 | import Data.Word (Word8, Word64) 29 | 30 | import Foreign (Ptr) 31 | 32 | import System.Directory (createDirectoryIfMissing, removeFile) 33 | import System.FilePath 34 | import System.IO 35 | import System.Posix (Fd, 36 | openFd, 37 | fdReadBuf, 38 | fdWriteBuf, 39 | fdToHandle, 40 | fdSeek, 41 | setFdSize, 42 | fileSynchronise, 43 | closeFd, 44 | OpenMode(ReadWrite), 45 | exclusive, trunc, 46 | defaultFileFlags, 47 | stdFileMode) 48 | import System.Posix.Process (getProcessID) 49 | import System.Posix.Signals (nullSignal, signalProcess) 50 | import System.Posix.Types (ProcessID) 51 | import qualified System.IO.Error as SE 52 | 53 | 54 | newtype PrefixLock = PrefixLock FilePath 55 | 
56 | prefixLockFromPrefix :: FilePath -> PrefixLock 57 | prefixLockFromPrefix = PrefixLock . (++ ".lock") 58 | 59 | newtype FHandle = FHandle Fd 60 | 61 | -- | Open the specified file in read-write mode. 62 | openReadWrite :: FilePath -> IO FHandle 63 | openReadWrite filename = 64 | FHandle <$> openFd filename ReadWrite (Just stdFileMode) defaultFileFlags 65 | 66 | -- | Write **at most** the specified amount of bytes to a handle. 67 | write :: FHandle -> Ptr Word8 -> Word64 -> IO Word64 68 | write (FHandle fd) data' length' = 69 | fmap fromIntegral $ fdWriteBuf fd data' $ fromIntegral length' 70 | 71 | -- | Read **at most** the specified amount of bytes from a handle. 72 | -- 73 | -- Return the amount of bytes actually read, or throw an IO error (including 74 | -- EOF). 75 | read :: FHandle -> Ptr Word8 -> Word64 -> IO Word64 76 | read (FHandle fd) buf len = 77 | fromIntegral <$> fdReadBuf fd buf (fromIntegral len) 78 | 79 | -- | Synchronize the file contents to disk. 80 | flush :: FHandle -> IO () 81 | flush (FHandle fd) = fileSynchronise fd 82 | 83 | -- | Seek to an absolute position. 84 | seek :: FHandle -> Word64 -> IO () 85 | seek (FHandle fd) offset = void $ fdSeek fd AbsoluteSeek (fromIntegral offset) 86 | 87 | -- | Set the filesize to a certain length. 88 | setFileSize :: FHandle -> Word64 -> IO () 89 | setFileSize (FHandle fd) size = setFdSize fd (fromIntegral size) 90 | 91 | -- | Get the filesize. This **edits the file pointer**. 92 | getFileSize :: FHandle -> IO Word64 93 | getFileSize (FHandle fd) = fromIntegral <$> fdSeek fd SeekFromEnd 0 94 | 95 | -- | Close a file. 
96 | close :: FHandle -> IO () 97 | close (FHandle fd) = closeFd fd 98 | 99 | -- Unix needs to use a special open call to open files for exclusive writing 100 | --openExclusively :: FilePath -> IO Handle 101 | --openExclusively fp = 102 | -- fdToHandle =<< openFd fp ReadWrite (Just 0o600) flags 103 | -- where flags = defaultFileFlags {exclusive = True, trunc = True} 104 | 105 | 106 | -- | Obtain a lock on a file. 107 | -- 108 | -- Use 'releasePrefixLock' to release the prefix lock. 109 | obtainPrefixLock :: FilePath -> IO PrefixLock 110 | obtainPrefixLock prefix = checkLock fp >> takeLock fp 111 | where fp = prefix ++ ".lock" 112 | 113 | -- |Read the lock and break it if the process is dead. 114 | checkLock :: FilePath -> IO () 115 | checkLock fp = readLock fp >>= maybeBreakLock fp 116 | 117 | -- |Read the lock and return the process id if possible. 118 | readLock :: FilePath -> IO (Maybe ProcessID) 119 | readLock fp = do 120 | pid <- try (readFile fp) 121 | return $ either (checkReadFileError fp) 122 | (fmap (fromInteger . P.read) . listToMaybe . lines) 123 | pid 124 | 125 | -- |Is this a permission error? If so we don't have permission to 126 | -- remove the lock file, abort. 127 | checkReadFileError :: String -> IOError -> Maybe ProcessID 128 | checkReadFileError fp e | SE.isPermissionError e = throw (userError ("Could not read lock file: " ++ show fp)) 129 | | SE.isDoesNotExistError e = Nothing 130 | | otherwise = throw e 131 | 132 | maybeBreakLock :: FilePath -> Maybe ProcessID -> IO () 133 | maybeBreakLock fp Nothing = 134 | -- The lock file exists, but there's no PID in it. At this point, 135 | -- we will break the lock, because the other process either died 136 | -- or will give up when it failed to read its pid back from this 137 | -- file. 138 | breakLock fp 139 | maybeBreakLock fp (Just pid) = do 140 | -- The lock file exists and there is a PID in it. We can break the 141 | -- lock if that process has died. 
142 | -- getProcessStatus only works on the children of the calling process. 143 | -- exists <- try (getProcessStatus False True pid) >>= either checkException (return . isJust) 144 | exists <- doesProcessExist pid 145 | if exists 146 | then throw (lockedBy fp pid) 147 | else breakLock fp 148 | 149 | doesProcessExist :: ProcessID -> IO Bool 150 | doesProcessExist pid = do 151 | -- Implementation 1 152 | -- doesDirectoryExist ("/proc/" ++ show pid) 153 | -- Implementation 2 154 | v <- try (signalProcess nullSignal pid) 155 | return $ either checkException (const True) v 156 | where checkException e | SE.isDoesNotExistError e = False 157 | | otherwise = throw e 158 | 159 | -- |We have determined the locking process is gone, try to remove the 160 | -- lock. 161 | breakLock :: FilePath -> IO () 162 | breakLock fp = try (removeFile fp) >>= either checkBreakError (const (return ())) 163 | 164 | -- |An exception when we tried to break a lock, if it says the lock 165 | -- file has already disappeared we are still good to go. 166 | checkBreakError :: IOError -> IO () 167 | checkBreakError e | SE.isDoesNotExistError e = return () 168 | | otherwise = throw e 169 | 170 | -- |Try to create lock by opening the file with the O_EXCL flag and 171 | -- writing our PID into it. Verify by reading the pid back out and 172 | -- matching, maybe some other process slipped in before we were done 173 | -- and broke our lock. 
174 | takeLock :: FilePath -> IO PrefixLock 175 | takeLock fp = do 176 | createDirectoryIfMissing True (takeDirectory fp) 177 | h <- openFd fp ReadWrite (Just 0o600) (defaultFileFlags {exclusive = True, trunc = True}) >>= fdToHandle 178 | pid <- getProcessID 179 | hPrint h pid >> hClose h 180 | -- Read back our own lock and make sure its still ours 181 | readLock fp >>= maybe (throw (cantLock fp pid)) 182 | (\ pid' -> if pid /= pid' 183 | then throw (stolenLock fp pid pid') 184 | else return (PrefixLock fp)) 185 | 186 | -- |An exception saying the data is locked by another process. 187 | lockedBy :: (Show a) => FilePath -> a -> SomeException 188 | lockedBy fp pid = SomeException (SE.mkIOError SE.alreadyInUseErrorType ("Locked by " ++ show pid) Nothing (Just fp)) 189 | 190 | -- |An exception saying we don't have permission to create lock. 191 | cantLock :: FilePath -> ProcessID -> SomeException 192 | cantLock fp pid = SomeException (SE.mkIOError SE.alreadyInUseErrorType ("Process " ++ show pid ++ " could not create a lock") Nothing (Just fp)) 193 | 194 | -- |An exception saying another process broke our lock before we 195 | -- finished creating it. 196 | stolenLock :: FilePath -> ProcessID -> ProcessID -> SomeException 197 | stolenLock fp pid pid' = SomeException (SE.mkIOError SE.alreadyInUseErrorType ("Process " ++ show pid ++ "'s lock was stolen by process " ++ show pid') Nothing (Just fp)) 198 | 199 | -- |Relinquish the lock by removing it and then verifying the removal. 
200 | releasePrefixLock :: PrefixLock -> IO () 201 | releasePrefixLock (PrefixLock fp) = 202 | dropLock >>= either checkDrop return 203 | where 204 | dropLock = try (removeFile fp) 205 | checkDrop e | SE.isDoesNotExistError e = return () 206 | | otherwise = throw e 207 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent.hs: -------------------------------------------------------------------------------- 1 | -- | The module implements an page allocator with page reuse and support for 2 | -- multiple readers and serialized writers. 3 | module Database.Haskey.Alloc.Concurrent ( 4 | -- * Allocator 5 | ConcurrentDb(..) 6 | 7 | -- * Open, close and create databases 8 | , ConcurrentHandles(..) 9 | , concurrentHandles 10 | , lockConcurrentDb 11 | , unlockConcurrentDb 12 | , createConcurrentDb 13 | , openConcurrentDb 14 | , closeConcurrentHandles 15 | 16 | -- * Manipulation and transactions 17 | , module Database.Haskey.Alloc.Transaction 18 | , transact 19 | , transact_ 20 | , transactReadOnly 21 | 22 | -- * Storage requirements 23 | , Root 24 | , ConcurrentMeta(..) 25 | , ConcurrentMetaStoreM(..) 26 | ) where 27 | 28 | import Database.Haskey.Alloc.Concurrent.Internal.Database 29 | import Database.Haskey.Alloc.Concurrent.Internal.Meta 30 | import Database.Haskey.Alloc.Concurrent.Internal.Monad 31 | import Database.Haskey.Alloc.Transaction 32 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/Database.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE FlexibleContexts #-} 3 | {-# LANGUAGE LambdaCase #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | {-# LANGUAGE RecordWildCards #-} 6 | -- | This module implements data structures and functions related to the database. 
7 | module Database.Haskey.Alloc.Concurrent.Internal.Database where 8 | 9 | import Control.Applicative ((<$>)) 10 | import Control.Concurrent.STM 11 | import Control.Monad (void, unless) 12 | import Control.Monad.IO.Class 13 | import Control.Monad.Catch (MonadCatch, MonadMask, SomeException, 14 | catch, mask, onException, bracket, bracket_) 15 | import Control.Monad.State 16 | import Control.Monad.Trans (lift) 17 | 18 | import Data.Proxy (Proxy(..)) 19 | import Data.List.NonEmpty (NonEmpty((:|))) 20 | import Data.Maybe (fromMaybe) 21 | 22 | #if MIN_VERSION_stm_containers(1,1,0) 23 | import StmContainers.Map (Map) 24 | import qualified StmContainers.Map as Map 25 | #else 26 | import STMContainers.Map (Map) 27 | import qualified STMContainers.Map as Map 28 | #endif 29 | 30 | import Data.BTree.Alloc.Class 31 | import Data.BTree.Impure (Tree(..)) 32 | import Data.BTree.Primitives 33 | 34 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 35 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Save 36 | import Database.Haskey.Alloc.Concurrent.Internal.Meta 37 | import Database.Haskey.Alloc.Concurrent.Internal.Monad 38 | import Database.Haskey.Alloc.Concurrent.Internal.Overflow 39 | import Database.Haskey.Alloc.Transaction 40 | import Database.Haskey.Store 41 | import Database.Haskey.Utils.RLock 42 | import qualified Database.Haskey.Utils.STM.Map as Map 43 | 44 | -- | An active concurrent database. 45 | -- 46 | -- This can be shared amongst threads. 47 | data ConcurrentDb root = ConcurrentDb 48 | { concurrentDbHandles :: ConcurrentHandles 49 | , concurrentDbWriterLock :: RLock 50 | , concurrentDbCurrentMeta :: TVar CurrentMetaPage 51 | , concurrentDbMeta1 :: TVar (ConcurrentMeta root) 52 | , concurrentDbMeta2 :: TVar (ConcurrentMeta root) 53 | , concurrentDbReaders :: Map TxId Integer 54 | } 55 | 56 | -- | Lock the database. 
57 | -- 58 | -- This needs to be called manually, if you want exclusive access, before 59 | -- calling either 'createConcurrentDb' or 'openConcurrentDb' 60 | -- 61 | -- Use 'unlockConcurrentDb' using the 'bracket' pattern to properly unlock the 62 | -- database. 63 | lockConcurrentDb :: ConcurrentMetaStoreM m => ConcurrentHandles -> m () 64 | lockConcurrentDb = lockHandle . concurrentHandlesRoot 65 | 66 | -- | Unlock the database. 67 | unlockConcurrentDb :: ConcurrentMetaStoreM m => ConcurrentHandles -> m () 68 | unlockConcurrentDb = releaseHandle . concurrentHandlesRoot 69 | 70 | -- | Open all concurrent handles. 71 | openConcurrentHandles :: ConcurrentMetaStoreM m 72 | => ConcurrentHandles -> m () 73 | openConcurrentHandles ConcurrentHandles{..} = do 74 | openHandle concurrentHandlesData 75 | openHandle concurrentHandlesIndex 76 | openHandle concurrentHandlesMetadata1 77 | openHandle concurrentHandlesMetadata2 78 | 79 | -- | Open a new concurrent database, with the given handles. 80 | createConcurrentDb :: (Root root, MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 81 | => ConcurrentHandles 82 | -> root 83 | -> m (ConcurrentDb root) 84 | createConcurrentDb hnds root = 85 | bracket_ (openConcurrentHandles hnds) 86 | (closeConcurrentHandles hnds) $ do 87 | 88 | db <- newConcurrentDb hnds meta0 89 | setCurrentMeta meta0 db 90 | setCurrentMeta meta0 db 91 | return db 92 | where 93 | meta0 = ConcurrentMeta { 94 | concurrentMetaRevision = 0 95 | , concurrentMetaDataNumPages = DataState 0 96 | , concurrentMetaIndexNumPages = IndexState 0 97 | , concurrentMetaRoot = root 98 | , concurrentMetaDataFreeTree = DataState $ Tree zeroHeight Nothing 99 | , concurrentMetaIndexFreeTree = IndexState $ Tree zeroHeight Nothing 100 | , concurrentMetaOverflowTree = Tree zeroHeight Nothing 101 | , concurrentMetaDataCachedFreePages = DataState [] 102 | , concurrentMetaIndexCachedFreePages = IndexState [] 103 | } 104 | 105 | -- | Open the an existing database, with the given handles. 
106 | openConcurrentDb :: (Root root, MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 107 | => ConcurrentHandles 108 | -> m (Maybe (ConcurrentDb root)) 109 | openConcurrentDb hnds@ConcurrentHandles{..} = 110 | bracket_ (openConcurrentHandles hnds) 111 | (closeConcurrentHandles hnds) $ do 112 | 113 | m1 <- readConcurrentMeta concurrentHandlesMetadata1 Proxy 114 | m2 <- readConcurrentMeta concurrentHandlesMetadata2 Proxy 115 | maybeDb <- case (m1, m2) of 116 | (Nothing, Nothing) -> return Nothing 117 | (Just m , Nothing) -> Just <$> newConcurrentDb hnds m 118 | (Nothing, Just m ) -> Just <$> newConcurrentDb hnds m 119 | (Just x , Just y ) -> if concurrentMetaRevision x > concurrentMetaRevision y 120 | then Just <$> newConcurrentDb hnds x 121 | else Just <$> newConcurrentDb hnds y 122 | case maybeDb of 123 | Nothing -> return Nothing 124 | Just db -> do 125 | meta <- liftIO . atomically $ getCurrentMeta db 126 | cleanupAfterException hnds (concurrentMetaRevision meta + 1) 127 | return (Just db) 128 | 129 | -- | Close the handles of the database. 130 | closeConcurrentHandles :: (MonadIO m, ConcurrentMetaStoreM m) 131 | => ConcurrentHandles 132 | -> m () 133 | closeConcurrentHandles ConcurrentHandles{..} = do 134 | closeHandle concurrentHandlesData 135 | closeHandle concurrentHandlesIndex 136 | closeHandle concurrentHandlesMetadata1 137 | closeHandle concurrentHandlesMetadata2 138 | 139 | -- | Create a new concurrent database with handles and metadata provided. 140 | newConcurrentDb :: (Root root, MonadIO m) 141 | => ConcurrentHandles 142 | -> ConcurrentMeta root 143 | -> m (ConcurrentDb root) 144 | newConcurrentDb hnds meta0 = do 145 | readers <- liftIO Map.newIO 146 | meta <- liftIO $ newTVarIO Meta1 147 | lock <- liftIO newRLock 148 | meta1 <- liftIO $ newTVarIO meta0 149 | meta2 <- liftIO $ newTVarIO meta0 150 | return $! 
ConcurrentDb 151 | { concurrentDbHandles = hnds 152 | , concurrentDbWriterLock = lock 153 | , concurrentDbCurrentMeta = meta 154 | , concurrentDbMeta1 = meta1 155 | , concurrentDbMeta2 = meta2 156 | , concurrentDbReaders = readers 157 | } 158 | 159 | -- | Get the current meta data. 160 | getCurrentMeta :: Root root 161 | => ConcurrentDb root 162 | -> STM (ConcurrentMeta root) 163 | getCurrentMeta db 164 | | ConcurrentDb { concurrentDbCurrentMeta = v } <- db 165 | = readTVar v >>= \case 166 | Meta1 -> readTVar $ concurrentDbMeta1 db 167 | Meta2 -> readTVar $ concurrentDbMeta2 db 168 | 169 | -- | Write the new metadata, and switch the pointer to the current one. 170 | setCurrentMeta :: (Root root, MonadIO m, ConcurrentMetaStoreM m) 171 | => ConcurrentMeta root 172 | -> ConcurrentDb root 173 | -> m () 174 | setCurrentMeta new db 175 | | ConcurrentDb 176 | { concurrentDbCurrentMeta = v 177 | , concurrentDbHandles = hnds 178 | } <- db 179 | = liftIO (atomically $ readTVar v) >>= \case 180 | Meta1 -> do 181 | flushHandle (concurrentHandlesData hnds) 182 | flushHandle (concurrentHandlesIndex hnds) 183 | putConcurrentMeta (concurrentHandlesMetadata2 hnds) new 184 | flushHandle (concurrentHandlesMetadata2 hnds) 185 | liftIO . atomically $ do 186 | writeTVar v Meta2 187 | writeTVar (concurrentDbMeta2 db) new 188 | Meta2 -> do 189 | flushHandle (concurrentHandlesData hnds) 190 | flushHandle (concurrentHandlesIndex hnds) 191 | putConcurrentMeta (concurrentHandlesMetadata1 hnds) new 192 | flushHandle (concurrentHandlesMetadata1 hnds) 193 | liftIO . atomically $ do 194 | writeTVar v Meta1 195 | writeTVar (concurrentDbMeta1 db) new 196 | 197 | -- | Execute a write transaction, with a result. 198 | transact :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Root root) 199 | => (forall n. 
(AllocM n, MonadMask n) => root -> n (Transaction root a)) 200 | -> ConcurrentDb root 201 | -> m a 202 | transact act db = withRLock (concurrentDbWriterLock db) $ do 203 | cleanup 204 | transactNow act db 205 | where 206 | cleanup :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m) => m () 207 | cleanup = actAndCommit db $ \meta -> do 208 | v <- deleteOutdatedOverflowIds (concurrentMetaOverflowTree meta) 209 | case v of 210 | Nothing -> return (Nothing, ()) 211 | Just tree -> do 212 | let meta' = meta { concurrentMetaOverflowTree = tree } 213 | return (Just meta', ()) 214 | 215 | -- | Execute a write transaction, without cleaning up old overflow pages. 216 | transactNow :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Root root) 217 | => (forall n. (AllocM n, MonadMask n) => root -> n (Transaction root a)) 218 | -> ConcurrentDb root 219 | -> m a 220 | transactNow act db = withRLock (concurrentDbWriterLock db) $ 221 | actAndCommit db $ \meta -> do 222 | tx <- act (concurrentMetaRoot meta) 223 | case tx of 224 | Abort v -> return (Nothing, v) 225 | Commit root v -> 226 | let meta' = meta { concurrentMetaRoot = root } in 227 | return (Just meta', v) 228 | 229 | -- | Execute a write transaction, without a result. 230 | transact_ :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Root root) 231 | => (forall n. (AllocM n, MonadMask n) => root -> n (Transaction root ())) 232 | -> ConcurrentDb root 233 | -> m () 234 | transact_ act db = void $ transact act db 235 | 236 | -- | Execute a read-only transaction. 237 | transactReadOnly :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Root root) 238 | => (forall n. 
(AllocReaderM n, MonadMask n) => root -> n a) 239 | -> ConcurrentDb root 240 | -> m a 241 | transactReadOnly act db = 242 | bracket_ (openConcurrentHandles hnds) 243 | (closeConcurrentHandles hnds) $ 244 | 245 | bracket acquireMeta 246 | releaseMeta $ 247 | \meta -> evalConcurrentT (act $ concurrentMetaRoot meta) 248 | (ReaderEnv hnds) 249 | where 250 | hnds = concurrentDbHandles db 251 | readers = concurrentDbReaders db 252 | 253 | addOne Nothing = Just 1 254 | addOne (Just x) = Just $! x + 1 255 | subOne Nothing = Nothing 256 | subOne (Just 0) = Nothing 257 | subOne (Just 1) = Nothing 258 | subOne (Just x) = Just $! x - 1 259 | 260 | acquireMeta = liftIO . atomically $ do 261 | meta <- getCurrentMeta db 262 | Map.alter (concurrentMetaRevision meta) addOne readers 263 | return meta 264 | 265 | releaseMeta meta = 266 | let rev = concurrentMetaRevision meta in 267 | liftIO . atomically $ Map.alter rev subOne readers 268 | 269 | -------------------------------------------------------------------------------- 270 | 271 | -- | Run a write action that takes the current meta-data and returns new 272 | -- meta-data to be commited, or 'Nothing' if the write transaction should be 273 | -- aborted. 274 | actAndCommit :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Root root) 275 | => ConcurrentDb root 276 | -> (forall n. (MonadIO n, MonadMask n, ConcurrentMetaStoreM n) 277 | => ConcurrentMeta root 278 | -> ConcurrentT WriterEnv ConcurrentHandles n (Maybe (ConcurrentMeta root), a) 279 | ) 280 | -> m a 281 | actAndCommit db act 282 | | ConcurrentDb 283 | { concurrentDbHandles = hnds 284 | , concurrentDbWriterLock = lock 285 | , concurrentDbReaders = readers 286 | } <- db 287 | = withRLock lock $ 288 | bracket_ (openConcurrentHandles hnds) 289 | (closeConcurrentHandles hnds) $ do 290 | 291 | meta <- liftIO . 
atomically $ getCurrentMeta db 292 | let newRevision = concurrentMetaRevision meta + 1 293 | wrap hnds newRevision $ do 294 | ((maybeMeta, v), env) <- runConcurrentT (act meta) $ 295 | newWriter hnds 296 | newRevision 297 | readers 298 | (concurrentMetaDataNumPages meta) 299 | (concurrentMetaIndexNumPages meta) 300 | (concurrentMetaDataCachedFreePages meta) 301 | (concurrentMetaIndexCachedFreePages meta) 302 | (concurrentMetaDataFreeTree meta) 303 | (concurrentMetaIndexFreeTree meta) 304 | 305 | let maybeMeta' = updateMeta env <$> maybeMeta 306 | 307 | case maybeMeta' of 308 | Nothing -> do 309 | removeNewlyAllocatedOverflows env 310 | return v 311 | 312 | Just meta' -> do 313 | -- Bookkeeping 314 | (newMeta, _) <- flip execStateT (meta', env) $ do 315 | saveOverflowIds 316 | saveFreePages' 0 DataState 317 | writerDataFileState 318 | (\e s -> e { writerDataFileState = s }) 319 | saveFreePages' 0 IndexState 320 | writerIndexFileState 321 | (\e s -> e { writerIndexFileState = s }) 322 | handleCachedFreePages 323 | 324 | -- Commit 325 | setCurrentMeta (newMeta { concurrentMetaRevision = newRevision }) 326 | db 327 | return v 328 | where 329 | wrap :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 330 | => ConcurrentHandles 331 | -> TxId 332 | -> m a 333 | -> m a 334 | wrap hnds tx action = mask $ \restore -> 335 | restore action `onException` cleanupAfterException hnds tx 336 | 337 | -- | Cleanup after an exception occurs, or after a program crash. 338 | -- 339 | -- The 'TxId' of the aborted transaction should be passed. 340 | cleanupAfterException :: (MonadIO m, MonadCatch m, ConcurrentMetaStoreM m) 341 | => ConcurrentHandles 342 | -> TxId 343 | -> m () 344 | cleanupAfterException hnds tx = do 345 | let dir = getOverflowDir (concurrentHandlesOverflowDir hnds) tx 346 | overflows <- filter filter' <$> listOverflows dir 347 | mapM_ (\fp -> removeHandle fp `catch` ignore) overflows 348 | where 349 | filter' fp = fromMaybe False $ (== tx) . 
fst <$> readOverflowId fp 350 | 351 | ignore :: Monad m => SomeException -> m () 352 | ignore _ = return () 353 | 354 | -- | Remove all overflow pages that were written in the transaction. 355 | -- 356 | -- If the transaction is aborted, all written pages should be deleted. 357 | removeNewlyAllocatedOverflows :: (MonadIO m, ConcurrentMetaStoreM m) 358 | => WriterEnv ConcurrentHandles 359 | -> m () 360 | removeNewlyAllocatedOverflows env = do 361 | let root = concurrentHandlesOverflowDir (writerHnds env) 362 | sequence_ [ delete root (i - 1) | i <- [1..(writerOverflowCounter env)] ] 363 | where 364 | delete root c = do 365 | let i = (writerTxId env, c) 366 | removeHandle (getOverflowHandle root i) 367 | 368 | -- | Update the meta-data from a writer environment 369 | updateMeta :: WriterEnv ConcurrentHandles -> ConcurrentMeta root -> ConcurrentMeta root 370 | updateMeta env m = m { 371 | concurrentMetaDataFreeTree = fileStateFreeTree (writerDataFileState env) 372 | , concurrentMetaIndexFreeTree = fileStateFreeTree (writerIndexFileState env) } 373 | 374 | 375 | -- | Save the newly free'd overflow pages, for deletion on the next tx. 376 | saveOverflowIds :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 377 | => StateT (ConcurrentMeta root, WriterEnv ConcurrentHandles) m () 378 | saveOverflowIds = do 379 | (meta, env) <- get 380 | case map (\(OldOverflow i) ->i) (writerRemovedOverflows env) of 381 | [] -> return () 382 | x:xs -> do 383 | (tree', env') <- lift $ flip runConcurrentT env $ 384 | insertOverflowIds (writerTxId env) 385 | (x :| xs) 386 | (concurrentMetaOverflowTree meta) 387 | let meta' = (updateMeta env meta) 388 | { concurrentMetaOverflowTree = tree' } 389 | put (meta', env') 390 | 391 | -- | Save the free'd pages to the free page database 392 | saveFreePages' :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 393 | => Int 394 | -> (forall a. a -> S t a) 395 | -> (forall hnds. WriterEnv hnds -> FileState t) 396 | -> (forall hnds. 
WriterEnv hnds -> FileState t -> WriterEnv hnds) 397 | -> StateT (ConcurrentMeta root, WriterEnv ConcurrentHandles) m () 398 | saveFreePages' paranoid cons getState setState 399 | {- paranoid >= 100 = error "paranoid: looping!" 400 | | otherwise-} 401 | = do 402 | 403 | (meta, env) <- get 404 | let tx = writerTxId env 405 | (tree', envWithoutTree) <- lift $ 406 | runConcurrentT (saveFreePages tx (getState env)) $ 407 | env { writerQueryFreeTreeOn = False } 408 | 409 | let state' = (getState envWithoutTree) { fileStateFreeTree = cons tree' } 410 | let env' = setState envWithoutTree state' 411 | let meta' = updateMeta env' meta 412 | put (meta', env') 413 | 414 | -- Did we free any new pages? We have to put them in the free tree! 415 | unless (fileStateNewlyFreedPages state' == fileStateNewlyFreedPages (getState env)) $ 416 | saveFreePages' (paranoid + 1) cons getState setState 417 | 418 | -- | Handle the cached free pages. 419 | -- 420 | -- Save the cached free pages to the metadata for later use. 421 | -- 422 | -- Update the database size. 
423 | handleCachedFreePages :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m) 424 | => StateT (ConcurrentMeta root, WriterEnv ConcurrentHandles) m () 425 | handleCachedFreePages = do 426 | (meta, env) <- get 427 | 428 | let dataEnv = writerDataFileState env 429 | let indexEnv = writerIndexFileState env 430 | 431 | let meta' = meta { concurrentMetaDataNumPages = 432 | fileStateNewNumPages dataEnv 433 | , concurrentMetaDataFreeTree = 434 | fileStateFreeTree dataEnv 435 | , concurrentMetaDataCachedFreePages = 436 | fileStateCachedFreePages dataEnv 437 | 438 | , concurrentMetaIndexNumPages = 439 | fileStateNewNumPages indexEnv 440 | , concurrentMetaIndexFreeTree = 441 | fileStateFreeTree indexEnv 442 | , concurrentMetaIndexCachedFreePages = 443 | fileStateCachedFreePages indexEnv 444 | } 445 | put (meta', env) 446 | 447 | -------------------------------------------------------------------------------- 448 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/Environment.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE DataKinds #-} 3 | {-# LANGUAGE DeriveDataTypeable #-} 4 | {-# LANGUAGE FlexibleContexts #-} 5 | {-# LANGUAGE FlexibleInstances #-} 6 | {-# LANGUAGE GADTs #-} 7 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 8 | {-# LANGUAGE KindSignatures #-} 9 | {-# LANGUAGE MultiWayIf #-} 10 | {-# LANGUAGE RankNTypes #-} 11 | {-# LANGUAGE StandaloneDeriving #-} 12 | -- | Environments of a read or write transaction. 
13 | module Database.Haskey.Alloc.Concurrent.Internal.Environment where 14 | 15 | import Control.Applicative ((<$>)) 16 | import Control.Monad.State 17 | 18 | import Data.Binary (Binary) 19 | import Data.Set (Set) 20 | import Data.Typeable (Typeable) 21 | import Data.Word (Word32) 22 | import qualified Data.Binary as B 23 | import qualified Data.Set as S 24 | 25 | #if MIN_VERSION_stm_containers(1,1,0) 26 | import StmContainers.Map (Map) 27 | #else 28 | import STMContainers.Map (Map) 29 | #endif 30 | 31 | import Data.BTree.Primitives 32 | 33 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree 34 | 35 | data StateType = TypeData 36 | | TypeIndex 37 | 38 | -- | Wrapper around a type to indicate it belongs to a file with either 39 | -- data/leaf nodes or index nodes. 40 | data S (t :: StateType) a where 41 | DataState :: a -> S 'TypeData a 42 | IndexState :: a -> S 'TypeIndex a 43 | deriving (Typeable) 44 | 45 | deriving instance Show a => Show (S t a) 46 | 47 | instance Binary a => Binary (S 'TypeData a) where 48 | put (DataState a) = B.put a 49 | get = DataState <$> B.get 50 | 51 | instance Binary a => Binary (S 'TypeIndex a) where 52 | put (IndexState a) = B.put a 53 | get = IndexState <$> B.get 54 | 55 | instance Functor (S t) where 56 | f `fmap` (DataState v) = DataState (f v) 57 | f `fmap` (IndexState v) = IndexState (f v) 58 | 59 | getSValue :: S t a -> a 60 | getSValue (DataState a) = a 61 | getSValue (IndexState a) = a 62 | 63 | newtype ReaderEnv hnds = ReaderEnv { readerHnds :: hnds } 64 | 65 | data FileState stateType = FileState { 66 | fileStateNewlyFreedPages :: ![NewlyFreed] 67 | -- ^ Pages free'd in this transaction, not ready for reuse until the 68 | -- transaction is commited. 69 | 70 | , fileStateOriginalNumPages :: !(S stateType PageId) 71 | -- ^ The original number of pages in the file, before the transaction 72 | -- started. 
73 | 74 | , fileStateNewNumPages :: !(S stateType PageId) 75 | -- ^ The new uncommited number of pages in the file. 76 | -- 77 | -- All pages in the range 'fileStateOriginalNumPages' to 78 | -- 'fileStateNewNumPages' (excluding) are freshly allocated in the 79 | -- ongoing transaction. 80 | 81 | , fileStateDirtyPages :: !(Set PageId) 82 | -- ^ Pages written to in this transaction. 83 | 84 | , fileStateFreeTree :: !(S stateType FreeTree) 85 | -- ^ The root of the free tree, might change during a transaction. 86 | 87 | , fileStateCachedFreePages :: !(S stateType [FreePage]) 88 | -- ^ All pages that are immediately ready for reuse in this and any 89 | -- subsequent transactions. 90 | } 91 | 92 | data WriterEnv hnds = WriterEnv 93 | { writerHnds :: !hnds 94 | , writerTxId :: !TxId 95 | , writerReaders :: Map TxId Integer 96 | 97 | , writerIndexFileState :: FileState 'TypeIndex 98 | -- ^ State of the file with index nodes. 99 | 100 | , writerDataFileState :: FileState 'TypeData 101 | -- ^ State of the file with data/leaf nodes. 102 | 103 | , writerQueryFreeTreeOn :: !Bool 104 | -- ^ Whether or not querying free pages from the free is enabled. 105 | 106 | , writerDirtyOverflows :: !(Set DirtyOverflow) 107 | -- ^ Newly allocated overflow pages in this transaction. 108 | 109 | , writerOverflowCounter :: !Word32 110 | -- ^ Counts how many overflow pages were already allocated in this transaction. 111 | 112 | , writerRemovedOverflows :: ![OldOverflow] 113 | -- ^ Old overflow pages that were removed in this transaction 114 | -- and should be deleted when no longer in use. 115 | } 116 | 117 | -- | Create a new writer. 
newWriter :: hnd -> TxId -> Map TxId Integer
          -> S 'TypeData PageId -> S 'TypeIndex PageId
          -> S 'TypeData [FreePage] -> S 'TypeIndex [FreePage]
          -> S 'TypeData FreeTree -> S 'TypeIndex FreeTree
          -> WriterEnv hnd
newWriter hnd tx readers
          nDataPages nIndexPages
          cachedDataPages cachedIndexPages
          dataTree indexTree = WriterEnv
    { writerHnds             = hnd
    , writerTxId             = tx
    , writerReaders          = readers

    , writerIndexFileState   = mkFileState nIndexPages cachedIndexPages indexTree
    , writerDataFileState    = mkFileState nDataPages  cachedDataPages  dataTree

    , writerQueryFreeTreeOn  = True
    , writerDirtyOverflows   = S.empty
    , writerOverflowCounter  = 0
    , writerRemovedOverflows = []
    }
  where
    -- Fresh per-file bookkeeping: nothing freed, nothing dirty, and the
    -- uncommitted page count starts out equal to the committed one.
    mkFileState numPages cached tree = FileState
        { fileStateNewlyFreedPages  = []
        , fileStateOriginalNumPages = numPages
        , fileStateNewNumPages      = numPages
        , fileStateDirtyPages       = S.empty
        , fileStateCachedFreePages  = cached
        , fileStateFreeTree         = tree
        }

-- | Wrapper around 'PageId' indicating it is newly free'd and cannot be reused
-- in the same transaction.
newtype NewlyFreed = NewlyFreed PageId deriving (Eq, Ord, Show)

-- | Wrapper around 'PageId' indicating it is free and can be reused in any
-- transaction.
newtype FreePage = FreePage PageId deriving (Binary, Eq, Ord, Show)

-- | Wrapper around 'PageId' indicating that it is dirty, i.e. written to in
-- this transaction.
newtype Dirty = Dirty PageId deriving (Eq, Ord, Show)

-- | Try to free a page, given a set of dirty pages.
--
-- If the page was dirty, a 'FreePage' page is added to the environment, if
-- not a 'NewlyFreed' page is added to the environment.
--
-- Btw, give me lenses...
freePage :: (Functor m, MonadState (WriterEnv hnd) m) => S stateType PageId -> m ()
freePage sp@(DataState rawPid) = do
    wasDirty <- dirty sp
    modify' $ \env -> env
        { writerDataFileState =
            updateFileState (writerDataFileState env) DataState wasDirty rawPid
        }

freePage sp@(IndexState rawPid) = do
    wasDirty <- dirty sp
    modify' $ \env -> env
        { writerIndexFileState =
            updateFileState (writerIndexFileState env) IndexState wasDirty rawPid
        }

-- | Register a freed page in a 'FileState'.
--
-- A page that was dirtied in this very transaction goes straight back to the
-- reusable cache, while any other page becomes 'NewlyFreed' and stays
-- unusable until the transaction commits.
updateFileState :: FileState t
                -> (forall a. a -> S t a)
                -> Maybe Dirty
                -> PageId
                -> FileState t
updateFileState st cons wasDirty rawPid = case wasDirty of
    Just (Dirty p) ->
        st { fileStateCachedFreePages =
                 cons $ FreePage p : getSValue (fileStateCachedFreePages st) }
    Nothing ->
        st { fileStateNewlyFreedPages =
                 NewlyFreed rawPid : fileStateNewlyFreedPages st }

-- | Get a 'Dirty' page, by first proving it is in fact dirty.
dirty :: (Functor m, MonadState (WriterEnv hnd) m) => S stateType PageId -> m (Maybe Dirty)
dirty sp = gets $ \env -> case sp of
    DataState p  -> check p (fileStateDirtyPages (writerDataFileState env))
    IndexState p -> check p (fileStateDirtyPages (writerIndexFileState env))
  where
    -- Only pages present in the transaction's dirty set qualify.
    check p dirtyPages
        | p `S.member` dirtyPages = Just (Dirty p)
        | otherwise               = Nothing

-- | Touch a fresh page, make it dirty.
--
-- We really need lenses...
touchPage :: MonadState (WriterEnv hnd) m => S stateType PageId -> m ()
touchPage (DataState pid) = modify' $ \e ->
    -- Mark the page dirty and, in the same update, grow the uncommitted page
    -- count when the touched page lies past the current end of the file.
    let st    = writerDataFileState e
        grown = max (getSValue (fileStateNewNumPages st)) (pid + 1)
    in e { writerDataFileState = st
             { fileStateDirtyPages  = S.insert pid (fileStateDirtyPages st)
             , fileStateNewNumPages = DataState grown
             }
         }

touchPage (IndexState pid) = modify' $ \e ->
    let st    = writerIndexFileState e
        grown = max (getSValue (fileStateNewNumPages st)) (pid + 1)
    in e { writerIndexFileState = st
             { fileStateDirtyPages  = S.insert pid (fileStateDirtyPages st)
             , fileStateNewNumPages = IndexState grown
             }
         }

-- | Wrapper around 'OverflowId' indicating that it is dirty.
newtype DirtyOverflow = DirtyOverflow OverflowId deriving (Eq, Ord, Show)

-- | Wrapper around 'OverflowId' indicating that it is an overflow
-- page from a previous transaction.
newtype OldOverflow = OldOverflow OverflowId deriving (Eq, Ord, Show)

-- | Touch a fresh overflow page, making it dirty.
touchOverflow :: MonadState (WriterEnv hnd) m => OverflowId -> m ()
touchOverflow oid = modify' $ \e ->
    e { writerDirtyOverflows =
            S.insert (DirtyOverflow oid) (writerDirtyOverflows e) }

-- | Get the type of the overflow page.
253 | overflowType :: MonadState (WriterEnv hnd) m => OverflowId -> m (Either DirtyOverflow OldOverflow) 254 | overflowType i = do 255 | dirty' <- gets $ \e -> S.member (DirtyOverflow i) (writerDirtyOverflows e) 256 | if dirty' then return $ Left (DirtyOverflow i) 257 | else return $ Right (OldOverflow i) 258 | 259 | -- | Free an old overflow page. 260 | removeOldOverflow :: MonadState (WriterEnv hdn) m => OldOverflow -> m () 261 | removeOldOverflow i = 262 | modify' $ \e -> e { writerRemovedOverflows = i : writerRemovedOverflows e } 263 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/FreePages/Query.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleContexts #-} 2 | {-# LANGUAGE GADTs #-} 3 | {-# LANGUAGE LambdaCase #-} 4 | {-# LANGUAGE RankNTypes #-} 5 | module Database.Haskey.Alloc.Concurrent.Internal.FreePages.Query where 6 | 7 | import Control.Applicative ((<|>), (<$>)) 8 | import Control.Concurrent.STM 9 | import Control.Monad.State 10 | import Control.Monad.Trans.Maybe 11 | 12 | import Data.List.NonEmpty (NonEmpty((:|))) 13 | import qualified Data.List.NonEmpty as NE 14 | 15 | import Data.BTree.Alloc.Class 16 | import Data.BTree.Primitives 17 | import qualified Data.BTree.Impure as B 18 | import qualified Data.BTree.Impure.NonEmpty as NEB 19 | 20 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 21 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree 22 | import Database.Haskey.Utils.Monad (ifM) 23 | import qualified Database.Haskey.Utils.STM.Map as Map 24 | 25 | -- | Get a free page. 26 | -- 27 | -- First try to get one from the in-memory dirty pages. Then try to get one 28 | -- from the in-memory free page cache stored in 'fileStateCachedFreePages'. If 29 | -- that one is empty, actually query one from the free database. 
getFreePageId :: (Functor m, AllocM m, MonadIO m, MonadState (WriterEnv hnd) m)
              => S stateType ()
              -> m (Maybe PageId)
getFreePageId t =
    -- Cache hit wins; only on a cache miss do we touch the free tree.
    runMaybeT $ MaybeT (getCachedFreePageId t)
            <|> MaybeT (queryNewFreePageIds t)

-- | Get a cached free page.
--
-- Get a free page from the free database cache stored in
-- 'fileStateCachedFreePages'.
getCachedFreePageId :: (Functor m, MonadState (WriterEnv hnd) m)
                    => S stateType ()
                    -> m (Maybe PageId)
getCachedFreePageId stateType =
    case stateType of
        DataState () -> do
            s <- writerDataFileState <$> get
            let (pid, s') = query DataState s
            modify' $ \env -> env { writerDataFileState = s' }
            return pid
        IndexState () -> do
            s <- writerIndexFileState <$> get
            let (pid, s') = query IndexState s
            modify' $ \env -> env { writerIndexFileState = s' }
            return pid
  where
    -- Pop the head of the cached free-page list, if any; the updated
    -- 'FileState' no longer contains the popped page.
    query :: (forall a. a -> S t a)
          -> FileState t
          -> (Maybe PageId, FileState t)
    query cons env = case getSValue $ fileStateCachedFreePages env of
        [] -> (Nothing, env)
        FreePage pid : pageIds ->
            let env' = env { fileStateCachedFreePages = cons pageIds } in
            (Just pid, env')

-- | Try to get a list of free pages from the free page database, return the
-- first free one for immediate use, and store the rest in the environment.
--
-- Immediately remove the queried free pages from the free tree.
queryNewFreePageIds :: (AllocM m, MonadIO m, MonadState (WriterEnv hnd) m)
                    => S stateType ()
                    -> m (Maybe PageId)
-- Bail out when free-tree querying is disabled: the flag is turned off while
-- the free tree itself is being modified (see 'query' below), so a re-entrant
-- call cannot allocate from the tree that is currently being edited.
queryNewFreePageIds stateType = ifM (not . writerQueryFreeTreeOn <$> get) (return Nothing) $ do
    flag <- case stateType of
        DataState () ->
            query DataState
                  writerDataFileState
                  (\e s -> e { writerDataFileState = s })

        IndexState () ->
            query IndexState
                  writerIndexFileState
                  (\e s -> e { writerIndexFileState = s })

    -- When 'query' refilled the cache, retry through 'getFreePageId',
    -- which will now be served from the cache.
    if flag then getFreePageId stateType
            else return Nothing
  where
    -- Returns 'True' iff fresh free pages were moved into the in-memory cache.
    query :: (AllocM m, MonadIO m, MonadState (WriterEnv hnd) m)
          => (forall a. a -> S t a)
          -> (forall h. WriterEnv h -> FileState t)
          -> (forall h. WriterEnv h -> FileState t -> WriterEnv h)
          -> m Bool
    query cons getState setState = do
        tree <- gets $ getSValue . fileStateFreeTree . getState

        -- Lookup the oldest free page
        lookupValidFreePageIds tree >>= \case
            Nothing -> return False
            Just (txId, x :| xs) -> do
                -- Save them for reuse
                modify' $ \e ->
                    let s = getState e
                        pids = map FreePage (x:xs)
                    in setState e $
                        s { fileStateCachedFreePages =
                            cons $ pids ++ getSValue (fileStateCachedFreePages s) }

                -- Remove the entry from the tree. Free-tree queries are
                -- disabled around 'deleteSubtree' so that any page it
                -- allocates or frees is served from the cache only, never
                -- from the tree being modified.
                modify' $ \e -> e { writerQueryFreeTreeOn = False }
                tree' <- txId `deleteSubtree` tree
                modify' $ \e -> e { writerQueryFreeTreeOn = True }

                -- Update the tree
                modify' $ \e -> setState e $
                    (getState e) { fileStateFreeTree = cons tree' }

                return True

-- | Lookup a list of free pages from the free page database, guaranteed to be old enough.
lookupValidFreePageIds :: (MonadIO m, AllocReaderM m, MonadState (WriterEnv hnd) m)
                       => FreeTree
                       -> m (Maybe (TxId, NonEmpty PageId))
lookupValidFreePageIds tree = runMaybeT $
    MaybeT (lookupFreePageIds tree) >>= (MaybeT . checkFreePages)

-- | Lookup a list of free pages from the free page database.
127 | lookupFreePageIds :: (Functor m, AllocReaderM m, MonadState (WriterEnv hnd) m) 128 | => FreeTree 129 | -> m (Maybe (Unchecked (TxId, NonEmpty PageId))) 130 | lookupFreePageIds tree = B.lookupMin tree >>= \case 131 | Nothing -> return Nothing 132 | Just (tx, subtree) -> do 133 | pids <- subtreeToList subtree 134 | return . Just $ Unchecked (tx, pids) 135 | where 136 | subtreeToList subtree = NE.map fst <$> NEB.toList subtree 137 | 138 | -- | Auxiliry type to ensure the transaction ID of free pages are checked. 139 | newtype Unchecked a = Unchecked a 140 | 141 | -- | Check the transaction ID of the free pages, if it's to old, return 142 | -- 'Nothing'. 143 | checkFreePages :: (Functor m, MonadIO m, MonadState (WriterEnv hnd) m) 144 | => Unchecked (TxId, NonEmpty PageId) 145 | -> m (Maybe (TxId, NonEmpty PageId)) 146 | checkFreePages (Unchecked v) = do 147 | readers <- writerReaders <$> get 148 | oldest <- liftIO . atomically $ Map.lookupMinKey readers 149 | tx <- writerTxId <$> get 150 | if maybe True (> fst v) oldest && fst v + 1 < tx 151 | then return (Just v) 152 | else return Nothing 153 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/FreePages/Save.hs: -------------------------------------------------------------------------------- 1 | module Database.Haskey.Alloc.Concurrent.Internal.FreePages.Save where 2 | 3 | import Data.List.NonEmpty (NonEmpty((:|))) 4 | 5 | import Data.BTree.Alloc.Class 6 | import Data.BTree.Primitives 7 | 8 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 9 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree 10 | 11 | -- | Save the free pages from the dirty page list and the free page 12 | -- cache. 
13 | saveFreePages :: AllocM m 14 | => TxId 15 | -> FileState t 16 | -> m FreeTree 17 | saveFreePages tx env = saveNewlyFreedPages tx env tree 18 | where 19 | tree = getSValue $ fileStateFreeTree env 20 | 21 | -- | Save the newly free pages of the current transaction, as stored by 22 | -- 'fileStateNewlyFreedPages'. 23 | saveNewlyFreedPages :: AllocM m 24 | => TxId 25 | -> FileState t 26 | -> FreeTree 27 | -> m FreeTree 28 | saveNewlyFreedPages tx env tree = 29 | case newlyFreed of 30 | [] -> deleteSubtree tx tree 31 | x:xs -> replaceSubtree tx (x :| xs) tree 32 | where 33 | newlyFreed = map (\(NewlyFreed pid) -> pid) $ fileStateNewlyFreedPages env 34 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/FreePages/Tree.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE GADTs #-} 2 | {-# LANGUAGE LambdaCase #-} 3 | -- | Module describing the tree structure of the free page database. 4 | module Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree where 5 | 6 | import Control.Monad ((>=>)) 7 | 8 | import Data.Foldable (traverse_) 9 | import Data.List.NonEmpty (NonEmpty) 10 | import qualified Data.List.NonEmpty as NE 11 | 12 | import Data.BTree.Alloc.Class 13 | import Data.BTree.Impure (Tree) 14 | import Data.BTree.Impure.NonEmpty (NonEmptyTree(..)) 15 | import Data.BTree.Primitives 16 | import qualified Data.BTree.Impure as B 17 | import qualified Data.BTree.Impure.NonEmpty as NET 18 | 19 | -- | The main tree structure of the free page database. 20 | -- 21 | -- The main free page database tree maps a 'TxId' to a 'FreeSubtree'. 22 | type FreeTree = Tree TxId FreeSubtree 23 | 24 | -- | the subtree structure of the free page database. 25 | -- 26 | -- Just a collection of free 'PageId's. 27 | type FreeSubtree = NonEmptyTree PageId () 28 | 29 | -- | Replace the subtree of a certain 'TxId'. 
30 | replaceSubtree :: AllocM m 31 | => TxId 32 | -> NonEmpty PageId 33 | -> FreeTree 34 | -> m FreeTree 35 | replaceSubtree tx pids = deleteSubtree tx >=> insertSubtree tx pids 36 | 37 | -- | Delete the subtree of a certain 'TxId'. 38 | -- 39 | -- The 'TxId' will not be present anymore in the free tree after this call. 40 | deleteSubtree :: AllocM m 41 | => TxId 42 | -> FreeTree 43 | -> m FreeTree 44 | deleteSubtree tx tree = B.lookup tx tree >>= \case 45 | Nothing -> return tree 46 | Just (NonEmptyTree h nid) -> do 47 | freeAllNodes h nid 48 | B.delete tx tree 49 | where 50 | freeAllNodes :: (AllocM m, Key key, Value val) 51 | => Height h 52 | -> NodeId h key val 53 | -> m () 54 | freeAllNodes h nid = readNode h nid >>= \case 55 | NET.Leaf _ -> freeNode h nid 56 | NET.Idx idx -> do 57 | let subHgt = decrHeight h 58 | traverse_ (freeAllNodes subHgt) idx 59 | freeNode h nid 60 | 61 | -- | Insert a subtree for a certain 'TxId'. 62 | insertSubtree :: AllocM m 63 | => TxId 64 | -> NonEmpty PageId 65 | -> FreeTree 66 | -> m FreeTree 67 | insertSubtree tx pids tree = do 68 | subtree <- NET.fromList (NE.zip pids (NE.repeat ())) 69 | B.insert tx subtree tree 70 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/Meta.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE DeriveDataTypeable #-} 3 | {-# LANGUAGE DeriveGeneric #-} 4 | {-# LANGUAGE FlexibleContexts #-} 5 | {-# LANGUAGE MultiParamTypeClasses #-} 6 | {-# LANGUAGE StandaloneDeriving #-} 7 | -- | This module implements data structures and function related to the 8 | -- metadata of the concurrent page allocator. 
9 | module Database.Haskey.Alloc.Concurrent.Internal.Meta where 10 | 11 | import Data.Binary (Binary) 12 | import Data.Proxy (Proxy) 13 | import Data.Typeable (Typeable) 14 | 15 | import GHC.Generics (Generic) 16 | 17 | import Data.BTree.Impure.Internal.Structures 18 | import Data.BTree.Primitives 19 | 20 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 21 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Tree 22 | import Database.Haskey.Alloc.Concurrent.Internal.Overflow 23 | import Database.Haskey.Store 24 | 25 | -- | User-defined data root stored inside 'ConcurrentMeta'. 26 | -- 27 | -- This can be a user-defined collection of 'Tree' roots. 28 | class Value root => Root root where 29 | 30 | instance (Key k, Value v) => Root (Tree k v) where 31 | 32 | -- | Data type used to point to the most recent version of the meta data. 33 | data CurrentMetaPage = Meta1 | Meta2 34 | 35 | -- | Meta data of the page allocator. 36 | -- 37 | -- The @root@ type parameter should be a user-defined collection of 'Tree' 38 | -- roots, instantiating the 'Root' type class. 39 | -- 40 | -- To store store a single tree, use @ConcurrentMeta (Tree k v)@. 41 | data ConcurrentMeta root = ConcurrentMeta { 42 | concurrentMetaRevision :: TxId 43 | , concurrentMetaDataNumPages :: S 'TypeData PageId 44 | , concurrentMetaIndexNumPages :: S 'TypeIndex PageId 45 | , concurrentMetaRoot :: root 46 | , concurrentMetaDataFreeTree :: S 'TypeData FreeTree 47 | , concurrentMetaIndexFreeTree :: S 'TypeIndex FreeTree 48 | , concurrentMetaOverflowTree :: OverflowTree 49 | , concurrentMetaDataCachedFreePages :: S 'TypeData [FreePage] 50 | , concurrentMetaIndexCachedFreePages :: S 'TypeIndex [FreePage] 51 | } deriving (Generic, Typeable) 52 | 53 | deriving instance (Show root) => Show (ConcurrentMeta root) 54 | 55 | instance (Binary root) => Binary (ConcurrentMeta root) where 56 | 57 | -- | A class representing the storage requirements of the page allocator. 
58 | -- 59 | -- A store supporting the page allocator should be an instance of this class. 60 | class StoreM FilePath m => ConcurrentMetaStoreM m where 61 | -- | Write the meta-data structure to a certain page. 62 | putConcurrentMeta :: Root root 63 | => FilePath 64 | -> ConcurrentMeta root 65 | -> m () 66 | 67 | -- | Try to read the meta-data structure from a handle, or return 'Nothing' 68 | -- if the handle doesn't contain a meta page. 69 | readConcurrentMeta :: Root root 70 | => FilePath 71 | -> Proxy root 72 | -> m (Maybe (ConcurrentMeta root)) 73 | 74 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/Monad.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleContexts #-} 2 | {-# LANGUAGE FlexibleInstances #-} 3 | {-# LANGUAGE GADTs #-} 4 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 5 | {-# LANGUAGE LambdaCase #-} 6 | {-# LANGUAGE MultiWayIf #-} 7 | {-# LANGUAGE RankNTypes #-} 8 | -- | This module implements the 'ConcurrentT' monad. 9 | -- 10 | -- The 'ConcurrentT' monad is used to implement a page allocator with 11 | -- concurrent readers and serialized writers. 12 | module Database.Haskey.Alloc.Concurrent.Internal.Monad where 13 | 14 | import Control.Applicative (Applicative, (<$>)) 15 | import Control.Monad.Catch 16 | import Control.Monad.State 17 | 18 | import Data.Proxy (Proxy(..)) 19 | 20 | import System.FilePath (()) 21 | 22 | import Data.BTree.Alloc.Class 23 | import Data.BTree.Primitives 24 | 25 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 26 | import Database.Haskey.Alloc.Concurrent.Internal.FreePages.Query 27 | import Database.Haskey.Alloc.Concurrent.Internal.Meta 28 | import Database.Haskey.Alloc.Concurrent.Internal.Overflow 29 | import Database.Haskey.Store 30 | import qualified Database.Haskey.Store.Class as Store 31 | 32 | -- | All necessary database handles. 
33 | data ConcurrentHandles = ConcurrentHandles { 34 | concurrentHandlesRoot :: FilePath 35 | , concurrentHandlesData :: FilePath 36 | , concurrentHandlesIndex :: FilePath 37 | , concurrentHandlesMetadata1 :: FilePath 38 | , concurrentHandlesMetadata2 :: FilePath 39 | , concurrentHandlesOverflowDir :: FilePath 40 | } deriving (Show) 41 | 42 | -- | Construct a set of 'ConcurrentHandles' from a root directory. 43 | concurrentHandles :: FilePath -> ConcurrentHandles 44 | concurrentHandles fp = ConcurrentHandles { 45 | concurrentHandlesRoot = fp 46 | , concurrentHandlesData = fp "data" "data" 47 | , concurrentHandlesIndex = fp "index" "index" 48 | , concurrentHandlesMetadata1 = fp "meta" "1" 49 | , concurrentHandlesMetadata2 = fp "meta" "2" 50 | , concurrentHandlesOverflowDir = fp "overflow" 51 | } 52 | 53 | -- | Monad in which page allocations can take place. 54 | -- 55 | -- The monad has access to a 'ConcurrentMetaStoreM' back-end which manages can 56 | -- store and retreive the corresponding metadata. 57 | newtype ConcurrentT env hnd m a = ConcurrentT { fromConcurrentT :: StateT (env hnd) m a } 58 | deriving (Functor, Applicative, Monad, 59 | MonadIO, MonadThrow, MonadCatch, MonadMask, 60 | MonadState (env hnd)) 61 | 62 | instance MonadTrans (ConcurrentT env hnd) where 63 | lift = ConcurrentT . lift 64 | 65 | -- | Run the actions in an 'ConcurrentT' monad, given a reader or writer 66 | -- environment. 67 | runConcurrentT :: ConcurrentMetaStoreM m 68 | => ConcurrentT env ConcurrentHandles m a 69 | -> env ConcurrentHandles 70 | -> m (a, env ConcurrentHandles) 71 | runConcurrentT m = runStateT (fromConcurrentT m) 72 | 73 | -- | Evaluate the actions in an 'ConcurrentT' monad, given a reader or writer 74 | -- environment. 
75 | evalConcurrentT :: ConcurrentMetaStoreM m 76 | => ConcurrentT env ConcurrentHandles m a 77 | -> env ConcurrentHandles -> 78 | m a 79 | evalConcurrentT m env = fst <$> runConcurrentT m env 80 | 81 | instance 82 | (ConcurrentMetaStoreM m, MonadIO m) 83 | => AllocM (ConcurrentT WriterEnv ConcurrentHandles m) 84 | where 85 | nodePageSize = ConcurrentT Store.nodePageSize 86 | maxPageSize = ConcurrentT Store.maxPageSize 87 | maxKeySize = ConcurrentT Store.maxKeySize 88 | maxValueSize = ConcurrentT Store.maxValueSize 89 | 90 | allocNode height n = do 91 | hnd <- getWriterHnd height 92 | pid <- getAndTouchPid 93 | 94 | let nid = pageIdToNodeId pid 95 | lift $ putNodePage hnd height nid n 96 | return nid 97 | where 98 | getAndTouchPid = getAndTouchFreePageId >>= \case 99 | Just pid -> return pid 100 | Nothing -> newTouchedPid 101 | 102 | getAndTouchFreePageId = case viewHeight height of 103 | UZero -> getFreePageId (DataState ()) >>= \case 104 | Nothing -> return Nothing 105 | Just pid -> do 106 | touchPage (DataState pid) 107 | return (Just pid) 108 | USucc _ -> getFreePageId (IndexState ()) >>= \case 109 | Nothing -> return Nothing 110 | Just pid -> do 111 | touchPage (IndexState pid) 112 | return (Just pid) 113 | 114 | newTouchedPid = case viewHeight height of 115 | UZero -> do 116 | pid <- fileStateNewNumPages . writerDataFileState <$> get 117 | touchPage pid 118 | return $ getSValue pid 119 | USucc _ -> do 120 | pid <- fileStateNewNumPages . writerIndexFileState <$> get 121 | touchPage pid 122 | return $ getSValue pid 123 | 124 | 125 | freeNode height nid = case viewHeight height of 126 | UZero -> freePage (DataState $ nodeIdToPageId nid) 127 | USucc _ -> freePage (IndexState $ nodeIdToPageId nid) 128 | 129 | allocOverflow v = do 130 | root <- concurrentHandlesOverflowDir . 
writerHnds <$> get 131 | oid <- getNewOverflowId 132 | touchOverflow oid 133 | 134 | let hnd = getOverflowHandle root oid 135 | lift $ openHandle hnd 136 | lift $ putOverflow hnd v 137 | lift $ closeHandle hnd 138 | return oid 139 | 140 | freeOverflow oid = overflowType oid >>= \case 141 | Right i -> removeOldOverflow i 142 | Left (DirtyOverflow i) -> deleteOverflowData i 143 | 144 | deleteOverflowData i = do 145 | root <- concurrentHandlesOverflowDir . writerHnds <$> get 146 | lift $ removeHandle (getOverflowHandle root i) 147 | 148 | instance 149 | ConcurrentMetaStoreM m 150 | => AllocReaderM (ConcurrentT WriterEnv ConcurrentHandles m) 151 | where 152 | readNode height nid = do 153 | hnd <- getWriterHnd height 154 | lift $ getNodePage hnd height Proxy Proxy nid 155 | 156 | readOverflow i = do 157 | root <- concurrentHandlesOverflowDir . writerHnds <$> get 158 | readOverflow' root i 159 | 160 | instance 161 | ConcurrentMetaStoreM m 162 | => AllocReaderM (ConcurrentT ReaderEnv ConcurrentHandles m) 163 | where 164 | readNode height nid = do 165 | hnd <- getReaderHnd height 166 | lift $ getNodePage hnd height Proxy Proxy nid 167 | 168 | readOverflow i = do 169 | root <- concurrentHandlesOverflowDir . readerHnds <$> get 170 | readOverflow' root i 171 | 172 | readOverflow' :: (ConcurrentMetaStoreM m, Value v) 173 | => FilePath -> OverflowId -> ConcurrentT env hnd m v 174 | readOverflow' root oid = do 175 | let hnd = getOverflowHandle root oid 176 | lift $ openHandle hnd 177 | v <- lift $ getOverflow hnd Proxy 178 | lift $ closeHandle hnd 179 | return v 180 | 181 | getWriterHnd :: MonadState (WriterEnv ConcurrentHandles) m 182 | => Height height 183 | -> m FilePath 184 | getWriterHnd h = case viewHeight h of 185 | UZero -> gets $ concurrentHandlesData . writerHnds 186 | USucc _ -> gets $ concurrentHandlesIndex . 
writerHnds 187 | 188 | getReaderHnd :: MonadState (ReaderEnv ConcurrentHandles) m 189 | => Height height 190 | -> m FilePath 191 | getReaderHnd h = case viewHeight h of 192 | UZero -> gets $ concurrentHandlesData . readerHnds 193 | USucc _ -> gets $ concurrentHandlesIndex . readerHnds 194 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Concurrent/Internal/Overflow.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE FlexibleContexts #-} 3 | {-# LANGUAGE GADTs #-} 4 | {-# LANGUAGE LambdaCase #-} 5 | -- | Data structures and functions related to handling overflow pages. 6 | module Database.Haskey.Alloc.Concurrent.Internal.Overflow where 7 | 8 | import Control.Applicative ((<$>)) 9 | import Control.Concurrent.STM 10 | import Control.Monad.State 11 | 12 | import Data.Bits (shiftR) 13 | import Data.Foldable (traverse_) 14 | import Data.List.NonEmpty (NonEmpty) 15 | import Data.Maybe (fromMaybe, listToMaybe) 16 | import Data.Word (Word8) 17 | import qualified Data.List.NonEmpty as NE 18 | import qualified Data.Map as M 19 | 20 | import Numeric (showHex, readHex) 21 | 22 | import System.FilePath ((), (<.>), dropExtension, takeFileName) 23 | 24 | import Data.BTree.Alloc.Class 25 | import Data.BTree.Impure (Tree) 26 | import Data.BTree.Impure.NonEmpty (NonEmptyTree(..)) 27 | import Data.BTree.Primitives 28 | import qualified Data.BTree.Impure as B 29 | import qualified Data.BTree.Impure.NonEmpty as NEB 30 | 31 | import Database.Haskey.Alloc.Concurrent.Internal.Environment 32 | import qualified Database.Haskey.Utils.STM.Map as Map 33 | 34 | getNewOverflowId :: (Functor m, MonadState (WriterEnv hnd) m) 35 | => m OverflowId 36 | getNewOverflowId = do 37 | tx <- writerTxId <$> get 38 | c <- writerOverflowCounter <$> get 39 | modify' $ \e -> e { writerOverflowCounter = 1 + writerOverflowCounter e } 40 | return (tx, c) 41 | 42 | 
getOverflowHandle :: FilePath -> OverflowId -> FilePath 43 | getOverflowHandle root (TxId tx, c) = 44 | getOverflowDir root (TxId tx) showHex' tx <.> showHex' c <.> "overflow" 45 | 46 | getOverflowDir :: FilePath -> TxId -> FilePath 47 | getOverflowDir root (TxId tx) = 48 | root lsb1 lsb2 49 | where 50 | lsb1 = showHex' (fromIntegral tx :: Word8) 51 | lsb2 = showHex' (fromIntegral (tx `shiftR` 8) :: Word8) 52 | 53 | readOverflowId :: FilePath -> Maybe OverflowId 54 | readOverflowId fp = parse (dropExtension $ takeFileName fp) 55 | where 56 | parse s = do 57 | (tx, s') <- readHex' s 58 | s'' <- case s' of '.':xs -> return xs 59 | _ -> Nothing 60 | (c, _) <- readHex' s'' 61 | return (tx, c) 62 | 63 | showHex' :: (Integral a, Show a) => a -> String 64 | showHex' = flip showHex "" 65 | 66 | readHex' :: (Eq a, Num a) => String -> Maybe (a, String) 67 | readHex' s = listToMaybe $ readHex s 68 | 69 | -------------------------------------------------------------------------------- 70 | 71 | -- | The main tree structure of the freed overflow page tree 72 | type OverflowTree = Tree TxId OverflowSubtree 73 | 74 | -- | The subtree structure of the freed overflow page tree 75 | type OverflowSubtree = NonEmptyTree OverflowId () 76 | 77 | -- | Save a set of overflow ids that were free'd in the transaction. 78 | insertOverflowIds :: AllocM m 79 | => TxId 80 | -> NonEmpty OverflowId 81 | -> OverflowTree 82 | -> m OverflowTree 83 | insertOverflowIds tx oids tree = do 84 | subtree <- NEB.fromList (NE.zip oids (NE.repeat ())) 85 | B.insert tx subtree tree 86 | 87 | -- | Delete the set of overflow ids that were free'd in the transaction. 
88 | deleteOverflowIds :: AllocM m 89 | => TxId 90 | -> OverflowTree 91 | -> m OverflowTree 92 | deleteOverflowIds tx tree = B.lookup tx tree >>= \case 93 | Nothing -> return tree 94 | Just (NonEmptyTree h nid) -> do 95 | freeAllNodes h nid 96 | B.delete tx tree 97 | where 98 | freeAllNodes :: (AllocM m) 99 | => Height h 100 | -> NodeId h OverflowId () 101 | -> m () 102 | freeAllNodes h nid = readNode h nid >>= \case 103 | l@(NEB.Leaf _) -> freeOverflowInLeaf l >> freeNode h nid 104 | NEB.Idx idx -> do 105 | let subHgt = decrHeight h 106 | traverse_ (freeAllNodes subHgt) idx 107 | freeNode h nid 108 | 109 | freeOverflowInLeaf :: (AllocM m) 110 | => NEB.Node 'Z OverflowId () 111 | -> m () 112 | freeOverflowInLeaf (NEB.Leaf items) = mapM_ deleteOverflowData $ M.keys items 113 | 114 | -------------------------------------------------------------------------------- 115 | 116 | deleteOutdatedOverflowIds :: (Functor m, AllocM m, MonadIO m, 117 | MonadState (WriterEnv hnd) m) 118 | => OverflowTree 119 | -> m (Maybe OverflowTree) 120 | deleteOutdatedOverflowIds tree = do 121 | defaultTx <- writerTxId <$> get 122 | readers <- writerReaders <$> get 123 | oldest <- liftIO . 
atomically $ 124 | fromMaybe defaultTx <$> Map.lookupMinKey readers 125 | 126 | B.lookupMin tree >>= \case 127 | Nothing -> return Nothing 128 | Just (tx, _) -> if tx >= oldest 129 | then return Nothing 130 | else Just <$> go oldest tx tree 131 | where 132 | go oldest tx t = do 133 | t' <- deleteOverflowIds tx t 134 | B.lookupMin t' >>= \case 135 | Nothing -> return t' 136 | Just (tx', _) -> if tx' >= oldest 137 | then return t' 138 | else go oldest tx' t' 139 | 140 | -------------------------------------------------------------------------------- 141 | -------------------------------------------------------------------------------- /src/Database/Haskey/Alloc/Transaction.hs: -------------------------------------------------------------------------------- 1 | -- | This module implements mechanisms to work with transactions. 2 | module Database.Haskey.Alloc.Transaction where 3 | 4 | import Data.BTree.Alloc.Class 5 | 6 | -- | A committed or aborted transaction, with a return value of type @a@. 7 | data Transaction r a = 8 | Commit r a 9 | | Abort a 10 | 11 | -- | Commit the new tree and return a computed value. 12 | commit :: AllocM n => a -> r -> n (Transaction r a) 13 | commit v t = return $ Commit t v 14 | 15 | -- | Commit the new tree, without return a computed value. 16 | commit_ :: AllocM n => r -> n (Transaction r ()) 17 | commit_ = commit () 18 | 19 | -- | Abort the transaction and return a computed value. 20 | abort :: AllocM n => a -> n (Transaction r a) 21 | abort = return . Abort 22 | 23 | -- | Abort the transaction, without returning a computed value. 24 | abort_ :: AllocM n => n (Transaction r ()) 25 | abort_ = return $ Abort () 26 | -------------------------------------------------------------------------------- /src/Database/Haskey/Store.hs: -------------------------------------------------------------------------------- 1 | -- | Storage back-ends that manage physical storage of pages. 
2 | module Database.Haskey.Store ( 3 | module Database.Haskey.Store.Class 4 | ) where 5 | 6 | import Database.Haskey.Store.Class 7 | -------------------------------------------------------------------------------- /src/Database/Haskey/Store/Class.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE DeriveDataTypeable #-} 3 | {-# LANGUAGE FlexibleInstances #-} 4 | {-# LANGUAGE FunctionalDependencies #-} 5 | {-# LANGUAGE MultiParamTypeClasses #-} 6 | {-# LANGUAGE UndecidableInstances #-} 7 | -- | A storage back-end manages physical storage of pages. 8 | module Database.Haskey.Store.Class ( 9 | -- * Class 10 | StoreM(..) 11 | 12 | -- * Helpers 13 | , arbitrarySearch 14 | , calculateMaxKeySize 15 | , calculateMaxValueSize 16 | , ZeroEncoded(..) 17 | ) where 18 | 19 | import Prelude hiding (max, min, pred) 20 | 21 | import Control.Applicative (Applicative) 22 | import Control.Monad.Trans 23 | import Control.Monad.Trans.Reader (ReaderT) 24 | import Control.Monad.Trans.State (StateT) 25 | 26 | import Data.Binary (Binary(..), Get) 27 | import Data.Proxy 28 | import Data.Typeable (Typeable) 29 | import Data.Word (Word8, Word64) 30 | import qualified Data.Map as M 31 | 32 | import Data.BTree.Impure 33 | import Data.BTree.Impure.Internal.Structures 34 | import Data.BTree.Primitives 35 | 36 | -------------------------------------------------------------------------------- 37 | 38 | -- | A storage back-end that can store and fetch physical pages. 39 | class (Applicative m, Monad m) => StoreM hnd m | m -> hnd where 40 | -- | Open a database handle for reading and writing. 41 | openHandle :: hnd -> m () 42 | 43 | -- | Obtain a lock on the given handle, so no other process can access it. 44 | lockHandle :: hnd -> m () 45 | 46 | -- | Release the lock on the given handle, so other processes can access it. 
47 | releaseHandle :: hnd -> m () 48 | 49 | -- | Flush the contents of a handle to disk (or other storage). 50 | flushHandle :: hnd -> m () 51 | 52 | -- | Close a database handle. 53 | closeHandle :: hnd -> m () 54 | 55 | -- | Remove a handle from the storage back-end. 56 | removeHandle :: hnd -> m () 57 | 58 | -- | A function that calculates the hypothetical size of a node, if it were 59 | -- to be written to a page (regardless of the maximum page size). 60 | nodePageSize :: (Key key, Value val) 61 | => m (Height height -> Node height key val -> PageSize) 62 | 63 | -- | The maximum page size the allocator can handle. 64 | maxPageSize :: m PageSize 65 | 66 | -- | Get the maximum key size 67 | -- 68 | -- The default implementation will repeatedly call 'calculateMaxKeySize'. 69 | -- You might want to cache this value in your own implementation. 70 | maxKeySize :: m Word64 71 | maxKeySize = do 72 | f <- nodePageSize 73 | fmax <- maxPageSize 74 | return $ calculateMaxKeySize fmax (f zeroHeight) 75 | 76 | -- | Get the maximum value size 77 | -- 78 | -- The default implementation will repeatedly call 'calculateMaxValueSize'. 79 | -- You might want to cache this value in your own implementation. 80 | maxValueSize :: m Word64 81 | maxValueSize = do 82 | f <- nodePageSize 83 | key <- maxKeySize 84 | fmax <- maxPageSize 85 | return $ calculateMaxValueSize fmax key (f zeroHeight) 86 | 87 | -- | Read a page and return the actual node and the transaction id when the 88 | -- node was written. 89 | getNodePage :: (Key key, Value val) 90 | => hnd 91 | -> Height height 92 | -> Proxy key 93 | -> Proxy val 94 | -> NodeId height key val 95 | -> m (Node height key val) 96 | 97 | -- | Write a node to a physical page. 
98 | putNodePage :: (Key key, Value val) 99 | => hnd 100 | -> Height height 101 | -> NodeId height key val 102 | -> Node height key val 103 | -> m () 104 | 105 | -- | Read a value from an overflow page 106 | getOverflow :: (Value val) 107 | => hnd 108 | -> Proxy val 109 | -> m val 110 | 111 | -- | Write a value to an overflow page 112 | putOverflow :: (Value val) 113 | => hnd 114 | -> val 115 | -> m () 116 | 117 | -- | List overflow pages in the specific overflow directory. 118 | -- 119 | -- The result should include **AT LEAST** the handles in the specified 120 | -- directory, but it may contain more handles, even handles that do not 121 | -- belong to an overflow page. 122 | listOverflows :: hnd -> m [hnd] 123 | 124 | 125 | instance StoreM hnd m => StoreM hnd (StateT s m) where 126 | openHandle = lift. openHandle 127 | lockHandle = lift. lockHandle 128 | releaseHandle = lift. releaseHandle 129 | flushHandle = lift. flushHandle 130 | closeHandle = lift. closeHandle 131 | removeHandle = lift. closeHandle 132 | nodePageSize = lift nodePageSize 133 | maxPageSize = lift maxPageSize 134 | maxKeySize = lift maxKeySize 135 | maxValueSize = lift maxValueSize 136 | getNodePage = ((((lift.).).).). getNodePage 137 | putNodePage = (((lift.).).). putNodePage 138 | getOverflow = (lift.). getOverflow 139 | putOverflow = (lift.). putOverflow 140 | listOverflows = lift. listOverflows 141 | 142 | instance StoreM hnd m => StoreM hnd (ReaderT s m) where 143 | openHandle = lift. openHandle 144 | lockHandle = lift. lockHandle 145 | releaseHandle = lift. releaseHandle 146 | flushHandle = lift. flushHandle 147 | closeHandle = lift. closeHandle 148 | removeHandle = lift. closeHandle 149 | nodePageSize = lift nodePageSize 150 | maxPageSize = lift maxPageSize 151 | maxKeySize = lift maxKeySize 152 | maxValueSize = lift maxValueSize 153 | getNodePage = ((((lift.).).).). getNodePage 154 | putNodePage = (((lift.).).). putNodePage 155 | getOverflow = (lift.). 
getOverflow 156 | putOverflow = (lift.). putOverflow 157 | listOverflows = lift. listOverflows 158 | 159 | -------------------------------------------------------------------------------- 160 | 161 | -- | Calculate the maximum key size. 162 | -- 163 | -- Return the size for which at least 4 key-value pairs with keys and values of 164 | -- that size can fit in a leaf node. 165 | calculateMaxKeySize :: PageSize 166 | -- ^ Maximum pages size 167 | -> (Node 'Z ZeroEncoded ZeroEncoded -> PageSize) 168 | -- ^ Function that calculates the page size of a node 169 | -> Word64 170 | -- ^ Maximum key size 171 | calculateMaxKeySize fmax f = arbitrarySearch 2 pred fmax 172 | where 173 | pred n = f (Leaf $ kvs n) 174 | kvs n = M.fromList 175 | [(ZeroEncoded n i, RawValue $ ZeroEncoded n i) | i <- [1..4]] 176 | 177 | -- | Calculate the maximum value size. 178 | -- 179 | -- Return the size for which at least 4 key-value pairs of the specified 180 | -- maximum key size and values of the returned size can fit in a leaf node. 181 | -- that size can fit in a leaf node. 182 | calculateMaxValueSize :: PageSize 183 | -- ^ Maximum page size 184 | -> Word64 185 | -- ^ Maximum key size 186 | -> (Node 'Z ZeroEncoded ZeroEncoded -> PageSize) 187 | -- ^ Function that calculates the page size of a node 188 | -> Word64 189 | -- ^ Maximum value size 190 | calculateMaxValueSize fmax keySize f = arbitrarySearch 2 pred fmax 191 | where 192 | pred n = f (Leaf $ kvs n) 193 | kvs n = M.fromList 194 | [(ZeroEncoded keySize i, RawValue $ ZeroEncoded n i) | i <- [1..4]] 195 | 196 | -- | Search an arbitrary number, less than a limit, greater than a starting 197 | -- value. 
arbitrarySearch :: (Ord v, Integral n) => n -> (n -> v) -> v -> n
arbitrarySearch start f fmax = expand start
  where
    -- Exponential probing: double the candidate until 'f' meets or exceeds
    -- the limit, then binary-search inside the bracketed interval.
    expand n
        | probe == fmax = n
        | probe > fmax  = bisect (n `quot` 2) n
        | otherwise     = expand (n * 2)
      where
        probe = f n

    -- Bisection on (lo, hi): return the largest candidate whose image stays
    -- below the limit, or an exact hit if one exists.
    bisect lo hi
        | hi - lo == 1  = lo
        | probe == fmax = mid
        | probe > fmax  = bisect lo mid
        | otherwise     = bisect mid hi
      where
        mid   = lo + ((hi - lo) `quot` 2)
        probe = f mid

-- | Data type which encodes the integer using a variable amount of NULL or ONE
-- bytes.
data ZeroEncoded = ZeroEncoded { getZeroEncoded :: Word64
                               , getZeroEncodedValue :: Word64 }
                 deriving (Eq, Ord, Show, Typeable)

instance Binary ZeroEncoded where
    put (ZeroEncoded 0 _) = error "must be >0"
    put (ZeroEncoded 1 0) = put (255 :: Word8)
    put (ZeroEncoded 1 _) = error "value too large"
    put (ZeroEncoded n v) = put lowByte >> put (ZeroEncoded (n - 1) rest)
      where
        lowByte = fromIntegral $ v `rem` 255 :: Word8
        rest    = v `quot` 255

    get = do
        b <- get :: Get Word8
        case b of
            0 -> return (ZeroEncoded 1 0)
            -- NOTE(review): decoding recovers only the length; the encoded
            -- value is dropped. Presumably fine since this type is used for
            -- size estimation only — confirm.
            _ -> do
                rest <- get
                return $ ZeroEncoded (getZeroEncoded rest + 1) 0

instance Key ZeroEncoded where
instance Value ZeroEncoded where

--------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /src/Database/Haskey/Store/File.hs:
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase
#-} 9 | {-# LANGUAGE MultiParamTypeClasses #-} 10 | {-# LANGUAGE ScopedTypeVariables #-} 11 | -- | On-disk storage back-end. Can be used as a storage back-end for the 12 | -- append-only page allocator (see "Data.BTree.Alloc"). 13 | module Database.Haskey.Store.File ( 14 | -- * Storage 15 | Page(..) 16 | , FileStoreConfig(..) 17 | , defFileStoreConfig 18 | , fileStoreConfigWithPageSize 19 | , FileStoreT 20 | , runFileStoreT 21 | 22 | -- * Binary encoding 23 | , encodeAndPad 24 | 25 | -- * Exceptions 26 | , FileNotFoundError(..) 27 | , PageOverflowError(..) 28 | , WrongNodeTypeError(..) 29 | , WrongOverflowValueError(..) 30 | ) where 31 | 32 | import Control.Applicative (Applicative, (<$>)) 33 | import Control.Monad 34 | import Control.Monad.Catch 35 | import Control.Monad.IO.Class 36 | import Control.Monad.Reader 37 | import Control.Monad.State.Class 38 | import Control.Monad.Trans.State.Strict ( StateT, evalStateT) 39 | 40 | import Data.Map (Map) 41 | import Data.Maybe (fromJust) 42 | import Data.Monoid ((<>)) 43 | import Data.Typeable (Typeable, cast) 44 | import Data.Word (Word64) 45 | import qualified Data.ByteString.Lazy as BL 46 | import qualified Data.Map as M 47 | 48 | import qualified FileIO as IO 49 | 50 | import System.Directory (createDirectoryIfMissing, removeFile, getDirectoryContents) 51 | import System.FilePath (takeDirectory) 52 | import System.IO.Error (ioError, isDoesNotExistError) 53 | 54 | import Data.BTree.Impure.Internal.Structures 55 | import Data.BTree.Primitives 56 | 57 | import Database.Haskey.Alloc.Concurrent 58 | import Database.Haskey.Store.Class 59 | import Database.Haskey.Store.Page 60 | import Database.Haskey.Utils.IO (readByteString, writeLazyByteString) 61 | import Database.Haskey.Utils.Monad.Catch (justErrM) 62 | 63 | -------------------------------------------------------------------------------- 64 | 65 | -- | Encode a page padding it to the maxim page size. 
Return 'Nothing' if the page
109 | data FileStoreConfig = FileStoreConfig { 110 | fileStoreConfigPageSize :: !PageSize 111 | , fileStoreConfigMaxKeySize :: !Word64 112 | , fileStoreConfigMaxValueSize :: !Word64 113 | } deriving (Show) 114 | 115 | -- | The default configuration 116 | -- 117 | -- This is an unwrapped 'fileStoreConfigWithPageSize' with a page size of 4096 118 | -- bytes. 119 | defFileStoreConfig :: FileStoreConfig 120 | defFileStoreConfig = fromJust (fileStoreConfigWithPageSize 4096) 121 | 122 | -- | Create a configuration with a specific page size. 123 | -- 124 | -- The maximum key and value sizes are calculated using 'calculateMaxKeySize' 125 | -- and 'calculateMaxValueSize'. 126 | -- 127 | -- If the page size is too small, 'Nothing' is returned. 128 | fileStoreConfigWithPageSize :: PageSize -> Maybe FileStoreConfig 129 | fileStoreConfigWithPageSize pageSize 130 | | keySize < 8 && valueSize < 8 = Nothing 131 | | otherwise = Just FileStoreConfig { 132 | fileStoreConfigPageSize = pageSize 133 | , fileStoreConfigMaxKeySize = keySize 134 | , fileStoreConfigMaxValueSize = valueSize } 135 | where 136 | keySize = calculateMaxKeySize pageSize (encodedPageSize zeroHeight) 137 | valueSize = calculateMaxValueSize pageSize keySize (encodedPageSize zeroHeight) 138 | 139 | -- | Run the storage operations in the 'FileStoreT' monad, given a collection of 140 | -- open files. 
-- | Run the storage operations in the 'FileStoreT' monad, given a
-- configuration. Starts with an empty table of open file handles.
runFileStoreT :: Monad m
              => FileStoreT FilePath m a -- ^ Action
              -> FileStoreConfig         -- ^ Configuration
              -> m a
runFileStoreT m config = evalStateT (runReaderT (fromFileStoreT m) config) M.empty

--------------------------------------------------------------------------------

-- | File-backed storage: each @fp@ handle is a file on disk, and the state
-- carries the table of currently open OS file handles.
instance (Applicative m, Monad m, MonadIO m, MonadThrow m) =>
    StoreM FilePath (FileStoreT FilePath m)
  where
    -- Open (and create, including parent directories) the backing file,
    -- unless a handle for this path is already in the open-handle table.
    openHandle fp = do
        alreadyOpen <- M.member fp <$> get
        unless alreadyOpen $ do
            liftIO $ createDirectoryIfMissing True (takeDirectory fp)
            fh <- liftIO $ IO.openReadWrite fp
            modify $ M.insert fp fh

    -- Take a prefix lock for the database directory; the lock value itself
    -- is discarded ('void') — release reconstructs it from the prefix.
    lockHandle = void . liftIO . IO.obtainPrefixLock

    releaseHandle = liftIO . IO.releasePrefixLock . IO.prefixLockFromPrefix

    -- Flush pending writes of an open handle to disk.
    flushHandle fp = do
        fh <- get >>= lookupHandle fp
        liftIO $ IO.flush fh

    -- Flush, close, and drop the handle from the open-handle table.
    closeHandle fp = do
        fh <- get >>= lookupHandle fp
        liftIO $ IO.flush fh
        liftIO $ IO.close fh
        modify (M.delete fp)

    -- Best-effort removal: a missing file is not an error, any other
    -- IO error is re-thrown.
    removeHandle fp =
        liftIO $ removeFile fp `catchIOError` \e ->
            unless (isDoesNotExistError e) (ioError e)

    nodePageSize = return encodedPageSize
    maxPageSize = asks fileStoreConfigPageSize
    maxKeySize = asks fileStoreConfigMaxKeySize
    maxValueSize = asks fileStoreConfigMaxValueSize

    -- Read one fixed-size page at offset (pageId * pageSize), decode it as
    -- a leaf or index node page depending on the requested height, and cast
    -- the decoded node to the requested height (throws 'WrongNodeTypeError'
    -- if the stored node's height does not match).
    getNodePage fp height key val nid = do
        h <- get >>= lookupHandle fp
        size <- maxPageSize

        let PageId pid = nodeIdToPageId nid
            offset = fromIntegral $ pid * fromIntegral size

        liftIO $ IO.seek h offset
        bs <- liftIO $ readByteString h (fromIntegral size)

        case viewHeight height of
            UZero -> decodeM (leafNodePage height key val) bs >>= \case
                LeafNodePage hgtSrc tree ->
                    justErrM WrongNodeTypeError $ castNode hgtSrc height tree
            USucc _ -> decodeM (indexNodePage height key val) bs >>= \case
                IndexNodePage hgtSrc tree ->
                    justErrM WrongNodeTypeError $ castNode hgtSrc height tree

    -- Encode a node, pad it to exactly one page, and write it at offset
    -- (pageId * pageSize). 'encodeAndPad' returning 'Nothing' means the
    -- encoded node does not fit in a page: 'PageOverflowError'.
    putNodePage fp hgt nid node = do
        h <- get >>= lookupHandle fp
        size <- maxPageSize

        let PageId pid = nodeIdToPageId nid
            offset = fromIntegral $ pid * fromIntegral size

        liftIO $ IO.seek h offset
        bs <- justErrM PageOverflowError $ pg size
        liftIO $ writeLazyByteString h bs
      where
        pg size = case viewHeight hgt of
            UZero -> encodeAndPad size $ LeafNodePage hgt node
            USucc _ -> encodeAndPad size $ IndexNodePage hgt node

    -- An overflow value occupies a whole file of its own: read the entire
    -- file, decode the overflow page, and cast the value to the requested
    -- type (throws 'WrongOverflowValueError' on a type mismatch).
    getOverflow fp val = do
        h <- get >>= lookupHandle fp

        len <- liftIO $ IO.getFileSize h
        liftIO $ IO.seek h 0
        bs <- liftIO $ readByteString h (fromIntegral len)
        n <- decodeM (overflowPage val) bs
        case n of
            OverflowPage v -> justErrM WrongOverflowValueError $ castValue v

    -- Write an overflow value as the full contents of its file, truncating
    -- or extending the file to exactly the encoded length first.
    putOverflow fp val = do
        fh <- get >>= lookupHandle fp
        liftIO $ IO.setFileSize fh (fromIntegral $ BL.length bs)
        liftIO $ IO.seek fh 0
        liftIO $ writeLazyByteString fh bs
      where
        bs = encode $ OverflowPage val

    -- List overflow files by listing the directory; a missing directory
    -- simply means "no overflows yet".
    listOverflows dir = liftIO $ getDirectoryContents dir `catch` catch'
      where catch' e | isDoesNotExistError e = return []
                     | otherwise = ioError e

--------------------------------------------------------------------------------

-- | Meta-page storage for the concurrent allocator, on top of the same
-- file handles.
instance (Applicative m, Monad m, MonadIO m, MonadCatch m) =>
    ConcurrentMetaStoreM (FileStoreT FilePath m)
  where
    -- Write the meta page as the entire contents of the meta file,
    -- truncating/extending the file to the encoded length first.
    putConcurrentMeta fp meta = do
        h <- get >>= lookupHandle fp

        let page = ConcurrentMetaPage meta
            bs = encode page
        liftIO $ IO.setFileSize h (fromIntegral $ BL.length bs)
        liftIO $ IO.seek h 0
        liftIO $ writeLazyByteString h bs

    -- Read the whole meta file and try to decode it. A 'DecodeError' (e.g. a
    -- torn or not-yet-written meta page) is mapped to 'Nothing' rather than
    -- propagated; a successfully decoded meta is 'cast' to the caller's root
    -- type (also 'Nothing' on mismatch).
    readConcurrentMeta fp root = do
        fh <- get >>= lookupHandle fp

        len <- liftIO $ IO.getFileSize fh
        liftIO $ IO.seek fh 0
        bs <- liftIO $ readByteString fh (fromIntegral len)
        handle handle' (Just <$> decodeM (concurrentMetaPage root) bs) >>= \case
            Just (ConcurrentMetaPage meta) -> return $! cast meta
            Nothing -> return Nothing
      where
        handle' (DecodeError _) = return Nothing

--------------------------------------------------------------------------------

-- | Exception thrown when a file is accessed that doesn't exist.
newtype FileNotFoundError hnd = FileNotFoundError hnd deriving (Show, Typeable)

instance (Typeable hnd, Show hnd) => Exception (FileNotFoundError hnd) where

-- | Exception thrown when a page that is too large is written.
--
-- As used in 'putNodePage'.
data PageOverflowError = PageOverflowError deriving (Show, Typeable)

instance Exception PageOverflowError where

-- | Exception thrown when a node cannot be cast to the right type.
--
-- As used in 'getNodePage'.
data WrongNodeTypeError = WrongNodeTypeError deriving (Show, Typeable)

instance Exception WrongNodeTypeError where

-- | Exception thrown when a value from an overflow page cannot be cast.
--
-- As used in 'getOverflow'.
data WrongOverflowValueError = WrongOverflowValueError deriving (Show, Typeable)

instance Exception WrongOverflowValueError where

--------------------------------------------------------------------------------
-- File: src/Database/Haskey/Store/InMemory.hs
--------------------------------------------------------------------------------

{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE InstanceSigs #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE ScopedTypeVariables #-}
-- | Binary in-memory storage back-end. Can be used as a storage back-end for
-- the append-only page allocator (see "Data.BTree.Alloc").
module Database.Haskey.Store.InMemory (
  -- * Storage
  Page(..)
, MemoryFile
, MemoryFiles
, MemoryStoreConfig(..)
, defMemoryStoreConfig
, memoryStoreConfigWithPageSize
, MemoryStoreT
, runMemoryStoreT
, newEmptyMemoryStore

  -- * Exceptions
, FileNotFoundError(..)
, PageNotFoundError(..)
, WrongNodeTypeError(..)
, WrongOverflowValueError(..)
) where

import Control.Applicative (Applicative, (<$>))
import Control.Concurrent.MVar
import Control.Monad
import Control.Monad.Catch
import Control.Monad.IO.Class
import Control.Monad.Reader

import Data.ByteString (ByteString)
import Data.ByteString.Lazy (toStrict)
import Data.Map (Map)
import Data.Maybe (fromJust)
import Data.Typeable (Typeable, cast)
import Data.Word (Word64)
import qualified Data.Map as M

import Data.BTree.Impure.Internal.Structures
import Data.BTree.Primitives

import Database.Haskey.Alloc.Concurrent
import Database.Haskey.Store.Class
import Database.Haskey.Store.Page
import Database.Haskey.Utils.Monad.Catch (justErrM)

--------------------------------------------------------------------------------

-- | A file containing a collection of pages.
type MemoryFile = Map PageId ByteString

-- | A collection of 'File's, each associated with a certain @fp@ handle.
--
-- This is shareable amongst multiple threads.
type MemoryFiles fp = MVar (Map fp MemoryFile)

-- | Access the files.
--
-- Reads (a snapshot of) the shared 'MVar'; does not block other readers.
get :: MonadIO m => MemoryStoreT fp m (Map fp MemoryFile)
get = MemoryStoreT . lift $ ask >>= liftIO . readMVar

-- | Access the files, projecting a value out of the snapshot.
gets :: (Functor m, MonadIO m)
     => (Map fp MemoryFile -> a)
     -> MemoryStoreT fp m a
gets f = f <$> get

-- | Modify the files.
--
-- Applies a pure update atomically via 'modifyMVar_' on the shared 'MVar'.
modify' :: MonadIO m =>
           (Map fp MemoryFile -> Map fp MemoryFile)
        -> MemoryStoreT fp m ()
modify' f = MemoryStoreT . lift $ ask >>= liftIO . flip modifyMVar_ (return . f)

-- | Look up a file by handle, throwing 'FileNotFoundError' when absent.
lookupFile :: (MonadThrow m, Ord fp, Show fp, Typeable fp)
           => fp -> Map fp MemoryFile -> m MemoryFile
lookupFile fp m = justErrM (FileNotFoundError fp) $ M.lookup fp m

-- | Look up a single page of a file, throwing 'FileNotFoundError' when the
-- file is absent and 'PageNotFoundError' when the page is absent.
lookupPage :: (Functor m, MonadThrow m, Ord fp, Show fp, Typeable fp)
           => fp -> PageId -> Map fp MemoryFile -> m ByteString
lookupPage fp pid m = M.lookup pid <$> lookupFile fp m
                  >>= justErrM (PageNotFoundError fp pid)

-- | Monad in which binary storage operations can take place.
--
-- Two important instances are 'StoreM' making it a storage back-end, and
-- 'ConcurrentMetaStoreM' making it a storage back-end compatible with the
-- concurrent page allocator.
newtype MemoryStoreT fp m a = MemoryStoreT
    { fromMemoryStoreT :: ReaderT MemoryStoreConfig (ReaderT (MemoryFiles fp) m) a
    } deriving (Applicative, Functor, Monad,
                MonadIO, MonadThrow, MonadCatch, MonadMask,
                MonadReader MemoryStoreConfig)

-- | Memory store configuration.
--
-- The default configuration can be obtained by using 'defMemoryStoreConfig'.
--
-- A configuration with a specific page size can be obtained by using
-- 'memoryStoreConfigWithPageSize'.
data MemoryStoreConfig = MemoryStoreConfig {
    memoryStoreConfigPageSize :: !PageSize
  , memoryStoreConfigMaxKeySize :: !Word64
  , memoryStoreConfigMaxValueSize :: !Word64
  } deriving (Show)

-- | The default configuration.
--
-- This is an unwrapped 'memoryStoreConfigWithPageSize' with a page size of
-- 4096.
--
-- NOTE: 'fromJust' is safe here only as long as a 4096-byte page yields
-- key/value limits above the minimum enforced by
-- 'memoryStoreConfigWithPageSize'.
defMemoryStoreConfig :: MemoryStoreConfig
defMemoryStoreConfig = fromJust (memoryStoreConfigWithPageSize 4096)

-- | Create a configuration with a specific page size.
--
-- The maximum key and value sizes are calculated using 'calculateMaxKeySize'
-- and 'calculateMaxValueSize'.
--
-- If the page size is too small, 'Nothing' is returned.
-- (Haddock comment for this function precedes it in the file: "Create a
-- configuration with a specific page size. ... If the page size is too
-- small, 'Nothing' is returned.")
memoryStoreConfigWithPageSize :: PageSize -> Maybe MemoryStoreConfig
memoryStoreConfigWithPageSize pageSize
    -- BUGFIX: this guard previously used '&&', so the page size was only
    -- rejected when /both/ derived maxima were below 8. A page size whose
    -- derived key limit (or value limit) alone was too small would slip
    -- through and yield an unusable configuration, contradicting the
    -- documented contract. Reject as soon as /either/ limit is below 8.
    | keySize < 8 || valueSize < 8 = Nothing
    | otherwise = Just MemoryStoreConfig {
          memoryStoreConfigPageSize = pageSize
        , memoryStoreConfigMaxKeySize = keySize
        , memoryStoreConfigMaxValueSize = valueSize }
  where
    -- Limits derived from the page size; 'encodedPageSize zeroHeight' is the
    -- leaf-page size estimator passed to the haskey-btree helpers.
    keySize = calculateMaxKeySize pageSize (encodedPageSize zeroHeight)
    valueSize = calculateMaxValueSize pageSize keySize (encodedPageSize zeroHeight)

-- | Run the storage operations in the 'MemoryStoreT' monad, given a collection of
-- 'File's.
runMemoryStoreT :: MemoryStoreT fp m a -- ^ Action to run
                -> MemoryStoreConfig   -- ^ Configuration
                -> MemoryFiles fp      -- ^ Data
                -> m a
runMemoryStoreT m config = runReaderT (runReaderT (fromMemoryStoreT m) config)

-- | Construct a store with an empty database with name of type @hnd@.
newEmptyMemoryStore :: IO (MemoryFiles hnd)
newEmptyMemoryStore = newMVar M.empty

--------------------------------------------------------------------------------

-- | In-memory storage: each @fp@ handle maps to a 'MemoryFile' (a map from
-- 'PageId' to the page's bytes). Locking/flushing/closing are no-ops.
instance (Applicative m, Monad m, MonadIO m, MonadThrow m,
          Ord fp, Show fp, Typeable fp) =>
    StoreM fp (MemoryStoreT fp m)
  where
    -- Create an empty file for the handle, keeping an existing one intact.
    openHandle fp =
        modify' $ M.insertWith (\_new old -> old) fp M.empty

    lockHandle _ = return ()

    releaseHandle _ = return ()

    flushHandle _ = return ()

    closeHandle _ = return ()

    removeHandle fp =
        modify' $ M.delete fp

    nodePageSize = return encodedPageSize
    maxPageSize = asks memoryStoreConfigPageSize
    maxKeySize = asks memoryStoreConfigMaxKeySize
    maxValueSize = asks memoryStoreConfigMaxValueSize

    -- Look up the page by id, decode it as a leaf or index node page
    -- depending on the requested height, and cast the decoded node to that
    -- height (throws 'WrongNodeTypeError' on a height mismatch).
    getNodePage hnd h key val nid = do
        bs <- get >>= lookupPage hnd (nodeIdToPageId nid)
        case viewHeight h of
            UZero -> decodeM (leafNodePage h key val) bs >>= \case
                LeafNodePage heightSrc n ->
                    justErrM WrongNodeTypeError $ castNode heightSrc h n
            USucc _ -> decodeM (indexNodePage h key val) bs >>= \case
                IndexNodePage heightSrc n ->
                    justErrM WrongNodeTypeError $ castNode heightSrc h n

    -- Encode the node and store it under its page id. 'M.update' silently
    -- does nothing when the handle was never opened — presumably the
    -- allocator always opens handles first; TODO confirm.
    putNodePage hnd height nid node =
        modify' $ M.update (Just . M.insert (nodeIdToPageId nid) pg) hnd
      where
        pg = case viewHeight height of
            UZero -> toStrict . encode $ LeafNodePage height node
            USucc _ -> toStrict . encode $ IndexNodePage height node

    -- An overflow value occupies page 0 of its own dedicated handle.
    getOverflow hnd val = do
        bs <- get >>= lookupPage hnd 0
        decodeM (overflowPage val) bs >>= \case
            OverflowPage v -> justErrM WrongOverflowValueError $ castValue v

    putOverflow hnd val =
        modify' $ M.update (Just . M.insert 0 pg) hnd
      where
        pg = toStrict . encode $ OverflowPage val

    -- Overflow "files" are top-level handles, so listing overflows is
    -- listing the handle keys.
    listOverflows _ = gets M.keys

--------------------------------------------------------------------------------

-- | Meta-page storage for the concurrent allocator: the meta page lives at
-- page 0 of the meta handle.
instance (Applicative m, Monad m, MonadIO m, MonadCatch m) =>
    ConcurrentMetaStoreM (MemoryStoreT FilePath m)
  where
    putConcurrentMeta h meta =
        modify' $ M.update (Just . M.insert 0 pg) h
      where
        pg = toStrict . encode $ ConcurrentMetaPage meta

    -- Decode page 0 of the meta handle; an absent handle/page or a
    -- 'DecodeError' yields 'Nothing', and a decoded meta is 'cast' to the
    -- caller's root type (also 'Nothing' on mismatch).
    readConcurrentMeta hnd root = do
        maybeBs <- gets (M.lookup hnd >=> M.lookup 0)
        case maybeBs of
            Nothing -> return Nothing
            Just bs ->
                handle handle' (Just <$> decodeM (concurrentMetaPage root) bs) >>= \case
                    Just (ConcurrentMetaPage meta) -> return $! cast meta
                    Nothing -> return Nothing
      where
        handle' (DecodeError _) = return Nothing

--------------------------------------------------------------------------------

-- | Exception thrown when a file is accessed that doesn't exist.
227 | newtype FileNotFoundError hnd = FileNotFoundError hnd deriving (Show, Typeable) 228 | 229 | instance (Typeable hnd, Show hnd) => Exception (FileNotFoundError hnd) where 230 | 231 | -- | Exception thrown when a page that is accessed doesn't exist. 232 | data PageNotFoundError hnd = PageNotFoundError hnd PageId deriving (Show, Typeable) 233 | 234 | instance (Typeable hnd, Show hnd) => Exception (PageNotFoundError hnd) where 235 | 236 | -- | Exception thrown when a node cannot be cast to the right type. 237 | -- 238 | -- As used in 'getNodePage'. 239 | data WrongNodeTypeError = WrongNodeTypeError deriving (Show, Typeable) 240 | 241 | instance Exception WrongNodeTypeError where 242 | 243 | -- | Exception thrown when a value from an overflow page cannot be cast. 244 | -- 245 | -- As used in 'getOverflow'. 246 | data WrongOverflowValueError = WrongOverflowValueError deriving (Show, Typeable) 247 | 248 | instance Exception WrongOverflowValueError where 249 | 250 | -------------------------------------------------------------------------------- 251 | 252 | -------------------------------------------------------------------------------- /src/Database/Haskey/Store/Page.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE DeriveDataTypeable #-} 3 | {-# LANGUAGE ExistentialQuantification #-} 4 | {-# LANGUAGE FlexibleContexts #-} 5 | {-# LANGUAGE GADTs #-} 6 | {-# LANGUAGE KindSignatures #-} 7 | {-# LANGUAGE LambdaCase #-} 8 | {-# LANGUAGE OverloadedStrings #-} 9 | -- | This module contains structures and functions to decode and encode 10 | -- pages. 
11 | module Database.Haskey.Store.Page where 12 | 13 | import Codec.Compression.LZ4 14 | 15 | import Control.Applicative ((<$>)) 16 | import Control.Monad.Catch 17 | 18 | import Data.Binary (Binary(..), Put, Get) 19 | import Data.Binary.Get (runGetOrFail) 20 | import Data.Binary.Put (runPut) 21 | import Data.Bits ((.&.), (.|.)) 22 | import Data.ByteString (ByteString) 23 | import Data.ByteString.Lazy (fromStrict, toStrict) 24 | import Data.Digest.XXHash.FFI (xxh64) 25 | import Data.Maybe (fromMaybe) 26 | import Data.Proxy 27 | import Data.Typeable (Typeable) 28 | import Data.Word (Word8, Word64) 29 | import qualified Data.Binary as B 30 | import qualified Data.ByteString as BS 31 | import qualified Data.ByteString.Lazy as BL 32 | 33 | import Numeric (showHex) 34 | 35 | import Data.BTree.Impure 36 | import Data.BTree.Impure.Internal.Structures (putLeafNode, getLeafNode, putIndexNode, getIndexNode) 37 | import Data.BTree.Primitives 38 | 39 | import Database.Haskey.Alloc.Concurrent 40 | 41 | -- | The type of a page. 
-- | The type of a page.
data PageType = TypeEmpty
              | TypeConcurrentMeta
              | TypeOverflow
              | TypeLeafNode
              | TypeIndexNode
              deriving (Eq, Show)

-- | Singleton witness connecting a term-level tag to the type-level
-- 'PageType' of a decoded 'Page'.
data SPageType t where
    STypeEmpty          :: SPageType 'TypeEmpty
    STypeConcurrentMeta :: SPageType 'TypeConcurrentMeta
    STypeOverflow       :: SPageType 'TypeOverflow
    STypeLeafNode       :: SPageType 'TypeLeafNode
    STypeIndexNode      :: SPageType 'TypeIndexNode

-- Page-type tags are encoded as a single byte. Only the high bits are used;
-- the low bit is reserved for the (currently disabled) compression flag, see
-- 'encodeNoChecksum'. These byte values are on-disk format: do not change.
instance Binary PageType where
    put TypeEmpty          = put (0x00 :: Word8)
    put TypeConcurrentMeta = put (0x20 :: Word8)
    put TypeOverflow       = put (0x40 :: Word8)
    put TypeLeafNode       = put (0x60 :: Word8)
    put TypeIndexNode      = put (0x80 :: Word8)
    get = (get :: Get Word8) >>= \case
        0x00 -> return TypeEmpty
        0x20 -> return TypeConcurrentMeta
        0x40 -> return TypeOverflow
        0x60 -> return TypeLeafNode
        0x80 -> return TypeIndexNode
        t    -> fail $ "unknown page type: " ++ showHex t ""

-- | A decoded page, of a certain type @t@ of kind 'PageType'.
data Page (t :: PageType) where
    EmptyPage :: Page 'TypeEmpty
    ConcurrentMetaPage :: Root root
                       => ConcurrentMeta root
                       -> Page 'TypeConcurrentMeta
    OverflowPage :: (Value v)
                 => v
                 -> Page 'TypeOverflow
    LeafNodePage :: (Key k, Value v)
                 => Height 'Z
                 -> Node 'Z k v
                 -> Page 'TypeLeafNode
    IndexNodePage :: (Key k, Value v)
                  => Height ('S h)
                  -> Node ('S h) k v
                  -> Page 'TypeIndexNode

-- | A decoder with its type.
data SGet t = SGet (SPageType t) (Get (Page t))

-- | Get the type of a 'Page'.
pageType :: SPageType t -> PageType
pageType STypeEmpty          = TypeEmpty
pageType STypeConcurrentMeta = TypeConcurrentMeta
pageType STypeOverflow       = TypeOverflow
pageType STypeLeafNode       = TypeLeafNode
pageType STypeIndexNode      = TypeIndexNode

-- | Encode a page to a lazy byte string, but with the checksum set to zero.
--
-- Used for size estimation only ('encodedPageSize'): the 8 zero bytes stand
-- in for the 64-bit checksum so the length matches a real encoding.
encodeZeroChecksum :: Page t -> BL.ByteString
encodeZeroChecksum p = zero `BL.append` encodeNoChecksum p
  where zero = "\NUL\NUL\NUL\NUL\NUL\NUL\NUL\NUL"

-- | Encode a page to a lazy byte string, and prepend the calculated checksum.
encode :: Page t -> BL.ByteString
encode = prependChecksum . encodeNoChecksum

-- | Prepend the xxHash 64-bit checksum of a bytestring to itself.
prependChecksum :: BL.ByteString -> BL.ByteString
prependChecksum bs = B.encode (xxh64 bs checksumSeed :: Word64) `BL.append` bs

-- | Encode a page to a lazy byte string, without prepending the checksum.
--
-- LZ4 compression is deliberately disabled: '_tryCompress' is kept (unused)
-- for reference. When enabled it compresses the page body and sets the low
-- bit of the type byte as the "compressed" marker.
encodeNoChecksum :: Page t -> BL.ByteString
encodeNoChecksum = runPut . putPage
-- let bs = runPut (putPage p) in fromMaybe bs (tryCompress bs)
  where
    _tryCompress bs = do
        (t, body) <- BL.uncons bs
        c <- compress (toStrict body)
        -- Only keep the compressed form when it is actually smaller.
        if BS.length c < fromIntegral (BL.length bs)
            then Just $ maskCompressed t `BL.cons` fromStrict c
            else Nothing

    maskCompressed t = t .|. 0x01

-- | Size of a node, if it were to be encoded.
encodedPageSize :: (Key k, Value v) => Height h -> Node h k v -> PageSize
encodedPageSize h = case viewHeight h of
    UZero   -> fromIntegral . BL.length . encodeZeroChecksum . LeafNodePage h
    USucc _ -> fromIntegral . BL.length . encodeZeroChecksum . IndexNodePage h

-- | Decode a page, and verify the checksum.
-- A page on the wire is: 8-byte big-endian xxHash-64 checksum, then the
-- body (1 type byte + payload). 'decode' validates the checksum before
-- handing the body to the type-specific decoder.
decode :: SGet t -> ByteString -> Either String (Page t)
decode g@(SGet t _) bs = do
    let (cksumBs, body) = BS.splitAt 8 bs
    -- Guard against truncated input: fewer than 8 bytes cannot even hold
    -- the checksum field.
    cksum <- if BS.length cksumBs < 8
        then Left $ "could not decode " ++ show (pageType t) ++ ": "
                 ++ "not enough checksum bytes"
        else Right $ B.decode (fromStrict cksumBs)
    -- Recompute the checksum over the body and compare with the stored one.
    let cksum' = xxh64 body checksumSeed
    if cksum' /= cksum
        then Left $ "could not decode " ++ show (pageType t) ++ ": "
                 ++ "expected checksum " ++ show cksum' ++ " but checksum "
                 ++ "field contains " ++ show cksum
        else decodeNoChecksum g body


-- | Decode a page with a specific decoder, or return the error.
decodeNoChecksum :: SGet t -> ByteString -> Either String (Page t)
decodeNoChecksum (SGet t g) bs = case runGetOrFail g (fromStrict bs) of
    Left err -> Left $ err' err
    Right (_, _, v) -> Right v
  where
    err' (bs', offset, err) =
        "could not decode " ++ show (pageType t) ++ ": " ++ err ++
        "at pos " ++ show offset ++ ", remaining bytes: " ++ show bs' ++
        ", full body: " ++ show bs

    -- Unused: counterpart of the disabled compression in 'encodeNoChecksum'.
    -- When the low bit of the type byte is set, the payload is LZ4-compressed.
    _decompressed = fromMaybe (fromStrict bs) $ do
        (tb, body) <- BS.uncons bs
        if isCompressed tb
            then do
                c <- decompress body
                Just $ unmaskCompressed tb `BL.cons` fromStrict c
            else Nothing

    isCompressed b = b .&. 0x01 == 0x01
    unmaskCompressed b = b .&. 0xFE


-- | Monadic wrapper around 'decode'
--
-- Throws 'DecodeError' instead of returning 'Left'.
decodeM :: MonadThrow m => SGet t -> ByteString -> m (Page t)
decodeM g bs = case decode g bs of
    Left err -> throwM $ DecodeError err
    Right v -> return v

-- | The encoder of a 'Page'.
--
-- Note: a leaf page does NOT encode its height (always zero), while an
-- index page does — 'indexNodePage' checks it on decode.
putPage :: Page t -> Put
putPage EmptyPage = put TypeEmpty
putPage (ConcurrentMetaPage m) = put TypeConcurrentMeta >> put m
putPage (OverflowPage v) = put TypeOverflow >> put v
putPage (LeafNodePage _ n) = put TypeLeafNode >> putLeafNode n
putPage (IndexNodePage h n) = put TypeIndexNode >> put h >> putIndexNode n

-- | Decoder for an empty page.
emptyPage :: SGet 'TypeEmpty
emptyPage = SGet STypeEmpty $ get >>= \case
    TypeEmpty -> return EmptyPage
    x -> fail $ "unexpected " ++ show x ++ " while decoding TypeEmpty"

-- | Decoder for a leaf node page.
leafNodePage :: (Key k, Value v)
             => Height 'Z
             -> Proxy k
             -> Proxy v
             -> SGet 'TypeLeafNode
leafNodePage h k v = SGet STypeLeafNode $ get >>= \case
    TypeLeafNode -> LeafNodePage h <$> get' h k v
    x -> fail $ "unexpected " ++ show x ++ " while decoding TypeLeafNode"
  where
    -- Proxies pin the key/value types for 'getLeafNode'.
    get' :: (Key k, Value v)
         => Height 'Z -> Proxy k -> Proxy v -> Get (Node 'Z k v)
    get' h' _ _ = getLeafNode h'

-- | Decoder for an index node page.
indexNodePage :: (Key k, Value v)
              => Height ('S n)
              -> Proxy k
              -> Proxy v
              -> SGet 'TypeIndexNode
indexNodePage h k v = SGet STypeIndexNode $ get >>= \case
    TypeIndexNode -> do
        -- The stored height must match the expected height, otherwise the
        -- decoded node would be reinterpreted at the wrong tree level.
        h' <- get
        if fromHeight h == fromHeight h'
            then IndexNodePage h <$> get' h k v
            else fail $ "expected height " ++ show h ++ " but got "
                     ++ show h' ++ " while decoding TypeNode"
    x -> fail $ "unexpected " ++ show x ++ " while decoding TypeIndexNode"
  where
    get' :: (Key k, Value v)
         => Height ('S n) -> Proxy k -> Proxy v -> Get (Node ('S n) k v)
    get' h' _ _ = getIndexNode h'

-- | Decoder for an overflow page.
overflowPage :: (Value v) => Proxy v -> SGet 'TypeOverflow
overflowPage v = SGet STypeOverflow $ get >>= \case
    TypeOverflow -> OverflowPage <$> get' v
    x -> fail $ "unexpected " ++ show x ++ " while decoding TypeOverflow"
  where
    -- The proxy pins the value type for the 'Binary' decoder.
    get' :: (Value v) => Proxy v -> Get v
    get' _ = get

-- | Decoder for a concurrent-meta page.
concurrentMetaPage :: Root root
                   => Proxy root
                   -> SGet 'TypeConcurrentMeta
concurrentMetaPage root = SGet STypeConcurrentMeta $ get >>= \ case
    TypeConcurrentMeta -> ConcurrentMetaPage <$> get' root
    x -> fail $ "unexpected " ++ show x ++ " while decoding TypeConcurrentMeta"
  where
    get' :: Root root => Proxy root -> Get (ConcurrentMeta root)
    get' _ = get

-- | Exception thrown when decoding of a page fails.
newtype DecodeError = DecodeError String deriving (Show, Typeable)

instance Exception DecodeError where

-- | Seed for the xxHash-64 page checksums. Part of the on-disk format.
checksumSeed :: Word64
checksumSeed = 0

--------------------------------------------------------------------------------
-- File: src/Database/Haskey/Utils/IO.hs
--------------------------------------------------------------------------------

module Database.Haskey.Utils.IO where

import Data.ByteString (ByteString, packCStringLen)
import Data.ByteString.Unsafe (unsafeUseAsCString)
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as BL

import Foreign (allocaBytes, castPtr, plusPtr)

import qualified FileIO as IO

import System.IO.Error (mkIOError, eofErrorType, ioeSetErrorString)

-- | Read exactly @n@ bytes from a file handle.
--
-- Loops until all @n@ bytes are read, advancing the buffer pointer by the
-- amount each partial read returned. A zero-byte read is treated as
-- end-of-file and raised as an 'IOError'.
readByteString :: IO.FHandle -> Int -> IO ByteString
readByteString fd n = allocaBytes n $ \buf -> do
    go 0 buf
    packCStringLen (castPtr buf, fromIntegral n)
  where
    go c buf
        | c == n = return ()
        | otherwise = do
            rc <- IO.read fd buf (fromIntegral (n - c))
            case rc of
                0 -> ioError (ioeSetErrorString (mkIOError eofErrorType "getByteString" Nothing Nothing) "EOF")
                n' -> go (c + fromIntegral n') (buf `plusPtr` fromIntegral n')

-- | Write a full strict 'ByteString' to a file handle.
--
-- Loops over partial writes until every byte is written.
-- NOTE(review): 'unsafeUseAsCString' requires the buffer not be mutated —
-- 'IO.write' presumably only reads from it; confirm in "FileIO".
writeByteString :: IO.FHandle -> ByteString -> IO ()
writeByteString fd bs = unsafeUseAsCString bs $ \buf -> go 0 buf
  where
    n = BS.length bs
    go c buf
        | c == n = return ()
        | otherwise = do
            n' <- IO.write fd (castPtr buf) (fromIntegral (n - c))
            go (c + fromIntegral n') (buf `plusPtr` fromIntegral n')

-- | Write a full lazy 'ByteString' to a file handle, chunk by chunk.
writeLazyByteString :: IO.FHandle -> BL.ByteString -> IO ()
writeLazyByteString fh bs = mapM_ (writeByteString fh) (BL.toChunks bs)

--------------------------------------------------------------------------------
-- File: src/Database/Haskey/Utils/Monad.hs
--------------------------------------------------------------------------------

module Database.Haskey.Utils.Monad where

-- | Monadic @if@: run the condition, then one of the two branches.
ifM :: Monad m => m Bool -> m a -> m a -> m a
ifM f y n = do f' <- f; if f' then y else n

--------------------------------------------------------------------------------
-- File: src/Database/Haskey/Utils/Monad/Catch.hs
--------------------------------------------------------------------------------

module Database.Haskey.Utils.Monad.Catch where

import Control.Monad.Catch

-- | Unwrap a 'Just', or throw the given exception on 'Nothing'.
justErrM :: (MonadThrow m, Exception e) => e -> Maybe a -> m a
justErrM _ (Just v) = return v
justErrM e Nothing = throwM e

--------------------------------------------------------------------------------
-- File: src/Database/Haskey/Utils/RLock.hs
--------------------------------------------------------------------------------

{-# LANGUAGE DeriveDataTypeable #-}
-- | Simple implementations of reentrant locks using 'MVar'
module Database.Haskey.Utils.RLock where

import Control.Concurrent (ThreadId, myThreadId)
import Control.Concurrent.MVar
import Control.Exception (Exception, throwIO)
import Control.Monad (unless, when)
import Control.Monad.Catch (MonadMask, bracket_)
import Control.Monad.IO.Class (MonadIO, liftIO)

import Data.Typeable (Typeable)

-- | A reentrant lock.
--
-- First 'MVar': the current owner and its re-entry count ('Nothing' when
-- unowned). Second 'MVar': the actual blocking token — full when the lock
-- is free, empty while held.
type RLock = (MVar (Maybe (ThreadId, Integer)), MVar ())

-- | Create a new reentrant lock.
newRLock :: IO RLock
newRLock = do { a <- newMVar Nothing; b <- newMVar (); return (a, b) }

-- | Acquire a reentrant lock, blocks.
--
-- If the calling thread already owns the lock, just bump the count.
-- Otherwise block on the token 'MVar' and record ourselves as owner.
acquireRLock :: RLock -> IO ()
acquireRLock (r, l) = do
    myId <- myThreadId
    ok <- modifyMVar r $ \state -> case state of
        Nothing -> return (state, False)
        Just (tId, x) -> if tId == myId
            -- Re-entrant acquisition by the current owner.
            then return (Just (myId, x + 1), True)
            else return (state, False)

    unless ok $ do
        -- Block here until the token is released by the current owner.
        () <- takeMVar l
        modifyMVar_ r $ const (return $ Just (myId, 1))

-- | Release a reentrant lock.
--
-- Decrements the re-entry count; only the final release puts the token back
-- and wakes a waiter. Throws 'RLockError' on misuse (releasing a lock that
-- is not held, or held by another thread).
releaseRLock :: RLock -> IO ()
releaseRLock (r, l) = do
    myId <- myThreadId
    done <- modifyMVar r $ \state -> case state of
        Nothing -> throwIO $ RLockError "the lock has no inhabitant"
        -- Defensive: a recorded count of 0 should be unreachable.
        Just (_, 0) -> throwIO $ RLockError "the lock is already released"
        Just (tId, n) -> if tId == myId
            then if n == 1
                then return (Nothing, True)
                else return (Just (myId, n-1), False)
            else throwIO $ RLockError "lock not held by releaser"

    when done $
        putMVar l ()

-- | Execute an action with the lock, bracketed, exception-safe
withRLock :: (MonadIO m, MonadMask m) => RLock -> m a -> m a
withRLock l = bracket_ (liftIO $ acquireRLock l)
                       (liftIO $ releaseRLock l)

-- | Exception raised when 'RLock' is improperly used.
57 | newtype RLockError = RLockError String deriving (Show, Typeable) 58 | 59 | instance Exception RLockError where 60 | -------------------------------------------------------------------------------- /src/Database/Haskey/Utils/STM/Map.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE CPP #-} 2 | {-# LANGUAGE ConstraintKinds #-} 3 | module Database.Haskey.Utils.STM.Map where 4 | 5 | import Control.Applicative ((<$>)) 6 | import Control.Concurrent.STM (STM) 7 | 8 | import qualified Focus as F 9 | import qualified ListT as L 10 | 11 | #if MIN_VERSION_stm_containers(1,1,0) 12 | import StmContainers.Map (Map) 13 | import qualified StmContainers.Map as M 14 | import Data.Hashable 15 | type Key key = (Eq key, Hashable key) 16 | #else 17 | import STMContainers.Map (Map, Key) 18 | import qualified STMContainers.Map as M 19 | #endif 20 | 21 | lookupMin :: Map k v -> STM (Maybe (k, v)) 22 | #if MIN_VERSION_stm_containers(1,1,0) 23 | lookupMin = L.head . M.listT 24 | #else 25 | lookupMin = L.head . M.stream 26 | #endif 27 | 28 | lookupMinKey :: Map k v -> STM (Maybe k) 29 | lookupMinKey = ((fst <$>) <$>) . lookupMin 30 | 31 | alter :: Key k => k -> (Maybe v -> Maybe v) -> Map k v -> STM () 32 | alter k f = M.focus (F.alterM (return . f)) k 33 | -------------------------------------------------------------------------------- /stack.yaml: -------------------------------------------------------------------------------- 1 | resolver: lts-9.0 2 | packages: 3 | - '.' 
4 | extra-deps: 5 | #- haskey-btree-0.2.0.1 6 | - git: https://github.com/haskell-haskey/haskey-btree 7 | commit: 4ff3a73d8eb480902478772f1be5515967624ff6 8 | - lz4-0.2.3.1 9 | - numerals-0.4.1 10 | - xxhash-ffi-0.2.0.0 11 | flags: {} 12 | extra-package-dbs: [] 13 | -------------------------------------------------------------------------------- /tests/Integration.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE FlexibleInstances #-} 2 | {-# LANGUAGE RankNTypes #-} 3 | module Main (main) where 4 | 5 | import Test.Framework (Test, defaultMain) 6 | 7 | import qualified Integration.CreateAndOpen 8 | import qualified Integration.WriteOpenRead.Concurrent 9 | 10 | tests :: [Test] 11 | tests = 12 | [ Integration.CreateAndOpen.tests 13 | , Integration.WriteOpenRead.Concurrent.tests 14 | ] 15 | 16 | main :: IO () 17 | main = defaultMain tests 18 | -------------------------------------------------------------------------------- /tests/Integration/CreateAndOpen.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DeriveDataTypeable #-} 2 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 3 | module Integration.CreateAndOpen where 4 | 5 | import Test.Framework (Test, testGroup) 6 | import Test.Framework.Providers.HUnit (testCase) 7 | import Test.HUnit hiding (Test, Node) 8 | 9 | import Control.Applicative ((<$>)) 10 | 11 | import Data.Binary (Binary) 12 | import Data.Maybe (fromJust) 13 | import Data.Typeable (Typeable) 14 | 15 | import System.Directory (removeDirectoryRecursive, 16 | getTemporaryDirectory, doesDirectoryExist, 17 | writable, getPermissions) 18 | import System.IO.Temp (createTempDirectory) 19 | 20 | import Data.BTree.Primitives (Value) 21 | 22 | import Database.Haskey.Alloc.Concurrent 23 | import Database.Haskey.Store.File 24 | 25 | tests :: Test 26 | tests = testGroup "CreateAndOpen" 27 | [ testCase "file backend" case_file_backend 28 | ] 29 | 30 | 
case_file_backend :: Assertion 31 | case_file_backend = do 32 | exists <- doesDirectoryExist "/var/run/shm" 33 | w <- if exists then writable <$> getPermissions "/var/run/shm" 34 | else return False 35 | tmpDir <- if w then return "/var/run/shm" 36 | else getTemporaryDirectory 37 | fp <- createTempDirectory tmpDir "db.haskey" 38 | let hnds = concurrentHandles fp 39 | 40 | _ <- create hnds 41 | root' <- open hnds 42 | 43 | removeDirectoryRecursive fp 44 | 45 | assertEqual "should've read back initial root" (Just root) root' 46 | where 47 | create :: ConcurrentHandles -> IO (ConcurrentDb TestRoot) 48 | create hnds = runFileStoreT (createConcurrentDb hnds root) config 49 | 50 | open :: ConcurrentHandles -> IO (Maybe TestRoot) 51 | open hnds = do 52 | maybeDb <- runFileStoreT (openConcurrentDb hnds) config 53 | case maybeDb of 54 | Nothing -> return Nothing 55 | Just db -> Just <$> runFileStoreT (transactReadOnly return db) config 56 | 57 | config = fromJust $ fileStoreConfigWithPageSize 256 58 | 59 | root = TestRoot "Hello World!" 
60 | 61 | newtype TestRoot = TestRoot String deriving (Binary, Eq, Value, Show, Typeable) 62 | 63 | instance Root TestRoot where 64 | -------------------------------------------------------------------------------- /tests/Integration/WriteOpenRead/Concurrent.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DeriveDataTypeable #-} 2 | {-# LANGUAGE DeriveGeneric #-} 3 | {-# LANGUAGE FlexibleContexts #-} 4 | {-# LANGUAGE FlexibleInstances #-} 5 | {-# LANGUAGE RankNTypes #-} 6 | module Integration.WriteOpenRead.Concurrent where 7 | 8 | import Test.Framework (Test, testGroup) 9 | import Test.Framework.Providers.QuickCheck2 (testProperty) 10 | import Test.QuickCheck 11 | import Test.QuickCheck.Monadic 12 | import Test.QuickCheck.Random 13 | 14 | import Control.Applicative ((<$>)) 15 | import Control.Monad 16 | import Control.Monad.Catch (MonadMask, Exception, throwM, catch) 17 | import Control.Monad.IO.Class 18 | import Control.Monad.Trans (lift) 19 | import Control.Monad.Trans.Maybe 20 | 21 | import Data.Binary (Binary(..)) 22 | import Data.Foldable (foldlM) 23 | import Data.Map (Map) 24 | import Data.Maybe (isJust, fromJust, fromMaybe) 25 | import Data.Typeable (Typeable) 26 | import Data.Word (Word8) 27 | import qualified Data.Map as M 28 | 29 | import GHC.Generics (Generic) 30 | 31 | import System.Directory (removeDirectoryRecursive, 32 | getTemporaryDirectory, doesDirectoryExist, 33 | writable, getPermissions) 34 | import System.IO.Temp (createTempDirectory) 35 | 36 | import Data.BTree.Alloc.Class 37 | import Data.BTree.Impure 38 | import Data.BTree.Primitives 39 | import qualified Data.BTree.Impure as B 40 | 41 | import Database.Haskey.Alloc.Concurrent 42 | import Database.Haskey.Store.File 43 | import Database.Haskey.Store.InMemory 44 | 45 | import Integration.WriteOpenRead.Transactions 46 | 47 | tests :: Test 48 | tests = testGroup "WriteOpenRead.Concurrent" 49 | [ testProperty "memory backend" (monadicIO 
prop_memory_backend) 50 | , testProperty "file backend" (monadicIO prop_file_backend) 51 | ] 52 | 53 | case_bad_seed :: IO () 54 | case_bad_seed = do 55 | putStrLn "Testing bad case..." 56 | quickCheckWith args (monadicIO prop_memory_backend) 57 | putStrLn " done" 58 | where 59 | -- This seed results in out of memory!! 60 | seed = 1576280407925194075 61 | gen = (mkQCGen seed, seed) 62 | args = stdArgs { replay = Just gen } 63 | 64 | type Root' = Tree Integer TestValue 65 | 66 | prop_memory_backend :: PropertyM IO () 67 | prop_memory_backend = forAllM (genTestSequence False) $ \(TestSequence txs) -> do 68 | files <- run newEmptyMemoryStore 69 | db <- run $ create files 70 | _ <- run $ foldlM (writeReadTest db files) 71 | M.empty 72 | txs 73 | return () 74 | where 75 | 76 | writeReadTest :: ConcurrentDb Root' 77 | -> MemoryFiles String 78 | -> Map Integer TestValue 79 | -> TestTransaction Integer TestValue 80 | -> IO (Map Integer TestValue) 81 | writeReadTest db files m tx = do 82 | openAndWrite db files tx 83 | read' <- openAndRead db files 84 | let expected = fromMaybe m $ testTransactionResult m tx 85 | if read' == M.toList expected 86 | then return expected 87 | else error $ "error:" 88 | ++ "\n after: " ++ show tx 89 | ++ "\n expectd: " ++ show (M.toList expected) 90 | ++ "\n got: " ++ show read' 91 | 92 | create :: MemoryFiles String -> IO (ConcurrentDb Root') 93 | create = runMemoryStoreT (createConcurrentDb hnds B.empty) config 94 | where 95 | hnds = concurrentHandles "" 96 | 97 | openAndRead db = runMemoryStoreT (readAll db) config 98 | 99 | openAndWrite db files tx = 100 | runMemoryStoreT (writeTransaction tx db) config files 101 | 102 | config = fromJust $ memoryStoreConfigWithPageSize 256 103 | 104 | -------------------------------------------------------------------------------- 105 | 106 | prop_file_backend :: PropertyM IO () 107 | prop_file_backend = forAllM (genTestSequence True) $ \(TestSequence txs) -> do 108 | exists <- run $ doesDirectoryExist 
"/var/run/shm" 109 | w <- if exists then run $ writable <$> getPermissions "/var/run/shm" 110 | else return False 111 | tmpDir <- if w then return "/var/run/shm" 112 | else run getTemporaryDirectory 113 | fp <- run $ createTempDirectory tmpDir "db.haskey" 114 | let hnds = concurrentHandles fp 115 | 116 | db <- run $ create hnds 117 | result <- run . runMaybeT $ foldM (writeReadTest db) 118 | M.empty 119 | txs 120 | 121 | run $ removeDirectoryRecursive fp 122 | 123 | assert $ isJust result 124 | where 125 | writeReadTest :: ConcurrentDb Root' 126 | -> Map Integer TestValue 127 | -> TestTransaction Integer TestValue 128 | -> MaybeT IO (Map Integer TestValue) 129 | writeReadTest db m tx = do 130 | _ <- lift $ void (openAndWrite db tx) `catch` 131 | \TestException -> return () 132 | read' <- lift $ openAndRead db 133 | let expected = fromMaybe m $ testTransactionResult m tx 134 | if read' == M.toList expected 135 | then return expected 136 | else error $ "error:" 137 | ++ "\n after: " ++ show tx 138 | ++ "\n expectd: " ++ show (M.toList expected) 139 | ++ "\n got: " ++ show read' 140 | 141 | create :: ConcurrentHandles 142 | -> IO (ConcurrentDb Root') 143 | create hnds = runFileStoreT (createConcurrentDb hnds B.empty) config 144 | 145 | 146 | openAndRead :: ConcurrentDb Root' 147 | -> IO [(Integer, TestValue)] 148 | openAndRead db = runFileStoreT (readAll db) config 149 | 150 | openAndWrite :: ConcurrentDb Root' 151 | -> TestTransaction Integer TestValue 152 | -> IO () 153 | openAndWrite db tx = 154 | runFileStoreT (void $ writeTransaction tx db) config 155 | 156 | config = fromJust $ fileStoreConfigWithPageSize 256 157 | 158 | -------------------------------------------------------------------------------- 159 | 160 | writeTransaction :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Key k, Value v) 161 | => TestTransaction k v 162 | -> ConcurrentDb (Tree k v) 163 | -> m () 164 | writeTransaction (TestTransaction txType actions) = 165 | transaction 166 | where 167 | 
writeAction (Insert k v) = B.insert k v 168 | writeAction (Replace k v) = B.insert k v 169 | writeAction (Delete k) = B.delete k 170 | writeAction ThrowException = const (throwM TestException) 171 | 172 | transaction = transact_ $ 173 | foldl (>=>) return (map writeAction actions) 174 | >=> commitOrAbort 175 | 176 | commitOrAbort :: (AllocM n, MonadMask n) => root -> n (Transaction root ()) 177 | commitOrAbort 178 | | TxAbort <- txType = const abort_ 179 | | TxCommit <- txType = commit_ 180 | 181 | readAll :: (MonadIO m, MonadMask m, ConcurrentMetaStoreM m, Key k, Value v) 182 | => ConcurrentDb (Tree k v) 183 | -> m [(k, v)] 184 | readAll = transactReadOnly B.toList 185 | 186 | -------------------------------------------------------------------------------- 187 | 188 | -- | Value used for testing. 189 | -- 190 | -- This value will overflow 20% of the time. 191 | newtype TestValue = TestValue (Either Integer [Word8]) 192 | deriving (Eq, Generic, Show, Typeable) 193 | 194 | instance Binary TestValue where 195 | instance Value TestValue where 196 | 197 | instance Arbitrary TestValue where 198 | arbitrary = 199 | TestValue <$> frequency [(80, Left <$> small), (20, Right <$> big)] 200 | where 201 | small = arbitrary 202 | big = arbitrary 203 | 204 | -- | Exception used for testing 205 | data TestException = TestException deriving (Show, Typeable) 206 | 207 | instance Exception TestException where 208 | 209 | -------------------------------------------------------------------------------- 210 | -------------------------------------------------------------------------------- /tests/Integration/WriteOpenRead/Transactions.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE RecordWildCards #-} 2 | module Integration.WriteOpenRead.Transactions where 3 | 4 | import Test.QuickCheck 5 | 6 | import Control.Applicative ((<$>), (<*>), pure) 7 | import Control.Monad.State 8 | 9 | import Data.Foldable (foldlM) 10 | import Data.List 
(inits) 11 | import Data.Map (Map) 12 | import Data.Maybe (fromMaybe) 13 | import qualified Data.Map as M 14 | 15 | -------------------------------------------------------------------------------- 16 | 17 | newtype TestSequence k v = TestSequence [TestTransaction k v] 18 | deriving (Show) 19 | 20 | data TransactionSetup = TransactionSetup { sequenceInsertFrequency :: !Int 21 | , sequenceReplaceFrequency :: !Int 22 | , sequenceDeleteFrequency :: !Int 23 | , sequenceExceptionFrequency :: !Int } 24 | deriving (Show) 25 | 26 | deleteHeavySetup :: TransactionSetup 27 | deleteHeavySetup = TransactionSetup { sequenceInsertFrequency = 35 28 | , sequenceReplaceFrequency = 20 29 | , sequenceDeleteFrequency = 45 30 | , sequenceExceptionFrequency = 0 } 31 | 32 | insertHeavySetup :: TransactionSetup 33 | insertHeavySetup = TransactionSetup { sequenceInsertFrequency = 12 34 | , sequenceReplaceFrequency = 4 35 | , sequenceDeleteFrequency = 4 36 | , sequenceExceptionFrequency = 0} 37 | 38 | withExceptionSetup :: TransactionSetup 39 | withExceptionSetup = insertHeavySetup { sequenceExceptionFrequency = 5 } 40 | 41 | genTransactionSetup :: Bool -> Gen TransactionSetup 42 | genTransactionSetup withExc = 43 | frequency [(45, return deleteHeavySetup), 44 | (45, return insertHeavySetup), 45 | (f, return withExceptionSetup)] 46 | where 47 | f | withExc = 10 48 | | otherwise = 0 49 | 50 | data TxType = TxAbort | TxCommit 51 | deriving (Show) 52 | 53 | genTxType :: Gen TxType 54 | genTxType = elements [TxAbort, TxCommit] 55 | 56 | data TestTransaction k v = TestTransaction TxType [TestAction k v] 57 | deriving (Show) 58 | 59 | testTransactionResult :: Ord k => Map k v -> TestTransaction k v -> Maybe (Map k v) 60 | testTransactionResult m (TestTransaction TxAbort _) = Just m 61 | testTransactionResult m (TestTransaction TxCommit actions) 62 | = foldlM (flip doAction) m actions 63 | 64 | data TestAction k v = Insert k v 65 | | Replace k v 66 | | Delete k 67 | | ThrowException 68 | deriving 
(Show) 69 | 70 | doAction :: Ord k => TestAction k v -> Map k v -> Maybe (Map k v) 71 | doAction action m 72 | | Insert k v <- action = Just $ M.insert k v m 73 | | Replace k v <- action = Just $ M.insert k v m 74 | | Delete k <- action = Just $ M.delete k m 75 | | ThrowException <- action = Nothing 76 | 77 | genTestTransaction :: (Ord k, Arbitrary k, Arbitrary v) => Map k v -> TransactionSetup -> Gen (TestTransaction k v, Maybe (Map k v)) 78 | genTestTransaction db TransactionSetup{..} = sized $ \n -> do 79 | k <- choose (0, n) 80 | (m, actions) <- execStateT (replicateM k next) (Just db, []) 81 | tx <- TestTransaction <$> genTxType <*> pure (reverse actions) 82 | return (tx, m) 83 | where 84 | genAction :: (Ord k, Arbitrary k, Arbitrary v) 85 | => Maybe (Map k v) 86 | -> Gen (TestAction k v) 87 | genAction Nothing = genException 88 | genAction (Just m) 89 | | M.null m = genInsert 90 | | otherwise = frequency [(sequenceInsertFrequency, genInsert ), 91 | (sequenceReplaceFrequency, genReplace m), 92 | (sequenceDeleteFrequency, genDelete m ), 93 | (sequenceExceptionFrequency, genException)] 94 | 95 | genInsert :: (Arbitrary k, Arbitrary v) => Gen (TestAction k v) 96 | genInsert = Insert <$> arbitrary <*> arbitrary 97 | genReplace m = Replace <$> elements (M.keys m) <*> arbitrary 98 | genDelete m = Delete <$> elements (M.keys m) 99 | genException = return ThrowException 100 | 101 | next :: (Ord k, Arbitrary k, Arbitrary v) 102 | => StateT (Maybe (Map k v), [TestAction k v]) Gen () 103 | next = do 104 | (m, actions) <- get 105 | action <- lift $ genAction m 106 | put (m >>= doAction action, action:actions) 107 | 108 | shrinkTestTransaction :: (Ord k, Arbitrary k, Arbitrary v) 109 | => TestTransaction k v 110 | -> [TestTransaction k v] 111 | shrinkTestTransaction (TestTransaction _ []) = [] 112 | shrinkTestTransaction (TestTransaction t actions) = map (TestTransaction t) (init (inits actions)) 113 | 114 | genTestSequence :: (Ord k, Arbitrary k, Arbitrary v) => Bool -> 
Gen (TestSequence k v) 115 | genTestSequence withExc = sized $ \n -> do 116 | k <- choose (0, n) 117 | (_, txs) <- execStateT (replicateM k next) (M.empty, []) 118 | return $ TestSequence (reverse txs) 119 | where 120 | next :: (Ord k, Arbitrary k, Arbitrary v) 121 | => StateT (Map k v, [TestTransaction k v]) Gen () 122 | next = do 123 | (m, txs) <- get 124 | (tx, m') <- lift $ genTransactionSetup withExc >>= genTestTransaction m 125 | put (fromMaybe m m', tx:txs) 126 | 127 | shrinkTestSequence :: (Ord k, Arbitrary k, Arbitrary v) 128 | => TestSequence k v 129 | -> [TestSequence k v] 130 | shrinkTestSequence (TestSequence txs) = map TestSequence (shrinkList shrinkTestTransaction txs) 131 | -------------------------------------------------------------------------------- /tests/Properties.hs: -------------------------------------------------------------------------------- 1 | module Main (main) where 2 | 3 | import Test.Framework (Test, defaultMain) 4 | 5 | import qualified Properties.Store.Page 6 | 7 | -------------------------------------------------------------------------------- 8 | 9 | tests :: [Test] 10 | tests = 11 | [ Properties.Store.Page.tests 12 | ] 13 | 14 | main :: IO () 15 | main = defaultMain tests 16 | 17 | -------------------------------------------------------------------------------- 18 | -------------------------------------------------------------------------------- /tests/Properties/Store/Page.hs: -------------------------------------------------------------------------------- 1 | {-# LANGUAGE DataKinds #-} 2 | {-# LANGUAGE GADTs #-} 3 | {-# LANGUAGE GeneralizedNewtypeDeriving #-} 4 | {-# LANGUAGE OverloadedStrings #-} 5 | {-# LANGUAGE StandaloneDeriving #-} 6 | {-# OPTIONS_GHC -fno-warn-orphans #-} 7 | module Properties.Store.Page where 8 | 9 | import Test.Framework (Test, testGroup) 10 | import Test.Framework.Providers.HUnit (testCase) 11 | import Test.Framework.Providers.QuickCheck2 (testProperty) 12 | import Test.HUnit hiding (Test, Node) 13 
| import Test.QuickCheck 14 | 15 | import Control.Applicative ((<$>)) 16 | 17 | import Data.Int 18 | import Data.List (nub) 19 | import Data.Monoid ((<>)) 20 | import Data.Proxy 21 | import qualified Data.Binary as B 22 | import qualified Data.ByteString.Lazy as BL 23 | import qualified Data.Map as M 24 | import qualified Data.Vector as V 25 | 26 | import Data.BTree.Impure.Internal.Structures 27 | import Data.BTree.Primitives 28 | 29 | import Database.Haskey.Store.Page 30 | 31 | -------------------------------------------------------------------------------- 32 | 33 | tests :: Test 34 | tests = testGroup "Store.Page" 35 | [ testProperty "binary pageType" prop_binary_pageType 36 | , testProperty "binary emptyPage" prop_binary_emptyPage 37 | , testProperty "binary nodePage leaf" prop_binary_leafNodePage 38 | , testProperty "binary nodePage idx" prop_binary_indexNodePage 39 | , testCase "zero checksum length" case_zero_checksum_length 40 | ] 41 | 42 | prop_binary_pageType :: Property 43 | prop_binary_pageType = forAll types $ \t -> 44 | let bs = B.encode t in BL.length bs == 1 && B.decode bs == t 45 | where 46 | types = elements [TypeEmpty, 47 | TypeConcurrentMeta, 48 | TypeOverflow, 49 | TypeLeafNode, 50 | TypeIndexNode] 51 | 52 | prop_binary_emptyPage :: Bool 53 | prop_binary_emptyPage = case decode' emptyPage (encode EmptyPage) of 54 | Right EmptyPage -> True 55 | Left _ -> False 56 | 57 | prop_binary_leafNodePage :: Property 58 | prop_binary_leafNodePage = forAll genLeafNode $ \leaf -> 59 | case decode' (leafNodePage zeroHeight key val) 60 | (encode (LeafNodePage zeroHeight leaf)) of 61 | Right (LeafNodePage h n) -> maybe False (== leaf) $ castNode h zeroHeight n 62 | Left _ -> False 63 | where 64 | key = Proxy :: Proxy Int64 65 | val = Proxy :: Proxy Bool 66 | 67 | prop_binary_indexNodePage :: Property 68 | prop_binary_indexNodePage = forAll genIndexNode $ \(srcHgt, idx) -> 69 | case decode' (indexNodePage srcHgt key val) 70 | (encode (IndexNodePage srcHgt idx)) 
of 71 | Right (IndexNodePage h n) -> maybe False (== idx) $ castNode h srcHgt n 72 | Left _ -> False 73 | where 74 | key = Proxy :: Proxy Int64 75 | val = Proxy :: Proxy Bool 76 | 77 | case_zero_checksum_length :: Assertion 78 | case_zero_checksum_length = do 79 | assertEqual "zero checksum should prepend 8 bytes" 8 $ 80 | BL.length withZero' - BL.length without' 81 | assertEqual "zero checksum length should equal regular checksum lenth" 82 | (BL.length withZero') 83 | (BL.length with') 84 | where 85 | withZero' = encodeZeroChecksum pg 86 | without' = encodeNoChecksum pg 87 | with' = encode pg 88 | 89 | pg = LeafNodePage zeroHeight (Leaf M.empty :: Node 'Z Int64 Int64) 90 | 91 | decode' :: SGet t -> BL.ByteString -> Either String (Page t) 92 | decode' x = decode x . BL.toStrict 93 | 94 | -------------------------------------------------------------------------------- 95 | 96 | genIndexNode :: Gen (Height ('S h), Node ('S h) Int64 Bool) 97 | genIndexNode = do 98 | h <- genNonZeroHeight 99 | n <- Idx <$> arbitrary 100 | return (h, n) 101 | 102 | genLeafNode :: Gen (Node 'Z Int64 Bool) 103 | genLeafNode = Leaf <$> arbitrary 104 | 105 | instance Arbitrary v => Arbitrary (LeafValue v) where 106 | arbitrary = oneof [RawValue <$> arbitrary, OverflowValue <$> arbitrary] 107 | 108 | instance Arbitrary TxId where 109 | arbitrary = TxId <$> arbitrary 110 | 111 | deriving instance Arbitrary (Height h) 112 | 113 | genNonZeroHeight :: Gen (Height h) 114 | genNonZeroHeight = suchThat arbitrary $ \h -> case viewHeight h of 115 | UZero -> False 116 | USucc _ -> True 117 | 118 | instance (Key k, Arbitrary k, Arbitrary v) => Arbitrary (Index k v) where 119 | arbitrary = do 120 | keys <- V.fromList . 
nub <$> orderedList 121 | vals <- V.fromList <$> vector (V.length keys + 1) 122 | return (Index keys vals) 123 | shrink (Index keys vals) = 124 | [ Index newKeys newVals 125 | | k <- [0..V.length keys - 1] 126 | , let (preKeys,sufKeys) = V.splitAt k keys 127 | newKeys = preKeys <> V.drop 1 sufKeys 128 | (preVals,sufVals) = V.splitAt k vals 129 | newVals = preVals <> V.drop 1 sufVals 130 | ] 131 | 132 | deriving instance Arbitrary (NodeId height key val) 133 | -------------------------------------------------------------------------------- /tests/Properties/Utils.hs: -------------------------------------------------------------------------------- 1 | module Properties.Utils where 2 | 3 | import qualified Data.Binary as B 4 | 5 | testBinary :: (Eq a, B.Binary a) => a -> Bool 6 | testBinary x = B.decode (B.encode x) == x 7 | --------------------------------------------------------------------------------