├── .github └── workflows │ └── scala.yml ├── .gitignore ├── .scalafix.conf ├── .scalafmt.conf ├── LICENSE ├── Makefile ├── README.md ├── RELEASE_NOTES.md ├── build.sbt ├── project ├── build.properties └── plugins.sbt ├── publish.sbt ├── src ├── main │ └── scala │ │ └── zio │ │ └── lmdb │ │ ├── LMDB.scala │ │ ├── LMDBCodec.scala │ │ ├── LMDBCollection.scala │ │ ├── LMDBConfig.scala │ │ ├── LMDBIssues.scala │ │ ├── LMDBLive.scala │ │ ├── json │ │ ├── LMDBCodecJson.scala │ │ └── package.scala │ │ └── package.scala └── test │ └── scala │ └── zio │ └── lmdb │ ├── Commons.scala │ ├── LMDBBasicUsageSpec.scala │ ├── LMDBConcurrencySpec.scala │ ├── LMDBDataClassSpec.scala │ └── LMDBFeaturesSpec.scala └── version.sbt /.github/workflows/scala.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | push: 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Checkout 10 | uses: actions/checkout@v4 11 | - name: Setup JDK 12 | uses: actions/setup-java@v4 13 | with: 14 | distribution: temurin 15 | java-version: 17 16 | - name: Setup sbt launcher 17 | uses: sbt/setup-sbt@v1 18 | - name: Build and Test 19 | run: sbt +test 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # build tools related 2 | target/ 3 | project/target 4 | 5 | # IDE related 6 | .metals/ 7 | .bloop/ 8 | .bsp/ 9 | .vscode/ 10 | .idea/ 11 | .scala-build/ 12 | project/metals.sbt 13 | 14 | # application related 15 | lmdb-test-data/ 16 | 17 | # temporary files 18 | .DS_Store/ 19 | *~ 20 | .~* 21 | nohup.out 22 | *.swp 23 | *.iml 24 | test*.log 25 | *.tmp 26 | null 27 | .attach_pid* 28 | 29 | # Security purposes 30 | .envrc 31 | -------------------------------------------------------------------------------- /.scalafix.conf: -------------------------------------------------------------------------------- 1 | rules = [ 2 | DisableSyntax, 3 | RedundantSyntax, 4 | //ExplicitResultTypes // not yet compatible with Scala3 5 | ] 6 | DisableSyntax.noVars = true 7 | DisableSyntax.noThrows = true 8 | DisableSyntax.noNulls = true 9 | DisableSyntax.noReturns = true 10 | DisableSyntax.noWhileLoops = true 11 | DisableSyntax.noAsInstanceOf = true 12 | DisableSyntax.noIsInstanceOf = true 13 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = 3.9.4 2 | runner.dialect = scala3 3 | align.preset = most 4 | maxColumn = 250 5 | assumeStandardLibraryStripMargin = true 6 | align.stripMargin = true 7 | indent.defnSite = 2 8 | 9 | //align.tokens.add = [ 10 | // {code = "=", owner = "Term.Arg.Named"} 11 | //] 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache NON-AI License, Version 2.0 2 | 3 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 4 | 5 | 1. Definitions. 6 | 7 | “License” shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 8 | 9 | “Licensor” shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
10 | 11 | “Legal Entity” shall mean the union of the acting entity and all other entities that control, are controlled by, 12 | or are under common control with that entity. For the purposes of this definition, “control” means (i) the power, direct or indirect, 13 | to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) 14 | or more of the outstanding shares, or (iii) beneficial ownership of such entity. 15 | 16 | “You” (or “Your”) shall mean an individual or Legal Entity exercising permissions granted by this License. 17 | 18 | “Source” form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, 19 | and configuration files. 20 | 21 | “Object” form shall mean any form resulting from mechanical transformation or translation of a Source form, including 22 | but not limited to compiled object code, generated documentation, and conversions to other media types. 23 | 24 | “Work” shall mean the work of authorship, whether in Source or Object form, made available under the License, 25 | as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 26 | 27 | “Derivative Works” shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and 28 | for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 29 | For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) 30 | to the interfaces of, the Work and Derivative Works thereof. 31 | 32 | “Contribution” shall mean any work of authorship, including the original version of the Work and any modifications or additions to 33 | that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or 34 | by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” 35 | means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to 36 | communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, 37 | the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated 38 | in writing by the copyright owner as “Not a Contribution.” 39 | 40 | “Contributor” shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and 41 | subsequently incorporated within the Work. 42 | 43 | 2. Grant of Copyright License. 44 | 45 | Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, 46 | no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, 47 | sublicense, and distribute the Work and such Derivative Works in Source or Object form, 48 | under the following conditions: 49 | 50 | 2.1. 
You shall not use the Covered Software in the creation of an Artificial Intelligence training dataset, 51 | including but not limited to any use that contributes to the training or development of an AI model or algorithm, 52 | unless You obtain explicit written permission from the Contributor to do so. 53 | 54 | 2.2. You acknowledge that the Covered Software is not intended for use in the creation of an Artificial Intelligence training dataset, 55 | and that the Contributor has no obligation to provide support or assistance for any use that violates this license. 56 | 57 | 3. Grant of Patent License. 58 | 59 | Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, 60 | royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, 61 | and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are 62 | necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which 63 | such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) 64 | alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, 65 | then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 66 | 67 | 4. Redistribution. 68 | 69 | You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, 70 | and in Source or Object form, provided that You meet the following conditions: 71 | 72 | 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and 73 | 74 | 2. You must cause any modified files to carry prominent notices stating that You changed the files; and 75 | 76 | 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, 77 | and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 78 | 79 | 4. If the Work includes a “NOTICE” text file as part of its distribution, then any Derivative Works that You distribute 80 | must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that 81 | do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed 82 | as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; 83 | or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. 84 | The contents of the NOTICE file are for informational purposes only and do not modify the License. 85 | 86 | You may add Your own attribution notices within Derivative Works that You distribute, 87 | alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as 88 | modifying the License. 
89 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, 90 | reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, 91 | and distribution of the Work otherwise complies with the conditions stated in this License. 92 | 93 | 5. Submission of Contributions. 94 | 95 | Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor 96 | shall be under the terms and conditions of this License, without any additional terms or conditions. 97 | Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed 98 | with Licensor regarding such Contributions. 99 | 100 | 6. Trademarks. 101 | 102 | This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, 103 | except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 104 | 105 | 7. Disclaimer of Warranty. 106 | 107 | Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) 108 | on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, 109 | any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 110 | You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated 111 | with Your exercise of permissions under this License. 112 | 113 | 8. Limitation of Liability. 114 | 115 | In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, 116 | unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, 117 | shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages 118 | of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages 119 | for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), 120 | even if such Contributor has been advised of the possibility of such damages. 121 | 122 | 9. Accepting Warranty or Additional Liability. 123 | 124 | While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, 125 | warranty, indemnity, or other liability obligations and/or rights consistent with this License. 126 | However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, 127 | not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, 128 | or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
129 | 130 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: readme-test unit-tests 2 | 3 | dependency-check: 4 | sbt dependencyUpdates 5 | 6 | unit-tests: 7 | sbt test 8 | 9 | readme-test: 10 | scala-cli README.md 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lightning Memory Database (LMDB) for ZIO 2 | [![][ZIOLMDBManagerImg]][ZIOLMDBManagerLnk] [![scaladoc][ScalaDocImg]][ScalaDoc] 3 | 4 | Why ZIO-lmdb? Because I wanted a straightforward **embedded** (in the same process) ACID database for small 5 | applications while keeping deployment, maintenance, and upgrades as simple as possible. 6 | 7 | ZIO-lmdb is based on the powerful [lmdb-java][JLMDB] library and brings a higher-level API to enhance the developer experience. 8 | 9 | So ZIO-lmdb is an embedded key/value database with an easy-to-use, opinionated API. 10 | Choices have been made to keep the developer experience as simple as possible : 11 | - JSON-based default storage using zio-json, 12 | - *Custom serialization is supported* 13 | - Safe updates using a lambda which is called with the previous value, if it exists, and returns the new value, 14 | - Identifiers are managed by the developer, just use [UUID][UUID] or [ULID][ZIO-ULID]. 15 | - Remember that identifiers are automatically lexicographically sorted :) 16 | 17 | The API is designed not to lie. All function signatures describe precisely 18 | what you can expect from them, thanks to [ZIO][ZIO] and [Scala3][Scala3]. 19 | 20 | ## Definitions 21 | 22 | For a better understanding, this library uses slightly different vocabulary from LMDB's original one : 23 | - **Database** : (*what LMDB calls an Environment*) 24 | - The place where the database file is stored on your file system 25 | - A set of configurations for this database (expected maximum size, expected number of collections) 26 | - **Collection** : (*what LMDB calls a Database*) 27 | - A sorted Map ([B+ Tree][btree]) where your data is stored 28 | - One database contains multiple collections 29 | - **Transaction** : (*the same term in LMDB*) 30 | - for global coherency within the same database 31 | - only one simultaneous write access is possible within the same database 32 | 33 | ## Configuration 34 | 35 | Configuration is based on the standard ZIO config mechanism; the default configuration provider uses environment variables 36 | or Java properties to resolve this library's configuration parameters.
37 | 38 | 39 | | Configuration key | Environment variable | Description | Default value | 40 | |---------------------|----------------------|----------------------------------------------------------------|------------------| 41 | | lmdb.name | LMDB_NAME | Database name, which will be also used as the directory name | default | 42 | | lmdb.home | LMDB_HOME | Where to store the database directory | $HOME/.lmdb | 43 | | lmdb.sync | LMDB_SYNC | Synchronize the file system with all database write operations | false | 44 | | lmdb.maxReaders | LMDB_MAXREADERS | The maximum number of readers | 100 | 45 | | lmdb.maxCollections | LMDB_MAXCOLLECTIONS | The maximum number of collections which can be created | 10_000 | 46 | | lmdb.mapSize | LMDB_MAPSIZE | The maximum size of the whole database including metadata | 100_000_000_000L | 47 | 48 | 49 | ## Usages example 50 | 51 | ```scala 52 | //> using scala 3.6.4 53 | //> using dep fr.janalyse::zio-lmdb:2.0.1 54 | //> using javaOpt --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED 55 | 56 | import zio.*, zio.json.*,zio.lmdb.*,zio.lmdb.json.* 57 | import java.io.File, java.util.UUID, java.time.OffsetDateTime 58 | 59 | case class Record(uuid: UUID, name: String, age: Int, addedOn: OffsetDateTime) derives LMDBCodecJson 60 | 61 | object SimpleExample extends ZIOAppDefault { 62 | override def run = example.provide(LMDB.liveWithDatabaseName("lmdb-data-simple-example"), Scope.default) 63 | 64 | val collectionName = "examples" 65 | val example = for { 66 | examples <- LMDB.collectionCreate[Record](collectionName, failIfExists = false) 67 | recordId <- Random.nextUUID 68 | dateTime <- Clock.currentDateTime 69 | record = Record(recordId, "John Doe", 42, dateTime) 70 | _ <- examples.upsertOverwrite(recordId.toString, record) 71 | gotten <- examples.fetch(recordId.toString).some 72 | collected <- examples.collect() 73 | _ <- Console.printLine(s"collection $collectionName contains ${collected.size} records") 74 | _ <- ZIO.foreach(collected)(record => Console.printLine(record)) 75 | lmdb <- ZIO.service[LMDB] 76 | _ <- Console.printLine("""LMDB standard tools can be used to manage the database content : sudo apt-get install lmdb-utils""") 77 | _ <- Console.printLine(s"""To get some statistics : mdb_stat -s $collectionName ${lmdb.databasePath}/""") 78 | _ <- Console.printLine(s"""To dump collection content : mdb_dump -p -s $collectionName ${lmdb.databasePath}/""") 79 | } yield () 80 | } 81 | 82 | SimpleExample.main(Array.empty) 83 | ``` 84 | 85 | To run the previous logic, you'll have to provide the LMDB layer. 
Two layers are available : 86 | - `LMDB.live` : Fully configurable using standard zio-config 87 | - `LMDB.liveWithDatabaseName("chosen-database-name")` : to override/force the database name 88 | (quite useful when writing Scala scripts) 89 | 90 | ### ZIO-LMDB based Applications 91 | - [sotohp - photo management][SOTOHP], which uses zio-lmdb intensively 92 | - [code-examples-manager - snippets/gists management][CEM], where LMDB is used for caching and data sharing 93 | - [zwords - wordle-like game][ZWORDS-CODE], which can be played at [zwords game][ZWORDS-LIVE] 94 | 95 | ### Code snippets using ZIO-LMDB, runnable with [scala-cli][SCL] 96 | - [ZIO LMDB simple example (scala-3)](https://gist.github.com/dacr/dcb8a11f095ef0a2a95c24701e6eb804) 97 | - [ZIO LMDB feeding with French town postal codes](https://gist.github.com/dacr/6d24baf827ae0c590133e0f27f1ef20b) 98 | - [ZIO LMDB using custom configuration provider](https://gist.github.com/dacr/790df1705c7ec19ae2fe4098dad8d762) 99 | - [Extract photos records from elasticsearch and save them into LMDB](https://gist.github.com/dacr/6ea121f251ad316a64657cbe78085ab7) 100 | - [Export code examples and execution results from lmdb to elasticsearch](https://gist.github.com/dacr/f25da8222b2ac644c3195c5982b7367e) 101 | 102 | ## Operating lmdb databases 103 | 104 | Standard LMDB tools can be used to manage the database content : `sudo apt-get install lmdb-utils` 105 | - to get some database statistics : `mdb_stat -a database_directory_path/` 106 | - to dump the content of a database : `mdb_dump -a -p database_directory_path/` 107 | - to dump the content of a database collection : `mdb_dump -s collectionName -p database_directory_path/` 108 | - to restore a collection or the entire database : use `mdb_load`, which expects the same format as `mdb_dump` 109 | 110 | As zio-lmdb stores records as JSON, dumps are just text, which can be edited and then loaded back, so simple data migrations are straightforward.
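The same kind of inspection can also be done from Scala through the `LMDB` service itself (`databasePath`, `collectionsAvailable`, `collectionSize`). Below is a minimal sketch, not part of the tested `readme-test` target; it reuses the database name from the example above, and the object name `CollectionsReport` is purely illustrative:

```scala
//> using scala 3.6.4
//> using dep fr.janalyse::zio-lmdb:2.0.1
//> using javaOpt --add-opens java.base/java.nio=ALL-UNNAMED --add-opens java.base/sun.nio.ch=ALL-UNNAMED

import zio.*, zio.lmdb.*

object CollectionsReport extends ZIOAppDefault {
  // List every collection of the database together with its record count,
  // a programmatic counterpart of the `mdb_stat` command shown above.
  val report = for {
    lmdb  <- ZIO.service[LMDB]
    _     <- Console.printLine(s"Database stored in ${lmdb.databasePath}")
    names <- LMDB.collectionsAvailable()
    _     <- ZIO.foreach(names) { name =>
               LMDB.collectionSize(name).flatMap(size => Console.printLine(s"$name : $size records"))
             }
  } yield ()

  override def run = report.provide(LMDB.liveWithDatabaseName("lmdb-data-simple-example"), Scope.default)
}
```

It only lists collections and their sizes, so it can be pointed at any existing zio-lmdb database directory.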
111 | 112 | ## Requirements 113 | 114 | When LMDB is used as a persistence store with a recent JVM, the following JVM options are required : 115 | 116 | ``` 117 | --add-opens java.base/java.nio=ALL-UNNAMED 118 | --add-opens java.base/sun.nio.ch=ALL-UNNAMED 119 | ``` 120 | 121 | ## Contributors :) 122 | 123 | - [François Armand](https://github.com/fanf) : for the Scala 2.13 support initiative 124 | 125 | 126 | [ZIOLMDBManager]: https://github.com/dacr/zio-lmdb 127 | [ZIOLMDBManagerImg]: https://img.shields.io/maven-central/v/fr.janalyse/zio-lmdb_3.svg 128 | [ZIOLMDBManagerLnk]: https://mvnrepository.com/artifact/fr.janalyse/zio-lmdb 129 | [ZIO]: https://zio.dev/ 130 | [Scala3]: https://docs.scala-lang.org/scala3/reference/ 131 | [JLMDB]: https://github.com/lmdbjava/lmdbjava 132 | [LMDB]: https://www.symas.com/lmdb 133 | [ZIO-ULID]: https://zio-ulid.bilal-fazlani.com/ 134 | [UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier 135 | [ZWORDS-CODE]: https://github.com/dacr/zwords 136 | [ZWORDS-LIVE]: https://zwords.mapland.fr/ 137 | [CEM]: https://github.com/dacr/code-examples-manager 138 | [SOTOHP]: https://github.com/dacr/sotohp 139 | [SCL]: https://scala-cli.virtuslab.org/ 140 | [ScalaDocImg]: https://javadoc.io/badge2/fr.janalyse/zio-lmdb_3/scaladoc.svg 141 | [ScalaDoc]: https://javadoc.io/doc/fr.janalyse/zio-lmdb_3/latest/zio/lmdb/LMDB$.html 142 | [btree]: https://en.wikipedia.org/wiki/B%2B_tree -------------------------------------------------------------------------------- /RELEASE_NOTES.md: -------------------------------------------------------------------------------- 1 | # ZIO-LMDB RELEASE NOTES 2 | 3 | ## 2.0 - 2025-04-07 4 | 5 | - support custom serialization layer using type class 6 | - provide default json serialization layer using zio json 7 | - support derivation for serialization auto-configuration 8 | - enhance streaming internals 9 | - drop scala 2.13 support 10 | 11 | ## 1.8 - 2024-01-21 12 | 13 | - dependency updates 14 | - add update operation 15 | - `def update(key: RecordKey, modifier: T => T): IO[UpdateErrors, Option[T]]` 16 | - will return None if no record was found 17 | - change upsert method signature to return the updated/inserted record (instead of Unit previously) 18 | - `def upsert(key: RecordKey, modifier: Option[T] => T): IO[UpsertErrors, T]` 19 | - now the updated or inserted record is returned 20 | 21 | ## 1.7 - 2024-01-01 22 | 23 | - upgrade to lmdb-java 1.9.0 24 | - update dependencies 25 | - add collectionDrop operation to delete a collection 26 | - add the failIfExists parameter to collectionCreate 27 | - simplify API usage for various use cases 28 | - enhance collect / stream / streamWithKey (#19) 29 | - in forward or backward key ordering 30 | - start after/before a given key 31 | - do not display logs during unit test execution 32 | - add more unit tests 33 | 34 | ## 1.5 - 2023-09-24 35 | 36 | - add collection head, previous, next, last record operations (#18) 37 | - update scala releases 38 | - update dependencies 39 | 40 | ## 1.4 - 2023-08-25 41 | 42 | - Add stream operations (#13) 43 | 44 | ## 1.3 - 2023-08-05 45 | 46 | - `UpsertOverwrite` now doesn't care about the json definition of the previous stored value (#6) 47 | - Change `upsert` & `upsertOverwrite` return type (#12) 48 | - `Unit` instead of `UpsertState` 49 | - `UpsertState` data type has been removed 50 | - Add collection `contains` key operation 51 | 52 | ## 1.2 - 2023-06-17 53 | 54 | - Add collection `clear` all content operation (#7) 55 |
-------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | name := "zio-lmdb" 2 | organization := "fr.janalyse" 3 | description := "Lightning Memory Database (LMDB) for scala ZIO" 4 | 5 | licenses += "NON-AI-APACHE2" -> url(s"https://github.com/non-ai-licenses/non-ai-licenses/blob/main/NON-AI-APACHE2") 6 | 7 | scalaVersion := "3.3.5" // FOR LIBS USE SCALA LTS 8 | 9 | lazy val versions = new { 10 | val zio = "2.1.17" 11 | val zionio = "2.0.2" 12 | val ziojson = "0.7.42" 13 | val zioconfig = "4.0.4" 14 | val ziologging = "2.5.0" 15 | val lmdb = "0.9.1" 16 | } 17 | 18 | libraryDependencies ++= Seq( 19 | "dev.zio" %% "zio" % versions.zio, 20 | "dev.zio" %% "zio-streams" % versions.zio, 21 | "dev.zio" %% "zio-json" % versions.ziojson, 22 | "dev.zio" %% "zio-config" % versions.zioconfig, 23 | "org.lmdbjava" % "lmdbjava" % versions.lmdb, 24 | "dev.zio" %% "zio-test" % versions.zio % Test, 25 | "dev.zio" %% "zio-logging" % versions.ziologging % Test, 26 | "dev.zio" %% "zio-test-sbt" % versions.zio % Test, 27 | "dev.zio" %% "zio-test-scalacheck" % versions.zio % Test, 28 | "dev.zio" %% "zio-nio" % versions.zionio % Test 29 | ) 30 | 31 | testFrameworks += new TestFramework("zio.test.sbt.ZTestFramework") 32 | 33 | ThisBuild / fork := true 34 | ThisBuild / javaOptions ++= Seq("--add-opens", "java.base/java.nio=ALL-UNNAMED", "--add-opens", "java.base/sun.nio.ch=ALL-UNNAMED") 35 | 36 | homepage := Some(new URL("https://github.com/dacr/zio-lmdb")) 37 | scmInfo := Some(ScmInfo(url(s"https://github.com/dacr/zio-lmdb.git"), s"git@github.com:dacr/zio-lmdb.git")) 38 | developers := List( 39 | Developer( 40 | id = "dacr", 41 | name = "David Crosson", 42 | email = "crosson.david@gmail.com", 43 | url = url("https://github.com/dacr") 44 | ) 45 | ) 46 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.10.11 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.github.sbt" % "sbt-release" % "1.4.0") 2 | addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.3.1") 3 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.12.2") 4 | addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.6.4") 5 | addSbtPlugin("com.github.sbt" % "sbt-native-packager" % "1.11.1") 6 | addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.14.2") 7 | -------------------------------------------------------------------------------- /publish.sbt: -------------------------------------------------------------------------------- 1 | pomIncludeRepository := { _ => false } 2 | publishMavenStyle := true 3 | Test / publishArtifact := false 4 | releaseCrossBuild := true 5 | versionScheme := Some("semver-spec") 6 | 7 | publishTo := { 8 | // For accounts created after Feb 2021: 9 | // val nexus = "https://s01.oss.sonatype.org/" 10 | val nexus = "https://oss.sonatype.org/" 11 | if (isSnapshot.value) Some("snapshots" at nexus + "content/repositories/snapshots") 12 | else Some("releases" at nexus + "service/local/staging/deploy/maven2") 13 | } 14 | 15 | releasePublishArtifactsAction := PgpKeys.publishSigned.value 16 | 17 | releaseTagComment := s"Releasing ${(ThisBuild / version).value}" 18 | releaseCommitMessage := s"Setting version to 
${(ThisBuild / version).value}" 19 | releaseNextCommitMessage := s"[ci skip] Setting version to ${(ThisBuild / version).value}" 20 | 21 | import ReleaseTransformations.* 22 | releaseProcess := Seq[ReleaseStep]( 23 | checkSnapshotDependencies, 24 | inquireVersions, 25 | runClean, 26 | runTest, 27 | setReleaseVersion, 28 | commitReleaseVersion, 29 | tagRelease, 30 | publishArtifacts, 31 | releaseStepCommand("sonatypeReleaseAll"), 32 | setNextVersion, 33 | commitNextVersion, 34 | pushChanges 35 | ) 36 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDB.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package zio.lmdb 18 | 19 | import zio._ 20 | import zio.stream.ZStream 21 | import zio.config._ 22 | 23 | trait LMDB { 24 | 25 | def databasePath: String 26 | 27 | def platformCheck(): IO[StorageSystemError, Unit] 28 | 29 | def collectionsAvailable(): IO[StorageSystemError, List[CollectionName]] 30 | 31 | def collectionExists(name: CollectionName): IO[StorageSystemError, Boolean] 32 | 33 | def collectionCreate[T](name: CollectionName, failIfExists: Boolean = true)(implicit codec: LMDBCodec[T]): IO[CreateErrors, LMDBCollection[T]] 34 | 35 | def collectionAllocate(name: CollectionName): IO[CreateErrors, Unit] 36 | 37 | def collectionGet[T](name: CollectionName)(implicit codec: LMDBCodec[T]): IO[GetErrors, LMDBCollection[T]] 38 | 39 | def collectionSize(name: CollectionName): IO[SizeErrors, Long] 40 | 41 | def collectionClear(name: CollectionName): IO[ClearErrors, Unit] 42 | 43 | def collectionDrop(name: CollectionName): IO[DropErrors, Unit] 44 | 45 | def fetch[T](collectionName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[T]] 46 | 47 | def head[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] 48 | 49 | def previous[T](collectionName: CollectionName, beforeThatKey: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] 50 | 51 | def next[T](collectionName: CollectionName, afterThatKey: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] 52 | 53 | def last[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] 54 | 55 | def contains(collectionName: CollectionName, key: RecordKey): IO[ContainsErrors, Boolean] 56 | 57 | def update[T](collectionName: CollectionName, key: RecordKey, modifier: T => T)(implicit codec: LMDBCodec[T]): IO[UpdateErrors, Option[T]] 58 | 59 | def upsert[T](collectionName: CollectionName, key: RecordKey, modifier: Option[T] => T)(implicit codec: LMDBCodec[T]): IO[UpsertErrors, T] 60 | 61 | def upsertOverwrite[T](collectionName: CollectionName, key: RecordKey, document: T)(implicit codec: LMDBCodec[T]): IO[UpsertErrors, Unit] 62 | 63 | 
def delete[T](collectionName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): IO[DeleteErrors, Option[T]] 64 | 65 | def collect[T]( 66 | collectionName: CollectionName, 67 | keyFilter: RecordKey => Boolean = _ => true, 68 | valueFilter: T => Boolean = (_: T) => true, 69 | startAfter: Option[RecordKey] = None, 70 | backward: Boolean = false, 71 | limit: Option[Int] = None 72 | )(implicit codec: LMDBCodec[T]): IO[CollectErrors, List[T]] 73 | 74 | def stream[T]( 75 | collectionName: CollectionName, 76 | keyFilter: RecordKey => Boolean = _ => true, 77 | startAfter: Option[RecordKey] = None, 78 | backward: Boolean = false 79 | )(implicit codec: LMDBCodec[T]): ZStream[Any, StreamErrors, T] 80 | 81 | def streamWithKeys[T]( 82 | collectionName: CollectionName, 83 | keyFilter: RecordKey => Boolean = _ => true, 84 | startAfter: Option[RecordKey] = None, 85 | backward: Boolean = false 86 | )(implicit codec: LMDBCodec[T]): ZStream[Any, StreamErrors, (RecordKey, T)] 87 | } 88 | 89 | object LMDB { 90 | 91 | val config: Config[LMDBConfig] = ((Config.string("name").withDefault(LMDBConfig.default.databaseName) 92 | ?? "Database name, which will be also used as the directory name") ++ 93 | (Config.string("home").optional.withDefault(LMDBConfig.default.databasesHome) 94 | ?? "Where to store the database directory") ++ 95 | (Config.boolean("sync").withDefault(LMDBConfig.default.fileSystemSynchronized) 96 | ?? "Synchronize the file system with all database write operations") ++ 97 | (Config.int("maxReaders").withDefault(LMDBConfig.default.maxReaders) 98 | ?? "The maximum number of readers") ++ 99 | (Config.int("maxCollections").withDefault(LMDBConfig.default.maxCollections) 100 | ?? "The maximum number of collections which can be created") ++ 101 | (Config.bigInt("mapSize").withDefault(LMDBConfig.default.mapSize) 102 | ?? "The maximum size of the whole database including metadata")) 103 | .to[LMDBConfig] 104 | .nested("lmdb") 105 | 106 | /** Default live implementation using the current configuration provider 107 | */ 108 | val live: ZLayer[Scope, Any, LMDB] = ZLayer.fromZIO( 109 | for { 110 | config <- ZIO.config(LMDB.config) 111 | // doc = generateDocs(LMDB.config).toTable.toGithubFlavouredMarkdown 112 | // _ <- ZIO.logInfo(s"Configuration documentation:\n$doc") 113 | _ <- ZIO.logInfo(s"Configuration : $config") 114 | lmdb <- LMDBLive.setup(config) 115 | } yield lmdb 116 | ) 117 | 118 | /** Default live implementation using the current configuration provider but overriding any configured database name with the provided one 119 | * @param name 120 | * database name to use 121 | * @return 122 | */ 123 | def liveWithDatabaseName(name: String): ZLayer[Scope, Any, LMDB] = ZLayer.fromZIO( 124 | for { 125 | config <- ZIO.config(LMDB.config).map(_.copy(databaseName = name)) 126 | // doc = generateDocs(LMDB.config).toTable.toGithubFlavouredMarkdown 127 | // _ <- Console.printLine(s"Configuration documentation:\n$doc") 128 | _ <- ZIO.logInfo(s"Configuration : $config") 129 | lmdb <- LMDBLive.setup(config) 130 | } yield lmdb 131 | ) 132 | 133 | /** Get the used storage directory in your file system. 
134 | * 135 | * @return 136 | * storage directory path 137 | */ 138 | def databasePath: ZIO[LMDB, StorageSystemError, String] = ZIO.serviceWith(_.databasePath) 139 | 140 | /** Check LMDB server current configuration compatibility 141 | */ 142 | def platformCheck(): ZIO[LMDB, StorageSystemError, Unit] = ZIO.serviceWithZIO(_.platformCheck()) 143 | 144 | /** List all available collections 145 | * 146 | * @return 147 | * the list of collection names 148 | */ 149 | def collectionsAvailable(): ZIO[LMDB, StorageSystemError, List[CollectionName]] = ZIO.serviceWithZIO(_.collectionsAvailable()) 150 | 151 | /** check if a collection exists 152 | * 153 | * @param name 154 | * the collection name 155 | * @return 156 | * true if the collection exists 157 | */ 158 | def collectionExists(name: CollectionName): ZIO[LMDB, StorageSystemError, Boolean] = ZIO.serviceWithZIO(_.collectionExists(name)) 159 | 160 | /** Create a collection and return the collection helper facade. Use collection helper facade when all records are using the same json data type. 161 | * 162 | * @param name 163 | * the collection name 164 | * @param failIfExists 165 | * raise an error if the collection already exists, default to true 166 | * @tparam T 167 | * the data type of the records which must be Json serializable 168 | * @return 169 | * the collection helper facade 170 | */ 171 | def collectionCreate[T](name: CollectionName, failIfExists: Boolean = true)(implicit codec: LMDBCodec[T]): ZIO[LMDB, CreateErrors, LMDBCollection[T]] = ZIO.serviceWithZIO(_.collectionCreate(name, failIfExists)) 172 | 173 | /** Create a collection 174 | * 175 | * @param name 176 | * the collection name 177 | */ 178 | def collectionAllocate(name: CollectionName): ZIO[LMDB, CreateErrors, Unit] = ZIO.serviceWithZIO(_.collectionAllocate(name)) 179 | 180 | /** Get a collection helper facade. Use collection helper facade when all records are using the same json data type. 
181 | * 182 | * @param name 183 | * the collection name 184 | * @return 185 | * the collection helper facade 186 | */ 187 | def collectionGet[T](name: CollectionName)(implicit codec: LMDBCodec[T]): ZIO[LMDB, GetErrors, LMDBCollection[T]] = ZIO.serviceWithZIO(_.collectionGet(name)) 188 | 189 | /** Get how many items a collection contains 190 | * 191 | * @param name 192 | * the collection name 193 | * @tparam T 194 | * the data type of the records which must be Json serializable 195 | * @return 196 | * the collection size 197 | */ 198 | def collectionSize(name: CollectionName): ZIO[LMDB, SizeErrors, Long] = ZIO.serviceWithZIO(_.collectionSize(name)) 199 | 200 | /** Remove all the content of a collection 201 | * 202 | * @param name 203 | * the collection name 204 | */ 205 | def collectionClear(name: CollectionName): ZIO[LMDB, ClearErrors, Unit] = ZIO.serviceWithZIO(_.collectionClear(name)) 206 | 207 | /** Drop a collection 208 | * 209 | * @param name 210 | * the collection name 211 | */ 212 | def collectionDrop(name: CollectionName): ZIO[LMDB, DropErrors, Unit] = ZIO.serviceWithZIO(_.collectionDrop(name)) 213 | 214 | /** Get a collection record 215 | * 216 | * @param collectionName 217 | * the collection name 218 | * @param key 219 | * the key of the record to get 220 | * @tparam T 221 | * the data type of the record which must be Json serializable 222 | * @return 223 | * some record or none if no record has been found for the given key 224 | */ 225 | def fetch[T](collectionName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): ZIO[LMDB, FetchErrors, Option[T]] = ZIO.serviceWithZIO(_.fetch(collectionName, key)) 226 | 227 | /** Get collection first record 228 | * 229 | * @param collectionName 230 | * the collection name 231 | * @tparam T 232 | * the data type of the record which must be Json serializable 233 | * @return 234 | * some (key,record) tuple or none if the collection is empty 235 | */ 236 | def head[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): ZIO[LMDB, FetchErrors, Option[(RecordKey, T)]] = ZIO.serviceWithZIO(_.head(collectionName)) 237 | 238 | /** Get the previous record for the given key 239 | * 240 | * @param collectionName 241 | * the collection name 242 | * @param beforeThatKey 243 | * the key of the reference record 244 | * @tparam T 245 | * the data type of the record which must be Json serializable 246 | * @return 247 | * some (key,record) tuple or none if the key is the first one 248 | */ 249 | def previous[T](collectionName: CollectionName, beforeThatKey: RecordKey)(implicit codec: LMDBCodec[T]): ZIO[LMDB, FetchErrors, Option[(RecordKey, T)]] = ZIO.serviceWithZIO(_.previous(collectionName, beforeThatKey)) 250 | 251 | /** Get the next record for the given key 252 | * 253 | * @param collectionName 254 | * the collection name 255 | * @param afterThatKey 256 | * the key of the reference record 257 | * @tparam T 258 | * the data type of the record which must be Json serializable 259 | * @return 260 | * some (key,record) tuple or none if the key is the last one 261 | */ 262 | def next[T](collectionName: CollectionName, afterThatKey: RecordKey)(implicit codec: LMDBCodec[T]): ZIO[LMDB, FetchErrors, Option[(RecordKey, T)]] = ZIO.serviceWithZIO(_.next(collectionName, afterThatKey)) 263 | 264 | /** Get collection last record 265 | * 266 | * @param collectionName 267 | * the collection name 268 | * @tparam T 269 | * the data type of the record which must be Json serializable 270 | * @return 271 | * some (key,record) tuple or none if the collection is 
empty 272 | */ 273 | def last[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): ZIO[LMDB, FetchErrors, Option[(RecordKey, T)]] = ZIO.serviceWithZIO(_.last(collectionName)) 274 | 275 | /** Check if a collection contains the given key 276 | * 277 | * @param collectionName 278 | * the collection name 279 | * @param key 280 | * the key of the record to look for 281 | * @return 282 | * true if the key is used by the given collection 283 | */ 284 | def contains(collectionName: CollectionName, key: RecordKey): ZIO[LMDB, ContainsErrors, Boolean] = ZIO.serviceWithZIO(_.contains(collectionName, key)) 285 | 286 | /** update atomically a record in a collection. 287 | * 288 | * @param collectionName 289 | * the collection name 290 | * @param key 291 | * the key for the record upsert 292 | * @param modifier 293 | * the lambda used to update the record content 294 | * @tparam T 295 | * the data type of the record which must be Json serializable 296 | * @returns 297 | * the updated record if a record exists for the given key 298 | */ 299 | 300 | def update[T](collectionName: CollectionName, key: RecordKey, modifier: T => T)(implicit codec: LMDBCodec[T]): ZIO[LMDB, UpdateErrors, Option[T]] = { 301 | ZIO.serviceWithZIO(_.update[T](collectionName, key, modifier)) 302 | } 303 | 304 | /** update or insert atomically a record in a collection. 305 | * 306 | * @param collectionName 307 | * the collection name 308 | * @param key 309 | * the key for the record upsert 310 | * @param modifier 311 | * the lambda used to update the record content 312 | * @tparam T 313 | * the data type of the record which must be Json serializable 314 | * @returns 315 | * the updated or inserted record 316 | */ 317 | def upsert[T](collectionName: CollectionName, key: RecordKey, modifier: Option[T] => T)(implicit codec: LMDBCodec[T]): ZIO[LMDB, UpsertErrors, T] = { 318 | ZIO.serviceWithZIO(_.upsert[T](collectionName, key, modifier)) 319 | } 320 | 321 | /** Overwrite or insert a record in a collection. If the key is already being used for a record then the previous record will be overwritten by the new one. 322 | * 323 | * @param collectionName 324 | * the collection name 325 | * @param key 326 | * the key for the record upsert 327 | * @param document 328 | * the record content to upsert 329 | * @tparam T 330 | * the data type of the record which must be Json serializable 331 | */ 332 | def upsertOverwrite[T](collectionName: CollectionName, key: RecordKey, document: T)(implicit codec: LMDBCodec[T]): ZIO[LMDB, UpsertErrors, Unit] = 333 | ZIO.serviceWithZIO(_.upsertOverwrite[T](collectionName, key, document)) 334 | 335 | /** Delete a record in a collection 336 | * 337 | * @param collectionName 338 | * the collection name 339 | * @param key 340 | * the key of the record to delete 341 | * @tparam T 342 | * the data type of the record which must be Json serializable the deleted record 343 | * @return 344 | * the deleted content 345 | */ 346 | def delete[T](collectionName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): ZIO[LMDB, DeleteErrors, Option[T]] = 347 | ZIO.serviceWithZIO(_.delete[T](collectionName, key)) 348 | 349 | /** Collect collection content into the memory, use keyFilter or valueFilter to limit the amount of loaded entries. 
350 | * 351 | * @param collectionName 352 | * the collection name 353 | * @param keyFilter 354 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 355 | * @param valueFilter 356 | * filter lambda to select only the record your want, default is no filter 357 | * @param startAfter 358 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 359 | * @param backward 360 | * going in reverse key order, default is false 361 | * @param limit 362 | * maximum number of item you want to get 363 | * @tparam T 364 | * the data type of the record which must be Json serializable 365 | * @return 366 | * All matching records 367 | */ 368 | def collect[T]( 369 | collectionName: CollectionName, 370 | keyFilter: RecordKey => Boolean = _ => true, 371 | valueFilter: T => Boolean = (_: T) => true, 372 | startAfter: Option[RecordKey] = None, 373 | backward: Boolean = false, 374 | limit: Option[Int] = None 375 | )(implicit codec: LMDBCodec[T]): ZIO[LMDB, CollectErrors, List[T]] = 376 | ZIO.serviceWithZIO(_.collect[T](collectionName, keyFilter, valueFilter, startAfter, backward, limit)) 377 | 378 | /** Stream collection records, use keyFilter to apply filtering before record deserialization. 379 | * 380 | * @param collectionName 381 | * the collection name 382 | * @param keyFilter 383 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 384 | * @param startAfter 385 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 386 | * @param backward 387 | * going in reverse key order, default is false 388 | * @tparam T 389 | * the data type of the record which must be Json serializable 390 | * @return 391 | * the stream of records 392 | */ 393 | def stream[T]( 394 | collectionName: CollectionName, 395 | keyFilter: RecordKey => Boolean = _ => true, 396 | startAfter: Option[RecordKey] = None, 397 | backward: Boolean = false 398 | )(implicit codec: LMDBCodec[T]): ZStream[LMDB, StreamErrors, T] = 399 | ZStream.serviceWithStream(_.stream(collectionName, keyFilter, startAfter, backward)) 400 | 401 | /** stream collection Key/record tuples, use keyFilter to apply filtering before record deserialization. 
402 | * 403 | * @param collectionName 404 | * the collection name 405 | * @param keyFilter 406 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 407 | * @param startAfter 408 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 409 | * @param backward 410 | * going in reverse key order, default is false 411 | * @tparam T 412 | * the data type of the record which must be Json serializable 413 | * @return 414 | * the tuple of key and record stream 415 | */ 416 | def streamWithKeys[T]( 417 | collectionName: CollectionName, 418 | keyFilter: RecordKey => Boolean = _ => true, 419 | startAfter: Option[RecordKey] = None, 420 | backward: Boolean = false 421 | )(implicit codec: LMDBCodec[T]): ZStream[LMDB, StreamErrors, (RecordKey, T)] = 422 | ZStream.serviceWithStream(_.streamWithKeys(collectionName, keyFilter, startAfter, backward)) 423 | } 424 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDBCodec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package zio.lmdb 17 | 18 | import java.nio.ByteBuffer 19 | 20 | trait LMDBCodec[T] { 21 | def encode(t: T): Array[Byte] 22 | def decode(bytes: ByteBuffer): Either[String, T] 23 | } 24 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDBCollection.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package zio.lmdb 17 | 18 | import zio._ 19 | import zio.lmdb.StorageUserError._ 20 | import zio.lmdb.StorageSystemError 21 | import zio.stream._ 22 | 23 | /** A helper class to simplify user experience by avoiding repeating collection name and data types 24 | * 25 | * @param name 26 | * collection name 27 | * @param JsonEncoder[T] 28 | * @param JsonDecoder[T] 29 | * @tparam T 30 | * the data class type for collection content 31 | */ 32 | case class LMDBCollection[T](name: String, lmdb: LMDB)(implicit codec:LMDBCodec[T]) { 33 | 34 | /** Get how many items a collection contains 35 | * 36 | * @return 37 | * the collection size 38 | */ 39 | def size(): IO[SizeErrors, Long] = lmdb.collectionSize(name) 40 | 41 | /** Remove all the content 42 | */ 43 | def clear(): IO[ClearErrors, Unit] = lmdb.collectionClear(name) 44 | 45 | /** Get a collection record 46 | * 47 | * @param key 48 | * the key of the record to get 49 | * @return 50 | * some record or none if no record has been found for the given key 51 | */ 52 | def fetch(key: RecordKey): IO[FetchErrors, Option[T]] = lmdb.fetch(name, key) 53 | 54 | /** Get collection first record 55 | * 56 | * @return 57 | * some (key,record) tuple or none if the collection is empty 58 | */ 59 | def head(): IO[FetchErrors, Option[(RecordKey, T)]] = lmdb.head(name) 60 | 61 | /** Get the previous record for the given key 62 | * 63 | * @param beforeThatKey 64 | * the key of the reference record 65 | * @return 66 | * some (key,record) tuple or none if the key is the first one 67 | */ 68 | def previous(beforeThatKey: RecordKey): IO[FetchErrors, Option[(RecordKey, T)]] = lmdb.previous(name, beforeThatKey) 69 | 70 | /** Get the next record for the given key 71 | * 72 | * @param afterThatKey 73 | * the key of the reference record 74 | * @return 75 | * some (key,record) tuple or none if the key is the last one 76 | */ 77 | def next(afterThatKey: RecordKey): IO[FetchErrors, Option[(RecordKey, T)]] = lmdb.next(name, afterThatKey) 78 | 79 | /** Get collection last record 80 | * 81 | * @return 82 | * some (key,record) tuple or none if the collection is empty 83 | */ 84 | def last(): IO[FetchErrors, Option[(RecordKey, T)]] = lmdb.last(name) 85 | 86 | /** Check if a collection contains the given key 87 | * 88 | * @param key 89 | * the key of the record to look for 90 | * @return 91 | * true if the key is used by the given collection 92 | */ 93 | def contains(key: RecordKey): IO[ContainsErrors, Boolean] = lmdb.contains(name, key) 94 | 95 | /** update atomically a record in a collection. 96 | * 97 | * @param key 98 | * the key for the record upsert 99 | * @param modifier 100 | * the lambda used to update the record content 101 | * @returns 102 | * the updated record if a record exists for the given key 103 | */ 104 | def update(key: RecordKey, modifier: T => T): IO[UpdateErrors, Option[T]] = lmdb.update(name, key, modifier) 105 | 106 | /** update or insert atomically a record in a collection. 107 | * 108 | * @param key 109 | * the key for the record upsert 110 | * @param modifier 111 | * the lambda used to update the record content 112 | * @returns 113 | * the updated or inserted record 114 | */ 115 | def upsert(key: RecordKey, modifier: Option[T] => T): IO[UpsertErrors, T] = 116 | lmdb.upsert[T](name, key, modifier) 117 | 118 | /** Overwrite or insert a record in a collection. If the key is already being used for a record then the previous record will be overwritten by the new one. 
119 | * 120 | * @param key 121 | * the key for the record upsert 122 | * @param document 123 | * the record content to upsert 124 | */ 125 | def upsertOverwrite(key: RecordKey, document: T): IO[UpsertErrors, Unit] = 126 | lmdb.upsertOverwrite[T](name, key, document) 127 | 128 | /** Delete a record in a collection 129 | * 130 | * @param key 131 | * the key of the record to delete 132 | * @return 133 | * the deleted content 134 | */ 135 | def delete(key: RecordKey): IO[DeleteErrors, Option[T]] = 136 | lmdb.delete[T](name, key) 137 | 138 | /** Collect collection content into the memory, use keyFilter or valueFilter to limit the amount of loaded entries. 139 | * 140 | * @param keyFilter 141 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 142 | * @param valueFilter 143 | * filter lambda to select only the record your want, default is no filter 144 | * @param startAfter 145 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 146 | * @param backward 147 | * going in reverse key order, default is false 148 | * @param limit 149 | * maximum number of item you want to get 150 | * @return 151 | * All matching records 152 | */ 153 | def collect( 154 | keyFilter: RecordKey => Boolean = _ => true, 155 | valueFilter: T => Boolean = (_: T) => true, 156 | startAfter: Option[RecordKey] = None, 157 | backward: Boolean = false, 158 | limit: Option[Int] = None 159 | ): IO[CollectErrors, List[T]] = 160 | lmdb.collect[T](name, keyFilter, valueFilter, startAfter, backward, limit) 161 | 162 | /** Stream collection records, use keyFilter to apply filtering before record deserialization. 163 | * 164 | * @param keyFilter 165 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 166 | * @param startAfter 167 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 168 | * @param backward 169 | * going in reverse key order, default is false 170 | * @return 171 | * the stream of records 172 | */ 173 | def stream( 174 | keyFilter: RecordKey => Boolean = _ => true, 175 | startAfter: Option[RecordKey] = None, 176 | backward: Boolean = false 177 | ): ZStream[Any, StreamErrors, T] = 178 | lmdb.stream(name, keyFilter, startAfter, backward) 179 | 180 | /** stream collection Key/record tuples, use keyFilter to apply filtering before record deserialization. 
181 | * 182 | * @param keyFilter 183 | * filter lambda to select only the keys you want, default is no filter, the value deserialization is done **after** the filtering step 184 | * @param startAfter 185 | * start the stream after the given key, default is start from the beginning (when backward is false) or from end (when backward is true) 186 | * @param backward 187 | * going in reverse key order, default is false 188 | * @return 189 | * the tuple of key and record stream 190 | */ 191 | def streamWithKeys( 192 | keyFilter: RecordKey => Boolean = _ => true, 193 | startAfter: Option[RecordKey] = None, 194 | backward: Boolean = false 195 | ): ZStream[Any, StreamErrors, (RecordKey, T)] = 196 | lmdb.streamWithKeys(name, keyFilter, startAfter, backward) 197 | } 198 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDBConfig.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package zio.lmdb 18 | 19 | import java.io.File 20 | import zio._ 21 | 22 | case class LMDBConfig( 23 | databaseName: String, 24 | databasesHome: Option[String], 25 | fileSystemSynchronized: Boolean, 26 | maxReaders: Int, 27 | maxCollections: Int, 28 | mapSize: BigInt 29 | ) 30 | 31 | object LMDBConfig { 32 | val default = 33 | LMDBConfig( 34 | databaseName = "default", 35 | databasesHome = None, 36 | fileSystemSynchronized = false, 37 | maxReaders = 100, 38 | mapSize = BigInt(100_000_000_000L), 39 | maxCollections = 10_000 40 | ) 41 | } 42 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDBIssues.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package zio.lmdb 18 | 19 | enum StorageUserError { 20 | case CollectionAlreadExists(name: CollectionName) 21 | case CollectionNotFound(name: CollectionName) 22 | case CodecFailure(issue: String) 23 | case OverSizedKey(id: String, expandedSize: Int, limit: Int) 24 | } 25 | 26 | enum StorageSystemError { 27 | case InternalError(message: String, cause: Option[Throwable] = None) extends StorageSystemError 28 | } 29 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/LMDBLive.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package zio.lmdb 17 | 18 | import zio._ 19 | import zio.stm._ 20 | import zio.stream._ 21 | 22 | import java.io.File 23 | import org.lmdbjava.{Cursor, Dbi, DbiFlags, Env, EnvFlags, KeyRange, Txn, Verifier} 24 | import org.lmdbjava.SeekOp._ 25 | import org.lmdbjava.CursorIterable.KeyVal 26 | 27 | import java.nio.charset.StandardCharsets 28 | import java.nio.ByteBuffer 29 | import java.time.OffsetDateTime 30 | import java.util.concurrent.TimeUnit 31 | import scala.jdk.CollectionConverters._ 32 | import zio.lmdb.StorageUserError._ 33 | import zio.lmdb.StorageSystemError._ 34 | 35 | /** LMDB ZIO abstraction layer, provides standard atomic operations implementations 36 | * @param env 37 | * @param openedCollectionDbisRef 38 | * @param reentrantLock 39 | */ 40 | class LMDBLive( 41 | env: Env[ByteBuffer], 42 | openedCollectionDbisRef: Ref[Map[String, Dbi[ByteBuffer]]], 43 | reentrantLock: TReentrantLock, 44 | val databasePath: String 45 | ) extends LMDB { 46 | private val charset = StandardCharsets.UTF_8 // TODO enhance charset support 47 | 48 | private def makeKeyByteBuffer(id: String): IO[KeyErrors, ByteBuffer] = { 49 | val keyBytes = id.getBytes(charset) 50 | if (keyBytes.length > env.getMaxKeySize) ZIO.fail(OverSizedKey(id, keyBytes.length, env.getMaxKeySize)) 51 | else 52 | for { 53 | key <- ZIO.attempt(ByteBuffer.allocateDirect(env.getMaxKeySize)).mapError(err => InternalError("Couldn't allocate byte buffer for key", Some(err))) 54 | _ <- ZIO.attempt(key.put(keyBytes).flip).mapError(err => InternalError("Couldn't copy key bytes to buffer", Some(err))) 55 | } yield key 56 | } 57 | 58 | private def getCollectionDbi(name: CollectionName): IO[CollectionNotFound, Dbi[ByteBuffer]] = { 59 | val alreadyHereLogic = for { 60 | openedCollectionDbis <- openedCollectionDbisRef.get 61 | } yield openedCollectionDbis.get(name) 62 | 63 | val openAndRememberLogic = for { 64 | openedCollectionDbis <- reentrantLock.withWriteLock( // See https://github.com/lmdbjava/lmdbjava/issues/195 65 | openedCollectionDbisRef.updateAndGet(before => 66 | if (before.contains(name)) before 67 | else before + (name -> env.openDbi(name)) 68 | ) 69 | ) 70 | } yield openedCollectionDbis.get(name) 71 | 72 | alreadyHereLogic.some 73 | .orElse(openAndRememberLogic.some) 
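// `.some` turns an empty Option result into a failure: a Dbi missing from the cache falls through to the open-and-remember branch, and any remaining failure is reported as CollectionNotFound just below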
74 | .mapError(err => CollectionNotFound(name)) 75 | } 76 | 77 | override def collectionExists(name: CollectionName): IO[StorageSystemError, Boolean] = { 78 | for { 79 | openedCollectionDbis <- openedCollectionDbisRef.get 80 | found <- if (openedCollectionDbis.contains(name)) ZIO.succeed(true) 81 | else collectionsAvailable().map(_.contains(name)) 82 | } yield found 83 | } 84 | 85 | override def collectionGet[T](name: CollectionName)(implicit codec: LMDBCodec[T]): IO[GetErrors, LMDBCollection[T]] = { 86 | for { 87 | exists <- collectionExists(name) 88 | collection <- ZIO.cond[CollectionNotFound, LMDBCollection[T]](exists, LMDBCollection[T](name, this), CollectionNotFound(name)) 89 | } yield collection 90 | } 91 | 92 | override def collectionSize(name: CollectionName): IO[SizeErrors, Long] = { 93 | for { 94 | collectionDbi <- getCollectionDbi(name) 95 | stats <- withReadTransaction(name) { txn => 96 | ZIO 97 | .attempt(collectionDbi.stat(txn)) 98 | .mapError(err => InternalError(s"Couldn't get $name size", Some(err))) 99 | } 100 | } yield stats.entries 101 | } 102 | 103 | override def collectionAllocate(name: CollectionName): IO[CreateErrors, Unit] = { 104 | for { 105 | exists <- collectionExists(name) 106 | _ <- ZIO.cond[CollectionAlreadExists, Unit](!exists, (), CollectionAlreadExists(name)) 107 | _ <- collectionCreateLogic(name) 108 | } yield () 109 | } 110 | 111 | override def collectionCreate[T](name: CollectionName, failIfExists: Boolean = true)(implicit codec: LMDBCodec[T]): IO[CreateErrors, LMDBCollection[T]] = { 112 | val allocateLogic = if (failIfExists) collectionAllocate(name) else collectionAllocate(name).ignore 113 | allocateLogic *> ZIO.succeed(LMDBCollection[T](name, this)) 114 | } 115 | 116 | private def collectionCreateLogic(name: CollectionName): ZIO[Any, StorageSystemError, Unit] = reentrantLock.withWriteLock { 117 | for { 118 | openedCollectionDbis <- reentrantLock.withWriteLock( // See https://github.com/lmdbjava/lmdbjava/issues/195 119 | openedCollectionDbisRef.updateAndGet(before => 120 | if (before.contains(name)) before 121 | else before + (name -> env.openDbi(name, DbiFlags.MDB_CREATE)) // TODO 122 | ) 123 | ) 124 | collectionDbi <- ZIO 125 | .from(openedCollectionDbis.get(name)) 126 | .mapError(err => InternalError(s"Couldn't create DB $name")) 127 | } yield () 128 | } 129 | 130 | private def withWriteTransaction(colName: CollectionName): ZIO.Release[Any, StorageSystemError, Txn[ByteBuffer]] = 131 | ZIO.acquireReleaseWith( 132 | ZIO 133 | .attemptBlocking(env.txnWrite()) 134 | .mapError(err => InternalError(s"Couldn't acquire write transaction on $colName", Some(err))) 135 | )(txn => 136 | ZIO 137 | .attemptBlocking(txn.close()) 138 | .ignoreLogged 139 | ) 140 | 141 | private def withReadTransaction(colName: CollectionName): ZIO.Release[Any, StorageSystemError, Txn[ByteBuffer]] = 142 | ZIO.acquireReleaseWith( 143 | ZIO 144 | .attemptBlocking(env.txnRead()) 145 | .mapError(err => InternalError(s"Couldn't acquire read transaction on $colName", Some(err))) 146 | )(txn => 147 | ZIO 148 | .attemptBlocking(txn.close()) 149 | .ignoreLogged 150 | ) 151 | 152 | private def collectionClearOrDropLogic(colDbi: Dbi[ByteBuffer], collectionName: CollectionName, dropDatabase: Boolean): ZIO[Any, ClearErrors, Unit] = { 153 | reentrantLock.withWriteLock( 154 | withWriteTransaction(collectionName) { txn => 155 | for { 156 | _ <- ZIO 157 | .attemptBlocking(colDbi.drop(txn, dropDatabase)) 158 | .mapError(err => InternalError(s"Couldn't ${if (dropDatabase) "drop" else "clear"} 
$collectionName", Some(err))) 159 | _ <- ZIO 160 | .attemptBlocking(txn.commit()) 161 | .mapError[ClearErrors](err => InternalError("Couldn't commit transaction", Some(err))) 162 | } yield () 163 | } 164 | ) 165 | } 166 | 167 | override def collectionClear(colName: CollectionName): IO[ClearErrors, Unit] = { 168 | for { 169 | collectionDbi <- getCollectionDbi(colName) 170 | _ <- collectionClearOrDropLogic(collectionDbi, colName, false) 171 | } yield () 172 | } 173 | 174 | override def collectionDrop(colName: CollectionName): IO[DropErrors, Unit] = { 175 | for { 176 | collectionDbi <- getCollectionDbi(colName) 177 | _ <- collectionClearOrDropLogic(collectionDbi, colName, true) 178 | _ <- openedCollectionDbisRef.updateAndGet(_.removed(colName)) 179 | _ <- reentrantLock.withWriteLock( 180 | ZIO 181 | .attemptBlocking(collectionDbi.close()) // TODO check close documentation more precisely at it states : It is very rare that closing a database handle is useful. 182 | .mapError[DropErrors](err => InternalError("Couldn't close collection internal handler", Some(err))) 183 | ) 184 | } yield () 185 | } 186 | 187 | /** @inheritdoc */ 188 | override def platformCheck(): IO[StorageSystemError, Unit] = reentrantLock.withWriteLock { 189 | ZIO 190 | .attemptBlockingIO(new Verifier(env).runFor(5, TimeUnit.SECONDS)) 191 | .mapError(err => InternalError(err.getMessage, Some(err))) 192 | .unit 193 | } 194 | 195 | override def collectionsAvailable(): IO[StorageSystemError, List[CollectionName]] = { 196 | reentrantLock.withWriteLock( // See https://github.com/lmdbjava/lmdbjava/issues/195 197 | for { 198 | collectionNames <- ZIO 199 | .attempt( 200 | env 201 | .getDbiNames() 202 | .asScala 203 | .map(bytes => new String(bytes)) 204 | .toList 205 | ) 206 | .mapError(err => InternalError("Couldn't list collections", Some(err))) 207 | } yield collectionNames 208 | ) 209 | } 210 | 211 | override def delete[T](colName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): IO[DeleteErrors, Option[T]] = { 212 | def deleteLogic(colDbi: Dbi[ByteBuffer]): IO[DeleteErrors, Option[T]] = { 213 | reentrantLock.withWriteLock( 214 | withWriteTransaction(colName) { txn => 215 | for { 216 | key <- makeKeyByteBuffer(key) 217 | found <- ZIO.attemptBlocking(Option(colDbi.get(txn, key))).mapError[DeleteErrors](err => InternalError(s"Couldn't fetch $key for delete on $colName", Some(err))) 218 | mayBeRawValue <- ZIO.foreach(found)(_ => ZIO.succeed(txn.`val`())) 219 | mayBeDoc <- ZIO.foreach(mayBeRawValue) { rawValue => 220 | ZIO.fromEither(codec.decode(rawValue)).mapError[DeleteErrors](msg => CodecFailure(msg)) 221 | } 222 | keyFound <- ZIO.attemptBlocking(colDbi.delete(txn, key)).mapError[DeleteErrors](err => InternalError(s"Couldn't delete $key from $colName", Some(err))) 223 | _ <- ZIO.attemptBlocking(txn.commit()).mapError[DeleteErrors](err => InternalError("Couldn't commit transaction", Some(err))) 224 | } yield mayBeDoc 225 | } 226 | ) 227 | } 228 | 229 | for { 230 | db <- getCollectionDbi(colName) 231 | status <- deleteLogic(db) 232 | } yield status 233 | } 234 | 235 | override def fetch[T](colName: CollectionName, key: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[T]] = { 236 | def fetchLogic(colDbi: Dbi[ByteBuffer]): ZIO[Any, FetchErrors, Option[T]] = { 237 | withReadTransaction(colName) { txn => 238 | for { 239 | key <- makeKeyByteBuffer(key) 240 | found <- ZIO.attemptBlocking(Option(colDbi.get(txn, key))).mapError[FetchErrors](err => InternalError(s"Couldn't fetch $key on $colName", Some(err))) 
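// The ByteBuffer exposed by txn.`val`() points into the LMDB memory-mapped region and is only valid while this read transaction is open, hence the value is decoded before the transaction is released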
241 | mayBeRawValue <- ZIO.foreach(found)(_ => ZIO.succeed(txn.`val`())) 242 | document <- ZIO 243 | .foreach(mayBeRawValue) { rawValue => 244 | ZIO.fromEither(codec.decode(rawValue)).mapError[FetchErrors](msg => CodecFailure(msg)) 245 | } 246 | } yield document 247 | } 248 | } 249 | 250 | for { 251 | db <- getCollectionDbi(colName) 252 | result <- fetchLogic(db) 253 | } yield result 254 | } 255 | 256 | import org.lmdbjava.GetOp 257 | import org.lmdbjava.SeekOp 258 | 259 | private def seek[T](colName: CollectionName, recordKey: Option[RecordKey], seekOperation: SeekOp)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] = { 260 | // TODO TOO COMPLEX !!!! 261 | def seekLogic(colDbi: Dbi[ByteBuffer]): ZIO[Scope, FetchErrors, Option[(RecordKey, T)]] = for { 262 | txn <- ZIO.acquireRelease( 263 | ZIO 264 | .attemptBlocking(env.txnRead()) 265 | .mapError[FetchErrors](err => InternalError(s"Couldn't acquire read transaction on $colName", Some(err))) 266 | )(txn => 267 | ZIO 268 | .attemptBlocking(txn.close()) 269 | .ignoreLogged 270 | ) 271 | cursor <- ZIO.acquireRelease( 272 | ZIO 273 | .attemptBlocking(colDbi.openCursor(txn)) 274 | .mapError[FetchErrors](err => InternalError(s"Couldn't acquire iterable on $colName", Some(err))) 275 | )(cursor => 276 | ZIO 277 | .attemptBlocking(cursor.close()) 278 | .ignoreLogged 279 | ) 280 | key <- ZIO.foreach(recordKey)(rk => makeKeyByteBuffer(rk)) 281 | _ <- ZIO.foreachDiscard(key) { k => 282 | ZIO 283 | .attempt(cursor.get(k, GetOp.MDB_SET)) 284 | .mapError[FetchErrors](err => InternalError(s"Couldn't set cursor at $recordKey for $colName", Some(err))) 285 | } 286 | seekSuccess <- ZIO 287 | .attempt(cursor.seek(seekOperation)) 288 | .mapError[FetchErrors](err => InternalError(s"Couldn't seek cursor for $colName", Some(err))) 289 | seekedKey <- ZIO 290 | .attempt(charset.decode(cursor.key()).toString) 291 | .when(seekSuccess) 292 | .mapError[FetchErrors](err => InternalError(s"Couldn't get key at cursor for $colName", Some(err))) 293 | valBuffer <- ZIO 294 | .attempt(cursor.`val`()) 295 | .when(seekSuccess) 296 | .mapError[FetchErrors](err => InternalError(s"Couldn't get value at cursor for stored $colName", Some(err))) 297 | seekedValue <- ZIO 298 | .foreach(valBuffer) { rawValue => 299 | ZIO 300 | .fromEither(codec.decode(rawValue)) 301 | .mapError[FetchErrors](msg => CodecFailure(msg)) 302 | } 303 | .when(seekSuccess) 304 | .map(_.flatten) 305 | } yield seekedValue.flatMap(v => seekedKey.map(k => k -> v)) 306 | 307 | for { 308 | db <- getCollectionDbi(colName) 309 | result <- ZIO.scoped(seekLogic(db)) 310 | } yield result 311 | } 312 | 313 | override def head[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] = { 314 | seek(collectionName, None, SeekOp.MDB_FIRST) 315 | } 316 | 317 | override def previous[T](collectionName: CollectionName, beforeThatKey: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] = { 318 | seek(collectionName, Some(beforeThatKey), SeekOp.MDB_PREV) 319 | } 320 | 321 | override def next[T](collectionName: CollectionName, afterThatKey: RecordKey)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] = { 322 | seek(collectionName, Some(afterThatKey), SeekOp.MDB_NEXT) 323 | } 324 | 325 | override def last[T](collectionName: CollectionName)(implicit codec: LMDBCodec[T]): IO[FetchErrors, Option[(RecordKey, T)]] = { 326 | seek(collectionName, None, SeekOp.MDB_LAST) 327 | } 328 | 329 | override def contains(colName: 
CollectionName, key: RecordKey): IO[ContainsErrors, Boolean] = { 330 | def containsLogic(colDbi: Dbi[ByteBuffer]): ZIO[Any, ContainsErrors, Boolean] = { 331 | withReadTransaction(colName) { txn => 332 | for { 333 | key <- makeKeyByteBuffer(key) 334 | found <- ZIO.attemptBlocking(Option(colDbi.get(txn, key))).mapError[ContainsErrors](err => InternalError(s"Couldn't check $key on $colName", Some(err))) 335 | } yield found.isDefined 336 | } 337 | } 338 | 339 | for { 340 | db <- getCollectionDbi(colName) 341 | result <- containsLogic(db) 342 | } yield result 343 | } 344 | 345 | override def update[T](collectionName: CollectionName, key: RecordKey, modifier: T => T)(implicit codec: LMDBCodec[T]): IO[UpdateErrors, Option[T]] = { 346 | def updateLogic(collectionDbi: Dbi[ByteBuffer]): IO[UpdateErrors, Option[T]] = { 347 | reentrantLock.withWriteLock( 348 | withWriteTransaction(collectionName) { txn => 349 | for { 350 | key <- makeKeyByteBuffer(key) 351 | found <- ZIO.attemptBlocking(Option(collectionDbi.get(txn, key))).mapError(err => InternalError(s"Couldn't fetch $key for upsert on $collectionName", Some(err))) 352 | mayBeRawValue <- ZIO.foreach(found)(_ => ZIO.succeed(txn.`val`())) 353 | mayBeDocBefore <- ZIO.foreach(mayBeRawValue) { rawValue => 354 | ZIO.fromEither(codec.decode(rawValue)).mapError[UpdateErrors](msg => CodecFailure(msg)) 355 | } 356 | mayBeDocAfter = mayBeDocBefore.map(modifier) 357 | _ <- ZIO.foreachDiscard(mayBeDocAfter) { docAfter => 358 | val docBytes = codec.encode(docAfter) 359 | for { 360 | valueBuffer <- ZIO.attemptBlocking(ByteBuffer.allocateDirect(docBytes.size)).mapError(err => InternalError("Couldn't allocate byte buffer for encoded value", Some(err))) 361 | _ <- ZIO.attemptBlocking(valueBuffer.put(docBytes).flip).mapError(err => InternalError("Couldn't copy value bytes to buffer", Some(err))) 362 | _ <- ZIO.attemptBlocking(collectionDbi.put(txn, key, valueBuffer)).mapError(err => InternalError(s"Couldn't upsert $key into $collectionName", Some(err))) 363 | _ <- ZIO.attemptBlocking(txn.commit()).mapError(err => InternalError(s"Couldn't commit upsert $key into $collectionName", Some(err))) 364 | } yield () 365 | } 366 | } yield mayBeDocAfter 367 | } 368 | ) 369 | } 370 | 371 | for { 372 | collectionDbi <- getCollectionDbi(collectionName) 373 | result <- updateLogic(collectionDbi) 374 | } yield result 375 | } 376 | 377 | override def upsertOverwrite[T](colName: CollectionName, key: RecordKey, document: T)(implicit codec: LMDBCodec[T]): IO[UpsertErrors, Unit] = { 378 | def upsertLogic(collectionDbi: Dbi[ByteBuffer]): IO[UpsertErrors, Unit] = { 379 | reentrantLock.withWriteLock( 380 | withWriteTransaction(colName) { txn => 381 | for { 382 | key <- makeKeyByteBuffer(key) 383 | found <- ZIO.attemptBlocking(Option(collectionDbi.get(txn, key))).mapError(err => InternalError(s"Couldn't fetch $key for upsertOverwrite on $colName", Some(err))) 384 | mayBeRawValue <- ZIO.foreach(found)(_ => ZIO.succeed(txn.`val`())) 385 | docBytes = codec.encode(document) 386 | valueBuffer <- ZIO.attemptBlocking(ByteBuffer.allocateDirect(docBytes.size)).mapError(err => InternalError("Couldn't allocate byte buffer for encoded value", Some(err))) 387 | _ <- ZIO.attemptBlocking(valueBuffer.put(docBytes).flip).mapError(err => InternalError("Couldn't copy value bytes to buffer", Some(err))) 388 | _ <- ZIO.attemptBlocking(collectionDbi.put(txn, key, valueBuffer)).mapError(err => InternalError(s"Couldn't upsertOverwrite $key into $colName", Some(err))) 389 | _ <- 
ZIO.attemptBlocking(txn.commit()).mapError(err => InternalError(s"Couldn't commit upsertOverwrite $key into $colName", Some(err))) 390 | } yield () 391 | } 392 | ) 393 | } 394 | 395 | for { 396 | collectionDbi <- getCollectionDbi(colName) 397 | result <- upsertLogic(collectionDbi) 398 | } yield result 399 | } 400 | 401 | override def upsert[T](colName: CollectionName, key: RecordKey, modifier: Option[T] => T)(implicit codec: LMDBCodec[T]): IO[UpsertErrors, T] = { 402 | def upsertLogic(collectionDbi: Dbi[ByteBuffer]): IO[UpsertErrors, T] = { 403 | reentrantLock.withWriteLock( 404 | withWriteTransaction(colName) { txn => 405 | for { 406 | key <- makeKeyByteBuffer(key) 407 | found <- ZIO.attemptBlocking(Option(collectionDbi.get(txn, key))).mapError(err => InternalError(s"Couldn't fetch $key for upsert on $colName", Some(err))) 408 | mayBeRawValue <- ZIO.foreach(found)(_ => ZIO.succeed(txn.`val`())) 409 | mayBeDocBefore <- ZIO.foreach(mayBeRawValue) { rawValue => 410 | ZIO.fromEither(codec.decode(rawValue)).mapError[UpsertErrors](msg => CodecFailure(msg)) 411 | } 412 | docAfter = modifier(mayBeDocBefore) 413 | docBytes = codec.encode(docAfter) 414 | valueBuffer <- ZIO.attemptBlocking(ByteBuffer.allocateDirect(docBytes.size)).mapError(err => InternalError("Couldn't allocate byte buffer for encoded value", Some(err))) 415 | _ <- ZIO.attemptBlocking(valueBuffer.put(docBytes).flip).mapError(err => InternalError("Couldn't copy value bytes to buffer", Some(err))) 416 | _ <- ZIO.attemptBlocking(collectionDbi.put(txn, key, valueBuffer)).mapError(err => InternalError(s"Couldn't upsert $key into $colName", Some(err))) 417 | _ <- ZIO.attemptBlocking(txn.commit()).mapError(err => InternalError(s"Couldn't commit upsert $key into $colName", Some(err))) 418 | } yield docAfter 419 | } 420 | ) 421 | } 422 | 423 | for { 424 | collectionDbi <- getCollectionDbi(colName) 425 | result <- upsertLogic(collectionDbi) 426 | } yield result 427 | } 428 | 429 | private def makeRange( 430 | startAfter: Option[ByteBuffer] = None, 431 | backward: Boolean = false 432 | ): KeyRange[ByteBuffer] = { 433 | startAfter match { 434 | case None => 435 | if (backward) KeyRange.allBackward() 436 | else KeyRange.all() 437 | case Some(key) => 438 | if (backward) KeyRange.greaterThanBackward(key) 439 | else KeyRange.greaterThan(key) 440 | } 441 | } 442 | 443 | override def collect[T]( 444 | colName: CollectionName, 445 | keyFilter: RecordKey => Boolean = _ => true, 446 | valueFilter: T => Boolean = (_: T) => true, 447 | startAfter: Option[RecordKey] = None, 448 | backward: Boolean = false, 449 | limit: Option[Int] = None 450 | )(implicit codec: LMDBCodec[T]): IO[CollectErrors, List[T]] = { 451 | def collectLogic(collectionDbi: Dbi[ByteBuffer]): ZIO[Scope, CollectErrors, List[T]] = for { 452 | txn <- ZIO.acquireRelease( 453 | ZIO 454 | .attemptBlocking(env.txnRead()) 455 | .mapError[CollectErrors](err => InternalError(s"Couldn't acquire read transaction on $colName", Some(err))) 456 | )(txn => 457 | ZIO 458 | .attemptBlocking(txn.close()) 459 | .ignoreLogged 460 | ) 461 | startAfterBB <- ZIO.foreach(startAfter)(makeKeyByteBuffer) 462 | iterable <- ZIO.acquireRelease( 463 | ZIO 464 | .attemptBlocking(collectionDbi.iterate(txn, makeRange(startAfterBB, backward))) 465 | .mapError[CollectErrors](err => InternalError(s"Couldn't acquire iterable on $colName", Some(err))) 466 | )(cursor => 467 | ZIO 468 | .attemptBlocking(cursor.close()) 469 | .ignoreLogged 470 | ) 471 | collected <- ZIO 472 | .attempt { 473 | def content = LazyList 474 | 
.from(KeyValueIterator(iterable.iterator())) 475 | .filter { entry => keyFilter(entry.key) } 476 | .flatMap { entry => entry.value.toOption } // TODO error are hidden !!! 477 | .filter(valueFilter) 478 | limit match { 479 | case None => content.toList 480 | case Some(l) => content.take(l).toList 481 | } 482 | } 483 | .mapError[CollectErrors](err => InternalError(s"Couldn't collect documents stored in $colName", Some(err))) 484 | } yield collected 485 | 486 | for { 487 | collectionDbi <- getCollectionDbi(colName) 488 | collected <- ZIO.scoped(collectLogic(collectionDbi)) 489 | } yield collected 490 | } 491 | 492 | // class LazyKeyValue[T](keyGetter: => RecordKey, valueGetter: => Either[String, T]) { 493 | // private var decodedKey: RecordKey = null // hidden optim to avoid memory pressure 494 | // private var decodedValue: Either[String, T] = null 495 | // 496 | // def key: RecordKey = { 497 | // if (decodedKey == null) { 498 | // decodedKey = keyGetter 499 | // } 500 | // decodedKey 501 | // } 502 | // 503 | // def value: Either[String, T] = { 504 | // if (decodedValue == null) { 505 | // decodedValue = valueGetter 506 | // } 507 | // decodedValue 508 | // } 509 | // } 510 | 511 | case class KeyValue[T](key:RecordKey, value:Either[String, T]) 512 | 513 | case class KeyValueIterator[T](jiterator: java.util.Iterator[KeyVal[ByteBuffer]])(implicit codec: LMDBCodec[T]) extends Iterator[KeyValue[T]] { 514 | 515 | private def extractKeyVal[T](keyval: KeyVal[ByteBuffer])(implicit codec: LMDBCodec[T]): KeyValue[T] = { 516 | val key = keyval.key() 517 | val value = keyval.`val`() 518 | KeyValue(charset.decode(key).toString, codec.decode(value)) 519 | } 520 | 521 | override def hasNext: Boolean = jiterator.hasNext() 522 | 523 | override def next(): KeyValue[T] = { 524 | extractKeyVal(jiterator.next()) 525 | } 526 | } 527 | 528 | def stream[T]( 529 | colName: CollectionName, 530 | keyFilter: RecordKey => Boolean = _ => true, 531 | startAfter: Option[RecordKey] = None, 532 | backward: Boolean = false 533 | )(implicit codec: LMDBCodec[T]): ZStream[Any, StreamErrors, T] = { 534 | def streamLogic(colDbi: Dbi[ByteBuffer]): ZIO[Scope, StreamErrors, ZStream[Any, StreamErrors, T]] = for { 535 | txn <- ZIO.acquireRelease( 536 | ZIO 537 | .attemptBlocking(env.txnRead()) 538 | .mapError(err => InternalError(s"Couldn't acquire read transaction on $colName", Some(err))) 539 | )(txn => 540 | ZIO 541 | .attemptBlocking(txn.close()) 542 | .ignoreLogged 543 | ) 544 | startAfterBB <- ZIO.foreach(startAfter)(makeKeyByteBuffer) 545 | iterable <- ZIO.acquireRelease( 546 | ZIO 547 | .attemptBlocking(colDbi.iterate(txn, makeRange(startAfterBB, backward))) 548 | .mapError(err => InternalError(s"Couldn't acquire iterable on $colName", Some(err))) 549 | )(cursor => 550 | ZIO 551 | .attemptBlocking(cursor.close()) 552 | .ignoreLogged 553 | ) 554 | } yield ZStream 555 | .fromIterator(KeyValueIterator(iterable.iterator())) 556 | .filter { entry => keyFilter(entry.key) } 557 | .mapZIO { entry => ZIO.from(entry.value).mapError(err => CodecFailure(err)) } 558 | .mapError { 559 | case err: CodecFailure => err 560 | case err: Throwable => InternalError(s"Couldn't stream from $colName", Some(err)) 561 | case err => InternalError(s"Couldn't stream from $colName : ${err.toString}", None) 562 | } 563 | 564 | val result = 565 | for { 566 | db <- getCollectionDbi(colName) 567 | _ <- reentrantLock.readLock 568 | stream <- streamLogic(db) 569 | } yield stream 570 | 571 | ZStream.unwrapScoped(result) // TODO not sure this is the good way ??? 
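// ZStream.unwrapScoped ties the read transaction and cursor acquired in streamLogic to the lifetime of the returned stream: their finalizers run when the stream completes, fails or is interrupted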
572 | } 573 | 574 | def streamWithKeys[T]( 575 | colName: CollectionName, 576 | keyFilter: RecordKey => Boolean = _ => true, 577 | startAfter: Option[RecordKey] = None, 578 | backward: Boolean = false 579 | )(implicit codec: LMDBCodec[T]): ZStream[Any, StreamErrors, (RecordKey, T)] = { 580 | def streamLogic(colDbi: Dbi[ByteBuffer]): ZIO[Scope, StreamErrors, ZStream[Any, StreamErrors, (RecordKey, T)]] = for { 581 | txn <- ZIO.acquireRelease( 582 | ZIO 583 | .attemptBlocking(env.txnRead()) 584 | .mapError(err => InternalError(s"Couldn't acquire read transaction on $colName", Some(err))) 585 | )(txn => 586 | ZIO 587 | .attemptBlocking(txn.close()) 588 | .ignoreLogged 589 | ) 590 | startAfterBB <- ZIO.foreach(startAfter)(makeKeyByteBuffer) 591 | iterable <- ZIO.acquireRelease( 592 | ZIO 593 | .attemptBlocking(colDbi.iterate(txn, makeRange(startAfterBB, backward))) 594 | .mapError(err => InternalError(s"Couldn't acquire iterable on $colName", Some(err))) 595 | )(cursor => 596 | ZIO 597 | .attemptBlocking(cursor.close()) 598 | .ignoreLogged 599 | ) 600 | } yield ZStream 601 | .fromIterator(KeyValueIterator(iterable.iterator())) 602 | .filter { entry => keyFilter(entry.key) } 603 | .mapZIO { entry => ZIO.fromEither(entry.value).map(value => entry.key-> value).mapError(err => CodecFailure(err)) } 604 | .mapError { 605 | case err: CodecFailure => err 606 | case err: Throwable => InternalError(s"Couldn't stream from $colName", Some(err)) 607 | case err => InternalError(s"Couldn't stream from $colName : ${err.toString}", None) 608 | } 609 | 610 | val result = 611 | for { 612 | db <- getCollectionDbi(colName) 613 | _ <- reentrantLock.readLock 614 | stream <- streamLogic(db) 615 | } yield stream 616 | 617 | ZStream.unwrapScoped(result) // TODO not sure this is the good way ??? 
618 | } 619 | 620 | } 621 | 622 | object LMDBLive { 623 | 624 | private def lmdbCreateEnv(config: LMDBConfig, databasePath: File) = { 625 | val syncFlag = if (!config.fileSystemSynchronized) Some(EnvFlags.MDB_NOSYNC) else None 626 | 627 | val flags = Array( 628 | EnvFlags.MDB_NOTLS, 629 | // MDB_NOLOCK : the caller must enforce single-writer semantics 630 | // MDB_NOLOCK : the caller must ensure that no readers are using old transactions while a writer is active 631 | EnvFlags.MDB_NOLOCK // Locks managed using ZIO ReentrantLock 632 | ) ++ syncFlag 633 | 634 | Env 635 | .create() 636 | .setMapSize(config.mapSize.toLong) 637 | .setMaxDbs(config.maxCollections) 638 | .setMaxReaders(config.maxReaders) 639 | .open( 640 | databasePath, 641 | flags: _* 642 | ) 643 | } 644 | 645 | def setup(config: LMDBConfig): ZIO[Scope, Throwable, LMDBLive] = { 646 | for { 647 | databasesHome <- ZIO 648 | .from(config.databasesHome) 649 | .orElse(System.envOrElse("HOME", ".").map(home => home + File.separator + ".lmdb")) 650 | databasePath = new File(databasesHome, config.databaseName) 651 | _ <- ZIO.logInfo(s"LMDB databasePath=$databasePath") 652 | _ <- ZIO.attemptBlockingIO(databasePath.mkdirs()) 653 | environment <- ZIO.acquireRelease( 654 | ZIO.attemptBlocking(lmdbCreateEnv(config, databasePath)) 655 | )(env => ZIO.attemptBlocking(env.close).ignoreLogged) 656 | openedCollectionDbis <- Ref.make[Map[String, Dbi[ByteBuffer]]](Map.empty) 657 | reentrantLock <- TReentrantLock.make.commit 658 | } yield new LMDBLive(environment, openedCollectionDbis, reentrantLock, databasePath.toString) 659 | } 660 | } 661 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/json/LMDBCodecJson.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package zio.lmdb.json 17 | 18 | import zio.json.internal.{RetractReader, Write} 19 | import zio.json.{DeriveJsonDecoder, DeriveJsonEncoder, JsonCodec, JsonDecoder, JsonEncoder, JsonError} 20 | import zio.lmdb.LMDBCodec 21 | 22 | import java.nio.ByteBuffer 23 | import java.nio.charset.StandardCharsets 24 | import scala.deriving.Mirror 25 | 26 | trait LMDBCodecJson[T] extends LMDBCodec[T] with JsonEncoder[T] with JsonDecoder[T] 27 | 28 | object LMDBCodecJson { 29 | 30 | private def createCodec[T](encoder: JsonEncoder[T], decoder: JsonDecoder[T]): LMDBCodecJson[T] = { 31 | val charset = StandardCharsets.UTF_8 // TODO enhance charset support 32 | 33 | new LMDBCodecJson[T] { 34 | override def unsafeEncode(a: T, indent: Option[Int], out: Write): Unit = encoder.unsafeEncode(a, indent, out) 35 | override def unsafeDecode(trace: List[JsonError], in: RetractReader): T = decoder.unsafeDecode(trace, in) 36 | 37 | def encode(t: T): Array[Byte] = encoder.encodeJson(t).toString.getBytes 38 | def decode(bytes: ByteBuffer): Either[String, T] = decoder.decodeJson(charset.decode(bytes)) 39 | } 40 | } 41 | 42 | inline def derived[T](using m: Mirror.Of[T]): LMDBCodecJson[T] = { 43 | val encoder = DeriveJsonEncoder.gen[T] 44 | val decoder = DeriveJsonDecoder.gen[T] 45 | createCodec(encoder, decoder) 46 | } 47 | 48 | inline given [T](using m: Mirror.Of[T]): LMDBCodecJson[T] = derived 49 | } 50 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/json/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package zio.lmdb 17 | 18 | import zio.lmdb.LMDBCodec 19 | 20 | import java.nio.ByteBuffer 21 | import java.nio.charset.StandardCharsets 22 | 23 | import zio.json.ast.Json 24 | import zio.json.ast.Json.* 25 | 26 | package object json { 27 | 28 | private val charset = StandardCharsets.UTF_8 // TODO enhance charset support 29 | 30 | implicit val jsonCodec: LMDBCodec[Json] = new LMDBCodec { 31 | def encode(t: Json): Array[Byte] = Json.encoder.encodeJson(t).toString.getBytes 32 | 33 | def decode(bytes: ByteBuffer): Either[String, Json] = Json.decoder.decodeJson(charset.decode(bytes)) 34 | } 35 | 36 | implicit val stringCodec: LMDBCodec[String] = new LMDBCodec { 37 | def encode(t: String): Array[Byte] = Str.encoder.encodeJson(Str(t)).toString.getBytes 38 | 39 | def decode(bytes: ByteBuffer): Either[String, String] = Str.decoder.decodeJson(charset.decode(bytes)).map(_.value) 40 | } 41 | 42 | implicit val strCodec: LMDBCodec[Str] = new LMDBCodec { 43 | def encode(t: Str): Array[Byte] = Str.encoder.encodeJson(t).toString.getBytes 44 | 45 | def decode(bytes: ByteBuffer): Either[String, Str] = Str.decoder.decodeJson(charset.decode(bytes)) 46 | } 47 | 48 | implicit val intCodec: LMDBCodec[Int] = new LMDBCodec { 49 | def encode(t: Int): Array[Byte] = Num.encoder.encodeJson(Num(t)).toString.getBytes 50 | 51 | def decode(bytes: ByteBuffer): Either[String, Int] = Num.decoder.decodeJson(charset.decode(bytes)).map(_.value.intValue()) 52 | } 53 | 54 | implicit val doubleCodec: LMDBCodec[Double] = new LMDBCodec { 55 | def encode(t: Double): Array[Byte] = Num.encoder.encodeJson(Num(t)).toString.getBytes 56 | 57 | def decode(bytes: ByteBuffer): Either[String, Double] = Num.decoder.decodeJson(charset.decode(bytes)).map(_.value.doubleValue()) 58 | } 59 | 60 | implicit val floatCodec: LMDBCodec[Float] = new LMDBCodec { 61 | def encode(t: Float): Array[Byte] = Num.encoder.encodeJson(Num(t)).toString.getBytes 62 | 63 | def decode(bytes: ByteBuffer): Either[String, Float] = Num.decoder.decodeJson(charset.decode(bytes)).map(_.value.floatValue()) 64 | } 65 | 66 | implicit val numCodec: LMDBCodec[Num] = new LMDBCodec { 67 | def encode(t: Num): Array[Byte] = Num.encoder.encodeJson(t).toString.getBytes 68 | 69 | def decode(bytes: ByteBuffer): Either[String, Num] = Num.decoder.decodeJson(charset.decode(bytes)) 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /src/main/scala/zio/lmdb/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package zio 18 | 19 | import zio.lmdb.StorageUserError.* 20 | 21 | package object lmdb { 22 | type CollectionName = String 23 | type RecordKey = String 24 | 25 | type KeyErrors = OverSizedKey | StorageSystemError 26 | type SizeErrors = CollectionNotFound | StorageSystemError 27 | type ClearErrors = CollectionNotFound | StorageSystemError 28 | type DropErrors = CollectionNotFound | StorageSystemError 29 | type GetErrors = CollectionNotFound | StorageSystemError 30 | type CreateErrors = CollectionAlreadExists | StorageSystemError 31 | type FetchErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 32 | type ContainsErrors = OverSizedKey | CollectionNotFound | StorageSystemError 33 | type UpdateErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 34 | type UpsertErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 35 | type DeleteErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 36 | type CollectErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 37 | type StreamErrors = OverSizedKey | CollectionNotFound | CodecFailure | StorageSystemError 38 | } 39 | -------------------------------------------------------------------------------- /src/test/scala/zio/lmdb/Commons.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package zio.lmdb 17 | 18 | import zio._ 19 | import zio.logging._ 20 | import zio.nio.file.Files 21 | 22 | trait Commons { 23 | val config: ConsoleLoggerConfig = ConsoleLoggerConfig( 24 | LogFormat.default, 25 | LogFilter.LogLevelByNameConfig(LogLevel.None) 26 | ) 27 | 28 | val logger = Runtime.removeDefaultLoggers >>> consoleLogger(config) 29 | 30 | val lmdbLayer = ZLayer.scoped( 31 | for { 32 | path <- Files.createTempDirectoryScoped(prefix = Some("lmdb"), fileAttributes = Nil) 33 | config = LMDBConfig.default.copy(databasesHome = Some(path.toString)) 34 | lmdb <- LMDBLive.setup(config) 35 | } yield lmdb 36 | ) 37 | 38 | val randomUUID = Random.nextUUID.map(_.toString) 39 | 40 | val randomCollectionName = for { 41 | uuid <- randomUUID 42 | name = s"collection-$uuid" 43 | } yield name 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/test/scala/zio/lmdb/LMDBBasicUsageSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package zio.lmdb 17 | 18 | import zio.* 19 | import zio.test.* 20 | import zio.test.TestAspect.* 21 | import zio.json.* 22 | import zio.nio.file.Files 23 | import zio.lmdb.json.* 24 | 25 | case class Record(name: String, age: Long) derives LMDBCodecJson 26 | 27 | object LMDBBasicUsageSpec extends ZIOSpecDefault with Commons { 28 | 29 | override val bootstrap: ZLayer[Any, Any, TestEnvironment] = logger >>> testEnvironment 30 | 31 | override def spec = suite("LMDB for ZIO as a service")( 32 | test("basic usage")( 33 | for { 34 | collection <- LMDB.collectionCreate[Record]("example") 35 | record = Record("John Doe", 42) 36 | recordId <- Random.nextUUID.map(_.toString) 37 | _ <- collection.upsert(recordId, previousRecord => record) 38 | exists <- collection.contains(recordId) 39 | gotten <- collection.fetch(recordId).some 40 | deletedRecord <- collection.delete(recordId) 41 | gotNothing <- collection.fetch(recordId) 42 | } yield assertTrue( 43 | gotten == record, 44 | deletedRecord.contains(record), 45 | gotNothing.isEmpty, 46 | exists 47 | ) 48 | ) 49 | ).provide(lmdbLayer) @@ withLiveClock @@ withLiveRandom @@ timed 50 | } 51 | -------------------------------------------------------------------------------- /src/test/scala/zio/lmdb/LMDBConcurrencySpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package zio.lmdb 17 | 18 | import zio.* 19 | import zio.json.* 20 | import zio.test.TestAspect.* 21 | import zio.test.* 22 | import zio.lmdb.json.* 23 | 24 | import java.util.UUID 25 | 26 | case class Dummy( 27 | uuid: UUID, 28 | x: Int, 29 | y: Double 30 | ) 31 | 32 | object Dummy { 33 | implicit val codec: LMDBCodec[Dummy] = LMDBCodecJson.derived 34 | 35 | def random = for { 36 | uuid <- Random.nextUUID 37 | x <- Random.nextInt 38 | y <- Random.nextDouble 39 | } yield Dummy(uuid, x, y) 40 | } 41 | 42 | object LMDBConcurrencySpec extends ZIOSpecDefault with Commons { 43 | 44 | override val bootstrap: ZLayer[Any, Any, TestEnvironment] = logger >>> testEnvironment 45 | 46 | val collectionLimit = 50 47 | val recordsLimit = 1_000 48 | 49 | override def spec = suite("concurrency behavior checks")( 50 | List(1, 5, 10, 20).map { parallelism => 51 | // ----------------------------------------------------------------------------- 52 | test(s"many collections writes in parallel ${recordsLimit * collectionLimit} records through $collectionLimit collections - parallelism=$parallelism") { 53 | val strategy = ExecutionStrategy.ParallelN(parallelism) 54 | for { 55 | collections <- ZIO.foreachExec(1.to(collectionLimit))(strategy) { n => 56 | LMDB.collectionCreate[Dummy](s"concurrent-collection-$n") 57 | } 58 | _ <- ZIO.foreachExec(collections)(strategy) { collection => 59 | ZIO.foreachExec(1.to(recordsLimit))(strategy) { n => 60 | Dummy.random.flatMap { dummy => 61 | collection.upsertOverwrite(dummy.uuid.toString, dummy) 62 | } 63 | } 64 | } 65 | sizes <- ZIO.foreachExec(collections)(strategy) { collection => 66 | collection.size() 67 | } 68 | } yield assertTrue( 69 | collections.size == collectionLimit, 70 | sizes.size == collectionLimit, 71 | sizes.forall(_ == recordsLimit) 72 | ) 73 | } 74 | } 75 | ).provide(lmdbLayer) @@ withLiveClock @@ withLiveRandom @@ timed 76 | } 77 | -------------------------------------------------------------------------------- /src/test/scala/zio/lmdb/LMDBDataClassSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package zio.lmdb 17 | 18 | import zio.* 19 | import zio.test.* 20 | import zio.test.TestAspect.* 21 | import zio.json.* 22 | import zio.nio.file.Files 23 | import zio.lmdb.json.* 24 | 25 | case class User(firstName:String, lastName:String, age:Option[Int]) derives LMDBCodecJson 26 | case class Login(username:String, user:User) derives LMDBCodecJson 27 | 28 | object LMDBDataClassSpec extends ZIOSpecDefault with Commons { 29 | 30 | override val bootstrap: ZLayer[Any, Any, TestEnvironment] = logger >>> testEnvironment 31 | 32 | override def spec = suite("Json serialization codec")( 33 | test("support product type")( 34 | for { 35 | collection <- LMDB.collectionCreate[Login]("logins") 36 | user = User("John", "Doe", Some(42)) 37 | record = Login("joe", user) 38 | recordId <- Random.nextUUID.map(_.toString) 39 | _ <- collection.upsert(recordId, previousRecord => record) 40 | exists <- collection.contains(recordId) 41 | gotten <- collection.fetch(recordId).some 42 | deletedRecord <- collection.delete(recordId) 43 | gotNothing <- collection.fetch(recordId) 44 | } yield assertTrue( 45 | gotten == record, 46 | deletedRecord.contains(record), 47 | gotNothing.isEmpty, 48 | exists 49 | ) 50 | ) 51 | ).provide(lmdbLayer) @@ withLiveClock @@ withLiveRandom @@ timed 52 | } 53 | -------------------------------------------------------------------------------- /src/test/scala/zio/lmdb/LMDBFeaturesSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2025 David Crosson 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package zio.lmdb 17 | 18 | import zio.* 19 | import zio.json.ast.Json 20 | import zio.json.ast.Json.* 21 | import zio.nio.file.* 22 | import zio.stream.{ZSink, ZStream} 23 | import zio.test.* 24 | import zio.test.Gen.* 25 | import zio.test.TestAspect.* 26 | 27 | import zio.lmdb.json.* 28 | 29 | object LMDBFeaturesSpec extends ZIOSpecDefault with Commons { 30 | 31 | override val bootstrap: ZLayer[Any, Any, TestEnvironment] = logger >>> testEnvironment 32 | 33 | val keygen = stringBounded(1, 510)(asciiChar) 34 | val valuegen = stringBounded(0, 1024)(asciiChar) 35 | val limit = 30_000 36 | 37 | override def spec = suite("Lightening Memory Mapped Database abstraction layer spec")( 38 | // ----------------------------------------------------------------------------- 39 | test("platform check")( 40 | for { 41 | hasSucceeded <- LMDB.platformCheck().isSuccess 42 | } yield assertTrue( 43 | hasSucceeded 44 | ) 45 | ), // @@ ignore, // AS IT HAS A GLOBAL IMPACT ON THE DATABASE IF IT HAS BEEN SHARED BETWEEN ALL TESTS !! 
46 | // ----------------------------------------------------------------------------- 47 | test("create collection")( 48 | for { 49 | colName <- randomCollectionName 50 | createSuccess <- LMDB.collectionCreate[String](colName).isSuccess 51 | recreateFailure <- LMDB.collectionCreate[String](colName, failIfExists = true).isFailure 52 | createIfNeededSuccess <- LMDB.collectionCreate[String](colName, failIfExists = false).isSuccess 53 | } yield assertTrue( 54 | createSuccess, 55 | recreateFailure, 56 | createIfNeededSuccess 57 | ) 58 | ), 59 | // ----------------------------------------------------------------------------- 60 | test("delete collection")( 61 | for { 62 | colName <- randomCollectionName 63 | _ <- LMDB.collectionCreate[String](colName) 64 | collectionCreated <- LMDB.collectionExists(colName) 65 | _ <- LMDB.collectionDrop(colName) 66 | collectionStillExists <- LMDB.collectionExists(colName) 67 | collectionCreatedAgain <- LMDB.collectionCreate[String](colName, failIfExists = true).isSuccess 68 | } yield assertTrue( 69 | collectionCreated, 70 | !collectionStillExists, 71 | collectionCreatedAgain 72 | ) 73 | ), 74 | // ----------------------------------------------------------------------------- 75 | test("create and list collections")( 76 | for { 77 | colName1 <- randomCollectionName 78 | colName2 <- randomCollectionName 79 | colName3 <- randomCollectionName 80 | _ <- LMDB.collectionCreate[String](colName1) 81 | _ <- LMDB.collectionCreate[Double](colName2) 82 | _ <- LMDB.collectionCreate[Json](colName3) 83 | databases <- LMDB.collectionsAvailable() 84 | } yield assertTrue( 85 | databases.contains(colName1), 86 | databases.contains(colName2) 87 | ).label(s"colName1=$colName1 colName2=$colName2") 88 | ), 89 | // ----------------------------------------------------------------------------- 90 | test("try to set/get a key")( 91 | check(keygen, string) { (id, data) => 92 | val value = Str(data) 93 | for { 94 | colName <- randomCollectionName 95 | col <- LMDB.collectionCreate[Str](colName) 96 | _ <- col.upsertOverwrite(id, value) 97 | gotten <- col.fetch(id) 98 | } yield assertTrue( 99 | gotten == Some(value) 100 | ).label(s"for key $id") 101 | } 102 | ) @@ samples(100), 103 | // ----------------------------------------------------------------------------- 104 | test("try to get an non existent key")( 105 | for { 106 | colName <- randomCollectionName 107 | id <- randomUUID 108 | col <- LMDB.collectionCreate[Str](colName) 109 | isFailed <- col.fetch(id).some.isFailure 110 | } yield assertTrue(isFailed).label(s"for key $id") 111 | ), 112 | // ----------------------------------------------------------------------------- 113 | test("check key existence")( 114 | for { 115 | colName <- randomCollectionName 116 | id <- randomUUID 117 | col <- LMDB.collectionCreate[Str](colName) 118 | _ <- col.upsertOverwrite(id, Str("some data")) 119 | result <- col.contains(id) 120 | } yield assertTrue( 121 | result == true 122 | ).label(s"for key $id") 123 | ), 124 | // ----------------------------------------------------------------------------- 125 | test("check key non existence")( 126 | for { 127 | colName <- randomCollectionName 128 | id <- randomUUID 129 | col <- LMDB.collectionCreate[Str](colName) 130 | result <- col.contains(id) 131 | } yield assertTrue( 132 | result == false 133 | ).label(s"for key $id") 134 | ), 135 | // ----------------------------------------------------------------------------- 136 | test("basic CRUDL operations") { 137 | check(keygen, valuegen, valuegen) { (id, data1, data2) => 
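// property-based check: the whole create/read/update/delete/list cycle is replayed for many generated keys and values (see the samples(50) aspect below)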
138 | val value = Str(data1) 139 | val updatedValue = Str(data2) 140 | for { 141 | lmdb <- ZIO.service[LMDBLive] 142 | colName <- randomCollectionName 143 | col <- lmdb.collectionCreate[Str](colName) 144 | _ <- col.upsertOverwrite(id, value) 145 | gotten <- col.fetch(id) 146 | _ <- col.upsertOverwrite(id, updatedValue) 147 | gottenUpdated <- col.fetch(id) 148 | listed <- col.collect() 149 | _ <- col.delete(id) 150 | isFailed <- col.fetch(id).some.isFailure 151 | } yield assertTrue( 152 | gotten.contains(value), 153 | gottenUpdated.contains(updatedValue), 154 | listed.contains(updatedValue), 155 | listed.size == 1, 156 | isFailed 157 | ).label(s"for key $id") 158 | } 159 | } @@ tag("slow") @@ samples(50), 160 | // ----------------------------------------------------------------------------- 161 | test("clear collection content") { 162 | for { 163 | lmdb <- ZIO.service[LMDBLive] 164 | colName <- randomCollectionName 165 | col <- lmdb.collectionCreate[Str](colName) 166 | id1 <- randomUUID 167 | id2 <- randomUUID 168 | _ <- col.upsertOverwrite(id1, Str("value1")) 169 | _ <- col.upsertOverwrite(id2, Str("value2")) 170 | sizeBefore <- col.size() 171 | _ <- col.clear() 172 | sizeAfter <- col.size() 173 | } yield assertTrue( 174 | sizeBefore == 2, 175 | sizeAfter == 0 176 | ) 177 | }, 178 | // ----------------------------------------------------------------------------- 179 | test("many overwrite updates") { 180 | for { 181 | lmdb <- ZIO.service[LMDBLive] 182 | id <- randomUUID 183 | maxValue = limit 184 | colName <- randomCollectionName 185 | col <- lmdb.collectionCreate[Num](colName) 186 | _ <- ZIO.foreachDiscard(1.to(maxValue))(i => col.upsertOverwrite(id, Num(i))) 187 | num <- col.fetch(id) 188 | } yield assertTrue( 189 | num.map(_.value.intValue()).contains(maxValue) 190 | ) 191 | } @@ tag("slow"), 192 | // ----------------------------------------------------------------------------- 193 | test("safe update in place") { 194 | def modifier(from: Num): Num = Num(from.value.intValue() + 1) 195 | 196 | for { 197 | id <- randomUUID 198 | count = limit 199 | colName <- randomCollectionName 200 | col <- LMDB.collectionCreate[Num](colName) 201 | shouldBeEmpty <- col.update(id, modifier) 202 | _ <- col.upsertOverwrite(id, Num(0)) 203 | _ <- ZIO.foreachDiscard(1.to(count))(i => col.update(id, modifier)) 204 | num <- col.fetch(id) 205 | } yield assertTrue( 206 | shouldBeEmpty.isEmpty, 207 | num.map(_.value.intValue()).contains(count) 208 | ) 209 | }, // ----------------------------------------------------------------------------- 210 | test("safe upsert in place") { 211 | def modifier(from: Option[Num]): Num = from match { 212 | case None => Num(1) 213 | case Some(num) => Num(num.value.intValue() + 1) 214 | } 215 | 216 | for { 217 | id <- randomUUID 218 | count = limit 219 | colName <- randomCollectionName 220 | col <- LMDB.collectionCreate[Num](colName) 221 | _ <- ZIO.foreachDiscard(1.to(count))(i => col.upsert(id, modifier)) 222 | num <- col.fetch(id) 223 | } yield assertTrue( 224 | num.map(_.value.intValue()).contains(count) 225 | ) 226 | }, 227 | // ----------------------------------------------------------------------------- 228 | test("many updates within multiple collection") { 229 | def modifier(from: Option[Num]): Num = from match { 230 | case None => Num(1) 231 | case Some(num) => Num(num.value.intValue() + 1) 232 | } 233 | 234 | val localLimit = 10_000 235 | val colCount = if (localLimit < 1000) 5 else 100 236 | val max = localLimit 237 | 238 | for { 239 | id <- randomUUID 240 | colName <- 
randomCollectionName 241 | cols <- ZIO.foreach(1.to(colCount))(i => LMDB.collectionCreate[Num](s"$colName#${i % colCount}")).map(_.toVector) 242 | _ <- ZIO.foreachParDiscard(1.to(max))(i => cols(i % colCount).upsert(id, modifier)) 243 | num1 <- cols(0).fetch(id) 244 | num2 <- cols(1).fetch(id) 245 | createdDatabases <- LMDB.collectionsAvailable() 246 | } yield assertTrue( 247 | num1.map(_.value.intValue()).contains(max / colCount), 248 | num2.map(_.value.intValue()).contains(max / colCount), 249 | createdDatabases.size >= colCount 250 | ) 251 | }, 252 | // ----------------------------------------------------------------------------- 253 | test("list collection content") { 254 | val count = limit 255 | val value = Num(42) 256 | for { 257 | colName <- randomCollectionName 258 | col <- LMDB.collectionCreate[Num](colName) 259 | _ <- ZIO.foreachDiscard(1.to(count))(num => col.upsertOverwrite(s"id#$num", value)) 260 | gottenSize <- col.size() 261 | collected <- col.collect() 262 | } yield assertTrue( 263 | collected.size == count, 264 | gottenSize == count 265 | ) 266 | }, 267 | // ----------------------------------------------------------------------------- 268 | test("stream collection content") { 269 | val count = limit 270 | for { 271 | colName <- randomCollectionName 272 | col <- LMDB.collectionCreate[Num](colName) 273 | _ <- ZIO.foreachDiscard(1.to(count))(num => col.upsertOverwrite(s"id#$num", Num(num))) 274 | returnedCount1 <- col.stream().filter(_.value.intValue() % 2 == 0).runCount 275 | returnedCount2 <- col.streamWithKeys().filter { case (key, record) => record.value.intValue() % 2 == 0 }.runCount 276 | } yield assertTrue( 277 | returnedCount1.toInt == count / 2, 278 | returnedCount2.toInt == count / 2 279 | ) 280 | }, 281 | // ----------------------------------------------------------------------------- 282 | test("moves in empty collection") { 283 | for { 284 | colName <- randomCollectionName 285 | col <- LMDB.collectionCreate[Num](colName) 286 | headOption <- col.head() 287 | lastOption <- col.last() 288 | } yield assertTrue( 289 | headOption.isEmpty, 290 | lastOption.isEmpty 291 | ) 292 | }, 293 | // ----------------------------------------------------------------------------- 294 | test("going forward / backward in a collection using collect or stream") { 295 | for { 296 | colName <- randomCollectionName 297 | col <- LMDB.collectionCreate[Num](colName) 298 | keys = 'A'.to('Z').toList.map(_.toString) 299 | values = keys.zipWithIndex.map { case (k, v) => Num(v) } 300 | keyvalues = keys.zip(values) 301 | _ <- ZIO.foreachDiscard(keyvalues) { case (k, v) => col.upsertOverwrite(k, v) } 302 | // ---------------------- 303 | backwards <- col.collect(startAfter = None, backward = true) 304 | backwardsAfter <- col.collect(startAfter = Some("D"), backward = true) 305 | forwards <- col.collect(startAfter = None, backward = false) 306 | forwardsAfter <- col.collect(startAfter = Some("T"), backward = false) 307 | // ---------------------- 308 | streamBackwards <- col.stream(startAfter = None, backward = true).runCollect 309 | streamBackwardsAfter <- col.stream(startAfter = Some("D"), backward = true).runCollect 310 | streamForwards <- col.stream(startAfter = None, backward = false).runCollect 311 | streamForwardsAfter <- col.stream(startAfter = Some("T"), backward = false).runCollect 312 | // ---------------------- 313 | streamWithKeysBackwards <- col.streamWithKeys(startAfter = None, backward = true).runCollect 314 | streamWithKeysBackwardsAfter <- col.streamWithKeys(startAfter = Some("D"), 
backward = true).runCollect 315 | streamWithKeysForwards <- col.streamWithKeys(startAfter = None, backward = false).runCollect 316 | streamWithKeysForwardsAfter <- col.streamWithKeys(startAfter = Some("T"), backward = false).runCollect 317 | } yield assertTrue( 318 | // ---------------------- 319 | backwards == values.reverse, 320 | backwardsAfter == values.take(3).reverse, 321 | forwards == values, 322 | forwardsAfter == values.takeRight(6), 323 | // ---------------------- 324 | streamBackwards.toList == values.reverse, 325 | streamBackwardsAfter.toList == values.take(3).reverse, 326 | streamForwards.toList == values, 327 | streamForwardsAfter.toList == values.takeRight(6), 328 | // ---------------------- 329 | streamWithKeysBackwards.toList == keyvalues.reverse, 330 | streamWithKeysBackwardsAfter.toList == keyvalues.take(3).reverse, 331 | streamWithKeysForwards.toList == keyvalues, 332 | streamWithKeysForwardsAfter.toList == keyvalues.takeRight(6) 333 | ) 334 | }, 335 | // ----------------------------------------------------------------------------- 336 | test("moves in collection") { 337 | for { 338 | colName <- randomCollectionName 339 | col <- LMDB.collectionCreate[Num](colName) 340 | data = List("bbb" -> 2, "aaa" -> 1, "ddd" -> 4, "ccc" -> 3) 341 | _ <- ZIO.foreachDiscard(data) { case (key, value) => col.upsertOverwrite(key, Num(value)) } 342 | head <- col.head() 343 | last <- col.last() 344 | next <- col.next("aaa") 345 | prev <- col.previous("ddd") 346 | noNext <- col.next("ddd") 347 | noPrev <- col.previous("aaa") 348 | } yield assertTrue( 349 | head.contains("aaa" -> Num(1)), 350 | last.contains("ddd" -> Num(4)), 351 | next.contains("bbb" -> Num(2)), 352 | prev.contains("ccc" -> Num(3)), 353 | noNext.isEmpty, 354 | noPrev.isEmpty 355 | ) 356 | } 357 | ).provide(lmdbLayer) @@ withLiveClock @@ withLiveRandom @@ timed 358 | } 359 | -------------------------------------------------------------------------------- /version.sbt: -------------------------------------------------------------------------------- 1 | ThisBuild / version := "2.0.2-SNAPSHOT" 2 | --------------------------------------------------------------------------------