├── .appveyor.yml ├── .gitignore ├── .gitmodules ├── .rustfmt.toml ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── lmdb-sys ├── Cargo.toml ├── build.rs └── src │ ├── constants.rs │ ├── ffi.rs │ └── lib.rs └── src ├── cursor.rs ├── database.rs ├── environment.rs ├── error.rs ├── flags.rs ├── lib.rs └── transaction.rs /.appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | matrix: 3 | - TARGET: x86_64-pc-windows-msvc 4 | - TARGET: i686-pc-windows-msvc 5 | 6 | install: 7 | - curl -sSf -o rustup-init.exe https://win.rustup.rs/ 8 | - rustup-init.exe -y --default-host %TARGET% --default-toolchain nightly 9 | - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin 10 | - rustc -Vv 11 | - cargo -V 12 | 13 | build_script: 14 | - git submodule -q update --init 15 | 16 | test_script: 17 | - SET RUST_BACKTRACE=1 18 | - cargo test --target %TARGET% --all -v 19 | - cargo test --release --target %TARGET% --all -v 20 | 21 | cache: 22 | - C:\Users\appveyor\.cargo\registry 23 | - target 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lmdb-sys/lmdb"] 2 | path = lmdb-sys/lmdb 3 | url = https://github.com/LMDB/lmdb 4 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | disable_all_formatting = true 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | dist: trusty 3 | sudo: false 4 | 5 | cache: cargo 6 | 7 | os: 8 | - linux 9 | - osx 10 | 11 | rust: 12 | - 1.20.0 13 | - stable 14 | - nightly 15 | 16 | script: 17 | - cargo build --verbose 18 | - if [[ $TRAVIS_RUST_VERSION = nightly* ]]; then 19 | env RUST_BACKTRACE=1 cargo test --all -v; 20 | env RUST_BACKTRACE=1 cargo test --all -v --release; 21 | fi 22 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | 3 | name = "lmdb" 4 | # NB: When modifying, also modify html_root_url in lib.rs 5 | version = "0.8.0" 6 | authors = ["Dan Burkert "] 7 | license = "Apache-2.0" 8 | 9 | description = "Idiomatic and safe LMDB wrapper." 10 | repository = "https://github.com/danburkert/lmdb-rs.git" 11 | readme = "README.md" 12 | documentation = "https://docs.rs/lmdb" 13 | keywords = ["LMDB", "database", "storage-engine", "bindings", "library"] 14 | categories = ["database"] 15 | 16 | [badges] 17 | travis-ci = { repository = "danburkert/lmdb-rs" } 18 | appveyor = { repository = "danburkert/lmdb-rs" } 19 | 20 | [workspace] 21 | members = [ 22 | "lmdb-sys", 23 | ] 24 | 25 | [dependencies] 26 | bitflags = "1" 27 | libc = "0.2" 28 | lmdb-sys = { version = "0.8.0", path = "lmdb-sys" } 29 | 30 | [dev-dependencies] 31 | rand = "0.4" 32 | tempdir = "0.3" 33 | byteorder = "1.0" 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2014 Dan Burkert 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/danburkert/lmdb-rs.svg?branch=master)](https://travis-ci.org/danburkert/lmdb-rs) 2 | [![Windows Build Status](https://ci.appveyor.com/api/projects/status/0bw21yfqsrsv3soh/branch/master?svg=true)](https://ci.appveyor.com/project/danburkert/lmdb-rs/branch/master) 3 | [![Documentation](https://docs.rs/lmdb/badge.svg)](https://docs.rs/lmdb/) 4 | [![Crate](https://img.shields.io/crates/v/lmdb.svg)](https://crates.io/crates/lmdb) 5 | 6 | # lmdb-rs 7 | 8 | Idiomatic and safe APIs for interacting with the 9 | [Symas Lightning Memory-Mapped Database (LMDB)](http://symas.com/mdb/). 10 | 11 | ## Building from Source 12 | 13 | ```bash 14 | git clone --recursive git@github.com:danburkert/lmdb-rs.git 15 | cd lmdb-rs 16 | cargo build 17 | ``` 18 | 19 | ## Features 20 | 21 | * [x] lmdb-sys. 22 | * [x] Cursors. 23 | * [x] Zero-copy put API. 24 | * [x] Nested transactions. 25 | * [x] Database statistics. 26 | -------------------------------------------------------------------------------- /lmdb-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | 3 | name = "lmdb-sys" 4 | # NB: When modifying, also modify html_root_url in lib.rs 5 | version = "0.8.0" 6 | authors = ["Dan Burkert "] 7 | license = "Apache-2.0" 8 | 9 | description = "Rust bindings for liblmdb." 10 | repository = "https://github.com/danburkert/lmdb-rs.git" 11 | readme = "../README.md" 12 | documentation = "https://docs.rs/lmdb-sys" 13 | keywords = ["LMDB", "database", "storage-engine", "bindings", "library"] 14 | categories = ["database", "external-ffi-bindings"] 15 | 16 | build = "build.rs" 17 | 18 | [dependencies] 19 | libc = "0.2" 20 | 21 | [build-dependencies] 22 | pkg-config = "0.3.2" 23 | cc = "1" 24 | -------------------------------------------------------------------------------- /lmdb-sys/build.rs: -------------------------------------------------------------------------------- 1 | extern crate pkg_config; 2 | extern crate cc; 3 | 4 | use std::env; 5 | use std::path::PathBuf; 6 | 7 | fn main() { 8 | let mut lmdb: PathBuf = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); 9 | lmdb.push("lmdb"); 10 | lmdb.push("libraries"); 11 | lmdb.push("liblmdb"); 12 | 13 | if !pkg_config::find_library("liblmdb").is_ok() { 14 | let target = env::var("TARGET").expect("No TARGET found"); 15 | let mut build = cc::Build::new(); 16 | if target.contains("android") { 17 | build.define("ANDROID", "1"); 18 | } 19 | build 20 | .file(lmdb.join("mdb.c")) 21 | .file(lmdb.join("midl.c")) 22 | // https://github.com/LMDB/lmdb/blob/LMDB_0.9.21/libraries/liblmdb/Makefile#L25 23 | .opt_level(2) 24 | .compile("liblmdb.a") 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /lmdb-sys/src/constants.rs: -------------------------------------------------------------------------------- 1 | use libc::{c_int, c_uint}; 2 | 3 | //////////////////////////////////////////////////////////////////////////////////////////////////// 4 | //// Environment Flags 5 | //////////////////////////////////////////////////////////////////////////////////////////////////// 6 | 7 | /// mmap at a fixed address (experimental) 8 | pub const MDB_FIXEDMAP: c_uint = 0x01; 9 | /// no environment directory 10 | pub const MDB_NOSUBDIR: c_uint = 0x4000; 11 | /// don't fsync after commit 12 | pub const MDB_NOSYNC: c_uint = 0x10000; 13 | /// read only 14 | pub const MDB_RDONLY: c_uint = 0x20000; 15 | /// don't fsync metapage after commit 16 | pub const MDB_NOMETASYNC: c_uint = 0x40000; 17 | /// use writable mmap 18 | pub const MDB_WRITEMAP: c_uint = 0x80000; 19 | /// use asynchronous msync when #MDB_WRITEMAP is used 20 | pub const MDB_MAPASYNC: c_uint = 0x100000; 21 | /// tie reader locktable slots to #MDB_txn objects instead of to threads 22 | pub const MDB_NOTLS: c_uint = 0x200000; 23 | /// don't do any locking, caller must manage their own locks 24 | pub const MDB_NOLOCK: c_uint = 0x400000; 25 | /// don't do readahead (no effect on Windows) 26 | pub const MDB_NORDAHEAD: c_uint = 0x800000; 27 | /// don't initialize malloc'd memory before writing to datafile 28 | pub const MDB_NOMEMINIT: c_uint = 0x1000000; 29 | 30 | //////////////////////////////////////////////////////////////////////////////////////////////////// 31 | //// Database Flags 32 | //////////////////////////////////////////////////////////////////////////////////////////////////// 33 | 34 | /// use reverse string keys 35 | pub const MDB_REVERSEKEY: c_uint = 0x02; 36 | /// use sorted duplicates 37 | pub const MDB_DUPSORT: c_uint = 0x04; 38 | /// numeric keys in native byte order. The keys must all be of the same size. 39 | pub const MDB_INTEGERKEY: c_uint = 0x08; 40 | /// with `MDB_DUPSORT`, sorted dup items have fixed size. 41 | pub const MDB_DUPFIXED: c_uint = 0x10; 42 | /// with `MDB_DUPSORT`, dups are numeric in native byte order. 43 | pub const MDB_INTEGERDUP: c_uint = 0x20; 44 | /// with #MDB_DUPSORT, use reverse string dups. 45 | pub const MDB_REVERSEDUP: c_uint = 0x40; 46 | /// create DB if not already existing. 47 | pub const MDB_CREATE: c_uint = 0x40000; 48 | 49 | //////////////////////////////////////////////////////////////////////////////////////////////////// 50 | //// Write Flags 51 | //////////////////////////////////////////////////////////////////////////////////////////////////// 52 | 53 | /// For put: Don't write if the key already exists. 54 | pub const MDB_NOOVERWRITE: c_uint = 0x10; 55 | /// Only for `MDB_DUPSORT`. 56 | /// 57 | /// For put: don't write if the key and data pair already exist. 58 | /// For `mdb_cursor_del`: remove all duplicate data items. 59 | pub const MDB_NODUPDATA: c_uint = 0x20; 60 | /// For `mdb_cursor_put`: overwrite the current key/data pair. 61 | pub const MDB_CURRENT: c_uint = 0x40; 62 | /// For put: Just reserve space for data, don't copy it. Return a pointer to the reserved space. 63 | pub const MDB_RESERVE: c_uint = 0x10000; 64 | /// Data is being appended, don't split full pages. 65 | pub const MDB_APPEND: c_uint = 0x20000; 66 | /// Duplicate data is being appended, don't split full pages. 67 | pub const MDB_APPENDDUP: c_uint = 0x40000; 68 | /// Store multiple data items in one call. Only for #MDB_DUPFIXED. 69 | pub const MDB_MULTIPLE: c_uint = 0x80000; 70 | 71 | //////////////////////////////////////////////////////////////////////////////////////////////////// 72 | //// Copy Flags 73 | //////////////////////////////////////////////////////////////////////////////////////////////////// 74 | 75 | /// Compacting copy: Omit free space from copy, and renumber all pages sequentially. 76 | pub const MDB_CP_COMPACT: c_uint = 0x01; 77 | 78 | //////////////////////////////////////////////////////////////////////////////////////////////////// 79 | //// Return Codes 80 | //////////////////////////////////////////////////////////////////////////////////////////////////// 81 | 82 | /// Successful result. 83 | pub const MDB_SUCCESS: c_int = 0; 84 | /// key/data pair already exists. 85 | pub const MDB_KEYEXIST: c_int = -30799; 86 | /// key/data pair not found (EOF). 87 | pub const MDB_NOTFOUND: c_int = -30798; 88 | /// Requested page not found - this usually indicates corruption. 89 | pub const MDB_PAGE_NOTFOUND: c_int = -30797; 90 | /// Located page was wrong type. 91 | pub const MDB_CORRUPTED: c_int = -30796; 92 | /// Update of meta page failed or environment had fatal error. 93 | pub const MDB_PANIC: c_int = -30795; 94 | /// Environment version mismatch. 95 | pub const MDB_VERSION_MISMATCH: c_int = -30794; 96 | /// File is not a valid LMDB file. 97 | pub const MDB_INVALID: c_int = -30793; 98 | /// Environment mapsize reached. 99 | pub const MDB_MAP_FULL: c_int = -30792; 100 | /// Environment maxdbs reached. 101 | pub const MDB_DBS_FULL: c_int = -30791; 102 | /// Environment maxreaders reached. 103 | pub const MDB_READERS_FULL: c_int = -30790; 104 | /// Too many TLS keys in use - Windows only. 105 | pub const MDB_TLS_FULL: c_int = -30789; 106 | /// Txn has too many dirty pages. 107 | pub const MDB_TXN_FULL: c_int = -30788; 108 | /// Cursor stack too deep - internal error. 109 | pub const MDB_CURSOR_FULL: c_int = -30787; 110 | /// Page has not enough space - internal error. 111 | pub const MDB_PAGE_FULL: c_int = -30786; 112 | /// Database contents grew beyond environment mapsize. 113 | pub const MDB_MAP_RESIZED: c_int = -30785; 114 | /// MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed. 115 | pub const MDB_INCOMPATIBLE: c_int = -30784; 116 | /// Invalid reuse of reader locktable slot. 117 | pub const MDB_BAD_RSLOT: c_int = -30783; 118 | /// Transaction cannot recover - it must be aborted. 119 | pub const MDB_BAD_TXN: c_int = -30782; 120 | /// Unsupported size of key/DB name/data, or wrong DUPFIXED size. 121 | pub const MDB_BAD_VALSIZE: c_int = -30781; 122 | /// The specified DBI was changed unexpectedly. 123 | pub const MDB_BAD_DBI: c_int = -30780; 124 | /// The last defined error code. 125 | pub const MDB_LAST_ERRCODE: c_int = MDB_BAD_DBI; 126 | -------------------------------------------------------------------------------- /lmdb-sys/src/ffi.rs: -------------------------------------------------------------------------------- 1 | /* automatically generated by rust-bindgen and modified by hand */ 2 | 3 | pub enum MDB_env { } 4 | pub enum MDB_txn { } 5 | pub type MDB_dbi = ::libc::c_uint; 6 | pub enum MDB_cursor { } 7 | 8 | #[repr(C)] 9 | pub struct MDB_val { 10 | pub mv_size: ::libc::size_t, 11 | pub mv_data: *mut ::libc::c_void, 12 | } 13 | 14 | pub type MDB_cmp_func = extern "C" fn(a: *const MDB_val, b: *const MDB_val) -> ::libc::c_int; 15 | pub type MDB_rel_func = extern "C" fn (item: *mut MDB_val, oldptr: *mut ::libc::c_void, newptr: *mut ::libc::c_void, relctx: *mut ::libc::c_void) -> (); 16 | 17 | pub const MDB_FIRST: ::libc::c_uint = 0; 18 | pub const MDB_FIRST_DUP: ::libc::c_uint = 1; 19 | pub const MDB_GET_BOTH: ::libc::c_uint = 2; 20 | pub const MDB_GET_BOTH_RANGE: ::libc::c_uint = 3; 21 | pub const MDB_GET_CURRENT: ::libc::c_uint = 4; 22 | pub const MDB_GET_MULTIPLE: ::libc::c_uint = 5; 23 | pub const MDB_LAST: ::libc::c_uint = 6; 24 | pub const MDB_LAST_DUP: ::libc::c_uint = 7; 25 | pub const MDB_NEXT: ::libc::c_uint = 8; 26 | pub const MDB_NEXT_DUP: ::libc::c_uint = 9; 27 | pub const MDB_NEXT_MULTIPLE: ::libc::c_uint = 10; 28 | pub const MDB_NEXT_NODUP: ::libc::c_uint = 11; 29 | pub const MDB_PREV: ::libc::c_uint = 12; 30 | pub const MDB_PREV_DUP: ::libc::c_uint = 13; 31 | pub const MDB_PREV_NODUP: ::libc::c_uint = 14; 32 | pub const MDB_SET: ::libc::c_uint = 15; 33 | pub const MDB_SET_KEY: ::libc::c_uint = 16; 34 | pub const MDB_SET_RANGE: ::libc::c_uint = 17; 35 | pub type MDB_cursor_op = ::libc::c_uint; 36 | 37 | #[repr(C)] 38 | #[derive(Clone, Copy)] 39 | pub struct MDB_stat { 40 | pub ms_psize: ::libc::c_uint, 41 | pub ms_depth: ::libc::c_uint, 42 | pub ms_branch_pages: ::libc::size_t, 43 | pub ms_leaf_pages: ::libc::size_t, 44 | pub ms_overflow_pages: ::libc::size_t, 45 | pub ms_entries: ::libc::size_t, 46 | } 47 | 48 | #[repr(C)] 49 | pub struct MDB_envinfo { 50 | pub me_mapaddr: *mut ::libc::c_void, 51 | pub me_mapsize: ::libc::size_t, 52 | pub me_last_pgno: ::libc::size_t, 53 | pub me_last_txnid: ::libc::size_t, 54 | pub me_maxreaders: ::libc::c_uint, 55 | pub me_numreaders: ::libc::c_uint, 56 | } 57 | 58 | pub type MDB_assert_func = extern "C" fn(env: *mut MDB_env, msg: *const ::libc::c_char) -> (); 59 | pub type MDB_msg_func = extern "C" fn(msg: *const ::libc::c_char, ctx: *mut ::libc::c_void) -> ::libc::c_int; 60 | 61 | extern "C" { 62 | pub fn mdb_version(major: *mut ::libc::c_int, minor: *mut ::libc::c_int, patch: *mut ::libc::c_int) -> *mut ::libc::c_char; 63 | pub fn mdb_strerror(err: ::libc::c_int) -> *mut ::libc::c_char; 64 | pub fn mdb_env_create(env: *mut *mut MDB_env) -> ::libc::c_int; 65 | pub fn mdb_env_open(env: *mut MDB_env, path: *const ::libc::c_char, flags: ::libc::c_uint, mode: super::mode_t) -> ::libc::c_int; 66 | pub fn mdb_env_copy(env: *mut MDB_env, path: *const ::libc::c_char) -> ::libc::c_int; 67 | pub fn mdb_env_copyfd(env: *mut MDB_env, fd: ::libc::c_int) -> ::libc::c_int; 68 | pub fn mdb_env_copy2(env: *mut MDB_env, path: *const ::libc::c_char, flags: ::libc::c_uint) -> ::libc::c_int; 69 | pub fn mdb_env_copyfd2(env: *mut MDB_env, fd: ::libc::c_int, flags: ::libc::c_uint) -> ::libc::c_int; 70 | pub fn mdb_env_stat(env: *mut MDB_env, stat: *mut MDB_stat) -> ::libc::c_int; 71 | pub fn mdb_env_info(env: *mut MDB_env, stat: *mut MDB_envinfo) -> ::libc::c_int; 72 | pub fn mdb_env_sync(env: *mut MDB_env, force: ::libc::c_int) -> ::libc::c_int; 73 | pub fn mdb_env_close(env: *mut MDB_env) -> (); 74 | pub fn mdb_env_set_flags(env: *mut MDB_env, flags: ::libc::c_uint, onoff: ::libc::c_int) -> ::libc::c_int; 75 | pub fn mdb_env_get_flags(env: *mut MDB_env, flags: *mut ::libc::c_uint) -> ::libc::c_int; 76 | pub fn mdb_env_get_path(env: *mut MDB_env, path: *mut *const ::libc::c_char) -> ::libc::c_int; 77 | pub fn mdb_env_get_fd(env: *mut MDB_env, fd: *mut ::libc::c_int) -> ::libc::c_int; 78 | pub fn mdb_env_set_mapsize(env: *mut MDB_env, size: ::libc::size_t) -> ::libc::c_int; 79 | pub fn mdb_env_set_maxreaders(env: *mut MDB_env, readers: ::libc::c_uint) -> ::libc::c_int; 80 | pub fn mdb_env_get_maxreaders(env: *mut MDB_env, readers: *mut ::libc::c_uint) -> ::libc::c_int; 81 | pub fn mdb_env_set_maxdbs(env: *mut MDB_env, dbs: MDB_dbi) -> ::libc::c_int; 82 | pub fn mdb_env_get_maxkeysize(env: *mut MDB_env) -> ::libc::c_int; 83 | pub fn mdb_env_set_userctx(env: *mut MDB_env, ctx: *mut ::libc::c_void) -> ::libc::c_int; 84 | pub fn mdb_env_get_userctx(env: *mut MDB_env) -> *mut ::libc::c_void; 85 | pub fn mdb_env_set_assert(env: *mut MDB_env, func: *mut ::std::option::Option ()>) -> ::libc::c_int; 86 | pub fn mdb_txn_begin(env: *mut MDB_env, parent: *mut MDB_txn, flags: ::libc::c_uint, txn: *mut *mut MDB_txn) -> ::libc::c_int; 87 | pub fn mdb_txn_env(txn: *mut MDB_txn) -> *mut MDB_env; 88 | pub fn mdb_txn_id(txn: *mut MDB_txn) -> ::libc::size_t; 89 | pub fn mdb_txn_commit(txn: *mut MDB_txn) -> ::libc::c_int; 90 | pub fn mdb_txn_abort(txn: *mut MDB_txn) -> (); 91 | pub fn mdb_txn_reset(txn: *mut MDB_txn) -> (); 92 | pub fn mdb_txn_renew(txn: *mut MDB_txn) -> ::libc::c_int; 93 | pub fn mdb_dbi_open(txn: *mut MDB_txn, name: *const ::libc::c_char, flags: ::libc::c_uint, dbi: *mut MDB_dbi) -> ::libc::c_int; 94 | pub fn mdb_stat(txn: *mut MDB_txn, dbi: MDB_dbi, stat: *mut MDB_stat) -> ::libc::c_int; 95 | pub fn mdb_dbi_flags(txn: *mut MDB_txn, dbi: MDB_dbi, flags: *mut ::libc::c_uint) -> ::libc::c_int; 96 | pub fn mdb_dbi_close(env: *mut MDB_env, dbi: MDB_dbi) -> (); 97 | pub fn mdb_drop(txn: *mut MDB_txn, dbi: MDB_dbi, del: ::libc::c_int) -> ::libc::c_int; 98 | pub fn mdb_set_compare(txn: *mut MDB_txn, dbi: MDB_dbi, cmp: *mut MDB_cmp_func) -> ::libc::c_int; 99 | pub fn mdb_set_dupsort(txn: *mut MDB_txn, dbi: MDB_dbi, cmp: *mut MDB_cmp_func) -> ::libc::c_int; 100 | pub fn mdb_set_relfunc(txn: *mut MDB_txn, dbi: MDB_dbi, rel: *mut MDB_rel_func) -> ::libc::c_int; 101 | pub fn mdb_set_relctx(txn: *mut MDB_txn, dbi: MDB_dbi, ctx: *mut ::libc::c_void) -> ::libc::c_int; 102 | pub fn mdb_get(txn: *mut MDB_txn, dbi: MDB_dbi, key: *mut MDB_val, data: *mut MDB_val) -> ::libc::c_int; 103 | pub fn mdb_put(txn: *mut MDB_txn, dbi: MDB_dbi, key: *mut MDB_val, data: *mut MDB_val, flags: ::libc::c_uint) -> ::libc::c_int; 104 | pub fn mdb_del(txn: *mut MDB_txn, dbi: MDB_dbi, key: *mut MDB_val, data: *mut MDB_val) -> ::libc::c_int; 105 | pub fn mdb_cursor_open(txn: *mut MDB_txn, dbi: MDB_dbi, cursor: *mut *mut MDB_cursor) -> ::libc::c_int; 106 | pub fn mdb_cursor_close(cursor: *mut MDB_cursor) -> (); 107 | pub fn mdb_cursor_renew(txn: *mut MDB_txn, cursor: *mut MDB_cursor) -> ::libc::c_int; 108 | pub fn mdb_cursor_txn(cursor: *mut MDB_cursor) -> *mut MDB_txn; 109 | pub fn mdb_cursor_dbi(cursor: *mut MDB_cursor) -> MDB_dbi; 110 | pub fn mdb_cursor_get(cursor: *mut MDB_cursor, key: *mut MDB_val, data: *mut MDB_val, op: MDB_cursor_op) -> ::libc::c_int; 111 | pub fn mdb_cursor_put(cursor: *mut MDB_cursor, key: *mut MDB_val, data: *mut MDB_val, flags: ::libc::c_uint) -> ::libc::c_int; 112 | pub fn mdb_cursor_del(cursor: *mut MDB_cursor, flags: ::libc::c_uint) -> ::libc::c_int; 113 | pub fn mdb_cursor_count(cursor: *mut MDB_cursor, countp: *mut ::libc::size_t) -> ::libc::c_int; 114 | pub fn mdb_cmp(txn: *mut MDB_txn, dbi: MDB_dbi, a: *const MDB_val, b: *const MDB_val) -> ::libc::c_int; 115 | pub fn mdb_dcmp(txn: *mut MDB_txn, dbi: MDB_dbi, a: *const MDB_val, b: *const MDB_val) -> ::libc::c_int; 116 | pub fn mdb_reader_list(env: *mut MDB_env, func: *mut MDB_msg_func, ctx: *mut ::libc::c_void) -> ::libc::c_int; 117 | pub fn mdb_reader_check(env: *mut MDB_env, dead: *mut ::libc::c_int) -> ::libc::c_int; 118 | } 119 | -------------------------------------------------------------------------------- /lmdb-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_camel_case_types)] 2 | #![deny(warnings)] 3 | #![doc(html_root_url = "https://docs.rs/lmdb-sys/0.8.0")] 4 | 5 | extern crate libc; 6 | 7 | #[cfg(unix)] 8 | #[allow(non_camel_case_types)] 9 | pub type mode_t = ::libc::mode_t; 10 | #[cfg(windows)] 11 | #[allow(non_camel_case_types)] 12 | pub type mode_t = ::libc::c_int; 13 | 14 | pub use constants::*; 15 | pub use ffi::*; 16 | 17 | mod ffi; 18 | mod constants; 19 | -------------------------------------------------------------------------------- /src/cursor.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::{fmt, mem, ptr, result, slice}; 3 | 4 | use libc::{EINVAL, c_void, size_t, c_uint}; 5 | 6 | use database::Database; 7 | use error::{Error, Result, lmdb_result}; 8 | use ffi; 9 | use flags::WriteFlags; 10 | use transaction::Transaction; 11 | 12 | /// An LMDB cursor. 13 | pub trait Cursor<'txn> { 14 | 15 | /// Returns a raw pointer to the underlying LMDB cursor. 16 | /// 17 | /// The caller **must** ensure that the pointer is not used after the 18 | /// lifetime of the cursor. 19 | fn cursor(&self) -> *mut ffi::MDB_cursor; 20 | 21 | /// Retrieves a key/data pair from the cursor. Depending on the cursor op, 22 | /// the current key may be returned. 23 | fn get(&self, key: Option<&[u8]>, data: Option<&[u8]>, op: c_uint) -> Result<(Option<&'txn [u8]>, &'txn [u8])> { 24 | unsafe { 25 | let mut key_val = slice_to_val(key); 26 | let mut data_val = slice_to_val(data); 27 | let key_ptr = key_val.mv_data; 28 | lmdb_result(ffi::mdb_cursor_get(self.cursor(), &mut key_val, &mut data_val, op))?; 29 | let key_out = if key_ptr != key_val.mv_data { Some(val_to_slice(key_val)) } else { None }; 30 | let data_out = val_to_slice(data_val); 31 | Ok((key_out, data_out)) 32 | } 33 | } 34 | 35 | /// Iterate over database items. The iterator will begin with item next 36 | /// after the cursor, and continue until the end of the database. For new 37 | /// cursors, the iterator will begin with the first item in the database. 38 | /// 39 | /// For databases with duplicate data items (`DatabaseFlags::DUP_SORT`), the 40 | /// duplicate data items of each key will be returned before moving on to 41 | /// the next key. 42 | fn iter(&mut self) -> Iter<'txn> { 43 | Iter::new(self.cursor(), ffi::MDB_NEXT, ffi::MDB_NEXT) 44 | } 45 | 46 | /// Iterate over database items starting from the beginning of the database. 47 | /// 48 | /// For databases with duplicate data items (`DatabaseFlags::DUP_SORT`), the 49 | /// duplicate data items of each key will be returned before moving on to 50 | /// the next key. 51 | fn iter_start(&mut self) -> Iter<'txn> { 52 | Iter::new(self.cursor(), ffi::MDB_FIRST, ffi::MDB_NEXT) 53 | } 54 | 55 | /// Iterate over database items starting from the given key. 56 | /// 57 | /// For databases with duplicate data items (`DatabaseFlags::DUP_SORT`), the 58 | /// duplicate data items of each key will be returned before moving on to 59 | /// the next key. 60 | fn iter_from(&mut self, key: K) -> Iter<'txn> where K: AsRef<[u8]> { 61 | match self.get(Some(key.as_ref()), None, ffi::MDB_SET_RANGE) { 62 | Ok(_) | Err(Error::NotFound) => (), 63 | Err(error) => panic!("mdb_cursor_get returned an unexpected error: {}", error), 64 | }; 65 | Iter::new(self.cursor(), ffi::MDB_GET_CURRENT, ffi::MDB_NEXT) 66 | } 67 | 68 | /// Iterate over duplicate database items. The iterator will begin with the 69 | /// item next after the cursor, and continue until the end of the database. 70 | /// Each item will be returned as an iterator of its duplicates. 71 | fn iter_dup(&mut self) -> IterDup<'txn> { 72 | IterDup::new(self.cursor(), ffi::MDB_NEXT) 73 | } 74 | 75 | /// Iterate over duplicate database items starting from the beginning of the 76 | /// database. Each item will be returned as an iterator of its duplicates. 77 | fn iter_dup_start(&mut self) -> IterDup<'txn> { 78 | IterDup::new(self.cursor(), ffi::MDB_FIRST) 79 | } 80 | 81 | /// Iterate over duplicate items in the database starting from the given 82 | /// key. Each item will be returned as an iterator of its duplicates. 83 | fn iter_dup_from(&mut self, key: &K) -> IterDup<'txn> where K: AsRef<[u8]> { 84 | match self.get(Some(key.as_ref()), None, ffi::MDB_SET_RANGE) { 85 | Ok(_) | Err(Error::NotFound) => (), 86 | Err(error) => panic!("mdb_cursor_get returned an unexpected error: {}", error), 87 | }; 88 | IterDup::new(self.cursor(), ffi::MDB_GET_CURRENT) 89 | } 90 | 91 | /// Iterate over the duplicates of the item in the database with the given key. 92 | fn iter_dup_of(&mut self, key: &K) -> Iter<'txn> where K: AsRef<[u8]> { 93 | match self.get(Some(key.as_ref()), None, ffi::MDB_SET) { 94 | Ok(_) | Err(Error::NotFound) => (), 95 | Err(error) => panic!("mdb_cursor_get returned an unexpected error: {}", error), 96 | }; 97 | Iter::new(self.cursor(), ffi::MDB_GET_CURRENT, ffi::MDB_NEXT_DUP) 98 | } 99 | } 100 | 101 | /// A read-only cursor for navigating the items within a database. 102 | pub struct RoCursor<'txn> { 103 | cursor: *mut ffi::MDB_cursor, 104 | _marker: PhantomData &'txn ()>, 105 | } 106 | 107 | impl <'txn> Cursor<'txn> for RoCursor<'txn> { 108 | fn cursor(&self) -> *mut ffi::MDB_cursor { 109 | self.cursor 110 | } 111 | } 112 | 113 | impl <'txn> fmt::Debug for RoCursor<'txn> { 114 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 115 | f.debug_struct("RoCursor").finish() 116 | } 117 | } 118 | 119 | impl <'txn> Drop for RoCursor<'txn> { 120 | fn drop(&mut self) { 121 | unsafe { ffi::mdb_cursor_close(self.cursor) } 122 | } 123 | } 124 | 125 | impl <'txn> RoCursor<'txn> { 126 | 127 | /// Creates a new read-only cursor in the given database and transaction. 128 | /// Prefer using `Transaction::open_cursor`. 129 | pub(crate) fn new(txn: &'txn T, db: Database) -> Result> where T: Transaction { 130 | let mut cursor: *mut ffi::MDB_cursor = ptr::null_mut(); 131 | unsafe { lmdb_result(ffi::mdb_cursor_open(txn.txn(), db.dbi(), &mut cursor))?; } 132 | Ok(RoCursor { 133 | cursor: cursor, 134 | _marker: PhantomData, 135 | }) 136 | } 137 | } 138 | 139 | /// A read-write cursor for navigating items within a database. 140 | pub struct RwCursor<'txn> { 141 | cursor: *mut ffi::MDB_cursor, 142 | _marker: PhantomData &'txn ()>, 143 | } 144 | 145 | impl <'txn> Cursor<'txn> for RwCursor<'txn> { 146 | fn cursor(&self) -> *mut ffi::MDB_cursor { 147 | self.cursor 148 | } 149 | } 150 | 151 | impl <'txn> fmt::Debug for RwCursor<'txn> { 152 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 153 | f.debug_struct("RwCursor").finish() 154 | } 155 | } 156 | 157 | impl <'txn> Drop for RwCursor<'txn> { 158 | fn drop(&mut self) { 159 | unsafe { ffi::mdb_cursor_close(self.cursor) } 160 | } 161 | } 162 | 163 | impl <'txn> RwCursor<'txn> { 164 | 165 | /// Creates a new read-only cursor in the given database and transaction. 166 | /// Prefer using `RwTransaction::open_rw_cursor`. 167 | pub(crate) fn new(txn: &'txn T, db: Database) -> Result> where T: Transaction { 168 | let mut cursor: *mut ffi::MDB_cursor = ptr::null_mut(); 169 | unsafe { lmdb_result(ffi::mdb_cursor_open(txn.txn(), db.dbi(), &mut cursor))?; } 170 | Ok(RwCursor { cursor: cursor, _marker: PhantomData }) 171 | } 172 | 173 | /// Puts a key/data pair into the database. The cursor will be positioned at 174 | /// the new data item, or on failure usually near it. 175 | pub fn put(&mut self, key: &K, data: &D, flags: WriteFlags) -> Result<()> 176 | where K: AsRef<[u8]>, D: AsRef<[u8]> { 177 | let key = key.as_ref(); 178 | let data = data.as_ref(); 179 | let mut key_val: ffi::MDB_val = ffi::MDB_val { mv_size: key.len() as size_t, 180 | mv_data: key.as_ptr() as *mut c_void }; 181 | let mut data_val: ffi::MDB_val = ffi::MDB_val { mv_size: data.len() as size_t, 182 | mv_data: data.as_ptr() as *mut c_void }; 183 | unsafe { 184 | lmdb_result(ffi::mdb_cursor_put(self.cursor(), 185 | &mut key_val, 186 | &mut data_val, 187 | flags.bits())) 188 | } 189 | } 190 | 191 | /// Deletes the current key/data pair. 192 | /// 193 | /// ### Flags 194 | /// 195 | /// `WriteFlags::NO_DUP_DATA` may be used to delete all data items for the 196 | /// current key, if the database was opened with `DatabaseFlags::DUP_SORT`. 197 | pub fn del(&mut self, flags: WriteFlags) -> Result<()> { 198 | unsafe { lmdb_result(ffi::mdb_cursor_del(self.cursor(), flags.bits())) } 199 | } 200 | } 201 | 202 | unsafe fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDB_val { 203 | match slice { 204 | Some(slice) => 205 | ffi::MDB_val { mv_size: slice.len() as size_t, 206 | mv_data: slice.as_ptr() as *mut c_void }, 207 | None => 208 | ffi::MDB_val { mv_size: 0, 209 | mv_data: ptr::null_mut() }, 210 | } 211 | } 212 | 213 | unsafe fn val_to_slice<'a>(val: ffi::MDB_val) -> &'a [u8] { 214 | slice::from_raw_parts(val.mv_data as *const u8, val.mv_size as usize) 215 | } 216 | 217 | /// An iterator over the values in an LMDB database. 218 | pub struct Iter<'txn> { 219 | cursor: *mut ffi::MDB_cursor, 220 | op: c_uint, 221 | next_op: c_uint, 222 | _marker: PhantomData, 223 | } 224 | 225 | impl <'txn> Iter<'txn> { 226 | 227 | /// Creates a new iterator backed by the given cursor. 228 | fn new<'t>(cursor: *mut ffi::MDB_cursor, op: c_uint, next_op: c_uint) -> Iter<'t> { 229 | Iter { cursor: cursor, op: op, next_op: next_op, _marker: PhantomData } 230 | } 231 | } 232 | 233 | impl <'txn> fmt::Debug for Iter<'txn> { 234 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 235 | f.debug_struct("Iter").finish() 236 | } 237 | } 238 | 239 | impl <'txn> Iterator for Iter<'txn> { 240 | 241 | type Item = (&'txn [u8], &'txn [u8]); 242 | 243 | fn next(&mut self) -> Option<(&'txn [u8], &'txn [u8])> { 244 | let mut key = ffi::MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 245 | let mut data = ffi::MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 246 | let op = mem::replace(&mut self.op, self.next_op); 247 | unsafe { 248 | match ffi::mdb_cursor_get(self.cursor, &mut key, &mut data, op) { 249 | ffi::MDB_SUCCESS => Some((val_to_slice(key), val_to_slice(data))), 250 | // EINVAL can occur when the cursor was previously seeked to a non-existent value, 251 | // e.g. iter_from with a key greater than all values in the database. 252 | ffi::MDB_NOTFOUND | EINVAL => None, 253 | error => panic!("mdb_cursor_get returned an unexpected error: {}", error), 254 | } 255 | } 256 | } 257 | } 258 | 259 | /// An iterator over the keys and duplicate values in an LMDB database. 260 | /// 261 | /// The yielded items of the iterator are themselves iterators over the duplicate values for a 262 | /// specific key. 263 | pub struct IterDup<'txn> { 264 | cursor: *mut ffi::MDB_cursor, 265 | op: c_uint, 266 | _marker: PhantomData, 267 | } 268 | 269 | impl <'txn> IterDup<'txn> { 270 | 271 | /// Creates a new iterator backed by the given cursor. 272 | fn new<'t>(cursor: *mut ffi::MDB_cursor, op: c_uint) -> IterDup<'t> { 273 | IterDup { cursor: cursor, op: op, _marker: PhantomData } 274 | } 275 | } 276 | 277 | impl <'txn> fmt::Debug for IterDup<'txn> { 278 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 279 | f.debug_struct("IterDup").finish() 280 | } 281 | } 282 | 283 | impl <'txn> Iterator for IterDup<'txn> { 284 | 285 | type Item = Iter<'txn>; 286 | 287 | fn next(&mut self) -> Option> { 288 | let mut key = ffi::MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 289 | let mut data = ffi::MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 290 | let op = mem::replace(&mut self.op, ffi::MDB_NEXT_NODUP); 291 | let err_code = unsafe { 292 | ffi::mdb_cursor_get(self.cursor, &mut key, &mut data, op) 293 | }; 294 | 295 | if err_code == ffi::MDB_SUCCESS { 296 | Some(Iter::new(self.cursor, ffi::MDB_GET_CURRENT, ffi::MDB_NEXT_DUP)) 297 | } else { 298 | None 299 | } 300 | } 301 | } 302 | 303 | #[cfg(test)] 304 | mod test { 305 | 306 | use std::ptr; 307 | use test::{Bencher, black_box}; 308 | 309 | use tempdir::TempDir; 310 | 311 | use environment::*; 312 | use ffi::*; 313 | use flags::*; 314 | use super::*; 315 | use test_utils::*; 316 | 317 | #[test] 318 | fn test_get() { 319 | let dir = TempDir::new("test").unwrap(); 320 | let env = Environment::new().open(dir.path()).unwrap(); 321 | let db = env.open_db(None).unwrap(); 322 | 323 | let mut txn = env.begin_rw_txn().unwrap(); 324 | txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); 325 | txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); 326 | txn.put(db, b"key3", b"val3", WriteFlags::empty()).unwrap(); 327 | 328 | let cursor = txn.open_ro_cursor(db).unwrap(); 329 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 330 | cursor.get(None, None, MDB_FIRST).unwrap()); 331 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 332 | cursor.get(None, None, MDB_GET_CURRENT).unwrap()); 333 | assert_eq!((Some(&b"key2"[..]), &b"val2"[..]), 334 | cursor.get(None, None, MDB_NEXT).unwrap()); 335 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 336 | cursor.get(None, None, MDB_PREV).unwrap()); 337 | assert_eq!((Some(&b"key3"[..]), &b"val3"[..]), 338 | cursor.get(None, None, MDB_LAST).unwrap()); 339 | assert_eq!((None, &b"val2"[..]), 340 | cursor.get(Some(b"key2"), None, MDB_SET).unwrap()); 341 | assert_eq!((Some(&b"key3"[..]), &b"val3"[..]), 342 | cursor.get(Some(&b"key3"[..]), None, MDB_SET_KEY).unwrap()); 343 | assert_eq!((Some(&b"key3"[..]), &b"val3"[..]), 344 | cursor.get(Some(&b"key2\0"[..]), None, MDB_SET_RANGE).unwrap()); 345 | } 346 | 347 | #[test] 348 | fn test_get_dup() { 349 | let dir = TempDir::new("test").unwrap(); 350 | let env = Environment::new().open(dir.path()).unwrap(); 351 | let db = env.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); 352 | 353 | let mut txn = env.begin_rw_txn().unwrap(); 354 | txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); 355 | txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); 356 | txn.put(db, b"key1", b"val3", WriteFlags::empty()).unwrap(); 357 | txn.put(db, b"key2", b"val1", WriteFlags::empty()).unwrap(); 358 | txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); 359 | txn.put(db, b"key2", b"val3", WriteFlags::empty()).unwrap(); 360 | 361 | let cursor = txn.open_ro_cursor(db).unwrap(); 362 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 363 | cursor.get(None, None, MDB_FIRST).unwrap()); 364 | assert_eq!((None, &b"val1"[..]), 365 | cursor.get(None, None, MDB_FIRST_DUP).unwrap()); 366 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 367 | cursor.get(None, None, MDB_GET_CURRENT).unwrap()); 368 | assert_eq!((Some(&b"key2"[..]), &b"val1"[..]), 369 | cursor.get(None, None, MDB_NEXT_NODUP).unwrap()); 370 | assert_eq!((Some(&b"key2"[..]), &b"val2"[..]), 371 | cursor.get(None, None, MDB_NEXT_DUP).unwrap()); 372 | assert_eq!((Some(&b"key2"[..]), &b"val3"[..]), 373 | cursor.get(None, None, MDB_NEXT_DUP).unwrap()); 374 | assert!(cursor.get(None, None, MDB_NEXT_DUP).is_err()); 375 | assert_eq!((Some(&b"key2"[..]), &b"val2"[..]), 376 | cursor.get(None, None, MDB_PREV_DUP).unwrap()); 377 | assert_eq!((None, &b"val3"[..]), 378 | cursor.get(None, None, MDB_LAST_DUP).unwrap()); 379 | assert_eq!((Some(&b"key1"[..]), &b"val3"[..]), 380 | cursor.get(None, None, MDB_PREV_NODUP).unwrap()); 381 | assert_eq!((None, &b"val1"[..]), 382 | cursor.get(Some(&b"key1"[..]), None, MDB_SET).unwrap()); 383 | assert_eq!((Some(&b"key2"[..]), &b"val1"[..]), 384 | cursor.get(Some(&b"key2"[..]), None, MDB_SET_KEY).unwrap()); 385 | assert_eq!((Some(&b"key2"[..]), &b"val1"[..]), 386 | cursor.get(Some(&b"key1\0"[..]), None, MDB_SET_RANGE).unwrap()); 387 | assert_eq!((None, &b"val3"[..]), 388 | cursor.get(Some(&b"key1"[..]), Some(&b"val3"[..]), MDB_GET_BOTH).unwrap()); 389 | assert_eq!((None, &b"val1"[..]), 390 | cursor.get(Some(&b"key2"[..]), Some(&b"val"[..]), MDB_GET_BOTH_RANGE).unwrap()); 391 | } 392 | 393 | #[test] 394 | fn test_get_dupfixed() { 395 | let dir = TempDir::new("test").unwrap(); 396 | let env = Environment::new().open(dir.path()).unwrap(); 397 | let db = env.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap(); 398 | 399 | let mut txn = env.begin_rw_txn().unwrap(); 400 | txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); 401 | txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); 402 | txn.put(db, b"key1", b"val3", WriteFlags::empty()).unwrap(); 403 | txn.put(db, b"key2", b"val4", WriteFlags::empty()).unwrap(); 404 | txn.put(db, b"key2", b"val5", WriteFlags::empty()).unwrap(); 405 | txn.put(db, b"key2", b"val6", WriteFlags::empty()).unwrap(); 406 | 407 | let cursor = txn.open_ro_cursor(db).unwrap(); 408 | assert_eq!((Some(&b"key1"[..]), &b"val1"[..]), 409 | cursor.get(None, None, MDB_FIRST).unwrap()); 410 | assert_eq!((None, &b"val1val2val3"[..]), 411 | cursor.get(None, None, MDB_GET_MULTIPLE).unwrap()); 412 | assert!(cursor.get(None, None, MDB_NEXT_MULTIPLE).is_err()); 413 | } 414 | 415 | #[test] 416 | fn test_iter() { 417 | let dir = TempDir::new("test").unwrap(); 418 | let env = Environment::new().open(dir.path()).unwrap(); 419 | let db = env.open_db(None).unwrap(); 420 | 421 | let items: Vec<(&[u8], &[u8])> = vec!((b"key1", b"val1"), 422 | (b"key2", b"val2"), 423 | (b"key3", b"val3"), 424 | (b"key5", b"val5")); 425 | 426 | { 427 | let mut txn = env.begin_rw_txn().unwrap(); 428 | for &(ref key, ref data) in &items { 429 | txn.put(db, key, data, WriteFlags::empty()).unwrap(); 430 | } 431 | txn.commit().unwrap(); 432 | } 433 | 434 | let txn = env.begin_ro_txn().unwrap(); 435 | let mut cursor = txn.open_ro_cursor(db).unwrap(); 436 | assert_eq!(items, cursor.iter().collect::>()); 437 | 438 | cursor.get(Some(b"key2"), None, MDB_SET).unwrap(); 439 | assert_eq!(items.clone().into_iter().skip(2).collect::>(), 440 | cursor.iter().collect::>()); 441 | 442 | assert_eq!(items, cursor.iter_start().collect::>()); 443 | 444 | assert_eq!(items.clone().into_iter().skip(1).collect::>(), 445 | cursor.iter_from(b"key2").collect::>()); 446 | 447 | assert_eq!(items.clone().into_iter().skip(3).collect::>(), 448 | cursor.iter_from(b"key4").collect::>()); 449 | 450 | assert_eq!(vec!().into_iter().collect::>(), 451 | cursor.iter_from(b"key6").collect::>()); 452 | } 453 | 454 | #[test] 455 | fn test_iter_empty_database() { 456 | let dir = TempDir::new("test").unwrap(); 457 | let env = Environment::new().open(dir.path()).unwrap(); 458 | let db = env.open_db(None).unwrap(); 459 | let txn = env.begin_ro_txn().unwrap(); 460 | let mut cursor = txn.open_ro_cursor(db).unwrap(); 461 | 462 | assert_eq!(0, cursor.iter().count()); 463 | assert_eq!(0, cursor.iter_start().count()); 464 | assert_eq!(0, cursor.iter_from(b"foo").count()); 465 | } 466 | 467 | #[test] 468 | fn test_iter_empty_dup_database() { 469 | let dir = TempDir::new("test").unwrap(); 470 | let env = Environment::new().open(dir.path()).unwrap(); 471 | let db = env.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); 472 | let txn = env.begin_ro_txn().unwrap(); 473 | let mut cursor = txn.open_ro_cursor(db).unwrap(); 474 | 475 | assert_eq!(0, cursor.iter().count()); 476 | assert_eq!(0, cursor.iter_start().count()); 477 | assert_eq!(0, cursor.iter_from(b"foo").count()); 478 | assert_eq!(0, cursor.iter_dup().count()); 479 | assert_eq!(0, cursor.iter_dup_start().count()); 480 | assert_eq!(0, cursor.iter_dup_from(b"foo").count()); 481 | assert_eq!(0, cursor.iter_dup_of(b"foo").count()); 482 | } 483 | 484 | #[test] 485 | fn test_iter_dup() { 486 | let dir = TempDir::new("test").unwrap(); 487 | let env = Environment::new().open(dir.path()).unwrap(); 488 | let db = env.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); 489 | 490 | let items: Vec<(&[u8], &[u8])> = vec!((b"a", b"1"), 491 | (b"a", b"2"), 492 | (b"a", b"3"), 493 | (b"b", b"1"), 494 | (b"b", b"2"), 495 | (b"b", b"3"), 496 | (b"c", b"1"), 497 | (b"c", b"2"), 498 | (b"c", b"3"), 499 | (b"e", b"1"), 500 | (b"e", b"2"), 501 | (b"e", b"3")); 502 | 503 | { 504 | let mut txn = env.begin_rw_txn().unwrap(); 505 | for &(ref key, ref data) in &items { 506 | txn.put(db, key, data, WriteFlags::empty()).unwrap(); 507 | } 508 | txn.commit().unwrap(); 509 | } 510 | 511 | let txn = env.begin_ro_txn().unwrap(); 512 | let mut cursor = txn.open_ro_cursor(db).unwrap(); 513 | assert_eq!(items, cursor.iter_dup().flat_map(|x| x).collect::>()); 514 | 515 | cursor.get(Some(b"b"), None, MDB_SET).unwrap(); 516 | assert_eq!(items.clone().into_iter().skip(4).collect::>(), 517 | cursor.iter_dup().flat_map(|x| x).collect::>()); 518 | 519 | assert_eq!(items, 520 | cursor.iter_dup_start().flat_map(|x| x).collect::>()); 521 | 522 | assert_eq!(items.clone().into_iter().skip(3).collect::>(), 523 | cursor.iter_dup_from(b"b").flat_map(|x| x).collect::>()); 524 | 525 | assert_eq!(items.clone().into_iter().skip(3).collect::>(), 526 | cursor.iter_dup_from(b"ab").flat_map(|x| x).collect::>()); 527 | 528 | assert_eq!(items.clone().into_iter().skip(9).collect::>(), 529 | cursor.iter_dup_from(b"d").flat_map(|x| x).collect::>()); 530 | 531 | assert_eq!(vec!().into_iter().collect::>(), 532 | cursor.iter_dup_from(b"f").flat_map(|x| x).collect::>()); 533 | 534 | assert_eq!(items.clone().into_iter().skip(3).take(3).collect::>(), 535 | cursor.iter_dup_of(b"b").collect::>()); 536 | 537 | assert_eq!(0, cursor.iter_dup_of(b"foo").count()); 538 | } 539 | 540 | #[test] 541 | fn test_put_del() { 542 | let dir = TempDir::new("test").unwrap(); 543 | let env = Environment::new().open(dir.path()).unwrap(); 544 | let db = env.open_db(None).unwrap(); 545 | 546 | let mut txn = env.begin_rw_txn().unwrap(); 547 | let mut cursor = txn.open_rw_cursor(db).unwrap(); 548 | 549 | cursor.put(b"key1", b"val1", WriteFlags::empty()).unwrap(); 550 | cursor.put(b"key2", b"val2", WriteFlags::empty()).unwrap(); 551 | cursor.put(b"key3", b"val3", WriteFlags::empty()).unwrap(); 552 | 553 | assert_eq!((Some(&b"key3"[..]), &b"val3"[..]), 554 | cursor.get(None, None, MDB_GET_CURRENT).unwrap()); 555 | 556 | cursor.del(WriteFlags::empty()).unwrap(); 557 | assert_eq!((Some(&b"key2"[..]), &b"val2"[..]), 558 | cursor.get(None, None, MDB_LAST).unwrap()); 559 | } 560 | 561 | /// Benchmark of iterator sequential read performance. 562 | #[bench] 563 | fn bench_get_seq_iter(b: &mut Bencher) { 564 | let n = 100; 565 | let (_dir, env) = setup_bench_db(n); 566 | let db = env.open_db(None).unwrap(); 567 | let txn = env.begin_ro_txn().unwrap(); 568 | 569 | b.iter(|| { 570 | let mut cursor = txn.open_ro_cursor(db).unwrap(); 571 | let mut i = 0; 572 | let mut count = 0u32; 573 | 574 | for (key, data) in cursor.iter() { 575 | i = i + key.len() + data.len(); 576 | count = count + 1; 577 | } 578 | 579 | black_box(i); 580 | assert_eq!(count, n); 581 | }); 582 | } 583 | 584 | /// Benchmark of cursor sequential read performance. 585 | #[bench] 586 | fn bench_get_seq_cursor(b: &mut Bencher) { 587 | let n = 100; 588 | let (_dir, env) = setup_bench_db(n); 589 | let db = env.open_db(None).unwrap(); 590 | let txn = env.begin_ro_txn().unwrap(); 591 | 592 | b.iter(|| { 593 | let cursor = txn.open_ro_cursor(db).unwrap(); 594 | let mut i = 0; 595 | let mut count = 0u32; 596 | 597 | while let Ok((key_opt, val)) = cursor.get(None, None, MDB_NEXT) { 598 | i += key_opt.map(|key| key.len()).unwrap_or(0) + val.len(); 599 | count += 1; 600 | } 601 | 602 | black_box(i); 603 | assert_eq!(count, n); 604 | }); 605 | } 606 | 607 | /// Benchmark of raw LMDB sequential read performance (control). 608 | #[bench] 609 | fn bench_get_seq_raw(b: &mut Bencher) { 610 | let n = 100; 611 | let (_dir, env) = setup_bench_db(n); 612 | let db = env.open_db(None).unwrap(); 613 | 614 | let dbi: MDB_dbi = db.dbi(); 615 | let _txn = env.begin_ro_txn().unwrap(); 616 | let txn = _txn.txn(); 617 | 618 | let mut key = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 619 | let mut data = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 620 | let mut cursor: *mut MDB_cursor = ptr::null_mut(); 621 | 622 | b.iter(|| unsafe { 623 | mdb_cursor_open(txn, dbi, &mut cursor); 624 | let mut i = 0; 625 | let mut count = 0u32; 626 | 627 | while mdb_cursor_get(cursor, &mut key, &mut data, MDB_NEXT) == 0 { 628 | i += key.mv_size + data.mv_size; 629 | count += 1; 630 | }; 631 | 632 | black_box(i); 633 | assert_eq!(count, n); 634 | mdb_cursor_close(cursor); 635 | }); 636 | } 637 | } 638 | -------------------------------------------------------------------------------- /src/database.rs: -------------------------------------------------------------------------------- 1 | use libc::c_uint; 2 | use std::ffi::CString; 3 | use std::ptr; 4 | 5 | use ffi; 6 | 7 | use error::{Result, lmdb_result}; 8 | 9 | /// A handle to an individual database in an environment. 10 | /// 11 | /// A database handle denotes the name and parameters of a database in an environment. 12 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 13 | pub struct Database { 14 | dbi: ffi::MDB_dbi, 15 | } 16 | 17 | impl Database { 18 | 19 | /// Opens a new database handle in the given transaction. 20 | /// 21 | /// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`, 22 | /// or `RwTransaction::create_db`. 23 | pub(crate) unsafe fn new(txn: *mut ffi::MDB_txn, 24 | name: Option<&str>, 25 | flags: c_uint) 26 | -> Result { 27 | let c_name = name.map(|n| CString::new(n).unwrap()); 28 | let name_ptr = if let Some(ref c_name) = c_name { c_name.as_ptr() } else { ptr::null() }; 29 | let mut dbi: ffi::MDB_dbi = 0; 30 | lmdb_result(ffi::mdb_dbi_open(txn, name_ptr, flags, &mut dbi))?; 31 | Ok(Database { dbi: dbi }) 32 | } 33 | 34 | /// Returns the underlying LMDB database handle. 35 | /// 36 | /// The caller **must** ensure that the handle is not used after the lifetime of the 37 | /// environment, or after the database has been closed. 38 | pub fn dbi(&self) -> ffi::MDB_dbi { 39 | self.dbi 40 | } 41 | } 42 | 43 | unsafe impl Sync for Database {} 44 | unsafe impl Send for Database {} 45 | -------------------------------------------------------------------------------- /src/environment.rs: -------------------------------------------------------------------------------- 1 | use libc::{c_uint, size_t}; 2 | use std::{fmt, ptr, result, mem}; 3 | use std::ffi::CString; 4 | #[cfg(unix)] 5 | use std::os::unix::ffi::OsStrExt; 6 | #[cfg(windows)] 7 | use std::ffi::OsStr; 8 | use std::path::Path; 9 | use std::sync::Mutex; 10 | 11 | use ffi; 12 | 13 | use error::{Result, lmdb_result}; 14 | use database::Database; 15 | use transaction::{RoTransaction, RwTransaction, Transaction}; 16 | use flags::{DatabaseFlags, EnvironmentFlags}; 17 | 18 | #[cfg(windows)] 19 | /// Adding a 'missing' trait from windows OsStrExt 20 | trait OsStrExtLmdb { 21 | fn as_bytes(&self) -> &[u8]; 22 | } 23 | #[cfg(windows)] 24 | impl OsStrExtLmdb for OsStr { 25 | fn as_bytes(&self) -> &[u8] { 26 | &self.to_str().unwrap().as_bytes() 27 | } 28 | } 29 | 30 | /// An LMDB environment. 31 | /// 32 | /// An environment supports multiple databases, all residing in the same shared-memory map. 33 | pub struct Environment { 34 | env: *mut ffi::MDB_env, 35 | dbi_open_mutex: Mutex<()>, 36 | } 37 | 38 | impl Environment { 39 | 40 | /// Creates a new builder for specifying options for opening an LMDB environment. 41 | pub fn new() -> EnvironmentBuilder { 42 | EnvironmentBuilder { 43 | flags: EnvironmentFlags::empty(), 44 | max_readers: None, 45 | max_dbs: None, 46 | map_size: None 47 | } 48 | } 49 | 50 | /// Returns a raw pointer to the underlying LMDB environment. 51 | /// 52 | /// The caller **must** ensure that the pointer is not dereferenced after the lifetime of the 53 | /// environment. 54 | pub fn env(&self) -> *mut ffi::MDB_env { 55 | self.env 56 | } 57 | 58 | /// Opens a handle to an LMDB database. 59 | /// 60 | /// If `name` is `None`, then the returned handle will be for the default database. 61 | /// 62 | /// If `name` is not `None`, then the returned handle will be for a named database. In this 63 | /// case the environment must be configured to allow named databases through 64 | /// `EnvironmentBuilder::set_max_dbs`. 65 | /// 66 | /// The returned database handle may be shared among any transaction in the environment. 67 | /// 68 | /// This function will fail with `Error::BadRslot` if called by a thread which has an ongoing 69 | /// transaction. 70 | /// 71 | /// The database name may not contain the null character. 72 | pub fn open_db<'env>(&'env self, name: Option<&str>) -> Result { 73 | let mutex = self.dbi_open_mutex.lock(); 74 | let txn = self.begin_ro_txn()?; 75 | let db = unsafe { txn.open_db(name)? }; 76 | txn.commit()?; 77 | drop(mutex); 78 | Ok(db) 79 | } 80 | 81 | /// Opens a handle to an LMDB database, creating the database if necessary. 82 | /// 83 | /// If the database is already created, the given option flags will be added to it. 84 | /// 85 | /// If `name` is `None`, then the returned handle will be for the default database. 86 | /// 87 | /// If `name` is not `None`, then the returned handle will be for a named database. In this 88 | /// case the environment must be configured to allow named databases through 89 | /// `EnvironmentBuilder::set_max_dbs`. 90 | /// 91 | /// The returned database handle may be shared among any transaction in the environment. 92 | /// 93 | /// This function will fail with `Error::BadRslot` if called by a thread with an open 94 | /// transaction. 95 | pub fn create_db<'env>(&'env self, 96 | name: Option<&str>, 97 | flags: DatabaseFlags) 98 | -> Result { 99 | let mutex = self.dbi_open_mutex.lock(); 100 | let txn = self.begin_rw_txn()?; 101 | let db = unsafe { txn.create_db(name, flags)? }; 102 | txn.commit()?; 103 | drop(mutex); 104 | Ok(db) 105 | } 106 | 107 | /// Retrieves the set of flags which the database is opened with. 108 | /// 109 | /// The database must belong to to this environment. 110 | pub fn get_db_flags<'env>(&'env self, db: Database) -> Result { 111 | let txn = self.begin_ro_txn()?; 112 | let mut flags: c_uint = 0; 113 | unsafe { 114 | lmdb_result(ffi::mdb_dbi_flags(txn.txn(), db.dbi(), &mut flags))?; 115 | } 116 | Ok(DatabaseFlags::from_bits(flags).unwrap()) 117 | } 118 | 119 | /// Create a read-only transaction for use with the environment. 120 | pub fn begin_ro_txn<'env>(&'env self) -> Result> { 121 | RoTransaction::new(self) 122 | } 123 | 124 | /// Create a read-write transaction for use with the environment. This method will block while 125 | /// there are any other read-write transactions open on the environment. 126 | pub fn begin_rw_txn<'env>(&'env self) -> Result> { 127 | RwTransaction::new(self) 128 | } 129 | 130 | /// Flush data buffers to disk. 131 | /// 132 | /// Data is always written to disk when `Transaction::commit` is called, but the operating 133 | /// system may keep it buffered. LMDB always flushes the OS buffers upon commit as well, unless 134 | /// the environment was opened with `MDB_NOSYNC` or in part `MDB_NOMETASYNC`. 135 | pub fn sync(&self, force: bool) -> Result<()> { 136 | unsafe { 137 | lmdb_result(ffi::mdb_env_sync(self.env(), if force { 1 } else { 0 })) 138 | } 139 | } 140 | 141 | /// Closes the database handle. Normally unnecessary. 142 | /// 143 | /// Closing a database handle is not necessary, but lets `Transaction::open_database` reuse the 144 | /// handle value. Usually it's better to set a bigger `EnvironmentBuilder::set_max_dbs`, unless 145 | /// that value would be large. 146 | /// 147 | /// ## Safety 148 | /// 149 | /// This call is not mutex protected. Databases should only be closed by a single thread, and 150 | /// only if no other threads are going to reference the database handle or one of its cursors 151 | /// any further. Do not close a handle if an existing transaction has modified its database. 152 | /// Doing so can cause misbehavior from database corruption to errors like 153 | /// `Error::BadValSize` (since the DB name is gone). 154 | pub unsafe fn close_db(&mut self, db: Database) { 155 | ffi::mdb_dbi_close(self.env, db.dbi()); 156 | } 157 | 158 | /// Retrieves statistics about this environment. 159 | pub fn stat(&self) -> Result { 160 | unsafe { 161 | let mut stat = Stat(mem::zeroed()); 162 | lmdb_try!(ffi::mdb_env_stat(self.env(), &mut stat.0)); 163 | Ok(stat) 164 | } 165 | } 166 | } 167 | 168 | /// Environment statistics. 169 | /// 170 | /// Contains information about the size and layout of an LMDB environment. 171 | pub struct Stat(ffi::MDB_stat); 172 | 173 | impl Stat { 174 | /// Size of a database page. This is the same for all databases in the environment. 175 | #[inline] 176 | pub fn page_size(&self) -> u32 { 177 | self.0.ms_psize 178 | } 179 | 180 | /// Depth (height) of the B-tree. 181 | #[inline] 182 | pub fn depth(&self) -> u32 { 183 | self.0.ms_depth 184 | } 185 | 186 | /// Number of internal (non-leaf) pages. 187 | #[inline] 188 | pub fn branch_pages(&self) -> usize { 189 | self.0.ms_branch_pages 190 | } 191 | 192 | /// Number of leaf pages. 193 | #[inline] 194 | pub fn leaf_pages(&self) -> usize { 195 | self.0.ms_leaf_pages 196 | } 197 | 198 | /// Number of overflow pages. 199 | #[inline] 200 | pub fn overflow_pages(&self) -> usize { 201 | self.0.ms_overflow_pages 202 | } 203 | 204 | /// Number of data items. 205 | #[inline] 206 | pub fn entries(&self) -> usize { 207 | self.0.ms_entries 208 | } 209 | } 210 | 211 | unsafe impl Send for Environment {} 212 | unsafe impl Sync for Environment {} 213 | 214 | impl fmt::Debug for Environment { 215 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 216 | f.debug_struct("Environment").finish() 217 | } 218 | } 219 | 220 | impl Drop for Environment { 221 | fn drop(&mut self) { 222 | unsafe { ffi::mdb_env_close(self.env) } 223 | } 224 | } 225 | 226 | /////////////////////////////////////////////////////////////////////////////////////////////////// 227 | //// Environment Builder 228 | /////////////////////////////////////////////////////////////////////////////////////////////////// 229 | 230 | /// Options for opening or creating an environment. 231 | #[derive(Debug, PartialEq, Eq, Copy, Clone)] 232 | pub struct EnvironmentBuilder { 233 | flags: EnvironmentFlags, 234 | max_readers: Option, 235 | max_dbs: Option, 236 | map_size: Option, 237 | } 238 | 239 | impl EnvironmentBuilder { 240 | 241 | /// Open an environment. 242 | /// 243 | /// On UNIX, the database files will be opened with 644 permissions. 244 | /// 245 | /// The path may not contain the null character, Windows UNC (Uniform Naming Convention) 246 | /// paths are not supported either. 247 | pub fn open(&self, path: &Path) -> Result { 248 | self.open_with_permissions(path, 0o644) 249 | } 250 | 251 | /// Open an environment with the provided UNIX permissions. 252 | /// 253 | /// On Windows, the permissions will be ignored. 254 | /// 255 | /// The path may not contain the null character, Windows UNC (Uniform Naming Convention) 256 | /// paths are not supported either. 257 | pub fn open_with_permissions(&self, path: &Path, mode: ffi::mode_t) -> Result { 258 | let mut env: *mut ffi::MDB_env = ptr::null_mut(); 259 | unsafe { 260 | lmdb_try!(ffi::mdb_env_create(&mut env)); 261 | if let Some(max_readers) = self.max_readers { 262 | lmdb_try_with_cleanup!(ffi::mdb_env_set_maxreaders(env, max_readers), 263 | ffi::mdb_env_close(env)) 264 | } 265 | if let Some(max_dbs) = self.max_dbs { 266 | lmdb_try_with_cleanup!(ffi::mdb_env_set_maxdbs(env, max_dbs), 267 | ffi::mdb_env_close(env)) 268 | } 269 | if let Some(map_size) = self.map_size { 270 | lmdb_try_with_cleanup!(ffi::mdb_env_set_mapsize(env, map_size), 271 | ffi::mdb_env_close(env)) 272 | } 273 | let path = match CString::new(path.as_os_str().as_bytes()) { 274 | Ok(path) => path, 275 | Err(..) => return Err(::Error::Invalid), 276 | }; 277 | lmdb_try_with_cleanup!(ffi::mdb_env_open(env, path.as_ptr(), self.flags.bits(), mode), 278 | ffi::mdb_env_close(env)); 279 | } 280 | Ok(Environment { env: env, dbi_open_mutex: Mutex::new(()) }) 281 | 282 | } 283 | 284 | /// Sets the provided options in the environment. 285 | pub fn set_flags(&mut self, flags: EnvironmentFlags) -> &mut EnvironmentBuilder { 286 | self.flags = flags; 287 | self 288 | } 289 | 290 | /// Sets the maximum number of threads or reader slots for the environment. 291 | /// 292 | /// This defines the number of slots in the lock table that is used to track readers in the 293 | /// the environment. The default is 126. Starting a read-only transaction normally ties a lock 294 | /// table slot to the current thread until the environment closes or the thread exits. If 295 | /// `MDB_NOTLS` is in use, `Environment::open_txn` instead ties the slot to the `Transaction` 296 | /// object until it or the `Environment` object is destroyed. 297 | pub fn set_max_readers(&mut self, max_readers: c_uint) -> &mut EnvironmentBuilder { 298 | self.max_readers = Some(max_readers); 299 | self 300 | } 301 | 302 | /// Sets the maximum number of named databases for the environment. 303 | /// 304 | /// This function is only needed if multiple databases will be used in the 305 | /// environment. Simpler applications that use the environment as a single 306 | /// unnamed database can ignore this option. 307 | /// 308 | /// Currently a moderate number of slots are cheap but a huge number gets 309 | /// expensive: 7-120 words per transaction, and every `Transaction::open_db` 310 | /// does a linear search of the opened slots. 311 | pub fn set_max_dbs(&mut self, max_readers: c_uint) -> &mut EnvironmentBuilder { 312 | self.max_dbs = Some(max_readers); 313 | self 314 | } 315 | 316 | /// Sets the size of the memory map to use for the environment. 317 | /// 318 | /// The size should be a multiple of the OS page size. The default is 319 | /// 10485760 bytes. The size of the memory map is also the maximum size 320 | /// of the database. The value should be chosen as large as possible, 321 | /// to accommodate future growth of the database. It may be increased at 322 | /// later times. 323 | /// 324 | /// Any attempt to set a size smaller than the space already consumed 325 | /// by the environment will be silently changed to the current size of the used space. 326 | pub fn set_map_size(&mut self, map_size: size_t) -> &mut EnvironmentBuilder { 327 | self.map_size = Some(map_size); 328 | self 329 | } 330 | } 331 | 332 | #[cfg(test)] 333 | mod test { 334 | 335 | extern crate byteorder; 336 | 337 | use tempdir::TempDir; 338 | use self::byteorder::{ByteOrder, LittleEndian}; 339 | 340 | use flags::*; 341 | 342 | use super::*; 343 | 344 | #[test] 345 | fn test_open() { 346 | let dir = TempDir::new("test").unwrap(); 347 | 348 | // opening non-existent env with read-only should fail 349 | assert!(Environment::new().set_flags(EnvironmentFlags::READ_ONLY) 350 | .open(dir.path()) 351 | .is_err()); 352 | 353 | // opening non-existent env should succeed 354 | assert!(Environment::new().open(dir.path()).is_ok()); 355 | 356 | // opening env with read-only should succeed 357 | assert!(Environment::new().set_flags(EnvironmentFlags::READ_ONLY) 358 | .open(dir.path()) 359 | .is_ok()); 360 | } 361 | 362 | #[test] 363 | fn test_begin_txn() { 364 | let dir = TempDir::new("test").unwrap(); 365 | 366 | { // writable environment 367 | let env = Environment::new().open(dir.path()).unwrap(); 368 | 369 | assert!(env.begin_rw_txn().is_ok()); 370 | assert!(env.begin_ro_txn().is_ok()); 371 | } 372 | 373 | { // read-only environment 374 | let env = Environment::new().set_flags(EnvironmentFlags::READ_ONLY) 375 | .open(dir.path()) 376 | .unwrap(); 377 | 378 | assert!(env.begin_rw_txn().is_err()); 379 | assert!(env.begin_ro_txn().is_ok()); 380 | } 381 | } 382 | 383 | #[test] 384 | fn test_open_db() { 385 | let dir = TempDir::new("test").unwrap(); 386 | let env = Environment::new().set_max_dbs(1) 387 | .open(dir.path()) 388 | .unwrap(); 389 | 390 | assert!(env.open_db(None).is_ok()); 391 | assert!(env.open_db(Some("testdb")).is_err()); 392 | } 393 | 394 | #[test] 395 | fn test_create_db() { 396 | let dir = TempDir::new("test").unwrap(); 397 | let env = Environment::new().set_max_dbs(11) 398 | .open(dir.path()) 399 | .unwrap(); 400 | assert!(env.open_db(Some("testdb")).is_err()); 401 | assert!(env.create_db(Some("testdb"), DatabaseFlags::empty()).is_ok()); 402 | assert!(env.open_db(Some("testdb")).is_ok()) 403 | } 404 | 405 | #[test] 406 | fn test_close_database() { 407 | let dir = TempDir::new("test").unwrap(); 408 | let mut env = Environment::new().set_max_dbs(10) 409 | .open(dir.path()) 410 | .unwrap(); 411 | 412 | let db = env.create_db(Some("db"), DatabaseFlags::empty()).unwrap(); 413 | unsafe { env.close_db(db); } 414 | assert!(env.open_db(Some("db")).is_ok()); 415 | } 416 | 417 | #[test] 418 | fn test_sync() { 419 | let dir = TempDir::new("test").unwrap(); 420 | { 421 | let env = Environment::new().open(dir.path()).unwrap(); 422 | assert!(env.sync(true).is_ok()); 423 | } { 424 | let env = Environment::new().set_flags(EnvironmentFlags::READ_ONLY) 425 | .open(dir.path()) 426 | .unwrap(); 427 | assert!(env.sync(true).is_err()); 428 | } 429 | } 430 | 431 | #[test] 432 | fn test_stat() { 433 | let dir = TempDir::new("test").unwrap(); 434 | let env = Environment::new().open(dir.path()).unwrap(); 435 | 436 | // Stats should be empty initially. 437 | let stat = env.stat().unwrap(); 438 | assert_eq!(stat.page_size(), 4096); 439 | assert_eq!(stat.depth(), 0); 440 | assert_eq!(stat.branch_pages(), 0); 441 | assert_eq!(stat.leaf_pages(), 0); 442 | assert_eq!(stat.overflow_pages(), 0); 443 | assert_eq!(stat.entries(), 0); 444 | 445 | let db = env.open_db(None).unwrap(); 446 | 447 | // Write a few small values. 448 | for i in 0..64 { 449 | let mut value = [0u8; 8]; 450 | LittleEndian::write_u64(&mut value, i); 451 | let mut tx = env.begin_rw_txn().expect("begin_rw_txn"); 452 | tx.put(db, &value, &value, WriteFlags::default()).expect("tx.put"); 453 | tx.commit().expect("tx.commit") 454 | } 455 | 456 | // Stats should now reflect inserted values. 457 | let stat = env.stat().unwrap(); 458 | assert_eq!(stat.page_size(), 4096); 459 | assert_eq!(stat.depth(), 1); 460 | assert_eq!(stat.branch_pages(), 0); 461 | assert_eq!(stat.leaf_pages(), 1); 462 | assert_eq!(stat.overflow_pages(), 0); 463 | assert_eq!(stat.entries(), 64); 464 | } 465 | } 466 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use libc::c_int; 2 | use std::error::Error as StdError; 3 | use std::ffi::CStr; 4 | use std::os::raw::c_char; 5 | use std::{fmt, result, str}; 6 | 7 | use ffi; 8 | 9 | /// An LMDB error kind. 10 | #[derive(Debug, Eq, PartialEq, Copy, Clone)] 11 | pub enum Error { 12 | /// key/data pair already exists. 13 | KeyExist, 14 | /// key/data pair not found (EOF). 15 | NotFound, 16 | /// Requested page not found - this usually indicates corruption. 17 | PageNotFound, 18 | /// Located page was wrong type. 19 | Corrupted, 20 | /// Update of meta page failed or environment had fatal error. 21 | Panic, 22 | /// Environment version mismatch. 23 | VersionMismatch, 24 | /// File is not a valid LMDB file. 25 | Invalid, 26 | /// Environment mapsize reached. 27 | MapFull, 28 | /// Environment maxdbs reached. 29 | DbsFull, 30 | /// Environment maxreaders reached. 31 | ReadersFull, 32 | /// Too many TLS keys in use - Windows only. 33 | TlsFull, 34 | /// Txn has too many dirty pages. 35 | TxnFull, 36 | /// Cursor stack too deep - internal error. 37 | CursorFull, 38 | /// Page has not enough space - internal error. 39 | PageFull, 40 | /// Database contents grew beyond environment mapsize. 41 | MapResized, 42 | /// MDB_Incompatible: Operation and DB incompatible, or DB flags changed. 43 | Incompatible, 44 | /// Invalid reuse of reader locktable slot. 45 | BadRslot, 46 | /// Transaction cannot recover - it must be aborted. 47 | BadTxn, 48 | /// Unsupported size of key/DB name/data, or wrong DUP_FIXED size. 49 | BadValSize, 50 | /// The specified DBI was changed unexpectedly. 51 | BadDbi, 52 | /// Other error. 53 | Other(c_int), 54 | } 55 | 56 | impl Error { 57 | 58 | /// Converts a raw error code to an `Error`. 59 | pub fn from_err_code(err_code: c_int) -> Error { 60 | match err_code { 61 | ffi::MDB_KEYEXIST => Error::KeyExist, 62 | ffi::MDB_NOTFOUND => Error::NotFound, 63 | ffi::MDB_PAGE_NOTFOUND => Error::PageNotFound, 64 | ffi::MDB_CORRUPTED => Error::Corrupted, 65 | ffi::MDB_PANIC => Error::Panic, 66 | ffi::MDB_VERSION_MISMATCH => Error::VersionMismatch, 67 | ffi::MDB_INVALID => Error::Invalid, 68 | ffi::MDB_MAP_FULL => Error::MapFull, 69 | ffi::MDB_DBS_FULL => Error::DbsFull, 70 | ffi::MDB_READERS_FULL => Error::ReadersFull, 71 | ffi::MDB_TLS_FULL => Error::TlsFull, 72 | ffi::MDB_TXN_FULL => Error::TxnFull, 73 | ffi::MDB_CURSOR_FULL => Error::CursorFull, 74 | ffi::MDB_PAGE_FULL => Error::PageFull, 75 | ffi::MDB_MAP_RESIZED => Error::MapResized, 76 | ffi::MDB_INCOMPATIBLE => Error::Incompatible, 77 | ffi::MDB_BAD_RSLOT => Error::BadRslot, 78 | ffi::MDB_BAD_TXN => Error::BadTxn, 79 | ffi::MDB_BAD_VALSIZE => Error::BadValSize, 80 | ffi::MDB_BAD_DBI => Error::BadDbi, 81 | other => Error::Other(other), 82 | } 83 | } 84 | 85 | /// Converts an `Error` to the raw error code. 86 | pub fn to_err_code(&self) -> c_int { 87 | match *self { 88 | Error::KeyExist => ffi::MDB_KEYEXIST, 89 | Error::NotFound => ffi::MDB_NOTFOUND, 90 | Error::PageNotFound => ffi::MDB_PAGE_NOTFOUND, 91 | Error::Corrupted => ffi::MDB_CORRUPTED, 92 | Error::Panic => ffi::MDB_PANIC, 93 | Error::VersionMismatch => ffi::MDB_VERSION_MISMATCH, 94 | Error::Invalid => ffi::MDB_INVALID, 95 | Error::MapFull => ffi::MDB_MAP_FULL, 96 | Error::DbsFull => ffi::MDB_DBS_FULL, 97 | Error::ReadersFull => ffi::MDB_READERS_FULL, 98 | Error::TlsFull => ffi::MDB_TLS_FULL, 99 | Error::TxnFull => ffi::MDB_TXN_FULL, 100 | Error::CursorFull => ffi::MDB_CURSOR_FULL, 101 | Error::PageFull => ffi::MDB_PAGE_FULL, 102 | Error::MapResized => ffi::MDB_MAP_RESIZED, 103 | Error::Incompatible => ffi::MDB_INCOMPATIBLE, 104 | Error::BadRslot => ffi::MDB_BAD_RSLOT, 105 | Error::BadTxn => ffi::MDB_BAD_TXN, 106 | Error::BadValSize => ffi::MDB_BAD_VALSIZE, 107 | Error::BadDbi => ffi::MDB_BAD_DBI, 108 | Error::Other(err_code) => err_code, 109 | } 110 | } 111 | } 112 | 113 | impl fmt::Display for Error { 114 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 115 | write!(fmt, "{}", self.description()) 116 | } 117 | } 118 | 119 | impl StdError for Error { 120 | fn description(&self) -> &str { 121 | unsafe { 122 | // This is safe since the error messages returned from mdb_strerror are static. 123 | let err: *const c_char = ffi::mdb_strerror(self.to_err_code()) as *const c_char; 124 | str::from_utf8_unchecked(CStr::from_ptr(err).to_bytes()) 125 | } 126 | } 127 | } 128 | 129 | /// An LMDB result. 130 | pub type Result = result::Result; 131 | 132 | pub fn lmdb_result(err_code: c_int) -> Result<()> { 133 | if err_code == ffi::MDB_SUCCESS { 134 | Ok(()) 135 | } else { 136 | Err(Error::from_err_code(err_code)) 137 | } 138 | } 139 | 140 | #[cfg(test)] 141 | mod test { 142 | 143 | use std::error::Error as StdError; 144 | 145 | use super::*; 146 | 147 | #[test] 148 | fn test_description() { 149 | assert_eq!("Permission denied", 150 | Error::from_err_code(13).description()); 151 | assert_eq!("MDB_NOTFOUND: No matching key/data pair found", 152 | Error::NotFound.description()); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/flags.rs: -------------------------------------------------------------------------------- 1 | use libc::c_uint; 2 | 3 | use ffi::*; 4 | 5 | bitflags! { 6 | #[doc="Environment options."] 7 | #[derive(Default)] 8 | pub struct EnvironmentFlags: c_uint { 9 | 10 | #[doc="Use a fixed address for the mmap region. This flag must be specified"] 11 | #[doc="when creating the environment, and is stored persistently in the environment."] 12 | #[doc="If successful, the memory map will always reside at the same virtual address"] 13 | #[doc="and pointers used to reference data items in the database will be constant"] 14 | #[doc="across multiple invocations. This option may not always work, depending on"] 15 | #[doc="how the operating system has allocated memory to shared libraries and other uses."] 16 | #[doc="The feature is highly experimental."] 17 | const FIXED_MAP = MDB_FIXEDMAP; 18 | 19 | #[doc="By default, LMDB creates its environment in a directory whose pathname is given in"] 20 | #[doc="`path`, and creates its data and lock files under that directory. With this option,"] 21 | #[doc="`path` is used as-is for the database main data file. The database lock file is the"] 22 | #[doc="`path` with `-lock` appended."] 23 | const NO_SUB_DIR = MDB_NOSUBDIR; 24 | 25 | #[doc="Use a writeable memory map unless `READ_ONLY` is set. This is faster and uses"] 26 | #[doc="fewer mallocs, but loses protection from application bugs like wild pointer writes"] 27 | #[doc="and other bad updates into the database. Incompatible with nested transactions."] 28 | #[doc="Processes with and without `WRITE_MAP` on the same environment do not cooperate"] 29 | #[doc="well."] 30 | const WRITE_MAP = MDB_WRITEMAP; 31 | 32 | #[doc="Open the environment in read-only mode. No write operations will be allowed."] 33 | #[doc="When opening an environment, LMDB will still modify the lock file - except on"] 34 | #[doc="read-only filesystems, where LMDB does not use locks."] 35 | const READ_ONLY = MDB_RDONLY; 36 | 37 | #[doc="Flush system buffers to disk only once per transaction, omit the metadata flush."] 38 | #[doc="Defer that until the system flushes files to disk, or next non-`READ_ONLY` commit"] 39 | #[doc="or `Environment::sync`. This optimization maintains database integrity, but a"] 40 | #[doc="system crash may undo the last committed transaction. I.e. it preserves the ACI"] 41 | #[doc="(atomicity, consistency, isolation) but not D (durability) database property."] 42 | #[doc="\n\nThis flag may be changed at any time using `Environment::set_flags`."] 43 | const NO_META_SYNC = MDB_NOMETASYNC; 44 | 45 | #[doc="Don't flush system buffers to disk when committing a transaction. This optimization"] 46 | #[doc="means a system crash can corrupt the database or lose the last transactions if"] 47 | #[doc="buffers are not yet flushed to disk. The risk is governed by how often the system"] 48 | #[doc="flushes dirty buffers to disk and how often `Environment::sync` is called. However,"] 49 | #[doc="if the filesystem preserves write order and the `WRITE_MAP` flag is not used,"] 50 | #[doc="transactions exhibit ACI (atomicity, consistency, isolation) properties and only"] 51 | #[doc="lose D (durability). I.e. database integrity is maintained, but a system"] 52 | #[doc="crash may undo the final transactions. Note that (`NO_SYNC | WRITE_MAP`) leaves the"] 53 | #[doc="system with no hint for when to write transactions to disk, unless"] 54 | #[doc="`Environment::sync` is called. (`MAP_ASYNC | WRITE_MAP`) may be preferable."] 55 | #[doc="\n\nThis flag may be changed at any time using `Environment::set_flags`."] 56 | const NO_SYNC = MDB_NOSYNC; 57 | 58 | #[doc="When using `WRITE_MAP`, use asynchronous flushes to disk. As with `NO_SYNC`, a"] 59 | #[doc="system crash can then corrupt the database or lose the last transactions. Calling"] 60 | #[doc="`Environment::sync` ensures on-disk database integrity until next commit."] 61 | #[doc="\n\nThis flag may be changed at any time using `Environment::set_flags`."] 62 | const MAP_ASYNC = MDB_MAPASYNC; 63 | 64 | #[doc="Don't use thread-local storage. Tie reader locktable slots to transaction objects"] 65 | #[doc="instead of to threads. I.e. `RoTransaction::reset` keeps the slot reserved for the"] 66 | #[doc="transaction object. A thread may use parallel read-only transactions. A read-only"] 67 | #[doc="transaction may span threads if the user synchronizes its use. Applications that"] 68 | #[doc="multiplex many the user synchronizes its use. Applications that multiplex many user"] 69 | #[doc="threads over individual OS threads need this option. Such an application must also"] 70 | #[doc="serialize the write transactions in an OS thread, since LMDB's write locking is"] 71 | #[doc="unaware of the user threads."] 72 | const NO_TLS = MDB_NOTLS; 73 | 74 | #[doc="Do not do any locking. If concurrent access is anticipated, the caller must manage"] 75 | #[doc="all concurrency themself. For proper operation the caller must enforce"] 76 | #[doc="single-writer semantics, and must ensure that no readers are using old"] 77 | #[doc="transactions while a writer is active. The simplest approach is to use an exclusive"] 78 | #[doc="lock so that no readers may be active at all when a writer begins."] 79 | const NO_LOCK = MDB_NOLOCK; 80 | 81 | #[doc="Turn off readahead. Most operating systems perform readahead on read requests by"] 82 | #[doc="default. This option turns it off if the OS supports it. Turning it off may help"] 83 | #[doc="random read performance when the DB is larger than RAM and system RAM is full."] 84 | #[doc="The option is not implemented on Windows."] 85 | const NO_READAHEAD = MDB_NORDAHEAD; 86 | 87 | #[doc="Do not initialize malloc'd memory before writing to unused spaces in the data file."] 88 | #[doc="By default, memory for pages written to the data file is obtained using malloc."] 89 | #[doc="While these pages may be reused in subsequent transactions, freshly malloc'd pages"] 90 | #[doc="will be initialized to zeroes before use. This avoids persisting leftover data from"] 91 | #[doc="other code (that used the heap and subsequently freed the memory) into the data"] 92 | #[doc="file. Note that many other system libraries may allocate and free memory from the"] 93 | #[doc="heap for arbitrary uses. E.g., stdio may use the heap for file I/O buffers. This"] 94 | #[doc="initialization step has a modest performance cost so some applications may want to"] 95 | #[doc="disable it using this flag. This option can be a problem for applications which"] 96 | #[doc="handle sensitive data like passwords, and it makes memory checkers like Valgrind"] 97 | #[doc="noisy. This flag is not needed with `WRITE_MAP`, which writes directly to the mmap"] 98 | #[doc="instead of using malloc for pages. The initialization is also skipped if writing"] 99 | #[doc="with reserve; the caller is expected to overwrite all of the memory that was"] 100 | #[doc="reserved in that case."] 101 | #[doc="\n\nThis flag may be changed at any time using `Environment::set_flags`."] 102 | const NO_MEM_INIT = MDB_NOMEMINIT; 103 | } 104 | } 105 | 106 | bitflags! { 107 | #[doc="Database options."] 108 | #[derive(Default)] 109 | pub struct DatabaseFlags: c_uint { 110 | 111 | #[doc="Keys are strings to be compared in reverse order, from the end of the strings"] 112 | #[doc="to the beginning. By default, Keys are treated as strings and compared from"] 113 | #[doc="beginning to end."] 114 | const REVERSE_KEY = MDB_REVERSEKEY; 115 | 116 | #[doc="Duplicate keys may be used in the database. (Or, from another perspective,"] 117 | #[doc="keys may have multiple data items, stored in sorted order.) By default"] 118 | #[doc="keys must be unique and may have only a single data item."] 119 | const DUP_SORT = MDB_DUPSORT; 120 | 121 | #[doc="Keys are binary integers in native byte order. Setting this option requires all"] 122 | #[doc="keys to be the same size, typically 32 or 64 bits."] 123 | const INTEGER_KEY = MDB_INTEGERKEY; 124 | 125 | #[doc="This flag may only be used in combination with `DUP_SORT`. This option tells"] 126 | #[doc="the library that the data items for this database are all the same size, which"] 127 | #[doc="allows further optimizations in storage and retrieval. When all data items are"] 128 | #[doc="the same size, the `GET_MULTIPLE` and `NEXT_MULTIPLE` cursor operations may be"] 129 | #[doc="used to retrieve multiple items at once."] 130 | const DUP_FIXED = MDB_DUPFIXED; 131 | 132 | #[doc="This option specifies that duplicate data items are also integers, and"] 133 | #[doc="should be sorted as such."] 134 | const INTEGER_DUP = MDB_INTEGERDUP; 135 | 136 | #[doc="This option specifies that duplicate data items should be compared as strings"] 137 | #[doc="in reverse order."] 138 | const REVERSE_DUP = MDB_REVERSEDUP; 139 | } 140 | } 141 | 142 | bitflags! { 143 | #[doc="Write options."] 144 | #[derive(Default)] 145 | pub struct WriteFlags: c_uint { 146 | 147 | #[doc="Insert the new item only if the key does not already appear in the database."] 148 | #[doc="The function will return `LmdbError::KeyExist` if the key already appears in the"] 149 | #[doc="database, even if the database supports duplicates (`DUP_SORT`)."] 150 | const NO_OVERWRITE = MDB_NOOVERWRITE; 151 | 152 | #[doc="Insert the new item only if it does not already appear in the database."] 153 | #[doc="This flag may only be specified if the database was opened with `DUP_SORT`."] 154 | #[doc="The function will return `LmdbError::KeyExist` if the item already appears in the"] 155 | #[doc="database."] 156 | const NO_DUP_DATA = MDB_NODUPDATA; 157 | 158 | #[doc="For `Cursor::put`. Replace the item at the current cursor position. The key"] 159 | #[doc="parameter must match the current position. If using sorted duplicates (`DUP_SORT`)"] 160 | #[doc="the data item must still sort into the same position. This is intended to be used"] 161 | #[doc="when the new data is the same size as the old. Otherwise it will simply perform a"] 162 | #[doc="delete of the old record followed by an insert."] 163 | const CURRENT = MDB_CURRENT; 164 | 165 | #[doc="Append the given item to the end of the database. No key comparisons are performed."] 166 | #[doc="This option allows fast bulk loading when keys are already known to be in the"] 167 | #[doc="correct order. Loading unsorted keys with this flag will cause data corruption."] 168 | const APPEND = MDB_APPEND; 169 | 170 | #[doc="Same as `APPEND`, but for sorted dup data."] 171 | const APPEND_DUP = MDB_APPENDDUP; 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Idiomatic and safe APIs for interacting with the 2 | //! [Lightning Memory-mapped Database (LMDB)](https://symas.com/lmdb). 3 | 4 | #![cfg_attr(test, feature(test))] 5 | #![deny(missing_docs)] 6 | #![doc(html_root_url = "https://docs.rs/lmdb/0.8.0")] 7 | 8 | extern crate libc; 9 | extern crate lmdb_sys as ffi; 10 | 11 | #[cfg(test)] extern crate rand; 12 | #[cfg(test)] extern crate tempdir; 13 | #[cfg(test)] extern crate test; 14 | #[macro_use] extern crate bitflags; 15 | 16 | pub use cursor::{ 17 | Cursor, 18 | RoCursor, 19 | RwCursor, 20 | Iter, 21 | IterDup, 22 | }; 23 | pub use database::Database; 24 | pub use environment::{Environment, Stat, EnvironmentBuilder}; 25 | pub use error::{Error, Result}; 26 | pub use flags::*; 27 | pub use transaction::{ 28 | InactiveTransaction, 29 | RoTransaction, 30 | RwTransaction, 31 | Transaction, 32 | }; 33 | 34 | macro_rules! lmdb_try { 35 | ($expr:expr) => ({ 36 | match $expr { 37 | ::ffi::MDB_SUCCESS => (), 38 | err_code => return Err(::Error::from_err_code(err_code)), 39 | } 40 | }) 41 | } 42 | 43 | macro_rules! lmdb_try_with_cleanup { 44 | ($expr:expr, $cleanup:expr) => ({ 45 | match $expr { 46 | ::ffi::MDB_SUCCESS => (), 47 | err_code => { 48 | let _ = $cleanup; 49 | return Err(::Error::from_err_code(err_code)) 50 | }, 51 | } 52 | }) 53 | } 54 | 55 | mod flags; 56 | mod cursor; 57 | mod database; 58 | mod environment; 59 | mod error; 60 | mod transaction; 61 | 62 | #[cfg(test)] 63 | mod test_utils { 64 | 65 | extern crate byteorder; 66 | 67 | use self::byteorder::{ByteOrder, LittleEndian}; 68 | use tempdir::TempDir; 69 | 70 | use super::*; 71 | 72 | pub fn get_key(n: u32) -> String { 73 | format!("key{}", n) 74 | } 75 | 76 | pub fn get_data(n: u32) -> String { 77 | format!("data{}", n) 78 | } 79 | 80 | pub fn setup_bench_db<'a>(num_rows: u32) -> (TempDir, Environment) { 81 | let dir = TempDir::new("test").unwrap(); 82 | let env = Environment::new().open(dir.path()).unwrap(); 83 | 84 | { 85 | let db = env.open_db(None).unwrap(); 86 | let mut txn = env.begin_rw_txn().unwrap(); 87 | for i in 0..num_rows { 88 | txn.put(db, &get_key(i), &get_data(i), WriteFlags::empty()).unwrap(); 89 | } 90 | txn.commit().unwrap(); 91 | } 92 | (dir, env) 93 | } 94 | 95 | /// Regression test for https://github.com/danburkert/lmdb-rs/issues/21. 96 | /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer 97 | /// GCC compilers. 98 | #[test] 99 | fn issue_21_regression() { 100 | const HEIGHT_KEY: [u8; 1] = [0]; 101 | 102 | let dir = TempDir::new("test").unwrap(); 103 | 104 | let env = { 105 | let mut builder = Environment::new(); 106 | builder.set_max_dbs(2); 107 | builder.set_map_size(1_000_000); 108 | builder.open(dir.path()).expect("open lmdb env") 109 | }; 110 | let index = env.create_db(None, DatabaseFlags::DUP_SORT).expect("open index db"); 111 | 112 | for height in 0..1000 { 113 | let mut value = [0u8; 8]; 114 | LittleEndian::write_u64(&mut value, height); 115 | let mut tx = env.begin_rw_txn().expect("begin_rw_txn"); 116 | tx.put(index, 117 | &HEIGHT_KEY, 118 | &value, 119 | WriteFlags::empty()).expect("tx.put"); 120 | tx.commit().expect("tx.commit") 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/transaction.rs: -------------------------------------------------------------------------------- 1 | use libc::{c_uint, c_void, size_t}; 2 | use std::{fmt, mem, ptr, result, slice}; 3 | use std::marker::PhantomData ; 4 | 5 | use ffi; 6 | 7 | use cursor::{RoCursor, RwCursor}; 8 | use environment::Environment; 9 | use database::Database; 10 | use error::{Error, Result, lmdb_result}; 11 | use flags::{DatabaseFlags, EnvironmentFlags, WriteFlags}; 12 | 13 | /// An LMDB transaction. 14 | /// 15 | /// All database operations require a transaction. 16 | pub trait Transaction : Sized { 17 | 18 | /// Returns a raw pointer to the underlying LMDB transaction. 19 | /// 20 | /// The caller **must** ensure that the pointer is not used after the 21 | /// lifetime of the transaction. 22 | fn txn(&self) -> *mut ffi::MDB_txn; 23 | 24 | /// Commits the transaction. 25 | /// 26 | /// Any pending operations will be saved. 27 | fn commit(self) -> Result<()> { 28 | unsafe { 29 | let result = lmdb_result(ffi::mdb_txn_commit(self.txn())); 30 | mem::forget(self); 31 | result 32 | } 33 | } 34 | 35 | /// Aborts the transaction. 36 | /// 37 | /// Any pending operations will not be saved. 38 | fn abort(self) { 39 | // Abort should be performed in transaction destructors. 40 | } 41 | 42 | /// Opens a database in the transaction. 43 | /// 44 | /// If `name` is `None`, then the default database will be opened, otherwise 45 | /// a named database will be opened. The database handle will be private to 46 | /// the transaction until the transaction is successfully committed. If the 47 | /// transaction is aborted the returned database handle should no longer be 48 | /// used. 49 | /// 50 | /// Prefer using `Environment::open_db`. 51 | /// 52 | /// ## Safety 53 | /// 54 | /// This function (as well as `Environment::open_db`, 55 | /// `Environment::create_db`, and `Database::create`) **must not** be called 56 | /// from multiple concurrent transactions in the same environment. A 57 | /// transaction which uses this function must finish (either commit or 58 | /// abort) before any other transaction may use this function. 59 | unsafe fn open_db(&self, name: Option<&str>) -> Result { 60 | Database::new(self.txn(), name, 0) 61 | } 62 | 63 | /// Gets an item from a database. 64 | /// 65 | /// This function retrieves the data associated with the given key in the 66 | /// database. If the database supports duplicate keys 67 | /// (`DatabaseFlags::DUP_SORT`) then the first data item for the key will be 68 | /// returned. Retrieval of other items requires the use of 69 | /// `Transaction::cursor_get`. If the item is not in the database, then 70 | /// `Error::NotFound` will be returned. 71 | fn get<'txn, K>(&'txn self, 72 | database: Database, 73 | key: &K) 74 | -> Result<&'txn [u8]> 75 | where K: AsRef<[u8]> { 76 | let key = key.as_ref(); 77 | let mut key_val: ffi::MDB_val = ffi::MDB_val { mv_size: key.len() as size_t, 78 | mv_data: key.as_ptr() as *mut c_void }; 79 | let mut data_val: ffi::MDB_val = ffi::MDB_val { mv_size: 0, 80 | mv_data: ptr::null_mut() }; 81 | unsafe { 82 | match ffi::mdb_get(self.txn(), database.dbi(), &mut key_val, &mut data_val) { 83 | ffi::MDB_SUCCESS => { 84 | Ok(slice::from_raw_parts(data_val.mv_data as *const u8, 85 | data_val.mv_size as usize)) 86 | }, 87 | err_code => Err(Error::from_err_code(err_code)), 88 | } 89 | } 90 | } 91 | 92 | /// Open a new read-only cursor on the given database. 93 | fn open_ro_cursor<'txn>(&'txn self, db: Database) -> Result> { 94 | RoCursor::new(self, db) 95 | } 96 | 97 | /// Gets the option flags for the given database in the transaction. 98 | fn db_flags(&self, db: Database) -> Result { 99 | let mut flags: c_uint = 0; 100 | unsafe { 101 | lmdb_result(ffi::mdb_dbi_flags(self.txn(), db.dbi(), &mut flags))?; 102 | } 103 | Ok(DatabaseFlags::from_bits_truncate(flags)) 104 | } 105 | } 106 | 107 | /// An LMDB read-only transaction. 108 | pub struct RoTransaction<'env> { 109 | txn: *mut ffi::MDB_txn, 110 | _marker: PhantomData<&'env ()>, 111 | } 112 | 113 | impl <'env> fmt::Debug for RoTransaction<'env> { 114 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 115 | f.debug_struct("RoTransaction").finish() 116 | } 117 | } 118 | 119 | impl <'env> Drop for RoTransaction<'env> { 120 | fn drop(&mut self) { 121 | unsafe { ffi::mdb_txn_abort(self.txn) } 122 | } 123 | } 124 | 125 | impl <'env> RoTransaction<'env> { 126 | 127 | /// Creates a new read-only transaction in the given environment. Prefer 128 | /// using `Environment::begin_ro_txn`. 129 | pub(crate) fn new(env: &'env Environment) -> Result> { 130 | let mut txn: *mut ffi::MDB_txn = ptr::null_mut(); 131 | unsafe { 132 | lmdb_result(ffi::mdb_txn_begin(env.env(), ptr::null_mut(), ffi::MDB_RDONLY, &mut txn))?; 133 | Ok(RoTransaction { txn: txn, _marker: PhantomData }) 134 | } 135 | } 136 | 137 | /// Resets the read-only transaction. 138 | /// 139 | /// Abort the transaction like `Transaction::abort`, but keep the 140 | /// transaction handle. `InactiveTransaction::renew` may reuse the handle. 141 | /// This saves allocation overhead if the process will start a new read-only 142 | /// transaction soon, and also locking overhead if 143 | /// `EnvironmentFlags::NO_TLS` is in use. The reader table lock is released, 144 | /// but the table slot stays tied to its thread or transaction. Reader locks 145 | /// generally don't interfere with writers, but they keep old versions of 146 | /// database pages allocated. Thus they prevent the old pages from being 147 | /// reused when writers commit new data, and so under heavy load the 148 | /// database size may grow much more rapidly than otherwise. 149 | pub fn reset(self) -> InactiveTransaction<'env> { 150 | let txn = self.txn; 151 | unsafe { 152 | mem::forget(self); 153 | ffi::mdb_txn_reset(txn) 154 | }; 155 | InactiveTransaction { txn: txn, _marker: PhantomData } 156 | } 157 | } 158 | 159 | impl <'env> Transaction for RoTransaction<'env> { 160 | fn txn(&self) -> *mut ffi::MDB_txn { 161 | self.txn 162 | } 163 | } 164 | 165 | /// An inactive read-only transaction. 166 | pub struct InactiveTransaction<'env> { 167 | txn: *mut ffi::MDB_txn, 168 | _marker: PhantomData<&'env ()>, 169 | } 170 | 171 | impl <'env> fmt::Debug for InactiveTransaction<'env> { 172 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 173 | f.debug_struct("InactiveTransaction").finish() 174 | } 175 | } 176 | 177 | impl <'env> Drop for InactiveTransaction<'env> { 178 | fn drop(&mut self) { 179 | unsafe { ffi::mdb_txn_abort(self.txn) } 180 | } 181 | } 182 | 183 | impl <'env> InactiveTransaction<'env> { 184 | 185 | /// Renews the inactive transaction, returning an active read-only 186 | /// transaction. 187 | /// 188 | /// This acquires a new reader lock for a transaction handle that had been 189 | /// released by `RoTransaction::reset`. 190 | pub fn renew(self) -> Result> { 191 | let txn = self.txn; 192 | unsafe { 193 | mem::forget(self); 194 | lmdb_result(ffi::mdb_txn_renew(txn))? 195 | }; 196 | Ok(RoTransaction { txn: txn, _marker: PhantomData }) 197 | } 198 | } 199 | 200 | /// An LMDB read-write transaction. 201 | pub struct RwTransaction<'env> { 202 | txn: *mut ffi::MDB_txn, 203 | _marker: PhantomData<&'env ()>, 204 | } 205 | 206 | impl <'env> fmt::Debug for RwTransaction<'env> { 207 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 208 | f.debug_struct("RwTransaction").finish() 209 | } 210 | } 211 | 212 | impl <'env> Drop for RwTransaction<'env> { 213 | fn drop(&mut self) { 214 | unsafe { ffi::mdb_txn_abort(self.txn) } 215 | } 216 | } 217 | 218 | impl <'env> RwTransaction<'env> { 219 | 220 | /// Creates a new read-write transaction in the given environment. Prefer 221 | /// using `Environment::begin_ro_txn`. 222 | pub(crate) fn new(env: &'env Environment) -> Result> { 223 | let mut txn: *mut ffi::MDB_txn = ptr::null_mut(); 224 | unsafe { 225 | lmdb_result(ffi::mdb_txn_begin(env.env(), 226 | ptr::null_mut(), 227 | EnvironmentFlags::empty().bits(), 228 | &mut txn))?; 229 | Ok(RwTransaction { txn: txn, _marker: PhantomData }) 230 | } 231 | } 232 | 233 | /// Opens a database in the provided transaction, creating it if necessary. 234 | /// 235 | /// If `name` is `None`, then the default database will be opened, otherwise 236 | /// a named database will be opened. The database handle will be private to 237 | /// the transaction until the transaction is successfully committed. If the 238 | /// transaction is aborted the returned database handle should no longer be 239 | /// used. 240 | /// 241 | /// Prefer using `Environment::create_db`. 242 | /// 243 | /// ## Safety 244 | /// 245 | /// This function (as well as `Environment::open_db`, 246 | /// `Environment::create_db`, and `Database::open`) **must not** be called 247 | /// from multiple concurrent transactions in the same environment. A 248 | /// transaction which uses this function must finish (either commit or 249 | /// abort) before any other transaction may use this function. 250 | pub unsafe fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result { 251 | Database::new(self.txn(), name, flags.bits() | ffi::MDB_CREATE) 252 | } 253 | 254 | /// Opens a new read-write cursor on the given database and transaction. 255 | pub fn open_rw_cursor<'txn>(&'txn mut self, db: Database) -> Result> { 256 | RwCursor::new(self, db) 257 | } 258 | 259 | /// Stores an item into a database. 260 | /// 261 | /// This function stores key/data pairs in the database. The default 262 | /// behavior is to enter the new key/data pair, replacing any previously 263 | /// existing key if duplicates are disallowed, or adding a duplicate data 264 | /// item if duplicates are allowed (`DatabaseFlags::DUP_SORT`). 265 | pub fn put(&mut self, 266 | database: Database, 267 | key: &K, 268 | data: &D, 269 | flags: WriteFlags) 270 | -> Result<()> 271 | where K: AsRef<[u8]>, D: AsRef<[u8]> { 272 | let key = key.as_ref(); 273 | let data = data.as_ref(); 274 | let mut key_val: ffi::MDB_val = ffi::MDB_val { mv_size: key.len() as size_t, 275 | mv_data: key.as_ptr() as *mut c_void }; 276 | let mut data_val: ffi::MDB_val = ffi::MDB_val { mv_size: data.len() as size_t, 277 | mv_data: data.as_ptr() as *mut c_void }; 278 | unsafe { 279 | lmdb_result(ffi::mdb_put(self.txn(), 280 | database.dbi(), 281 | &mut key_val, 282 | &mut data_val, 283 | flags.bits())) 284 | } 285 | } 286 | 287 | /// Returns a buffer which can be used to write a value into the item at the 288 | /// given key and with the given length. The buffer must be completely 289 | /// filled by the caller. 290 | pub fn reserve<'txn, K>(&'txn mut self, 291 | database: Database, 292 | key: &K, 293 | len: size_t, 294 | flags: WriteFlags) 295 | -> Result<&'txn mut [u8]> 296 | where K: AsRef<[u8]> { 297 | let key = key.as_ref(); 298 | let mut key_val: ffi::MDB_val = ffi::MDB_val { mv_size: key.len() as size_t, 299 | mv_data: key.as_ptr() as *mut c_void }; 300 | let mut data_val: ffi::MDB_val = ffi::MDB_val { mv_size: len, 301 | mv_data: ptr::null_mut::() }; 302 | unsafe { 303 | lmdb_result(ffi::mdb_put(self.txn(), 304 | database.dbi(), 305 | &mut key_val, 306 | &mut data_val, 307 | flags.bits() | ffi::MDB_RESERVE))?; 308 | Ok(slice::from_raw_parts_mut(data_val.mv_data as *mut u8, 309 | data_val.mv_size as usize)) 310 | } 311 | } 312 | 313 | /// Deletes an item from a database. 314 | /// 315 | /// This function removes key/data pairs from the database. If the database 316 | /// does not support sorted duplicate data items (`DatabaseFlags::DUP_SORT`) 317 | /// the data parameter is ignored. If the database supports sorted 318 | /// duplicates and the data parameter is `None`, all of the duplicate data 319 | /// items for the key will be deleted. Otherwise, if the data parameter is 320 | /// `Some` only the matching data item will be deleted. This function will 321 | /// return `Error::NotFound` if the specified key/data pair is not in the 322 | /// database. 323 | pub fn del(&mut self, 324 | database: Database, 325 | key: &K, 326 | data: Option<&[u8]>) 327 | -> Result<()> 328 | where K: AsRef<[u8]> { 329 | let key = key.as_ref(); 330 | let mut key_val: ffi::MDB_val = ffi::MDB_val { mv_size: key.len() as size_t, 331 | mv_data: key.as_ptr() as *mut c_void }; 332 | let data_val: Option = 333 | data.map(|data| ffi::MDB_val { mv_size: data.len() as size_t, 334 | mv_data: data.as_ptr() as *mut c_void }); 335 | unsafe { 336 | lmdb_result(ffi::mdb_del(self.txn(), 337 | database.dbi(), 338 | &mut key_val, 339 | data_val.map(|mut data_val| &mut data_val as *mut _) 340 | .unwrap_or(ptr::null_mut()))) 341 | } 342 | } 343 | 344 | /// Empties the given database. All items will be removed. 345 | pub fn clear_db(&mut self, db: Database) -> Result<()> { 346 | unsafe { lmdb_result(ffi::mdb_drop(self.txn(), db.dbi(), 0)) } 347 | } 348 | 349 | /// Drops the database from the environment. 350 | /// 351 | /// ## Safety 352 | /// 353 | /// This method is unsafe in the same ways as `Environment::close_db`, and 354 | /// should be used accordingly. 355 | pub unsafe fn drop_db(&mut self, db: Database) -> Result<()> { 356 | lmdb_result(ffi::mdb_drop(self.txn, db.dbi(), 1)) 357 | } 358 | 359 | /// Begins a new nested transaction inside of this transaction. 360 | pub fn begin_nested_txn<'txn>(&'txn mut self) -> Result> { 361 | let mut nested: *mut ffi::MDB_txn = ptr::null_mut(); 362 | unsafe { 363 | let env: *mut ffi::MDB_env = ffi::mdb_txn_env(self.txn()); 364 | ffi::mdb_txn_begin(env, self.txn(), 0, &mut nested); 365 | } 366 | Ok(RwTransaction { txn: nested, _marker: PhantomData }) 367 | } 368 | } 369 | 370 | impl <'env> Transaction for RwTransaction<'env> { 371 | fn txn(&self) -> *mut ffi::MDB_txn { 372 | self.txn 373 | } 374 | } 375 | 376 | #[cfg(test)] 377 | mod test { 378 | 379 | use libc::size_t; 380 | use rand::{Rng, XorShiftRng}; 381 | use std::io::Write; 382 | use std::ptr; 383 | use std::sync::{Arc, Barrier}; 384 | use std::thread::{self, JoinHandle}; 385 | use test::{Bencher, black_box}; 386 | 387 | use tempdir::TempDir; 388 | 389 | use environment::*; 390 | use error::*; 391 | use ffi::*; 392 | use flags::*; 393 | use super::*; 394 | use test_utils::*; 395 | 396 | #[test] 397 | fn test_put_get_del() { 398 | let dir = TempDir::new("test").unwrap(); 399 | let env = Environment::new().open(dir.path()).unwrap(); 400 | let db = env.open_db(None).unwrap(); 401 | 402 | let mut txn = env.begin_rw_txn().unwrap(); 403 | txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); 404 | txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); 405 | txn.put(db, b"key3", b"val3", WriteFlags::empty()).unwrap(); 406 | txn.commit().unwrap(); 407 | 408 | let mut txn = env.begin_rw_txn().unwrap(); 409 | assert_eq!(b"val1", txn.get(db, b"key1").unwrap()); 410 | assert_eq!(b"val2", txn.get(db, b"key2").unwrap()); 411 | assert_eq!(b"val3", txn.get(db, b"key3").unwrap()); 412 | assert_eq!(txn.get(db, b"key"), Err(Error::NotFound)); 413 | 414 | txn.del(db, b"key1", None).unwrap(); 415 | assert_eq!(txn.get(db, b"key1"), Err(Error::NotFound)); 416 | } 417 | 418 | #[test] 419 | fn test_reserve() { 420 | let dir = TempDir::new("test").unwrap(); 421 | let env = Environment::new().open(dir.path()).unwrap(); 422 | let db = env.open_db(None).unwrap(); 423 | 424 | let mut txn = env.begin_rw_txn().unwrap(); 425 | { 426 | let mut writer = txn.reserve(db, b"key1", 4, WriteFlags::empty()).unwrap(); 427 | writer.write_all(b"val1").unwrap(); 428 | } 429 | txn.commit().unwrap(); 430 | 431 | let mut txn = env.begin_rw_txn().unwrap(); 432 | assert_eq!(b"val1", txn.get(db, b"key1").unwrap()); 433 | assert_eq!(txn.get(db, b"key"), Err(Error::NotFound)); 434 | 435 | txn.del(db, b"key1", None).unwrap(); 436 | assert_eq!(txn.get(db, b"key1"), Err(Error::NotFound)); 437 | } 438 | 439 | #[test] 440 | fn test_inactive_txn() { 441 | let dir = TempDir::new("test").unwrap(); 442 | let env = Environment::new().open(dir.path()).unwrap(); 443 | let db = env.open_db(None).unwrap(); 444 | 445 | { 446 | let mut txn = env.begin_rw_txn().unwrap(); 447 | txn.put(db, b"key", b"val", WriteFlags::empty()).unwrap(); 448 | txn.commit().unwrap(); 449 | } 450 | 451 | let txn = env.begin_ro_txn().unwrap(); 452 | let inactive = txn.reset(); 453 | let active = inactive.renew().unwrap(); 454 | assert!(active.get(db, b"key").is_ok()); 455 | } 456 | 457 | #[test] 458 | fn test_nested_txn() { 459 | let dir = TempDir::new("test").unwrap(); 460 | let env = Environment::new().open(dir.path()).unwrap(); 461 | let db = env.open_db(None).unwrap(); 462 | 463 | let mut txn = env.begin_rw_txn().unwrap(); 464 | txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); 465 | 466 | { 467 | let mut nested = txn.begin_nested_txn().unwrap(); 468 | nested.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); 469 | assert_eq!(nested.get(db, b"key1").unwrap(), b"val1"); 470 | assert_eq!(nested.get(db, b"key2").unwrap(), b"val2"); 471 | } 472 | 473 | assert_eq!(txn.get(db, b"key1").unwrap(), b"val1"); 474 | assert_eq!(txn.get(db, b"key2"), Err(Error::NotFound)); 475 | } 476 | 477 | #[test] 478 | fn test_clear_db() { 479 | let dir = TempDir::new("test").unwrap(); 480 | let env = Environment::new().open(dir.path()).unwrap(); 481 | let db = env.open_db(None).unwrap(); 482 | 483 | { 484 | let mut txn = env.begin_rw_txn().unwrap(); 485 | txn.put(db, b"key", b"val", WriteFlags::empty()).unwrap(); 486 | txn.commit().unwrap(); 487 | } 488 | 489 | { 490 | let mut txn = env.begin_rw_txn().unwrap(); 491 | txn.clear_db(db).unwrap(); 492 | txn.commit().unwrap(); 493 | } 494 | 495 | let txn = env.begin_ro_txn().unwrap(); 496 | assert_eq!(txn.get(db, b"key"), Err(Error::NotFound)); 497 | } 498 | 499 | 500 | #[test] 501 | fn test_drop_db() { 502 | let dir = TempDir::new("test").unwrap(); 503 | let env = Environment::new().set_max_dbs(2) 504 | .open(dir.path()).unwrap(); 505 | let db = env.create_db(Some("test"), DatabaseFlags::empty()).unwrap(); 506 | 507 | { 508 | let mut txn = env.begin_rw_txn().unwrap(); 509 | txn.put(db, b"key", b"val", WriteFlags::empty()).unwrap(); 510 | txn.commit().unwrap(); 511 | } 512 | { 513 | let mut txn = env.begin_rw_txn().unwrap(); 514 | unsafe { txn.drop_db(db).unwrap(); } 515 | txn.commit().unwrap(); 516 | } 517 | 518 | assert_eq!(env.open_db(Some("test")), Err(Error::NotFound)); 519 | } 520 | 521 | #[test] 522 | fn test_concurrent_readers_single_writer() { 523 | let dir = TempDir::new("test").unwrap(); 524 | let env: Arc = Arc::new(Environment::new().open(dir.path()).unwrap()); 525 | 526 | let n = 10usize; // Number of concurrent readers 527 | let barrier = Arc::new(Barrier::new(n + 1)); 528 | let mut threads: Vec> = Vec::with_capacity(n); 529 | 530 | let key = b"key"; 531 | let val = b"val"; 532 | 533 | for _ in 0..n { 534 | let reader_env = env.clone(); 535 | let reader_barrier = barrier.clone(); 536 | 537 | threads.push(thread::spawn(move|| { 538 | let db = reader_env.open_db(None).unwrap(); 539 | { 540 | let txn = reader_env.begin_ro_txn().unwrap(); 541 | assert_eq!(txn.get(db, key), Err(Error::NotFound)); 542 | txn.abort(); 543 | } 544 | reader_barrier.wait(); 545 | reader_barrier.wait(); 546 | { 547 | let txn = reader_env.begin_ro_txn().unwrap(); 548 | txn.get(db, key).unwrap() == val 549 | } 550 | })); 551 | } 552 | 553 | let db = env.open_db(None).unwrap(); 554 | let mut txn = env.begin_rw_txn().unwrap(); 555 | barrier.wait(); 556 | txn.put(db, key, val, WriteFlags::empty()).unwrap(); 557 | txn.commit().unwrap(); 558 | barrier.wait(); 559 | 560 | assert!(threads.into_iter().all(|b| b.join().unwrap())) 561 | } 562 | 563 | #[test] 564 | fn test_concurrent_writers() { 565 | let dir = TempDir::new("test").unwrap(); 566 | let env = Arc::new(Environment::new().open(dir.path()).unwrap()); 567 | 568 | let n = 10usize; // Number of concurrent writers 569 | let mut threads: Vec> = Vec::with_capacity(n); 570 | 571 | let key = "key"; 572 | let val = "val"; 573 | 574 | for i in 0..n { 575 | let writer_env = env.clone(); 576 | 577 | threads.push(thread::spawn(move|| { 578 | let db = writer_env.open_db(None).unwrap(); 579 | let mut txn = writer_env.begin_rw_txn().unwrap(); 580 | txn.put(db, 581 | &format!("{}{}", key, i), 582 | &format!("{}{}", val, i), 583 | WriteFlags::empty()) 584 | .unwrap(); 585 | txn.commit().is_ok() 586 | })); 587 | } 588 | assert!(threads.into_iter().all(|b| b.join().unwrap())); 589 | 590 | let db = env.open_db(None).unwrap(); 591 | let txn = env.begin_ro_txn().unwrap(); 592 | 593 | for i in 0..n { 594 | assert_eq!(format!("{}{}", val, i).as_bytes(), 595 | txn.get(db, &format!("{}{}", key, i)).unwrap()); 596 | } 597 | } 598 | 599 | #[bench] 600 | fn bench_get_rand(b: &mut Bencher) { 601 | let n = 100u32; 602 | let (_dir, env) = setup_bench_db(n); 603 | let db = env.open_db(None).unwrap(); 604 | let txn = env.begin_ro_txn().unwrap(); 605 | 606 | let mut keys: Vec = (0..n).map(|n| get_key(n)).collect(); 607 | XorShiftRng::new_unseeded().shuffle(&mut keys[..]); 608 | 609 | b.iter(|| { 610 | let mut i = 0usize; 611 | for key in &keys { 612 | i = i + txn.get(db, key).unwrap().len(); 613 | } 614 | black_box(i); 615 | }); 616 | } 617 | 618 | #[bench] 619 | fn bench_get_rand_raw(b: &mut Bencher) { 620 | let n = 100u32; 621 | let (_dir, env) = setup_bench_db(n); 622 | let db = env.open_db(None).unwrap(); 623 | let _txn = env.begin_ro_txn().unwrap(); 624 | 625 | let mut keys: Vec = (0..n).map(|n| get_key(n)).collect(); 626 | XorShiftRng::new_unseeded().shuffle(&mut keys[..]); 627 | 628 | let dbi = db.dbi(); 629 | let txn = _txn.txn(); 630 | 631 | let mut key_val: MDB_val = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 632 | let mut data_val: MDB_val = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 633 | 634 | b.iter(|| unsafe { 635 | let mut i: size_t = 0; 636 | for key in &keys { 637 | key_val.mv_size = key.len() as size_t; 638 | key_val.mv_data = key.as_bytes().as_ptr() as *mut _; 639 | 640 | mdb_get(txn, dbi, &mut key_val, &mut data_val); 641 | 642 | i = i + key_val.mv_size; 643 | } 644 | black_box(i); 645 | }); 646 | } 647 | 648 | #[bench] 649 | fn bench_put_rand(b: &mut Bencher) { 650 | let n = 100u32; 651 | let (_dir, env) = setup_bench_db(0); 652 | let db = env.open_db(None).unwrap(); 653 | 654 | let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect(); 655 | XorShiftRng::new_unseeded().shuffle(&mut items[..]); 656 | 657 | b.iter(|| { 658 | let mut txn = env.begin_rw_txn().unwrap(); 659 | for &(ref key, ref data) in items.iter() { 660 | txn.put(db, key, data, WriteFlags::empty()).unwrap(); 661 | } 662 | txn.abort(); 663 | }); 664 | } 665 | 666 | #[bench] 667 | fn bench_put_rand_raw(b: &mut Bencher) { 668 | let n = 100u32; 669 | let (_dir, _env) = setup_bench_db(0); 670 | let db = _env.open_db(None).unwrap(); 671 | 672 | let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect(); 673 | XorShiftRng::new_unseeded().shuffle(&mut items[..]); 674 | 675 | let dbi = db.dbi(); 676 | let env = _env.env(); 677 | 678 | let mut key_val: MDB_val = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 679 | let mut data_val: MDB_val = MDB_val { mv_size: 0, mv_data: ptr::null_mut() }; 680 | 681 | b.iter(|| unsafe { 682 | let mut txn: *mut MDB_txn = ptr::null_mut(); 683 | mdb_txn_begin(env, ptr::null_mut(), 0, &mut txn); 684 | 685 | let mut i: ::libc::c_int = 0; 686 | for &(ref key, ref data) in items.iter() { 687 | 688 | key_val.mv_size = key.len() as size_t; 689 | key_val.mv_data = key.as_bytes().as_ptr() as *mut _; 690 | data_val.mv_size = data.len() as size_t; 691 | data_val.mv_data = data.as_bytes().as_ptr() as *mut _; 692 | 693 | i += mdb_put(txn, dbi, &mut key_val, &mut data_val, 0); 694 | } 695 | assert_eq!(0, i); 696 | mdb_txn_abort(txn); 697 | }); 698 | } 699 | } 700 | --------------------------------------------------------------------------------