├── .github
│   └── workflows
│       └── main.yml
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   ├── cursor.rs
│   ├── transaction.rs
│   └── utils.rs
├── mdbx-sys
│   ├── Cargo.toml
│   ├── build.rs
│   ├── libmdbx
│   │   ├── CMakeLists.txt
│   │   ├── ChangeLog.md
│   │   ├── GNUmakefile
│   │   ├── LICENSE
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── VERSION.txt
│   │   ├── cmake
│   │   │   ├── compiler.cmake
│   │   │   ├── profile.cmake
│   │   │   └── utils.cmake
│   │   ├── config.h.in
│   │   ├── man1
│   │   │   ├── mdbx_chk.1
│   │   │   ├── mdbx_copy.1
│   │   │   ├── mdbx_drop.1
│   │   │   ├── mdbx_dump.1
│   │   │   ├── mdbx_load.1
│   │   │   └── mdbx_stat.1
│   │   ├── mdbx.c
│   │   ├── mdbx.c++
│   │   ├── mdbx.h
│   │   ├── mdbx.h++
│   │   ├── mdbx_chk.c
│   │   ├── mdbx_copy.c
│   │   ├── mdbx_drop.c
│   │   ├── mdbx_dump.c
│   │   ├── mdbx_load.c
│   │   ├── mdbx_stat.c
│   │   └── ntdll.def
│   └── src
│       └── lib.rs
├── src
│   ├── codec.rs
│   ├── cursor.rs
│   ├── database.rs
│   ├── error.rs
│   ├── flags.rs
│   ├── lib.rs
│   ├── orm
│   │   ├── cursor.rs
│   │   ├── database.rs
│   │   ├── impls.rs
│   │   ├── mod.rs
│   │   ├── traits.rs
│   │   └── transaction.rs
│   ├── table.rs
│   └── transaction.rs
└── tests
    ├── cursor.rs
    ├── environment.rs
    └── transaction.rs

/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | on:
2 |   pull_request:
3 |   push:
4 |     branches:
5 |       - master
6 | 
7 | name: CI
8 | 
9 | jobs:
10 |   ci:
11 |     strategy:
12 |       matrix:
13 |         os: [ubuntu-latest, macos-latest, windows-latest]
14 |     runs-on: ${{ matrix.os }}
15 | 
16 |     steps:
17 |       - uses: KyleMayes/install-llvm-action@v2
18 |         if: matrix.os == 'windows-latest'
19 |         with:
20 |           version: "18.1"
21 |           directory: ${{ runner.temp }}/llvm
22 |       - name: Set LIBCLANG_PATH
23 |         run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
24 |         if: matrix.os == 'windows-latest'
25 |       - uses: actions/checkout@main
26 |       - uses: dtolnay/rust-toolchain@master
27 |         with:
28 |           toolchain: stable
29 |           components: rustfmt, clippy
30 |       - run: cargo fmt --all --check -- --config=imports_granularity=Crate
31 |       - uses: taiki-e/install-action@v2
32 |         with:
33 |           tool: cargo-hack
34 |       - run: cargo hack clippy --workspace --each-feature -- -D warnings
35 |       - run: cargo hack test --workspace --each-feature
36 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | Cargo.lock
3 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "libmdbx"
3 | version = "0.5.5"
4 | edition = "2024"
5 | license = "MPL-2.0"
6 | description = "Idiomatic and safe MDBX wrapper."
7 | documentation = "https://docs.rs/libmdbx" 8 | homepage = "https://github.com/vorot93/libmdbx-rs" 9 | repository = "https://github.com/vorot93/libmdbx-rs" 10 | readme = "README.md" 11 | keywords = ["MDBX", "database", "storage-engine", "bindings"] 12 | categories = ["database"] 13 | 14 | [lib] 15 | name = "libmdbx" 16 | 17 | [workspace] 18 | members = ["mdbx-sys"] 19 | 20 | [dependencies] 21 | anyhow = { version = "1", optional = true } 22 | arrayref = { version = "0.3", optional = true } 23 | arrayvec = { version = "0.7", optional = true } 24 | bytes = { version = "1", optional = true } 25 | bitflags = "2" 26 | ciborium = { version = "0.2", optional = true } 27 | derive_more = { version = "2", features = [ 28 | "deref", 29 | "deref_mut", 30 | "display", 31 | "from", 32 | ] } 33 | impls = { version = "1", optional = true } 34 | indexmap = "2" 35 | libc = "0.2" 36 | parking_lot = "0.12" 37 | sealed = "0.6" 38 | tempfile = { version = "3", optional = true } 39 | thiserror = "2" 40 | 41 | ffi = { package = "mdbx-sys", version = "=12.13.1", path = "./mdbx-sys" } 42 | 43 | lifetimed-bytes = { version = "0.1", optional = true } 44 | 45 | [dev-dependencies] 46 | criterion = "0.6" 47 | once_cell = "1" 48 | rand = "0.9" 49 | rand_xorshift = "0.4" 50 | serde = { version = "1", features = ["derive"] } 51 | tempfile = "3" 52 | 53 | [features] 54 | cbor = ["ciborium"] 55 | orm = ["anyhow", "arrayref", "arrayvec", "impls", "tempfile"] 56 | 57 | [[bench]] 58 | name = "cursor" 59 | harness = false 60 | 61 | [[bench]] 62 | name = "transaction" 63 | harness = false 64 | 65 | [package.metadata.docs.rs] 66 | all-features = true 67 | rustdoc-args = ["--cfg", "docsrs"] 68 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. 
"Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 
115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. 
If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. 
If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 
312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at https://mozilla.org/MPL/2.0/. 361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 374 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # libmdbx-rs 2 | 3 | Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). 4 | 5 | ## Updating the libmdbx Version 6 | 7 | To update the libmdbx version you must clone it and copy the `dist/` folder in `mdbx-sys/`. 8 | Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting). 
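Before re-vendoring, it helps to confirm which upstream release is currently checked in; the amalgamated sources record it in `VERSION.txt`. A minimal check, assuming the repository layout shown in the tree above:

```bash
# print the libmdbx release currently vendored under mdbx-sys/
cat mdbx-sys/libmdbx/VERSION.txt
```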
9 | 
10 | ```bash
11 | # clone libmdbx into a sibling directory at a specific tag
12 | git clone https://gitflic.ru/project/erthink/libmdbx.git ../libmdbx --branch v0.7.0
13 | make -C ../libmdbx dist
14 | 
15 | # copy the `libmdbx/dist/` folder just created into `mdbx-sys/libmdbx`
16 | rm -rf mdbx-sys/libmdbx
17 | cp -R ../libmdbx/dist mdbx-sys/libmdbx
18 | 
19 | # add the changes to the next commit you will make
20 | git add mdbx-sys/libmdbx
21 | ```
22 | 
23 | ## License
24 | The entire code within this repository is licensed under the [Mozilla Public License v2.0](./LICENSE).
25 | 
--------------------------------------------------------------------------------
/benches/cursor.rs:
--------------------------------------------------------------------------------
1 | mod utils;
2 | 
3 | use criterion::{Criterion, black_box, criterion_group, criterion_main};
4 | use ffi::*;
5 | use libmdbx::*;
6 | use std::ptr;
7 | use utils::*;
8 | 
9 | /// Benchmark of iterator sequential read performance.
10 | fn bench_get_seq_iter(c: &mut Criterion) {
11 |     let n = 100;
12 |     let (_dir, db) = setup_bench_db(n);
13 |     let txn = db.begin_ro_txn().unwrap();
14 |     let table = txn.open_table(None).unwrap();
15 | 
16 |     c.bench_function("bench_get_seq_iter", |b| {
17 |         b.iter(|| {
18 |             let mut cursor = txn.cursor(&table).unwrap();
19 |             let mut i = 0;
20 |             let mut count = 0u32;
21 | 
22 |             for (key_len, data_len) in cursor
23 |                 .iter::<ObjectLength, ObjectLength>()
24 |                 .map(Result::unwrap)
25 |             {
26 |                 i = i + *key_len + *data_len;
27 |                 count += 1;
28 |             }
29 |             for (key_len, data_len) in cursor
30 |                 .iter::<ObjectLength, ObjectLength>()
31 |                 .filter_map(Result::ok)
32 |             {
33 |                 i = i + *key_len + *data_len;
34 |                 count += 1;
35 |             }
36 | 
37 |             fn iterate<K: TransactionKind>(cursor: &mut Cursor<'_, K>) -> Result<()> {
38 |                 let mut i = 0;
39 |                 for result in cursor.iter::<ObjectLength, ObjectLength>() {
40 |                     let (key_len, data_len) = result?;
41 |                     i = i + *key_len + *data_len;
42 |                 }
43 |                 Ok(())
44 |             }
45 |             iterate(&mut cursor).unwrap();
46 | 
47 |             black_box(i);
48 |             assert_eq!(count, n);
49 |         })
50 |     });
51 | }
52 | 
53 | /// Benchmark of cursor sequential read performance.
54 | fn bench_get_seq_cursor(c: &mut Criterion) {
55 |     let n = 100;
56 |     let (_dir, db) = setup_bench_db(n);
57 |     let txn = db.begin_ro_txn().unwrap();
58 |     let table = txn.open_table(None).unwrap();
59 | 
60 |     c.bench_function("bench_get_seq_cursor", |b| {
61 |         b.iter(|| {
62 |             let (i, count) = txn
63 |                 .cursor(&table)
64 |                 .unwrap()
65 |                 .iter::<ObjectLength, ObjectLength>()
66 |                 .map(Result::unwrap)
67 |                 .fold((0, 0), |(i, count), (key, val)| {
68 |                     (i + *key + *val, count + 1)
69 |                 });
70 | 
71 |             black_box(i);
72 |             assert_eq!(count, n);
73 |         })
74 |     });
75 | }
76 | 
77 | /// Benchmark of raw MDBX sequential read performance (control).
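///
/// This control case bypasses the safe `libmdbx` wrapper and drives the
/// `mdbx-sys` FFI directly: it opens an `MDBX_cursor`, walks the table with
/// `mdbx_cursor_get(..., MDBX_NEXT)`, and sums the raw `MDBX_val::iov_len`
/// fields of each key/value pair as a baseline for the wrapper benchmarks above.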
78 | fn bench_get_seq_raw(c: &mut Criterion) {
79 |     let n = 100;
80 |     let (_dir, db) = setup_bench_db(n);
81 | 
82 |     let dbi = db.begin_ro_txn().unwrap().open_table(None).unwrap().dbi();
83 |     let _txn = db.begin_ro_txn().unwrap();
84 |     let txn = _txn.txn();
85 | 
86 |     let mut key = MDBX_val {
87 |         iov_len: 0,
88 |         iov_base: ptr::null_mut(),
89 |     };
90 |     let mut data = MDBX_val {
91 |         iov_len: 0,
92 |         iov_base: ptr::null_mut(),
93 |     };
94 |     let mut cursor: *mut MDBX_cursor = ptr::null_mut();
95 | 
96 |     c.bench_function("bench_get_seq_raw", |b| {
97 |         b.iter(|| unsafe {
98 |             mdbx_cursor_open(txn.0, dbi, &mut cursor);
99 |             let mut i = 0;
100 |             let mut count = 0u32;
101 | 
102 |             while mdbx_cursor_get(cursor, &mut key, &mut data, MDBX_NEXT) == 0 {
103 |                 i += key.iov_len + data.iov_len;
104 |                 count += 1;
105 |             }
106 | 
107 |             black_box(i);
108 |             assert_eq!(count, n);
109 |             mdbx_cursor_close(cursor);
110 |         })
111 |     });
112 | }
113 | 
114 | criterion_group!(
115 |     benches,
116 |     bench_get_seq_iter,
117 |     bench_get_seq_cursor,
118 |     bench_get_seq_raw
119 | );
120 | criterion_main!(benches);
121 | 
--------------------------------------------------------------------------------
/benches/transaction.rs:
--------------------------------------------------------------------------------
1 | mod utils;
2 | 
3 | use criterion::{Criterion, black_box, criterion_group, criterion_main};
4 | use ffi::*;
5 | use libc::size_t;
6 | use libmdbx::{ObjectLength, WriteFlags};
7 | use rand::{SeedableRng, prelude::SliceRandom};
8 | use rand_xorshift::XorShiftRng;
9 | use std::ptr;
10 | use utils::*;
11 | 
12 | fn bench_get_rand(c: &mut Criterion) {
13 |     let n = 100u32;
14 |     let (_dir, db) = setup_bench_db(n);
15 |     let txn = db.begin_ro_txn().unwrap();
16 |     let table = txn.open_table(None).unwrap();
17 | 
18 |     let mut keys: Vec<String> = (0..n).map(get_key).collect();
19 |     keys.shuffle(&mut XorShiftRng::from_seed(Default::default()));
20 | 
21 |     c.bench_function("bench_get_rand", |b| {
22 |         b.iter(|| {
23 |             let mut i = 0usize;
24 |             for key in &keys {
25 |                 i += *txn
26 |                     .get::<ObjectLength>(&table, key.as_bytes())
27 |                     .unwrap()
28 |                     .unwrap();
29 |             }
30 |             black_box(i);
31 |         })
32 |     });
33 | }
34 | 
35 | fn bench_get_rand_raw(c: &mut Criterion) {
36 |     let n = 100u32;
37 |     let (_dir, db) = setup_bench_db(n);
38 |     let _txn = db.begin_ro_txn().unwrap();
39 |     let table = _txn.open_table(None).unwrap();
40 | 
41 |     let mut keys: Vec<String> = (0..n).map(get_key).collect();
42 |     keys.shuffle(&mut XorShiftRng::from_seed(Default::default()));
43 | 
44 |     let dbi = table.dbi();
45 |     let txn = _txn.txn();
46 | 
47 |     let mut key_val: MDBX_val = MDBX_val {
48 |         iov_len: 0,
49 |         iov_base: ptr::null_mut(),
50 |     };
51 |     let mut data_val: MDBX_val = MDBX_val {
52 |         iov_len: 0,
53 |         iov_base: ptr::null_mut(),
54 |     };
55 | 
56 |     c.bench_function("bench_get_rand_raw", |b| {
57 |         b.iter(|| unsafe {
58 |             let mut i: size_t = 0;
59 |             for key in &keys {
60 |                 key_val.iov_len = key.len() as size_t;
61 |                 key_val.iov_base = key.as_bytes().as_ptr() as *mut _;
62 | 
63 |                 mdbx_get(txn.0, dbi, &key_val, &mut data_val);
64 | 
65 |                 i += key_val.iov_len;
66 |             }
67 |             black_box(i);
68 |         })
69 |     });
70 | }
71 | 
72 | fn bench_put_rand(c: &mut Criterion) {
73 |     let n = 100u32;
74 |     let (_dir, db) = setup_bench_db(0);
75 | 
76 |     let txn = db.begin_ro_txn().unwrap();
77 |     let table = txn.open_table(None).unwrap();
78 |     txn.prime_for_permaopen(table);
79 |     let table = txn.commit_and_rebind_open_dbs().unwrap().1.remove(0);
80 | 
81 |     let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
82 |     items.shuffle(&mut XorShiftRng::from_seed(Default::default()));
83 | 
84 |     c.bench_function("bench_put_rand", |b| {
85 |         b.iter(|| {
86 |             let txn = db.begin_rw_txn().unwrap();
87 |             for (key, data) in items.iter() {
88 |                 txn.put(&table, key, data, WriteFlags::empty()).unwrap();
89 |             }
90 |         })
91 |     });
92 | }
93 | 
94 | fn bench_put_rand_raw(c: &mut Criterion) {
95 |     let n = 100u32;
96 |     let (_dir, _db) = setup_bench_db(0);
97 | 
98 |     let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
99 |     items.shuffle(&mut XorShiftRng::from_seed(Default::default()));
100 | 
101 |     let dbi = _db.begin_ro_txn().unwrap().open_table(None).unwrap().dbi();
102 |     let env = _db.ptr();
103 | 
104 |     let mut key_val: MDBX_val = MDBX_val {
105 |         iov_len: 0,
106 |         iov_base: ptr::null_mut(),
107 |     };
108 |     let mut data_val: MDBX_val = MDBX_val {
109 |         iov_len: 0,
110 |         iov_base: ptr::null_mut(),
111 |     };
112 | 
113 |     c.bench_function("bench_put_rand_raw", |b| {
114 |         b.iter(|| unsafe {
115 |             let mut txn: *mut MDBX_txn = ptr::null_mut();
116 |             mdbx_txn_begin_ex(env.0, ptr::null_mut(), 0, &mut txn, ptr::null_mut());
117 | 
118 |             let mut i: ::libc::c_int = 0;
119 |             for (key, data) in items.iter() {
120 |                 key_val.iov_len = key.len() as size_t;
121 |                 key_val.iov_base = key.as_bytes().as_ptr() as *mut _;
122 |                 data_val.iov_len = data.len() as size_t;
123 |                 data_val.iov_base = data.as_bytes().as_ptr() as *mut _;
124 | 
125 |                 i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0);
126 |             }
127 |             assert_eq!(0, i);
128 |             mdbx_txn_abort(txn);
129 |         })
130 |     });
131 | }
132 | 
133 | criterion_group!(
134 |     benches,
135 |     bench_get_rand,
136 |     bench_get_rand_raw,
137 |     bench_put_rand,
138 |     bench_put_rand_raw
139 | );
140 | criterion_main!(benches);
141 | 
--------------------------------------------------------------------------------
/benches/utils.rs:
--------------------------------------------------------------------------------
1 | use libmdbx::{Database, NoWriteMap, WriteFlags};
2 | use tempfile::{TempDir, tempdir};
3 | 
4 | pub fn get_key(n: u32) -> String {
5 |     format!("key{n}")
6 | }
7 | 
8 | pub fn get_data(n: u32) -> String {
9 |     format!("data{n}")
10 | }
11 | 
12 | pub fn setup_bench_db(num_rows: u32) -> (TempDir, Database<NoWriteMap>) {
13 |     let dir = tempdir().unwrap();
14 |     let db = Database::open(&dir).unwrap();
15 | 
16 |     {
17 |         let txn = db.begin_rw_txn().unwrap();
18 |         let table = txn.open_table(None).unwrap();
19 |         for i in 0..num_rows {
20 |             txn.put(&table, get_key(i), get_data(i), WriteFlags::empty())
21 |                 .unwrap();
22 |         }
23 |         txn.commit().unwrap();
24 |     }
25 |     (dir, db)
26 | }
27 | 
--------------------------------------------------------------------------------
/mdbx-sys/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "mdbx-sys"
3 | version = "12.13.1"
4 | edition = "2024"
5 | license = "MPL-2.0"
6 | description = "Rust bindings for libmdbx."
7 | documentation = "https://docs.rs/mdbx-sys"
8 | homepage = "https://github.com/vorot93/libmdbx-rs"
9 | repository = "https://github.com/vorot93/libmdbx-rs"
10 | keywords = ["MDBX", "database", "storage-engine", "bindings", "library"]
11 | categories = ["database", "external-ffi-bindings"]
12 | 
13 | [lib]
14 | name = "mdbx_sys"
15 | 
16 | [dependencies]
17 | libc = "0.2"
18 | 
19 | [build-dependencies]
20 | cc = "1.0"
21 | bindgen = { version = "0.71", default-features = false, features = ["runtime"] }
22 | 
--------------------------------------------------------------------------------
/mdbx-sys/build.rs:
--------------------------------------------------------------------------------
1 | use bindgen::{
2 |     Formatter,
3 |     callbacks::{IntKind, ParseCallbacks},
4 | };
5 | use std::{env, path::PathBuf};
6 | 
7 | #[derive(Debug)]
8 | struct Callbacks;
9 | 
10 | impl ParseCallbacks for Callbacks {
11 |     fn int_macro(&self, name: &str, _value: i64) -> Option<IntKind> {
12 |         match name {
13 |             "MDBX_SUCCESS"
14 |             | "MDBX_KEYEXIST"
15 |             | "MDBX_NOTFOUND"
16 |             | "MDBX_PAGE_NOTFOUND"
17 |             | "MDBX_CORRUPTED"
18 |             | "MDBX_PANIC"
19 |             | "MDBX_VERSION_MISMATCH"
20 |             | "MDBX_INVALID"
21 |             | "MDBX_MAP_FULL"
22 |             | "MDBX_DBS_FULL"
23 |             | "MDBX_READERS_FULL"
24 |             | "MDBX_TLS_FULL"
25 |             | "MDBX_TXN_FULL"
26 |             | "MDBX_CURSOR_FULL"
27 |             | "MDBX_PAGE_FULL"
28 |             | "MDBX_MAP_RESIZED"
29 |             | "MDBX_INCOMPATIBLE"
30 |             | "MDBX_BAD_RSLOT"
31 |             | "MDBX_BAD_TXN"
32 |             | "MDBX_BAD_VALSIZE"
33 |             | "MDBX_BAD_DBI"
34 |             | "MDBX_LOG_DONTCHANGE"
35 |             | "MDBX_DBG_DONTCHANGE"
36 |             | "MDBX_RESULT_TRUE"
37 |             | "MDBX_UNABLE_EXTEND_MAPSIZE"
38 |             | "MDBX_PROBLEM"
39 |             | "MDBX_LAST_LMDB_ERRCODE"
40 |             | "MDBX_BUSY"
41 |             | "MDBX_EMULTIVAL"
42 |             | "MDBX_EBADSIGN"
43 |             | "MDBX_WANNA_RECOVERY"
44 |             | "MDBX_EKEYMISMATCH"
45 |             | "MDBX_TOO_LARGE"
46 |             | "MDBX_THREAD_MISMATCH"
47 |             | "MDBX_TXN_OVERLAPPING"
48 |             | "MDBX_LAST_ERRCODE" => Some(IntKind::Int),
49 |             _ => Some(IntKind::UInt),
50 |         }
51 |     }
52 | }
53 | 
54 | fn main() {
55 |     let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap());
56 |     mdbx.push("libmdbx");
57 | 
58 |     let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
59 | 
60 |     let bindings = bindgen::Builder::default()
61 |         .header(mdbx.join("mdbx.h").to_string_lossy())
62 |         .allowlist_var("^(MDBX|mdbx)_.*")
63 |         .allowlist_type("^(MDBX|mdbx)_.*")
64 |         .allowlist_function("^(MDBX|mdbx)_.*")
65 |         .size_t_is_usize(true)
66 |         .ctypes_prefix("::libc")
67 |         .parse_callbacks(Box::new(Callbacks))
68 |         .layout_tests(false)
69 |         .prepend_enum_name(false)
70 |         .generate_comments(false)
71 |         .disable_header_comment()
72 |         .formatter(Formatter::None)
73 |         .generate()
74 |         .expect("Unable to generate bindings");
75 | 
76 |     bindings
77 |         .write_to_file(out_path.join("bindings.rs"))
78 |         .expect("Couldn't write bindings!");
79 | 
80 |     let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap());
81 |     mdbx.push("libmdbx");
82 | 
83 |     let mut cc_builder = cc::Build::new();
84 |     cc_builder
85 |         .flag_if_supported("-Wno-unused-parameter")
86 |         .flag_if_supported("-Wbad-function-cast")
87 |         .flag_if_supported("-Wuninitialized");
88 | 
89 |     let flags = format!(
90 |         "\"-NDEBUG={} {}\"",
91 |         u8::from(!cfg!(debug_assertions)),
92 |         cc_builder
93 |             .get_compiler()
94 |             .cflags_env()
95 |             .to_str()
96 |             .unwrap()
97 |             .trim()
98 |     );
99 | 
100 |     cc_builder
101 |         .define("MDBX_BUILD_FLAGS", flags.as_str())
102 |         .define("MDBX_TXN_CHECKOWNER", "0");
103 | 
104 |     // __cpu_model is not available in musl
105 |     if
env::var("TARGET").unwrap().ends_with("-musl") { 106 | cc_builder.define("MDBX_HAVE_BUILTIN_CPU_SUPPORTS", "0"); 107 | } 108 | 109 | if cfg!(windows) { 110 | println!(r"cargo:rustc-link-lib=dylib=ntdll"); 111 | println!(r"cargo:rustc-link-lib=dylib=user32"); 112 | } 113 | 114 | cc_builder.file(mdbx.join("mdbx.c")).compile("libmdbx.a"); 115 | } 116 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/GNUmakefile: -------------------------------------------------------------------------------- 1 | # This makefile is for GNU Make 3.80 or above, and nowadays provided 2 | # just for compatibility and preservation of traditions. 3 | # 4 | # Please use CMake in case of any difficulties or 5 | # problems with this old-school's magic. 6 | # 7 | ################################################################################ 8 | # 9 | # Basic internal definitions. For a customizable variables and options see below. 10 | # 11 | $(info // The GNU Make $(MAKE_VERSION)) 12 | SHELL := $(shell env bash -c 'echo $$BASH') 13 | MAKE_VERx3 := $(shell printf "%3s%3s%3s" $(subst ., ,$(MAKE_VERSION))) 14 | make_lt_3_81 := $(shell expr "$(MAKE_VERx3)" "<" " 3 81") 15 | ifneq ($(make_lt_3_81),0) 16 | $(error Please use GNU Make 3.81 or above) 17 | endif 18 | make_ge_4_1 := $(shell expr "$(MAKE_VERx3)" ">=" " 4 1") 19 | SRC_PROBE_C := $(shell [ -f mdbx.c ] && echo mdbx.c || echo src/osal.c) 20 | SRC_PROBE_CXX := $(shell [ -f mdbx.c++ ] && echo mdbx.c++ || echo src/mdbx.c++) 21 | UNAME := $(shell uname -s 2>/dev/null || echo Unknown) 22 | 23 | define cxx_filesystem_probe 24 | int main(int argc, const char*argv[]) { 25 | mdbx::filesystem::path probe(argv[0]); 26 | if (argc != 1) throw mdbx::filesystem::filesystem_error(std::string("fake"), std::error_code()); 27 | return mdbx::filesystem::is_directory(probe.relative_path()); 28 | } 29 | endef 30 | # 31 | ################################################################################ 32 | # 33 | # Use `make options` to list the available libmdbx build options. 34 | # 35 | # Note that the defaults should already be correct for most platforms; 36 | # you should not need to change any of these. Read their descriptions 37 | # in README and source code (see src/options.h) if you do. 38 | # 39 | 40 | # install sandbox 41 | DESTDIR ?= 42 | INSTALL ?= install 43 | # install prefixes (inside sandbox) 44 | prefix ?= /usr/local 45 | mandir ?= $(prefix)/man 46 | # lib/bin suffix for multiarch/biarch, e.g. 
'.x86_64' 47 | suffix ?= 48 | 49 | # toolchain 50 | CC ?= gcc 51 | CXX ?= g++ 52 | CFLAGS_EXTRA ?= 53 | LD ?= ld 54 | 55 | # build options 56 | MDBX_BUILD_OPTIONS ?=-DNDEBUG=1 57 | MDBX_BUILD_TIMESTAMP ?=$(shell date +%Y-%m-%dT%H:%M:%S%z) 58 | MDBX_BUILD_CXX ?= YES 59 | 60 | # probe and compose common compiler flags with variable expansion trick (seems this work two times per session for GNU Make 3.81) 61 | CFLAGS ?= $(strip $(eval CFLAGS := -std=gnu11 -O2 -g -Wall -Werror -Wextra -Wpedantic -ffunction-sections -fPIC -fvisibility=hidden -pthread -Wno-error=attributes $$(shell for opt in -fno-semantic-interposition -Wno-unused-command-line-argument -Wno-tautological-compare; do [ -z "$$$$($(CC) '-DMDBX_BUILD_FLAGS="probe"' $$$${opt} -c $(SRC_PROBE_C) -o /dev/null >/dev/null 2>&1 || echo failed)" ] && echo "$$$${opt} "; done)$(CFLAGS_EXTRA))$(CFLAGS)) 62 | 63 | # choosing C++ standard with variable expansion trick (seems this work two times per session for GNU Make 3.81) 64 | CXXSTD ?= $(eval CXXSTD := $$(shell for std in gnu++23 c++23 gnu++2b c++2b gnu++20 c++20 gnu++2a c++2a gnu++17 c++17 gnu++1z c++1z gnu++14 c++14 gnu++1y c++1y gnu+11 c++11 gnu++0x c++0x; do $(CXX) -std=$$$${std} -c $(SRC_PROBE_CXX) -o /dev/null 2>probe4std-$$$${std}.err >/dev/null && echo "-std=$$$${std}" && exit; done))$(CXXSTD) 65 | CXXFLAGS ?= $(strip $(CXXSTD) $(filter-out -std=gnu11,$(CFLAGS))) 66 | 67 | # libraries and options for linking 68 | EXE_LDFLAGS ?= -pthread 69 | ifneq ($(make_ge_4_1),1) 70 | # don't use variable expansion trick as workaround for bugs of GNU Make before 4.1 71 | LIBS ?= $(shell $(uname2libs)) 72 | LDFLAGS ?= $(shell $(uname2ldflags)) 73 | LIB_STDCXXFS ?= $(shell echo '$(cxx_filesystem_probe)' | cat mdbx.h++ - | sed $$'1s/\xef\xbb\xbf//' | $(CXX) -x c++ $(CXXFLAGS) -Wno-error - -Wl,--allow-multiple-definition -lstdc++fs $(LIBS) $(LDFLAGS) $(EXE_LDFLAGS) -o /dev/null 2>probe4lstdfs.err >/dev/null && echo '-Wl,--allow-multiple-definition -lstdc++fs') 74 | else 75 | # using variable expansion trick to avoid repeaded probes 76 | LIBS ?= $(eval LIBS := $$(shell $$(uname2libs)))$(LIBS) 77 | LDFLAGS ?= $(eval LDFLAGS := $$(shell $$(uname2ldflags)))$(LDFLAGS) 78 | LIB_STDCXXFS ?= $(eval LIB_STDCXXFS := $$(shell echo '$$(cxx_filesystem_probe)' | cat mdbx.h++ - | sed $$$$'1s/\xef\xbb\xbf//' | $(CXX) -x c++ $(CXXFLAGS) -Wno-error - -Wl,--allow-multiple-definition -lstdc++fs $(LIBS) $(LDFLAGS) $(EXE_LDFLAGS) -o /dev/null 2>probe4lstdfs.err >/dev/null && echo '-Wl,--allow-multiple-definition -lstdc++fs'))$(LIB_STDCXXFS) 79 | endif 80 | 81 | ################################################################################ 82 | 83 | define uname2sosuffix 84 | case "$(UNAME)" in 85 | Darwin*|Mach*) echo dylib;; 86 | CYGWIN*|MINGW*|MSYS*|Windows*) echo dll;; 87 | *) echo so;; 88 | esac 89 | endef 90 | 91 | define uname2ldflags 92 | case "$(UNAME)" in 93 | CYGWIN*|MINGW*|MSYS*|Windows*) 94 | echo '-Wl,--gc-sections,-O1'; 95 | ;; 96 | *) 97 | $(LD) --help 2>/dev/null | grep -q -- --gc-sections && echo '-Wl,--gc-sections,-z,relro,-O1'; 98 | $(LD) --help 2>/dev/null | grep -q -- -dead_strip && echo '-Wl,-dead_strip'; 99 | ;; 100 | esac 101 | endef 102 | 103 | # TIP: try add the'-Wl, --no-as-needed,-lrt' for ability to built with modern glibc, but then use with the old. 
104 | define uname2libs 105 | case "$(UNAME)" in 106 | CYGWIN*|MINGW*|MSYS*|Windows*) 107 | echo '-lm -lntdll -lwinmm'; 108 | ;; 109 | *SunOS*|*Solaris*) 110 | echo '-lm -lkstat -lrt'; 111 | ;; 112 | *Darwin*|OpenBSD*) 113 | echo '-lm'; 114 | ;; 115 | *) 116 | echo '-lm -lrt'; 117 | ;; 118 | esac 119 | endef 120 | 121 | SO_SUFFIX := $(shell $(uname2sosuffix)) 122 | HEADERS := mdbx.h mdbx.h++ 123 | LIBRARIES := libmdbx.a libmdbx.$(SO_SUFFIX) 124 | TOOLS := mdbx_stat mdbx_copy mdbx_dump mdbx_load mdbx_chk mdbx_drop 125 | MANPAGES := mdbx_stat.1 mdbx_copy.1 mdbx_dump.1 mdbx_load.1 mdbx_chk.1 mdbx_drop.1 126 | TIP := // TIP: 127 | 128 | .PHONY: all help options lib libs tools clean install uninstall check_buildflags_tag tools-static 129 | .PHONY: install-strip install-no-strip strip libmdbx mdbx show-options lib-static lib-shared 130 | 131 | boolean = $(if $(findstring $(strip $($1)),YES Yes yes y ON On on 1 true True TRUE),1,$(if $(findstring $(strip $($1)),NO No no n OFF Off off 0 false False FALSE),,$(error Wrong value `$($1)` of $1 for YES/NO option))) 132 | select_by = $(if $(call boolean,$(1)),$(2),$(3)) 133 | 134 | ifeq ("$(origin V)", "command line") 135 | MDBX_BUILD_VERBOSE := $(V) 136 | endif 137 | ifndef MDBX_BUILD_VERBOSE 138 | MDBX_BUILD_VERBOSE := 0 139 | endif 140 | 141 | ifeq ($(call boolean,MDBX_BUILD_VERBOSE),1) 142 | QUIET := 143 | HUSH := 144 | $(info $(TIP) Use `make V=0` for quiet.) 145 | else 146 | QUIET := @ 147 | HUSH := >/dev/null 148 | $(info $(TIP) Use `make V=1` for verbose.) 149 | endif 150 | 151 | all: show-options $(LIBRARIES) $(TOOLS) 152 | 153 | help: 154 | @echo " make all - build libraries and tools" 155 | @echo " make help - print this help" 156 | @echo " make options - list build options" 157 | @echo " make lib - build libraries, also lib-static and lib-shared" 158 | @echo " make tools - build the tools" 159 | @echo " make tools-static - build the tools with statically linking with system libraries and compiler runtime" 160 | @echo " make clean " 161 | @echo " make install " 162 | @echo " make uninstall " 163 | @echo "" 164 | @echo " make strip - strip debug symbols from binaries" 165 | @echo " make install-no-strip - install explicitly without strip" 166 | @echo " make install-strip - install explicitly with strip" 167 | @echo "" 168 | @echo " make bench - run ioarena-benchmark" 169 | @echo " make bench-couple - run ioarena-benchmark for mdbx and lmdb" 170 | @echo " make bench-triplet - run ioarena-benchmark for mdbx, lmdb, sqlite3" 171 | @echo " make bench-quartet - run ioarena-benchmark for mdbx, lmdb, rocksdb, wiredtiger" 172 | @echo " make bench-clean - remove temp database(s) after benchmark" 173 | 174 | show-options: 175 | @echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)" 176 | @echo " MDBX_BUILD_CXX = $(MDBX_BUILD_CXX)" 177 | @echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)" 178 | @echo '$(TIP) Use `make options` to listing available build options.' 179 | @echo $(call select_by,MDBX_BUILD_CXX," CXX =`which $(CXX)` | `$(CXX) --version | head -1`"," CC =`which $(CC)` | `$(CC) --version | head -1`") 180 | @echo $(call select_by,MDBX_BUILD_CXX," CXXFLAGS =$(CXXFLAGS)"," CFLAGS =$(CFLAGS)") 181 | @echo $(call select_by,MDBX_BUILD_CXX," LDFLAGS =$(LDFLAGS) $(LIB_STDCXXFS) $(LIBS) $(EXE_LDFLAGS)"," LDFLAGS =$(LDFLAGS) $(LIBS) $(EXE_LDFLAGS)") 182 | @echo '$(TIP) Use `make help` to listing available targets.' 
183 | 184 | options: 185 | @echo " INSTALL =$(INSTALL)" 186 | @echo " DESTDIR =$(DESTDIR)" 187 | @echo " prefix =$(prefix)" 188 | @echo " mandir =$(mandir)" 189 | @echo " suffix =$(suffix)" 190 | @echo "" 191 | @echo " CC =$(CC)" 192 | @echo " CFLAGS_EXTRA =$(CFLAGS_EXTRA)" 193 | @echo " CFLAGS =$(CFLAGS)" 194 | @echo " CXX =$(CXX)" 195 | @echo " CXXSTD =$(CXXSTD)" 196 | @echo " CXXFLAGS =$(CXXFLAGS)" 197 | @echo "" 198 | @echo " LD =$(LD)" 199 | @echo " LDFLAGS =$(LDFLAGS)" 200 | @echo " EXE_LDFLAGS =$(EXE_LDFLAGS)" 201 | @echo " LIBS =$(LIBS)" 202 | @echo "" 203 | @echo " MDBX_BUILD_OPTIONS = $(MDBX_BUILD_OPTIONS)" 204 | @echo " MDBX_BUILD_TIMESTAMP = $(MDBX_BUILD_TIMESTAMP)" 205 | @echo "" 206 | @echo "## Assortment items for MDBX_BUILD_OPTIONS:" 207 | @echo "## Note that the defaults should already be correct for most platforms;" 208 | @echo "## you should not need to change any of these. Read their descriptions" 209 | @echo "## in README and source code (see mdbx.c) if you do." 210 | @grep -h '#ifndef MDBX_' mdbx.c | grep -v BUILD | uniq | sed 's/#ifndef / /' 211 | 212 | lib libs libmdbx mdbx: libmdbx.a libmdbx.$(SO_SUFFIX) 213 | 214 | tools: $(TOOLS) 215 | tools-static: $(addsuffix .static,$(TOOLS)) $(addsuffix .static-lto,$(TOOLS)) 216 | 217 | strip: all 218 | @echo ' STRIP libmdbx.$(SO_SUFFIX) $(TOOLS)' 219 | $(TRACE )strip libmdbx.$(SO_SUFFIX) $(TOOLS) 220 | 221 | clean: 222 | @echo ' REMOVE ...' 223 | $(QUIET)rm -rf $(TOOLS) mdbx_test @* *.[ao] *.[ls]o *.$(SO_SUFFIX) *.dSYM *~ tmp.db/* \ 224 | *.gcov *.log *.err src/*.o test/*.o mdbx_example dist \ 225 | config.h src/config.h src/version.c *.tar* buildflags.tag \ 226 | mdbx_*.static mdbx_*.static-lto 227 | 228 | MDBX_BUILD_FLAGS =$(strip MDBX_BUILD_CXX=$(MDBX_BUILD_CXX) $(MDBX_BUILD_OPTIONS) $(call select_by,MDBX_BUILD_CXX,$(CXXFLAGS) $(LDFLAGS) $(LIB_STDCXXFS) $(LIBS),$(CFLAGS) $(LDFLAGS) $(LIBS))) 229 | check_buildflags_tag: 230 | $(QUIET)if [ "$(MDBX_BUILD_FLAGS)" != "$$(cat buildflags.tag 2>&1)" ]; then \ 231 | echo -n " CLEAN for build with specified flags..." && \ 232 | $(MAKE) IOARENA=false CXXSTD= -s clean >/dev/null && echo " Ok" && \ 233 | echo '$(MDBX_BUILD_FLAGS)' > buildflags.tag; \ 234 | fi 235 | 236 | buildflags.tag: check_buildflags_tag 237 | 238 | lib-static libmdbx.a: mdbx-static.o $(call select_by,MDBX_BUILD_CXX,mdbx++-static.o) 239 | @echo ' AR $@' 240 | $(QUIET)$(AR) rcs $@ $? $(HUSH) 241 | 242 | lib-shared libmdbx.$(SO_SUFFIX): mdbx-dylib.o $(call select_by,MDBX_BUILD_CXX,mdbx++-dylib.o) 243 | @echo ' LD $@' 244 | $(QUIET)$(call select_by,MDBX_BUILD_CXX,$(CXX) $(CXXFLAGS),$(CC) $(CFLAGS)) $^ -pthread -shared $(LDFLAGS) $(call select_by,MDBX_BUILD_CXX,$(LIB_STDCXXFS)) $(LIBS) -o $@ 245 | 246 | 247 | ################################################################################ 248 | # Amalgamated source code, i.e. 
distributed after `make dist` 249 | MAN_SRCDIR := man1/ 250 | 251 | config.h: buildflags.tag mdbx.c $(lastword $(MAKEFILE_LIST)) 252 | @echo ' MAKE $@' 253 | $(QUIET)(echo '#define MDBX_BUILD_TIMESTAMP "$(MDBX_BUILD_TIMESTAMP)"' \ 254 | && echo "#define MDBX_BUILD_FLAGS \"$$(cat buildflags.tag)\"" \ 255 | && echo '#define MDBX_BUILD_COMPILER "$(shell (LC_ALL=C $(CC) --version || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \ 256 | && echo '#define MDBX_BUILD_TARGET "$(shell set -o pipefail; (LC_ALL=C $(CC) -v 2>&1 | grep -i '^Target:' | cut -d ' ' -f 2- || (LC_ALL=C $(CC) --version | grep -qi e2k && echo E2K) || echo 'Please use GCC or CLANG compatible compiler') | head -1)"' \ 257 | ) >$@ 258 | 259 | mdbx-dylib.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST)) 260 | @echo ' CC $@' 261 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c -o $@ 262 | 263 | mdbx-static.o: config.h mdbx.c mdbx.h $(lastword $(MAKEFILE_LIST)) 264 | @echo ' CC $@' 265 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c -o $@ 266 | 267 | mdbx++-dylib.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST)) 268 | @echo ' CC $@' 269 | $(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -DLIBMDBX_EXPORTS=1 -c mdbx.c++ -o $@ 270 | 271 | mdbx++-static.o: config.h mdbx.c++ mdbx.h mdbx.h++ $(lastword $(MAKEFILE_LIST)) 272 | @echo ' CC $@' 273 | $(QUIET)$(CXX) $(CXXFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' -ULIBMDBX_EXPORTS -c mdbx.c++ -o $@ 274 | 275 | mdbx_%: mdbx_%.c mdbx-static.o 276 | @echo ' CC+LD $@' 277 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' $^ $(EXE_LDFLAGS) $(LIBS) -o $@ 278 | 279 | mdbx_%.static: mdbx_%.c mdbx-static.o 280 | @echo ' CC+LD $@' 281 | $(QUIET)$(CC) $(CFLAGS) $(MDBX_BUILD_OPTIONS) '-DMDBX_CONFIG_H="config.h"' $^ $(EXE_LDFLAGS) -static -Wl,--strip-all -o $@ 282 | 283 | mdbx_%.static-lto: mdbx_%.c config.h mdbx.c mdbx.h 284 | @echo ' CC+LD $@' 285 | $(QUIET)$(CC) $(CFLAGS) -Os -flto $(MDBX_BUILD_OPTIONS) '-DLIBMDBX_API=' '-DMDBX_CONFIG_H="config.h"' \ 286 | $< mdbx.c $(EXE_LDFLAGS) $(LIBS) -static -Wl,--strip-all -o $@ 287 | 288 | 289 | install: $(LIBRARIES) $(TOOLS) $(HEADERS) 290 | @echo ' INSTALLING...' 291 | $(QUIET)mkdir -p $(DESTDIR)$(prefix)/bin$(suffix) && \ 292 | $(INSTALL) -p $(EXE_INSTALL_FLAGS) $(TOOLS) $(DESTDIR)$(prefix)/bin$(suffix)/ && \ 293 | mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 294 | $(INSTALL) -p $(EXE_INSTALL_FLAGS) $(filter-out libmdbx.a,$(LIBRARIES)) $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 295 | mkdir -p $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 296 | $(INSTALL) -p libmdbx.a $(DESTDIR)$(prefix)/lib$(suffix)/ && \ 297 | mkdir -p $(DESTDIR)$(prefix)/include/ && \ 298 | $(INSTALL) -p -m 444 $(HEADERS) $(DESTDIR)$(prefix)/include/ && \ 299 | mkdir -p $(DESTDIR)$(mandir)/man1/ && \ 300 | $(INSTALL) -p -m 444 $(addprefix $(MAN_SRCDIR), $(MANPAGES)) $(DESTDIR)$(mandir)/man1/ 301 | 302 | install-strip: EXE_INSTALL_FLAGS = -s 303 | install-strip: install 304 | 305 | install-no-strip: EXE_INSTALL_FLAGS = 306 | install-no-strip: install 307 | 308 | uninstall: 309 | @echo ' UNINSTALLING/REMOVE...' 
310 | $(QUIET)rm -f $(addprefix $(DESTDIR)$(prefix)/bin$(suffix)/,$(TOOLS)) \ 311 | $(addprefix $(DESTDIR)$(prefix)/lib$(suffix)/,$(LIBRARIES)) \ 312 | $(addprefix $(DESTDIR)$(prefix)/include/,$(HEADERS)) \ 313 | $(addprefix $(DESTDIR)$(mandir)/man1/,$(MANPAGES)) 314 | 315 | ################################################################################ 316 | # Benchmarking by ioarena 317 | 318 | ifeq ($(origin IOARENA),undefined) 319 | IOARENA := $(shell \ 320 | (test -x ../ioarena/@BUILD/src/ioarena && echo ../ioarena/@BUILD/src/ioarena) || \ 321 | (test -x ../../@BUILD/src/ioarena && echo ../../@BUILD/src/ioarena) || \ 322 | (test -x ../../src/ioarena && echo ../../src/ioarena) || which ioarena 2>&- || \ 323 | (echo false && echo '$(TIP) Clone and build the https://abf.io/erthink/ioarena.git within a neighbouring directory for availability of benchmarking.' >&2)) 324 | endif 325 | NN ?= 25000000 326 | BENCH_CRUD_MODE ?= nosync 327 | 328 | bench-clean: 329 | @echo ' REMOVE bench-*.txt _ioarena/*' 330 | $(QUIET)rm -rf bench-*.txt _ioarena/* 331 | 332 | re-bench: bench-clean bench 333 | 334 | ifeq ($(or $(IOARENA),false),false) 335 | bench bench-quartet bench-triplet bench-couple: 336 | $(QUIET)echo 'The `ioarena` benchmark is required.' >&2 && \ 337 | echo 'Please clone and build the https://abf.io/erthink/ioarena.git within a neighbouring `ioarena` directory.' >&2 && \ 338 | false 339 | 340 | else 341 | 342 | .PHONY: bench bench-clean bench-couple re-bench bench-quartet bench-triplet 343 | 344 | define bench-rule 345 | bench-$(1)_$(2).txt: $(3) $(IOARENA) $(lastword $(MAKEFILE_LIST)) 346 | @echo ' RUNNING ioarena for $1/$2...' 347 | $(QUIET)(export LD_LIBRARY_PATH="./:$$$${LD_LIBRARY_PATH}"; \ 348 | ldd $(IOARENA) | grep -i $(1) && \ 349 | $(IOARENA) -D $(1) -B batch -m $(BENCH_CRUD_MODE) -n $(2) \ 350 | | tee $$@ | grep throughput | sed 's/throughput/batch×N/' && \ 351 | $(IOARENA) -D $(1) -B crud -m $(BENCH_CRUD_MODE) -n $(2) \ 352 | | tee -a $$@ | grep throughput | sed 's/throughput/ crud/' && \ 353 | $(IOARENA) -D $(1) -B iterate,get,iterate,get,iterate -m $(BENCH_CRUD_MODE) -r 4 -n $(2) \ 354 | | tee -a $$@ | grep throughput | sed '0,/throughput/{s/throughput/iterate/};s/throughput/ get/' && \ 355 | $(IOARENA) -D $(1) -B delete -m $(BENCH_CRUD_MODE) -n $(2) \ 356 | | tee -a $$@ | grep throughput | sed 's/throughput/ delete/' && \ 357 | true) || mv -f $$@ $$@.error 358 | 359 | endef 360 | 361 | 362 | $(eval $(call bench-rule,mdbx,$(NN),libmdbx.$(SO_SUFFIX))) 363 | 364 | $(eval $(call bench-rule,sophia,$(NN))) 365 | $(eval $(call bench-rule,leveldb,$(NN))) 366 | $(eval $(call bench-rule,rocksdb,$(NN))) 367 | $(eval $(call bench-rule,wiredtiger,$(NN))) 368 | $(eval $(call bench-rule,forestdb,$(NN))) 369 | $(eval $(call bench-rule,lmdb,$(NN))) 370 | $(eval $(call bench-rule,nessdb,$(NN))) 371 | $(eval $(call bench-rule,sqlite3,$(NN))) 372 | $(eval $(call bench-rule,ejdb,$(NN))) 373 | $(eval $(call bench-rule,vedisdb,$(NN))) 374 | $(eval $(call bench-rule,dummy,$(NN))) 375 | bench: bench-mdbx_$(NN).txt 376 | bench-quartet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-rocksdb_$(NN).txt bench-wiredtiger_$(NN).txt 377 | bench-triplet: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt bench-sqlite3_$(NN).txt 378 | bench-couple: bench-mdbx_$(NN).txt bench-lmdb_$(NN).txt 379 | 380 | # $(eval $(call bench-rule,debug,10)) 381 | # .PHONY: bench-debug 382 | # bench-debug: bench-debug_10.txt 383 | 384 | endif 385 | -------------------------------------------------------------------------------- 
/mdbx-sys/libmdbx/LICENSE: -------------------------------------------------------------------------------- 1 | The OpenLDAP Public License 2 | Version 2.8, 17 August 2003 3 | 4 | Redistribution and use of this software and associated documentation 5 | ("Software"), with or without modification, are permitted provided 6 | that the following conditions are met: 7 | 8 | 1. Redistributions in source form must retain copyright statements 9 | and notices, 10 | 11 | 2. Redistributions in binary form must reproduce applicable copyright 12 | statements and notices, this list of conditions, and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution, and 15 | 16 | 3. Redistributions must contain a verbatim copy of this document. 17 | 18 | The OpenLDAP Foundation may revise this license from time to time. 19 | Each revision is distinguished by a version number. You may use 20 | this Software under terms of this license revision or under the 21 | terms of any subsequent revision of the license. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS 24 | CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, 25 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 26 | AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 27 | SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) 28 | OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, 29 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 32 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 34 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 | POSSIBILITY OF SUCH DAMAGE. 36 | 37 | The names of the authors and copyright holders must not be used in 38 | advertising or otherwise to promote the sale, use or other dealing 39 | in this Software without specific, written prior permission. Title 40 | to copyright in this Software shall at all times remain with copyright 41 | holders. 42 | 43 | OpenLDAP is a registered trademark of the OpenLDAP Foundation. 44 | 45 | Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, 46 | California, USA. All Rights Reserved. Permission to copy and 47 | distribute verbatim copies of this document is granted. 
48 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/Makefile: -------------------------------------------------------------------------------- 1 | # This is thunk-Makefile for calling GNU Make 3.80 or above 2 | 3 | all help options \ 4 | clean install install-no-strip install-strip strip tools uninstall \ 5 | bench bench-clean bench-couple bench-quartet bench-triplet re-bench \ 6 | lib libs lib-static lib-shared tools-static \ 7 | libmdbx mdbx mdbx_chk mdbx_copy mdbx_drop mdbx_dump mdbx_load mdbx_stat \ 8 | check dist memcheck cross-gcc cross-qemu doxygen gcc-analyzer reformat \ 9 | release-assets tags test build-test mdbx_test smoke smoke-fault smoke-singleprocess \ 10 | smoke-assertion test-assertion long-test-assertion \ 11 | test-asan test-leak test-singleprocess test-ubsan test-valgrind: 12 | @CC=$(CC) \ 13 | CXX=`if test -n "$(CXX)" && which "$(CXX)" > /dev/null; then echo "$(CXX)"; elif test -n "$(CCC)" && which "$(CCC)" > /dev/null; then echo "$(CCC)"; else echo "c++"; fi` \ 14 | `which gmake || which gnumake || echo 'echo "GNU Make 3.80 or above is required"; exit 2;'` \ 15 | $(MAKEFLAGS) -f GNUmakefile $@ 16 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/VERSION.txt: -------------------------------------------------------------------------------- 1 | 0.12.13.5 2 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/cmake/profile.cmake: -------------------------------------------------------------------------------- 1 | ## Copyright (c) 2012-2024 Leonid Yuriev . 2 | ## 3 | ## Licensed under the Apache License, Version 2.0 (the "License"); 4 | ## you may not use this file except in compliance with the License. 5 | ## You may obtain a copy of the License at 6 | ## 7 | ## http://www.apache.org/licenses/LICENSE-2.0 8 | ## 9 | ## Unless required by applicable law or agreed to in writing, software 10 | ## distributed under the License is distributed on an "AS IS" BASIS, 11 | ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | ## See the License for the specific language governing permissions and 13 | ## limitations under the License. 
14 | ## 15 | 16 | if(CMAKE_VERSION VERSION_LESS 3.8.2) 17 | cmake_minimum_required(VERSION 3.0.2) 18 | elseif(CMAKE_VERSION VERSION_LESS 3.12) 19 | cmake_minimum_required(VERSION 3.8.2) 20 | else() 21 | cmake_minimum_required(VERSION 3.12) 22 | endif() 23 | 24 | cmake_policy(PUSH) 25 | cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION}) 26 | 27 | include(CheckLibraryExists) 28 | check_library_exists(gcov __gcov_flush "" HAVE_GCOV) 29 | 30 | option(ENABLE_GCOV 31 | "Enable integration with gcov, a code coverage program" OFF) 32 | 33 | option(ENABLE_GPROF 34 | "Enable integration with gprof, a performance analyzing tool" OFF) 35 | 36 | if(CMAKE_CXX_COMPILER_LOADED) 37 | include(CheckIncludeFileCXX) 38 | check_include_file_cxx(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H) 39 | else() 40 | include(CheckIncludeFile) 41 | check_include_file(valgrind/memcheck.h HAVE_VALGRIND_MEMCHECK_H) 42 | endif() 43 | 44 | option(MDBX_USE_VALGRIND "Enable integration with valgrind, a memory analyzing tool" OFF) 45 | if(MDBX_USE_VALGRIND AND NOT HAVE_VALGRIND_MEMCHECK_H) 46 | message(FATAL_ERROR "MDBX_USE_VALGRIND option is set but valgrind/memcheck.h is not found") 47 | endif() 48 | 49 | option(ENABLE_ASAN 50 | "Enable AddressSanitizer, a fast memory error detector based on compiler instrumentation" OFF) 51 | 52 | option(ENABLE_UBSAN 53 | "Enable UndefinedBehaviorSanitizer, a fast undefined behavior detector based on compiler instrumentation" OFF) 54 | 55 | cmake_policy(POP) 56 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/cmake/utils.cmake: -------------------------------------------------------------------------------- 1 | ## Copyright (c) 2012-2024 Leonid Yuriev . 2 | ## 3 | ## Licensed under the Apache License, Version 2.0 (the "License"); 4 | ## you may not use this file except in compliance with the License. 5 | ## You may obtain a copy of the License at 6 | ## 7 | ## http://www.apache.org/licenses/LICENSE-2.0 8 | ## 9 | ## Unless required by applicable law or agreed to in writing, software 10 | ## distributed under the License is distributed on an "AS IS" BASIS, 11 | ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | ## See the License for the specific language governing permissions and 13 | ## limitations under the License. 
14 | ## 15 | 16 | if(CMAKE_VERSION VERSION_LESS 3.8.2) 17 | cmake_minimum_required(VERSION 3.0.2) 18 | elseif(CMAKE_VERSION VERSION_LESS 3.12) 19 | cmake_minimum_required(VERSION 3.8.2) 20 | else() 21 | cmake_minimum_required(VERSION 3.12) 22 | endif() 23 | 24 | cmake_policy(PUSH) 25 | cmake_policy(VERSION ${CMAKE_MINIMUM_REQUIRED_VERSION}) 26 | 27 | macro(add_compile_flags languages) 28 | foreach(_lang ${languages}) 29 | string(REPLACE ";" " " _flags "${ARGN}") 30 | if(CMAKE_CXX_COMPILER_LOADED AND _lang STREQUAL "CXX") 31 | set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}") 32 | endif() 33 | if(CMAKE_C_COMPILER_LOADED AND _lang STREQUAL "C") 34 | set("${_lang}_FLAGS" "${${_lang}_FLAGS} ${_flags}") 35 | endif() 36 | endforeach() 37 | unset(_lang) 38 | unset(_flags) 39 | endmacro(add_compile_flags) 40 | 41 | macro(remove_flag varname flag) 42 | string(REGEX REPLACE "^(.*)( ${flag} )(.*)$" "\\1 \\3" ${varname} ${${varname}}) 43 | string(REGEX REPLACE "^((.+ )*)(${flag})(( .+)*)$" "\\1\\4" ${varname} ${${varname}}) 44 | endmacro(remove_flag) 45 | 46 | macro(remove_compile_flag languages flag) 47 | foreach(_lang ${languages}) 48 | if(CMAKE_CXX_COMPILER_LOADED AND _lang STREQUAL "CXX") 49 | remove_flag(${_lang}_FLAGS ${flag}) 50 | endif() 51 | if(CMAKE_C_COMPILER_LOADED AND _lang STREQUAL "C") 52 | remove_flag(${_lang}_FLAGS ${flag}) 53 | endif() 54 | endforeach() 55 | unset(_lang) 56 | endmacro(remove_compile_flag) 57 | 58 | macro(set_source_files_compile_flags) 59 | foreach(file ${ARGN}) 60 | get_filename_component(_file_ext ${file} EXT) 61 | set(_lang "") 62 | if("${_file_ext}" STREQUAL ".m") 63 | set(_lang OBJC) 64 | # CMake believes that Objective C is a flavor of C++, not C, 65 | # and uses g++ compiler for .m files. 66 | # LANGUAGE property forces CMake to use CC for ${file} 67 | set_source_files_properties(${file} PROPERTIES LANGUAGE C) 68 | elseif("${_file_ext}" STREQUAL ".mm") 69 | set(_lang OBJCXX) 70 | endif() 71 | 72 | if(_lang) 73 | get_source_file_property(_flags ${file} COMPILE_FLAGS) 74 | if("${_flags}" STREQUAL "NOTFOUND") 75 | set(_flags "${CMAKE_${_lang}_FLAGS}") 76 | else() 77 | set(_flags "${_flags} ${CMAKE_${_lang}_FLAGS}") 78 | endif() 79 | # message(STATUS "Set (${file} ${_flags}") 80 | set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS 81 | "${_flags}") 82 | endif() 83 | endforeach() 84 | unset(_file_ext) 85 | unset(_lang) 86 | endmacro(set_source_files_compile_flags) 87 | 88 | macro(fetch_version name source_root_directory parent_scope) 89 | set(${name}_VERSION "") 90 | set(${name}_GIT_DESCRIBE "") 91 | set(${name}_GIT_TIMESTAMP "") 92 | set(${name}_GIT_TREE "") 93 | set(${name}_GIT_COMMIT "") 94 | set(${name}_GIT_REVISION 0) 95 | set(${name}_GIT_VERSION "") 96 | if(GIT AND EXISTS "${source_root_directory}/.git") 97 | execute_process(COMMAND ${GIT} show --no-patch --format=%cI HEAD 98 | OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP 99 | OUTPUT_STRIP_TRAILING_WHITESPACE 100 | WORKING_DIRECTORY ${source_root_directory} 101 | RESULT_VARIABLE rc) 102 | if(rc OR "${name}_GIT_TIMESTAMP" STREQUAL "%cI") 103 | execute_process(COMMAND ${GIT} show --no-patch --format=%ci HEAD 104 | OUTPUT_VARIABLE ${name}_GIT_TIMESTAMP 105 | OUTPUT_STRIP_TRAILING_WHITESPACE 106 | WORKING_DIRECTORY ${source_root_directory} 107 | RESULT_VARIABLE rc) 108 | if(rc OR "${name}_GIT_TIMESTAMP" STREQUAL "%ci") 109 | message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%cI HEAD` failed)") 110 | endif() 111 | endif() 112 | 113 | execute_process(COMMAND ${GIT} show 
--no-patch --format=%T HEAD 114 | OUTPUT_VARIABLE ${name}_GIT_TREE 115 | OUTPUT_STRIP_TRAILING_WHITESPACE 116 | WORKING_DIRECTORY ${source_root_directory} 117 | RESULT_VARIABLE rc) 118 | if(rc OR "${name}_GIT_TREE" STREQUAL "") 119 | message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%T HEAD` failed)") 120 | endif() 121 | 122 | execute_process(COMMAND ${GIT} show --no-patch --format=%H HEAD 123 | OUTPUT_VARIABLE ${name}_GIT_COMMIT 124 | OUTPUT_STRIP_TRAILING_WHITESPACE 125 | WORKING_DIRECTORY ${source_root_directory} 126 | RESULT_VARIABLE rc) 127 | if(rc OR "${name}_GIT_COMMIT" STREQUAL "") 128 | message(FATAL_ERROR "Please install latest version of git (`show --no-patch --format=%H HEAD` failed)") 129 | endif() 130 | 131 | execute_process(COMMAND ${GIT} rev-list --tags --count 132 | OUTPUT_VARIABLE tag_count 133 | OUTPUT_STRIP_TRAILING_WHITESPACE 134 | WORKING_DIRECTORY ${source_root_directory} 135 | RESULT_VARIABLE rc) 136 | if(rc) 137 | message(FATAL_ERROR "Please install latest version of git (`git rev-list --tags --count` failed)") 138 | endif() 139 | 140 | if(tag_count EQUAL 0) 141 | execute_process(COMMAND ${GIT} rev-list --all --count 142 | OUTPUT_VARIABLE whole_count 143 | OUTPUT_STRIP_TRAILING_WHITESPACE 144 | WORKING_DIRECTORY ${source_root_directory} 145 | RESULT_VARIABLE rc) 146 | if(rc) 147 | message(FATAL_ERROR "Please install latest version of git (`git rev-list --all --count` failed)") 148 | endif() 149 | if(whole_count GREATER 42) 150 | message(FATAL_ERROR "Please fetch tags (no any tags for ${whole_count} commits)") 151 | endif() 152 | set(${name}_GIT_VERSION "0;0;0") 153 | execute_process(COMMAND ${GIT} rev-list --count --all --no-merges 154 | OUTPUT_VARIABLE ${name}_GIT_REVISION 155 | OUTPUT_STRIP_TRAILING_WHITESPACE 156 | WORKING_DIRECTORY ${source_root_directory} 157 | RESULT_VARIABLE rc) 158 | if(rc OR "${name}_GIT_REVISION" STREQUAL "") 159 | message(FATAL_ERROR "Please install latest version of git (`rev-list --count --all --no-merges` failed)") 160 | endif() 161 | else(tag_count EQUAL 0) 162 | execute_process(COMMAND ${GIT} describe --tags --long --dirty=-dirty "--match=v[0-9]*" 163 | OUTPUT_VARIABLE ${name}_GIT_DESCRIBE 164 | OUTPUT_STRIP_TRAILING_WHITESPACE 165 | WORKING_DIRECTORY ${source_root_directory} 166 | RESULT_VARIABLE rc) 167 | if(rc OR "${name}_GIT_DESCRIBE" STREQUAL "") 168 | if(_whole_count GREATER 42) 169 | message(FATAL_ERROR "Please fetch tags (`describe --tags --long --dirty --match=v[0-9]*` failed)") 170 | else() 171 | execute_process(COMMAND ${GIT} describe --all --long --dirty=-dirty 172 | OUTPUT_VARIABLE ${name}_GIT_DESCRIBE 173 | OUTPUT_STRIP_TRAILING_WHITESPACE 174 | WORKING_DIRECTORY ${source_root_directory} 175 | RESULT_VARIABLE rc) 176 | if(rc OR "${name}_GIT_DESCRIBE" STREQUAL "") 177 | message(FATAL_ERROR "Please install latest version of git (`git rev-list --tags --count` and/or `git rev-list --all --count` failed)") 178 | endif() 179 | endif() 180 | endif() 181 | 182 | execute_process(COMMAND ${GIT} describe --tags --abbrev=0 "--match=v[0-9]*" 183 | OUTPUT_VARIABLE last_release_tag 184 | OUTPUT_STRIP_TRAILING_WHITESPACE 185 | WORKING_DIRECTORY ${source_root_directory} 186 | RESULT_VARIABLE rc) 187 | if(rc) 188 | message(FATAL_ERROR "Please install latest version of git (`describe --tags --abbrev=0 --match=v[0-9]*` failed)") 189 | endif() 190 | if (last_release_tag) 191 | set(git_revlist_arg "${last_release_tag}..HEAD") 192 | else() 193 | execute_process(COMMAND ${GIT} tag --sort=-version:refname 194 
| OUTPUT_VARIABLE tag_list 195 | OUTPUT_STRIP_TRAILING_WHITESPACE 196 | WORKING_DIRECTORY ${source_root_directory} 197 | RESULT_VARIABLE rc) 198 | if(rc) 199 | message(FATAL_ERROR "Please install latest version of git (`tag --sort=-version:refname` failed)") 200 | endif() 201 | string(REGEX REPLACE "\n" ";" tag_list "${tag_list}") 202 | set(git_revlist_arg "HEAD") 203 | foreach(tag IN LISTS tag_list) 204 | if(NOT last_release_tag) 205 | string(REGEX MATCH "^v[0-9]+(\.[0-9]+)+" last_release_tag "${tag}") 206 | set(git_revlist_arg "${tag}..HEAD") 207 | endif() 208 | endforeach(tag) 209 | endif() 210 | execute_process(COMMAND ${GIT} rev-list --count "${git_revlist_arg}" 211 | OUTPUT_VARIABLE ${name}_GIT_REVISION 212 | OUTPUT_STRIP_TRAILING_WHITESPACE 213 | WORKING_DIRECTORY ${source_root_directory} 214 | RESULT_VARIABLE rc) 215 | if(rc OR "${name}_GIT_REVISION" STREQUAL "") 216 | message(FATAL_ERROR "Please install latest version of git (`rev-list --count ${git_revlist_arg}` failed)") 217 | endif() 218 | 219 | string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}") 220 | if(git_version_valid) 221 | string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)?" "\\2;\\3;\\4" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE}) 222 | else() 223 | string(REGEX MATCH "^(v)?([0-9]+)\\.([0-9]+)(.*)?" git_version_valid "${${name}_GIT_DESCRIBE}") 224 | if(git_version_valid) 225 | string(REGEX REPLACE "^(v)?([0-9]+)\\.([0-9]+)(.*)?" "\\2;\\3;0" ${name}_GIT_VERSION ${${name}_GIT_DESCRIBE}) 226 | else() 227 | message(AUTHOR_WARNING "Bad ${name} version \"${${name}_GIT_DESCRIBE}\"; falling back to 0.0.0 (have you made an initial release?)") 228 | set(${name}_GIT_VERSION "0;0;0") 229 | endif() 230 | endif() 231 | endif(tag_count EQUAL 0) 232 | endif() 233 | 234 | if(NOT ${name}_GIT_VERSION OR NOT ${name}_GIT_TIMESTAMP OR ${name}_GIT_REVISION STREQUAL "") 235 | if(GIT AND EXISTS "${source_root_directory}/.git") 236 | message(WARNING "Unable to retrieve ${name} version from git.") 237 | endif() 238 | set(${name}_GIT_VERSION "0;0;0;0") 239 | set(${name}_GIT_TIMESTAMP "") 240 | set(${name}_GIT_REVISION 0) 241 | 242 | # Try to get version from VERSION file 243 | set(version_file "${source_root_directory}/VERSION.txt") 244 | if(NOT EXISTS "${version_file}") 245 | set(version_file "${source_root_directory}/VERSION") 246 | endif() 247 | if(EXISTS "${version_file}") 248 | file(STRINGS "${version_file}" ${name}_VERSION LIMIT_COUNT 1 LIMIT_INPUT 42) 249 | endif() 250 | 251 | if(NOT ${name}_VERSION) 252 | message(WARNING "Unable to retrieve ${name} version from \"${version_file}\" file.") 253 | set(${name}_VERSION_LIST ${${name}_GIT_VERSION}) 254 | string(REPLACE ";" "." ${name}_VERSION "${${name}_GIT_VERSION}") 255 | else() 256 | string(REPLACE "." ";" ${name}_VERSION_LIST ${${name}_VERSION}) 257 | endif() 258 | 259 | else() 260 | list(APPEND ${name}_GIT_VERSION ${${name}_GIT_REVISION}) 261 | set(${name}_VERSION_LIST ${${name}_GIT_VERSION}) 262 | string(REPLACE ";" "." 
${name}_VERSION "${${name}_GIT_VERSION}") 263 | endif() 264 | 265 | list(GET ${name}_VERSION_LIST 0 "${name}_VERSION_MAJOR") 266 | list(GET ${name}_VERSION_LIST 1 "${name}_VERSION_MINOR") 267 | list(GET ${name}_VERSION_LIST 2 "${name}_VERSION_RELEASE") 268 | list(GET ${name}_VERSION_LIST 3 "${name}_VERSION_REVISION") 269 | 270 | if(${parent_scope}) 271 | set(${name}_VERSION_MAJOR "${${name}_VERSION_MAJOR}" PARENT_SCOPE) 272 | set(${name}_VERSION_MINOR "${${name}_VERSION_MINOR}" PARENT_SCOPE) 273 | set(${name}_VERSION_RELEASE "${${name}_VERSION_RELEASE}" PARENT_SCOPE) 274 | set(${name}_VERSION_REVISION "${${name}_VERSION_REVISION}" PARENT_SCOPE) 275 | set(${name}_VERSION "${${name}_VERSION}" PARENT_SCOPE) 276 | 277 | set(${name}_GIT_DESCRIBE "${${name}_GIT_DESCRIBE}" PARENT_SCOPE) 278 | set(${name}_GIT_TIMESTAMP "${${name}_GIT_TIMESTAMP}" PARENT_SCOPE) 279 | set(${name}_GIT_TREE "${${name}_GIT_TREE}" PARENT_SCOPE) 280 | set(${name}_GIT_COMMIT "${${name}_GIT_COMMIT}" PARENT_SCOPE) 281 | set(${name}_GIT_REVISION "${${name}_GIT_REVISION}" PARENT_SCOPE) 282 | set(${name}_GIT_VERSION "${${name}_GIT_VERSION}" PARENT_SCOPE) 283 | endif() 284 | endmacro(fetch_version) 285 | 286 | cmake_policy(POP) 287 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/config.h.in: -------------------------------------------------------------------------------- 1 | /* This is CMake-template for libmdbx's config.h 2 | ******************************************************************************/ 3 | 4 | /* *INDENT-OFF* */ 5 | /* clang-format off */ 6 | 7 | #cmakedefine LTO_ENABLED 8 | #cmakedefine MDBX_USE_VALGRIND 9 | #cmakedefine ENABLE_GPROF 10 | #cmakedefine ENABLE_GCOV 11 | #cmakedefine ENABLE_ASAN 12 | #cmakedefine ENABLE_UBSAN 13 | #cmakedefine01 MDBX_FORCE_ASSERTIONS 14 | 15 | /* Common */ 16 | #cmakedefine01 MDBX_TXN_CHECKOWNER 17 | #cmakedefine MDBX_ENV_CHECKPID_AUTO 18 | #ifndef MDBX_ENV_CHECKPID_AUTO 19 | #cmakedefine01 MDBX_ENV_CHECKPID 20 | #endif 21 | #cmakedefine MDBX_LOCKING_AUTO 22 | #ifndef MDBX_LOCKING_AUTO 23 | #cmakedefine MDBX_LOCKING @MDBX_LOCKING@ 24 | #endif 25 | #cmakedefine MDBX_TRUST_RTC_AUTO 26 | #ifndef MDBX_TRUST_RTC_AUTO 27 | #cmakedefine01 MDBX_TRUST_RTC 28 | #endif 29 | #cmakedefine01 MDBX_DISABLE_VALIDATION 30 | #cmakedefine01 MDBX_AVOID_MSYNC 31 | #cmakedefine01 MDBX_ENABLE_REFUND 32 | #cmakedefine01 MDBX_ENABLE_MADVISE 33 | #cmakedefine01 MDBX_ENABLE_BIGFOOT 34 | #cmakedefine01 MDBX_ENABLE_PGOP_STAT 35 | #cmakedefine01 MDBX_ENABLE_PROFGC 36 | 37 | /* Windows */ 38 | #cmakedefine01 MDBX_WITHOUT_MSVC_CRT 39 | 40 | /* MacOS & iOS */ 41 | #cmakedefine01 MDBX_OSX_SPEED_INSTEADOF_DURABILITY 42 | 43 | /* POSIX */ 44 | #cmakedefine01 MDBX_DISABLE_GNU_SOURCE 45 | #cmakedefine MDBX_USE_OFDLOCKS_AUTO 46 | #ifndef MDBX_USE_OFDLOCKS_AUTO 47 | #cmakedefine01 MDBX_USE_OFDLOCKS 48 | #endif 49 | 50 | /* Build Info */ 51 | #ifndef MDBX_BUILD_TIMESTAMP 52 | #cmakedefine MDBX_BUILD_TIMESTAMP "@MDBX_BUILD_TIMESTAMP@" 53 | #endif 54 | #ifndef MDBX_BUILD_TARGET 55 | #cmakedefine MDBX_BUILD_TARGET "@MDBX_BUILD_TARGET@" 56 | #endif 57 | #ifndef MDBX_BUILD_TYPE 58 | #cmakedefine MDBX_BUILD_TYPE "@MDBX_BUILD_TYPE@" 59 | #endif 60 | #ifndef MDBX_BUILD_COMPILER 61 | #cmakedefine MDBX_BUILD_COMPILER "@MDBX_BUILD_COMPILER@" 62 | #endif 63 | #ifndef MDBX_BUILD_FLAGS 64 | #cmakedefine MDBX_BUILD_FLAGS "@MDBX_BUILD_FLAGS@" 65 | #endif 66 | #cmakedefine MDBX_BUILD_SOURCERY @MDBX_BUILD_SOURCERY@ 67 | 68 | /* *INDENT-ON* */ 69 | /* clang-format on */ 70 | 
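Note on the template above: it is not an assertion about this repository's build output, just a minimal sketch of how CMake's configure_file() renders the two directive kinds, assuming a hypothetical configuration. A plain #cmakedefine becomes a bare #define when the corresponding CMake variable is set and a commented-out placeholder when it is not, while #cmakedefine01 always emits a #define with the value 0 or 1:

/* hypothetical fragment of a generated config.h (assumed settings, for illustration only) */
#define LTO_ENABLED                 /* #cmakedefine: variable was set in this hypothetical build   */
/* #undef MDBX_USE_VALGRIND */      /* #cmakedefine: variable was unset, so only a placeholder     */
#define MDBX_FORCE_ASSERTIONS 0     /* #cmakedefine01: always defined, here assumed OFF            */
#define MDBX_TXN_CHECKOWNER 1       /* #cmakedefine01: always defined, here assumed ON             */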
-------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_chk.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2024 Leonid Yuriev . 2 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 3 | .TH MDBX_CHK 1 "2024-03-13" "MDBX 0.12.10" 4 | .SH NAME 5 | mdbx_chk \- MDBX checking tool 6 | .SH SYNOPSIS 7 | .B mdbx_chk 8 | [\c 9 | .BR \-V ] 10 | [\c 11 | .BR \-v [ v [ v ]]] 12 | [\c 13 | .BR \-n ] 14 | [\c 15 | .BR \-q ] 16 | [\c 17 | .BR \-c ] 18 | [\c 19 | .BR \-w ] 20 | [\c 21 | .BR \-d ] 22 | [\c 23 | .BR \-i ] 24 | [\c 25 | .BI \-s \ subdb\fR] 26 | .BR \ dbpath 27 | .SH DESCRIPTION 28 | The 29 | .B mdbx_chk 30 | utility intended to check an MDBX database file. 31 | .SH OPTIONS 32 | .TP 33 | .BR \-V 34 | Write the library version number to the standard output, and exit. 35 | .TP 36 | .BR \-v 37 | Produce verbose output, including summarize space and page usage statistics. 38 | If \fB\-vv\fP is given, be more verbose, show summarized B-tree info 39 | and space allocation. 40 | If \fB\-vvv\fP is given, be more verbose, include summarized statistics 41 | of leaf B-tree pages. 42 | If \fB\-vvvv\fP is given, be even more verbose, show info of each page 43 | during B-tree traversal and basic info of each GC record. 44 | If \fB\-vvvvv\fP is given, turn maximal verbosity, display the full list 45 | of page IDs in the GC records and size of each key-value pair of database(s). 46 | .TP 47 | .BR \-q 48 | Be quiet; do not output anything even if an error was detected. 49 | .TP 50 | .BR \-c 51 | Force using cooperative mode while opening environment, i.e. don't try to open 52 | in exclusive/monopolistic mode. Only exclusive/monopolistic mode allow complete 53 | check, including full check of all meta-pages and actual size of database file. 54 | .TP 55 | .BR \-w 56 | Open environment in read-write mode and lock for writing while checking. 57 | This could be impossible if environment already used by another process(s) 58 | in an incompatible read-write mode. This allow rollback to last steady commit 59 | (in case environment was not closed properly) and then check transaction IDs 60 | of meta-pages. Otherwise, without \fB\-w\fP option environment will be 61 | opened in read-only mode. 62 | .TP 63 | .BR \-d 64 | Disable page-by-page traversal of B-tree. In this case, without B-tree 65 | traversal, it is unable to check for lost-unused pages nor for double-used 66 | pages. 67 | .TP 68 | .BR \-i 69 | Ignore wrong order errors, which will likely false-positive if custom 70 | comparator(s) was used. 71 | .TP 72 | .BR \-s \ subdb 73 | Verify and show info only for a specific subdatabase. 74 | .TP 75 | .BR \-0 | \-1 | \-2 76 | Using specific meta-page 0, or 2 for checking. 77 | .TP 78 | .BR \-t 79 | Turn to a specified meta-page on successful check. 80 | .TP 81 | .BR \-T 82 | Turn to a specified meta-page EVEN ON UNSUCCESSFUL CHECK! 83 | .TP 84 | .BR \-u 85 | Warms up the DB before checking via notifying OS kernel of subsequent access to the database pages. 86 | .TP 87 | .BR \-U 88 | Warms up the DB before checking, notifying the OS kernel of subsequent access to the database pages, 89 | then forcibly loads ones by sequential access and tries to lock database pages in memory. 90 | .TP 91 | .BR \-n 92 | Open MDBX environment(s) which do not use subdirectories. 93 | This is legacy option. For now MDBX handles this automatically. 94 | 95 | .SH DIAGNOSTICS 96 | Exit status is zero if no errors occur. 
Errors result in a non-zero exit status 97 | and a diagnostic message being written to standard error 98 | if no quiet mode was requested. 99 | .SH "SEE ALSO" 100 | .BR mdbx_stat (1), 101 | .BR mdbx_copy (1), 102 | .BR mdbx_dump (1), 103 | .BR mdbx_load (1) 104 | .BR mdbx_drop (1) 105 | .SH AUTHOR 106 | Leonid Yuriev 107 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_copy.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2024 Leonid Yuriev . 2 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 3 | .\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 5 | .TH MDBX_COPY 1 "2024-03-13" "MDBX 0.12.10" 6 | .SH NAME 7 | mdbx_copy \- MDBX environment copy tool 8 | .SH SYNOPSIS 9 | .B mdbx_copy 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-c ] 16 | [\c 17 | .BR \-n ] 18 | .B src_path 19 | [\c 20 | .BR dest_path ] 21 | .SH DESCRIPTION 22 | The 23 | .B mdbx_copy 24 | utility copies an MDBX environment. The environment can 25 | be copied regardless of whether it is currently in use. 26 | No lockfile is created, since it gets recreated at need. 27 | 28 | If 29 | .I dest_path 30 | is specified it must be the path of an empty directory 31 | for storing the backup. Otherwise, the backup will be 32 | written to stdout. 33 | 34 | .SH OPTIONS 35 | .TP 36 | .BR \-V 37 | Write the library version number to the standard output, and exit. 38 | .TP 39 | .BR \-q 40 | Be quiet. 41 | .TP 42 | .BR \-c 43 | Compact while copying. Only current data pages will be copied; freed 44 | or unused pages will be omitted from the copy. This option will 45 | slow down the backup process as it is more CPU-intensive. 46 | Currently it fails if the environment has suffered a page leak. 47 | .TP 48 | .BR \-u 49 | Warms up the DB before copying via notifying OS kernel of subsequent access to the database pages. 50 | .TP 51 | .BR \-U 52 | Warms up the DB before copying, notifying the OS kernel of subsequent access to the database pages, 53 | then forcibly loads ones by sequential access and tries to lock database pages in memory. 54 | .TP 55 | .BR \-n 56 | Open MDBX environment(s) which do not use subdirectories. 57 | This is legacy option. For now MDBX handles this automatically. 58 | 59 | .SH DIAGNOSTICS 60 | Exit status is zero if no errors occur. 61 | Errors result in a non-zero exit status and 62 | a diagnostic message being written to standard error. 63 | .SH CAVEATS 64 | This utility can trigger significant file size growth if run 65 | in parallel with write transactions, because pages which they 66 | free during copying cannot be reused until the copy is done. 67 | .SH "SEE ALSO" 68 | .BR mdbx_dump (1), 69 | .BR mdbx_chk (1), 70 | .BR mdbx_stat (1), 71 | .BR mdbx_load (1) 72 | .BR mdbx_drop (1) 73 | .SH AUTHOR 74 | Howard Chu of Symas Corporation , 75 | Leonid Yuriev 76 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_drop.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2021-2024 Leonid Yuriev . 2 | .\" Copyright 2014-2021 Howard Chu, Symas Corp. All Rights Reserved. 3 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 
4 | .TH MDBX_DROP 1 "2024-03-13" "MDBX 0.12.10" 5 | .SH NAME 6 | mdbx_drop \- MDBX database delete tool 7 | .SH SYNOPSIS 8 | .B mdbx_drop 9 | [\c 10 | .BR \-V ] 11 | [\c 12 | .BR \-d ] 13 | [\c 14 | .BI \-s \ subdb\fR] 15 | [\c 16 | .BR \-n ] 17 | .BR \ dbpath 18 | .SH DESCRIPTION 19 | The 20 | .B mdbx_drop 21 | utility empties or deletes a database in the specified 22 | environment. 23 | .SH OPTIONS 24 | .TP 25 | .BR \-V 26 | Write the library version number to the standard output, and exit. 27 | .TP 28 | .BR \-d 29 | Delete the specified database, don't just empty it. 30 | .TP 31 | .BR \-s \ subdb 32 | Operate on a specific subdatabase. If no database is specified, only the main database is dropped. 33 | .TP 34 | .BR \-n 35 | Dump an MDBX database which does not use subdirectories. 36 | This is legacy option. For now MDBX handles this automatically. 37 | 38 | .SH DIAGNOSTICS 39 | Exit status is zero if no errors occur. 40 | Errors result in a non-zero exit status and 41 | a diagnostic message being written to standard error. 42 | .SH "SEE ALSO" 43 | .BR mdbx_load (1), 44 | .BR mdbx_copy (1), 45 | .BR mdbx_chk (1), 46 | .BR mdbx_stat (1) 47 | .SH AUTHOR 48 | Howard Chu of Symas Corporation 49 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_dump.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2024 Leonid Yuriev . 2 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 3 | .\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 5 | .TH MDBX_DUMP 1 "2024-03-13" "MDBX 0.12.10" 6 | .SH NAME 7 | mdbx_dump \- MDBX environment export tool 8 | .SH SYNOPSIS 9 | .B mdbx_dump 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BI \-f \ file\fR] 16 | [\c 17 | .BR \-l ] 18 | [\c 19 | .BR \-p ] 20 | [\c 21 | .BR \-a \ | 22 | .BI \-s \ subdb\fR] 23 | [\c 24 | .BR \-r ] 25 | [\c 26 | .BR \-n ] 27 | .BR \ dbpath 28 | .SH DESCRIPTION 29 | The 30 | .B mdbx_dump 31 | utility reads a database and writes its contents to the 32 | standard output using a portable flat-text format 33 | understood by the 34 | .BR mdbx_load (1) 35 | utility. 36 | .SH OPTIONS 37 | .TP 38 | .BR \-V 39 | Write the library version number to the standard output, and exit. 40 | .TP 41 | .BR \-q 42 | Be quiet. 43 | .TP 44 | .BR \-f \ file 45 | Write to the specified file instead of to the standard output. 46 | .TP 47 | .BR \-l 48 | List the databases stored in the environment. Just the 49 | names will be listed, no data will be output. 50 | .TP 51 | .BR \-p 52 | If characters in either the key or data items are printing characters (as 53 | defined by isprint(3)), output them directly. This option permits users to 54 | use standard text editors and tools to modify the contents of databases. 55 | 56 | Note: different systems may have different notions about what characters 57 | are considered printing characters, and databases dumped in this manner may 58 | be less portable to external systems. 59 | .TP 60 | .BR \-a 61 | Dump all of the subdatabases in the environment. 62 | .TP 63 | .BR \-s \ subdb 64 | Dump a specific subdatabase. If no database is specified, only the main database is dumped. 65 | .TP 66 | .BR \-r 67 | Rescure mode. Ignore some errors to dump corrupted DB. 68 | .TP 69 | .BR \-u 70 | Warms up the DB before dumping via notifying OS kernel of subsequent access to the database pages. 
71 | .TP 72 | .BR \-U 73 | Warms up the DB before dumping, notifying the OS kernel of subsequent access to the database pages, 74 | then forcibly loads ones by sequential access and tries to lock database pages in memory. 75 | .TP 76 | .BR \-n 77 | Dump an MDBX database which does not use subdirectories. 78 | This is legacy option. For now MDBX handles this automatically. 79 | 80 | .SH DIAGNOSTICS 81 | Exit status is zero if no errors occur. 82 | Errors result in a non-zero exit status and 83 | a diagnostic message being written to standard error. 84 | 85 | Dumping and reloading databases that use user-defined comparison functions 86 | will result in new databases that use the default comparison functions. 87 | \fBIn this case it is quite likely that the reloaded database will be 88 | damaged beyond repair permitting neither record storage nor retrieval.\fP 89 | 90 | The only available workaround is to modify the source for the 91 | .BR mdbx_load (1) 92 | utility to load the database using the correct comparison functions. 93 | .SH "SEE ALSO" 94 | .BR mdbx_load (1), 95 | .BR mdbx_copy (1), 96 | .BR mdbx_chk (1), 97 | .BR mdbx_stat (1) 98 | .BR mdbx_drop (1) 99 | .SH AUTHOR 100 | Howard Chu of Symas Corporation , 101 | Leonid Yuriev 102 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_load.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2024 Leonid Yuriev . 2 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 3 | .\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 5 | .TH MDBX_LOAD 1 "2024-03-13" "MDBX 0.12.10" 6 | .SH NAME 7 | mdbx_load \- MDBX environment import tool 8 | .SH SYNOPSIS 9 | .B mdbx_load 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-a ] 16 | [\c 17 | .BI \-f \ file\fR] 18 | [\c 19 | .BI \-s \ subdb\fR] 20 | [\c 21 | .BR \-N ] 22 | [\c 23 | .BR \-T ] 24 | [\c 25 | .BR \-r ] 26 | [\c 27 | .BR \-n ] 28 | .BR \ dbpath 29 | .SH DESCRIPTION 30 | The 31 | .B mdbx_load 32 | utility reads from the standard input and loads it into the 33 | MDBX environment 34 | .BR dbpath . 35 | 36 | The input to 37 | .B mdbx_load 38 | must be in the output format specified by the 39 | .BR mdbx_dump (1) 40 | utility or as specified by the 41 | .B -T 42 | option below. 43 | 44 | A simple escape mechanism, where newline and backslash (\\) characters are special, is 45 | applied to the text input. Newline characters are interpreted as record separators. 46 | Backslash characters in the text will be interpreted in one of two ways: If the backslash 47 | character precedes another backslash character, the pair will be interpreted as a literal 48 | backslash. If the backslash character precedes any other character, the two characters 49 | following the backslash will be interpreted as a hexadecimal specification of a single 50 | character; for example, \\0a is a newline character in the ASCII character set. 51 | 52 | For this reason, any backslash or newline characters that naturally occur in the text 53 | input must be escaped to avoid misinterpretation by 54 | .BR mdbx_load . 55 | 56 | .SH OPTIONS 57 | .TP 58 | .BR \-V 59 | Write the library version number to the standard output, and exit. 60 | .TP 61 | .BR \-q 62 | Be quiet. 63 | .TP 64 | .BR \-a 65 | Append all records in the order they appear in the input. 
The input is assumed to already be 66 | in correctly sorted order and no sorting or checking for redundant values will be performed. 67 | This option must be used to reload data that was produced by running 68 | .B mdbx_dump 69 | on a database that uses custom compare functions. 70 | .TP 71 | .BR \-f \ file 72 | Read from the specified file instead of from the standard input. 73 | .TP 74 | .BR \-s \ subdb 75 | Load a specific subdatabase. If no database is specified, data is loaded into the main database. 76 | .TP 77 | .BR \-N 78 | Don't overwrite existing records when loading into an already existing database; just skip them. 79 | .TP 80 | .BR \-T 81 | Load data from simple text files. The input must be paired lines of text, where the first 82 | line of the pair is the key item, and the second line of the pair is its corresponding 83 | data item. 84 | .TP 85 | .BR \-r 86 | Rescure mode. Ignore errors to load corrupted DB dump. 87 | .TP 88 | .BR \-n 89 | Load an MDBX database which does not use subdirectories. 90 | This is legacy option. For now MDBX handles this automatically. 91 | 92 | .SH DIAGNOSTICS 93 | Exit status is zero if no errors occur. 94 | Errors result in a non-zero exit status and 95 | a diagnostic message being written to standard error. 96 | 97 | .SH "SEE ALSO" 98 | .BR mdbx_dump (1), 99 | .BR mdbx_chk (1), 100 | .BR mdbx_stat (1), 101 | .BR mdbx_copy (1) 102 | .BR mdbx_drop (1) 103 | .SH AUTHOR 104 | Howard Chu of Symas Corporation , 105 | Leonid Yuriev 106 | -------------------------------------------------------------------------------- /mdbx-sys/libmdbx/man1/mdbx_stat.1: -------------------------------------------------------------------------------- 1 | .\" Copyright 2015-2024 Leonid Yuriev . 2 | .\" Copyright 2015,2016 Peter-Service R&D LLC . 3 | .\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. 4 | .\" Copying restrictions apply. See COPYRIGHT/LICENSE. 5 | .TH MDBX_STAT 1 "2024-03-13" "MDBX 0.12.10" 6 | .SH NAME 7 | mdbx_stat \- MDBX environment status tool 8 | .SH SYNOPSIS 9 | .B mdbx_stat 10 | [\c 11 | .BR \-V ] 12 | [\c 13 | .BR \-q ] 14 | [\c 15 | .BR \-p ] 16 | [\c 17 | .BR \-e ] 18 | [\c 19 | .BR \-f [ f [ f ]]] 20 | [\c 21 | .BR \-r [ r ]] 22 | [\c 23 | .BR \-a \ | 24 | .BI \-s \ subdb\fR] 25 | .BR \ dbpath 26 | [\c 27 | .BR \-n ] 28 | .SH DESCRIPTION 29 | The 30 | .B mdbx_stat 31 | utility displays the status of an MDBX environment. 32 | .SH OPTIONS 33 | .TP 34 | .BR \-V 35 | Write the library version number to the standard output, and exit. 36 | .TP 37 | .BR \-q 38 | Be quiet. 39 | .TP 40 | .BR \-p 41 | Display overall statistics of page operations of all (running, completed 42 | and aborted) transactions in the current multi-process session (since the 43 | first process opened the database after everyone had previously closed it). 44 | .TP 45 | .BR \-e 46 | Display information about the database environment. 47 | .TP 48 | .BR \-f 49 | Display information about the environment GC. 50 | If \fB\-ff\fP is given, summarize each GC/freelist entry. 51 | If \fB\-fff\fP is given, display the full list of page IDs in the GC/freelist. 52 | .TP 53 | .BR \-r 54 | Display information about the environment reader table. 55 | Shows the process ID, thread ID, and transaction ID for each active 56 | reader slot. The process ID and transaction ID are in decimal, the 57 | thread ID is in hexadecimal. The transaction ID is displayed as "-" 58 | if the reader does not currently have a read transaction open. 
59 | If \fB\-rr\fP is given, check for stale entries in the reader 60 | table and clear them. The reader table will be printed again 61 | after the check is performed. 62 | .TP 63 | .BR \-a 64 | Display the status of all of the subdatabases in the environment. 65 | .TP 66 | .BR \-s \ subdb 67 | Display the status of a specific subdatabase. 68 | .TP 69 | .BR \-n 70 | Display the status of an MDBX database which does not use subdirectories. 71 | This is legacy option. For now MDBX handles this automatically 72 | for existing databases, but may be required while creating new. 73 | 74 | .SH DIAGNOSTICS 75 | Exit status is zero if no errors occur. 76 | Errors result in a non-zero exit status and 77 | a diagnostic message being written to standard error. 78 | .SH "SEE ALSO" 79 | .BR mdbx_chk (1), 80 | .BR mdbx_copy (1), 81 | .BR mdbx_dump (1), 82 | .BR mdbx_load (1) 83 | .BR mdbx_drop (1) 84 | .SH AUTHOR 85 | Howard Chu of Symas Corporation , 86 | Leonid Yuriev 87 | -------------------------------------------------------------------------------- /mdbx-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | #![allow(non_upper_case_globals)] 3 | #![allow(non_camel_case_types)] 4 | #![allow(non_snake_case)] 5 | #![allow(clippy::all)] 6 | 7 | include!(concat!(env!("OUT_DIR"), "/bindings.rs")); 8 | -------------------------------------------------------------------------------- /src/codec.rs: -------------------------------------------------------------------------------- 1 | use crate::{Error, TransactionKind, error::mdbx_result}; 2 | use derive_more::{Deref, DerefMut, Display}; 3 | use std::{borrow::Cow, slice}; 4 | use thiserror::Error; 5 | 6 | /// Implement this to be able to decode data values 7 | pub trait Decodable<'tx> { 8 | fn decode(data_val: &[u8]) -> Result 9 | where 10 | Self: Sized; 11 | 12 | #[doc(hidden)] 13 | unsafe fn decode_val( 14 | _: *const ffi::MDBX_txn, 15 | data_val: &ffi::MDBX_val, 16 | ) -> Result 17 | where 18 | Self: Sized, 19 | { 20 | let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) }; 21 | 22 | Decodable::decode(s) 23 | } 24 | } 25 | 26 | impl<'tx> Decodable<'tx> for Cow<'tx, [u8]> { 27 | fn decode(_: &[u8]) -> Result { 28 | unreachable!() 29 | } 30 | 31 | #[doc(hidden)] 32 | unsafe fn decode_val( 33 | txn: *const ffi::MDBX_txn, 34 | data_val: &ffi::MDBX_val, 35 | ) -> Result { 36 | let is_dirty = 37 | (!K::ONLY_CLEAN) && mdbx_result(unsafe { ffi::mdbx_is_dirty(txn, data_val.iov_base) })?; 38 | 39 | let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) }; 40 | 41 | Ok(if is_dirty { 42 | Cow::Owned(s.to_vec()) 43 | } else { 44 | Cow::Borrowed(s) 45 | }) 46 | } 47 | } 48 | 49 | #[cfg(feature = "lifetimed-bytes")] 50 | impl<'tx> Decodable<'tx> for lifetimed_bytes::Bytes<'tx> { 51 | fn decode(_: &[u8]) -> Result { 52 | unreachable!() 53 | } 54 | 55 | #[doc(hidden)] 56 | unsafe fn decode_val( 57 | txn: *const ffi::MDBX_txn, 58 | data_val: &ffi::MDBX_val, 59 | ) -> Result { 60 | unsafe { Cow::<'tx, [u8]>::decode_val::(txn, data_val).map(From::from) } 61 | } 62 | } 63 | 64 | impl Decodable<'_> for Vec { 65 | fn decode(data_val: &[u8]) -> Result 66 | where 67 | Self: Sized, 68 | { 69 | Ok(data_val.to_vec()) 70 | } 71 | } 72 | 73 | impl Decodable<'_> for () { 74 | fn decode(_: &[u8]) -> Result { 75 | Ok(()) 76 | } 77 | 78 | unsafe fn decode_val( 79 | _: *const ffi::MDBX_txn, 80 | _: &ffi::MDBX_val, 81 | ) -> Result { 82 | Ok(()) 83 | } 84 
| } 85 | 86 | /// If you don't need the data itself, just its length. 87 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deref, DerefMut)] 88 | pub struct ObjectLength(pub usize); 89 | 90 | impl Decodable<'_> for ObjectLength { 91 | fn decode(data_val: &[u8]) -> Result 92 | where 93 | Self: Sized, 94 | { 95 | Ok(Self(data_val.len())) 96 | } 97 | } 98 | 99 | impl Decodable<'_> for [u8; LEN] { 100 | fn decode(data_val: &[u8]) -> Result 101 | where 102 | Self: Sized, 103 | { 104 | #[derive(Clone, Debug, Display, Error)] 105 | struct InvalidSize { 106 | got: usize, 107 | } 108 | 109 | if data_val.len() != LEN { 110 | return Err(Error::DecodeError(Box::new(InvalidSize:: { 111 | got: data_val.len(), 112 | }))); 113 | } 114 | let mut a = [0; LEN]; 115 | a[..].copy_from_slice(data_val); 116 | Ok(a) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/database.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | Mode, ReadWriteOptions, SyncMode, Transaction, TransactionKind, 3 | error::{Error, Result, mdbx_result}, 4 | table::Table, 5 | transaction::{RO, RW}, 6 | }; 7 | use libc::c_uint; 8 | use mem::size_of; 9 | use sealed::sealed; 10 | #[cfg(windows)] 11 | use std::ffi::OsStr; 12 | #[cfg(unix)] 13 | use std::os::unix::ffi::OsStrExt; 14 | use std::{ 15 | ffi::CString, 16 | fmt, 17 | fmt::Debug, 18 | marker::PhantomData, 19 | mem, 20 | ops::Deref, 21 | path::Path, 22 | ptr, result, 23 | sync::mpsc::{SyncSender, sync_channel}, 24 | thread::sleep, 25 | time::Duration, 26 | }; 27 | 28 | #[cfg(windows)] 29 | /// Adding a 'missing' trait from windows OsStrExt 30 | trait OsStrExtLmdb { 31 | fn as_bytes(&self) -> &[u8]; 32 | } 33 | #[cfg(windows)] 34 | impl OsStrExtLmdb for OsStr { 35 | fn as_bytes(&self) -> &[u8] { 36 | self.to_str().unwrap().as_bytes() 37 | } 38 | } 39 | 40 | #[sealed] 41 | pub trait DatabaseKind: Debug + 'static { 42 | const EXTRA_FLAGS: ffi::MDBX_env_flags_t; 43 | } 44 | 45 | #[derive(Debug)] 46 | pub struct NoWriteMap; 47 | #[derive(Debug)] 48 | pub struct WriteMap; 49 | 50 | #[sealed] 51 | impl DatabaseKind for NoWriteMap { 52 | const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_ENV_DEFAULTS; 53 | } 54 | #[sealed] 55 | impl DatabaseKind for WriteMap { 56 | const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_WRITEMAP; 57 | } 58 | 59 | #[derive(Copy, Clone, Debug)] 60 | pub struct TxnPtr(pub *mut ffi::MDBX_txn); 61 | unsafe impl Send for TxnPtr {} 62 | 63 | #[derive(Copy, Clone, Debug)] 64 | pub struct DbPtr(pub *mut ffi::MDBX_env); 65 | unsafe impl Send for DbPtr {} 66 | unsafe impl Sync for DbPtr {} 67 | 68 | pub(crate) enum TxnManagerMessage { 69 | Begin { 70 | parent: TxnPtr, 71 | flags: ffi::MDBX_txn_flags_t, 72 | sender: SyncSender>, 73 | }, 74 | Abort { 75 | tx: TxnPtr, 76 | sender: SyncSender>, 77 | }, 78 | Commit { 79 | tx: TxnPtr, 80 | sender: SyncSender>, 81 | }, 82 | } 83 | 84 | /// Supports multiple tables, all residing in the same shared-memory map. 
85 | pub struct Database 86 | where 87 | E: DatabaseKind, 88 | { 89 | inner: DbPtr, 90 | pub(crate) txn_manager: Option>, 91 | _marker: PhantomData, 92 | } 93 | 94 | #[derive(Clone, Default)] 95 | pub struct DatabaseOptions { 96 | pub permissions: Option, 97 | pub max_readers: Option, 98 | pub max_tables: Option, 99 | pub rp_augment_limit: Option, 100 | pub loose_limit: Option, 101 | pub dp_reserve_limit: Option, 102 | pub txn_dp_limit: Option, 103 | pub spill_max_denominator: Option, 104 | pub spill_min_denominator: Option, 105 | pub page_size: Option, 106 | pub no_sub_dir: bool, 107 | pub exclusive: bool, 108 | pub accede: bool, 109 | pub mode: Mode, 110 | pub no_rdahead: bool, 111 | pub no_meminit: bool, 112 | pub coalesce: bool, 113 | pub liforeclaim: bool, 114 | } 115 | 116 | impl DatabaseOptions { 117 | pub(crate) fn make_flags(&self) -> ffi::MDBX_env_flags_t { 118 | let mut flags = 0; 119 | 120 | if self.no_sub_dir { 121 | flags |= ffi::MDBX_NOSUBDIR; 122 | } 123 | 124 | if self.exclusive { 125 | flags |= ffi::MDBX_EXCLUSIVE; 126 | } 127 | 128 | if self.accede { 129 | flags |= ffi::MDBX_ACCEDE; 130 | } 131 | 132 | match self.mode { 133 | Mode::ReadOnly => { 134 | flags |= ffi::MDBX_RDONLY; 135 | } 136 | Mode::ReadWrite(ReadWriteOptions { sync_mode, .. }) => { 137 | flags |= match sync_mode { 138 | SyncMode::Durable => ffi::MDBX_SYNC_DURABLE, 139 | SyncMode::NoMetaSync => ffi::MDBX_NOMETASYNC, 140 | SyncMode::SafeNoSync => ffi::MDBX_SAFE_NOSYNC, 141 | SyncMode::UtterlyNoSync => ffi::MDBX_UTTERLY_NOSYNC, 142 | }; 143 | } 144 | } 145 | 146 | if self.no_rdahead { 147 | flags |= ffi::MDBX_NORDAHEAD; 148 | } 149 | 150 | if self.no_meminit { 151 | flags |= ffi::MDBX_NOMEMINIT; 152 | } 153 | 154 | if self.coalesce { 155 | flags |= ffi::MDBX_COALESCE; 156 | } 157 | 158 | if self.liforeclaim { 159 | flags |= ffi::MDBX_LIFORECLAIM; 160 | } 161 | 162 | flags |= ffi::MDBX_NOTLS; 163 | 164 | flags 165 | } 166 | } 167 | 168 | impl Database 169 | where 170 | E: DatabaseKind, 171 | { 172 | /// Open a database. 173 | pub fn open(path: impl AsRef) -> Result> { 174 | Self::open_with_options(path, Default::default()) 175 | } 176 | 177 | pub fn open_with_options( 178 | path: impl AsRef, 179 | options: DatabaseOptions, 180 | ) -> Result> { 181 | let mut db: *mut ffi::MDBX_env = ptr::null_mut(); 182 | unsafe { 183 | mdbx_result(ffi::mdbx_env_create(&mut db))?; 184 | if let Err(e) = (|| { 185 | if let Mode::ReadWrite(ReadWriteOptions { 186 | min_size, 187 | max_size, 188 | growth_step, 189 | shrink_threshold, 190 | .. 
191 | }) = options.mode 192 | { 193 | mdbx_result(ffi::mdbx_env_set_geometry( 194 | db, 195 | min_size.unwrap_or(-1), 196 | -1, 197 | max_size.unwrap_or(-1), 198 | growth_step.unwrap_or(-1), 199 | shrink_threshold.unwrap_or(-1), 200 | match options.page_size { 201 | None => -1, 202 | Some(PageSize::MinimalAcceptable) => 0, 203 | Some(PageSize::Set(size)) => size as isize, 204 | }, 205 | ))?; 206 | } 207 | for (opt, v) in [ 208 | (ffi::MDBX_opt_max_db, options.max_tables), 209 | (ffi::MDBX_opt_rp_augment_limit, options.rp_augment_limit), 210 | (ffi::MDBX_opt_loose_limit, options.loose_limit), 211 | (ffi::MDBX_opt_dp_reserve_limit, options.dp_reserve_limit), 212 | (ffi::MDBX_opt_txn_dp_limit, options.txn_dp_limit), 213 | ( 214 | ffi::MDBX_opt_spill_max_denominator, 215 | options.spill_max_denominator, 216 | ), 217 | ( 218 | ffi::MDBX_opt_spill_min_denominator, 219 | options.spill_min_denominator, 220 | ), 221 | ] { 222 | if let Some(v) = v { 223 | mdbx_result(ffi::mdbx_env_set_option(db, opt, v))?; 224 | } 225 | } 226 | 227 | let path = match CString::new(path.as_ref().as_os_str().as_bytes()) { 228 | Ok(path) => path, 229 | Err(..) => return Err(crate::Error::Invalid), 230 | }; 231 | mdbx_result(ffi::mdbx_env_open( 232 | db, 233 | path.as_ptr(), 234 | options.make_flags() | E::EXTRA_FLAGS, 235 | options.permissions.unwrap_or(0o644), 236 | ))?; 237 | 238 | Ok(()) 239 | })() { 240 | ffi::mdbx_env_close_ex(db, false); 241 | 242 | return Err(e); 243 | } 244 | } 245 | 246 | let mut db = Database { 247 | inner: DbPtr(db), 248 | txn_manager: None, 249 | _marker: PhantomData, 250 | }; 251 | 252 | #[allow(clippy::redundant_locals)] 253 | if let Mode::ReadWrite { .. } = options.mode { 254 | let (tx, rx) = std::sync::mpsc::sync_channel(0); 255 | let e = db.inner; 256 | std::thread::spawn(move || { 257 | loop { 258 | match rx.recv() { 259 | Ok(msg) => match msg { 260 | TxnManagerMessage::Begin { 261 | parent, 262 | flags, 263 | sender, 264 | } => { 265 | let e = e; 266 | let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); 267 | sender 268 | .send( 269 | mdbx_result(unsafe { 270 | ffi::mdbx_txn_begin_ex( 271 | e.0, 272 | parent.0, 273 | flags, 274 | &mut txn, 275 | ptr::null_mut(), 276 | ) 277 | }) 278 | .map(|_| TxnPtr(txn)), 279 | ) 280 | .unwrap() 281 | } 282 | TxnManagerMessage::Abort { tx, sender } => { 283 | sender 284 | .send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })) 285 | .unwrap(); 286 | } 287 | TxnManagerMessage::Commit { tx, sender } => { 288 | sender 289 | .send(mdbx_result(unsafe { 290 | ffi::mdbx_txn_commit_ex(tx.0, ptr::null_mut()) 291 | })) 292 | .unwrap(); 293 | } 294 | }, 295 | Err(_) => return, 296 | } 297 | } 298 | }); 299 | 300 | db.txn_manager = Some(tx); 301 | } 302 | 303 | Ok(db) 304 | } 305 | 306 | /// Returns a raw pointer to the underlying MDBX database. 307 | /// 308 | /// The caller **must** ensure that the pointer is not dereferenced after the lifetime of the 309 | /// database. 310 | pub fn ptr(&self) -> DbPtr { 311 | self.inner 312 | } 313 | 314 | /// Create a read-only transaction for use with the database. 315 | pub fn begin_ro_txn(&self) -> Result> { 316 | Transaction::new(self) 317 | } 318 | 319 | /// Create a read-write transaction for use with the database. This method will block while 320 | /// there are any other read-write transactions open on the database. 
321 | pub fn begin_rw_txn(&self) -> Result> { 322 | let sender = self.txn_manager.as_ref().ok_or(Error::Access)?; 323 | let txn = loop { 324 | let (tx, rx) = sync_channel(0); 325 | sender 326 | .send(TxnManagerMessage::Begin { 327 | parent: TxnPtr(ptr::null_mut()), 328 | flags: RW::OPEN_FLAGS, 329 | sender: tx, 330 | }) 331 | .unwrap(); 332 | let res = rx.recv().unwrap(); 333 | if let Err(Error::Busy) = &res { 334 | sleep(Duration::from_millis(250)); 335 | continue; 336 | } 337 | 338 | break res; 339 | }?; 340 | Ok(Transaction::new_from_ptr(self, txn.0)) 341 | } 342 | 343 | /// Flush the database data buffers to disk. 344 | pub fn sync(&self, force: bool) -> Result { 345 | mdbx_result(unsafe { ffi::mdbx_env_sync_ex(self.ptr().0, force, false) }) 346 | } 347 | 348 | /// Retrieves statistics about this database. 349 | pub fn stat(&self) -> Result { 350 | unsafe { 351 | let mut stat = Stat::new(); 352 | mdbx_result(ffi::mdbx_env_stat_ex( 353 | self.ptr().0, 354 | ptr::null(), 355 | stat.mdb_stat(), 356 | size_of::(), 357 | ))?; 358 | Ok(stat) 359 | } 360 | } 361 | 362 | /// Retrieves info about this database. 363 | pub fn info(&self) -> Result { 364 | unsafe { 365 | let mut info = Info(mem::zeroed()); 366 | mdbx_result(ffi::mdbx_env_info_ex( 367 | self.ptr().0, 368 | ptr::null(), 369 | &mut info.0, 370 | size_of::(), 371 | ))?; 372 | Ok(info) 373 | } 374 | } 375 | 376 | /// Retrieves the total number of pages on the freelist. 377 | /// 378 | /// Along with [Database::info()], this can be used to calculate the exact number 379 | /// of used pages as well as free pages in this database. 380 | /// 381 | /// ``` 382 | /// # use libmdbx::Database; 383 | /// # use libmdbx::NoWriteMap; 384 | /// let dir = tempfile::tempdir().unwrap(); 385 | /// let db = Database::::open(&dir).unwrap(); 386 | /// let info = db.info().unwrap(); 387 | /// let stat = db.stat().unwrap(); 388 | /// let freelist = db.freelist().unwrap(); 389 | /// let last_pgno = info.last_pgno() + 1; // pgno is 0 based. 390 | /// let total_pgs = info.map_size() / stat.page_size() as usize; 391 | /// let pgs_in_use = last_pgno - freelist; 392 | /// let pgs_free = total_pgs - pgs_in_use; 393 | /// ``` 394 | /// 395 | /// Note: 396 | /// 397 | /// * MDBX stores all the freelists in the designated table 0 in each database, 398 | /// and the freelist count is stored at the beginning of the value as 32-bit integer 399 | /// in the native byte order. 400 | /// 401 | /// * It will create a read transaction to traverse the freelist table. 402 | pub fn freelist(&self) -> Result { 403 | let mut freelist: usize = 0; 404 | let txn = self.begin_ro_txn()?; 405 | let table = Table::freelist_table(); 406 | let cursor = txn.cursor(&table)?; 407 | 408 | for result in cursor { 409 | let (_key, value) = result?; 410 | if value.len() < mem::size_of::() { 411 | return Err(Error::Corrupted); 412 | } 413 | 414 | freelist += 415 | u32::from_ne_bytes(value.deref()[..mem::size_of::()].try_into().unwrap()) 416 | as usize; 417 | } 418 | 419 | Ok(freelist) 420 | } 421 | } 422 | 423 | /// Database statistics. 424 | /// 425 | /// Contains information about the size and layout of an MDBX database or table. 426 | #[repr(transparent)] 427 | pub struct Stat(ffi::MDBX_stat); 428 | 429 | impl Stat { 430 | /// Create a new Stat with zero'd inner struct `ffi::MDB_stat`. 431 | pub(crate) fn new() -> Stat { 432 | unsafe { Stat(mem::zeroed()) } 433 | } 434 | 435 | /// Returns a mut pointer to `ffi::MDB_stat`. 
436 | pub(crate) fn mdb_stat(&mut self) -> *mut ffi::MDBX_stat { 437 | &mut self.0 438 | } 439 | } 440 | 441 | impl Stat { 442 | /// Size of a table page. This is the same for all tables in the database. 443 | #[inline] 444 | pub const fn page_size(&self) -> u32 { 445 | self.0.ms_psize 446 | } 447 | 448 | /// Depth (height) of the B-tree. 449 | #[inline] 450 | pub const fn depth(&self) -> u32 { 451 | self.0.ms_depth 452 | } 453 | 454 | /// Number of internal (non-leaf) pages. 455 | #[inline] 456 | pub const fn branch_pages(&self) -> usize { 457 | self.0.ms_branch_pages as usize 458 | } 459 | 460 | /// Number of leaf pages. 461 | #[inline] 462 | pub const fn leaf_pages(&self) -> usize { 463 | self.0.ms_leaf_pages as usize 464 | } 465 | 466 | /// Number of overflow pages. 467 | #[inline] 468 | pub const fn overflow_pages(&self) -> usize { 469 | self.0.ms_overflow_pages as usize 470 | } 471 | 472 | /// Number of data items. 473 | #[inline] 474 | pub const fn entries(&self) -> usize { 475 | self.0.ms_entries as usize 476 | } 477 | 478 | /// Total size in bytes. 479 | #[inline] 480 | pub const fn total_size(&self) -> u64 { 481 | (self.leaf_pages() + self.branch_pages() + self.overflow_pages()) as u64 482 | * self.page_size() as u64 483 | } 484 | } 485 | 486 | #[repr(transparent)] 487 | pub struct GeometryInfo(ffi::MDBX_envinfo__bindgen_ty_1); 488 | 489 | impl GeometryInfo { 490 | pub fn min(&self) -> u64 { 491 | self.0.lower 492 | } 493 | } 494 | 495 | /// Database information. 496 | /// 497 | /// Contains database information about the map size, readers, last txn id etc. 498 | #[repr(transparent)] 499 | pub struct Info(ffi::MDBX_envinfo); 500 | 501 | impl Info { 502 | pub fn geometry(&self) -> GeometryInfo { 503 | GeometryInfo(self.0.mi_geo) 504 | } 505 | 506 | /// Size of memory map. 507 | #[inline] 508 | pub fn map_size(&self) -> usize { 509 | self.0.mi_mapsize as usize 510 | } 511 | 512 | /// Last used page number 513 | #[inline] 514 | pub fn last_pgno(&self) -> usize { 515 | self.0.mi_last_pgno as usize 516 | } 517 | 518 | /// Last transaction ID 519 | #[inline] 520 | pub fn last_txnid(&self) -> usize { 521 | self.0.mi_recent_txnid as usize 522 | } 523 | 524 | /// Max reader slots in the database 525 | #[inline] 526 | pub fn max_readers(&self) -> usize { 527 | self.0.mi_maxreaders as usize 528 | } 529 | 530 | /// Max reader slots used in the database 531 | #[inline] 532 | pub fn num_readers(&self) -> usize { 533 | self.0.mi_numreaders as usize 534 | } 535 | } 536 | 537 | impl fmt::Debug for Database 538 | where 539 | E: DatabaseKind, 540 | { 541 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 542 | f.debug_struct("Database").finish() 543 | } 544 | } 545 | 546 | impl Drop for Database 547 | where 548 | E: DatabaseKind, 549 | { 550 | fn drop(&mut self) { 551 | unsafe { 552 | ffi::mdbx_env_close_ex(self.inner.0, false); 553 | } 554 | } 555 | } 556 | 557 | #[derive(Clone, Debug, PartialEq, Eq)] 558 | pub enum PageSize { 559 | MinimalAcceptable, 560 | Set(usize), 561 | } 562 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use libc::c_int; 2 | use std::{ffi::CStr, fmt, result, str}; 3 | 4 | /// An MDBX error kind. 
5 | #[derive(Debug)] 6 | pub enum Error { 7 | KeyExist, 8 | NotFound, 9 | NoData, 10 | PageNotFound, 11 | Corrupted, 12 | Panic, 13 | VersionMismatch, 14 | Invalid, 15 | MapFull, 16 | DbsFull, 17 | ReadersFull, 18 | TxnFull, 19 | CursorFull, 20 | PageFull, 21 | UnableExtendMapsize, 22 | Incompatible, 23 | BadRslot, 24 | BadTxn, 25 | BadValSize, 26 | BadDbi, 27 | Problem, 28 | Busy, 29 | Multival, 30 | WannaRecovery, 31 | KeyMismatch, 32 | InvalidValue, 33 | Access, 34 | TooLarge, 35 | DecodeError(Box), 36 | Other(c_int), 37 | } 38 | 39 | impl Error { 40 | /// Converts a raw error code to an [Error]. 41 | pub fn from_err_code(err_code: c_int) -> Error { 42 | match err_code { 43 | ffi::MDBX_KEYEXIST => Error::KeyExist, 44 | ffi::MDBX_NOTFOUND => Error::NotFound, 45 | ffi::MDBX_ENODATA => Error::NoData, 46 | ffi::MDBX_PAGE_NOTFOUND => Error::PageNotFound, 47 | ffi::MDBX_CORRUPTED => Error::Corrupted, 48 | ffi::MDBX_PANIC => Error::Panic, 49 | ffi::MDBX_VERSION_MISMATCH => Error::VersionMismatch, 50 | ffi::MDBX_INVALID => Error::Invalid, 51 | ffi::MDBX_MAP_FULL => Error::MapFull, 52 | ffi::MDBX_DBS_FULL => Error::DbsFull, 53 | ffi::MDBX_READERS_FULL => Error::ReadersFull, 54 | ffi::MDBX_TXN_FULL => Error::TxnFull, 55 | ffi::MDBX_CURSOR_FULL => Error::CursorFull, 56 | ffi::MDBX_PAGE_FULL => Error::PageFull, 57 | ffi::MDBX_UNABLE_EXTEND_MAPSIZE => Error::UnableExtendMapsize, 58 | ffi::MDBX_INCOMPATIBLE => Error::Incompatible, 59 | ffi::MDBX_BAD_RSLOT => Error::BadRslot, 60 | ffi::MDBX_BAD_TXN => Error::BadTxn, 61 | ffi::MDBX_BAD_VALSIZE => Error::BadValSize, 62 | ffi::MDBX_BAD_DBI => Error::BadDbi, 63 | ffi::MDBX_PROBLEM => Error::Problem, 64 | ffi::MDBX_BUSY => Error::Busy, 65 | ffi::MDBX_EMULTIVAL => Error::Multival, 66 | ffi::MDBX_WANNA_RECOVERY => Error::WannaRecovery, 67 | ffi::MDBX_EKEYMISMATCH => Error::KeyMismatch, 68 | ffi::MDBX_EINVAL => Error::InvalidValue, 69 | ffi::MDBX_EACCESS => Error::Access, 70 | ffi::MDBX_TOO_LARGE => Error::TooLarge, 71 | other => Error::Other(other), 72 | } 73 | } 74 | 75 | /// Converts an [Error] to the raw error code. 
76 | fn to_err_code(&self) -> c_int { 77 | match self { 78 | Error::KeyExist => ffi::MDBX_KEYEXIST, 79 | Error::NotFound => ffi::MDBX_NOTFOUND, 80 | Error::NoData => ffi::MDBX_ENODATA, 81 | Error::PageNotFound => ffi::MDBX_PAGE_NOTFOUND, 82 | Error::Corrupted => ffi::MDBX_CORRUPTED, 83 | Error::Panic => ffi::MDBX_PANIC, 84 | Error::VersionMismatch => ffi::MDBX_VERSION_MISMATCH, 85 | Error::Invalid => ffi::MDBX_INVALID, 86 | Error::MapFull => ffi::MDBX_MAP_FULL, 87 | Error::DbsFull => ffi::MDBX_DBS_FULL, 88 | Error::ReadersFull => ffi::MDBX_READERS_FULL, 89 | Error::TxnFull => ffi::MDBX_TXN_FULL, 90 | Error::CursorFull => ffi::MDBX_CURSOR_FULL, 91 | Error::PageFull => ffi::MDBX_PAGE_FULL, 92 | Error::UnableExtendMapsize => ffi::MDBX_UNABLE_EXTEND_MAPSIZE, 93 | Error::Incompatible => ffi::MDBX_INCOMPATIBLE, 94 | Error::BadRslot => ffi::MDBX_BAD_RSLOT, 95 | Error::BadTxn => ffi::MDBX_BAD_TXN, 96 | Error::BadValSize => ffi::MDBX_BAD_VALSIZE, 97 | Error::BadDbi => ffi::MDBX_BAD_DBI, 98 | Error::Problem => ffi::MDBX_PROBLEM, 99 | Error::Busy => ffi::MDBX_BUSY, 100 | Error::Multival => ffi::MDBX_EMULTIVAL, 101 | Error::WannaRecovery => ffi::MDBX_WANNA_RECOVERY, 102 | Error::KeyMismatch => ffi::MDBX_EKEYMISMATCH, 103 | Error::InvalidValue => ffi::MDBX_EINVAL, 104 | Error::Access => ffi::MDBX_EACCESS, 105 | Error::TooLarge => ffi::MDBX_TOO_LARGE, 106 | Error::Other(err_code) => *err_code, 107 | _ => unreachable!(), 108 | } 109 | } 110 | } 111 | 112 | impl fmt::Display for Error { 113 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 114 | match self { 115 | Error::DecodeError(reason) => write!(fmt, "{reason}"), 116 | other => { 117 | write!(fmt, "{}", unsafe { 118 | let err = ffi::mdbx_strerror(other.to_err_code()); 119 | str::from_utf8_unchecked(CStr::from_ptr(err).to_bytes()) 120 | }) 121 | } 122 | } 123 | } 124 | } 125 | 126 | impl std::error::Error for Error {} 127 | 128 | /// An MDBX result. 129 | pub type Result = result::Result; 130 | 131 | pub fn mdbx_result(err_code: c_int) -> Result { 132 | match err_code { 133 | ffi::MDBX_SUCCESS => Ok(false), 134 | ffi::MDBX_RESULT_TRUE => Ok(true), 135 | other => Err(Error::from_err_code(other)), 136 | } 137 | } 138 | 139 | #[macro_export] 140 | macro_rules! mdbx_try_optional { 141 | ($expr:expr) => {{ 142 | match $expr { 143 | Err(Error::NotFound | Error::NoData) => return Ok(None), 144 | Err(e) => return Err(e), 145 | Ok(v) => v, 146 | } 147 | }}; 148 | } 149 | 150 | #[cfg(test)] 151 | mod test { 152 | use super::*; 153 | 154 | #[test] 155 | fn test_description() { 156 | #[cfg(not(windows))] 157 | assert_eq!("Permission denied", Error::from_err_code(13).to_string()); 158 | 159 | assert_eq!( 160 | "MDBX_INVALID: File is not an MDBX file", 161 | Error::Invalid.to_string() 162 | ); 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/flags.rs: -------------------------------------------------------------------------------- 1 | use bitflags::bitflags; 2 | use ffi::*; 3 | use libc::c_uint; 4 | 5 | /// MDBX sync mode 6 | #[derive(Clone, Copy, Debug)] 7 | pub enum SyncMode { 8 | /// Default robust and durable sync mode. 9 | /// Metadata is written and flushed to disk after a data is written and flushed, which guarantees the integrity of the database in the event of a crash at any time. 10 | Durable, 11 | 12 | /// Don't sync the meta-page after commit. 13 | /// 14 | /// Flush system buffers to disk only once per transaction commit, omit the metadata flush. 
15 |     /// Defer the flush until the system flushes files to disk on its own, until the next non-read-only commit, or until [Database::sync()](crate::Database::sync) is called.
16 |     /// Depending on the platform and hardware, [SyncMode::NoMetaSync] may double write performance.
17 |     ///
18 |     /// This trade-off maintains database integrity, but a system crash may undo the last committed transaction.
19 |     /// I.e. it preserves the ACI (atomicity, consistency, isolation) properties but not D (durability).
20 |     NoMetaSync,
21 | 
22 |     /// Don't sync anything, but keep previous steady commits.
23 |     ///
24 |     /// Like [SyncMode::UtterlyNoSync], the [SyncMode::SafeNoSync] flag disables flushing system buffers to disk when committing a transaction.
25 |     /// The crucial difference is how the MVCC snapshots corresponding to previous "steady" transactions are recycled (see below).
26 |     ///
27 |     /// With [crate::WriteMap], [SyncMode::SafeNoSync] instructs MDBX to use asynchronous mmap-flushes to disk.
28 |     /// Asynchronous mmap-flushes means that all writes are scheduled and performed by the operating system in its own manner, i.e. unordered.
29 |     /// MDBX itself merely notifies the operating system that it would be nice to write the data to disk, nothing more.
30 |     ///
31 |     /// Depending on the platform and hardware, [SyncMode::SafeNoSync] may increase write performance several-fold, even 10 times or more.
32 |     ///
33 |     /// In contrast to [SyncMode::UtterlyNoSync], with [SyncMode::SafeNoSync] MDBX keeps the B-tree pages of the last "steady" transaction (the last one completely synced to disk) untouched.
34 |     /// This has big implications for both data durability and (unfortunately) performance:
35 |     ///
36 |     /// A system crash can't corrupt the database, but you will lose the last transactions, because MDBX rolls back to the last steady commit, which is kept explicitly.
37 |     /// The last steady transaction has an effect similar to a "long-lived" read transaction: it prevents reuse of pages freed by newer write transactions, so any data changes are placed in newly allocated pages.
38 |     /// To avoid rapid database growth, the system syncs data and issues a steady commit-point to resume page reuse whenever there is insufficient space, and before increasing the size of the file on disk.
39 |     /// In other words, with [SyncMode::SafeNoSync] MDBX protects you from whole-database corruption, at the cost of increased database size and/or number of disk IOPs.
40 |     /// So the [SyncMode::SafeNoSync] flag can be used together with [Database::sync()](crate::Database::sync) as an alternative to batch committing or nested transactions (in some cases).
41 |     ///
42 |     /// The number and volume of disk IOPs with the [SyncMode::SafeNoSync] flag will be exactly the same as without any no-sync flags.
43 |     /// However, you should expect a larger process working set and significantly worse locality of reference, due to the more intensive allocation of previously unused pages and the resulting growth of the database size.
44 |     SafeNoSync,
45 | 
46 |     /// Don't sync anything and wipe previous steady commits.
47 |     ///
48 |     /// Don't flush system buffers to disk when committing a transaction.
49 |     /// This optimization means a system crash can corrupt the database if buffers are not yet flushed to disk.
50 |     /// Depending on the platform and hardware, [SyncMode::UtterlyNoSync] may increase write performance dramatically, even 100 times or more.
51 |     ///
52 |     /// If the filesystem preserves write order (which is rare, and never guaranteed unless explicitly documented) and the [WriteMap](crate::WriteMap) and [DatabaseFlags::liforeclaim] flags are not used,
53 |     /// then a system crash can't corrupt the database, but you can lose the last transactions if at least one buffer has not yet been flushed to disk.
54 |     /// The risk is governed by how often the system flushes dirty buffers to disk and how often [Database::sync()](crate::Database::sync) is called.
55 |     /// So transactions exhibit the ACI (atomicity, consistency, isolation) properties and only lose D (durability).
56 |     /// I.e. database integrity is maintained, but a system crash may undo the final transactions.
57 |     ///
58 |     /// Otherwise, if the filesystem does not preserve write order (which is typical) or the [WriteMap](crate::WriteMap) or [DatabaseFlags::liforeclaim] flags are used, you should expect a corrupted database after a system crash.
59 |     ///
60 |     /// So, the most important things about [SyncMode::UtterlyNoSync]:
61 |     ///
62 |     /// A system crash immediately after committing a write transaction is highly likely to lead to database corruption.
63 |     /// Successful completion of [Database::sync(force=true)](crate::Database::sync) after one or more committed transactions guarantees consistency and durability.
64 |     /// BUT by committing two or more transactions afterwards you put the database back into a weak state, in which a system crash may lead to database corruption!
65 |     /// With a single transaction committed after [Database::sync()](crate::Database::sync), you may lose that transaction itself, but not the whole database.
66 |     /// Nevertheless, [SyncMode::UtterlyNoSync] provides "weak" durability in case of an application crash (but no durability on system failure),
67 |     /// and therefore may be very useful in scenarios where durability across a system failure is not required (e.g. for short-lived data), or if you can accept the risk.
68 |     UtterlyNoSync,
69 | }
70 | 
71 | impl Default for SyncMode {
72 |     fn default() -> Self {
73 |         Self::Durable
74 |     }
75 | }
76 | 
77 | #[derive(Clone, Copy, Debug)]
78 | pub enum Mode {
79 |     ReadOnly,
80 |     ReadWrite(ReadWriteOptions),
81 | }
82 | 
83 | impl Default for Mode {
84 |     fn default() -> Self {
85 |         Self::ReadWrite(ReadWriteOptions::default())
86 |     }
87 | }
88 | 
89 | #[derive(Clone, Copy, Debug, Default)]
90 | pub struct ReadWriteOptions {
91 |     pub sync_mode: SyncMode,
92 |     pub min_size: Option,
93 |     pub max_size: Option,
94 |     pub growth_step: Option,
95 |     pub shrink_threshold: Option,
96 | }
97 | 
98 | bitflags! {
99 |     #[doc="Table options."]
100 |     #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
101 |     pub struct TableFlags: c_uint {
102 |         const REVERSE_KEY = MDBX_REVERSEKEY as u32;
103 |         const DUP_SORT = MDBX_DUPSORT as u32;
104 |         const INTEGER_KEY = MDBX_INTEGERKEY as u32;
105 |         const DUP_FIXED = MDBX_DUPFIXED as u32;
106 |         const INTEGER_DUP = MDBX_INTEGERDUP as u32;
107 |         const REVERSE_DUP = MDBX_REVERSEDUP as u32;
108 |         const CREATE = MDBX_CREATE as u32;
109 |         const ACCEDE = MDBX_DB_ACCEDE as u32;
110 |     }
111 | }
112 | 
113 | bitflags!
{ 114 | #[doc="Write options."] 115 | #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] 116 | pub struct WriteFlags: c_uint { 117 | const UPSERT = MDBX_UPSERT as u32; 118 | const NO_OVERWRITE = MDBX_NOOVERWRITE as u32; 119 | const NO_DUP_DATA = MDBX_NODUPDATA as u32; 120 | const CURRENT = MDBX_CURRENT as u32; 121 | const ALLDUPS = MDBX_ALLDUPS as u32; 122 | const RESERVE = MDBX_RESERVE as u32; 123 | const APPEND = MDBX_APPEND as u32; 124 | const APPEND_DUP = MDBX_APPENDDUP as u32; 125 | const MULTIPLE = MDBX_MULTIPLE as u32; 126 | } 127 | } 128 | 129 | /// Compatibility shim to convert between `i32` and `u32` enums on Windows and UNIX. 130 | /// 131 | /// Windows treats C enums as `i32`, while Unix uses `u32`. We use `u32` enums internally 132 | /// and then cast back to `i32` only where Windows requires it. 133 | /// 134 | /// See https://github.com/rust-lang/rust-bindgen/issues/1907 135 | #[cfg(windows)] 136 | #[inline(always)] 137 | pub const fn c_enum(rust_value: u32) -> i32 { 138 | rust_value as i32 139 | } 140 | 141 | #[cfg(not(windows))] 142 | #[inline(always)] 143 | pub const fn c_enum(rust_value: u32) -> u32 { 144 | rust_value 145 | } 146 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::type_complexity, clippy::unnecessary_cast)] 2 | #![doc = include_str!("../README.md")] 3 | #![cfg_attr(docsrs, feature(doc_cfg))] 4 | 5 | pub use crate::{ 6 | codec::*, 7 | cursor::{Cursor, IntoIter, Iter, IterDup}, 8 | database::{ 9 | Database, DatabaseKind, DatabaseOptions, Info, NoWriteMap, PageSize, Stat, WriteMap, 10 | }, 11 | error::{Error, Result}, 12 | flags::*, 13 | table::Table, 14 | transaction::{RO, RW, Transaction, TransactionKind}, 15 | }; 16 | 17 | mod codec; 18 | mod cursor; 19 | mod database; 20 | mod error; 21 | mod flags; 22 | mod table; 23 | mod transaction; 24 | 25 | /// Fully typed ORM for use with libmdbx. 26 | #[cfg(feature = "orm")] 27 | #[cfg_attr(docsrs, doc(cfg(feature = "orm")))] 28 | pub mod orm; 29 | 30 | #[cfg(feature = "orm")] 31 | mod orm_uses { 32 | #[doc(hidden)] 33 | pub use arrayref; 34 | 35 | #[doc(hidden)] 36 | pub use impls; 37 | 38 | #[cfg(feature = "cbor")] 39 | #[doc(hidden)] 40 | pub use ciborium; 41 | } 42 | 43 | #[cfg(feature = "orm")] 44 | pub use orm_uses::*; 45 | 46 | #[cfg(test)] 47 | mod test_utils { 48 | use super::*; 49 | use tempfile::tempdir; 50 | 51 | type Database = crate::Database; 52 | 53 | /// Regression test for https://github.com/danburkert/lmdb-rs/issues/21. 54 | /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer 55 | /// GCC compilers. 
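    //
    // A minimal, illustrative sketch (not part of the original source) of opening a
    // database with explicit options; the regression test below uses the same
    // `DatabaseOptions` struct, only setting `max_tables`:
    //
    //     let db = Database::open_with_options(
    //         &dir,
    //         DatabaseOptions {
    //             mode: Mode::ReadWrite(ReadWriteOptions {
    //                 sync_mode: SyncMode::Durable,
    //                 ..Default::default()
    //             }),
    //             ..Default::default()
    //         },
    //     )?;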
56 | #[test] 57 | fn issue_21_regression() { 58 | const HEIGHT_KEY: [u8; 1] = [0]; 59 | 60 | let dir = tempdir().unwrap(); 61 | 62 | let db = Database::open_with_options( 63 | &dir, 64 | DatabaseOptions { 65 | max_tables: Some(2), 66 | ..Default::default() 67 | }, 68 | ) 69 | .unwrap(); 70 | 71 | for height in 0..1000_u64 { 72 | let value = height.to_le_bytes(); 73 | let tx = db.begin_rw_txn().unwrap(); 74 | let index = tx.create_table(None, TableFlags::DUP_SORT).unwrap(); 75 | tx.put(&index, HEIGHT_KEY, value, WriteFlags::empty()) 76 | .unwrap(); 77 | tx.commit().unwrap(); 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/orm/cursor.rs: -------------------------------------------------------------------------------- 1 | use super::traits::*; 2 | use crate::{RW, TransactionKind, WriteFlags}; 3 | use std::marker::PhantomData; 4 | 5 | #[derive(Clone, Debug)] 6 | pub(crate) struct DecodableWrapper(pub T); 7 | 8 | impl crate::Decodable<'_> for DecodableWrapper 9 | where 10 | T: Decodable, 11 | { 12 | fn decode(data_val: &[u8]) -> Result 13 | where 14 | Self: Sized, 15 | { 16 | T::decode(data_val) 17 | .map_err(|e| crate::Error::DecodeError(e.into())) 18 | .map(Self) 19 | } 20 | } 21 | 22 | #[derive(Debug)] 23 | pub struct Cursor<'tx, K, T> 24 | where 25 | K: TransactionKind, 26 | T: Table, 27 | { 28 | pub(crate) inner: crate::Cursor<'tx, K>, 29 | pub(crate) _marker: PhantomData, 30 | } 31 | 32 | #[allow(clippy::type_complexity)] 33 | fn map_res_inner( 34 | v: Result, DecodableWrapper)>, E>, 35 | ) -> anyhow::Result> 36 | where 37 | T: Table, 38 | E: std::error::Error + Send + Sync + 'static, 39 | { 40 | if let Some((k, v)) = v? { 41 | return Ok(Some((k.0, v.0))); 42 | } 43 | 44 | Ok(None) 45 | } 46 | 47 | impl Cursor<'_, K, T> 48 | where 49 | K: TransactionKind, 50 | T: Table, 51 | { 52 | pub fn first(&mut self) -> anyhow::Result> 53 | where 54 | T::Key: Decodable, 55 | { 56 | map_res_inner::(self.inner.first()) 57 | } 58 | 59 | pub fn seek_closest(&mut self, key: T::SeekKey) -> anyhow::Result> 60 | where 61 | T::Key: Decodable, 62 | { 63 | map_res_inner::(self.inner.set_range(key.encode().as_ref())) 64 | } 65 | 66 | pub fn seek_exact(&mut self, key: T::Key) -> anyhow::Result> 67 | where 68 | T::Key: Decodable, 69 | { 70 | map_res_inner::(self.inner.set_key(key.encode().as_ref())) 71 | } 72 | 73 | #[allow(clippy::should_implement_trait)] 74 | pub fn next(&mut self) -> anyhow::Result> 75 | where 76 | T::Key: Decodable, 77 | { 78 | map_res_inner::(self.inner.next()) 79 | } 80 | 81 | pub fn prev(&mut self) -> anyhow::Result> 82 | where 83 | T::Key: Decodable, 84 | { 85 | map_res_inner::(self.inner.prev()) 86 | } 87 | 88 | pub fn last(&mut self) -> anyhow::Result> 89 | where 90 | T::Key: Decodable, 91 | { 92 | map_res_inner::(self.inner.last()) 93 | } 94 | 95 | pub fn current(&mut self) -> anyhow::Result> 96 | where 97 | T::Key: Decodable, 98 | { 99 | map_res_inner::(self.inner.get_current()) 100 | } 101 | 102 | pub fn walk( 103 | self, 104 | start: Option, 105 | ) -> impl Iterator> 106 | where 107 | T: Table, 108 | { 109 | struct I<'tx, K, T> 110 | where 111 | K: TransactionKind, 112 | T: Table, 113 | { 114 | cursor: Cursor<'tx, K, T>, 115 | start: Option, 116 | 117 | first: bool, 118 | } 119 | 120 | impl Iterator for I<'_, K, T> 121 | where 122 | K: TransactionKind, 123 | T: Table, 124 | { 125 | type Item = anyhow::Result<(T::Key, T::Value)>; 126 | 127 | fn next(&mut self) -> Option { 128 | if self.first { 129 | self.first = false; 
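                        // First call: position the cursor either at the caller-supplied
                        // start key (via seek_closest) or at the first entry of the table;
                        // every later call just steps forward with `next`.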
130 | if let Some(start) = self.start.take() { 131 | self.cursor.seek_closest(start) 132 | } else { 133 | self.cursor.first() 134 | } 135 | } else { 136 | self.cursor.next() 137 | } 138 | .transpose() 139 | } 140 | } 141 | 142 | I { 143 | cursor: self, 144 | start, 145 | first: true, 146 | } 147 | } 148 | 149 | pub fn walk_back( 150 | self, 151 | start: Option, 152 | ) -> impl Iterator> 153 | where 154 | T: Table, 155 | { 156 | struct I<'tx, K, T> 157 | where 158 | K: TransactionKind, 159 | T: Table, 160 | { 161 | cursor: Cursor<'tx, K, T>, 162 | start: Option, 163 | 164 | first: bool, 165 | } 166 | 167 | impl Iterator for I<'_, K, T> 168 | where 169 | K: TransactionKind, 170 | T: Table, 171 | { 172 | type Item = anyhow::Result<(T::Key, T::Value)>; 173 | 174 | fn next(&mut self) -> Option { 175 | if self.first { 176 | self.first = false; 177 | if let Some(start_key) = self.start.take() { 178 | self.cursor.seek_closest(start_key) 179 | } else { 180 | self.cursor.last() 181 | } 182 | } else { 183 | self.cursor.prev() 184 | } 185 | .transpose() 186 | } 187 | } 188 | 189 | I { 190 | cursor: self, 191 | start, 192 | first: true, 193 | } 194 | } 195 | } 196 | 197 | impl Cursor<'_, K, T> 198 | where 199 | K: TransactionKind, 200 | T: DupSort, 201 | { 202 | pub fn seek_value( 203 | &mut self, 204 | key: T::Key, 205 | seek_value: T::SeekValue, 206 | ) -> anyhow::Result> 207 | where 208 | T::Key: Clone, 209 | { 210 | let res = self.inner.get_both_range::>( 211 | key.encode().as_ref(), 212 | seek_value.encode().as_ref(), 213 | )?; 214 | 215 | if let Some(v) = res { 216 | return Ok(Some(v.0)); 217 | } 218 | 219 | Ok(None) 220 | } 221 | 222 | pub fn last_value(&mut self) -> anyhow::Result> 223 | where 224 | T::Key: Decodable, 225 | { 226 | Ok(self 227 | .inner 228 | .last_dup::>()? 
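            // Unwrap the DecodableWrapper so the caller gets a plain T::Value back.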
229 | .map(|v| v.0)) 230 | } 231 | 232 | pub fn next_key(&mut self) -> anyhow::Result> 233 | where 234 | T::Key: Decodable, 235 | { 236 | map_res_inner::(self.inner.next_nodup()) 237 | } 238 | 239 | pub fn next_value(&mut self) -> anyhow::Result> 240 | where 241 | T::Key: Decodable, 242 | { 243 | map_res_inner::(self.inner.next_dup()) 244 | } 245 | 246 | pub fn prev_key(&mut self) -> anyhow::Result> 247 | where 248 | T::Key: Decodable, 249 | { 250 | map_res_inner::(self.inner.prev_nodup()) 251 | } 252 | 253 | pub fn prev_value(&mut self) -> anyhow::Result> 254 | where 255 | T::Key: Decodable, 256 | { 257 | map_res_inner::(self.inner.prev_dup()) 258 | } 259 | 260 | pub fn walk_key( 261 | self, 262 | start: T::Key, 263 | seek_value: Option, 264 | ) -> impl Iterator> 265 | where 266 | T::Key: Clone + Decodable, 267 | { 268 | struct I<'tx, K, T> 269 | where 270 | K: TransactionKind, 271 | T: DupSort, 272 | { 273 | cursor: Cursor<'tx, K, T>, 274 | start: Option, 275 | seek_value: Option, 276 | 277 | first: bool, 278 | } 279 | 280 | impl Iterator for I<'_, K, T> 281 | where 282 | K: TransactionKind, 283 | T: DupSort, 284 | { 285 | type Item = anyhow::Result; 286 | 287 | fn next(&mut self) -> Option { 288 | if self.first { 289 | self.first = false; 290 | let start_key = self.start.take().unwrap(); 291 | if let Some(seek_both_key) = self.seek_value.take() { 292 | self.cursor.seek_value(start_key, seek_both_key) 293 | } else { 294 | self.cursor.seek_exact(start_key).map(|v| v.map(|(_, v)| v)) 295 | } 296 | } else { 297 | self.cursor.next_value().map(|v| v.map(|(_, v)| v)) 298 | } 299 | .transpose() 300 | } 301 | } 302 | 303 | I { 304 | cursor: self, 305 | start: Some(start), 306 | seek_value, 307 | first: true, 308 | } 309 | } 310 | } 311 | 312 | impl Cursor<'_, RW, T> 313 | where 314 | T: Table, 315 | { 316 | pub fn upsert(&mut self, key: T::Key, value: T::Value) -> anyhow::Result<()> { 317 | Ok(self.inner.put( 318 | key.encode().as_ref(), 319 | value.encode().as_ref(), 320 | WriteFlags::UPSERT, 321 | )?) 322 | } 323 | 324 | pub fn append(&mut self, key: T::Key, value: T::Value) -> anyhow::Result<()> { 325 | Ok(self.inner.put( 326 | key.encode().as_ref(), 327 | value.encode().as_ref(), 328 | WriteFlags::APPEND, 329 | )?) 330 | } 331 | 332 | pub fn delete_current(&mut self) -> anyhow::Result<()> { 333 | self.inner.del(WriteFlags::CURRENT)?; 334 | 335 | Ok(()) 336 | } 337 | } 338 | 339 | impl Cursor<'_, RW, T> 340 | where 341 | T: DupSort, 342 | { 343 | pub fn delete_current_key(&mut self) -> anyhow::Result<()> { 344 | Ok(self.inner.del(WriteFlags::NO_DUP_DATA)?) 345 | } 346 | pub fn append_value(&mut self, key: T::Key, value: T::Value) -> anyhow::Result<()> { 347 | Ok(self.inner.put( 348 | key.encode().as_ref(), 349 | value.encode().as_ref(), 350 | WriteFlags::APPEND_DUP, 351 | )?) 
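        // Note (MDBX append semantics, stated here as a reminder rather than taken from
        // the original comments): APPEND and APPEND_DUP expect keys and duplicate values
        // to arrive in sorted order; out-of-order input is rejected with Error::KeyMismatch
        // rather than being silently reordered.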
352 | } 353 | } 354 | -------------------------------------------------------------------------------- /src/orm/database.rs: -------------------------------------------------------------------------------- 1 | use super::{traits::*, transaction::Transaction}; 2 | use crate::{DatabaseOptions, Mode, RO, RW, TableFlags, WriteMap}; 3 | use anyhow::Context; 4 | use std::{ 5 | collections::BTreeMap, 6 | fs::DirBuilder, 7 | ops::Deref, 8 | path::{Path, PathBuf}, 9 | }; 10 | use tempfile::tempdir; 11 | 12 | #[derive(Debug)] 13 | enum DbFolder { 14 | Persisted(std::path::PathBuf), 15 | Temporary(tempfile::TempDir), 16 | } 17 | 18 | impl DbFolder { 19 | fn path(&self) -> &Path { 20 | match self { 21 | Self::Persisted(p) => p.as_path(), 22 | Self::Temporary(temp_dir) => temp_dir.path(), 23 | } 24 | } 25 | } 26 | 27 | #[derive(Debug)] 28 | pub struct Database { 29 | inner: crate::Database, 30 | folder: DbFolder, 31 | } 32 | 33 | impl Database { 34 | pub fn path(&self) -> &Path { 35 | self.folder.path() 36 | } 37 | 38 | fn open_db(folder: DbFolder, options: DatabaseOptions) -> anyhow::Result { 39 | Ok(Self { 40 | inner: crate::Database::open_with_options(folder.path(), options).with_context( 41 | || format!("failed to open database at {}", folder.path().display()), 42 | )?, 43 | folder, 44 | }) 45 | } 46 | 47 | fn new( 48 | folder: DbFolder, 49 | mut options: DatabaseOptions, 50 | chart: &DatabaseChart, 51 | ) -> anyhow::Result { 52 | options.max_tables = Some(std::cmp::max(chart.len() as u64, 1)); 53 | 54 | if let Mode::ReadOnly = options.mode { 55 | Self::open_db(folder, options) 56 | } else { 57 | let _ = DirBuilder::new().recursive(true).create(folder.path()); 58 | 59 | let this = Self::open_db(folder, options)?; 60 | 61 | let tx = this.inner.begin_rw_txn()?; 62 | for (table, settings) in chart { 63 | tx.create_table( 64 | Some(table), 65 | if settings.dup_sort { 66 | TableFlags::DUP_SORT 67 | } else { 68 | TableFlags::default() 69 | }, 70 | )?; 71 | } 72 | tx.commit()?; 73 | 74 | Ok(this) 75 | } 76 | } 77 | 78 | pub fn create(path: Option, chart: &DatabaseChart) -> anyhow::Result { 79 | Self::create_with_options(path, DatabaseOptions::default(), chart) 80 | } 81 | 82 | pub fn create_with_options( 83 | path: Option, 84 | options: DatabaseOptions, 85 | chart: &DatabaseChart, 86 | ) -> anyhow::Result { 87 | let folder = if let Some(path) = path { 88 | DbFolder::Persisted(path) 89 | } else { 90 | let path = tempdir()?; 91 | DbFolder::Temporary(path) 92 | }; 93 | 94 | Self::new(folder, options, chart) 95 | } 96 | 97 | pub fn open(path: impl AsRef, chart: &DatabaseChart) -> anyhow::Result { 98 | Self::open_with_options(path, DatabaseOptions::default(), chart) 99 | } 100 | 101 | pub fn open_with_options( 102 | path: impl AsRef, 103 | mut options: DatabaseOptions, 104 | chart: &DatabaseChart, 105 | ) -> anyhow::Result { 106 | options.mode = Mode::ReadOnly; 107 | 108 | Self::new( 109 | DbFolder::Persisted(path.as_ref().to_path_buf()), 110 | options, 111 | chart, 112 | ) 113 | } 114 | } 115 | 116 | impl Deref for Database { 117 | type Target = crate::Database; 118 | 119 | fn deref(&self) -> &Self::Target { 120 | &self.inner 121 | } 122 | } 123 | 124 | impl Database { 125 | pub fn begin_read(&self) -> anyhow::Result> { 126 | Ok(Transaction { 127 | inner: self.inner.begin_ro_txn()?, 128 | }) 129 | } 130 | 131 | pub fn begin_readwrite(&self) -> anyhow::Result> { 132 | Ok(Transaction { 133 | inner: self.inner.begin_rw_txn()?, 134 | }) 135 | } 136 | } 137 | 138 | #[derive(Debug)] 139 | pub struct 
UntypedTable(pub T) 140 | where 141 | T: Table; 142 | 143 | impl Table for UntypedTable 144 | where 145 | T: Table, 146 | { 147 | const NAME: &'static str = T::NAME; 148 | 149 | type Key = Vec; 150 | type Value = Vec; 151 | type SeekKey = Vec; 152 | } 153 | 154 | impl UntypedTable 155 | where 156 | T: Table, 157 | { 158 | pub fn encode_key(key: T::Key) -> <::Key as Encodable>::Encoded { 159 | key.encode() 160 | } 161 | 162 | pub fn decode_key(encoded: &[u8]) -> anyhow::Result 163 | where 164 | T::Key: Decodable, 165 | { 166 | ::decode(encoded) 167 | } 168 | 169 | pub fn encode_value(value: T::Value) -> <::Value as Encodable>::Encoded { 170 | value.encode() 171 | } 172 | 173 | pub fn decode_value(encoded: &[u8]) -> anyhow::Result { 174 | ::decode(encoded) 175 | } 176 | 177 | pub fn encode_seek_key(value: T::SeekKey) -> <::SeekKey as Encodable>::Encoded { 178 | value.encode() 179 | } 180 | } 181 | 182 | #[macro_export] 183 | macro_rules! table { 184 | ($(#[$docs:meta])+ ( $name:ident ) $key:ty [ $seek_key:ty ] => $value:ty) => { 185 | $(#[$docs])+ 186 | /// 187 | #[doc = concat!("Takes [`", stringify!($key), "`] as a key and returns [`", stringify!($value), "`]")] 188 | #[derive(Clone, Copy, Debug, Default)] 189 | pub struct $name; 190 | 191 | impl $crate::orm::Table for $name { 192 | const NAME: &'static str = stringify!($name); 193 | 194 | type Key = $key; 195 | type SeekKey = $seek_key; 196 | type Value = $value; 197 | } 198 | 199 | impl $name { 200 | pub const fn untyped(self) -> $crate::orm::UntypedTable { 201 | $crate::orm::UntypedTable(self) 202 | } 203 | } 204 | 205 | impl std::fmt::Display for $name { 206 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 207 | write!(f, "{}", ::NAME) 208 | } 209 | } 210 | }; 211 | ($(#[$docs:meta])+ ( $name:ident ) $key:ty => $value:ty) => { 212 | table!( 213 | $(#[$docs])+ 214 | ( $name ) $key [ $key ] => $value 215 | ); 216 | }; 217 | } 218 | 219 | #[macro_export] 220 | macro_rules! dupsort { 221 | ($(#[$docs:meta])+ ( $table_name:ident ) $key:ty [$seek_key:ty] => $value:ty [$seek_value:ty] ) => { 222 | table!( 223 | $(#[$docs])+ 224 | /// 225 | #[doc = concat!("`DUPSORT` table with seek value type being: [`", stringify!($seek_value), "`].")] 226 | ( $table_name ) $key [$seek_key] => $value 227 | ); 228 | impl $crate::orm::DupSort for $table_name { 229 | type SeekValue = $seek_value; 230 | } 231 | }; 232 | 233 | ($(#[$docs:meta])+ ( $table_name:ident ) $key:ty [$seek_key:ty] => $value:ty ) => { 234 | dupsort!( 235 | $(#[$docs])+ 236 | ( $table_name ) $key [$seek_key] => $value [$value] 237 | ); 238 | }; 239 | 240 | ($(#[$docs:meta])+ ( $table_name:ident ) $key:ty => $value:ty [$seek_value:ty] ) => { 241 | dupsort!( 242 | $(#[$docs])+ 243 | ( $table_name ) $key [$key] => $value [$seek_value] 244 | ); 245 | }; 246 | 247 | ($(#[$docs:meta])+ ( $table_name:ident ) $key:ty => $value:ty ) => { 248 | dupsort!( 249 | $(#[$docs])+ 250 | ( $table_name ) $key [$key] => $value [$value] 251 | ); 252 | }; 253 | } 254 | 255 | #[derive(Clone, Debug, Default)] 256 | pub struct TableSettings { 257 | pub dup_sort: bool, 258 | } 259 | 260 | /// Contains settings for each table in the database to be created or opened. 261 | pub type DatabaseChart = BTreeMap<&'static str, TableSettings>; 262 | 263 | #[macro_export] 264 | macro_rules! 
table_info { 265 | ($t:ty) => { 266 | ( 267 | <$t as $crate::orm::Table>::NAME, 268 | $crate::orm::TableSettings { 269 | dup_sort: $crate::impls::impls!($t: $crate::orm::DupSort), 270 | }, 271 | ) 272 | }; 273 | } 274 | -------------------------------------------------------------------------------- /src/orm/impls.rs: -------------------------------------------------------------------------------- 1 | use super::traits::*; 2 | use anyhow::bail; 3 | use arrayvec::ArrayVec; 4 | use derive_more::*; 5 | use std::fmt::Display; 6 | 7 | #[derive( 8 | Clone, 9 | Copy, 10 | Debug, 11 | Deref, 12 | DerefMut, 13 | Default, 14 | Display, 15 | PartialEq, 16 | Eq, 17 | From, 18 | PartialOrd, 19 | Ord, 20 | Hash, 21 | )] 22 | pub struct CutStart(pub T); 23 | 24 | impl Encodable for () { 25 | type Encoded = [u8; 0]; 26 | 27 | fn encode(self) -> Self::Encoded { 28 | [] 29 | } 30 | } 31 | 32 | impl Decodable for () { 33 | fn decode(b: &[u8]) -> anyhow::Result { 34 | if !b.is_empty() { 35 | return Err(TooLong::<0> { received: b.len() }.into()); 36 | } 37 | 38 | Ok(()) 39 | } 40 | } 41 | 42 | impl Encodable for Vec { 43 | type Encoded = Self; 44 | 45 | fn encode(self) -> Self::Encoded { 46 | self 47 | } 48 | } 49 | 50 | impl Decodable for Vec { 51 | fn decode(b: &[u8]) -> anyhow::Result { 52 | Ok(b.to_vec()) 53 | } 54 | } 55 | 56 | #[cfg(feature = "bytes")] 57 | impl Encodable for bytes::Bytes { 58 | type Encoded = Self; 59 | 60 | fn encode(self) -> Self::Encoded { 61 | self 62 | } 63 | } 64 | 65 | #[cfg(feature = "bytes")] 66 | impl Decodable for bytes::Bytes { 67 | fn decode(b: &[u8]) -> anyhow::Result { 68 | Ok(b.to_vec().into()) 69 | } 70 | } 71 | 72 | impl Encodable for String { 73 | type Encoded = Vec; 74 | 75 | fn encode(self) -> Self::Encoded { 76 | self.into_bytes() 77 | } 78 | } 79 | 80 | impl Decodable for String { 81 | fn decode(b: &[u8]) -> anyhow::Result { 82 | Ok(String::from_utf8(b.into())?) 
83 | } 84 | } 85 | 86 | impl Encodable for ArrayVec { 87 | type Encoded = Self; 88 | 89 | fn encode(self) -> Self::Encoded { 90 | self 91 | } 92 | } 93 | 94 | impl Decodable for ArrayVec { 95 | fn decode(v: &[u8]) -> anyhow::Result { 96 | let mut out = Self::default(); 97 | out.try_extend_from_slice(v)?; 98 | Ok(out) 99 | } 100 | } 101 | 102 | impl Encodable for [u8; LEN] { 103 | type Encoded = Self; 104 | 105 | fn encode(self) -> Self::Encoded { 106 | self 107 | } 108 | } 109 | 110 | impl Decodable for [u8; LEN] { 111 | fn decode(b: &[u8]) -> anyhow::Result { 112 | if b.len() != LEN { 113 | return Err(BadLength:: { received: b.len() }.into()); 114 | } 115 | 116 | let mut l = [0; LEN]; 117 | l.copy_from_slice(b); 118 | Ok(l) 119 | } 120 | } 121 | 122 | #[derive(Clone, Debug)] 123 | pub struct BadLength { 124 | pub received: usize, 125 | } 126 | 127 | impl Display for BadLength { 128 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 129 | write!(f, "Bad length: {EXPECTED} != {}", self.received) 130 | } 131 | } 132 | 133 | impl std::error::Error for BadLength {} 134 | 135 | #[derive(Clone, Debug)] 136 | pub struct TooShort { 137 | pub received: usize, 138 | } 139 | 140 | impl Display for TooShort { 141 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 142 | write!(f, "Value too short: {} < {MINIMUM}", self.received) 143 | } 144 | } 145 | 146 | impl std::error::Error for TooShort {} 147 | 148 | #[derive(Clone, Debug)] 149 | pub struct TooLong { 150 | pub received: usize, 151 | } 152 | impl Display for TooLong { 153 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 154 | write!(f, "Value too long: {} > {MAXIMUM}", self.received) 155 | } 156 | } 157 | 158 | impl std::error::Error for TooLong {} 159 | 160 | #[macro_export] 161 | macro_rules! table_integer { 162 | ($ty:ident => $real_ty:ident) => { 163 | impl $crate::orm::Encodable for $ty { 164 | type Encoded = [u8; $real_ty::BITS as usize / 8]; 165 | 166 | fn encode(self) -> Self::Encoded { 167 | self.to_be_bytes() 168 | } 169 | } 170 | 171 | impl $crate::orm::Decodable for $ty { 172 | fn decode(b: &[u8]) -> anyhow::Result { 173 | const EXPECTED: usize = $real_ty::BITS as usize / 8; 174 | 175 | match b.len() { 176 | EXPECTED => Ok($real_ty::from_be_bytes(*$crate::arrayref::array_ref!( 177 | &*b, 0, EXPECTED 178 | )) 179 | .into()), 180 | other => Err($crate::orm::BadLength:: { received: other }.into()), 181 | } 182 | } 183 | } 184 | }; 185 | } 186 | 187 | table_integer!(u32 => u32); 188 | table_integer!(u64 => u64); 189 | table_integer!(u128 => u128); 190 | 191 | impl Encodable for CutStart 192 | where 193 | T: Encodable, 194 | { 195 | type Encoded = ArrayVec; 196 | 197 | fn encode(self) -> Self::Encoded { 198 | let arr = self.0.encode(); 199 | 200 | let mut out = ::default(); 201 | out.try_extend_from_slice(&arr[arr.iter().take_while(|b| **b == 0).count()..]) 202 | .unwrap(); 203 | out 204 | } 205 | } 206 | 207 | impl Decodable for CutStart 208 | where 209 | T: Encodable + Decodable, 210 | { 211 | fn decode(b: &[u8]) -> anyhow::Result { 212 | if b.len() > LEN { 213 | return Err(TooLong:: { received: b.len() }.into()); 214 | } 215 | 216 | let mut array = [0; LEN]; 217 | array[LEN - b.len()..].copy_from_slice(b); 218 | T::decode(&array).map(Self) 219 | } 220 | } 221 | 222 | #[cfg(feature = "cbor")] 223 | #[macro_export] 224 | macro_rules! 
cbor_table_object { 225 | ($ty:ident) => { 226 | impl Encodable for $ty { 227 | type Encoded = Vec; 228 | 229 | fn encode(self) -> Self::Encoded { 230 | let mut v = vec![]; 231 | $crate::ciborium::ser::into_writer(&self, &mut v).unwrap(); 232 | v 233 | } 234 | } 235 | 236 | impl Decodable for $ty { 237 | fn decode(v: &[u8]) -> anyhow::Result { 238 | Ok($crate::ciborium::de::from_reader(v)?) 239 | } 240 | } 241 | }; 242 | } 243 | 244 | impl Encodable for (A, B) 245 | where 246 | A: Encodable, 247 | B: Encodable, 248 | { 249 | type Encoded = Vec; 250 | 251 | fn encode(self) -> Self::Encoded { 252 | let mut v = Vec::with_capacity(A_LEN + B_LEN); 253 | v.extend_from_slice(&self.0.encode()); 254 | v.extend_from_slice(&self.1.encode()); 255 | v 256 | } 257 | } 258 | 259 | impl Decodable for (A, B) 260 | where 261 | A: TableObject, 262 | B: TableObject, 263 | { 264 | fn decode(v: &[u8]) -> anyhow::Result { 265 | if v.len() != A_LEN + B_LEN { 266 | bail!("Bad length: {} != {} + {}", v.len(), A_LEN, B_LEN); 267 | } 268 | Ok(( 269 | A::decode(&v[..A_LEN]).unwrap(), 270 | B::decode(&v[A_LEN..]).unwrap(), 271 | )) 272 | } 273 | } 274 | -------------------------------------------------------------------------------- /src/orm/mod.rs: -------------------------------------------------------------------------------- 1 | //! Fully typed ORM based on libmdbx. 2 | //! 3 | //! Much simpler in usage but slightly more limited. 4 | //! 5 | //! ```rust,no_run 6 | //! use libmdbx::orm::{table, table_info, DatabaseChart, Decodable, Encodable}; 7 | //! use std::sync::Arc; 8 | //! use once_cell::sync::Lazy; 9 | //! use serde::{Deserialize, Serialize}; 10 | //! 11 | //! #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 12 | //! pub struct UserInfo { 13 | //! pub age: u8, 14 | //! pub first_name: String, 15 | //! pub last_name: String, 16 | //! } 17 | //! 18 | //! impl Encodable for UserInfo { 19 | //! type Encoded = Vec; 20 | //! 21 | //! fn encode(self) -> Self::Encoded { 22 | //! // Here we define serialization of UserInfo 23 | //! # todo!() 24 | //! } 25 | //! } 26 | //! 27 | //! impl Decodable for UserInfo { 28 | //! fn decode(v: &[u8]) -> anyhow::Result { 29 | //! // Here we define deserialization of UserInfo 30 | //! # todo!() 31 | //! } 32 | //! } 33 | //! 34 | //! // Define the users table 35 | //! table!( 36 | //! /// Table with users info. 37 | //! ( Users ) String => UserInfo 38 | //! ); 39 | //! 40 | //! // Assemble database chart 41 | //! pub static TABLES: Lazy> = 42 | //! Lazy::new(|| Arc::new([table_info!(Users)].into_iter().collect())); 43 | //! 44 | //! // Create database with the database chart 45 | //! let db = Arc::new(libmdbx::orm::Database::create(None, &TABLES).unwrap()); 46 | //! 47 | //! let users = vec![ 48 | //! ( 49 | //! "l33tc0der".to_string(), 50 | //! UserInfo { 51 | //! age: 42, 52 | //! first_name: "Leet".to_string(), 53 | //! last_name: "Coder".to_string(), 54 | //! }, 55 | //! ), 56 | //! ( 57 | //! "lameguy".to_string(), 58 | //! UserInfo { 59 | //! age: 25, 60 | //! first_name: "Lame".to_string(), 61 | //! last_name: "Guy".to_string(), 62 | //! }, 63 | //! ), 64 | //! ]; 65 | //! 66 | //! let tx = db.begin_readwrite().unwrap(); 67 | //! 68 | //! let mut cursor = tx.cursor::().unwrap(); 69 | //! 70 | //! // Insert user info into table 71 | //! for (nickname, user_info) in &users { 72 | //! cursor.upsert(nickname.clone(), user_info.clone()).unwrap(); 73 | //! } 74 | //! 75 | //! // Walk over table and collect its contents 76 | //! assert_eq!( 77 | //! users, 78 | //! 
cursor.walk(None).collect::>>().unwrap() 79 | //! ); 80 | //! ``` 81 | 82 | mod cursor; 83 | mod database; 84 | mod impls; 85 | mod traits; 86 | mod transaction; 87 | 88 | pub use self::{cursor::*, database::*, impls::*, traits::*, transaction::*}; 89 | pub use crate::{ 90 | DatabaseKind, DatabaseOptions, Mode, NoWriteMap, RO, RW, ReadWriteOptions, SyncMode, 91 | TransactionKind, WriteMap, dupsort, table, table_info, 92 | }; 93 | -------------------------------------------------------------------------------- /src/orm/traits.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | pub trait Encodable: Send + Sync + Sized { 4 | type Encoded: AsRef<[u8]> + Send + Sync; 5 | 6 | fn encode(self) -> Self::Encoded; 7 | } 8 | 9 | pub trait Decodable: Send + Sync + Sized { 10 | fn decode(b: &[u8]) -> anyhow::Result; 11 | } 12 | 13 | pub trait TableObject: Encodable + Decodable {} 14 | 15 | impl TableObject for T where T: Encodable + Decodable {} 16 | 17 | pub trait Table: Send + Sync + Debug + 'static { 18 | const NAME: &'static str; 19 | 20 | type Key: Encodable; 21 | type Value: TableObject; 22 | type SeekKey: Encodable; 23 | } 24 | pub trait DupSort: Table { 25 | type SeekValue: Encodable; 26 | } 27 | -------------------------------------------------------------------------------- /src/orm/transaction.rs: -------------------------------------------------------------------------------- 1 | use super::{cursor::*, traits::*}; 2 | use crate::{RO, RW, Stat, TransactionKind, WriteFlags, WriteMap}; 3 | use anyhow::Context; 4 | use std::{collections::HashMap, marker::PhantomData}; 5 | 6 | #[derive(Debug)] 7 | pub struct Transaction<'db, K> 8 | where 9 | K: TransactionKind, 10 | { 11 | pub(crate) inner: crate::Transaction<'db, K, WriteMap>, 12 | } 13 | 14 | impl Transaction<'_, RO> { 15 | pub fn table_sizes(&self) -> anyhow::Result> { 16 | let mut out = HashMap::new(); 17 | let main_table = self.inner.open_table(None)?; 18 | let mut cursor = self.inner.cursor(&main_table)?; 19 | while let Some((table, _)) = cursor.next_nodup::, ()>()? { 20 | let table = String::from_utf8(table)?; 21 | let db = self 22 | .inner 23 | .open_table(Some(&table)) 24 | .with_context(|| format!("failed to open table: {table}"))?; 25 | let stats = self 26 | .inner 27 | .table_stat(&db) 28 | .with_context(|| format!("failed to get stats for table: {table}"))?; 29 | 30 | out.insert(table, stats.total_size()); 31 | 32 | unsafe { 33 | self.inner.close_table(db)?; 34 | } 35 | } 36 | 37 | Ok(out) 38 | } 39 | } 40 | 41 | impl<'db, K> Transaction<'db, K> 42 | where 43 | K: TransactionKind, 44 | { 45 | pub fn table_stat(&self) -> Result 46 | where 47 | T: Table, 48 | { 49 | self.inner 50 | .table_stat(&self.inner.open_table(Some(T::NAME))?) 51 | } 52 | 53 | pub fn cursor<'tx, T>(&'tx self) -> anyhow::Result> 54 | where 55 | 'db: 'tx, 56 | T: Table, 57 | { 58 | Ok(Cursor { 59 | inner: self.inner.cursor(&self.inner.open_table(Some(T::NAME))?)?, 60 | _marker: PhantomData, 61 | }) 62 | } 63 | 64 | pub fn get(&self, key: T::Key) -> anyhow::Result> 65 | where 66 | T: Table, 67 | { 68 | Ok(self 69 | .inner 70 | .get::>( 71 | &self.inner.open_table(Some(T::NAME))?, 72 | key.encode().as_ref(), 73 | )? 
74 | .map(|v| v.0)) 75 | } 76 | } 77 | 78 | impl Transaction<'_, RW> { 79 | pub fn upsert(&self, key: T::Key, value: T::Value) -> anyhow::Result<()> 80 | where 81 | T: Table, 82 | { 83 | Ok(self.inner.put( 84 | &self.inner.open_table(Some(T::NAME))?, 85 | key.encode(), 86 | value.encode(), 87 | WriteFlags::UPSERT, 88 | )?) 89 | } 90 | 91 | pub fn delete(&self, key: T::Key, value: Option) -> anyhow::Result 92 | where 93 | T: Table, 94 | { 95 | let mut vref = None; 96 | let value = value.map(Encodable::encode); 97 | 98 | if let Some(v) = &value { 99 | vref = Some(v.as_ref()); 100 | }; 101 | Ok(self 102 | .inner 103 | .del(&self.inner.open_table(Some(T::NAME))?, key.encode(), vref)?) 104 | } 105 | 106 | pub fn clear_table(&self) -> anyhow::Result<()> 107 | where 108 | T: Table, 109 | { 110 | self.inner 111 | .clear_table(&self.inner.open_table(Some(T::NAME))?)?; 112 | 113 | Ok(()) 114 | } 115 | 116 | pub fn commit(self) -> anyhow::Result<()> { 117 | self.inner.commit()?; 118 | 119 | Ok(()) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/table.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | Transaction, 3 | database::DatabaseKind, 4 | error::{Result, mdbx_result}, 5 | flags::c_enum, 6 | transaction::{TransactionKind, txn_execute}, 7 | }; 8 | use libc::c_uint; 9 | use std::{ffi::CString, marker::PhantomData, ptr}; 10 | 11 | /// A handle to an individual table in a database. 12 | /// 13 | /// A table handle denotes the name and parameters of a table in a database. 14 | #[derive(Debug)] 15 | pub struct Table<'txn> { 16 | dbi: ffi::MDBX_dbi, 17 | _marker: PhantomData<&'txn ()>, 18 | } 19 | 20 | impl<'txn> Table<'txn> { 21 | pub(crate) fn new<'db, K: TransactionKind, E: DatabaseKind>( 22 | txn: &'txn Transaction<'db, K, E>, 23 | name: Option<&str>, 24 | flags: c_uint, 25 | ) -> Result { 26 | let c_name = name.map(|n| CString::new(n).unwrap()); 27 | let name_ptr = if let Some(c_name) = &c_name { 28 | c_name.as_ptr() 29 | } else { 30 | ptr::null() 31 | }; 32 | let mut dbi: ffi::MDBX_dbi = 0; 33 | mdbx_result(txn_execute(&txn.txn_mutex(), |txn| unsafe { 34 | ffi::mdbx_dbi_open(txn, name_ptr, c_enum(flags), &mut dbi) 35 | }))?; 36 | Ok(Self::new_from_ptr(dbi)) 37 | } 38 | 39 | pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi) -> Self { 40 | Self { 41 | dbi, 42 | _marker: PhantomData, 43 | } 44 | } 45 | 46 | pub(crate) fn freelist_table() -> Self { 47 | Table { 48 | dbi: 0, 49 | _marker: PhantomData, 50 | } 51 | } 52 | 53 | /// Returns the underlying MDBX table handle (dbi). 54 | /// 55 | /// The caller **must** ensure that the handle is not used after the lifetime of the 56 | /// database, or after the table has been closed. 
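    //
    // Illustrative note (not part of the original docs): the raw handle is what the safe
    // wrappers pass to the C API, e.g. `ffi::mdbx_dbi_flags_ex(txn, table.dbi(), ...)`
    // in `Transaction::table_flags`.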
57 | pub fn dbi(&self) -> ffi::MDBX_dbi { 58 | self.dbi 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/transaction.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | Cursor, Decodable, Error, Stat, 3 | database::{Database, DatabaseKind, NoWriteMap, TxnManagerMessage, TxnPtr}, 4 | error::{Result, mdbx_result}, 5 | flags::{TableFlags, WriteFlags, c_enum}, 6 | table::Table, 7 | }; 8 | use ffi::{MDBX_TXN_RDONLY, MDBX_TXN_READWRITE, MDBX_txn_flags_t}; 9 | use indexmap::IndexSet; 10 | use libc::{c_uint, c_void}; 11 | use parking_lot::Mutex; 12 | use sealed::sealed; 13 | use std::{ 14 | fmt, 15 | fmt::Debug, 16 | marker::PhantomData, 17 | mem::size_of, 18 | ptr, result, slice, 19 | sync::{Arc, mpsc::sync_channel}, 20 | }; 21 | 22 | #[sealed] 23 | pub trait TransactionKind: Debug + 'static { 24 | #[doc(hidden)] 25 | const ONLY_CLEAN: bool; 26 | 27 | #[doc(hidden)] 28 | const OPEN_FLAGS: MDBX_txn_flags_t; 29 | } 30 | 31 | #[derive(Debug)] 32 | pub struct RO; 33 | #[derive(Debug)] 34 | pub struct RW; 35 | 36 | #[sealed] 37 | impl TransactionKind for RO { 38 | const ONLY_CLEAN: bool = true; 39 | const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY; 40 | } 41 | #[sealed] 42 | impl TransactionKind for RW { 43 | const ONLY_CLEAN: bool = false; 44 | const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE; 45 | } 46 | 47 | /// An MDBX transaction. 48 | /// 49 | /// All table operations require a transaction. 50 | pub struct Transaction<'db, K, E> 51 | where 52 | K: TransactionKind, 53 | E: DatabaseKind, 54 | { 55 | txn: Arc>, 56 | primed_dbis: Mutex>, 57 | committed: bool, 58 | db: &'db Database, 59 | _marker: PhantomData, 60 | } 61 | 62 | impl<'db, K, E> Transaction<'db, K, E> 63 | where 64 | K: TransactionKind, 65 | E: DatabaseKind, 66 | { 67 | pub(crate) fn new(db: &'db Database) -> Result { 68 | let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); 69 | unsafe { 70 | mdbx_result(ffi::mdbx_txn_begin_ex( 71 | db.ptr().0, 72 | ptr::null_mut(), 73 | K::OPEN_FLAGS, 74 | &mut txn, 75 | ptr::null_mut(), 76 | ))?; 77 | Ok(Self::new_from_ptr(db, txn)) 78 | } 79 | } 80 | 81 | pub(crate) fn new_from_ptr(db: &'db Database, txn: *mut ffi::MDBX_txn) -> Self { 82 | Self { 83 | txn: Arc::new(Mutex::new(TxnPtr(txn))), 84 | primed_dbis: Mutex::new(IndexSet::new()), 85 | committed: false, 86 | db, 87 | _marker: PhantomData, 88 | } 89 | } 90 | 91 | /// Returns a raw pointer to the underlying MDBX transaction. 92 | /// 93 | /// The caller **must** ensure that the pointer is not used after the 94 | /// lifetime of the transaction. 95 | pub(crate) fn txn_mutex(&self) -> Arc> { 96 | self.txn.clone() 97 | } 98 | 99 | pub fn txn(&self) -> TxnPtr { 100 | *self.txn.lock() 101 | } 102 | 103 | /// Returns a raw pointer to the MDBX database. 104 | pub fn db(&self) -> &Database { 105 | self.db 106 | } 107 | 108 | /// Returns the transaction id. 109 | pub fn id(&self) -> u64 { 110 | txn_execute(&self.txn, |txn| unsafe { ffi::mdbx_txn_id(txn) }) 111 | } 112 | 113 | /// Gets an item from a table. 114 | /// 115 | /// This function retrieves the data associated with the given key in the 116 | /// table. If the table supports duplicate keys 117 | /// ([TableFlags::DUP_SORT]) then the first data item for the key will be 118 | /// returned. Retrieval of other items requires the use of 119 | /// [Cursor]. If the item is not in the table, then 120 | /// [None] will be returned. 
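    //
    // Illustrative sketch (mirrors the decodable types used in tests/cursor.rs, not part
    // of the original docs):
    //
    //     let table = txn.open_table(None)?;
    //     let value: Option<[u8; 4]> = txn.get(&table, b"key1")?;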
121 | pub fn get<'txn, Key>(&'txn self, table: &Table<'txn>, key: &[u8]) -> Result> 122 | where 123 | Key: Decodable<'txn>, 124 | { 125 | let key_val: ffi::MDBX_val = ffi::MDBX_val { 126 | iov_len: key.len(), 127 | iov_base: key.as_ptr() as *mut c_void, 128 | }; 129 | let mut data_val: ffi::MDBX_val = ffi::MDBX_val { 130 | iov_len: 0, 131 | iov_base: ptr::null_mut(), 132 | }; 133 | 134 | txn_execute(&self.txn, |txn| unsafe { 135 | match ffi::mdbx_get(txn, table.dbi(), &key_val, &mut data_val) { 136 | ffi::MDBX_SUCCESS => Key::decode_val::(txn, &data_val).map(Some), 137 | ffi::MDBX_NOTFOUND => Ok(None), 138 | err_code => Err(Error::from_err_code(err_code)), 139 | } 140 | }) 141 | } 142 | 143 | /// Commits the transaction. 144 | /// 145 | /// Any pending operations will be saved. 146 | pub fn commit(self) -> Result { 147 | self.commit_and_rebind_open_dbs().map(|v| v.0) 148 | } 149 | 150 | pub fn prime_for_permaopen(&self, table: Table<'_>) { 151 | self.primed_dbis.lock().insert(table.dbi()); 152 | } 153 | 154 | /// Commits the transaction and returns table handles permanently open for the lifetime of `Database`. 155 | pub fn commit_and_rebind_open_dbs(mut self) -> Result<(bool, Vec>)> { 156 | let txnlck = self.txn.lock(); 157 | let txn = txnlck.0; 158 | let result = if K::ONLY_CLEAN { 159 | mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, ptr::null_mut()) }) 160 | } else { 161 | let (sender, rx) = sync_channel(0); 162 | self.db 163 | .txn_manager 164 | .as_ref() 165 | .unwrap() 166 | .send(TxnManagerMessage::Commit { 167 | tx: TxnPtr(txn), 168 | sender, 169 | }) 170 | .unwrap(); 171 | rx.recv().unwrap() 172 | }; 173 | self.committed = true; 174 | result.map(|v| { 175 | ( 176 | v, 177 | self.primed_dbis 178 | .lock() 179 | .iter() 180 | .map(|&dbi| Table::new_from_ptr(dbi)) 181 | .collect(), 182 | ) 183 | }) 184 | } 185 | 186 | /// Opens a handle to an MDBX table. 187 | /// 188 | /// If `name` is [None], then the returned handle will be for the default table. 189 | /// 190 | /// If `name` is not [None], then the returned handle will be for a named table. In this 191 | /// case the database must be configured to allow named tables through 192 | /// [DatabaseBuilder::set_max_tables()](crate::DatabaseBuilder::set_max_tables). 193 | /// 194 | /// The returned table handle may be shared among any transaction in the database. 195 | /// 196 | /// The table name may not contain the null character. 197 | pub fn open_table<'txn>(&'txn self, name: Option<&str>) -> Result> { 198 | Table::new(self, name, 0) 199 | } 200 | 201 | /// Gets the option flags for the given table in the transaction. 202 | pub fn table_flags<'txn>(&'txn self, table: &Table<'txn>) -> Result { 203 | let mut flags: c_uint = 0; 204 | unsafe { 205 | mdbx_result(txn_execute(&self.txn, |txn| { 206 | ffi::mdbx_dbi_flags_ex(txn, table.dbi(), &mut flags, ptr::null_mut()) 207 | }))?; 208 | } 209 | Ok(TableFlags::from_bits_truncate(flags)) 210 | } 211 | 212 | /// Retrieves table statistics. 213 | pub fn table_stat<'txn>(&'txn self, table: &Table<'txn>) -> Result { 214 | unsafe { 215 | let mut stat = Stat::new(); 216 | mdbx_result(txn_execute(&self.txn, |txn| { 217 | ffi::mdbx_dbi_stat(txn, table.dbi(), stat.mdb_stat(), size_of::()) 218 | }))?; 219 | Ok(stat) 220 | } 221 | } 222 | 223 | /// Open a new cursor on the given table. 
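    //
    // Illustrative sketch (mirrors tests/cursor.rs, not part of the original docs):
    //
    //     let mut cursor = txn.cursor(&table)?;
    //     let all = cursor.iter().collect::<Result<Vec<([u8; 4], [u8; 4])>>>()?;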
224 | pub fn cursor<'txn>(&'txn self, table: &Table<'txn>) -> Result> { 225 | Cursor::new(self, table) 226 | } 227 | } 228 | 229 | pub(crate) fn txn_execute T, T>(txn: &Mutex, f: F) -> T { 230 | let lck = txn.lock(); 231 | (f)(lck.0) 232 | } 233 | 234 | impl Transaction<'_, RW, E> 235 | where 236 | E: DatabaseKind, 237 | { 238 | fn open_table_with_flags<'txn>( 239 | &'txn self, 240 | name: Option<&str>, 241 | flags: TableFlags, 242 | ) -> Result> { 243 | Table::new(self, name, flags.bits()) 244 | } 245 | 246 | /// Opens a handle to an MDBX table, creating the table if necessary. 247 | /// 248 | /// If the table is already created, the given option flags will be added to it. 249 | /// 250 | /// If `name` is [None], then the returned handle will be for the default table. 251 | /// 252 | /// If `name` is not [None], then the returned handle will be for a named table. In this 253 | /// case the database must be configured to allow named tables through 254 | /// [DatabaseBuilder::set_max_tables()](crate::DatabaseBuilder::set_max_tables). 255 | /// 256 | /// This function will fail with [Error::BadRslot](crate::error::Error::BadRslot) if called by a thread with an open 257 | /// transaction. 258 | pub fn create_table<'txn>( 259 | &'txn self, 260 | name: Option<&str>, 261 | flags: TableFlags, 262 | ) -> Result> { 263 | self.open_table_with_flags(name, flags | TableFlags::CREATE) 264 | } 265 | 266 | /// Stores an item into a table. 267 | /// 268 | /// This function stores key/data pairs in the table. The default 269 | /// behavior is to enter the new key/data pair, replacing any previously 270 | /// existing key if duplicates are disallowed, or adding a duplicate data 271 | /// item if duplicates are allowed ([TableFlags::DUP_SORT]). 272 | pub fn put<'txn>( 273 | &'txn self, 274 | table: &Table<'txn>, 275 | key: impl AsRef<[u8]>, 276 | data: impl AsRef<[u8]>, 277 | flags: WriteFlags, 278 | ) -> Result<()> { 279 | let key = key.as_ref(); 280 | let data = data.as_ref(); 281 | let key_val: ffi::MDBX_val = ffi::MDBX_val { 282 | iov_len: key.len(), 283 | iov_base: key.as_ptr() as *mut c_void, 284 | }; 285 | let mut data_val: ffi::MDBX_val = ffi::MDBX_val { 286 | iov_len: data.len(), 287 | iov_base: data.as_ptr() as *mut c_void, 288 | }; 289 | mdbx_result(txn_execute(&self.txn, |txn| unsafe { 290 | ffi::mdbx_put( 291 | txn, 292 | table.dbi(), 293 | &key_val, 294 | &mut data_val, 295 | c_enum(flags.bits()), 296 | ) 297 | }))?; 298 | 299 | Ok(()) 300 | } 301 | 302 | /// Returns a buffer which can be used to write a value into the item at the 303 | /// given key and with the given length. The buffer must be completely 304 | /// filled by the caller. 305 | pub fn reserve<'txn>( 306 | &'txn self, 307 | table: &Table<'txn>, 308 | key: impl AsRef<[u8]>, 309 | len: usize, 310 | flags: WriteFlags, 311 | ) -> Result<&'txn mut [u8]> { 312 | let key = key.as_ref(); 313 | let key_val: ffi::MDBX_val = ffi::MDBX_val { 314 | iov_len: key.len(), 315 | iov_base: key.as_ptr() as *mut c_void, 316 | }; 317 | let mut data_val: ffi::MDBX_val = ffi::MDBX_val { 318 | iov_len: len, 319 | iov_base: ptr::null_mut::(), 320 | }; 321 | unsafe { 322 | mdbx_result(txn_execute(&self.txn, |txn| { 323 | ffi::mdbx_put( 324 | txn, 325 | table.dbi(), 326 | &key_val, 327 | &mut data_val, 328 | c_enum(flags.bits() | ffi::MDBX_RESERVE as u32), 329 | ) 330 | }))?; 331 | Ok(slice::from_raw_parts_mut( 332 | data_val.iov_base as *mut u8, 333 | data_val.iov_len, 334 | )) 335 | } 336 | } 337 | 338 | /// Delete items from a table. 
339 | /// This function removes key/data pairs from the table. 340 | /// 341 | /// The data parameter is NOT ignored regardless the table does support sorted duplicate data items or not. 342 | /// If the data parameter is [Some] only the matching data item will be deleted. 343 | /// Otherwise, if data parameter is [None], any/all value(s) for specified key will be deleted. 344 | /// 345 | /// Returns `true` if the key/value pair was present. 346 | pub fn del<'txn>( 347 | &'txn self, 348 | table: &Table<'txn>, 349 | key: impl AsRef<[u8]>, 350 | data: Option<&[u8]>, 351 | ) -> Result { 352 | let key = key.as_ref(); 353 | let key_val: ffi::MDBX_val = ffi::MDBX_val { 354 | iov_len: key.len(), 355 | iov_base: key.as_ptr() as *mut c_void, 356 | }; 357 | let data_val: Option = data.map(|data| ffi::MDBX_val { 358 | iov_len: data.len(), 359 | iov_base: data.as_ptr() as *mut c_void, 360 | }); 361 | 362 | mdbx_result({ 363 | txn_execute(&self.txn, |txn| { 364 | if let Some(d) = data_val { 365 | unsafe { ffi::mdbx_del(txn, table.dbi(), &key_val, &d) } 366 | } else { 367 | unsafe { ffi::mdbx_del(txn, table.dbi(), &key_val, ptr::null()) } 368 | } 369 | }) 370 | }) 371 | .map(|_| true) 372 | .or_else(|e| match e { 373 | Error::NotFound => Ok(false), 374 | other => Err(other), 375 | }) 376 | } 377 | 378 | /// Empties the given table. All items will be removed. 379 | pub fn clear_table<'txn>(&'txn self, table: &Table<'txn>) -> Result<()> { 380 | mdbx_result(txn_execute(&self.txn, |txn| unsafe { 381 | ffi::mdbx_drop(txn, table.dbi(), false) 382 | }))?; 383 | 384 | Ok(()) 385 | } 386 | 387 | /// Drops the table from the database. 388 | /// 389 | /// # Safety 390 | /// Caller must close ALL other [Table] and [Cursor] instances pointing to the same dbi BEFORE calling this function. 391 | pub unsafe fn drop_table<'txn>(&'txn self, table: Table<'txn>) -> Result<()> { 392 | mdbx_result(txn_execute(&self.txn, |txn| unsafe { 393 | ffi::mdbx_drop(txn, table.dbi(), true) 394 | }))?; 395 | 396 | Ok(()) 397 | } 398 | } 399 | 400 | impl Transaction<'_, RO, E> 401 | where 402 | E: DatabaseKind, 403 | { 404 | /// Closes the table handle. 405 | /// 406 | /// # Safety 407 | /// Caller must close ALL other [Table] and [Cursor] instances pointing to the same dbi BEFORE calling this function. 408 | pub unsafe fn close_table(&self, table: Table<'_>) -> Result<()> { 409 | mdbx_result(unsafe { ffi::mdbx_dbi_close(self.db.ptr().0, table.dbi()) })?; 410 | 411 | Ok(()) 412 | } 413 | } 414 | 415 | impl Transaction<'_, RW, NoWriteMap> { 416 | /// Begins a new nested transaction inside of this transaction. 
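    //
    // Illustrative sketch (not part of the original docs): work done in the nested
    // transaction only becomes part of the parent once the nested transaction commits;
    // dropping it without committing discards those changes.
    //
    //     let mut parent = db.begin_rw_txn()?;
    //     {
    //         let nested = parent.begin_nested_txn()?;
    //         // ... put/del through `nested` ...
    //         nested.commit()?;
    //     }
    //     parent.commit()?;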
417 | pub fn begin_nested_txn(&mut self) -> Result> { 418 | txn_execute(&self.txn, |txn| { 419 | let (tx, rx) = sync_channel(0); 420 | self.db 421 | .txn_manager 422 | .as_ref() 423 | .unwrap() 424 | .send(TxnManagerMessage::Begin { 425 | parent: TxnPtr(txn), 426 | flags: RW::OPEN_FLAGS, 427 | sender: tx, 428 | }) 429 | .unwrap(); 430 | 431 | rx.recv() 432 | .unwrap() 433 | .map(|ptr| Transaction::new_from_ptr(self.db, ptr.0)) 434 | }) 435 | } 436 | } 437 | 438 | impl fmt::Debug for Transaction<'_, K, E> 439 | where 440 | K: TransactionKind, 441 | E: DatabaseKind, 442 | { 443 | fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { 444 | f.debug_struct("RoTransaction").finish() 445 | } 446 | } 447 | 448 | impl Drop for Transaction<'_, K, E> 449 | where 450 | K: TransactionKind, 451 | E: DatabaseKind, 452 | { 453 | fn drop(&mut self) { 454 | txn_execute(&self.txn, |txn| { 455 | if !self.committed { 456 | if K::ONLY_CLEAN { 457 | unsafe { 458 | ffi::mdbx_txn_abort(txn); 459 | } 460 | } else { 461 | let (sender, rx) = sync_channel(0); 462 | self.db 463 | .txn_manager 464 | .as_ref() 465 | .unwrap() 466 | .send(TxnManagerMessage::Abort { 467 | tx: TxnPtr(txn), 468 | sender, 469 | }) 470 | .unwrap(); 471 | rx.recv().unwrap().unwrap(); 472 | } 473 | } 474 | }) 475 | } 476 | } 477 | -------------------------------------------------------------------------------- /tests/cursor.rs: -------------------------------------------------------------------------------- 1 | use libmdbx::*; 2 | use std::borrow::Cow; 3 | use tempfile::tempdir; 4 | 5 | type Database = libmdbx::Database; 6 | 7 | #[test] 8 | fn test_get() { 9 | let dir = tempdir().unwrap(); 10 | let db = Database::open(&dir).unwrap(); 11 | 12 | let txn = db.begin_rw_txn().unwrap(); 13 | let table = txn.open_table(None).unwrap(); 14 | 15 | assert_eq!(None, txn.cursor(&table).unwrap().first::<(), ()>().unwrap()); 16 | 17 | for (k, v) in [(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3")] { 18 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 19 | } 20 | 21 | let mut cursor = txn.cursor(&table).unwrap(); 22 | assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); 23 | assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1"))); 24 | assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2"))); 25 | assert_eq!(cursor.prev().unwrap(), Some((*b"key1", *b"val1"))); 26 | assert_eq!(cursor.last().unwrap(), Some((*b"key3", *b"val3"))); 27 | assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1")); 28 | assert_eq!(cursor.set_key(b"key3").unwrap(), Some((*b"key3", *b"val3"))); 29 | assert_eq!( 30 | cursor.set_range(b"key2\0").unwrap(), 31 | Some((*b"key3", *b"val3")) 32 | ); 33 | } 34 | 35 | #[test] 36 | fn test_get_dup() { 37 | let dir = tempdir().unwrap(); 38 | let db = Database::open(&dir).unwrap(); 39 | 40 | let txn = db.begin_rw_txn().unwrap(); 41 | let table = txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 42 | for (k, v) in [ 43 | (b"key1", b"val1"), 44 | (b"key1", b"val2"), 45 | (b"key1", b"val3"), 46 | (b"key2", b"val1"), 47 | (b"key2", b"val2"), 48 | (b"key2", b"val3"), 49 | ] { 50 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 51 | } 52 | 53 | let mut cursor = txn.cursor(&table).unwrap(); 54 | assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); 55 | assert_eq!(cursor.first_dup().unwrap(), Some(*b"val1")); 56 | assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1"))); 57 | assert_eq!(cursor.next_nodup().unwrap(), Some((*b"key2", *b"val1"))); 58 | 
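    // next_nodup() jumped to the first duplicate of the next key ("key2"); the calls
    // below use next()/next_dup() to walk the remaining duplicates of that key.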
assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2"))); 59 | assert_eq!(cursor.prev().unwrap(), Some((*b"key2", *b"val1"))); 60 | assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val2"))); 61 | assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val3"))); 62 | assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None); 63 | assert_eq!(cursor.prev_dup().unwrap(), Some((*b"key2", *b"val2"))); 64 | assert_eq!(cursor.last_dup().unwrap(), Some(*b"val3")); 65 | assert_eq!(cursor.prev_nodup().unwrap(), Some((*b"key1", *b"val3"))); 66 | assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None); 67 | assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1")); 68 | assert_eq!(cursor.set(b"key2").unwrap(), Some(*b"val1")); 69 | assert_eq!( 70 | cursor.set_range(b"key1\0").unwrap(), 71 | Some((*b"key2", *b"val1")) 72 | ); 73 | assert_eq!(cursor.get_both(b"key1", b"val3").unwrap(), Some(*b"val3")); 74 | assert_eq!(cursor.get_both_range::<()>(b"key1", b"val4").unwrap(), None); 75 | assert_eq!( 76 | cursor.get_both_range(b"key2", b"val").unwrap(), 77 | Some(*b"val1") 78 | ); 79 | 80 | for kv in [ 81 | (*b"key2", *b"val3"), 82 | (*b"key2", *b"val2"), 83 | (*b"key2", *b"val1"), 84 | (*b"key1", *b"val3"), 85 | ] { 86 | assert_eq!(cursor.last().unwrap(), Some(kv)); 87 | cursor.del(WriteFlags::empty()).unwrap(); 88 | } 89 | } 90 | 91 | #[test] 92 | fn test_get_dupfixed() { 93 | let dir = tempdir().unwrap(); 94 | let db = Database::open(&dir).unwrap(); 95 | 96 | let txn = db.begin_rw_txn().unwrap(); 97 | let table = txn 98 | .create_table(None, TableFlags::DUP_SORT | TableFlags::DUP_FIXED) 99 | .unwrap(); 100 | for (k, v) in [ 101 | (b"key1", b"val1"), 102 | (b"key1", b"val2"), 103 | (b"key1", b"val3"), 104 | (b"key2", b"val1"), 105 | (b"key2", b"val2"), 106 | (b"key2", b"val3"), 107 | ] { 108 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 109 | } 110 | 111 | let mut cursor = txn.cursor(&table).unwrap(); 112 | assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); 113 | assert_eq!(cursor.get_multiple().unwrap(), Some(*b"val1val2val3")); 114 | assert_eq!(cursor.next_multiple::<(), ()>().unwrap(), None); 115 | } 116 | 117 | #[test] 118 | fn test_iter() { 119 | let dir = tempdir().unwrap(); 120 | let db = Database::open(&dir).unwrap(); 121 | 122 | let items = vec![ 123 | (*b"key1", *b"val1"), 124 | (*b"key2", *b"val2"), 125 | (*b"key3", *b"val3"), 126 | (*b"key5", *b"val5"), 127 | ]; 128 | 129 | { 130 | let txn = db.begin_rw_txn().unwrap(); 131 | let table = txn.open_table(None).unwrap(); 132 | for (key, data) in &items { 133 | txn.put(&table, key, data, WriteFlags::empty()).unwrap(); 134 | } 135 | assert!(!txn.commit().unwrap()); 136 | } 137 | 138 | let txn = db.begin_ro_txn().unwrap(); 139 | let table = txn.open_table(None).unwrap(); 140 | let mut cursor = txn.cursor(&table).unwrap(); 141 | 142 | // Because Result implements FromIterator, we can collect the iterator 143 | // of items of type Result<_, E> into a Result> by specifying 144 | // the collection type via the turbofish syntax. 145 | assert_eq!(items, cursor.iter().collect::>>().unwrap()); 146 | 147 | // Alternately, we can collect it into an appropriately typed variable. 
148 | let retr: Result> = cursor.iter_start().collect(); 149 | assert_eq!(items, retr.unwrap()); 150 | 151 | cursor.set::<()>(b"key2").unwrap(); 152 | assert_eq!( 153 | items.clone().into_iter().skip(2).collect::>(), 154 | cursor.iter().collect::>>().unwrap() 155 | ); 156 | 157 | assert_eq!( 158 | items, 159 | cursor.iter_start().collect::>>().unwrap() 160 | ); 161 | 162 | assert_eq!( 163 | items.clone().into_iter().skip(1).collect::>(), 164 | cursor 165 | .iter_from(b"key2") 166 | .collect::>>() 167 | .unwrap() 168 | ); 169 | 170 | assert_eq!( 171 | items.into_iter().skip(3).collect::>(), 172 | cursor 173 | .iter_from(b"key4") 174 | .collect::>>() 175 | .unwrap() 176 | ); 177 | 178 | assert_eq!( 179 | Vec::<((), ())>::new(), 180 | cursor 181 | .iter_from(b"key6") 182 | .collect::>>() 183 | .unwrap() 184 | ); 185 | } 186 | 187 | #[test] 188 | fn test_iter_empty_database() { 189 | let dir = tempdir().unwrap(); 190 | let db = Database::open(&dir).unwrap(); 191 | let txn = db.begin_ro_txn().unwrap(); 192 | let table = txn.open_table(None).unwrap(); 193 | let mut cursor = txn.cursor(&table).unwrap(); 194 | 195 | assert!(cursor.iter::<(), ()>().next().is_none()); 196 | assert!(cursor.iter_start::<(), ()>().next().is_none()); 197 | assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none()); 198 | } 199 | 200 | #[test] 201 | fn test_iter_empty_dup_database() { 202 | let dir = tempdir().unwrap(); 203 | let db = Database::open(&dir).unwrap(); 204 | 205 | let txn = db.begin_rw_txn().unwrap(); 206 | txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 207 | txn.commit().unwrap(); 208 | 209 | let txn = db.begin_ro_txn().unwrap(); 210 | let table = txn.open_table(None).unwrap(); 211 | let mut cursor = txn.cursor(&table).unwrap(); 212 | 213 | assert!(cursor.iter::<(), ()>().next().is_none()); 214 | assert!(cursor.iter_start::<(), ()>().next().is_none()); 215 | assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none()); 216 | assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none()); 217 | assert!(cursor.iter_dup::<(), ()>().flatten().next().is_none()); 218 | assert!(cursor.iter_dup_start::<(), ()>().flatten().next().is_none()); 219 | assert!( 220 | cursor 221 | .iter_dup_from::<(), ()>(b"foo") 222 | .flatten() 223 | .next() 224 | .is_none() 225 | ); 226 | assert!(cursor.iter_dup_of::<(), ()>(b"foo").next().is_none()); 227 | } 228 | 229 | #[test] 230 | fn test_iter_dup() { 231 | let dir = tempdir().unwrap(); 232 | let db = Database::open(&dir).unwrap(); 233 | 234 | let txn = db.begin_rw_txn().unwrap(); 235 | txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 236 | txn.commit().unwrap(); 237 | 238 | let items = [ 239 | (b"a", b"1"), 240 | (b"a", b"2"), 241 | (b"a", b"3"), 242 | (b"b", b"1"), 243 | (b"b", b"2"), 244 | (b"b", b"3"), 245 | (b"c", b"1"), 246 | (b"c", b"2"), 247 | (b"c", b"3"), 248 | (b"e", b"1"), 249 | (b"e", b"2"), 250 | (b"e", b"3"), 251 | ] 252 | .iter() 253 | .map(|&(&k, &v)| (k, v)) 254 | .collect::>(); 255 | 256 | { 257 | let txn = db.begin_rw_txn().unwrap(); 258 | for (key, data) in items.clone() { 259 | let table = txn.open_table(None).unwrap(); 260 | txn.put(&table, key, data, WriteFlags::empty()).unwrap(); 261 | } 262 | txn.commit().unwrap(); 263 | } 264 | 265 | let txn = db.begin_ro_txn().unwrap(); 266 | let table = txn.open_table(None).unwrap(); 267 | let mut cursor = txn.cursor(&table).unwrap(); 268 | assert_eq!( 269 | items, 270 | cursor 271 | .iter_dup() 272 | .flatten() 273 | .collect::>>() 274 | .unwrap() 275 | ); 276 | 277 | cursor.set::<()>(b"b").unwrap(); 278 
| assert_eq!( 279 | items.iter().copied().skip(4).collect::>(), 280 | cursor 281 | .iter_dup() 282 | .flatten() 283 | .collect::>>() 284 | .unwrap() 285 | ); 286 | 287 | assert_eq!( 288 | items, 289 | cursor 290 | .iter_dup_start() 291 | .flatten() 292 | .collect::>>() 293 | .unwrap() 294 | ); 295 | 296 | assert_eq!( 297 | items.iter().copied().skip(3).collect::>(), 298 | cursor 299 | .iter_dup_from(b"b") 300 | .flatten() 301 | .collect::>>() 302 | .unwrap() 303 | ); 304 | 305 | assert_eq!( 306 | items.iter().copied().skip(3).collect::>(), 307 | cursor 308 | .iter_dup_from(b"ab") 309 | .flatten() 310 | .collect::>>() 311 | .unwrap() 312 | ); 313 | 314 | assert_eq!( 315 | items.iter().copied().skip(9).collect::>(), 316 | cursor 317 | .iter_dup_from(b"d") 318 | .flatten() 319 | .collect::>>() 320 | .unwrap() 321 | ); 322 | 323 | assert_eq!( 324 | Vec::<([u8; 1], [u8; 1])>::new(), 325 | cursor 326 | .iter_dup_from(b"f") 327 | .flatten() 328 | .collect::>>() 329 | .unwrap() 330 | ); 331 | 332 | assert_eq!( 333 | items.iter().copied().skip(3).take(3).collect::>(), 334 | cursor 335 | .iter_dup_of(b"b") 336 | .collect::>>() 337 | .unwrap() 338 | ); 339 | 340 | assert_eq!(0, cursor.iter_dup_of::<(), ()>(b"foo").count()); 341 | } 342 | 343 | #[test] 344 | fn test_iter_del_get() { 345 | let dir = tempdir().unwrap(); 346 | let db = Database::open(&dir).unwrap(); 347 | 348 | let items = vec![(*b"a", *b"1"), (*b"b", *b"2")]; 349 | { 350 | let txn = db.begin_rw_txn().unwrap(); 351 | let table = txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 352 | assert_eq!( 353 | txn.cursor(&table) 354 | .unwrap() 355 | .iter_dup_of::<(), ()>(b"a") 356 | .collect::>>() 357 | .unwrap() 358 | .len(), 359 | 0 360 | ); 361 | txn.commit().unwrap(); 362 | } 363 | 364 | { 365 | let txn = db.begin_rw_txn().unwrap(); 366 | let table = txn.open_table(None).unwrap(); 367 | for (key, data) in &items { 368 | txn.put(&table, key, data, WriteFlags::empty()).unwrap(); 369 | } 370 | txn.commit().unwrap(); 371 | } 372 | 373 | let txn = db.begin_rw_txn().unwrap(); 374 | let table = txn.open_table(None).unwrap(); 375 | let mut cursor = txn.cursor(&table).unwrap(); 376 | assert_eq!( 377 | items, 378 | cursor 379 | .iter_dup() 380 | .flatten() 381 | .collect::>>() 382 | .unwrap() 383 | ); 384 | 385 | assert_eq!( 386 | items.iter().copied().take(1).collect::>(), 387 | cursor 388 | .iter_dup_of(b"a") 389 | .collect::>>() 390 | .unwrap() 391 | ); 392 | 393 | assert_eq!(cursor.set(b"a").unwrap(), Some(*b"1")); 394 | 395 | cursor.del(WriteFlags::empty()).unwrap(); 396 | 397 | assert_eq!( 398 | cursor 399 | .iter_dup_of::<(), ()>(b"a") 400 | .collect::>>() 401 | .unwrap() 402 | .len(), 403 | 0 404 | ); 405 | } 406 | 407 | #[test] 408 | fn test_put_del() { 409 | let dir = tempdir().unwrap(); 410 | let db = Database::open(&dir).unwrap(); 411 | 412 | let txn = db.begin_rw_txn().unwrap(); 413 | let table = txn.open_table(None).unwrap(); 414 | let mut cursor = txn.cursor(&table).unwrap(); 415 | 416 | for (k, v) in [(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3")] { 417 | cursor.put(k, v, WriteFlags::empty()).unwrap(); 418 | } 419 | 420 | assert_eq!( 421 | cursor.get_current().unwrap().unwrap(), 422 | ( 423 | Cow::Borrowed(b"key3" as &[u8]), 424 | Cow::Borrowed(b"val3" as &[u8]) 425 | ) 426 | ); 427 | 428 | cursor.del(WriteFlags::empty()).unwrap(); 429 | assert_eq!(cursor.get_current::, Vec>().unwrap(), None); 430 | assert_eq!( 431 | cursor.last().unwrap().unwrap(), 432 | ( 433 | Cow::Borrowed(b"key2" as &[u8]), 434 | 
Cow::Borrowed(b"val2" as &[u8]) 435 | ) 436 | ); 437 | } 438 | -------------------------------------------------------------------------------- /tests/environment.rs: -------------------------------------------------------------------------------- 1 | use libmdbx::*; 2 | use tempfile::tempdir; 3 | 4 | type Database = libmdbx::Database; 5 | 6 | #[test] 7 | fn test_open() { 8 | let dir = tempdir().unwrap(); 9 | 10 | // opening non-existent database with read-only should fail 11 | assert!( 12 | Database::open_with_options( 13 | &dir, 14 | DatabaseOptions { 15 | mode: Mode::ReadOnly, 16 | ..Default::default() 17 | } 18 | ) 19 | .is_err() 20 | ); 21 | 22 | // opening non-existent database should succeed 23 | assert!(Database::open(&dir).is_ok()); 24 | 25 | // opening database with read-only should succeed 26 | assert!( 27 | Database::open_with_options( 28 | &dir, 29 | DatabaseOptions { 30 | mode: Mode::ReadOnly, 31 | ..Default::default() 32 | } 33 | ) 34 | .is_ok() 35 | ); 36 | } 37 | 38 | #[test] 39 | fn test_begin_txn() { 40 | let dir = tempdir().unwrap(); 41 | 42 | { 43 | // writable database 44 | let db = Database::open(&dir).unwrap(); 45 | 46 | assert!(db.begin_rw_txn().is_ok()); 47 | assert!(db.begin_ro_txn().is_ok()); 48 | } 49 | 50 | { 51 | // read-only database 52 | let db = Database::open_with_options( 53 | &dir, 54 | DatabaseOptions { 55 | mode: Mode::ReadOnly, 56 | ..Default::default() 57 | }, 58 | ) 59 | .unwrap(); 60 | 61 | assert!(db.begin_rw_txn().is_err()); 62 | assert!(db.begin_ro_txn().is_ok()); 63 | } 64 | } 65 | 66 | #[test] 67 | fn test_open_table() { 68 | let dir = tempdir().unwrap(); 69 | let db = Database::open_with_options( 70 | &dir, 71 | DatabaseOptions { 72 | max_tables: Some(1), 73 | ..Default::default() 74 | }, 75 | ) 76 | .unwrap(); 77 | 78 | let txn = db.begin_ro_txn().unwrap(); 79 | assert!(txn.open_table(None).is_ok()); 80 | assert!(txn.open_table(Some("test")).is_err()); 81 | } 82 | 83 | #[test] 84 | fn test_create_table() { 85 | let dir = tempdir().unwrap(); 86 | let db = Database::open_with_options( 87 | &dir, 88 | DatabaseOptions { 89 | max_tables: Some(11), 90 | ..Default::default() 91 | }, 92 | ) 93 | .unwrap(); 94 | 95 | let txn = db.begin_rw_txn().unwrap(); 96 | assert!(txn.open_table(Some("test")).is_err()); 97 | assert!(txn.create_table(Some("test"), TableFlags::empty()).is_ok()); 98 | assert!(txn.open_table(Some("test")).is_ok()) 99 | } 100 | 101 | #[test] 102 | fn test_close_table() { 103 | let dir = tempdir().unwrap(); 104 | let db = Database::open_with_options( 105 | &dir, 106 | DatabaseOptions { 107 | max_tables: Some(10), 108 | ..Default::default() 109 | }, 110 | ) 111 | .unwrap(); 112 | 113 | let txn = db.begin_rw_txn().unwrap(); 114 | txn.create_table(Some("test"), TableFlags::empty()).unwrap(); 115 | txn.open_table(Some("test")).unwrap(); 116 | } 117 | 118 | #[test] 119 | fn test_sync() { 120 | let dir = tempdir().unwrap(); 121 | { 122 | let db = Database::open(&dir).unwrap(); 123 | db.sync(true).unwrap(); 124 | } 125 | { 126 | let db = Database::open_with_options( 127 | &dir, 128 | DatabaseOptions { 129 | mode: Mode::ReadOnly, 130 | ..Default::default() 131 | }, 132 | ) 133 | .unwrap(); 134 | db.sync(true).unwrap_err(); 135 | } 136 | } 137 | 138 | #[test] 139 | fn test_stat() { 140 | let dir = tempdir().unwrap(); 141 | let db = Database::open(&dir).unwrap(); 142 | 143 | // Stats should be empty initially. 
144 | let stat = db.stat().unwrap(); 145 | assert_eq!(stat.depth(), 0); 146 | assert_eq!(stat.branch_pages(), 0); 147 | assert_eq!(stat.leaf_pages(), 0); 148 | assert_eq!(stat.overflow_pages(), 0); 149 | assert_eq!(stat.entries(), 0); 150 | 151 | // Write a few small values. 152 | for i in 0..64_u64 { 153 | let value = i.to_le_bytes(); 154 | let tx = db.begin_rw_txn().unwrap(); 155 | tx.put( 156 | &tx.open_table(None).unwrap(), 157 | value, 158 | value, 159 | WriteFlags::default(), 160 | ) 161 | .unwrap(); 162 | tx.commit().unwrap(); 163 | } 164 | 165 | // Stats should now reflect inserted values. 166 | let stat = db.stat().unwrap(); 167 | assert_eq!(stat.depth(), 1); 168 | assert_eq!(stat.branch_pages(), 0); 169 | assert_eq!(stat.leaf_pages(), 1); 170 | assert_eq!(stat.overflow_pages(), 0); 171 | assert_eq!(stat.entries(), 64); 172 | } 173 | 174 | #[test] 175 | fn test_info() { 176 | let dir = tempdir().unwrap(); 177 | let db = Database::open(&dir).unwrap(); 178 | 179 | let info = db.info().unwrap(); 180 | // assert_eq!(info.geometry().min(), map_size as u64); 181 | // assert_eq!(info.last_pgno(), 1); 182 | // assert_eq!(info.last_txnid(), 0); 183 | assert_eq!(info.num_readers(), 0); 184 | } 185 | 186 | #[test] 187 | fn test_freelist() { 188 | let dir = tempdir().unwrap(); 189 | let db = Database::open(&dir).unwrap(); 190 | 191 | let mut freelist = db.freelist().unwrap(); 192 | assert_eq!(freelist, 0); 193 | 194 | // Write a few small values. 195 | for i in 0..64_u64 { 196 | let value = i.to_le_bytes(); 197 | let tx = db.begin_rw_txn().unwrap(); 198 | tx.put( 199 | &tx.open_table(None).unwrap(), 200 | value, 201 | value, 202 | WriteFlags::default(), 203 | ) 204 | .unwrap(); 205 | tx.commit().unwrap(); 206 | } 207 | let tx = db.begin_rw_txn().unwrap(); 208 | tx.clear_table(&tx.open_table(None).unwrap()).unwrap(); 209 | tx.commit().unwrap(); 210 | 211 | // Freelist should not be empty after clear_table. 
212 | freelist = db.freelist().unwrap(); 213 | assert!(freelist > 0); 214 | } 215 | -------------------------------------------------------------------------------- /tests/transaction.rs: -------------------------------------------------------------------------------- 1 | use libmdbx::*; 2 | use std::{ 3 | borrow::Cow, 4 | io::Write, 5 | sync::{Arc, Barrier}, 6 | thread::{self, JoinHandle}, 7 | }; 8 | use tempfile::tempdir; 9 | 10 | type Database = libmdbx::Database; 11 | 12 | #[test] 13 | fn test_put_get_del() { 14 | let dir = tempdir().unwrap(); 15 | let db = Database::open(&dir).unwrap(); 16 | 17 | let txn = db.begin_rw_txn().unwrap(); 18 | let table = txn.open_table(None).unwrap(); 19 | 20 | let data = [(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3")]; 21 | 22 | for (k, v) in data { 23 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 24 | } 25 | txn.commit().unwrap(); 26 | 27 | let txn = db.begin_rw_txn().unwrap(); 28 | let table = txn.open_table(None).unwrap(); 29 | 30 | for (k, v) in data { 31 | assert_eq!(txn.get(&table, k).unwrap(), Some(*v)); 32 | assert_eq!(txn.get(&table, k).unwrap(), Some(*v)); 33 | assert_eq!(txn.get(&table, k).unwrap(), Some(*v)); 34 | } 35 | assert_eq!(txn.get::<()>(&table, b"key").unwrap(), None); 36 | 37 | txn.del(&table, b"key1", None).unwrap(); 38 | assert_eq!(txn.get::<()>(&table, b"key1").unwrap(), None); 39 | } 40 | 41 | #[test] 42 | fn test_put_get_del_multi() { 43 | let dir = tempdir().unwrap(); 44 | let db = Database::open(&dir).unwrap(); 45 | 46 | let txn = db.begin_rw_txn().unwrap(); 47 | let table = txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 48 | for (k, v) in [ 49 | (b"key1", b"val1"), 50 | (b"key1", b"val2"), 51 | (b"key1", b"val3"), 52 | (b"key2", b"val1"), 53 | (b"key2", b"val2"), 54 | (b"key2", b"val3"), 55 | (b"key3", b"val1"), 56 | (b"key3", b"val2"), 57 | (b"key3", b"val3"), 58 | ] { 59 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 60 | } 61 | txn.commit().unwrap(); 62 | 63 | let txn = db.begin_rw_txn().unwrap(); 64 | let table = txn.open_table(None).unwrap(); 65 | { 66 | let mut cur = txn.cursor(&table).unwrap(); 67 | let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1"); 68 | let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::>(); 69 | assert_eq!(vals, vec![*b"val1", *b"val2", *b"val3"]); 70 | } 71 | txn.commit().unwrap(); 72 | 73 | let txn = db.begin_rw_txn().unwrap(); 74 | let table = txn.open_table(None).unwrap(); 75 | for (k, v) in [(b"key1", Some(b"val2" as &[u8])), (b"key2", None)] { 76 | txn.del(&table, k, v).unwrap(); 77 | } 78 | txn.commit().unwrap(); 79 | 80 | let txn = db.begin_rw_txn().unwrap(); 81 | let table = txn.open_table(None).unwrap(); 82 | { 83 | let mut cur = txn.cursor(&table).unwrap(); 84 | let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1"); 85 | let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::>(); 86 | assert_eq!(vals, vec![*b"val1", *b"val3"]); 87 | 88 | let iter = cur.iter_dup_of::<(), ()>(b"key2"); 89 | assert_eq!(0, iter.count()); 90 | } 91 | txn.commit().unwrap(); 92 | } 93 | 94 | #[test] 95 | fn test_put_get_del_empty_key() { 96 | let dir = tempdir().unwrap(); 97 | let db = Database::open(&dir).unwrap(); 98 | 99 | let txn = db.begin_rw_txn().unwrap(); 100 | let table = txn.create_table(None, Default::default()).unwrap(); 101 | txn.put(&table, b"", b"hello", WriteFlags::empty()).unwrap(); 102 | assert_eq!(txn.get(&table, b"").unwrap(), Some(*b"hello")); 103 | txn.commit().unwrap(); 104 | 105 | let txn = db.begin_rw_txn().unwrap(); 106 | let table = 
txn.open_table(None).unwrap(); 107 | assert_eq!(txn.get(&table, b"").unwrap(), Some(*b"hello")); 108 | txn.put(&table, b"", b"", WriteFlags::empty()).unwrap(); 109 | assert_eq!(txn.get(&table, b"").unwrap(), Some(*b"")); 110 | } 111 | 112 | #[test] 113 | fn test_reserve() { 114 | let dir = tempdir().unwrap(); 115 | let db = Database::open(&dir).unwrap(); 116 | 117 | let txn = db.begin_rw_txn().unwrap(); 118 | let table = txn.open_table(None).unwrap(); 119 | { 120 | let mut writer = txn 121 | .reserve(&table, b"key1", 4, WriteFlags::empty()) 122 | .unwrap(); 123 | writer.write_all(b"val1").unwrap(); 124 | } 125 | txn.commit().unwrap(); 126 | 127 | let txn = db.begin_rw_txn().unwrap(); 128 | let table = txn.open_table(None).unwrap(); 129 | assert_eq!(txn.get(&table, b"key1").unwrap(), Some(*b"val1")); 130 | assert_eq!(txn.get::<()>(&table, b"key").unwrap(), None); 131 | 132 | txn.del(&table, b"key1", None).unwrap(); 133 | assert_eq!(txn.get::<()>(&table, b"key1").unwrap(), None); 134 | } 135 | 136 | #[test] 137 | fn test_nested_txn() { 138 | let dir = tempdir().unwrap(); 139 | let db = Database::open(&dir).unwrap(); 140 | 141 | let mut txn = db.begin_rw_txn().unwrap(); 142 | txn.put( 143 | &txn.open_table(None).unwrap(), 144 | b"key1", 145 | b"val1", 146 | WriteFlags::empty(), 147 | ) 148 | .unwrap(); 149 | 150 | { 151 | let nested = txn.begin_nested_txn().unwrap(); 152 | let table = nested.open_table(None).unwrap(); 153 | nested 154 | .put(&table, b"key2", b"val2", WriteFlags::empty()) 155 | .unwrap(); 156 | assert_eq!(nested.get(&table, b"key1").unwrap(), Some(*b"val1")); 157 | assert_eq!(nested.get(&table, b"key2").unwrap(), Some(*b"val2")); 158 | } 159 | 160 | let table = txn.open_table(None).unwrap(); 161 | assert_eq!(txn.get(&table, b"key1").unwrap(), Some(*b"val1")); 162 | assert_eq!(txn.get::<()>(&table, b"key2").unwrap(), None); 163 | } 164 | 165 | #[test] 166 | fn test_clear_table() { 167 | let dir = tempdir().unwrap(); 168 | let db = Database::open(&dir).unwrap(); 169 | 170 | { 171 | let txn = db.begin_rw_txn().unwrap(); 172 | txn.put( 173 | &txn.open_table(None).unwrap(), 174 | b"key", 175 | b"val", 176 | WriteFlags::empty(), 177 | ) 178 | .unwrap(); 179 | assert!(!txn.commit().unwrap()); 180 | } 181 | 182 | { 183 | let txn = db.begin_rw_txn().unwrap(); 184 | txn.clear_table(&txn.open_table(None).unwrap()).unwrap(); 185 | assert!(!txn.commit().unwrap()); 186 | } 187 | 188 | let txn = db.begin_ro_txn().unwrap(); 189 | assert_eq!( 190 | txn.get::<()>(&txn.open_table(None).unwrap(), b"key") 191 | .unwrap(), 192 | None 193 | ); 194 | } 195 | 196 | #[test] 197 | fn test_drop_table() { 198 | let dir = tempdir().unwrap(); 199 | { 200 | let db = Database::open_with_options( 201 | &dir, 202 | DatabaseOptions { 203 | max_tables: Some(2), 204 | ..Default::default() 205 | }, 206 | ) 207 | .unwrap(); 208 | 209 | { 210 | let txn = db.begin_rw_txn().unwrap(); 211 | txn.put( 212 | &txn.create_table(Some("test"), TableFlags::empty()).unwrap(), 213 | b"key", 214 | b"val", 215 | WriteFlags::empty(), 216 | ) 217 | .unwrap(); 218 | // Workaround for MDBX dbi drop issue 219 | txn.create_table(Some("canary"), TableFlags::empty()) 220 | .unwrap(); 221 | assert!(!txn.commit().unwrap()); 222 | } 223 | { 224 | let txn = db.begin_rw_txn().unwrap(); 225 | let table = txn.open_table(Some("test")).unwrap(); 226 | unsafe { 227 | txn.drop_table(table).unwrap(); 228 | } 229 | assert!(matches!( 230 | txn.open_table(Some("test")).unwrap_err(), 231 | Error::NotFound 232 | )); 233 | assert!(!txn.commit().unwrap()); 
234 |         }
235 |     }
236 |
237 |     let db = Database::open_with_options(
238 |         &dir,
239 |         DatabaseOptions {
240 |             max_tables: Some(2),
241 |             ..Default::default()
242 |         },
243 |     )
244 |     .unwrap();
245 |
246 |     let txn = db.begin_ro_txn().unwrap();
247 |     txn.open_table(Some("canary")).unwrap();
248 |     assert!(matches!(
249 |         txn.open_table(Some("test")).unwrap_err(),
250 |         Error::NotFound
251 |     ));
252 | }
253 |
254 | #[test]
255 | fn test_concurrent_readers_single_writer() {
256 |     let dir = tempdir().unwrap();
257 |     let db: Arc<Database> = Arc::new(Database::open(&dir).unwrap());
258 |
259 |     let n = 10usize; // Number of concurrent readers
260 |     let barrier = Arc::new(Barrier::new(n + 1));
261 |     let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
262 |
263 |     let key = b"key";
264 |     let val = b"val";
265 |
266 |     for _ in 0..n {
267 |         let reader_db = db.clone();
268 |         let reader_barrier = barrier.clone();
269 |
270 |         threads.push(thread::spawn(move || {
271 |             {
272 |                 let txn = reader_db.begin_ro_txn().unwrap();
273 |                 let table = txn.open_table(None).unwrap();
274 |                 assert_eq!(txn.get::<()>(&table, key).unwrap(), None);
275 |             }
276 |             reader_barrier.wait();
277 |             reader_barrier.wait();
278 |             {
279 |                 let txn = reader_db.begin_ro_txn().unwrap();
280 |                 let table = txn.open_table(None).unwrap();
281 |                 txn.get::<[u8; 3]>(&table, key).unwrap().unwrap() == *val
282 |             }
283 |         }));
284 |     }
285 |
286 |     let txn = db.begin_rw_txn().unwrap();
287 |     let table = txn.open_table(None).unwrap();
288 |     println!("wait2");
289 |     barrier.wait();
290 |     txn.put(&table, key, val, WriteFlags::empty()).unwrap();
291 |     txn.commit().unwrap();
292 |
293 |     println!("wait1");
294 |     barrier.wait();
295 |
296 |     assert!(threads.into_iter().all(|b| b.join().unwrap()))
297 | }
298 |
299 | #[test]
300 | fn test_concurrent_writers() {
301 |     let dir = tempdir().unwrap();
302 |     let db = Arc::new(Database::open(&dir).unwrap());
303 |
304 |     let n = 10usize; // Number of concurrent writers
305 |     let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
306 |
307 |     let key = "key";
308 |     let val = "val";
309 |
310 |     for i in 0..n {
311 |         let writer_db = db.clone();
312 |
313 |         threads.push(thread::spawn(move || {
314 |             let txn = writer_db.begin_rw_txn().unwrap();
315 |             let table = txn.open_table(None).unwrap();
316 |             txn.put(
317 |                 &table,
318 |                 format!("{key}{i}"),
319 |                 format!("{val}{i}"),
320 |                 WriteFlags::empty(),
321 |             )
322 |             .unwrap();
323 |             txn.commit().is_ok()
324 |         }));
325 |     }
326 |     assert!(threads.into_iter().all(|b| b.join().unwrap()));
327 |
328 |     let txn = db.begin_ro_txn().unwrap();
329 |     let table = txn.open_table(None).unwrap();
330 |
331 |     for i in 0..n {
332 |         assert_eq!(
333 |             Cow::<Vec<u8>>::Owned(format!("{val}{i}").into_bytes()),
334 |             txn.get(&table, format!("{key}{i}").as_bytes())
335 |                 .unwrap()
336 |                 .unwrap()
337 |         );
338 |     }
339 | }
340 |
341 | #[test]
342 | fn test_stat() {
343 |     let dir = tempdir().unwrap();
344 |     let db = Database::open(&dir).unwrap();
345 |
346 |     let txn = db.begin_rw_txn().unwrap();
347 |     let table = txn.create_table(None, TableFlags::empty()).unwrap();
348 |     for (k, v) in [(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3")] {
349 |         txn.put(&table, k, v, WriteFlags::empty()).unwrap();
350 |     }
351 |     txn.commit().unwrap();
352 |
353 |     {
354 |         let txn = db.begin_ro_txn().unwrap();
355 |         let table = txn.open_table(None).unwrap();
356 |         let stat = txn.table_stat(&table).unwrap();
357 |         assert_eq!(stat.entries(), 3);
358 |     }
359 |
360 |     let txn = db.begin_rw_txn().unwrap();
361 |     let table = txn.open_table(None).unwrap();
362
| for k in [b"key1", b"key2"] { 363 | txn.del(&table, k, None).unwrap(); 364 | } 365 | txn.commit().unwrap(); 366 | 367 | { 368 | let txn = db.begin_ro_txn().unwrap(); 369 | let table = txn.open_table(None).unwrap(); 370 | let stat = txn.table_stat(&table).unwrap(); 371 | assert_eq!(stat.entries(), 1); 372 | } 373 | 374 | let txn = db.begin_rw_txn().unwrap(); 375 | let table = txn.open_table(None).unwrap(); 376 | for (k, v) in [(b"key4", b"val4"), (b"key5", b"val5"), (b"key6", b"val6")] { 377 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 378 | } 379 | txn.commit().unwrap(); 380 | 381 | { 382 | let txn = db.begin_ro_txn().unwrap(); 383 | let table = txn.open_table(None).unwrap(); 384 | let stat = txn.table_stat(&table).unwrap(); 385 | assert_eq!(stat.entries(), 4); 386 | } 387 | } 388 | 389 | #[test] 390 | fn test_stat_dupsort() { 391 | let dir = tempdir().unwrap(); 392 | let db = Database::open(&dir).unwrap(); 393 | 394 | let txn = db.begin_rw_txn().unwrap(); 395 | let table = txn.create_table(None, TableFlags::DUP_SORT).unwrap(); 396 | for (k, v) in [ 397 | (b"key1", b"val1"), 398 | (b"key1", b"val2"), 399 | (b"key1", b"val3"), 400 | (b"key2", b"val1"), 401 | (b"key2", b"val2"), 402 | (b"key2", b"val3"), 403 | (b"key3", b"val1"), 404 | (b"key3", b"val2"), 405 | (b"key3", b"val3"), 406 | ] { 407 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 408 | } 409 | txn.commit().unwrap(); 410 | 411 | { 412 | let txn = db.begin_ro_txn().unwrap(); 413 | let stat = txn.table_stat(&txn.open_table(None).unwrap()).unwrap(); 414 | assert_eq!(stat.entries(), 9); 415 | } 416 | 417 | let txn = db.begin_rw_txn().unwrap(); 418 | let table = txn.open_table(None).unwrap(); 419 | for (k, v) in [(b"key1", Some(b"val2" as &[u8])), (b"key2", None)] { 420 | txn.del(&table, k, v).unwrap(); 421 | } 422 | txn.commit().unwrap(); 423 | 424 | { 425 | let txn = db.begin_ro_txn().unwrap(); 426 | let stat = txn.table_stat(&txn.open_table(None).unwrap()).unwrap(); 427 | assert_eq!(stat.entries(), 5); 428 | } 429 | 430 | let txn = db.begin_rw_txn().unwrap(); 431 | let table = txn.open_table(None).unwrap(); 432 | for (k, v) in [(b"key4", b"val1"), (b"key4", b"val2"), (b"key4", b"val3")] { 433 | txn.put(&table, k, v, WriteFlags::empty()).unwrap(); 434 | } 435 | txn.commit().unwrap(); 436 | 437 | { 438 | let txn = db.begin_ro_txn().unwrap(); 439 | let stat = txn.table_stat(&txn.open_table(None).unwrap()).unwrap(); 440 | assert_eq!(stat.entries(), 8); 441 | } 442 | } 443 | --------------------------------------------------------------------------------
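For orientation, here is a minimal end-to-end sketch assembled only from calls the tests above already exercise (Database::open, begin_rw_txn, put, commit, begin_ro_txn, and cursor iteration collected into Result<Vec<_>>). It is not part of the repository: the main function, the tempdir scaffolding, and the fixed-size [u8; 4] decoding are illustrative assumptions, and exact signatures may differ between crate versions.

use libmdbx::{Database, NoWriteMap, Result, WriteFlags};

fn main() -> Result<()> {
    // Throwaway directory for the database files (tempfile is already a dev-dependency).
    let dir = tempfile::tempdir().unwrap();
    let db: Database<NoWriteMap> = Database::open(&dir)?;

    // Write one key/value pair in a read-write transaction and commit it.
    let txn = db.begin_rw_txn()?;
    let table = txn.open_table(None)?;
    txn.put(&table, b"key1", b"val1", WriteFlags::empty())?;
    txn.commit()?;

    // Read it back through a cursor in a read-only transaction, collecting the
    // fallible iterator into Result<Vec<_>> the same way test_iter does above.
    let txn = db.begin_ro_txn()?;
    let table = txn.open_table(None)?;
    let mut cursor = txn.cursor(&table)?;
    let pairs = cursor.iter::<[u8; 4], [u8; 4]>().collect::<Result<Vec<_>>>()?;
    assert_eq!(pairs, vec![(*b"key1", *b"val1")]);

    Ok(())
}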