├── .github │ └── workflows │ └── main.yml ├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── lmdb.zig └── zig.mod /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | paths: 6 | - "**.zig" 7 | pull_request: 8 | paths: 9 | - "**.zig" 10 | schedule: 11 | - cron: "0 0 * * *" 12 | workflow_dispatch: 13 | 14 | jobs: 15 | lint: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v2 20 | 21 | - name: Setup Zig 22 | uses: goto-bus-stop/setup-zig@v1 23 | with: 24 | version: master 25 | 26 | - name: Lint 27 | run: zig fmt --check *.zig 28 | 29 | test: 30 | needs: lint 31 | strategy: 32 | matrix: 33 | os: [ubuntu-latest, macos-latest, windows-latest] 34 | runs-on: ${{ matrix.os }} 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v2 38 | 39 | - name: Setup Zig 40 | uses: goto-bus-stop/setup-zig@v1 41 | with: 42 | version: master 43 | 44 | - name: Setup Package Manager (MacOS) 45 | if: matrix.os == 'macos-latest' 46 | run: | 47 | wget https://github.com/nektro/zigmod/releases/download/v31/zigmod-x86_64-macos -O zigmod 48 | chmod +x zigmod 49 | 50 | - name: Setup Package Manager (Linux) 51 | if: matrix.os == 'ubuntu-latest' 52 | run: | 53 | wget https://github.com/nektro/zigmod/releases/download/v31/zigmod-x86_64-linux -O zigmod 54 | chmod +x zigmod 55 | 56 | - name: Setup Package Manager (Windows) 57 | if: matrix.os == 'windows-latest' 58 | run: | 59 | (new-object net.webclient).DownloadFile('https://github.com/nektro/zigmod/releases/download/v31/zigmod-x86_64-windows.exe', 'zigmod.exe') 60 | 61 | - name: Test 62 | run: | 63 | ./zigmod fetch 64 | zig build test 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | deps.zig -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Kenta Iwasaki 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lmdb-zig 2 | 3 | Lightweight, fully-featured, idiomatic cross-platform [Zig](https://ziglang.org) bindings to [Lightning Memory-Mapped Database (LMDB)](http://www.lmdb.tech/doc/). 4 | 5 | LMDB is a tiny, extraordinarily fast B+tree-based embedded KV database with some excellent properties: 6 | - Zero-copy lookup and iteration: the entire database is memory-mapped. 7 | - Transactions may create, drop, and interact with multiple named databases at once. 8 | - Multiple readers, single writer. Writers don't block readers, readers don't block writers. 9 | - Keys are lexicographically sorted by default. A custom key ordering may be defined per named database. 10 | - Zero maintenance: requires no compaction, external processes, or background threads. 11 | - The entire database is exposed as a single file accompanied by a lockfile. A single database file may contain multiple named databases. 12 | - Fully exploits the operating system's buffer cache, given its memory-mapped design and a compact footprint of a mere 32KB of object code. 13 | 14 | Refer to the 12 extensive unit tests provided [here](lmdb.zig#L874) for usage instructions and guidelines. 15 | 16 | Built and tested against Zig's master branch across all optimization modes. 17 | 18 | 19 | ## Motivation 20 | 21 | These bindings were built with one goal in mind: utilizing LMDB as the underlying storage backend of a database project. 22 | 23 | As a result, extensive effort was put into exposing and testing as much of LMDB's functionality as possible with an emphasis on minimal overhead: fixed memory-map addressing, in-place cursor updates, duplicate keys, on-the-fly backups, crash recovery, etc. 24 | 25 | ## Setup 26 | 27 | These bindings were built with first-class support for the [zigmod](https://github.com/nektro/zigmod) package manager. 28 | 29 | To incorporate these bindings into your project, add the following to your project's `zig.mod`: 30 | 31 | ```yml 32 | - type: git 33 | path: https://github.com/lithdew/lmdb-zig 34 | ``` 35 | 36 | Afterwards, run: 37 | 38 | ```shell 39 | zigmod fetch 40 | ``` 41 | 42 | ## Status 43 | 44 | Presently, these bindings cover the entire API surface of LMDB, except for the methods listed below, which were deemed unnecessary. Should you require any of these methods exported, please file an issue describing your use case. 
45 | 46 | - [mdb_env_set_userctx](http://www.lmdb.tech/doc/group__mdb.html#gaf2fe09eb9c96eeb915a76bf713eecc46) 47 | - [mdb_env_get_userctx](http://www.lmdb.tech/doc/group__mdb.html#ga45df6a4fb150cda2316b5ae224ba52f1) 48 | - [mdb_cmp](http://www.lmdb.tech/doc/group__mdb.html#gaba790a2493f744965b810efac73bac0e) 49 | - [mdb_dcmp](http://www.lmdb.tech/doc/group__mdb.html#gac61d3087282b0824c8c5caff6caabdf3) 50 | - [mdb_reader_list](http://www.lmdb.tech/doc/group__mdb.html#ga8550000cd0501a44f57ee6dff0188744) 51 | - [mdb_set_relfunc](http://www.lmdb.tech/doc/group__mdb.html#ga697d82c7afe79f142207ad5adcdebfeb) 52 | - [mdb_set_relctx](http://www.lmdb.tech/doc/group__mdb.html#ga7c34246308cee01724a1839a8f5cc594) 53 | 54 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const deps = @import("deps.zig"); 3 | 4 | const Builder = std.build.Builder; 5 | 6 | pub fn build(b: *Builder) void { 7 | var target = b.standardTargetOptions(.{}); 8 | if (target.isGnuLibC()) target.abi = .musl; 9 | 10 | const mode = b.standardReleaseOptions(); 11 | 12 | const tests = b.addTest("lmdb.zig"); 13 | tests.setTarget(target); 14 | tests.setBuildMode(mode); 15 | deps.addAllTo(tests); 16 | 17 | const test_step = b.step("test", "Run library tests"); 18 | test_step.dependOn(&tests.step); 19 | } 20 | -------------------------------------------------------------------------------- /lmdb.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const c = @cImport(@cInclude("lmdb.h")); 3 | 4 | const os = std.os; 5 | const fs = std.fs; 6 | const mem = std.mem; 7 | const math = std.math; 8 | const meta = std.meta; 9 | const debug = std.debug; 10 | const testing = std.testing; 11 | 12 | const panic = debug.panic; 13 | const assert = debug.assert; 14 | 15 | pub const Environment = packed struct { 16 | pub const Statistics = struct { 17 | page_size: usize, 18 | tree_height: usize, 19 | num_branch_pages: usize, 20 | num_leaf_pages: usize, 21 | num_overflow_pages: usize, 22 | num_entries: usize, 23 | }; 24 | 25 | pub const Info = struct { 26 | map_address: ?[*]u8, 27 | map_size: usize, 28 | last_page_num: usize, 29 | last_tx_id: usize, 30 | max_num_reader_slots: usize, 31 | num_used_reader_slots: usize, 32 | }; 33 | 34 | const Self = @This(); 35 | 36 | inner: ?*c.MDB_env, 37 | 38 | pub const OpenFlags = struct { 39 | mode: c.mdb_mode_t = 0o664, 40 | map_size: ?usize = null, 41 | max_num_readers: ?usize = null, 42 | max_num_dbs: ?usize = null, 43 | 44 | fix_mapped_address: bool = false, 45 | no_sub_directory: bool = false, 46 | read_only: bool = false, 47 | use_writable_memory_map: bool = false, 48 | dont_sync_metadata: bool = false, 49 | dont_sync: bool = false, 50 | flush_asynchronously: bool = false, 51 | disable_thread_local_storage: bool = false, 52 | disable_locks: bool = false, 53 | disable_readahead: bool = false, 54 | disable_memory_initialization: bool = false, 55 | pub inline fn into(self: Self.OpenFlags) c_uint { 56 | var flags: c_uint = 0; 57 | if (self.fix_mapped_address) flags |= c.MDB_FIXEDMAP; 58 | if (self.no_sub_directory) flags |= c.MDB_NOSUBDIR; 59 | if (self.read_only) flags |= c.MDB_RDONLY; 60 | if (self.use_writable_memory_map) flags |= c.MDB_WRITEMAP; 61 | if (self.dont_sync_metadata) flags |= c.MDB_NOMETASYNC; 62 | if (self.dont_sync) flags |= c.MDB_NOSYNC; 63 | if (self.flush_asynchronously)
flags |= c.MDB_MAPASYNC; 64 | if (self.disable_thread_local_storage) flags |= c.MDB_NOTLS; 65 | if (self.disable_locks) flags |= c.MDB_NOLOCK; 66 | if (self.disable_readahead) flags |= c.MDB_NORDAHEAD; 67 | if (self.disable_memory_initialization) flags |= c.MDB_NOMEMINIT; 68 | return flags; 69 | } 70 | }; 71 | pub inline fn init(env_path: []const u8, flags: Self.OpenFlags) !Self { 72 | var inner: ?*c.MDB_env = null; 73 | 74 | try call(c.mdb_env_create, .{&inner}); 75 | errdefer call(c.mdb_env_close, .{inner}); 76 | 77 | if (flags.map_size) |map_size| { 78 | try call(c.mdb_env_set_mapsize, .{ inner, map_size }); 79 | } 80 | if (flags.max_num_readers) |max_num_readers| { 81 | try call(c.mdb_env_set_maxreaders, .{ inner, @intCast(c_uint, max_num_readers) }); 82 | } 83 | if (flags.max_num_dbs) |max_num_dbs| { 84 | try call(c.mdb_env_set_maxdbs, .{ inner, @intCast(c_uint, max_num_dbs) }); 85 | } 86 | 87 | if (!mem.endsWith(u8, env_path, &[_]u8{0})) { 88 | assert(env_path.len + 1 <= fs.MAX_PATH_BYTES); 89 | 90 | var fixed_path: [fs.MAX_PATH_BYTES + 1]u8 = undefined; 91 | mem.copy(u8, &fixed_path, env_path); 92 | fixed_path[env_path.len] = 0; 93 | 94 | try call(c.mdb_env_open, .{ inner, fixed_path[0 .. env_path.len + 1].ptr, flags.into(), flags.mode }); 95 | } else { 96 | try call(c.mdb_env_open, .{ inner, env_path.ptr, flags.into(), flags.mode }); 97 | } 98 | 99 | return Self{ .inner = inner }; 100 | } 101 | pub inline fn deinit(self: Self) void { 102 | call(c.mdb_env_close, .{self.inner}); 103 | } 104 | 105 | pub const CopyFlags = packed struct { 106 | compact: bool = false, 107 | pub inline fn into(self: Self.CopyFlags) c_uint { 108 | var flags: c_uint = 0; 109 | if (self.compact) flags |= c.MDB_CP_COMPACT; 110 | return flags; 111 | } 112 | }; 113 | pub inline fn copyTo(self: Self, backup_path: []const u8, flags: CopyFlags) !void { 114 | if (!mem.endsWith(u8, backup_path, &[_]u8{0})) { 115 | assert(backup_path.len + 1 <= fs.MAX_PATH_BYTES); 116 | 117 | var fixed_path: [fs.MAX_PATH_BYTES + 1]u8 = undefined; 118 | mem.copy(u8, &fixed_path, backup_path); 119 | fixed_path[backup_path.len] = 0; 120 | 121 | try call(c.mdb_env_copy2, .{ self.inner, fixed_path[0 .. 
backup_path.len + 1].ptr, flags.into() }); 122 | } else { 123 | try call(c.mdb_env_copy2, .{ self.inner, backup_path.ptr, flags.into() }); 124 | } 125 | } 126 | pub inline fn pipeTo(self: Self, fd_handle: os.fd_t, flags: CopyFlags) !void { 127 | try call(c.mdb_env_copyfd2, .{ self.inner, fd_handle, flags.into() }); 128 | } 129 | pub inline fn getMaxKeySize(self: Self) usize { 130 | return @intCast(usize, c.mdb_env_get_maxkeysize(self.inner)); 131 | } 132 | pub inline fn getMaxNumReaders(self: Self) usize { 133 | var max_num_readers: c_uint = 0; 134 | call(c.mdb_env_get_maxreaders, .{ self.inner, &max_num_readers }) catch |err| { 135 | panic("Environment.getMaxNumReaders(): {}", .{err}); 136 | }; 137 | return @intCast(usize, max_num_readers); 138 | } 139 | pub inline fn setMapSize(self: Self, map_size: ?usize) !void { 140 | try call(c.mdb_env_set_mapsize, .{ self.inner, if (map_size) |size| size else 0 }); 141 | } 142 | 143 | pub const Flags = struct { 144 | fix_mapped_address: bool = false, 145 | no_sub_directory: bool = false, 146 | read_only: bool = false, 147 | use_writable_memory_map: bool = false, 148 | dont_sync_metadata: bool = false, 149 | dont_sync: bool = false, 150 | flush_asynchronously: bool = false, 151 | disable_thread_local_storage: bool = false, 152 | disable_locks: bool = false, 153 | disable_readahead: bool = false, 154 | disable_memory_initialization: bool = false, 155 | pub inline fn from(flags: c_uint) Flags { 156 | return Flags{ 157 | .fix_mapped_address = flags & c.MDB_FIXEDMAP != 0, 158 | .no_sub_directory = flags & c.MDB_NOSUBDIR != 0, 159 | .read_only = flags & c.MDB_RDONLY != 0, 160 | .use_writable_memory_map = flags & c.MDB_WRITEMAP != 0, 161 | .dont_sync_metadata = flags & c.MDB_NOMETASYNC != 0, 162 | .dont_sync = flags & c.MDB_NOSYNC != 0, 163 | .flush_asynchronously = flags & c.MDB_MAPASYNC != 0, 164 | .disable_thread_local_storage = flags & c.MDB_NOTLS != 0, 165 | .disable_locks = flags & c.MDB_NOLOCK != 0, 166 | .disable_readahead = flags & c.MDB_NORDAHEAD != 0, 167 | .disable_memory_initialization = flags & c.MDB_NOMEMINIT != 0, 168 | }; 169 | } 170 | pub inline fn into(self: Self.Flags) c_uint { 171 | var flags: c_uint = 0; 172 | if (self.fix_mapped_address) flags |= c.MDB_FIXEDMAP; 173 | if (self.no_sub_directory) flags |= c.MDB_NOSUBDIR; 174 | if (self.read_only) flags |= c.MDB_RDONLY; 175 | if (self.use_writable_memory_map) flags |= c.MDB_WRITEMAP; 176 | if (self.dont_sync_metadata) flags |= c.MDB_NOMETASYNC; 177 | if (self.dont_sync) flags |= c.MDB_NOSYNC; 178 | if (self.flush_asynchronously) flags |= c.MDB_MAPASYNC; 179 | if (self.disable_thread_local_storage) flags |= c.MDB_NOTLS; 180 | if (self.disable_locks) flags |= c.MDB_NOLOCK; 181 | if (self.disable_readahead) flags |= c.MDB_NORDAHEAD; 182 | if (self.disable_memory_initialization) flags |= c.MDB_NOMEMINIT; 183 | return flags; 184 | } 185 | }; 186 | pub inline fn getFlags(self: Self) Flags { 187 | var inner: c_uint = undefined; 188 | call(c.mdb_env_get_flags, .{ self.inner, &inner }) catch |err| { 189 | panic("Environment.getFlags(): {}", .{err}); 190 | }; 191 | return Flags.from(inner); 192 | } 193 | 194 | pub const MutableFlags = struct { 195 | dont_sync_metadata: bool = false, 196 | dont_sync: bool = false, 197 | flush_asynchronously: bool = false, 198 | disable_memory_initialization: bool = false, 199 | pub inline fn into(self: Self.MutableFlags) c_uint { 200 | var flags: c_uint = 0; 201 | if (self.dont_sync_metadata) flags |= c.MDB_NOMETASYNC; 202 | if (self.dont_sync) flags |= 
c.MDB_NOSYNC; 203 | if (self.flush_asynchronously) flags |= c.MDB_MAPASYNC; 204 | if (self.disable_memory_initialization) flags |= c.MDB_NOMEMINIT; 205 | return flags; 206 | } 207 | }; 208 | pub inline fn enableFlags(self: Self, flags: MutableFlags) void { 209 | call(c.mdb_env_set_flags, .{ self.inner, flags.into(), 1 }) catch |err| { 210 | panic("Environment.enableFlags(): {}", .{err}); 211 | }; 212 | } 213 | pub inline fn disableFlags(self: Self, flags: MutableFlags) void { 214 | call(c.mdb_env_set_flags, .{ self.inner, flags.into(), 0 }) catch |err| { 215 | panic("Environment.disableFlags(): {}", .{err}); 216 | }; 217 | } 218 | pub inline fn path(self: Self) []const u8 { 219 | var env_path: [:0]const u8 = undefined; 220 | call(c.mdb_env_get_path, .{ self.inner, @ptrCast([*c][*c]const u8, &env_path.ptr) }) catch |err| { 221 | panic("Environment.path(): {}", .{err}); 222 | }; 223 | env_path.len = mem.indexOfSentinel(u8, 0, env_path.ptr); 224 | return mem.span(env_path); 225 | } 226 | pub inline fn stat(self: Self) Statistics { 227 | var inner: c.MDB_stat = undefined; 228 | call(c.mdb_env_stat, .{ self.inner, &inner }) catch |err| { 229 | panic("Environment.stat(): {}", .{err}); 230 | }; 231 | return Statistics{ 232 | .page_size = @intCast(usize, inner.ms_psize), 233 | .tree_height = @intCast(usize, inner.ms_depth), 234 | .num_branch_pages = @intCast(usize, inner.ms_branch_pages), 235 | .num_leaf_pages = @intCast(usize, inner.ms_leaf_pages), 236 | .num_overflow_pages = @intCast(usize, inner.ms_overflow_pages), 237 | .num_entries = @intCast(usize, inner.ms_entries), 238 | }; 239 | } 240 | pub inline fn fd(self: Self) os.fd_t { 241 | var inner: os.fd_t = undefined; 242 | call(c.mdb_env_get_fd, .{ self.inner, &inner }) catch |err| { 243 | panic("Environment.fd(): {}", .{err}); 244 | }; 245 | return inner; 246 | } 247 | pub inline fn info(self: Self) Info { 248 | var inner: c.MDB_envinfo = undefined; 249 | call(c.mdb_env_info, .{ self.inner, &inner }) catch |err| { 250 | panic("Environment.info(): {}", .{err}); 251 | }; 252 | return Info{ 253 | .map_address = @ptrCast(?[*]u8, inner.me_mapaddr), 254 | .map_size = @intCast(usize, inner.me_mapsize), 255 | .last_page_num = @intCast(usize, inner.me_last_pgno), 256 | .last_tx_id = @intCast(usize, inner.me_last_txnid), 257 | .max_num_reader_slots = @intCast(usize, inner.me_maxreaders), 258 | .num_used_reader_slots = @intCast(usize, inner.me_numreaders), 259 | }; 260 | } 261 | pub inline fn begin(self: Self, flags: Transaction.Flags) !Transaction { 262 | var inner: ?*c.MDB_txn = null; 263 | const maybe_parent = if (flags.parent) |parent| parent.inner else null; 264 | try call(c.mdb_txn_begin, .{ self.inner, maybe_parent, flags.into(), &inner }); 265 | return Transaction{ .inner = inner }; 266 | } 267 | pub inline fn sync(self: Self, force: bool) !void { 268 | try call(c.mdb_env_sync, .{ self.inner, @as(c_int, if (force) 1 else 0) }); 269 | } 270 | pub inline fn purge(self: Self) !usize { 271 | var count: c_int = undefined; 272 | try call(c.mdb_reader_check, .{ self.inner, &count }); 273 | return @intCast(usize, count); 274 | } 275 | }; 276 | 277 | pub const Database = struct { 278 | pub const OpenFlags = packed struct { 279 | compare_keys_in_reverse_order: bool = false, 280 | allow_duplicate_keys: bool = false, 281 | keys_are_integers: bool = false, 282 | duplicate_entries_are_fixed_size: bool = false, 283 | duplicate_keys_are_integers: bool = false, 284 | compare_duplicate_keys_in_reverse_order: bool = false, 285 | pub inline fn into(self: 
Self.OpenFlags) c_uint { 286 | var flags: c_uint = 0; 287 | if (self.compare_keys_in_reverse_order) flags |= c.MDB_REVERSEKEY; 288 | if (self.allow_duplicate_keys) flags |= c.MDB_DUPSORT; 289 | if (self.keys_are_integers) flags |= c.MDB_INTEGERKEY; 290 | if (self.duplicate_entries_are_fixed_size) flags |= c.MDB_DUPFIXED; 291 | if (self.duplicate_keys_are_integers) flags |= c.MDB_INTEGERDUP; 292 | if (self.compare_duplicate_keys_in_reverse_order) flags |= c.MDB_REVERSEDUP; 293 | return flags; 294 | } 295 | }; 296 | 297 | pub const UseFlags = packed struct { 298 | compare_keys_in_reverse_order: bool = false, 299 | allow_duplicate_keys: bool = false, 300 | keys_are_integers: bool = false, 301 | duplicate_entries_are_fixed_size: bool = false, 302 | duplicate_keys_are_integers: bool = false, 303 | compare_duplicate_keys_in_reverse_order: bool = false, 304 | create_if_not_exists: bool = false, 305 | pub inline fn into(self: Self.UseFlags) c_uint { 306 | var flags: c_uint = 0; 307 | if (self.compare_keys_in_reverse_order) flags |= c.MDB_REVERSEKEY; 308 | if (self.allow_duplicate_keys) flags |= c.MDB_DUPSORT; 309 | if (self.keys_are_integers) flags |= c.MDB_INTEGERKEY; 310 | if (self.duplicate_entries_are_fixed_size) flags |= c.MDB_DUPFIXED; 311 | if (self.duplicate_keys_are_integers) flags |= c.MDB_INTEGERDUP; 312 | if (self.compare_duplicate_keys_in_reverse_order) flags |= c.MDB_REVERSEDUP; 313 | if (self.create_if_not_exists) flags |= c.MDB_CREATE; 314 | return flags; 315 | } 316 | }; 317 | 318 | const Self = @This(); 319 | 320 | inner: c.MDB_dbi, 321 | pub inline fn close(self: Self, env: Environment) void { 322 | call(c.mdb_dbi_close, .{ env.inner, self.inner }); 323 | } 324 | }; 325 | 326 | pub const Transaction = packed struct { 327 | pub const Flags = struct { 328 | parent: ?Self = null, 329 | read_only: bool = false, 330 | dont_sync: bool = false, 331 | dont_sync_metadata: bool = false, 332 | pub inline fn into(self: Self.Flags) c_uint { 333 | var flags: c_uint = 0; 334 | if (self.read_only) flags |= c.MDB_RDONLY; 335 | if (self.dont_sync) flags |= c.MDB_NOSYNC; 336 | if (self.dont_sync_metadata) flags |= c.MDB_NOMETASYNC; 337 | return flags; 338 | } 339 | }; 340 | 341 | const Self = @This(); 342 | 343 | inner: ?*c.MDB_txn, 344 | pub inline fn id(self: Self) usize { 345 | return @intCast(usize, c.mdb_txn_id(self.inner)); 346 | } 347 | pub inline fn open(self: Self, flags: Database.OpenFlags) !Database { 348 | var inner: c.MDB_dbi = 0; 349 | try call(c.mdb_dbi_open, .{ self.inner, null, flags.into(), &inner }); 350 | return Database{ .inner = inner }; 351 | } 352 | pub inline fn use(self: Self, name: []const u8, flags: Database.UseFlags) !Database { 353 | var inner: c.MDB_dbi = 0; 354 | try call(c.mdb_dbi_open, .{ self.inner, name.ptr, flags.into(), &inner }); 355 | return Database{ .inner = inner }; 356 | } 357 | pub inline fn cursor(self: Self, db: Database) !Cursor { 358 | var inner: ?*c.MDB_cursor = undefined; 359 | try call(c.mdb_cursor_open, .{ self.inner, db.inner, &inner }); 360 | return Cursor{ .inner = inner }; 361 | } 362 | pub inline fn setKeyOrder(self: Self, db: Database, comptime order: fn (a: []const u8, b: []const u8) math.Order) !void { 363 | const S = struct { 364 | fn cmp(a: ?*const c.MDB_val, b: ?*const c.MDB_val) callconv(.C) c_int { 365 | const slice_a = @ptrCast([*]const u8, a.?.mv_data)[0..a.?.mv_size]; 366 | const slice_b = @ptrCast([*]const u8, b.?.mv_data)[0..b.?.mv_size]; 367 | return switch (order(slice_a, slice_b)) { 368 | .eq => 0, 369 | .lt => -1, 370 | .gt 
=> 1, 371 | }; 372 | } 373 | }; 374 | try call(c.mdb_set_compare, .{ self.inner, db.inner, S.cmp }); 375 | } 376 | pub inline fn setItemOrder(self: Self, db: Database, comptime order: fn (a: []const u8, b: []const u8) math.Order) !void { 377 | const S = struct { 378 | fn cmp(a: ?*const c.MDB_val, b: ?*const c.MDB_val) callconv(.C) c_int { 379 | const slice_a = @ptrCast([*]const u8, a.?.mv_data)[0..a.?.mv_size]; 380 | const slice_b = @ptrCast([*]const u8, b.?.mv_data)[0..b.?.mv_size]; 381 | return switch (order(slice_a, slice_b)) { 382 | .eq => 0, 383 | .lt => -1, 384 | .gt => 1, 385 | }; 386 | } 387 | }; 388 | try call(c.mdb_set_dupsort, .{ self.inner, db.inner, S.cmp }); 389 | } 390 | pub inline fn get(self: Self, db: Database, key: []const u8) ![]const u8 { 391 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 392 | var v: c.MDB_val = undefined; 393 | try call(c.mdb_get, .{ self.inner, db.inner, k, &v }); 394 | 395 | return @ptrCast([*]const u8, v.mv_data)[0..v.mv_size]; 396 | } 397 | 398 | pub const PutFlags = packed struct { 399 | dont_overwrite_key: bool = false, 400 | dont_overwrite_item: bool = false, 401 | data_already_sorted: bool = false, 402 | set_already_sorted: bool = false, 403 | pub inline fn into(self: PutFlags) c_uint { 404 | var flags: c_uint = 0; 405 | if (self.dont_overwrite_key) flags |= c.MDB_NOOVERWRITE; 406 | if (self.dont_overwrite_item) flags |= c.MDB_NODUPDATA; 407 | if (self.data_already_sorted) flags |= c.MDB_APPEND; 408 | if (self.set_already_sorted) flags |= c.MDB_APPENDDUP; 409 | return flags; 410 | } 411 | }; 412 | pub inline fn putItem(self: Self, db: Database, key: []const u8, val: anytype, flags: PutFlags) !void { 413 | const bytes = if (meta.trait.isIndexable(@TypeOf(val))) mem.span(val) else mem.asBytes(&val); 414 | return self.put(db, key, bytes, flags); 415 | } 416 | pub inline fn put(self: Self, db: Database, key: []const u8, val: []const u8, flags: PutFlags) !void { 417 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 418 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 419 | try call(c.mdb_put, .{ self.inner, db.inner, k, v, flags.into() }); 420 | } 421 | pub inline fn getOrPut(self: Self, db: Database, key: []const u8, val: []const u8) !?[]const u8 { 422 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 423 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 424 | 425 | call(c.mdb_put, .{ self.inner, db.inner, k, v, c.MDB_NOOVERWRITE }) catch |err| switch (err) { 426 | error.AlreadyExists => return @ptrCast([*]u8, v.mv_data)[0..v.mv_size], 427 | else => return err, 428 | }; 429 | 430 | return null; 431 | } 432 | 433 | pub const ReserveFlags = packed struct { 434 | dont_overwrite_key: bool = false, 435 | data_already_sorted: bool = false, 436 | pub inline fn into(self: ReserveFlags) c_uint { 437 | var flags: c_uint = c.MDB_RESERVE; 438 | if (self.dont_overwrite_key) flags |= c.MDB_NOOVERWRITE; 439 | if (self.data_already_sorted) flags |= c.MDB_APPEND; 440 | return flags; 441 | } 442 | }; 443 | 444 | pub const ReserveResult = union(enum) { 445 | successful: []u8, 446 | found_existing: []const u8, 447 | }; 448 | pub inline fn reserve(self: Self, db: Database, key: []const u8, val_len: usize, flags: ReserveFlags) !ReserveResult { 449 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) 
}; 450 | var v = &c.MDB_val{ .mv_size = val_len, .mv_data = null }; 451 | 452 | call(c.mdb_put, .{ self.inner, db.inner, k, v, flags.into() }) catch |err| switch (err) { 453 | error.AlreadyExists => return ReserveResult{ 454 | .found_existing = @ptrCast([*]const u8, v.mv_data)[0..v.mv_size], 455 | }, 456 | else => return err, 457 | }; 458 | 459 | return ReserveResult{ 460 | .successful = @ptrCast([*]u8, v.mv_data)[0..v.mv_size], 461 | }; 462 | } 463 | pub inline fn del(self: Self, db: Database, key: []const u8, op: union(enum) { key: void, item: []const u8 }) !void { 464 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 465 | var v: ?*c.MDB_val = switch (op) { 466 | .key => null, 467 | .item => |item| &c.MDB_val{ 468 | .mv_size = item.len, 469 | .mv_data = @intToPtr(?*c_void, @ptrToInt(item.ptr)), 470 | }, 471 | }; 472 | try call(c.mdb_del, .{ self.inner, db.inner, k, v }); 473 | } 474 | pub inline fn drop(self: Self, db: Database, method: enum(c_int) { empty = 0, delete = 1 }) !void { 475 | try call(c.mdb_drop, .{ self.inner, db.inner, @enumToInt(method) }); 476 | } 477 | pub inline fn deinit(self: Self) void { 478 | call(c.mdb_txn_abort, .{self.inner}); 479 | } 480 | pub inline fn commit(self: Self) !void { 481 | try call(c.mdb_txn_commit, .{self.inner}); 482 | } 483 | pub inline fn renew(self: Self) !void { 484 | try call(c.mdb_txn_renew, .{self.inner}); 485 | } 486 | pub inline fn reset(self: Self) !void { 487 | try call(c.mdb_txn_reset, .{self.inner}); 488 | } 489 | }; 490 | 491 | pub const Cursor = packed struct { 492 | pub const Entry = struct { 493 | key: []const u8, 494 | val: []const u8, 495 | }; 496 | 497 | pub fn Page(comptime T: type) type { 498 | return struct { 499 | key: []const u8, 500 | items: []align(1) const T, 501 | }; 502 | } 503 | 504 | const Self = @This(); 505 | 506 | inner: ?*c.MDB_cursor, 507 | pub inline fn deinit(self: Self) void { 508 | call(c.mdb_cursor_close, .{self.inner}); 509 | } 510 | pub inline fn tx(self: Self) Transaction { 511 | return Transaction{ .inner = c.mdb_cursor_txn(self.inner) }; 512 | } 513 | pub inline fn db(self: Self) Database { 514 | return Database{ .inner = c.mdb_cursor_dbi(self.inner) }; 515 | } 516 | pub inline fn renew(self: Self, parent: Transaction) !void { 517 | try call(c.mdb_cursor_renew, .{ parent.inner, self.inner }); 518 | } 519 | pub inline fn count(self: Self) usize { 520 | var inner: c.mdb_size_t = undefined; 521 | call(c.mdb_cursor_count, .{ self.inner, &inner }) catch |err| { 522 | panic("cursor is not initialized, or database does not support duplicate keys: {}", .{err}); 523 | }; 524 | return @intCast(usize, inner); 525 | } 526 | 527 | pub fn updateItemInPlace(self: Self, current_key: []const u8, new_val: anytype) !void { 528 | const bytes = if (meta.trait.isIndexable(@TypeOf(new_val))) mem.span(new_val) else mem.asBytes(&new_val); 529 | return self.updateInPlace(current_key, bytes); 530 | } 531 | 532 | pub fn updateInPlace(self: Self, current_key: []const u8, new_val: []const u8) !void { 533 | var k = &c.MDB_val{ .mv_size = current_key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(current_key.ptr)) }; 534 | var v = &c.MDB_val{ .mv_size = new_val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(new_val.ptr)) }; 535 | try call(c.mdb_cursor_put, .{ self.inner, k, v, c.MDB_CURRENT }); 536 | } 537 | 538 | /// May not be used with databases supporting duplicate keys. 
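/// A minimal usage sketch, mirroring the in-place cursor unit test further
/// below (assumes an entry for `current_key` is already present):
///
///     mem.copy(u8, try cursor.reserveInPlace("hello", "world".len), "world");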
539 | pub fn reserveInPlace(self: Self, current_key: []const u8, new_val_len: usize) ![]u8 { 540 | var k = &c.MDB_val{ .mv_size = current_key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(current_key.ptr)) }; 541 | var v = &c.MDB_val{ .mv_size = new_val_len, .mv_data = null }; 542 | try call(c.mdb_cursor_put, .{ self.inner, k, v, c.MDB_CURRENT | c.MDB_RESERVE }); 543 | return @ptrCast([*]u8, v.mv_data)[0..v.mv_size]; 544 | } 545 | 546 | pub const PutFlags = packed struct { 547 | dont_overwrite_key: bool = false, 548 | dont_overwrite_item: bool = false, 549 | data_already_sorted: bool = false, 550 | set_already_sorted: bool = false, 551 | pub inline fn into(self: PutFlags) c_uint { 552 | var flags: c_uint = 0; 553 | if (self.dont_overwrite_key) flags |= c.MDB_NOOVERWRITE; 554 | if (self.dont_overwrite_item) flags |= c.MDB_NODUPDATA; 555 | if (self.data_already_sorted) flags |= c.MDB_APPEND; 556 | if (self.set_already_sorted) flags |= c.MDB_APPENDDUP; 557 | return flags; 558 | } 559 | }; 560 | pub inline fn putItem(self: Self, key: []const u8, val: anytype, flags: PutFlags) !void { 561 | const bytes = if (meta.trait.isIndexable(@TypeOf(val))) mem.span(val) else mem.asBytes(&val); 562 | return self.put(key, bytes, flags); 563 | } 564 | pub inline fn put(self: Self, key: []const u8, val: []const u8, flags: PutFlags) !void { 565 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 566 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 567 | try call(c.mdb_cursor_put, .{ self.inner, k, v, flags.into() }); 568 | } 569 | pub inline fn putBatch(self: Self, key: []const u8, batch: anytype, flags: PutFlags) !usize { 570 | comptime assert(meta.trait.isIndexable(@TypeOf(batch))); 571 | 572 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 573 | var v = [_]c.MDB_val{ 574 | .{ .mv_size = @sizeOf(meta.Elem(@TypeOf(batch))), .mv_data = @intToPtr(?*c_void, @ptrToInt(&batch[0])) }, 575 | .{ .mv_size = mem.len(batch), .mv_data = undefined }, 576 | }; 577 | try call(c.mdb_cursor_put, .{ self.inner, k, &v, @intCast(c_uint, c.MDB_MULTIPLE) | flags.into() }); 578 | 579 | return @intCast(usize, v[1].mv_size); 580 | } 581 | pub inline fn getOrPut(self: Self, key: []const u8, val: []const u8) !?[]const u8 { 582 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 583 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 584 | 585 | call(c.mdb_cursor_put, .{ self.inner, k, v, c.MDB_NOOVERWRITE }) catch |err| switch (err) { 586 | error.AlreadyExists => return @ptrCast([*]u8, v.mv_data)[0..v.mv_size], 587 | else => return err, 588 | }; 589 | 590 | return null; 591 | } 592 | 593 | pub const ReserveFlags = packed struct { 594 | dont_overwrite_key: bool = false, 595 | data_already_sorted: bool = false, 596 | pub inline fn into(self: ReserveFlags) c_uint { 597 | var flags: c_uint = c.MDB_RESERVE; 598 | if (self.dont_overwrite_key) flags |= c.MDB_NOOVERWRITE; 599 | if (self.data_already_sorted) flags |= c.MDB_APPEND; 600 | return flags; 601 | } 602 | }; 603 | 604 | pub const ReserveResult = union(enum) { 605 | successful: []u8, 606 | found_existing: []const u8, 607 | }; 608 | pub inline fn reserve(self: Self, key: []const u8, val_len: usize, flags: ReserveFlags) !ReserveResult { 609 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 610 | var v = &c.MDB_val{ 
.mv_size = val_len, .mv_data = null }; 611 | 612 | call(c.mdb_cursor_put, .{ self.inner, k, v, flags.into() }) catch |err| switch (err) { 613 | error.AlreadyExists => return ReserveResult{ 614 | .found_existing = @ptrCast([*]const u8, v.mv_data)[0..v.mv_size], 615 | }, 616 | else => return err, 617 | }; 618 | 619 | return ReserveResult{ 620 | .successful = @ptrCast([*]u8, v.mv_data)[0..v.mv_size], 621 | }; 622 | } 623 | pub inline fn del(self: Self, op: enum(c_uint) { key = c.MDB_NODUPDATA, item = 0 }) !void { 624 | call(c.mdb_cursor_del, .{ self.inner, @enumToInt(op) }) catch |err| switch (err) { 625 | error.InvalidParameter => return error.NotFound, 626 | else => return err, 627 | }; 628 | } 629 | 630 | pub const Position = enum(c.MDB_cursor_op) { 631 | first = c.MDB_FIRST, 632 | first_item = c.MDB_FIRST_DUP, 633 | current = c.MDB_GET_CURRENT, 634 | last = c.MDB_LAST, 635 | last_item = c.MDB_LAST_DUP, 636 | next = c.MDB_NEXT, 637 | next_item = c.MDB_NEXT_DUP, 638 | next_key = c.MDB_NEXT_NODUP, 639 | prev = c.MDB_PREV, 640 | prev_item = c.MDB_PREV_DUP, 641 | prev_key = c.MDB_PREV_NODUP, 642 | }; 643 | pub inline fn get(self: Self, pos: Position) !?Entry { 644 | var k: c.MDB_val = undefined; 645 | var v: c.MDB_val = undefined; 646 | call(c.mdb_cursor_get, .{ self.inner, &k, &v, @enumToInt(pos) }) catch |err| switch (err) { 647 | error.InvalidParameter => return if (pos == .current) null else err, 648 | error.NotFound => return null, 649 | else => return err, 650 | }; 651 | return Entry{ 652 | .key = @ptrCast([*]const u8, k.mv_data)[0..k.mv_size], 653 | .val = @ptrCast([*]const u8, v.mv_data)[0..v.mv_size], 654 | }; 655 | } 656 | 657 | pub const PagePosition = enum(c.MDB_cursor_op) { 658 | current = c.MDB_GET_MULTIPLE, 659 | next = c.MDB_NEXT_MULTIPLE, 660 | prev = c.MDB_PREV_MULTIPLE, 661 | }; 662 | pub inline fn getPage(self: Self, comptime T: type, pos: PagePosition) !?Page(T) { 663 | var k: c.MDB_val = undefined; 664 | var v: c.MDB_val = undefined; 665 | call(c.mdb_cursor_get, .{ self.inner, &k, &v, @enumToInt(pos) }) catch |err| switch (err) { 666 | error.NotFound => return null, 667 | else => return err, 668 | }; 669 | return Page(T){ 670 | .key = @ptrCast([*]const u8, k.mv_data)[0..k.mv_size], 671 | .items = mem.bytesAsSlice(T, @ptrCast([*]const u8, v.mv_data)[0..v.mv_size]), 672 | }; 673 | } 674 | pub inline fn seekToItem(self: Self, key: []const u8, val: []const u8) !void { 675 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 676 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 677 | try call(c.mdb_cursor_get, .{ self.inner, k, v, c.MDB_GET_BOTH }); 678 | } 679 | pub inline fn seekFromItem(self: Self, key: []const u8, val: []const u8) ![]const u8 { 680 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 681 | var v = &c.MDB_val{ .mv_size = val.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(val.ptr)) }; 682 | try call(c.mdb_cursor_get, .{ self.inner, k, v, c.MDB_GET_BOTH_RANGE }); 683 | return @ptrCast([*]const u8, v.mv_data)[0..v.mv_size]; 684 | } 685 | pub inline fn seekTo(self: Self, key: []const u8) ![]const u8 { 686 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 687 | var v: c.MDB_val = undefined; 688 | try call(c.mdb_cursor_get, .{ self.inner, k, &v, c.MDB_SET_KEY }); 689 | return @ptrCast([*]const u8, v.mv_data)[0..v.mv_size]; 690 | } 691 | pub inline fn seekFrom(self: Self, key: []const u8)
!Entry { 692 | var k = &c.MDB_val{ .mv_size = key.len, .mv_data = @intToPtr(?*c_void, @ptrToInt(key.ptr)) }; 693 | var v: c.MDB_val = undefined; 694 | try call(c.mdb_cursor_get, .{ self.inner, k, &v, c.MDB_SET_RANGE }); 695 | return Entry{ 696 | .key = @ptrCast([*]const u8, k.mv_data)[0..k.mv_size], 697 | .val = @ptrCast([*]const u8, v.mv_data)[0..v.mv_size], 698 | }; 699 | } 700 | pub inline fn first(self: Self) !?Entry { 701 | return self.get(.first); 702 | } 703 | pub inline fn firstItem(self: Self) !?Entry { 704 | return self.get(.first_item); 705 | } 706 | pub inline fn current(self: Self) !?Entry { 707 | return self.get(.current); 708 | } 709 | pub inline fn last(self: Self) !?Entry { 710 | return self.get(.last); 711 | } 712 | pub inline fn lastItem(self: Self) !?Entry { 713 | return self.get(.last_item); 714 | } 715 | pub inline fn next(self: Self) !?Entry { 716 | return self.get(.next); 717 | } 718 | pub inline fn nextItem(self: Self) !?Entry { 719 | return self.get(.next_item); 720 | } 721 | pub inline fn nextKey(self: Self) !?Entry { 722 | return self.get(.next_key); 723 | } 724 | pub inline fn prev(self: Self) !?Entry { 725 | return self.get(.prev); 726 | } 727 | pub inline fn prevItem(self: Self) !?Entry { 728 | return self.get(.prev_item); 729 | } 730 | pub inline fn prevKey(self: Self) !?Entry { 731 | return self.get(.prev_key); 732 | } 733 | pub inline fn currentPage(self: Self, comptime T: type) !?Page(T) { 734 | return self.getPage(T, .current); 735 | } 736 | pub inline fn nextPage(self: Self, comptime T: type) !?Page(T) { 737 | return self.getPage(T, .next); 738 | } 739 | pub inline fn prevPage(self: Self, comptime T: type) !?Page(T) { 740 | return self.getPage(T, .prev); 741 | } 742 | }; 743 | 744 | inline fn ResultOf(comptime function: anytype) type { 745 | return if (@typeInfo(@TypeOf(function)).Fn.return_type == c_int) anyerror!void else void; 746 | } 747 | 748 | inline fn call(comptime function: anytype, args: anytype) ResultOf(function) { 749 | const rc = @call(.{}, function, args); 750 | if (ResultOf(function) == void) return rc; 751 | 752 | return switch (rc) { 753 | c.MDB_SUCCESS => {}, 754 | c.MDB_KEYEXIST => error.AlreadyExists, 755 | c.MDB_NOTFOUND => error.NotFound, 756 | c.MDB_PAGE_NOTFOUND => error.PageNotFound, 757 | c.MDB_CORRUPTED => error.PageCorrupted, 758 | c.MDB_PANIC => error.Panic, 759 | c.MDB_VERSION_MISMATCH => error.VersionMismatch, 760 | c.MDB_INVALID => error.FileNotDatabase, 761 | c.MDB_MAP_FULL => error.MapSizeLimitReached, 762 | c.MDB_DBS_FULL => error.MaxNumDatabasesLimitReached, 763 | c.MDB_READERS_FULL => error.MaxNumReadersLimitReached, 764 | c.MDB_TLS_FULL => error.TooManyEnvironmentsOpen, 765 | c.MDB_TXN_FULL => error.TransactionTooBig, 766 | c.MDB_CURSOR_FULL => error.CursorStackLimitReached, 767 | c.MDB_PAGE_FULL => error.OutOfPageMemory, 768 | c.MDB_MAP_RESIZED => error.DatabaseExceedsMapSizeLimit, 769 | c.MDB_INCOMPATIBLE => error.IncompatibleOperation, 770 | c.MDB_BAD_RSLOT => error.InvalidReaderLocktableSlotReuse, 771 | c.MDB_BAD_TXN => error.TransactionNotAborted, 772 | c.MDB_BAD_VALSIZE => error.UnsupportedSize, 773 | c.MDB_BAD_DBI => error.BadDatabaseHandle, 774 | @enumToInt(os.E.NOENT) => error.NoSuchFileOrDirectory, 775 | @enumToInt(os.E.IO) => error.InputOutputError, 776 | @enumToInt(os.E.NOMEM) => error.OutOfMemory, 777 | @enumToInt(os.E.ACCES) => error.ReadOnly, 778 | @enumToInt(os.E.BUSY) => error.DeviceOrResourceBusy, 779 | @enumToInt(os.E.INVAL) => error.InvalidParameter, 780 | @enumToInt(os.E.NOSPC) => 
error.NoSpaceLeftOnDevice, 781 | @enumToInt(os.E.EXIST) => error.FileAlreadyExists, 782 | else => panic("({}) {s}", .{ rc, c.mdb_strerror(rc) }), 783 | }; 784 | } 785 | 786 | test { 787 | testing.refAllDecls(@This()); 788 | } 789 | 790 | test "Environment.init() / Environment.deinit(): query environment stats, flags, and info" { 791 | var tmp = testing.tmpDir(.{}); 792 | defer tmp.cleanup(); 793 | 794 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 795 | var path = try tmp.dir.realpath("./", &buf); 796 | 797 | const env = try Environment.init(path, .{ 798 | .use_writable_memory_map = true, 799 | .dont_sync_metadata = true, 800 | .map_size = 4 * 1024 * 1024, 801 | .max_num_readers = 42, 802 | }); 803 | defer env.deinit(); 804 | 805 | try testing.expectEqualStrings(path, env.path()); 806 | try testing.expect(env.getMaxKeySize() > 0); 807 | try testing.expect(env.getMaxNumReaders() == 42); 808 | 809 | const stat = env.stat(); 810 | try testing.expect(stat.tree_height == 0); 811 | try testing.expect(stat.num_branch_pages == 0); 812 | try testing.expect(stat.num_leaf_pages == 0); 813 | try testing.expect(stat.num_overflow_pages == 0); 814 | try testing.expect(stat.num_entries == 0); 815 | 816 | const flags = env.getFlags(); 817 | try testing.expect(flags.use_writable_memory_map == true); 818 | try testing.expect(flags.dont_sync_metadata == true); 819 | 820 | env.disableFlags(.{ .dont_sync_metadata = true }); 821 | try testing.expect(env.getFlags().dont_sync_metadata == false); 822 | 823 | env.enableFlags(.{ .dont_sync_metadata = true }); 824 | try testing.expect(env.getFlags().dont_sync_metadata == true); 825 | 826 | const info = env.info(); 827 | try testing.expect(info.map_address == null); 828 | try testing.expect(info.map_size == 4 * 1024 * 1024); 829 | try testing.expect(info.last_page_num == 1); 830 | try testing.expect(info.last_tx_id == 0); 831 | try testing.expect(info.max_num_reader_slots > 0); 832 | try testing.expect(info.num_used_reader_slots == 0); 833 | 834 | try env.setMapSize(8 * 1024 * 1024); 835 | try testing.expect(env.info().map_size == 8 * 1024 * 1024); 836 | 837 | // The file descriptor should be >= 0. 
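// (mdb_env_get_fd reports the file descriptor of the environment's main data file.)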
838 | 839 | try testing.expect(env.fd() >= 0); 840 | 841 | try testing.expect((try env.purge()) == 0); 842 | } 843 | 844 | test "Environment.copyTo(): backup environment and check environment integrity" { 845 | var tmp_a = testing.tmpDir(.{}); 846 | defer tmp_a.cleanup(); 847 | 848 | var tmp_b = testing.tmpDir(.{}); 849 | defer tmp_b.cleanup(); 850 | 851 | var buf_a: [fs.MAX_PATH_BYTES]u8 = undefined; 852 | var buf_b: [fs.MAX_PATH_BYTES]u8 = undefined; 853 | 854 | var path_a = try tmp_a.dir.realpath("./", &buf_a); 855 | var path_b = try tmp_b.dir.realpath("./", &buf_b); 856 | 857 | const env_a = try Environment.init(path_a, .{}); 858 | { 859 | defer env_a.deinit(); 860 | 861 | const tx = try env_a.begin(.{}); 862 | errdefer tx.deinit(); 863 | 864 | const db = try tx.open(.{}); 865 | defer db.close(env_a); 866 | 867 | var i: u8 = 0; 868 | while (i < 128) : (i += 1) { 869 | try tx.put(db, &[_]u8{i}, &[_]u8{i}, .{ .dont_overwrite_key = true }); 870 | try testing.expectEqualStrings(&[_]u8{i}, try tx.get(db, &[_]u8{i})); 871 | } 872 | 873 | try tx.commit(); 874 | try env_a.copyTo(path_b, .{ .compact = true }); 875 | } 876 | 877 | const env_b = try Environment.init(path_b, .{}); 878 | { 879 | defer env_b.deinit(); 880 | 881 | const tx = try env_b.begin(.{}); 882 | defer tx.deinit(); 883 | 884 | const db = try tx.open(.{}); 885 | defer db.close(env_b); 886 | 887 | var i: u8 = 0; 888 | while (i < 128) : (i += 1) { 889 | try testing.expectEqualStrings(&[_]u8{i}, try tx.get(db, &[_]u8{i})); 890 | } 891 | } 892 | } 893 | 894 | test "Environment.sync(): manually flush system buffers" { 895 | var tmp = testing.tmpDir(.{}); 896 | defer tmp.cleanup(); 897 | 898 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 899 | var path = try tmp.dir.realpath("./", &buf); 900 | 901 | const env = try Environment.init(path, .{ 902 | .dont_sync = true, 903 | .dont_sync_metadata = true, 904 | .use_writable_memory_map = true, 905 | }); 906 | defer env.deinit(); 907 | 908 | { 909 | const tx = try env.begin(.{}); 910 | errdefer tx.deinit(); 911 | 912 | const db = try tx.open(.{}); 913 | defer db.close(env); 914 | 915 | var i: u8 = 0; 916 | while (i < 128) : (i += 1) { 917 | try tx.put(db, &[_]u8{i}, &[_]u8{i}, .{ .dont_overwrite_key = true }); 918 | try testing.expectEqualStrings(&[_]u8{i}, try tx.get(db, &[_]u8{i})); 919 | } 920 | 921 | try tx.commit(); 922 | try env.sync(true); 923 | } 924 | 925 | { 926 | const tx = try env.begin(.{}); 927 | defer tx.deinit(); 928 | 929 | const db = try tx.open(.{}); 930 | defer db.close(env); 931 | 932 | var i: u8 = 0; 933 | while (i < 128) : (i += 1) { 934 | try testing.expectEqualStrings(&[_]u8{i}, try tx.get(db, &[_]u8{i})); 935 | } 936 | } 937 | } 938 | 939 | test "Transaction: get(), put(), reserve(), delete(), and commit() several entries with dont_overwrite_key = true / false" { 940 | var tmp = testing.tmpDir(.{}); 941 | defer tmp.cleanup(); 942 | 943 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 944 | var path = try tmp.dir.realpath("./", &buf); 945 | 946 | const env = try Environment.init(path, .{}); 947 | defer env.deinit(); 948 | 949 | const tx = try env.begin(.{}); 950 | errdefer tx.deinit(); 951 | 952 | const db = try tx.open(.{}); 953 | defer db.close(env); 954 | 955 | // Transaction.put() / Transaction.get() 956 | 957 | try tx.put(db, "hello", "world", .{}); 958 | try testing.expectEqualStrings("world", try tx.get(db, "hello")); 959 | 960 | // Transaction.put() / Transaction.reserve() / Transaction.get() (.{ .dont_overwrite_key = true }) 961 | 962 | try 
testing.expectError(error.AlreadyExists, tx.put(db, "hello", "world", .{ .dont_overwrite_key = true })); 963 | { 964 | const result = try tx.reserve(db, "hello", "world".len, .{ .dont_overwrite_key = true }); 965 | try testing.expectEqualStrings("world", result.found_existing); 966 | } 967 | try testing.expectEqualStrings("world", try tx.get(db, "hello")); 968 | 969 | // Transaction.put() / Transaction.get() / Transaction.reserve() (.{ .dont_overwrite_key = false }) 970 | 971 | try tx.put(db, "hello", "other_value", .{}); 972 | try testing.expectEqualStrings("other_value", try tx.get(db, "hello")); 973 | { 974 | const result = try tx.reserve(db, "hello", "new_value".len, .{}); 975 | try testing.expectEqual("new_value".len, result.successful.len); 976 | mem.copy(u8, result.successful, "new_value"); 977 | } 978 | try testing.expectEqualStrings("new_value", try tx.get(db, "hello")); 979 | 980 | // Transaction.del() / Transaction.get() / Transaction.put() / Transaction.get() 981 | 982 | try tx.del(db, "hello", .key); 983 | 984 | try testing.expectError(error.NotFound, tx.del(db, "hello", .key)); 985 | try testing.expectError(error.NotFound, tx.get(db, "hello")); 986 | 987 | try tx.put(db, "hello", "world", .{}); 988 | try testing.expectEqualStrings("world", try tx.get(db, "hello")); 989 | 990 | // Transaction.commit() 991 | 992 | try tx.commit(); 993 | } 994 | 995 | test "Transaction: reserve, write, and attempt to reserve again with dont_overwrite_key = true" { 996 | var tmp = testing.tmpDir(.{}); 997 | defer tmp.cleanup(); 998 | 999 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1000 | var path = try tmp.dir.realpath("./", &buf); 1001 | 1002 | const env = try Environment.init(path, .{}); 1003 | defer env.deinit(); 1004 | 1005 | const tx = try env.begin(.{}); 1006 | errdefer tx.deinit(); 1007 | 1008 | const db = try tx.open(.{}); 1009 | defer db.close(env); 1010 | 1011 | switch (try tx.reserve(db, "hello", "world!".len, .{ .dont_overwrite_key = true })) { 1012 | .found_existing => try testing.expect(false), 1013 | .successful => |dst| std.mem.copy(u8, dst, "world!"), 1014 | } 1015 | 1016 | switch (try tx.reserve(db, "hello", "world!".len, .{ .dont_overwrite_key = true })) { 1017 | .found_existing => |src| try testing.expectEqualStrings("world!", src), 1018 | .successful => try testing.expect(false), 1019 | } 1020 | 1021 | try tx.commit(); 1022 | } 1023 | 1024 | test "Transaction: getOrPut() twice" { 1025 | var tmp = testing.tmpDir(.{}); 1026 | defer tmp.cleanup(); 1027 | 1028 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1029 | var path = try tmp.dir.realpath("./", &buf); 1030 | 1031 | const env = try Environment.init(path, .{}); 1032 | defer env.deinit(); 1033 | 1034 | const tx = try env.begin(.{}); 1035 | errdefer tx.deinit(); 1036 | 1037 | const db = try tx.open(.{}); 1038 | defer db.close(env); 1039 | 1040 | try testing.expectEqual(@as(?[]const u8, null), try tx.getOrPut(db, "hello", "world")); 1041 | try testing.expectEqualStrings("world", try tx.get(db, "hello")); 1042 | try testing.expectEqualStrings("world", (try tx.getOrPut(db, "hello", "world")) orelse unreachable); 1043 | 1044 | try tx.commit(); 1045 | } 1046 | 1047 | test "Transaction: use multiple named databases in a single transaction" { 1048 | var tmp = testing.tmpDir(.{}); 1049 | defer tmp.cleanup(); 1050 | 1051 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1052 | var path = try tmp.dir.realpath("./", &buf); 1053 | 1054 | const env = try Environment.init(path, .{ .max_num_dbs = 2 }); 1055 | defer env.deinit(); 1056 | 1057 | { 
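// First transaction: create the named databases "A" and "B", then commit one entry into each.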
1058 | const tx = try env.begin(.{}); 1059 | errdefer tx.deinit(); 1060 | 1061 | const a = try tx.use("A", .{ .create_if_not_exists = true }); 1062 | defer a.close(env); 1063 | 1064 | const b = try tx.use("B", .{ .create_if_not_exists = true }); 1065 | defer b.close(env); 1066 | 1067 | try tx.put(a, "hello", "this is in A!", .{}); 1068 | try tx.put(b, "hello", "this is in B!", .{}); 1069 | 1070 | try tx.commit(); 1071 | } 1072 | 1073 | { 1074 | const tx = try env.begin(.{}); 1075 | errdefer tx.deinit(); 1076 | 1077 | const a = try tx.use("A", .{}); 1078 | defer a.close(env); 1079 | 1080 | const b = try tx.use("B", .{}); 1081 | defer b.close(env); 1082 | 1083 | try testing.expectEqualStrings("this is in A!", try tx.get(a, "hello")); 1084 | try testing.expectEqualStrings("this is in B!", try tx.get(b, "hello")); 1085 | 1086 | try tx.commit(); 1087 | } 1088 | } 1089 | 1090 | test "Transaction: nest transaction inside transaction" { 1091 | var tmp = testing.tmpDir(.{}); 1092 | defer tmp.cleanup(); 1093 | 1094 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1095 | var path = try tmp.dir.realpath("./", &buf); 1096 | 1097 | const env = try Environment.init(path, .{}); 1098 | defer env.deinit(); 1099 | 1100 | const parent = try env.begin(.{}); 1101 | errdefer parent.deinit(); 1102 | 1103 | const db = try parent.open(.{}); 1104 | defer db.close(env); 1105 | 1106 | { 1107 | const child = try env.begin(.{ .parent = parent }); 1108 | errdefer child.deinit(); 1109 | 1110 | // Parent ID is equivalent to Child ID. Parent is not allowed to perform 1111 | // operations while child has yet to be aborted / committed. 1112 | 1113 | try testing.expectEqual(parent.id(), child.id()); 1114 | 1115 | // Operations cannot be performed against a parent transaction while a child 1116 | // transaction is still active. 
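// LMDB reports this as MDB_BAD_TXN, which call() surfaces as error.TransactionNotAborted.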
1117 | 1118 | try testing.expectError(error.TransactionNotAborted, parent.get(db, "hello")); 1119 | 1120 | try child.put(db, "hello", "world", .{}); 1121 | try child.commit(); 1122 | } 1123 | 1124 | try testing.expectEqualStrings("world", try parent.get(db, "hello")); 1125 | try parent.commit(); 1126 | } 1127 | 1128 | test "Transaction: custom key comparator" { 1129 | const Descending = struct { 1130 | fn order(a: []const u8, b: []const u8) math.Order { 1131 | return switch (mem.order(u8, a, b)) { 1132 | .eq => .eq, 1133 | .lt => .gt, 1134 | .gt => .lt, 1135 | }; 1136 | } 1137 | }; 1138 | 1139 | var tmp = testing.tmpDir(.{}); 1140 | defer tmp.cleanup(); 1141 | 1142 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1143 | var path = try tmp.dir.realpath("./", &buf); 1144 | 1145 | const env = try Environment.init(path, .{ .max_num_dbs = 2 }); 1146 | defer env.deinit(); 1147 | 1148 | const tx = try env.begin(.{}); 1149 | errdefer tx.deinit(); 1150 | 1151 | const db = try tx.open(.{}); 1152 | defer db.close(env); 1153 | 1154 | const items = [_][]const u8{ "a", "b", "c" }; 1155 | 1156 | try tx.setKeyOrder(db, Descending.order); 1157 | 1158 | for (items) |item| { 1159 | try tx.put(db, item, item, .{ .dont_overwrite_key = true }); 1160 | } 1161 | 1162 | { 1163 | const cursor = try tx.cursor(db); 1164 | defer cursor.deinit(); 1165 | 1166 | var i: usize = 0; 1167 | while (try cursor.next()) |item| : (i += 1) { 1168 | try testing.expectEqualSlices(u8, items[items.len - 1 - i], item.key); 1169 | try testing.expectEqualSlices(u8, items[items.len - 1 - i], item.val); 1170 | } 1171 | } 1172 | 1173 | try tx.commit(); 1174 | } 1175 | 1176 | test "Cursor: move around a database and add / delete some entries" { 1177 | var tmp = testing.tmpDir(.{}); 1178 | defer tmp.cleanup(); 1179 | 1180 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1181 | var path = try tmp.dir.realpath("./", &buf); 1182 | 1183 | const env = try Environment.init(path, .{}); 1184 | defer env.deinit(); 1185 | 1186 | const tx = try env.begin(.{}); 1187 | errdefer tx.deinit(); 1188 | 1189 | const db = try tx.open(.{}); 1190 | defer db.close(env); 1191 | 1192 | { 1193 | const cursor = try tx.cursor(db); 1194 | defer cursor.deinit(); 1195 | 1196 | const items = [_][]const u8{ "a", "b", "c" }; 1197 | 1198 | // Cursor.put() 1199 | 1200 | inline for (items) |item| { 1201 | try cursor.put(item, item, .{ .dont_overwrite_key = true }); 1202 | } 1203 | 1204 | // Cursor.current() / Cursor.first() / Cursor.last() / Cursor.next() / Cursor.prev() 1205 | 1206 | { 1207 | const last_item = try cursor.last(); 1208 | try testing.expectEqualStrings(items[items.len - 1], last_item.?.key); 1209 | try testing.expectEqualStrings(items[items.len - 1], last_item.?.val); 1210 | 1211 | { 1212 | var i: usize = items.len - 1; 1213 | while (true) { 1214 | const item = (try cursor.prev()) orelse break; 1215 | try testing.expectEqualStrings(items[i - 1], item.key); 1216 | try testing.expectEqualStrings(items[i - 1], item.val); 1217 | i -= 1; 1218 | } 1219 | } 1220 | 1221 | const current = try cursor.current(); 1222 | const first_item = try cursor.first(); 1223 | try testing.expectEqualStrings(items[0], first_item.?.key); 1224 | try testing.expectEqualStrings(items[0], first_item.?.val); 1225 | try testing.expectEqualStrings(first_item.?.key, current.?.key); 1226 | try testing.expectEqualStrings(first_item.?.val, current.?.val); 1227 | 1228 | { 1229 | var i: usize = 1; 1230 | while (true) { 1231 | const item = (try cursor.next()) orelse break; 1232 | try 
testing.expectEqualStrings(items[i], item.key); 1233 | try testing.expectEqualStrings(items[i], item.val); 1234 | i += 1; 1235 | } 1236 | } 1237 | } 1238 | 1239 | // Cursor.delete() 1240 | 1241 | try cursor.del(.key); 1242 | while (try cursor.prev()) |_| try cursor.del(.key); 1243 | try testing.expectError(error.NotFound, cursor.del(.key)); 1244 | try testing.expect((try cursor.current()) == null); 1245 | 1246 | // Cursor.put() / Cursor.updateInPlace() / Cursor.reserveInPlace() 1247 | 1248 | inline for (items) |item| { 1249 | try cursor.put(item, item, .{ .dont_overwrite_key = true }); 1250 | 1251 | try cursor.updateInPlace(item, "???"); 1252 | try testing.expectEqualStrings("???", (try cursor.current()).?.val); 1253 | 1254 | mem.copy(u8, try cursor.reserveInPlace(item, item.len), item); 1255 | try testing.expectEqualStrings(item, (try cursor.current()).?.val); 1256 | } 1257 | 1258 | // Cursor.seekTo() 1259 | 1260 | try testing.expectError(error.NotFound, cursor.seekTo("0")); 1261 | try testing.expectEqualStrings(items[items.len / 2], try cursor.seekTo(items[items.len / 2])); 1262 | 1263 | // Cursor.seekFrom() 1264 | 1265 | try testing.expectEqualStrings(items[0], (try cursor.seekFrom("0")).val); 1266 | try testing.expectEqualStrings(items[items.len / 2], (try cursor.seekFrom(items[items.len / 2])).val); 1267 | try testing.expectError(error.NotFound, cursor.seekFrom("z")); 1268 | try testing.expectEqualStrings(items[items.len - 1], (try cursor.seekFrom(items[items.len - 1])).val); 1269 | } 1270 | 1271 | try tx.commit(); 1272 | } 1273 | 1274 | test "Cursor: interact with variable-sized items in a database with duplicate keys" { 1275 | var tmp = testing.tmpDir(.{}); 1276 | defer tmp.cleanup(); 1277 | 1278 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1279 | var path = try tmp.dir.realpath("./", &buf); 1280 | 1281 | const env = try Environment.init(path, .{ .max_num_dbs = 1 }); 1282 | defer env.deinit(); 1283 | 1284 | const tx = try env.begin(.{}); 1285 | errdefer tx.deinit(); 1286 | 1287 | const db = try tx.open(.{ .allow_duplicate_keys = true }); 1288 | defer db.close(env); 1289 | 1290 | const expected = comptime .{ 1291 | .{ "Another Set C", [_][]const u8{ "be", "ka", "kra", "tan" } }, 1292 | .{ "Set A", [_][]const u8{ "a", "kay", "zay" } }, 1293 | .{ "Some Set B", [_][]const u8{ "bru", "ski", "vle" } }, 1294 | }; 1295 | 1296 | inline for (expected) |entry| { 1297 | inline for (entry[1]) |val| { 1298 | try tx.putItem(db, entry[0], val, .{ .dont_overwrite_item = true }); 1299 | } 1300 | } 1301 | 1302 | { 1303 | const cursor = try tx.cursor(db); 1304 | defer cursor.deinit(); 1305 | 1306 | comptime var i = 0; 1307 | comptime var j = 0; 1308 | 1309 | inline while (i < expected.len) : ({ 1310 | i += 1; 1311 | j = 0; 1312 | }) { 1313 | inline while (j < expected[i][1].len) : (j += 1) { 1314 | const maybe_entry = try cursor.next(); 1315 | const entry = maybe_entry orelse unreachable; 1316 | 1317 | try testing.expectEqualStrings(expected[i][0], entry.key); 1318 | try testing.expectEqualStrings(expected[i][1][j], entry.val); 1319 | } 1320 | } 1321 | } 1322 | 1323 | try tx.commit(); 1324 | } 1325 | 1326 | test "Cursor: interact with batches of fixed-sized items in a database with duplicate keys" { 1327 | const U64 = struct { 1328 | fn order(a: []const u8, b: []const u8) math.Order { 1329 | const num_a = mem.bytesToValue(u64, a[0..8]); 1330 | const num_b = mem.bytesToValue(u64, b[0..8]); 1331 | if (num_a < num_b) return .lt; 1332 | if (num_a > num_b) return .gt; 1333 | return .eq; 1334 | } 1335 | }; 
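// Note: U64.order above compares fixed-size duplicate items as native-endian u64s,
// so the batches written via putBatch() further below read back in ascending numeric order.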
1336 | 1337 | var tmp = testing.tmpDir(.{}); 1338 | defer tmp.cleanup(); 1339 | 1340 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 1341 | var path = try tmp.dir.realpath("./", &buf); 1342 | 1343 | const env = try Environment.init(path, .{ .max_num_dbs = 1 }); 1344 | defer env.deinit(); 1345 | 1346 | const tx = try env.begin(.{}); 1347 | errdefer tx.deinit(); 1348 | 1349 | const db = try tx.open(.{ 1350 | .allow_duplicate_keys = true, 1351 | .duplicate_entries_are_fixed_size = true, 1352 | }); 1353 | defer db.close(env); 1354 | 1355 | try tx.setItemOrder(db, U64.order); 1356 | 1357 | comptime var items: [512]u64 = undefined; 1358 | inline for (items) |*item, i| item.* = @as(u64, i); 1359 | 1360 | const expected = comptime .{ 1361 | .{ "Set A", &items }, 1362 | .{ "Set B", &items }, 1363 | }; 1364 | 1365 | { 1366 | const cursor = try tx.cursor(db); 1367 | defer cursor.deinit(); 1368 | 1369 | inline for (expected) |entry| { 1370 | try testing.expectEqual(entry[1].len, try cursor.putBatch(entry[0], entry[1], .{})); 1371 | } 1372 | } 1373 | 1374 | { 1375 | const cursor = try tx.cursor(db); 1376 | defer cursor.deinit(); 1377 | 1378 | inline for (expected) |expected_entry| { 1379 | const maybe_entry = try cursor.next(); 1380 | const entry = maybe_entry orelse unreachable; 1381 | try testing.expectEqualStrings(expected_entry[0], entry.key); 1382 | 1383 | var i: usize = 0; 1384 | 1385 | while (try cursor.nextPage(u64)) |page| { 1386 | for (page.items) |item| { 1387 | try testing.expectEqual(expected_entry[1][i], item); 1388 | i += 1; 1389 | } 1390 | } 1391 | } 1392 | } 1393 | 1394 | try tx.commit(); 1395 | } 1396 | -------------------------------------------------------------------------------- /zig.mod: -------------------------------------------------------------------------------- 1 | id: uyw3xk95w7nrelwn86xop56d8v8w2wocqdrpuqnu9ylx4n3n 2 | name: lmdb 3 | main: lmdb.zig 4 | dependencies: 5 | - type: git 6 | path: https://git.openldap.org/openldap/openldap 7 | version: tag-LMDB_0.9.29 8 | c_include_dirs: 9 | - libraries/liblmdb 10 | c_source_files: 11 | - libraries/liblmdb/mdb.c 12 | - libraries/liblmdb/midl.c 13 | c_source_flags: [-fno-sanitize=undefined] 14 | --------------------------------------------------------------------------------