├── .gitignore ├── .github └── workflows │ └── check.yml ├── src ├── lib.zig ├── data.zig ├── batch.zig ├── iterator.zig └── database.zig ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | /.zig-cache 2 | /zig-out 3 | /test-state 4 | /.vscode 5 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | name: check 2 | 3 | on: push 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: checkout 10 | uses: actions/checkout@v2 11 | 12 | - name: setup-zig 13 | uses: mlugg/setup-zig@v1 14 | with: 15 | version: 0.14.0 16 | 17 | - name: lint 18 | run: | 19 | zig fmt --check src/ build.zig 20 | 21 | test: 22 | strategy: 23 | matrix: 24 | os: [ubuntu-latest] 25 | runs-on: ${{matrix.os}} 26 | timeout-minutes: 60 27 | steps: 28 | - name: checkout 29 | uses: actions/checkout@v2 30 | with: 31 | submodules: recursive 32 | 33 | - name: setup-zig 34 | uses: mlugg/setup-zig@v1 35 | with: 36 | version: 0.14.0 37 | 38 | - name: test 39 | run: | 40 | zig build test 41 | -------------------------------------------------------------------------------- /src/lib.zig: -------------------------------------------------------------------------------- 1 | pub const Iterator = iterator.Iterator; 2 | pub const IteratorDirection = iterator.Direction; 3 | pub const RawIterator = iterator.RawIterator; 4 | 5 | pub const ColumnFamily = database.ColumnFamily; 6 | pub const ColumnFamilyDescription = database.ColumnFamilyDescription; 7 | pub const ColumnFamilyHandle = database.ColumnFamilyHandle; 8 | pub const ColumnFamilyOptions = database.ColumnFamilyOptions; 9 | pub const DB = database.DB; 10 | pub const DBOptions = database.DBOptions; 11 | pub const LiveFile = database.LiveFile; 12 | 13 | pub const Data = data.Data; 14 | 15 | pub const WriteBatch = batch.WriteBatch; 16 | 
17 | //////////// 18 | // private 19 | pub const batch = @import("batch.zig"); 20 | pub const data = @import("data.zig"); 21 | pub const database = @import("database.zig"); 22 | pub const iterator = @import("iterator.zig"); 23 | 24 | test { 25 | const std = @import("std"); 26 | std.testing.refAllDecls(@This()); 27 | } 28 | -------------------------------------------------------------------------------- /src/data.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | 4 | const Allocator = std.mem.Allocator; 5 | 6 | /// data that was allocated by rocksdb and must be freed by rocksdb 7 | pub const Data = struct { 8 | data: []const u8, 9 | free: *const fn (?*anyopaque) callconv(.C) void, 10 | 11 | pub fn deinit(self: @This()) void { 12 | self.free(@ptrCast(@constCast(self.data.ptr))); 13 | } 14 | 15 | pub fn format( 16 | self: @This(), 17 | comptime _: []const u8, 18 | options: std.fmt.FormatOptions, 19 | writer: anytype, 20 | ) !void { 21 | try std.fmt.formatBuf(self.data, options, writer); 22 | } 23 | }; 24 | 25 | pub fn copy(allocator: Allocator, in: [*c]const u8) Allocator.Error![]u8 { 26 | return copyLen(allocator, in, std.mem.len(in)); 27 | } 28 | 29 | pub fn copyLen(allocator: Allocator, in: [*c]const u8, len: usize) Allocator.Error![]u8 { 30 | const ret = try allocator.dupe(u8, in[0..len]); 31 | return ret; 32 | } 33 | -------------------------------------------------------------------------------- /src/batch.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | const lib = @import("lib.zig"); 4 | 5 | const Allocator = std.mem.Allocator; 6 | 7 | const ColumnFamilyHandle = lib.ColumnFamilyHandle; 8 | 9 | pub const WriteBatch = struct { 10 | inner: *rdb.rocksdb_writebatch_t, 11 | 12 | const Self = @This(); 13 | 14 | pub fn init() WriteBatch { 15 | return .{ 
.inner = rdb.rocksdb_writebatch_create().? }; 16 | } 17 | 18 | pub fn deinit(self: WriteBatch) void { 19 | rdb.rocksdb_writebatch_destroy(self.inner); 20 | } 21 | 22 | pub fn put( 23 | self: *const Self, 24 | column_family: ColumnFamilyHandle, 25 | key: []const u8, 26 | value: []const u8, 27 | ) void { 28 | rdb.rocksdb_writebatch_put_cf( 29 | self.inner, 30 | column_family, 31 | key.ptr, 32 | key.len, 33 | value.ptr, 34 | value.len, 35 | ); 36 | } 37 | 38 | pub fn delete( 39 | self: *const Self, 40 | column_family: ColumnFamilyHandle, 41 | key: []const u8, 42 | ) void { 43 | rdb.rocksdb_writebatch_delete_cf( 44 | self.inner, 45 | column_family, 46 | key.ptr, 47 | key.len, 48 | ); 49 | } 50 | 51 | pub fn deleteRange( 52 | self: *const Self, 53 | column_family: ColumnFamilyHandle, 54 | start_key: []const u8, 55 | end_key: []const u8, 56 | ) void { 57 | rdb.rocksdb_writebatch_delete_range_cf( 58 | self.inner, 59 | column_family, 60 | start_key.ptr, 61 | start_key.len, 62 | end_key.ptr, 63 | end_key.len, 64 | ); 65 | } 66 | }; 67 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Build and use RocksDB in zig. 2 | 3 | # Build Dependencies 4 | 5 | `rocksdb-zig` is pinned to [Zig `0.14`](https://ziglang.org/download/), so you will need to have it installed. 6 | 7 | # Usage 8 | 9 | Supported use cases: 10 | - [⬇️](#build-rocksdb) Build a RocksDB static library using the zig build system. 11 | - [⬇️](#import-rocksdb-c-api-in-a-zig-project) Use the RocksDB C API through auto-generated Zig bindings. 12 | - [⬇️](#import-the-zig-bindings-library) Import an idiomatic zig library of bindings that wrap the RocksDB library with hand-written zig code. 13 | 14 | ## Build RocksDB 15 | Clone this repository, then run `zig build`. 16 | 17 | You will find a statically linked `rocksdb` archive 18 | in `zig-out/lib/librocksdb.a`.
19 | 20 | You can use this with any language or build system. 21 | 22 | ## Import RocksDB C API in the Zig Build System. 23 | 24 | Fetch `rocksdb` and save it to your `build.zig.zon` (replace `<ref>` with the commit hash, tag, or branch you want to pin): 25 | ``` 26 | $ zig fetch --save=rocksdb https://github.com/Syndica/rocksdb-zig/archive/<ref>.tar.gz 27 | ``` 28 | 29 | Add the import to a module: 30 | ```zig 31 | const rocksdb = b.dependency("rocksdb", .{}).module("rocksdb"); 32 | exe.root_module.addImport("rocksdb", rocksdb); 33 | ``` 34 | 35 | Import the `rocksdb` module. 36 | ```zig 37 | const rocksdb = @import("rocksdb"); 38 | ``` 39 | 40 | ## Import the Zig bindings library using the Zig Build System. 41 | 42 | Fetch `rocksdb` and save it to your `build.zig.zon` (replace `<ref>` with the commit hash, tag, or branch you want to pin): 43 | ``` 44 | $ zig fetch --save=rocksdb https://github.com/Syndica/rocksdb-zig/archive/<ref>.tar.gz 45 | ``` 46 | 47 | Add the import to a module: 48 | ```zig 49 | const bindings = b.dependency("rocksdb", .{}).module("bindings"); 50 | exe.root_module.addImport("rocksdb", bindings); 51 | ``` 52 | 53 | Import the `rocksdb` module.
54 | ```zig 55 | const rocksdb = @import("rocksdb"); 56 | ``` 57 | -------------------------------------------------------------------------------- /src/iterator.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | const lib = @import("lib.zig"); 4 | 5 | const Allocator = std.mem.Allocator; 6 | 7 | const Data = lib.Data; 8 | 9 | const general_freer = lib.data.general_freer; 10 | 11 | pub const Direction = enum { forward, reverse }; 12 | 13 | pub const Iterator = struct { 14 | raw: RawIterator, 15 | direction: Direction, 16 | done: bool, 17 | is_first: bool = true, 18 | 19 | const Self = @This(); 20 | 21 | pub fn deinit(self: Self) void { 22 | self.raw.deinit(); 23 | } 24 | 25 | pub fn next(self: *Self, err_str: *?Data) error{RocksDBIterator}!?[2]Data { 26 | return self.nextGeneric([2]Data, RawIterator.entry, err_str); 27 | } 28 | 29 | pub fn nextKey(self: *Self, err_str: *?Data) error{RocksDBIterator}!?Data { 30 | return self.nextGeneric(Data, RawIterator.key, err_str); 31 | } 32 | 33 | pub fn nextValue(self: *Self, err_str: *?Data) error{RocksDBIterator}!?Data { 34 | return self.nextGeneric(Data, RawIterator.value, err_str); 35 | } 36 | 37 | fn nextGeneric( 38 | self: *Self, 39 | comptime T: type, 40 | getNext: fn (RawIterator) ?T, 41 | err_str: *?Data, 42 | ) error{RocksDBIterator}!?T { 43 | if (self.done) { 44 | return null; 45 | } else { 46 | // NOTE: we call next before getting the value (instead of after) 47 | // because rocksdb uses pointers 48 | if (!self.is_first) { 49 | switch (self.direction) { 50 | .forward => self.raw.next(), 51 | .reverse => self.raw.prev(), 52 | } 53 | } 54 | 55 | if (getNext(self.raw)) |item| { 56 | self.is_first = false; 57 | return item; 58 | } else { 59 | self.done = true; 60 | try self.raw.status(err_str); 61 | return null; 62 | } 63 | } 64 | } 65 | }; 66 | 67 | pub const RawIterator = struct { 68 | inner: 
*rdb.rocksdb_iterator_t, 69 | 70 | const Self = @This(); 71 | 72 | pub fn deinit(self: Self) void { 73 | rdb.rocksdb_iter_destroy(self.inner); 74 | } 75 | 76 | pub fn seek(self: Self, key_: []const u8) void { 77 | rdb.rocksdb_iter_seek(self.inner, @ptrCast(key_.ptr), key_.len); 78 | } 79 | 80 | pub fn seekForPrev(self: Self, key_: []const u8) void { 81 | rdb.rocksdb_iter_seek_for_prev(self.inner, @ptrCast(key_.ptr), key_.len); 82 | } 83 | 84 | pub fn seekToFirst(self: Self) void { 85 | rdb.rocksdb_iter_seek_to_first(self.inner); 86 | } 87 | 88 | pub fn seekToLast(self: Self) void { 89 | rdb.rocksdb_iter_seek_to_last(self.inner); 90 | } 91 | 92 | pub fn valid(self: Self) bool { 93 | return rdb.rocksdb_iter_valid(self.inner) != 0; 94 | } 95 | 96 | pub fn entry(self: Self) ?[2]Data { 97 | if (self.valid()) { 98 | return .{ self.keyImpl(), self.valueImpl() }; 99 | } else { 100 | return null; 101 | } 102 | } 103 | 104 | pub fn key(self: Self) ?Data { 105 | if (self.valid()) { 106 | return self.keyImpl(); 107 | } else { 108 | return null; 109 | } 110 | } 111 | 112 | pub fn value(self: Self) ?Data { 113 | if (self.valid()) { 114 | return self.valueImpl(); 115 | } else { 116 | return null; 117 | } 118 | } 119 | 120 | fn keyImpl(self: Self) Data { 121 | var len: usize = undefined; 122 | const ret = rdb.rocksdb_iter_key(self.inner, &len); 123 | return .{ 124 | .data = ret[0..len], 125 | .free = rdb.rocksdb_free, 126 | }; 127 | } 128 | 129 | fn valueImpl(self: Self) Data { 130 | var len: usize = undefined; 131 | const ret = rdb.rocksdb_iter_value(self.inner, &len); 132 | return .{ 133 | .data = ret[0..len], 134 | .free = rdb.rocksdb_free, 135 | }; 136 | } 137 | 138 | pub fn next(self: Self) void { 139 | rdb.rocksdb_iter_next(self.inner); 140 | } 141 | 142 | pub fn prev(self: Self) void { 143 | rdb.rocksdb_iter_prev(self.inner); 144 | } 145 | 146 | pub fn status(self: Self, err_str: *?Data) error{RocksDBIterator}!void { 147 | var err_str_in: ?[*:0]u8 = null; 148 | 
rdb.rocksdb_iter_get_error(self.inner, @ptrCast(&err_str_in)); 149 | if (err_str_in) |s| { 150 | err_str.* = .{ 151 | .data = std.mem.span(s), 152 | .free = rdb.rocksdb_free, 153 | }; 154 | return error.RocksDBIterator; 155 | } 156 | } 157 | }; 158 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /src/database.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | const lib = @import("lib.zig"); 4 | 5 | const Allocator = std.mem.Allocator; 6 | const RwLock = std.Thread.RwLock; 7 | 8 | const Data = lib.Data; 9 | const Iterator = lib.Iterator; 10 | const IteratorDirection = lib.IteratorDirection; 11 | const RawIterator = lib.RawIterator; 12 | const WriteBatch = lib.WriteBatch; 13 | 14 | const copy = lib.data.copy; 15 | const copyLen = lib.data.copyLen; 16 | 17 | pub const DB = struct { 18 | db: *rdb.rocksdb_t, 19 | default_cf: ?ColumnFamilyHandle = null, 20 | cf_name_to_handle: *CfNameToHandleMap, 21 | 22 | const Self = @This(); 23 | 24 | pub fn open( 25 | allocator: Allocator, 26 | dir: []const u8, 27 | db_options: DBOptions, 28 | maybe_column_families: ?[]const ColumnFamilyDescription, 29 | err_str: *?Data, 30 | ) (Allocator.Error || error{RocksDBOpen})!struct { Self, []const ColumnFamily } { 31 | const column_families = if (maybe_column_families) |cfs| 32 | cfs 33 | else 34 | &[1]ColumnFamilyDescription{.{ .name = "default" }}; 35 | 36 | const cf_handles = try allocator.alloc(?ColumnFamilyHandle, column_families.len); 37 | defer allocator.free(cf_handles); 38 | 39 | // open database 40 | const db = db: { 41 | const cf_options = try allocator.alloc(?*const rdb.rocksdb_options_t, column_families.len); 42 | defer allocator.free(cf_options); 43 | const cf_names = try allocator.alloc([*c]const u8, column_families.len); 44 | defer allocator.free(cf_names); 45 | for (column_families, 0..) 
|cf, i| { 46 | cf_names[i] = @ptrCast(cf.name.ptr); 47 | cf_options[i] = cf.options.convert(); 48 | } 49 | var ch = CallHandler.init(err_str); 50 | break :db try ch.handle(rdb.rocksdb_open_column_families( 51 | db_options.convert(), 52 | dir.ptr, 53 | @intCast(cf_names.len), 54 | @ptrCast(cf_names.ptr), 55 | @ptrCast(cf_options.ptr), 56 | @ptrCast(cf_handles.ptr), 57 | @ptrCast(&ch.err_str_in), 58 | ), error.RocksDBOpen); 59 | }; 60 | 61 | // organize column family metadata 62 | const cf_list = try allocator.alloc(ColumnFamily, column_families.len); 63 | errdefer allocator.free(cf_list); 64 | const cf_map = try CfNameToHandleMap.create(allocator); 65 | errdefer cf_map.destroy(); 66 | for (cf_list, 0..) |*cf, i| { 67 | const name = try allocator.dupe(u8, column_families[i].name); 68 | errdefer allocator.free(name); 69 | cf.* = .{ 70 | .name = name, 71 | .handle = cf_handles[i].?, 72 | }; 73 | try cf_map.map.put(allocator, name, cf_handles[i].?); 74 | } 75 | 76 | return .{ 77 | Self{ .db = db.?, .cf_name_to_handle = cf_map }, 78 | cf_list, 79 | }; 80 | } 81 | 82 | pub fn withDefaultColumnFamily(self: Self, column_family: ColumnFamilyHandle) Self { 83 | return .{ 84 | .db = self.db, 85 | .cf_name_to_handle = self.cf_name_to_handle, 86 | .default_cf = column_family, 87 | }; 88 | } 89 | 90 | /// Closes the database and cleans up this struct's state. 91 | pub fn deinit(self: Self) void { 92 | self.cf_name_to_handle.destroy(); 93 | rdb.rocksdb_close(self.db); 94 | } 95 | 96 | /// Delete the entire database from the filesystem. 97 | /// Destroying a database after it is closed has undefined behavior. 
98 | pub fn destroy(self: Self) error{Closed}!void { 99 | rdb.rocksdb_destroy_db(self.db); 100 | } 101 | 102 | pub fn createColumnFamily( 103 | self: *Self, 104 | name: []const u8, 105 | err_str: *?Data, 106 | ) !ColumnFamilyHandle { 107 | const options = rdb.rocksdb_options_create(); 108 | var ch = CallHandler.init(err_str); 109 | const handle = (try ch.handle(rdb.rocksdb_create_column_family( 110 | self.db, 111 | options, 112 | @ptrCast(name), 113 | @ptrCast(&ch.err_str_in), 114 | ), error.RocksDBCreateColumnFamily)).?; 115 | self.cf_name_to_handle.put(name, handle); 116 | return handle; 117 | } 118 | 119 | pub fn columnFamily( 120 | self: *const Self, 121 | cf_name: []const u8, 122 | ) error{UnknownColumnFamily}!ColumnFamilyHandle { 123 | return self.cf_name_to_handle.get(cf_name) orelse error.UnknownColumnFamily; 124 | } 125 | 126 | pub fn put( 127 | self: *const Self, 128 | column_family: ?ColumnFamilyHandle, 129 | key: []const u8, 130 | value: []const u8, 131 | err_str: *?Data, 132 | ) error{RocksDBPut}!void { 133 | const options = rdb.rocksdb_writeoptions_create(); 134 | defer rdb.rocksdb_writeoptions_destroy(options); 135 | var ch = CallHandler.init(err_str); 136 | try ch.handle(rdb.rocksdb_put_cf( 137 | self.db, 138 | options, 139 | column_family orelse self.default_cf, 140 | key.ptr, 141 | key.len, 142 | value.ptr, 143 | value.len, 144 | @ptrCast(&ch.err_str_in), 145 | ), error.RocksDBPut); 146 | } 147 | 148 | pub fn get( 149 | self: *const Self, 150 | column_family: ?ColumnFamilyHandle, 151 | key: []const u8, 152 | err_str: *?Data, 153 | ) error{RocksDBGet}!?Data { 154 | var valueLength: usize = 0; 155 | const options = rdb.rocksdb_readoptions_create(); 156 | defer rdb.rocksdb_readoptions_destroy(options); 157 | var ch = CallHandler.init(err_str); 158 | const value = try ch.handle(rdb.rocksdb_get_cf( 159 | self.db, 160 | options, 161 | column_family orelse self.default_cf, 162 | key.ptr, 163 | key.len, 164 | &valueLength, 165 | @ptrCast(&ch.err_str_in), 
166 | ), error.RocksDBGet); 167 | if (value == 0) { 168 | return null; 169 | } 170 | return .{ 171 | .free = rdb.rocksdb_free, 172 | .data = value[0..valueLength], 173 | }; 174 | } 175 | 176 | pub fn delete( 177 | self: *const Self, 178 | column_family: ?ColumnFamilyHandle, 179 | key: []const u8, 180 | err_str: *?Data, 181 | ) error{RocksDBDelete}!void { 182 | const options = rdb.rocksdb_writeoptions_create(); 183 | defer rdb.rocksdb_writeoptions_destroy(options); 184 | var ch = CallHandler.init(err_str); 185 | try ch.handle(rdb.rocksdb_delete_cf( 186 | self.db, 187 | options, 188 | column_family orelse self.default_cf, 189 | key.ptr, 190 | key.len, 191 | @ptrCast(&ch.err_str_in), 192 | ), error.RocksDBDelete); 193 | } 194 | 195 | pub fn deleteFilesInRange( 196 | self: *const Self, 197 | column_family: ?ColumnFamilyHandle, 198 | start_key: []const u8, 199 | limit_key: []const u8, 200 | err_str: *?Data, 201 | ) error{RocksDBDeleteFilesInRange}!void { 202 | var ch = CallHandler.init(err_str); 203 | try ch.handle(rdb.rocksdb_delete_file_in_range_cf( 204 | self.db, 205 | column_family orelse self.default_cf, 206 | @ptrCast(start_key.ptr), 207 | start_key.len, 208 | @ptrCast(limit_key.ptr), 209 | limit_key.len, 210 | @ptrCast(&ch.err_str_in), 211 | ), error.RocksDBDeleteFilesInRange); 212 | } 213 | 214 | pub fn iterator( 215 | self: *const Self, 216 | column_family: ?ColumnFamilyHandle, 217 | direction: IteratorDirection, 218 | start: ?[]const u8, 219 | ) Iterator { 220 | const it = self.rawIterator(column_family); 221 | if (start) |seek_target| switch (direction) { 222 | .forward => it.seek(seek_target), 223 | .reverse => it.seekForPrev(seek_target), 224 | } else switch (direction) { 225 | .forward => it.seekToFirst(), 226 | .reverse => it.seekToLast(), 227 | } 228 | return .{ 229 | .raw = it, 230 | .direction = direction, 231 | .done = false, 232 | }; 233 | } 234 | 235 | pub fn rawIterator( 236 | self: *const Self, 237 | column_family: ?ColumnFamilyHandle, 238 | ) 
RawIterator {
        const options = rdb.rocksdb_readoptions_create();
        defer rdb.rocksdb_readoptions_destroy(options); // TODO does this need to outlive the iterator?
        const inner_iter = rdb.rocksdb_create_iterator_cf(
            self.db,
            options,
            column_family orelse self.default_cf,
        ).?;
        const ri = RawIterator{ .inner = inner_iter };
        return ri;
    }

    /// Returns metadata for every live SST file in the database.
    /// Caller owns the returned list and the strings inside each `LiveFile`:
    /// free each entry with `LiveFile.deinit`, then the list with `deinit`.
    pub fn liveFiles(self: *const Self, allocator: Allocator) Allocator.Error!std.ArrayList(LiveFile) {
        const files = rdb.rocksdb_livefiles(self.db).?;
        // FIX: the rocksdb-owned metadata was previously leaked when an
        // allocation below failed; release it on every exit path.
        defer rdb.rocksdb_livefiles_destroy(files);

        const num_files: usize = @intCast(rdb.rocksdb_livefiles_count(files));
        var livefiles = std.ArrayList(LiveFile).init(allocator);
        // FIX: on the error path, free entries that were already fully built
        // (previously they leaked).
        errdefer {
            for (livefiles.items) |lf| lf.deinit();
            livefiles.deinit();
        }

        var key_size: usize = 0;
        for (0..num_files) |i| {
            const file_num: c_int = @intCast(i);
            // Copy each rocksdb-owned string into caller-owned memory; the
            // per-field errdefers keep a half-built entry from leaking if a
            // later copy fails.
            const column_family_name = try copy(allocator, rdb.rocksdb_livefiles_column_family_name(files, file_num));
            errdefer allocator.free(column_family_name);
            const name = try copy(allocator, rdb.rocksdb_livefiles_name(files, file_num));
            errdefer allocator.free(name);
            const start_key = try copyLen(allocator, rdb.rocksdb_livefiles_smallestkey(files, file_num, &key_size), key_size);
            errdefer allocator.free(start_key);
            const end_key = try copyLen(allocator, rdb.rocksdb_livefiles_largestkey(files, file_num, &key_size), key_size);
            errdefer allocator.free(end_key);
            try livefiles.append(.{
                .allocator = allocator,
                .column_family_name = column_family_name,
                .name = name,
                .size = rdb.rocksdb_livefiles_size(files, file_num),
                .level = rdb.rocksdb_livefiles_level(files, file_num),
                .start_key = start_key,
                .end_key = end_key,
                .num_entries = rdb.rocksdb_livefiles_entries(files, file_num),
                .num_deletions = rdb.rocksdb_livefiles_deletions(files, file_num),
            });
        }
        return livefiles;
    }

    /// Reads a database property for `column_family` (or the default column
    /// family when null). The returned `Data` is rocksdb-owned; release it
    /// with `Data.deinit`.
    pub fn propertyValueCf(
        self: *const Self,
        column_family: ?ColumnFamilyHandle,
        propname: []const u8,
    ) Data {
        // NOTE(review): rocksdb expects a NUL-terminated property name, so
        // this relies on callers passing a literal or sentinel-terminated
        // slice -- consider `[:0]const u8`. Also, rocksdb returns null for
        // unknown properties, which `std.mem.span` does not tolerate;
        // confirm callers only pass valid property names.
        const value = rdb.rocksdb_property_value_cf(
            self.db,
            column_family orelse self.default_cf,
            @ptrCast(propname.ptr),
        );
        return .{
            .data = std.mem.span(value),
            .free = rdb.rocksdb_free,
        };
    }

    /// Atomically applies all operations queued in `batch`.
    /// On failure, `err_str` is set to a rocksdb-owned message
    /// (caller frees it with `Data.deinit`).
    pub fn write(
        self: *const Self,
        batch: WriteBatch,
        err_str: *?Data,
    ) error{RocksDBWrite}!void {
        const options = rdb.rocksdb_writeoptions_create();
        defer rdb.rocksdb_writeoptions_destroy(options);
        var ch = CallHandler.init(err_str);
        // Consistency: use CallHandler.errIn() (same pointer cast as the
        // previous inline `@ptrCast(&ch.err_str_in)`).
        try ch.handle(rdb.rocksdb_write(
            self.db,
            options,
            batch.inner,
            ch.errIn(),
        ), error.RocksDBWrite);
    }

    /// Flushes memtables to disk for one column family, or for the whole
    /// database when `column_family` is null.
    /// On failure, `err_str` is set to a rocksdb-owned message.
    pub fn flush(
        self: *const Self,
        column_family: ?ColumnFamilyHandle,
        err_str: *?Data,
    ) error{RocksDBFlush}!void {
        const options = rdb.rocksdb_flushoptions_create();
        defer rdb.rocksdb_flushoptions_destroy(options);
        var ch = CallHandler.init(err_str);
        const e = error.RocksDBFlush;
        if (column_family) |cf|
            try ch.handle(rdb.rocksdb_flush_cf(self.db, options, cf, ch.errIn()), e)
        else
            try ch.handle(rdb.rocksdb_flush(self.db, options, ch.errIn()), e);
    }
};

pub const DBOptions = struct {
    /// If true, the database will be created if it is missing.
    /// Default: false
    create_if_missing: bool = false,

    /// If true, missing column families will be automatically created on
    /// DB::Open().
    /// Default: false
    create_missing_column_families: bool = false,

    /// Number of open files that can be used by the DB. You may need to
    /// increase this if your database has a large working set. Value -1 means
    /// files opened are always kept open. You can estimate number of files based
    /// on target_file_size_base and target_file_size_multiplier for level-based
    /// compaction. For universal-style compaction, you can usually set it to -1.
    ///
    /// A high value or -1 for this option can cause high memory usage.
    /// See BlockBasedTableOptions::cache_usage_options to constrain
    /// memory usage in case of block based table format.
    ///
    /// Default: -1
    ///
    /// Dynamically changeable through SetDBOptions() API.
    max_open_files: i32 = -1,

    /// Converts to a rocksdb-owned options struct. The caller is responsible
    /// for destroying the result with `rocksdb_options_destroy`.
    fn convert(do: DBOptions) *rdb.struct_rocksdb_options_t {
        const ro = rdb.rocksdb_options_create().?;
        rdb.rocksdb_options_set_create_if_missing(ro, @intFromBool(do.create_if_missing));
        // FIX: previously read `do.create_if_missing` here (copy-paste bug),
        // so `create_missing_column_families` was silently ignored.
        rdb.rocksdb_options_set_create_missing_column_families(ro, @intFromBool(do.create_missing_column_families));
        rdb.rocksdb_options_set_max_open_files(ro, do.max_open_files);

        return ro;
    }
};

test "DB clean init and deinit" {
    const ns = struct {
        pub fn run(allocator: Allocator) !void {
            var dir = std.testing.tmpDir(.{});
            defer dir.cleanup();
            const path = try dir.dir.realpathAlloc(allocator, ".");
            defer allocator.free(path);

            var data: ?Data = null;
            const db, const cfs = try DB.open(
                allocator,
                path,
                .{
                    .create_if_missing = true,
                    .create_missing_column_families = true,
                },
                null,
                &data,
            );

            db.deinit();
            allocator.free(cfs);
        }
    };

    try ns.run(std.testing.allocator);
    try std.testing.checkAllAllocationFailures(std.testing.allocator, ns.run, .{});
}

test "DBOptions defaults" {
    const expected = rdb.rocksdb_options_create().?;
    // FIX: destroy the expected options (previously leaked).
    defer rdb.rocksdb_options_destroy(expected);
    try testDBOptions(DBOptions{}, expected);
}

test "DBOptions custom" {
    // The two bools intentionally differ so a mixup between them inside
    // `convert` (as previously existed) fails this test.
    const subject = DBOptions{
        .create_if_missing = false,
        .create_missing_column_families = true,
        .max_open_files = 1234,
    };

    const expected = rdb.rocksdb_options_create().?;
    // FIX: destroy the expected options (previously leaked).
    defer rdb.rocksdb_options_destroy(expected);
    rdb.rocksdb_options_set_create_if_missing(expected, 0);
    rdb.rocksdb_options_set_create_missing_column_families(expected, 1);
    rdb.rocksdb_options_set_max_open_files(expected, 1234);

    try testDBOptions(subject, expected);
}

/// Asserts that converting `test_subject` yields options whose getter value
/// matches `expected` for every `DBOptions` field. Destroys the converted
/// options; `expected` remains owned by the caller.
fn testDBOptions(test_subject: DBOptions, expected: *rdb.struct_rocksdb_options_t) !void {
    const actual = test_subject.convert();
    // FIX: destroy the converted options (previously leaked).
    defer rdb.rocksdb_options_destroy(actual);

    inline for (@typeInfo(DBOptions).@"struct".fields) |field| {
        const getter = "rocksdb_options_get_" ++ field.name;
        const expected_value = @call(.auto, @field(rdb, getter), .{expected});
        const actual_value = @call(.auto, @field(rdb, getter), .{actual});
        try std.testing.expectEqual(expected_value, actual_value);
    }
}

/// Name and options used when opening or creating a column family.
pub const ColumnFamilyDescription = struct {
    name: []const u8,
    options: ColumnFamilyOptions = .{},
};

/// A named column family together with its open rocksdb handle.
pub const ColumnFamily = struct {
    name: []const u8,
    handle: ColumnFamilyHandle,
};

pub const ColumnFamilyHandle = *rdb.rocksdb_column_family_handle_t;

/// Per-column-family options. Currently empty: `convert` yields rocksdb's
/// defaults.
pub const ColumnFamilyOptions = struct {
    fn convert(_: ColumnFamilyOptions) *rdb.struct_rocksdb_options_t {
        return rdb.rocksdb_options_create().?;
    }
};

/// The metadata that describes a SST file
pub const LiveFile = struct {
    allocator: Allocator,
    /// Name of the column family the file belongs to
    column_family_name: []const u8,
    /// Name of the file
    name: []const u8,
    /// Size of the file
    size: usize,
    /// Level at which this file resides
    level: i32,
    /// Smallest user defined key in the file
    start_key: ?[]const u8,
    /// Largest user defined key in the file
    end_key: ?[]const u8,
    /// Number of entries/alive keys in the file
    num_entries: u64,
    /// Number of deletions/tomb key(s) in the file
    num_deletions: u64,

    /// Frees the strings copied out of rocksdb by `DB.liveFiles`.
    pub fn deinit(self: LiveFile) void {
        self.allocator.free(self.column_family_name);
        self.allocator.free(self.name);
        if (self.start_key) |start_key| self.allocator.free(start_key);
        if (self.end_key) |end_key| self.allocator.free(end_key);
    }
};

/// Adapts rocksdb's `char **errptr` out-parameter convention to zig error
/// unions: pass `errIn()` into the C call, then pass its return value
/// through `handle`.
const CallHandler = struct {
    /// The error string to pass into rocksdb.
    err_str_in: ?[*:0]u8 = null,
    /// The user's error string.
    err_str_out: *?Data,

    fn init(err_str_out: *?Data) CallHandler {
        return .{ .err_str_out = err_str_out };
    }

    /// The `char **errptr` argument for a rocksdb C call.
    fn errIn(self: *CallHandler) [*c][*c]u8 {
        return @ptrCast(&self.err_str_in);
    }

    /// Returns `ret` on success. If rocksdb populated the error string,
    /// exposes it via `err_str_out` (rocksdb-owned; caller frees with
    /// `Data.deinit`) and returns `err` instead.
    fn handle(
        self: *CallHandler,
        ret: anytype,
        comptime err: anytype,
    ) @TypeOf(err)!@TypeOf(ret) {
        if (self.err_str_in) |s| {
            self.err_str_out.* = .{
                .data = std.mem.span(s),
                .free = rdb.rocksdb_free,
            };
            return err;
        } else {
            return ret;
        }
    }
};

/// Thread-safe map from column family name to handle. Owns the duplicated
/// name strings and destroys the rocksdb handles on `destroy`.
const CfNameToHandleMap = struct {
    allocator: Allocator,
    map: std.StringHashMapUnmanaged(ColumnFamilyHandle),
    lock: RwLock,

    const Self = @This();

    fn create(allocator: Allocator) Allocator.Error!*Self {
        const self = try allocator.create(Self);
        self.* = .{
            .allocator = allocator,
            .map = .{},
            .lock = .{},
        };
        return self;
    }

    fn destroy(self: *Self) void {
        var iter = self.map.iterator();
        while (iter.next()) |entry| {
            rdb.rocksdb_column_family_handle_destroy(entry.value_ptr.*);
            self.allocator.free(entry.key_ptr.*);
        }
        self.map.deinit(self.allocator);
        self.allocator.destroy(self);
    }

    /// Stores `handle` under a copy of `name`.
    fn put(self: *Self, name: []const u8, handle: ColumnFamilyHandle) Allocator.Error!void {
        const owned_name = try self.allocator.dupe(u8, name);
        // FIX: don't leak the copied name if the insert below fails.
        errdefer self.allocator.free(owned_name);

        self.lock.lock();
        defer self.lock.unlock();

        // FIX: the Allocator.Error from `put` was previously discarded;
        // propagate it to the caller.
        try self.map.put(self.allocator, owned_name, handle);
    }

    fn get(self: *Self, name: []const u8) ?ColumnFamilyHandle {
        self.lock.lockShared();
        defer self.lock.unlockShared();
        return self.map.get(name);
    }
};

test DB {
    var err_str: ?Data = null;
    defer if (err_str) |e| e.deinit();
    runTest(&err_str) catch |e| {
        std.debug.print("{}: {?}\n", .{ e, err_str });
        return e;
    };
}

/// End-to-end smoke test: put/get/delete, iteration, and liveFiles against
/// an on-disk database in ./test-state.
fn runTest(err_str: *?Data) !void {
    {
        var db, const families = try DB.open(
            std.testing.allocator,
            "test-state",
            .{
                .create_if_missing = true,
                .create_missing_column_families = true,
            },
            &.{
                .{ .name = "default" },
                .{ .name = "another" },
            },
            err_str,
        );
        defer db.deinit();
        defer std.testing.allocator.free(families);
        const a_family = families[1].handle;

        _ = try db.put(a_family, "hello", "world", err_str);
        _ = try db.put(a_family, "zebra", "world", err_str);

        db = db.withDefaultColumnFamily(a_family);

        const val = try db.get(null, "hello", err_str);
        try std.testing.expect(std.mem.eql(u8, val.?.data, "world"));

        var iter = db.iterator(null, .forward, null);
        defer iter.deinit();
        var v = (try iter.nextValue(err_str)).?;
        try std.testing.expect(std.mem.eql(u8, "world", v.data));
        v = (try iter.nextValue(err_str)).?;
        try std.testing.expect(std.mem.eql(u8, "world", v.data));
        try std.testing.expect(null == try iter.next(err_str));

        try db.delete(null, "hello", err_str);

        const noval = try db.get(null, "hello", err_str);
        try std.testing.expect(null == noval);
    }

    var db, const families = try DB.open(
        std.testing.allocator,
        "test-state",
        .{
            .create_if_missing = true,
            .create_missing_column_families = true,
        },
        &.{
            .{ .name = "default" },
            .{ .name = "another" },
        },
        err_str,
    );
    defer db.deinit();
    defer std.testing.allocator.free(families);
    const lfs = try db.liveFiles(std.testing.allocator);
    defer lfs.deinit();
    defer for (lfs.items) |lf| lf.deinit();
    try std.testing.expect(std.mem.eql(u8, "another", lfs.items[0].column_family_name));
}
--------------------------------------------------------------------------------