├── .github └── workflows │ └── check.yml ├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── build.zig.zon └── src ├── batch.zig ├── data.zig ├── database.zig ├── iterator.zig └── lib.zig /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | name: check 2 | 3 | on: push 4 | 5 | jobs: 6 | lint: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: checkout 10 | uses: actions/checkout@v2 11 | 12 | - name: setup-zig 13 | uses: mlugg/setup-zig@v1 14 | with: 15 | version: 0.13.0 16 | 17 | - name: lint 18 | run: | 19 | zig fmt --check src/ build.zig 20 | 21 | test: 22 | strategy: 23 | matrix: 24 | os: [ubuntu-latest] 25 | runs-on: ${{matrix.os}} 26 | timeout-minutes: 60 27 | steps: 28 | - name: checkout 29 | uses: actions/checkout@v2 30 | with: 31 | submodules: recursive 32 | 33 | - name: setup-zig 34 | uses: mlugg/setup-zig@v1 35 | with: 36 | version: 0.13.0 37 | 38 | - name: test 39 | run: | 40 | zig build test 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.zig-cache 2 | /zig-out 3 | /test-state 4 | /.vscode 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | Build and use RocksDB in Zig.
 2 | 
 3 | # Build Dependencies
 4 | 
 5 | `rocksdb-zig` is pinned to [Zig `0.13`](https://ziglang.org/download/), so you will need to have it installed.
 6 | 
 7 | # Usage
 8 | 
 9 | Supported use cases:
10 | - [⬇️](#build-rocksdb) Build a RocksDB static library using the Zig build system.
11 | - [⬇️](#import-rocksdb-c-api-in-the-zig-build-system) Use the RocksDB C API through auto-generated Zig bindings.
12 | - [⬇️](#import-the-zig-bindings-library-using-the-zig-build-system) Import an idiomatic Zig bindings library that wraps RocksDB with hand-written Zig code.
13 | 
14 | ## Build RocksDB
15 | Clone this repository, then run `zig build`.
16 | 
17 | You will find a static `rocksdb` library archive
18 | at `zig-out/lib/librocksdb.a`.
19 | 
20 | You can use this with any language or build system.
21 | 
22 | ## Import RocksDB C API in the Zig Build System
23 | 
24 | Fetch `rocksdb` and save it to your `build.zig.zon`:
25 | ```
26 | $ zig fetch --save=rocksdb https://github.com/Syndica/rocksdb-zig/archive/.tar.gz
27 | ```
28 | 
29 | Add the import to a module:
30 | ```zig
31 | const rocksdb = b.dependency("rocksdb", .{}).module("rocksdb");
32 | exe.root_module.addImport("rocksdb", rocksdb);
33 | ```
34 | 
35 | Import the `rocksdb` module.
36 | ```zig
37 | const rocksdb = @import("rocksdb");
38 | ```
39 | 
40 | ## Import the Zig bindings library using the Zig Build System
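
The bindings wrap the C API with hand-written Zig (see `src/database.zig`). A rough sketch of what usage looks like once the setup steps below are done (the allocator choice, the `example-state` directory name, and the error handling are illustrative, not required by the library):

```zig
const std = @import("std");
const rocksdb = @import("rocksdb-bindings");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Most fallible calls take an out-parameter that receives rocksdb's error string.
    var err_str: ?rocksdb.Data = null;
    defer if (err_str) |e| e.deinit();

    // Open (creating if missing) a database with a single "default" column family.
    var db, const families = try rocksdb.DB.open(
        allocator,
        "example-state",
        .{ .create_if_missing = true },
        null, // null requests just the "default" column family
        &err_str,
    );
    defer db.deinit();
    defer allocator.free(families);
    db = db.withDefaultColumnFamily(families[0].handle);

    try db.put(null, "hello", "world", &err_str);
    if (try db.get(null, "hello", &err_str)) |value| {
        defer value.deinit(); // the value buffer is owned by rocksdb
        std.debug.print("hello -> {s}\n", .{value.data});
    }
}
```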
41 | 42 | Fetch `rocksdb` and save it to your `build.zig.zon`: 43 | ``` 44 | $ zig fetch --save=rocksdb https://github.com/Syndica/rocksdb-zig/archive/.tar.gz 45 | ``` 46 | 47 | Add the import to a module: 48 | ```zig 49 | const rocksdb_bindings = b.dependency("rocksdb", .{}).module("rocksdb-bindings"); 50 | exe.root_module.addImport("rocksdb-bindings", rocksdb_bindings); 51 | ``` 52 | 53 | Import the `rocksdb-bindings` module. 54 | ```zig 55 | const rocksdb = @import("rocksdb-bindings"); 56 | ``` 57 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Build = std.Build; 3 | const ResolvedTarget = Build.ResolvedTarget; 4 | const OptimizeMode = std.builtin.OptimizeMode; 5 | 6 | pub fn build(b: *Build) void { 7 | const target = b.standardTargetOptions(.{}); 8 | const optimize = b.standardOptimizeOption(.{}); 9 | 10 | const test_step = b.step("test", "Run bindings tests"); 11 | 12 | // rocksdb itself as a zig module 13 | const rocksdb_mod = addRocksDB(b, target, optimize); 14 | 15 | // zig bindings library to rocksdb 16 | const bindings_mod = b.addModule("rocksdb-bindings", .{ 17 | .target = target, 18 | .optimize = optimize, 19 | .root_source_file = b.path("src/lib.zig"), 20 | }); 21 | bindings_mod.addImport("rocksdb", rocksdb_mod); 22 | 23 | // tests 24 | const tests = b.addTest(.{ 25 | .target = target, 26 | .optimize = optimize, 27 | .root_source_file = b.path("src/lib.zig"), 28 | }); 29 | tests.root_module.addImport("rocksdb", rocksdb_mod); 30 | test_step.dependOn(&b.addRunArtifact(tests).step); 31 | } 32 | 33 | /// Create a zig module for the bare C++ library by exposing its C api. 34 | /// Builds rocksdb, links it, and translates its headers. 35 | fn addRocksDB( 36 | b: *Build, 37 | target: ResolvedTarget, 38 | optimize: OptimizeMode, 39 | ) *Build.Module { 40 | const rocks_dep = b.dependency("rocksdb", .{}); 41 | 42 | const translate_c = b.addTranslateC(.{ 43 | .root_source_file = rocks_dep.path("include/rocksdb/c.h"), 44 | .target = target, 45 | .optimize = optimize, 46 | }); 47 | const mod = b.addModule("rocksdb", .{ 48 | .root_source_file = translate_c.getOutput(), 49 | .target = target, 50 | .optimize = optimize, 51 | .link_libc = true, 52 | .link_libcpp = true, 53 | }); 54 | 55 | const librocksdb_a = b.addStaticLibrary(.{ 56 | .name = "rocksdb", 57 | .target = target, 58 | .optimize = optimize, 59 | }); 60 | const librocksdb_so = b.addSharedLibrary(.{ 61 | .name = "rocksdb", 62 | .target = target, 63 | .optimize = optimize, 64 | }); 65 | 66 | try buildRocksDB(b, librocksdb_a, target); 67 | try buildRocksDB(b, librocksdb_so, target); 68 | 69 | mod.addIncludePath(rocks_dep.path("include")); 70 | mod.linkLibrary(librocksdb_a); 71 | 72 | return mod; 73 | } 74 | 75 | /// The build process for rocksdb itself. 
Works for both static and shared libraries.
 76 | fn buildRocksDB(
 77 |     b: *Build,
 78 |     librocksdb: *std.Build.Step.Compile,
 79 |     target: std.Build.ResolvedTarget,
 80 | ) !void {
 81 |     const t = target.result;
 82 |     const rocks_dep = b.dependency("rocksdb", .{});
 83 | 
 84 |     librocksdb.linkLibC();
 85 |     librocksdb.linkLibCpp();
 86 | 
 87 |     librocksdb.addIncludePath(rocks_dep.path("include"));
 88 |     librocksdb.addIncludePath(rocks_dep.path("."));
 89 |     librocksdb.addCSourceFiles(.{
 90 |         .root = rocks_dep.path("."),
 91 |         .files = &.{
 92 |             "cache/cache.cc",
 93 |             "cache/cache_entry_roles.cc",
 94 |             "cache/cache_key.cc",
 95 |             "cache/cache_helpers.cc",
 96 |             "cache/cache_reservation_manager.cc",
 97 |             "cache/charged_cache.cc",
 98 |             "cache/clock_cache.cc",
 99 |             "cache/compressed_secondary_cache.cc",
100 |             "cache/lru_cache.cc",
101 |             "cache/secondary_cache.cc",
102 |             "cache/secondary_cache_adapter.cc",
103 |             "cache/sharded_cache.cc",
104 |             "cache/tiered_secondary_cache.cc",
105 |             "db/arena_wrapped_db_iter.cc",
106 |             "db/attribute_group_iterator_impl.cc",
107 |             "db/blob/blob_contents.cc",
108 |             "db/blob/blob_fetcher.cc",
109 |             "db/blob/blob_file_addition.cc",
110 |             "db/blob/blob_file_builder.cc",
111 |             "db/blob/blob_file_cache.cc",
112 |             "db/blob/blob_file_garbage.cc",
113 |             "db/blob/blob_file_meta.cc",
114 |             "db/blob/blob_file_reader.cc",
115 |             "db/blob/blob_garbage_meter.cc",
116 |             "db/blob/blob_log_format.cc",
117 |             "db/blob/blob_log_sequential_reader.cc",
118 |             "db/blob/blob_log_writer.cc",
119 |             "db/blob/blob_source.cc",
120 |             "db/blob/prefetch_buffer_collection.cc",
121 |             "db/builder.cc",
122 |             "db/c.cc",
123 |             "db/coalescing_iterator.cc",
124 |             "db/column_family.cc",
125 |             "db/compaction/compaction.cc",
126 |             "db/compaction/compaction_iterator.cc",
127 |             "db/compaction/compaction_picker.cc",
128 |             "db/compaction/compaction_job.cc",
129 |             "db/compaction/compaction_picker_fifo.cc",
130 |             "db/compaction/compaction_picker_level.cc",
131 |             "db/compaction/compaction_picker_universal.cc",
132 |             "db/compaction/compaction_service_job.cc",
133 |             "db/compaction/compaction_state.cc",
134 |             "db/compaction/compaction_outputs.cc",
135 |             "db/compaction/sst_partitioner.cc",
136 |             "db/compaction/subcompaction_state.cc",
137 |             "db/convenience.cc",
138 |             "db/db_filesnapshot.cc",
139 |             "db/db_impl/compacted_db_impl.cc",
140 |             "db/db_impl/db_impl.cc",
141 |             "db/db_impl/db_impl_write.cc",
142 |             "db/db_impl/db_impl_compaction_flush.cc",
143 |             "db/db_impl/db_impl_files.cc",
144 |             "db/db_impl/db_impl_follower.cc",
145 |             "db/db_impl/db_impl_open.cc",
146 |             "db/db_impl/db_impl_debug.cc",
147 |             "db/db_impl/db_impl_experimental.cc",
148 |             "db/db_impl/db_impl_readonly.cc",
149 |             "db/db_impl/db_impl_secondary.cc",
150 |             "db/db_info_dumper.cc",
151 |             "db/db_iter.cc",
152 |             "db/dbformat.cc",
153 |             "db/error_handler.cc",
154 |             "db/event_helpers.cc",
155 |             "db/experimental.cc",
156 |             "db/external_sst_file_ingestion_job.cc",
157 |             "db/file_indexer.cc",
158 |             "db/flush_job.cc",
159 |             "db/flush_scheduler.cc",
160 |             "db/forward_iterator.cc",
161 |             "db/import_column_family_job.cc",
162 |             "db/internal_stats.cc",
163 |             "db/logs_with_prep_tracker.cc",
164 |             "db/log_reader.cc",
165 |             "db/log_writer.cc",
166 |             "db/malloc_stats.cc",
167 |             "db/memtable.cc",
168 |             "db/memtable_list.cc",
169 |             "db/merge_helper.cc",
170 |             "db/merge_operator.cc",
171 |             "db/output_validator.cc",
172 |             "db/periodic_task_scheduler.cc",
173 |             "db/range_del_aggregator.cc",
174 |             "db/range_tombstone_fragmenter.cc",
175 |             "db/repair.cc",
176 |             "db/seqno_to_time_mapping.cc",
177 | 
"db/snapshot_impl.cc", 178 | "db/table_cache.cc", 179 | "db/table_properties_collector.cc", 180 | "db/transaction_log_impl.cc", 181 | "db/trim_history_scheduler.cc", 182 | "db/version_builder.cc", 183 | "db/version_edit.cc", 184 | "db/version_edit_handler.cc", 185 | "db/version_set.cc", 186 | "db/wal_edit.cc", 187 | "db/wal_manager.cc", 188 | "db/wide/wide_column_serialization.cc", 189 | "db/wide/wide_columns.cc", 190 | "db/wide/wide_columns_helper.cc", 191 | "db/write_batch.cc", 192 | "db/write_batch_base.cc", 193 | "db/write_controller.cc", 194 | "db/write_stall_stats.cc", 195 | "db/write_thread.cc", 196 | "env/composite_env.cc", 197 | "env/env.cc", 198 | "env/env_chroot.cc", 199 | "env/env_encryption.cc", 200 | "env/file_system.cc", 201 | "env/file_system_tracer.cc", 202 | "env/fs_on_demand.cc", 203 | "env/fs_remap.cc", 204 | "env/mock_env.cc", 205 | "env/unique_id_gen.cc", 206 | "file/delete_scheduler.cc", 207 | "file/file_prefetch_buffer.cc", 208 | "file/file_util.cc", 209 | "file/filename.cc", 210 | "file/line_file_reader.cc", 211 | "file/random_access_file_reader.cc", 212 | "file/read_write_util.cc", 213 | "file/readahead_raf.cc", 214 | "file/sequence_file_reader.cc", 215 | "file/sst_file_manager_impl.cc", 216 | "file/writable_file_writer.cc", 217 | "logging/auto_roll_logger.cc", 218 | "logging/event_logger.cc", 219 | "logging/log_buffer.cc", 220 | "memory/arena.cc", 221 | "memory/concurrent_arena.cc", 222 | "memory/jemalloc_nodump_allocator.cc", 223 | "memory/memkind_kmem_allocator.cc", 224 | "memory/memory_allocator.cc", 225 | "memtable/alloc_tracker.cc", 226 | "memtable/hash_linklist_rep.cc", 227 | "memtable/hash_skiplist_rep.cc", 228 | "memtable/skiplistrep.cc", 229 | "memtable/vectorrep.cc", 230 | "memtable/write_buffer_manager.cc", 231 | "monitoring/histogram.cc", 232 | "monitoring/histogram_windowing.cc", 233 | "monitoring/in_memory_stats_history.cc", 234 | "monitoring/instrumented_mutex.cc", 235 | "monitoring/iostats_context.cc", 236 | "monitoring/perf_context.cc", 237 | "monitoring/perf_level.cc", 238 | "monitoring/persistent_stats_history.cc", 239 | "monitoring/statistics.cc", 240 | "monitoring/thread_status_impl.cc", 241 | "monitoring/thread_status_updater.cc", 242 | "monitoring/thread_status_util.cc", 243 | "monitoring/thread_status_util_debug.cc", 244 | "options/cf_options.cc", 245 | "options/configurable.cc", 246 | "options/customizable.cc", 247 | "options/db_options.cc", 248 | "options/offpeak_time_info.cc", 249 | "options/options.cc", 250 | "options/options_helper.cc", 251 | "options/options_parser.cc", 252 | "port/mmap.cc", 253 | "port/stack_trace.cc", 254 | "table/adaptive/adaptive_table_factory.cc", 255 | "table/block_based/binary_search_index_reader.cc", 256 | "table/block_based/block.cc", 257 | "table/block_based/block_based_table_builder.cc", 258 | "table/block_based/block_based_table_factory.cc", 259 | "table/block_based/block_based_table_iterator.cc", 260 | "table/block_based/block_based_table_reader.cc", 261 | "table/block_based/block_builder.cc", 262 | "table/block_based/block_cache.cc", 263 | "table/block_based/block_prefetcher.cc", 264 | "table/block_based/block_prefix_index.cc", 265 | "table/block_based/data_block_hash_index.cc", 266 | "table/block_based/data_block_footer.cc", 267 | "table/block_based/filter_block_reader_common.cc", 268 | "table/block_based/filter_policy.cc", 269 | "table/block_based/flush_block_policy.cc", 270 | "table/block_based/full_filter_block.cc", 271 | "table/block_based/hash_index_reader.cc", 272 | 
"table/block_based/index_builder.cc", 273 | "table/block_based/index_reader_common.cc", 274 | "table/block_based/parsed_full_filter_block.cc", 275 | "table/block_based/partitioned_filter_block.cc", 276 | "table/block_based/partitioned_index_iterator.cc", 277 | "table/block_based/partitioned_index_reader.cc", 278 | "table/block_based/reader_common.cc", 279 | "table/block_based/uncompression_dict_reader.cc", 280 | "table/block_fetcher.cc", 281 | "table/cuckoo/cuckoo_table_builder.cc", 282 | "table/cuckoo/cuckoo_table_factory.cc", 283 | "table/cuckoo/cuckoo_table_reader.cc", 284 | "table/format.cc", 285 | "table/get_context.cc", 286 | "table/iterator.cc", 287 | "table/merging_iterator.cc", 288 | "table/compaction_merging_iterator.cc", 289 | "table/meta_blocks.cc", 290 | "table/persistent_cache_helper.cc", 291 | "table/plain/plain_table_bloom.cc", 292 | "table/plain/plain_table_builder.cc", 293 | "table/plain/plain_table_factory.cc", 294 | "table/plain/plain_table_index.cc", 295 | "table/plain/plain_table_key_coding.cc", 296 | "table/plain/plain_table_reader.cc", 297 | "table/sst_file_dumper.cc", 298 | "table/sst_file_reader.cc", 299 | "table/sst_file_writer.cc", 300 | "table/table_factory.cc", 301 | "table/table_properties.cc", 302 | "table/two_level_iterator.cc", 303 | "table/unique_id.cc", 304 | "test_util/sync_point.cc", 305 | "test_util/sync_point_impl.cc", 306 | "test_util/testutil.cc", 307 | "test_util/transaction_test_util.cc", 308 | "tools/block_cache_analyzer/block_cache_trace_analyzer.cc", 309 | "tools/dump/db_dump_tool.cc", 310 | "tools/io_tracer_parser_tool.cc", 311 | "tools/ldb_cmd.cc", 312 | "tools/ldb_tool.cc", 313 | "tools/sst_dump_tool.cc", 314 | "tools/trace_analyzer_tool.cc", 315 | "trace_replay/block_cache_tracer.cc", 316 | "trace_replay/io_tracer.cc", 317 | "trace_replay/trace_record_handler.cc", 318 | "trace_replay/trace_record_result.cc", 319 | "trace_replay/trace_record.cc", 320 | "trace_replay/trace_replay.cc", 321 | "util/async_file_reader.cc", 322 | "util/cleanable.cc", 323 | "util/coding.cc", 324 | "util/compaction_job_stats_impl.cc", 325 | "util/comparator.cc", 326 | "util/compression.cc", 327 | "util/compression_context_cache.cc", 328 | "util/concurrent_task_limiter_impl.cc", 329 | "util/crc32c.cc", 330 | "util/data_structure.cc", 331 | "util/dynamic_bloom.cc", 332 | "util/hash.cc", 333 | "util/murmurhash.cc", 334 | "util/random.cc", 335 | "util/rate_limiter.cc", 336 | "util/ribbon_config.cc", 337 | "util/slice.cc", 338 | "util/file_checksum_helper.cc", 339 | "util/status.cc", 340 | "util/stderr_logger.cc", 341 | "util/string_util.cc", 342 | "util/thread_local.cc", 343 | "util/threadpool_imp.cc", 344 | "util/udt_util.cc", 345 | "util/write_batch_util.cc", 346 | "util/xxhash.cc", 347 | "utilities/agg_merge/agg_merge.cc", 348 | "utilities/backup/backup_engine.cc", 349 | "utilities/blob_db/blob_compaction_filter.cc", 350 | "utilities/blob_db/blob_db.cc", 351 | "utilities/blob_db/blob_db_impl.cc", 352 | "utilities/blob_db/blob_db_impl_filesnapshot.cc", 353 | "utilities/blob_db/blob_dump_tool.cc", 354 | "utilities/blob_db/blob_file.cc", 355 | "utilities/cache_dump_load.cc", 356 | "utilities/cache_dump_load_impl.cc", 357 | "utilities/cassandra/cassandra_compaction_filter.cc", 358 | "utilities/cassandra/format.cc", 359 | "utilities/cassandra/merge_operator.cc", 360 | "utilities/checkpoint/checkpoint_impl.cc", 361 | "utilities/compaction_filters.cc", 362 | "utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc", 363 | "utilities/counted_fs.cc", 364 | 
"utilities/debug.cc", 365 | "utilities/env_mirror.cc", 366 | "utilities/env_timed.cc", 367 | "utilities/fault_injection_env.cc", 368 | "utilities/fault_injection_fs.cc", 369 | "utilities/fault_injection_secondary_cache.cc", 370 | "utilities/leveldb_options/leveldb_options.cc", 371 | "utilities/memory/memory_util.cc", 372 | "utilities/merge_operators.cc", 373 | "utilities/merge_operators/bytesxor.cc", 374 | "utilities/merge_operators/max.cc", 375 | "utilities/merge_operators/put.cc", 376 | "utilities/merge_operators/sortlist.cc", 377 | "utilities/merge_operators/string_append/stringappend.cc", 378 | "utilities/merge_operators/string_append/stringappend2.cc", 379 | "utilities/merge_operators/uint64add.cc", 380 | "utilities/object_registry.cc", 381 | "utilities/option_change_migration/option_change_migration.cc", 382 | "utilities/options/options_util.cc", 383 | "utilities/persistent_cache/block_cache_tier.cc", 384 | "utilities/persistent_cache/block_cache_tier_file.cc", 385 | "utilities/persistent_cache/block_cache_tier_metadata.cc", 386 | "utilities/persistent_cache/persistent_cache_tier.cc", 387 | "utilities/persistent_cache/volatile_tier_impl.cc", 388 | "utilities/simulator_cache/cache_simulator.cc", 389 | "utilities/simulator_cache/sim_cache.cc", 390 | "utilities/table_properties_collectors/compact_for_tiering_collector.cc", 391 | "utilities/table_properties_collectors/compact_on_deletion_collector.cc", 392 | "utilities/trace/file_trace_reader_writer.cc", 393 | "utilities/trace/replayer_impl.cc", 394 | "utilities/transactions/lock/lock_manager.cc", 395 | "utilities/transactions/lock/point/point_lock_tracker.cc", 396 | "utilities/transactions/lock/point/point_lock_manager.cc", 397 | "utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc", 398 | "utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc", 399 | "utilities/transactions/optimistic_transaction_db_impl.cc", 400 | "utilities/transactions/optimistic_transaction.cc", 401 | "utilities/transactions/pessimistic_transaction.cc", 402 | "utilities/transactions/pessimistic_transaction_db.cc", 403 | "utilities/transactions/snapshot_checker.cc", 404 | "utilities/transactions/transaction_base.cc", 405 | "utilities/transactions/transaction_db_mutex_impl.cc", 406 | "utilities/transactions/transaction_util.cc", 407 | "utilities/transactions/write_prepared_txn.cc", 408 | "utilities/transactions/write_prepared_txn_db.cc", 409 | "utilities/transactions/write_unprepared_txn.cc", 410 | "utilities/transactions/write_unprepared_txn_db.cc", 411 | "utilities/types_util.cc", 412 | "utilities/ttl/db_ttl_impl.cc", 413 | "utilities/wal_filter.cc", 414 | "utilities/write_batch_with_index/write_batch_with_index.cc", 415 | "utilities/write_batch_with_index/write_batch_with_index_internal.cc", 416 | "utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc", 417 | "utilities/transactions/lock/range/range_tree/lib/locktree/keyrange.cc", 418 | "utilities/transactions/lock/range/range_tree/lib/locktree/lock_request.cc", 419 | "utilities/transactions/lock/range/range_tree/lib/locktree/locktree.cc", 420 | "utilities/transactions/lock/range/range_tree/lib/locktree/manager.cc", 421 | "utilities/transactions/lock/range/range_tree/lib/locktree/range_buffer.cc", 422 | "utilities/transactions/lock/range/range_tree/lib/locktree/treenode.cc", 423 | "utilities/transactions/lock/range/range_tree/lib/locktree/txnid_set.cc", 424 | "utilities/transactions/lock/range/range_tree/lib/locktree/wfg.cc", 425 | 
"utilities/transactions/lock/range/range_tree/lib/standalone_port.cc", 426 | "utilities/transactions/lock/range/range_tree/lib/util/dbt.cc", 427 | "utilities/transactions/lock/range/range_tree/lib/util/memarena.cc", 428 | }, 429 | .flags = &.{ 430 | "-std=c++17", 431 | "-faligned-new", 432 | "-DHAVE_ALIGNED_NEW", 433 | "-DROCKSDB_UBSAN_RUN", 434 | }, 435 | }); 436 | 437 | // platform dependent stuff 438 | if (t.cpu.arch == .aarch64) { 439 | librocksdb.addCSourceFile(.{ 440 | .file = rocks_dep.path("util/crc32c_arm64.cc"), 441 | .flags = &.{ 442 | "-std=c++17", 443 | "-faligned-new", 444 | "-DHAVE_ALIGNED_NEW", 445 | "-DROCKSDB_UBSAN_RUN", 446 | }, 447 | }); 448 | } 449 | 450 | if (t.os.tag != .windows) { 451 | librocksdb.root_module.addCMacro("ROCKSDB_PLATFORM_POSIX", ""); 452 | librocksdb.root_module.addCMacro("ROCKSDB_LIB_IO_POSIX", ""); 453 | librocksdb.addCSourceFiles(.{ 454 | .root = rocks_dep.path("."), 455 | .files = &.{ 456 | "port/port_posix.cc", 457 | "env/env_posix.cc", 458 | "env/fs_posix.cc", 459 | "env/io_posix.cc", 460 | }, 461 | .flags = &.{ 462 | "-std=c++17", 463 | "-faligned-new", 464 | "-DHAVE_ALIGNED_NEW", 465 | }, 466 | }); 467 | } else { 468 | @panic("TODO: support windows!"); 469 | } 470 | 471 | const os_name = switch (t.os.tag) { 472 | .macos => "OS_MACOSX", 473 | .linux => "OS_LINUX", 474 | else => std.debug.panic("TODO: support target OS '{s}'", .{@tagName(t.os.tag)}), 475 | }; 476 | librocksdb.root_module.addCMacro(os_name, ""); 477 | 478 | const build_version = b.addConfigHeader(.{ 479 | .style = .{ .cmake = rocks_dep.path("util/build_version.cc.in") }, 480 | .include_path = "util/build_version.cc", 481 | }, .{ 482 | .GIT_MOD = 1, 483 | }); 484 | librocksdb.addCSourceFile(.{ .file = build_version.getOutput() }); 485 | 486 | b.installArtifact(librocksdb); 487 | } 488 | -------------------------------------------------------------------------------- /build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = "rocksdb", 3 | .version = "9.7.4", 4 | .dependencies = .{ 5 | .rocksdb = .{ 6 | .url = "https://github.com/Syndica/rocksdb/archive/3793e721c7795b338d9d7808544b75db6bde3548.tar.gz", 7 | .hash = "1220f93ed0f3a9f3cd0a39352c908af2cbcef0ab42f9fff76e3eb39aaaff5570e146", 8 | }, 9 | }, 10 | .paths = .{ 11 | "build.zig", 12 | "build.zig.zon", 13 | "src", 14 | }, 15 | } 16 | -------------------------------------------------------------------------------- /src/batch.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | const lib = @import("lib.zig"); 4 | 5 | const Allocator = std.mem.Allocator; 6 | 7 | const ColumnFamilyHandle = lib.ColumnFamilyHandle; 8 | 9 | pub const WriteBatch = struct { 10 | inner: *rdb.rocksdb_writebatch_t, 11 | 12 | const Self = @This(); 13 | 14 | pub fn init() WriteBatch { 15 | return .{ .inner = rdb.rocksdb_writebatch_create().? 
}; 16 | } 17 | 18 | pub fn deinit(self: WriteBatch) void { 19 | rdb.rocksdb_writebatch_destroy(self.inner); 20 | } 21 | 22 | pub fn put( 23 | self: *const Self, 24 | column_family: ColumnFamilyHandle, 25 | key: []const u8, 26 | value: []const u8, 27 | ) void { 28 | rdb.rocksdb_writebatch_put_cf( 29 | self.inner, 30 | column_family, 31 | key.ptr, 32 | key.len, 33 | value.ptr, 34 | value.len, 35 | ); 36 | } 37 | 38 | pub fn delete( 39 | self: *const Self, 40 | column_family: ColumnFamilyHandle, 41 | key: []const u8, 42 | ) void { 43 | rdb.rocksdb_writebatch_delete_cf( 44 | self.inner, 45 | column_family, 46 | key.ptr, 47 | key.len, 48 | ); 49 | } 50 | 51 | pub fn deleteRange( 52 | self: *const Self, 53 | column_family: ColumnFamilyHandle, 54 | start_key: []const u8, 55 | end_key: []const u8, 56 | ) void { 57 | rdb.rocksdb_writebatch_delete_range_cf( 58 | self.inner, 59 | column_family, 60 | start_key.ptr, 61 | start_key.len, 62 | end_key.ptr, 63 | end_key.len, 64 | ); 65 | } 66 | }; 67 | -------------------------------------------------------------------------------- /src/data.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | 4 | const Allocator = std.mem.Allocator; 5 | 6 | /// data that was allocated by rocksdb and must be freed by rocksdb 7 | pub const Data = struct { 8 | data: []const u8, 9 | free: *const fn (?*anyopaque) callconv(.C) void, 10 | 11 | pub fn deinit(self: @This()) void { 12 | self.free(@ptrCast(@constCast(self.data.ptr))); 13 | } 14 | 15 | pub fn format( 16 | self: @This(), 17 | comptime _: []const u8, 18 | options: std.fmt.FormatOptions, 19 | writer: anytype, 20 | ) !void { 21 | try std.fmt.formatBuf(self.data, options, writer); 22 | } 23 | }; 24 | 25 | pub fn copy(allocator: Allocator, in: [*c]const u8) Allocator.Error![]u8 { 26 | return copyLen(allocator, in, std.mem.len(in)); 27 | } 28 | 29 | pub fn copyLen(allocator: Allocator, in: [*c]const u8, len: usize) Allocator.Error![]u8 { 30 | const ret = try allocator.dupe(u8, in[0..len]); 31 | return ret; 32 | } 33 | -------------------------------------------------------------------------------- /src/database.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const rdb = @import("rocksdb"); 3 | const lib = @import("lib.zig"); 4 | 5 | const Allocator = std.mem.Allocator; 6 | const RwLock = std.Thread.RwLock; 7 | 8 | const Data = lib.Data; 9 | const Iterator = lib.Iterator; 10 | const IteratorDirection = lib.IteratorDirection; 11 | const RawIterator = lib.RawIterator; 12 | const WriteBatch = lib.WriteBatch; 13 | 14 | const copy = lib.data.copy; 15 | const copyLen = lib.data.copyLen; 16 | 17 | pub const DB = struct { 18 | db: *rdb.rocksdb_t, 19 | default_cf: ?ColumnFamilyHandle = null, 20 | cf_name_to_handle: *CfNameToHandleMap, 21 | 22 | const Self = @This(); 23 | 24 | pub fn open( 25 | allocator: Allocator, 26 | dir: []const u8, 27 | db_options: DBOptions, 28 | maybe_column_families: ?[]const ColumnFamilyDescription, 29 | err_str: *?Data, 30 | ) (Allocator.Error || error{RocksDBOpen})!struct { Self, []const ColumnFamily } { 31 | const column_families = if (maybe_column_families) |cfs| 32 | cfs 33 | else 34 | &[1]ColumnFamilyDescription{.{ .name = "default" }}; 35 | 36 | const cf_handles = try allocator.alloc(?ColumnFamilyHandle, column_families.len); 37 | defer allocator.free(cf_handles); 38 | 39 | // open database 40 | const db = db: { 41 | const 
cf_options = try allocator.alloc(?*const rdb.rocksdb_options_t, column_families.len);
 42 |             defer allocator.free(cf_options);
 43 |             const cf_names = try allocator.alloc([*c]const u8, column_families.len);
 44 |             defer allocator.free(cf_names);
 45 |             for (column_families, 0..) |cf, i| {
 46 |                 cf_names[i] = @ptrCast(cf.name.ptr);
 47 |                 cf_options[i] = cf.options.convert();
 48 |             }
 49 |             var ch = CallHandler.init(err_str);
 50 |             break :db try ch.handle(rdb.rocksdb_open_column_families(
 51 |                 db_options.convert(),
 52 |                 dir.ptr,
 53 |                 @intCast(cf_names.len),
 54 |                 @ptrCast(cf_names.ptr),
 55 |                 @ptrCast(cf_options.ptr),
 56 |                 @ptrCast(cf_handles.ptr),
 57 |                 &ch.err_str_in,
 58 |             ), error.RocksDBOpen);
 59 |         };
 60 | 
 61 |         // organize column family metadata
 62 |         const cf_list = try allocator.alloc(ColumnFamily, column_families.len);
 63 |         errdefer allocator.free(cf_list);
 64 |         const cf_map = try CfNameToHandleMap.create(allocator);
 65 |         errdefer cf_map.destroy();
 66 |         for (cf_list, 0..) |*cf, i| {
 67 |             const name = try allocator.dupe(u8, column_families[i].name);
 68 |             cf.* = .{
 69 |                 .name = name,
 70 |                 .handle = cf_handles[i].?,
 71 |             };
 72 |             try cf_map.map.put(allocator, name, cf_handles[i].?);
 73 |         }
 74 | 
 75 |         return .{
 76 |             Self{ .db = db.?, .cf_name_to_handle = cf_map },
 77 |             cf_list,
 78 |         };
 79 |     }
 80 | 
 81 |     pub fn withDefaultColumnFamily(self: Self, column_family: ColumnFamilyHandle) Self {
 82 |         return .{
 83 |             .db = self.db,
 84 |             .cf_name_to_handle = self.cf_name_to_handle,
 85 |             .default_cf = column_family,
 86 |         };
 87 |     }
 88 | 
 89 |     /// Closes the database and cleans up this struct's state.
 90 |     pub fn deinit(self: Self) void {
 91 |         self.cf_name_to_handle.destroy();
 92 |         rdb.rocksdb_close(self.db);
 93 |     }
 94 | 
 95 |     /// Delete the entire database from the filesystem. `rocksdb_destroy_db` operates
 96 |     /// on the database path; the database must already be closed before destroying it.
 97 |     pub fn destroy(dir: []const u8, err_str: *?Data) error{RocksDBDestroy}!void {
 98 |         const options = rdb.rocksdb_options_create(); defer rdb.rocksdb_options_destroy(options);
 99 |         var ch = CallHandler.init(err_str); try ch.handle(rdb.rocksdb_destroy_db(options, dir.ptr, &ch.err_str_in), error.RocksDBDestroy);
100 |     }
101 |     pub fn createColumnFamily(
102 |         self: *Self,
103 |         name: []const u8,
104 |         err_str: *?Data,
105 |     ) !ColumnFamilyHandle {
106 |         const options = rdb.rocksdb_options_create(); defer rdb.rocksdb_options_destroy(options);
107 |         const name_z = try self.cf_name_to_handle.allocator.dupeZ(u8, name); defer self.cf_name_to_handle.allocator.free(name_z); // the C API expects a NUL-terminated name
108 |         var ch = CallHandler.init(err_str);
109 |         const handle = (try ch.handle(rdb.rocksdb_create_column_family(
110 |             self.db,
111 |             options,
112 |             name_z.ptr,
113 |             &ch.err_str_in,
114 |         ), error.RocksDBCreateColumnFamily)).?;
115 |         try self.cf_name_to_handle.put(name, handle);
116 |         return handle;
117 |     }
118 |     pub fn columnFamily(
119 |         self: *const Self,
120 |         cf_name: []const u8,
121 |     ) error{UnknownColumnFamily}!ColumnFamilyHandle {
122 |         return self.cf_name_to_handle.get(cf_name) orelse error.UnknownColumnFamily;
123 |     }
124 | 
125 |     pub fn put(
126 |         self: *const Self,
127 |         column_family: ?ColumnFamilyHandle,
128 |         key: []const u8,
129 |         value: []const u8,
130 |         err_str: *?Data,
131 |     ) error{RocksDBPut}!void {
132 |         const options = rdb.rocksdb_writeoptions_create();
133 |         defer rdb.rocksdb_writeoptions_destroy(options);
134 |         var ch = CallHandler.init(err_str);
135 |         try ch.handle(rdb.rocksdb_put_cf(
136 |             self.db,
137 |             options,
138 |             column_family orelse self.default_cf,
139 |             key.ptr,
140 |             key.len,
141 |             value.ptr,
142 |             value.len,
143 |             &ch.err_str_in,
144 |         ), error.RocksDBPut);
145 |     }
146 | 
147 |     pub fn get(
148 |         self: *const Self,
149 |         column_family: ?ColumnFamilyHandle,
150 |         key: []const u8,
151 |         err_str: *?Data,
152 |     ) error{RocksDBGet}!?Data {
153 |         var valueLength: usize = 0;
154 |         const options = rdb.rocksdb_readoptions_create();
155 |         defer rdb.rocksdb_readoptions_destroy(options);
156 |         var ch = CallHandler.init(err_str);
157 |         const value = try ch.handle(rdb.rocksdb_get_cf(
158 |             self.db,
159 |             options,
160 |             column_family orelse self.default_cf,
161 |             key.ptr,
162 |             key.len,
163 |             &valueLength,
164 |             &ch.err_str_in,
165 |         ), error.RocksDBGet);
166 |         if (value == null) {
167 |             return null;
168 |         }
169 |         return .{
170 |             .free = rdb.rocksdb_free,
171 |             .data = value[0..valueLength],
172 |         };
173 |     }
174 | 
175 |     pub fn delete(
176 |         self: *const Self,
177 |         column_family: ?ColumnFamilyHandle,
178 |         key: []const u8,
179 |         err_str: *?Data,
180 |     ) error{RocksDBDelete}!void {
181 |         const options = rdb.rocksdb_writeoptions_create();
182 |         defer rdb.rocksdb_writeoptions_destroy(options);
183 |         var ch = CallHandler.init(err_str);
184 |         try ch.handle(rdb.rocksdb_delete_cf(
185 |             self.db,
186 |             options,
187 |             column_family orelse self.default_cf,
188 |             key.ptr,
189 |             key.len,
190 |             &ch.err_str_in,
191 |         ), error.RocksDBDelete);
192 |     }
193 | 
194 |     pub fn deleteFilesInRange(
195 |         self: *const Self,
196 |         column_family: ?ColumnFamilyHandle,
197 |         start_key: []const u8,
198 |         limit_key: []const u8,
199 |         err_str: *?Data,
200 |     ) error{RocksDBDeleteFilesInRange}!void {
201 |         var ch = CallHandler.init(err_str);
202 |         try ch.handle(rdb.rocksdb_delete_file_in_range_cf(
203 |             self.db,
204 |             column_family orelse self.default_cf,
205 |             @ptrCast(start_key.ptr),
206 |             start_key.len,
207 |             @ptrCast(limit_key.ptr),
208 |             limit_key.len,
209 |             &ch.err_str_in,
210 |         ), error.RocksDBDeleteFilesInRange);
211 |     }
212 | 
213 |     pub fn iterator(
214 |         self: *const Self,
215 |         column_family: ?ColumnFamilyHandle,
216 |         direction: IteratorDirection,
217 |         start: ?[]const u8,
218 |     ) Iterator {
219 |         const it = self.rawIterator(column_family);
220 |         if (start) |seek_target| switch (direction) {
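            // position the iterator at the requested start key; a reverse scan lands on the last key at or before it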
221 | .forward => it.seek(seek_target), 222 | .reverse => it.seekForPrev(seek_target), 223 | } else switch (direction) { 224 | .forward => it.seekToFirst(), 225 | .reverse => it.seekToLast(), 226 | } 227 | return .{ 228 | .raw = it, 229 | .direction = direction, 230 | .done = false, 231 | }; 232 | } 233 | 234 | pub fn rawIterator( 235 | self: *const Self, 236 | column_family: ?ColumnFamilyHandle, 237 | ) RawIterator { 238 | const options = rdb.rocksdb_readoptions_create(); 239 | defer rdb.rocksdb_readoptions_destroy(options); // TODO does this need to outlive the iterator? 240 | const inner_iter = rdb.rocksdb_create_iterator_cf( 241 | self.db, 242 | options, 243 | column_family orelse self.default_cf, 244 | ).?; 245 | const ri = RawIterator{ .inner = inner_iter }; 246 | return ri; 247 | } 248 | 249 | pub fn liveFiles(self: *const Self, allocator: Allocator) Allocator.Error!std.ArrayList(LiveFile) { 250 | const files = rdb.rocksdb_livefiles(self.db).?; 251 | const num_files: usize = @intCast(rdb.rocksdb_livefiles_count(files)); 252 | var livefiles = std.ArrayList(LiveFile).init(allocator); 253 | var key_size: usize = 0; 254 | for (0..num_files) |i| { 255 | const file_num: c_int = @intCast(i); 256 | try livefiles.append(.{ 257 | .allocator = allocator, 258 | .column_family_name = try copy(allocator, rdb.rocksdb_livefiles_column_family_name(files, file_num)), 259 | .name = try copy(allocator, rdb.rocksdb_livefiles_name(files, file_num)), 260 | .size = rdb.rocksdb_livefiles_size(files, file_num), 261 | .level = rdb.rocksdb_livefiles_level(files, file_num), 262 | .start_key = try copyLen(allocator, rdb.rocksdb_livefiles_smallestkey(files, file_num, &key_size), key_size), 263 | .end_key = try copyLen(allocator, rdb.rocksdb_livefiles_largestkey(files, file_num, &key_size), key_size), 264 | .num_entries = rdb.rocksdb_livefiles_entries(files, file_num), 265 | .num_deletions = rdb.rocksdb_livefiles_deletions(files, file_num), 266 | }); 267 | } 268 | rdb.rocksdb_livefiles_destroy(files); 269 | return livefiles; 270 | } 271 | 272 | pub fn propertyValueCf( 273 | self: *const Self, 274 | column_family: ?ColumnFamilyHandle, 275 | propname: []const u8, 276 | ) Data { 277 | const value = rdb.rocksdb_property_value_cf( 278 | self.db, 279 | column_family orelse self.default_cf, 280 | @ptrCast(propname.ptr), 281 | ); 282 | return .{ 283 | .data = std.mem.span(value), 284 | .free = rdb.rocksdb_free, 285 | }; 286 | } 287 | 288 | pub fn write( 289 | self: *const Self, 290 | batch: WriteBatch, 291 | err_str: *?Data, 292 | ) error{RocksDBWrite}!void { 293 | const options = rdb.rocksdb_writeoptions_create(); 294 | defer rdb.rocksdb_writeoptions_destroy(options); 295 | var ch = CallHandler.init(err_str); 296 | try ch.handle(rdb.rocksdb_write( 297 | self.db, 298 | options, 299 | batch.inner, 300 | &ch.err_str_in, 301 | ), error.RocksDBWrite); 302 | } 303 | 304 | pub fn flush( 305 | self: *const Self, 306 | column_family: ?ColumnFamilyHandle, 307 | err_str: *?Data, 308 | ) error{RocksDBFlush}!void { 309 | const options = rdb.rocksdb_flushoptions_create(); 310 | defer rdb.rocksdb_flushoptions_destroy(options); 311 | var ch = CallHandler.init(err_str); 312 | const e = error.RocksDBFlush; 313 | if (column_family) |cf| 314 | try ch.handle(rdb.rocksdb_flush_cf(self.db, options, cf, &ch.err_str_in), e) 315 | else 316 | try ch.handle(rdb.rocksdb_flush(self.db, options, &ch.err_str_in), e); 317 | } 318 | }; 319 | 320 | pub const DBOptions = struct { 321 | /// If true, the database will be created if it is missing. 
322 |     /// Default: false
323 |     create_if_missing: bool = false,
324 | 
325 |     /// If true, missing column families will be automatically created on
326 |     /// DB::Open().
327 |     /// Default: false
328 |     create_missing_column_families: bool = false,
329 | 
330 |     /// Number of open files that can be used by the DB. You may need to
331 |     /// increase this if your database has a large working set. Value -1 means
332 |     /// files opened are always kept open. You can estimate number of files based
333 |     /// on target_file_size_base and target_file_size_multiplier for level-based
334 |     /// compaction. For universal-style compaction, you can usually set it to -1.
335 |     ///
336 |     /// A high value or -1 for this option can cause high memory usage.
337 |     /// See BlockBasedTableOptions::cache_usage_options to constrain
338 |     /// memory usage in case of block based table format.
339 |     ///
340 |     /// Default: -1
341 |     ///
342 |     /// Dynamically changeable through SetDBOptions() API.
343 |     max_open_files: i32 = -1,
344 | 
345 |     fn convert(do: DBOptions) *rdb.struct_rocksdb_options_t {
346 |         const ro = rdb.rocksdb_options_create().?;
347 |         rdb.rocksdb_options_set_create_if_missing(ro, @intFromBool(do.create_if_missing));
348 |         rdb.rocksdb_options_set_create_missing_column_families(ro, @intFromBool(do.create_missing_column_families));
349 |         rdb.rocksdb_options_set_max_open_files(ro, do.max_open_files);
350 | 
351 |         return ro;
352 |     }
353 | };
354 | 
355 | test "DBOptions defaults" {
356 |     try testDBOptions(DBOptions{}, rdb.rocksdb_options_create().?);
357 | }
358 | 
359 | test "DBOptions custom" {
360 |     const subject = DBOptions{
361 |         .create_if_missing = true,
362 |         .create_missing_column_families = true,
363 |         .max_open_files = 1234,
364 |     };
365 | 
366 |     const expected = rdb.rocksdb_options_create().?;
367 |     rdb.rocksdb_options_set_create_if_missing(expected, 1);
368 |     rdb.rocksdb_options_set_create_missing_column_families(expected, 1);
369 |     rdb.rocksdb_options_set_max_open_files(expected, 1234);
370 | 
371 |     try testDBOptions(subject, expected);
372 | }
373 | 
374 | fn testDBOptions(test_subject: DBOptions, expected: *rdb.struct_rocksdb_options_t) !void {
375 |     const actual = test_subject.convert();
376 | 
377 |     inline for (@typeInfo(DBOptions).Struct.fields) |field| {
378 |         const getter = "rocksdb_options_get_" ++ field.name;
379 |         const expected_value = @call(.auto, @field(rdb, getter), .{expected});
380 |         const actual_value = @call(.auto, @field(rdb, getter), .{actual});
381 |         try std.testing.expectEqual(expected_value, actual_value);
382 |     }
383 | }
384 | 
385 | pub const ColumnFamilyDescription = struct {
386 |     name: []const u8,
387 |     options: ColumnFamilyOptions = .{},
388 | };
389 | 
390 | pub const ColumnFamily = struct {
391 |     name: []const u8,
392 |     handle: ColumnFamilyHandle,
393 | };
394 | 
395 | pub const ColumnFamilyHandle = *rdb.rocksdb_column_family_handle_t;
396 | 
397 | pub const ColumnFamilyOptions = struct {
398 |     fn convert(_: ColumnFamilyOptions) *rdb.struct_rocksdb_options_t {
399 |         return rdb.rocksdb_options_create().?;
400 |     }
401 | };
402 | 
403 | /// The metadata that describes an SST file
404 | pub const LiveFile = struct {
405 |     allocator: Allocator,
406 |     /// Name of the column family the file belongs to
407 |     column_family_name: []const u8,
408 |     /// Name of the file
409 |     name: []const u8,
410 |     /// Size of the file
411 |     size: usize,
412 |     /// Level at which this file resides
413 |     level: i32,
414 |     /// Smallest user defined key in the file
415 |     start_key: ?[]const u8,
416 |     /// Largest user defined key in the file
417 |     end_key: ?[]const u8,
418 |     /// Number of entries/alive keys in the file
419 |     num_entries: u64,
420 |     /// Number of deletions/tombstone keys in the file
421 |     num_deletions: u64,
422 | 
423 |     pub fn deinit(self: LiveFile) void {
424 |         self.allocator.free(self.column_family_name);
425 |         self.allocator.free(self.name);
426 |         if (self.start_key) |start_key| self.allocator.free(start_key);
427 |         if (self.end_key) |end_key| self.allocator.free(end_key);
428 |     }
429 | };
430 | 
431 | const CallHandler = struct {
432 |     /// The error string to pass into rocksdb.
433 |     err_str_in: ?[*:0]u8 = null,
434 |     /// The user's error string.
435 |     err_str_out: *?Data,
436 | 
437 |     fn init(err_str_out: *?Data) @This() {
438 |         return .{ .err_str_out = err_str_out };
439 |     }
440 | 
441 |     fn errIn(self: *@This()) [*c][*c]u8 {
442 |         return @ptrCast(&self.err_str_in);
443 |     }
444 | 
445 |     fn handle(
446 |         self: *@This(),
447 |         ret: anytype,
448 |         comptime err: anytype,
449 |     ) @TypeOf(err)!@TypeOf(ret) {
450 |         if (self.err_str_in) |s| {
451 |             self.err_str_out.* = .{
452 |                 .data = std.mem.span(s),
453 |                 .free = rdb.rocksdb_free,
454 |             };
455 |             return err;
456 |         } else {
457 |             return ret;
458 |         }
459 |     }
460 | };
461 | 
462 | const CfNameToHandleMap = struct {
463 |     allocator: Allocator,
464 |     map: std.StringHashMapUnmanaged(ColumnFamilyHandle),
465 |     lock: RwLock,
466 | 
467 |     const Self = @This();
468 | 
469 |     fn create(allocator: Allocator) Allocator.Error!*Self {
470 |         const self = try allocator.create(Self);
471 |         self.* = .{
472 |             .allocator = allocator,
473 |             .map = .{},
474 |             .lock = .{},
475 |         };
476 |         return self;
477 |     }
478 | 
479 |     fn destroy(self: *Self) void {
480 |         var iter = self.map.iterator();
481 |         while (iter.next()) |entry| {
482 |             rdb.rocksdb_column_family_handle_destroy(entry.value_ptr.*);
483 |             self.allocator.free(entry.key_ptr.*);
484 |         }
485 |         self.map.deinit(self.allocator);
486 |         self.allocator.destroy(self);
487 |     }
488 | 
489 |     fn put(self: *Self, name: []const u8, handle: ColumnFamilyHandle) Allocator.Error!void {
490 |         const owned_name = try self.allocator.dupe(u8, name);
491 | 
492 |         self.lock.lock();
493 |         defer self.lock.unlock();
494 | 
495 |         try self.map.put(self.allocator, owned_name, handle);
496 |     }
497 | 
498 |     fn get(self: *Self, name: []const u8) ?ColumnFamilyHandle {
499 |         self.lock.lockShared();
500 |         defer self.lock.unlockShared();
501 |         return self.map.get(name);
502 |     }
503 | };
504 | 
505 | test DB {
506 |     var err_str: ?Data = null;
507 |     defer if (err_str) |e| e.deinit();
508 |     runTest(&err_str) catch |e| {
509 |         std.debug.print("{}: {?}\n", .{ e, err_str });
510 |         return e;
511 |     };
512 | }
513 | 
514 | fn runTest(err_str: *?Data) !void {
515 |     {
516 |         var db, const families = try DB.open(
517 |             std.testing.allocator,
518 |             "test-state",
519 |             .{
520 |                 .create_if_missing = true,
521 |                 .create_missing_column_families = true,
522 |             },
523 |             &.{
524 |                 .{ .name = "default" },
525 |                 .{ .name = "another" },
526 |             },
527 |             err_str,
528 |         );
529 |         defer db.deinit();
530 |         defer std.testing.allocator.free(families);
531 |         const a_family = families[1].handle;
532 | 
533 |         try db.put(a_family, "hello", "world", err_str);
534 |         try db.put(a_family, "zebra", "world", err_str);
535 | 
536 |         db = db.withDefaultColumnFamily(a_family);
537 | 
538 |         const val = try db.get(null, "hello", err_str);
539 |         try std.testing.expect(std.mem.eql(u8, val.?.data, "world"));
540 | 
541 |         var iter = db.iterator(null, .forward, null);
542 |         defer iter.deinit();
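        // keys come back in sorted order ("hello", then "zebra"); both map to "world"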
543 |         var v = (try iter.nextValue(err_str)).?;
544 |         try std.testing.expect(std.mem.eql(u8, "world", v.data));
545 |         v = (try iter.nextValue(err_str)).?;
546 |         try std.testing.expect(std.mem.eql(u8, "world", v.data));
547 |         try std.testing.expect(null == try iter.next(err_str));
548 | 
549 |         try db.delete(null, "hello", err_str);
550 | 
551 |         const noval = try db.get(null, "hello", err_str);
552 |         try std.testing.expect(null == noval);
553 |     }
554 | 
555 |     var db, const families = try DB.open(
556 |         std.testing.allocator,
557 |         "test-state",
558 |         .{
559 |             .create_if_missing = true,
560 |             .create_missing_column_families = true,
561 |         },
562 |         &.{
563 |             .{ .name = "default" },
564 |             .{ .name = "another" },
565 |         },
566 |         err_str,
567 |     );
568 |     defer db.deinit();
569 |     defer std.testing.allocator.free(families);
570 |     const lfs = try db.liveFiles(std.testing.allocator);
571 |     defer lfs.deinit();
572 |     defer for (lfs.items) |lf| lf.deinit();
573 |     try std.testing.expect(std.mem.eql(u8, "another", lfs.items[0].column_family_name));
574 | }
575 | 
--------------------------------------------------------------------------------
/src/iterator.zig:
--------------------------------------------------------------------------------
 1 | const std = @import("std");
 2 | const rdb = @import("rocksdb");
 3 | const lib = @import("lib.zig");
 4 | 
 5 | const Allocator = std.mem.Allocator;
 6 | 
 7 | const Data = lib.Data;
 8 | 
 9 | fn noopFree(_: ?*anyopaque) callconv(.C) void {} // iterator keys/values are owned by the rocksdb iterator; there is nothing to free
10 | 
11 | pub const Direction = enum { forward, reverse };
12 | 
13 | pub const Iterator = struct {
14 |     raw: RawIterator,
15 |     direction: Direction,
16 |     done: bool,
17 |     is_first: bool = true,
18 | 
19 |     const Self = @This();
20 | 
21 |     pub fn deinit(self: Self) void {
22 |         self.raw.deinit();
23 |     }
24 | 
25 |     pub fn next(self: *Self, err_str: *?Data) error{RocksDBIterator}!?[2]Data {
26 |         return self.nextGeneric([2]Data, RawIterator.entry, err_str);
27 |     }
28 | 
29 |     pub fn nextKey(self: *Self, err_str: *?Data) error{RocksDBIterator}!?Data {
30 |         return self.nextGeneric(Data, RawIterator.key, err_str);
31 |     }
32 | 
33 |     pub fn nextValue(self: *Self, err_str: *?Data) error{RocksDBIterator}!?Data {
34 |         return self.nextGeneric(Data, RawIterator.value, err_str);
35 |     }
36 | 
37 |     fn nextGeneric(
38 |         self: *Self,
39 |         comptime T: type,
40 |         getNext: fn (RawIterator) ?T,
41 |         err_str: *?Data,
42 |     ) error{RocksDBIterator}!?T {
43 |         if (self.done) {
44 |             return null;
45 |         } else {
46 |             // NOTE: we advance the iterator before reading (instead of after), because
47 |             // the previously returned key/value pointers are invalidated by the move.
48 |             if (!self.is_first) {
49 |                 switch (self.direction) {
50 |                     .forward => self.raw.next(),
51 |                     .reverse => self.raw.prev(),
52 |                 }
53 |             }
54 | 
55 |             if (getNext(self.raw)) |item| {
56 |                 self.is_first = false;
57 |                 return item;
58 |             } else {
59 |                 self.done = true;
60 |                 try self.raw.status(err_str);
61 |                 return null;
62 |             }
63 |         }
64 |     }
65 | };
66 | 
67 | pub const RawIterator = struct {
68 |     inner: *rdb.rocksdb_iterator_t,
69 | 
70 |     const Self = @This();
71 | 
72 |     pub fn deinit(self: Self) void {
73 |         rdb.rocksdb_iter_destroy(self.inner);
74 |     }
75 | 
76 |     pub fn seek(self: Self, key_: []const u8) void {
77 |         rdb.rocksdb_iter_seek(self.inner, @ptrCast(key_.ptr), key_.len);
78 |     }
79 | 
80 |     pub fn seekForPrev(self: Self, key_: []const u8) void {
81 |         rdb.rocksdb_iter_seek_for_prev(self.inner, @ptrCast(key_.ptr), key_.len);
82 |     }
83 | 
84 |     pub fn seekToFirst(self: Self) void {
85 |         rdb.rocksdb_iter_seek_to_first(self.inner);
86 |     }
87 | 
88 |     pub fn seekToLast(self: Self) void {
89 |         rdb.rocksdb_iter_seek_to_last(self.inner);
90 |     }
91 | 
92 |     pub fn valid(self: Self) bool {
93 |         return rdb.rocksdb_iter_valid(self.inner) != 0;
94 |     }
95 | 
96 |     pub fn entry(self: Self) ?[2]Data {
97 |         if (self.valid()) {
98 |             return .{ self.keyImpl(), self.valueImpl() };
99 |         } else {
100 |             return null;
101 |         }
102 |     }
103 | 
104 |     pub fn key(self: Self) ?Data {
105 |         if (self.valid()) {
106 |             return self.keyImpl();
107 |         } else {
108 |             return null;
109 |         }
110 |     }
111 | 
112 |     pub fn value(self: Self) ?Data {
113 |         if (self.valid()) {
114 |             return self.valueImpl();
115 |         } else {
116 |             return null;
117 |         }
118 |     }
119 | 
120 |     fn keyImpl(self: Self) Data {
121 |         var len: usize = undefined;
122 |         const ret = rdb.rocksdb_iter_key(self.inner, &len);
123 |         return .{
124 |             .data = ret[0..len],
125 |             .free = noopFree, // the buffer is owned by the iterator and freed with it
126 |         };
127 |     }
128 | 
129 |     fn valueImpl(self: Self) Data {
130 |         var len: usize = undefined;
131 |         const ret = rdb.rocksdb_iter_value(self.inner, &len);
132 |         return .{
133 |             .data = ret[0..len],
134 |             .free = noopFree, // the buffer is owned by the iterator and freed with it
135 |         };
136 |     }
137 | 
138 |     pub fn next(self: Self) void {
139 |         rdb.rocksdb_iter_next(self.inner);
140 |     }
141 | 
142 |     pub fn prev(self: Self) void {
143 |         rdb.rocksdb_iter_prev(self.inner);
144 |     }
145 | 
146 |     pub fn status(self: Self, err_str: *?Data) error{RocksDBIterator}!void {
147 |         var err_str_in: ?[*:0]u8 = null;
148 |         rdb.rocksdb_iter_get_error(self.inner, @ptrCast(&err_str_in));
149 |         if (err_str_in) |s| {
150 |             err_str.* = .{
151 |                 .data = std.mem.span(s),
152 |                 .free = rdb.rocksdb_free,
153 |             };
154 |             return error.RocksDBIterator;
155 |         }
156 |     }
157 | };
158 | 
--------------------------------------------------------------------------------
/src/lib.zig:
--------------------------------------------------------------------------------
 1 | pub const Iterator = iterator.Iterator;
 2 | pub const IteratorDirection = iterator.Direction;
 3 | pub const RawIterator = iterator.RawIterator;
 4 | 
 5 | pub const ColumnFamily = database.ColumnFamily;
 6 | pub const ColumnFamilyDescription = database.ColumnFamilyDescription;
 7 | pub const ColumnFamilyHandle = database.ColumnFamilyHandle;
 8 | pub const ColumnFamilyOptions = database.ColumnFamilyOptions;
 9 | pub const DB = database.DB;
10 | pub const DBOptions = database.DBOptions;
11 | pub const LiveFile = database.LiveFile;
12 | 
13 | pub const Data = data.Data;
14 | 
15 | pub const WriteBatch = batch.WriteBatch;
16 | 
17 | ////////////
18 | // implementation modules (public so they can be referenced directly and tested)
19 | pub const batch = @import("batch.zig");
20 | pub const data = @import("data.zig");
21 | pub const database = @import("database.zig");
22 | pub const iterator = @import("iterator.zig");
23 | 
24 | test {
25 |     const std = @import("std");
26 |     std.testing.refAllDecls(@This());
27 | }
28 | 
--------------------------------------------------------------------------------