├── .github
└── workflows
│ └── main.yml
├── .gitignore
├── LICENSE
├── README.md
├── book
├── .DS_Store
├── 1.png
├── 2.svg
├── 3.svg
├── Untitled (1).svg
├── Untitled-5.tldr
├── Untitled.svg
├── Untitled.tldr
├── design.md
├── p1.svg
├── 无标题-2023-11-28-1233.excalidraw
├── 无标题-2023-11-28-1233.png
└── 无标题.tldr
├── build.zig
├── build.zig.zon
├── example
├── build.zig
├── build.zig.zon
└── src
│ ├── main.zig
│ └── root.zig
└── src
├── assert.zig
├── bucket.zig
├── bucket_test.zig
├── consts.zig
├── cursor.zig
├── cursor_test.zig
├── db.zig
├── db_test.zig
├── error.zig
├── freelist.zig
├── gc.zig
├── main.zig
├── mutex.zig
├── namespace.zig
├── node.zig
├── node_test.zig
├── page.zig
├── pretty_table.zig
├── root.zig
├── tests.zig
├── tx.zig
├── tx_test.zig
└── util.zig
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Zig CI
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: Checkout repository
17 | uses: actions/checkout@v2
18 |
19 | - name: Install Zig
20 | run: |
21 | wget https://ziglang.org/builds/zig-linux-x86_64-0.14.0-dev.2643+fb43e91b2.tar.xz
22 | tar -xf zig-linux-x86_64-0.14.0-dev.2643+fb43e91b2.tar.xz
23 | sudo mv zig-linux-x86_64-0.14.0-dev.2643+fb43e91b2 $HOME/zig
24 | $HOME/zig/zig version
25 |
26 | - name: Build and test
27 | run: |
28 | mkdir dirty
29 | $HOME/zig/zig test src/root.zig
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | dirty
3 | zig-cache
4 | zig-out
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # boltdb-zig
2 |
3 | A pure Zig implementation of BoltDB, an embedded key/value database.
4 |
5 | ## Overview
6 |
7 | boltdb-zig is a Zig port of the original [BoltDB](https://github.com/boltdb/bolt), which is a simple yet powerful embedded key/value database written in Go. It provides a consistent and ACID-compliant data store with the following features:
8 |
9 | - Pure Zig implementation
10 | - Single file backend
11 | - ACID transactions
12 | - Lock-free MVCC
13 | - Nested buckets
14 |
15 | ## Status
16 |
17 | 🚧 This project is currently under development.
18 |
19 | ## Usage
20 |
21 | ```zig
22 | # zig fetch --save git+https://github.com/laohanlinux/boltdb-zig.git
23 | ```
24 |
25 | Add the dependency to your `build.zig.zon` file ([link](example/build.zig.zon)), then wire it up in your `build.zig`:
26 |
27 | ```
28 | const boltdbDep = b.dependency("boltdb-zig", .{
29 | .target = target,
30 | .optimize = optimize,
31 | });
32 |
33 | const exe = b.addExecutable(.{
34 | .name = "example",
35 | .root_source_file = b.path("src/main.zig"),
36 | .target = target,
37 | .optimize = optimize,
38 | });
39 |
40 | exe.root_module.addImport("boltdb", boltdbDep.module("boltdb"));
41 | ```
42 |
43 | How to use?
44 | [link](example/src/main.zig)
45 |
46 | ## License
47 |
48 | This project is licensed under the Apache License, Version 2.0 - see the LICENSE file for details.
49 |
50 | ## Acknowledgments
51 |
52 | - Original [BoltDB](https://github.com/boltdb/bolt)
53 |
54 |
--------------------------------------------------------------------------------
/book/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/laohanlinux/boltdb-zig/a9601497f7dbc89cf61bcf9b55017cbed0497721/book/.DS_Store
--------------------------------------------------------------------------------
/book/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/laohanlinux/boltdb-zig/a9601497f7dbc89cf61bcf9b55017cbed0497721/book/1.png
--------------------------------------------------------------------------------
/book/design.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## 内部结构
4 |
5 | ### 初始化时,Page内部结构
6 |
7 | ```mermaid
8 | block-beta
9 | columns 3
10 | doc("Disk File"):3
11 | space down1<["mmap"]>(down) space
12 |
13 | block:e:3
14 | meta0["id-0 | meta0"]
15 | meta1["id-1 | meta1"]
16 | freelist["id-2 | freelist"] --> root_bucket["id-3 | rootbucket(Leaf Page)"]
17 | end
18 |
19 | block:e:1
20 | File["file"]
21 | end
22 | style root_bucket fill:#d6d,stroke:#333,stroke-width:4px
23 | ```
24 |
25 | Page如下
26 |
27 | ````rust
28 | meta0:page.Page{ .id = 0, .flags = 4, .count = 0, .overflow = 0 }
29 | meta1:page.Page{ .id = 1, .flags = 4, .count = 0, .overflow = 0 }
30 | rootBucket:page.Page{ .id = 3, .flags = 2, .count = 0, .overflow = 0 }
31 | freelist:page.Page{ .id = 2, .flags = 16, .count = 0, .overflow = 0 }
32 | ````
33 |
34 | 
35 |
36 | 创建一个Bucket:*widgets*
37 |
38 | - 创建tx
39 |
40 | ```go
41 | txid = 2,
42 | type Tx struct {
43 | meta *meta = meta1
44 | root Bucket = struct {
45 | tx: self,
46 | rootNode:bucket = {
47 | root = 3,
48 | seq = 0,
49 | }
50 | }
51 | }
52 | ```
53 |
54 |
55 |
56 | ## B+ Tree
57 |
58 | 
59 |
60 | ### Find
61 |
62 | 
63 |
64 | #### Find 20
65 |
66 | - 检索N0,中间节点,fIndex=0,即c0
67 | - 检索N1,中间节点,找到key=20,fIndex=2,即c2
68 | - 检索N5,叶子节点,且检索到20,即找到目标值
69 |
70 | #### Find 53
71 |
72 | - 检索N0,中间节点,fIndex=1,即c1
73 | - 检索N2,中间节点,fIndex=1,即c1
74 | - 检索N6,叶子节点,且未找到53,即未找到目标值
75 |
76 | 检索到的节点为branch,需要二级跳转,
77 |
78 | ## Insert
79 |
80 | 如何找到适合的插入位置:
81 |
82 | - 检索对应的叶子节点
83 |
84 | ```go
85 | index := sort.Search(len(n.inodes), func(i int) bool {
86 | return bytes.Compare(n.inodes[i].key, key) != -1
87 | })
88 | ```
89 |
90 | - 从叶子中找出对应的位置
91 |
92 | ```go
93 | index := sort.Search(len(n.inodes), func(i int) bool {return bytes.Compare(n.inodes[i].key, oldKey) != 1})
94 | ```
95 |
96 | 即找出一个 `inodes[i].key >= key` 的位置。
97 |
98 | - 如果找到的位置inode.key == key,不用*扩容*,直接填充新数据覆盖即可
99 | - 如果找到的位置大于叶子节点数,或者对应的位置inode,key != key,需要扩容,然后填充新数据即可
100 |
101 | 至此,数据已被插入到对应的叶子节点。
102 |
103 | ### Insert 4
104 |
105 | - 检索到N4-5-i0 >= 4
106 | - N4=》4-i0,5-i1,8-i2,9-i3
107 |
108 | ### Insert 100
109 |
110 | - 检索到N8
111 | - N8=〉 90-i0,96-i1,99-i2,100-i3
112 |
113 | ### Insert 37
114 |
115 | - 检索到N7
116 |
117 | ### Rebalance
118 |
119 | 
120 |
121 | 节点平衡发生在tx.Commit时.
122 |
123 | example: 假设插入一个key=4,插入到N4,N4 = 【4-i0,5-i1,8-i2,9-i3】,检索的堆栈如下:`N0 -》 N1 -〉 N4`
124 |
125 | #### 节点平衡
126 |
127 | ```go
128 | // rebalance attempts to balance all nodes.
129 | func (b *Bucket) rebalance() {
130 | for _, n := range b.nodes {
131 | n.rebalance()
132 | }
133 | for _, child := range b.buckets {
134 | child.rebalance()
135 | }
136 | }
137 | ```
138 |
139 | 这个b.nodes从那里来的?比如一个Put操作。
140 |
141 | ```go
142 | // node returns the node that the cursor is currently positioned on.
143 | func (c *Cursor) node() *node {
144 | _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
145 |
146 | // If the top of the stack is a leaf node then just return it.
147 | if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
148 | return ref.node
149 | }
150 |
151 | // Start from root and traverse down the hierarchy.
152 | var n = c.stack[0].node
153 | if n == nil {
154 | n = c.bucket.node(c.stack[0].page.id, nil)
155 | }
156 | for _, ref := range c.stack[:len(c.stack)-1] {
157 | _assert(!n.isLeaf, "expected branch node")
158 | n = n.childAt(ref.index)
159 | }
160 | _assert(n.isLeaf, "expected leaf node")
161 | return n
162 | }
163 | ```
164 |
165 | 具体以一个简单的例子来展示整个过程。
166 |
167 | - 数据初始化,其数据布局
168 |
169 | 
170 |
171 | - 启动一个writable事务,比如db.Update
172 |
173 | 对应的交易对象如下:
174 |
175 | ```go
176 | txid = 2,
177 | type Tx struct {
178 | meta *meta = meta1
179 | Bucket = struct {
180 | tx: self,
181 | rootNode:bucket = {
182 | root = 3,
183 | seq = 0,
184 | }
185 | }
186 | }
187 | ```
188 |
189 | - CreateNewBucket:创建新bucket
190 |
191 | - b.Cursor() 创建游标
192 |
193 | - 如图所示,从root(top bucket),开始检索
194 |
195 | - 未检索到对应的key,即可以新建一个子bucket
196 |
197 | ```go
198 | var bucket = Bucket{
199 | bucket: &bucket{},
200 | rootNode: &node{isLeaf: true},
201 | FillPercent: DefaultFillPercent,
202 | }
203 | ```
204 |
205 | > 1.可以看到新建的bucket是inline bucket,其对应的底层存储也是一个叶子节点。
206 | >
207 | > 2.和其他节点一样,新建的节点使用的是node,而不是page,这里的rootNode也是一样的道理
208 | >
209 | > 3.游标什么时候用node,什么时候用page?如果是“读”操作,只用page即可,如果是“写”操作,用node来替换。因为涉及到内存的变动,比如增加,删除,平衡等。
210 |
211 | - 将bucket写入一块内存中: var value = bucket.write()
212 |
213 | 请参考[这个段落](#Write a bucket Into slice)。
214 |
215 | - 将对应的节点node写入到cursor的合适位置上,就是一个B+树正常的插入过程
216 |
217 | > 注意这里会把检索路径节点都转为node,而不是page
218 |
219 | - 此时这个b+树如何?
220 |
221 | ```mermaid
222 | flowchart TD
223 | n7("i0-widgets(NewBucketName, Leaf Node)")
224 | ```
225 |
226 |
227 |
228 |
229 | #### Buckets平衡
230 |
231 | ### Spill
232 |
233 | ### Merge
234 |
235 | ### Delete
236 |
237 | - Delete a key from a leaf node. After the deletion, if the node is filled to less than half of the fill percent, the node will be merged with its sibling node.
238 | - After the key is deleted, the node's key list is not updated immediately, because the rebalance will be triggered during the commit process. So the deleted key still exists in the node's key list, but it is no longer valid.
239 |
240 | ## Write a bucket Into slice
241 |
242 |
243 |
244 | ```go
245 | // write allocates and writes a bucket to a byte slice.
246 | func (b *Bucket) write() []byte {
247 | // Allocate the appropriate size.
248 | var n = b.rootNode
249 | var value = make([]byte, bucketHeaderSize+n.size())
250 |
251 | // Write a bucket header.
252 | var bucket = (*bucket)(unsafe.Pointer(&value[0]))
253 | *bucket = *b.bucket
254 |
255 | // Convert byte slice to a fake page and write the root node.
256 | var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
257 | n.write(p)
258 |
259 | return value
260 | }
261 | ```
262 |
263 | 底层结构
264 |
265 | ```
266 | |bucketHeader|page|
267 | ```
268 |
269 | ## 事务管理
270 |
271 | ### Example
272 |
273 | - 初始化状态
274 |
275 | ```
276 | tid = 10,
277 | ```
278 |
279 | - 创建2个读事务
280 |
281 | 第一个读事务
282 |
283 | ```
284 | tid = 10, // 读事务,不会递增事务id
285 | ReadTxPtr = 0x100, // 读事务的指针
286 | ```
287 |
288 | 第二个读事务
289 |
290 | ```
291 | tid = 10, // 读事务,不会递增事务id
292 | ReadTxPtr = 0x101, // 读事务的指针
293 | ```
294 |
295 | 假设此时,他们都引用了同一个page,pid = 89
296 |
297 | - 创建一个写事务
298 |
299 | ```
300 | tid = 11,
301 | WriteTxPtr = 0x102, // 写事务的指针
302 | ```
303 |
304 | 该事务也引用了同一个page,pid = 89。
305 |
306 |
307 | *此时*,事务管理器中,有3个事务,2个读事务,1个写事务。
308 |
309 | Bolt如何处理?
310 |
311 | 一种简单的解决办法是:
312 | ```
313 | ### Page生命周期管理
314 |
315 | 对于上述场景中的page 89,Bolt采用以下策略管理其生命周期:
316 |
317 | 1. **引用计数**
318 | - 每个page维护内部引用计数
319 | - 当前page 89有3个引用(2个读事务 + 1个写事务)
320 | - 引用计数确保页面在仍被使用时不会被释放
321 |
322 | 2. **写时复制(Copy-on-Write)**
323 | - 写事务修改page 89时会创建新副本
324 | - 读事务继续使用原始page 89
325 | - 写事务使用新的page副本
326 | - 原始page 89会保留到所有读事务完成
327 |
328 | 3. **页面释放**
329 | - 读事务完成时递减引用计数
330 | - 只有引用计数为0时才释放页面
331 | - 写事务提交后,新页面版本对后续事务可见
332 |
333 | 这种机制确保了:
334 | - 读事务的数据一致性
335 | - 写事务的隔离性
336 | - 高效的内存管理
337 | ```
338 |
339 |
--------------------------------------------------------------------------------
/book/无标题-2023-11-28-1233.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/laohanlinux/boltdb-zig/a9601497f7dbc89cf61bcf9b55017cbed0497721/book/无标题-2023-11-28-1233.png
--------------------------------------------------------------------------------
/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | // Although this function looks imperative, note that its job is to
4 | // declaratively construct a build graph that will be executed by an external
5 | // runner.
6 | pub fn build(b: *std.Build) void {
7 | // Standard target options allows the person running `zig build` to choose
8 | // what target to build for. Here we do not override the defaults, which
9 | // means any target is allowed, and the default is native. Other options
10 | // for restricting supported target set are available.
11 | const target = b.standardTargetOptions(.{});
12 |
13 | // Standard optimization options allow the person running `zig build` to select
14 | // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
15 | // set a preferred release mode, allowing the user to decide how to optimize.
16 | const optimize = b.standardOptimizeOption(.{});
17 |
18 | // Create module
19 | _ = b.addModule("boltdb", .{
20 | .root_source_file = b.path("src/namespace.zig"),
21 | });
22 |
23 | const lib = b.addStaticLibrary(.{
24 | .name = "boltdb-zig",
25 | // In this case the main source file is merely a path, however, in more
26 | // complicated build scripts, this could be a generated file.
27 | .root_source_file = b.path("src/root.zig"),
28 | .target = target,
29 | .optimize = optimize,
30 | });
31 | // This declares intent for the library to be installed into the standard
32 | // location when the user invokes the "install" step (the default step when
33 | // running `zig build`).
34 | b.installArtifact(lib);
35 |
36 | const exe = b.addExecutable(.{
37 | .name = "boltdb-zig",
38 | .root_source_file = b.path("src/main.zig"),
39 | .target = target,
40 | .optimize = optimize,
41 | });
42 |
43 | // This declares intent for the executable to be installed into the
44 | // standard location when the user invokes the "install" step (the default
45 | // step when running `zig build`).
46 | b.installArtifact(exe);
47 |
48 | // This *creates* a Run step in the build graph, to be executed when another
49 | // step is evaluated that depends on it. The next line below will establish
50 | // such a dependency.
51 | const run_cmd = b.addRunArtifact(exe);
52 |
53 | // By making the run step depend on the install step, it will be run from the
54 | // installation directory rather than directly from within the cache directory.
55 | // This is not necessary, however, if the application depends on other installed
56 | // files, this ensures they will be present and in the expected location.
57 | run_cmd.step.dependOn(b.getInstallStep());
58 |
59 | // This allows the user to pass arguments to the application in the build
60 | // command itself, like this: `zig build run -- arg1 arg2 etc`
61 | if (b.args) |args| {
62 | run_cmd.addArgs(args);
63 | }
64 |
65 | // This creates a build step. It will be visible in the `zig build --help` menu,
66 | // and can be selected like this: `zig build run`
67 | // This will evaluate the `run` step rather than the default, which is "install".
68 | const run_step = b.step("run", "Run the app");
69 | run_step.dependOn(&run_cmd.step);
70 |
71 | // Creates a step for unit testing. This only builds the test executable
72 | // but does not run it.
73 | const lib_unit_tests = b.addTest(.{
74 | .root_source_file = b.path("src/root.zig"),
75 | .target = target,
76 | .optimize = optimize,
77 | });
78 |
79 | const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
80 |
81 | const exe_unit_tests = b.addTest(.{
82 | .root_source_file = b.path("src/main.zig"),
83 | .target = target,
84 | .optimize = optimize,
85 | });
86 |
87 | const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
88 |
89 | // Similar to creating the run step earlier, this exposes a `test` step to
90 | // the `zig build --help` menu, providing a way for the user to request
91 | // running the unit tests.
92 | const test_step = b.step("test", "Run unit tests");
93 | test_step.dependOn(&run_lib_unit_tests.step);
94 | test_step.dependOn(&run_exe_unit_tests.step);
95 | }
96 |
--------------------------------------------------------------------------------
/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = "boltdb-zig",
3 | // This is a [Semantic Version](https://semver.org/).
4 | // In a future version of Zig it will be used for package deduplication.
5 | .version = "0.0.0",
6 |
7 | // This field is optional.
8 | // This is currently advisory only; Zig does not yet do anything
9 | // with this value.
10 | //.minimum_zig_version = "0.11.0",
11 |
12 | // This field is optional.
13 | // Each dependency must either provide a `url` and `hash`, or a `path`.
14 | // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
15 | // Once all dependencies are fetched, `zig build` no longer requires
16 | // internet connectivity.
17 | .paths = .{
18 | // This makes *all* files, recursively, included in this package. It is generally
19 | // better to explicitly list the files and directories instead, to insure that
20 | // fetching from tarballs, file system paths, and version control all result
21 | // in the same contents hash.
22 | "",
23 | // For example...
24 | //"build.zig",
25 | //"build.zig.zon",
26 | //"src",
27 | //"LICENSE",
28 | //"README.md",
29 | },
30 | }
31 |
--------------------------------------------------------------------------------
/example/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | // Although this function looks imperative, note that its job is to
4 | // declaratively construct a build graph that will be executed by an external
5 | // runner.
6 | pub fn build(b: *std.Build) void {
7 | // Standard target options allows the person running `zig build` to choose
8 | // what target to build for. Here we do not override the defaults, which
9 | // means any target is allowed, and the default is native. Other options
10 | // for restricting supported target set are available.
11 | const target = b.standardTargetOptions(.{});
12 |
13 | // Standard optimization options allow the person running `zig build` to select
14 | // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
15 | // set a preferred release mode, allowing the user to decide how to optimize.
16 | const optimize = b.standardOptimizeOption(.{});
17 |
18 | const lib = b.addStaticLibrary(.{
19 | .name = "example",
20 | // In this case the main source file is merely a path, however, in more
21 | // complicated build scripts, this could be a generated file.
22 | .root_source_file = b.path("src/root.zig"),
23 | .target = target,
24 | .optimize = optimize,
25 | });
26 |
27 | // This declares intent for the library to be installed into the standard
28 | // location when the user invokes the "install" step (the default step when
29 | // running `zig build`).
30 | b.installArtifact(lib);
31 | const boltdbDep = b.dependency("boltdb-zig", .{
32 | .target = target,
33 | .optimize = optimize,
34 | });
35 |
36 | const exe = b.addExecutable(.{
37 | .name = "example",
38 | .root_source_file = b.path("src/main.zig"),
39 | .target = target,
40 | .optimize = optimize,
41 | });
42 |
43 | exe.root_module.addImport("boltdb", boltdbDep.module("boltdb"));
44 | // This declares intent for the executable to be installed into the
45 | // standard location when the user invokes the "install" step (the default
46 | // step when running `zig build`).
47 | b.installArtifact(exe);
48 |
49 | // This *creates* a Run step in the build graph, to be executed when another
50 | // step is evaluated that depends on it. The next line below will establish
51 | // such a dependency.
52 | const run_cmd = b.addRunArtifact(exe);
53 |
54 | // By making the run step depend on the install step, it will be run from the
55 | // installation directory rather than directly from within the cache directory.
56 | // This is not necessary, however, if the application depends on other installed
57 | // files, this ensures they will be present and in the expected location.
58 | run_cmd.step.dependOn(b.getInstallStep());
59 |
60 | // This allows the user to pass arguments to the application in the build
61 | // command itself, like this: `zig build run -- arg1 arg2 etc`
62 | if (b.args) |args| {
63 | run_cmd.addArgs(args);
64 | }
65 |
66 | // This creates a build step. It will be visible in the `zig build --help` menu,
67 | // and can be selected like this: `zig build run`
68 | // This will evaluate the `run` step rather than the default, which is "install".
69 | const run_step = b.step("run", "Run the app");
70 | run_step.dependOn(&run_cmd.step);
71 |
72 | // Creates a step for unit testing. This only builds the test executable
73 | // but does not run it.
74 | const lib_unit_tests = b.addTest(.{
75 | .root_source_file = b.path("src/root.zig"),
76 | .target = target,
77 | .optimize = optimize,
78 | });
79 | lib_unit_tests.root_module.addImport("boltdb", boltdbDep.module("boltdb"));
80 |
81 | const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
82 |
83 | const exe_unit_tests = b.addTest(.{
84 | .root_source_file = b.path("src/main.zig"),
85 | .target = target,
86 | .optimize = optimize,
87 | });
88 | exe_unit_tests.root_module.addImport("boltdb", boltdbDep.module("boltdb"));
89 |
90 | const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
91 |
92 | // Similar to creating the run step earlier, this exposes a `test` step to
93 | // the `zig build --help` menu, providing a way for the user to request
94 | // running the unit tests.
95 | const test_step = b.step("test", "Run unit tests");
96 | test_step.dependOn(&run_lib_unit_tests.step);
97 | test_step.dependOn(&run_exe_unit_tests.step);
98 | }
99 |
--------------------------------------------------------------------------------
/example/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | // This is the default name used by packages depending on this one. For
  3 | // example, when a user runs `zig fetch --save <url>`, this field is used
4 | // as the key in the `dependencies` table. Although the user can choose a
5 | // different name, most users will stick with this provided value.
6 | //
7 | // It is redundant to include "zig" in this name because it is already
8 | // within the Zig package namespace.
9 | .name = "example",
10 |
11 | // This is a [Semantic Version](https://semver.org/).
12 | // In a future version of Zig it will be used for package deduplication.
13 | .version = "0.0.0",
14 |
15 | // This field is optional.
16 | // This is currently advisory only; Zig does not yet do anything
17 | // with this value.
18 | //.minimum_zig_version = "0.11.0",
19 |
20 | // This field is optional.
21 | // Each dependency must either provide a `url` and `hash`, or a `path`.
22 | // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
23 | // Once all dependencies are fetched, `zig build` no longer requires
24 | // internet connectivity.
25 | .dependencies = .{
26 | .@"boltdb-zig" = .{
27 | .url = "git+https://github.com/laohanlinux/boltdb-zig.git/?ref=align#3ec605eca6a9f85959f81900542fea59cc3eaef6",
28 | .hash = "122056bd20aba7365380a500c315f4a03aa5ea2134685b11fa7e32e312344c28175c",
29 | },
30 | },
31 | .paths = .{
32 | "build.zig",
33 | "build.zig.zon",
34 | "src",
35 | // For example...
36 | //"LICENSE",
37 | //"README.md",
38 | },
39 | }
40 |
--------------------------------------------------------------------------------
/example/src/main.zig:
--------------------------------------------------------------------------------
1 | //! By convention, main.zig is where your main function lives in the case that
2 | //! you are building an executable. If you are making a library, the convention
3 | //! is to delete this file and start with root.zig instead.
4 | const std = @import("std");
5 | const db = @import("boltdb");
6 |
/// Demo entry point: opens a temporary database file and walks through the
/// public API — write/read transactions, buckets, cursors, sequences and
/// stats — logging results along the way.
pub fn main() !void {
    std.testing.log_level = .debug;
    var gpa = std.heap.GeneralPurposeAllocator(.{}).init;
    // Report any leaked allocations when the demo exits. Declared first so it
    // runs last (defers are LIFO), i.e. after the database has been closed.
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    var database = try db.Database.open(allocator, "boltdb.tmp", null, db.defaultOptions);
    defer database.close() catch unreachable;
    // create a bucket
    try struct {
        fn exec(_database: *db.Database) db.Error!void {
            var trans = try _database.begin(true);
            defer trans.commit() catch unreachable;
            var bucket = try trans.createBucketIfNotExists("user");
            try bucket.put("hello", "word");
        }
    }.exec(&database);

    // Get a bucket
    try struct {
        fn exec(_database: *db.Database) db.Error!void {
            var trans = try _database.begin(false);
            defer trans.rollback() catch unreachable;
            var bucket = trans.bucket("user").?;
            const value = bucket.get("hello").?;
            std.log.info("hello value is {s}", .{value});
        }
    }.exec(&database);

    // exec update
    {
        try database.update(struct {
            fn exec(trans: *db.Transaction) db.Error!void {
                var bucket = try trans.createBucketIfNotExists("user");
                try bucket.put("baz", "bar");
                const stats = trans.stats();
                std.log.info("transaction's stats: {any}", .{stats});
            }
        }.exec);

        try database.view(struct {
            fn view(trans: *db.Transaction) db.Error!void {
                var bucket = trans.bucket("user").?;
                const value = bucket.get("baz").?;
                std.log.info("baz value is {s}", .{value});
            }
        }.view);

        try database.viewWithContext({}, struct {
            fn view(_: void, trans: *db.Transaction) db.Error!void {
                var bucket = trans.bucket("user").?;
                const value = bucket.get("baz").?;
                std.log.info("baz value is {s}", .{value});
            }
        }.view);
    }

    // iterator
    {
        try struct {
            fn exec(_database: *db.Database) db.Error!void {
                var trans = try _database.begin(false);
                defer trans.rollback() catch unreachable;
                var cursor = trans.cursor();
                defer cursor.deinit();
                var keyPair = cursor.first();
                while (!keyPair.isNotFound()) {
                    if (keyPair.isBucket()) {
                        std.log.info("iterator DB: this is a bucket: {s}", .{keyPair.key.?});
                    } else {
                        std.log.info("iterator DB: key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                    }
                    keyPair = cursor.next();
                }
            }
        }.exec(&database);
    }
    {
        try struct {
            fn exec(_database: *db.Database) db.Error!void {
                var trans = try _database.begin(true);
                defer trans.commit() catch unreachable;
                var bucket = trans.bucket("user").?;
                std.log.info("Create a new bucket: {s}", .{"address"});
                var newBucket = try bucket.createBucketIfNotExists("date");
                std.log.info("Create a new bucket: {s}", .{"date"});
                // Arena for short-lived formatted strings; freed in one shot.
                var _allocator = std.heap.ArenaAllocator.init(_database.allocator());
                defer _allocator.deinit();
                const onceAllocator = _allocator.allocator();
                const value = std.fmt.allocPrint(onceAllocator, "{d}", .{std.time.timestamp()}) catch unreachable;
                try newBucket.put("laos", "Deloin");
                var cursor = bucket.cursor();
                defer cursor.deinit();
                var keyPair = cursor.first();
                while (!keyPair.isNotFound()) {
                    if (keyPair.isBucket()) {
                        std.log.info("iterator Bucket: this is a bucket: {s}", .{keyPair.key.?});
                    } else {
                        std.log.info("iterator Bucket: key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                    }
                    keyPair = cursor.next();
                }

                try bucket.put("dol", value);
                keyPair = cursor.seek("dol");
                if (keyPair.isNotFound()) {
                    std.log.info("not found key: {s}", .{"dol"});
                } else {
                    try cursor.delete();
                    std.log.info("delete key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                }
                const lastKeyPair = cursor.last();
                std.log.info("last key: {s}, value: {s}", .{ lastKeyPair.key.?, lastKeyPair.value.? });
                const prevKeyPair = cursor.prev();
                std.log.info("prev key: {s}", .{prevKeyPair.key.?});
                const cursorBucket = cursor.bucket();
                std.log.info("cursor's bucket: {any}", .{cursorBucket.stats()});

                try bucket.setSequence(100);
                const seq = try bucket.nextSequence();
                std.log.info("seq: {d}", .{seq});
                const root = bucket.root();
                std.log.info("root: {d}", .{root});
                const stats = trans.stats();
                std.log.info("transaction's stats: {any}", .{stats});
                const bucket_stats = bucket.stats();
                std.log.info("bucket's stats: {any}", .{bucket_stats});
                const writable = bucket.writable();
                std.log.info("bucket's writable: {}", .{writable});
                for (0..100) |i| {
                    try newBucket.put(std.fmt.allocPrint(onceAllocator, "{d}", .{i}) catch unreachable, "value");
                }

                std.log.info("Bucket forEach:", .{});
                try bucket.forEach(struct {
                    fn exec(_: *const db.Bucket, key: []const u8, _value: ?[]const u8) db.Error!void {
                        if (_value == null) {
                            std.log.info("this is a bucket, bucket name: {s}", .{key});
                        } else {
                            std.log.info("key: {s}, value: {s}", .{ key, _value.? });
                        }
                    }
                }.exec);

                std.log.info("Bucket forEach with Context", .{});
                var forEachCount: usize = 0;
                try bucket.forEachWithContext(&forEachCount, struct {
                    fn exec(ctxRef: *usize, _: *const db.Bucket, key: []const u8, _value: ?[]const u8) db.Error!void {
                        ctxRef.* += 1;
                        if (_value == null) {
                            std.log.info("this is a bucket, bucket name: {s}", .{key});
                        } else {
                            std.log.info("key: {s}, value: {s}", .{ key, _value.? });
                        }
                    }
                }.exec);
                std.log.info("forEachCount: {d}", .{forEachCount});
            }
        }.exec(&database);
    }

    {
        const path = database.path();
        std.log.info("database's path: {s}", .{path});
        const str = database.string(allocator);
        defer allocator.free(str);
        std.log.info("database's string: {s}", .{str});
        const isReadOnly = database.isReadOnly();
        std.log.info("database's isReadOnly: {}", .{isReadOnly});
        try database.sync();
    }
}
177 |
--------------------------------------------------------------------------------
/example/src/root.zig:
--------------------------------------------------------------------------------
1 | //! By convention, root.zig is the root source file when making a library. If
2 | //! you are making an executable, the convention is to delete this file and
3 | //! start with main.zig instead.
4 | const std = @import("std");
5 | const testing = std.testing;
/// Adds two 32-bit integers; exported with the C ABI.
/// Overflow traps in safe build modes (Zig's default `+` semantics).
export fn add(a: i32, b: i32) i32 {
    return a + b;
}
9 |
test "basic add functionality" {
    // Sanity-check the exported symbol: 3 + 7 must equal 10.
    try testing.expectEqual(@as(i32, 10), add(3, 7));
}
13 |
--------------------------------------------------------------------------------
/src/assert.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
/// Asserts that `expect` is true. On failure, logs the formatted message
/// built from `fmt` and `args`, then trips a standard debug assertion.
pub inline fn assert(expect: bool, comptime fmt: []const u8, args: anytype) void {
    if (expect) return;
    std.log.err(fmt, args);
    std.debug.assert(expect);
}
10 |
--------------------------------------------------------------------------------
/src/consts.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const util = @import("util.zig");
3 | const assert = util.assert;
4 | const panicFmt = util.panicFmt;
5 | const Page = @import("page.zig").Page;
6 | const Node = @import("node.zig").Node;
/// Represents a marker value to indicate that a file is a Bolt DB.
pub const Magic = 0xED0CDAED;
/// The data file format version.
pub const Version = 2;

/// The largest step that can be taken when remapping the mmap.
pub const MaxMMapStep: u64 = 1 << 30; // 1 GB

/// Default values if not set in a DB instance.
pub const DefaultMaxBatchSize = 1000; // not used yet
// NOTE(review): 10 * ms_per_s evaluates to 10_000 — ten seconds if the unit
// is milliseconds as the trailing comment says; confirm intent before use.
pub const DefaultMaxBatchDelay = 10 * std.time.ms_per_s; // millisecond, not used yet
pub const DefaultAllocSize = 16 * 1024 * 1024;

/// A bucket leaf flag.
pub const BucketLeafFlag: u32 = 0x01;

/// Bounds used to clamp a bucket's fill percent (see calThreshold below).
pub const MinFillPercent: f64 = 0.1;
pub const MaxFillPercent: f64 = 1.0;

/// The maximum length of a key, in bytes
pub const MaxKeySize: usize = 32768;
/// The maximum length of a value, in bytes
pub const MaxValueSize: usize = (1 << 32) - 2;

/// The percentage that split pages are filled.
/// This value can be changed by setting Bucket.FillPercent.
pub const DefaultFillPercent = 0.5;

/// The minimum number of keys in a page.
pub const MinKeysPage: usize = 2;

/// A page flag (on-disk page type tag).
pub const PageFlag = enum(u8) {
    branch = 0x01,
    leaf = 0x02,
    meta = 0x04,
    freeList = 0x10,
};
/// A bucket leaf flag (same value as BucketLeafFlag above).
pub const bucket_leaf_flag: u32 = 0x01;
/// A page id type.
pub const PgidType = u64;
/// A slice of page ids.
pub const PgIds = []PgidType;
/// The size of a page.
pub const PageSize: usize = std.mem.page_size;
// pub const PageSize: usize = 4096;
54 |
/// Represents the options that can be set when opening a database.
pub const Options = packed struct {
    // The amount of time to wait to obtain a file lock.
    // When set to zero it will wait indefinitely. This option is only
    // available on Darwin and Linux.
    timeout: i64 = 0, // unit: nanoseconds

    // Sets the DB.no_grow_sync flag before memory mapping the file.
    noGrowSync: bool = false,

    // Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
    // grab a shared lock (UNIX).
    readOnly: bool = false,

    // Sets the DB.strict_mode flag before memory mapping the file.
    strictMode: bool = false,

    // Sets the DB.mmap_flags before memory mapping the file.
    mmapFlags: isize = 0,

    // The initial mmap size of the database
    // in bytes. Read transactions won't block write transactions
    // if the initial_mmap_size is large enough to hold the database mmap
    // size. (See DB.begin for more information.)
    //
    // If <= 0, the initial map size is 0.
    // If initial_mmap_size is smaller than the previous database size,
    // it takes no effect.
    initialMmapSize: usize = 0,
    // The page size of the database; intended for tests only — do not set
    // it in production.
    pageSize: usize = 0,
};

/// Represents the options used if null options are passed into open().
/// No timeout is used which will cause Bolt to wait indefinitely for a lock.
pub const defaultOptions = Options{
    .timeout = 0,
    .noGrowSync = false,
};
94 |
/// Converts a PageFlag enum value into its numeric (u16) representation.
pub fn intFromFlags(pageFlage: PageFlag) u16 {
    const raw: u16 = @intFromEnum(pageFlage);
    return raw;
}
99 |
/// Converts a numeric flag value back into a PageFlag enum.
/// Panics on any value that is not a known page type.
pub fn toFlags(flag: u16) PageFlag {
    return switch (flag) {
        0x01 => PageFlag.branch,
        0x02 => PageFlag.leaf,
        0x04 => PageFlag.meta,
        0x10 => PageFlag.freeList,
        else => {
            // Log the offending value before aborting, matching assert()'s style.
            assert(false, "invalid flag: {}", .{flag});
            @panic("");
        },
    };
}
120 |
/// Represents the internal transaction identifier.
pub const TxId = u64;

/// A page or node, as resolved by Bucket.pageNode: either the mapped page or
/// the in-memory node form of a B-tree element (callers check `node` first).
pub const PageOrNode = struct {
    page: ?*Page,
    node: ?*Node,
};
129 |
/// A key-value reference: borrowed slices plus the element's flag bits.
pub const KeyValueRef = struct {
    key: ?[]const u8 = null,
    value: ?[]u8 = null,
    flag: u32 = 0,

    /// Returns an owned copy of the key, or null when there is no key.
    /// Caller owns (and must free) the returned slice.
    pub fn dupeKey(self: *const KeyValueRef, allocator: std.mem.Allocator) ?[]const u8 {
        const key = self.key orelse return null;
        return allocator.dupe(u8, key) catch unreachable;
    }
};
142 |
/// A key-value pair as returned by cursor operations.
pub const KeyPair = struct {
    key: ?[]const u8,
    value: ?[]const u8,

    /// Builds a pair from optional key/value slices.
    pub fn init(key: ?[]const u8, value: ?[]const u8) @This() {
        return .{ .key = key, .value = value };
    }

    /// A pair with no key marks "not found" / end of iteration.
    pub fn isNotFound(self: *const KeyPair) bool {
        return self.key == null;
    }

    /// A found pair whose value is null refers to a nested bucket.
    pub fn isBucket(self: *const KeyPair) bool {
        return self.value == null and !self.isNotFound();
    }
};
162 |
/// Calculates the byte threshold before starting a new node: pageSize scaled
/// by fillPercent, with fillPercent clamped to [MinFillPercent, MaxFillPercent].
pub fn calThreshold(fillPercent: f64, pageSize: usize) usize {
    const clamped = std.math.clamp(fillPercent, MinFillPercent, MaxFillPercent);
    const fPageSize: f64 = @floatFromInt(pageSize);
    return @intFromFloat(fPageSize * clamped);
}
170 |
--------------------------------------------------------------------------------
/src/cursor.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const Bucket = @import("bucket.zig").Bucket;
3 | const Node = @import("node.zig").Node;
4 | const INode = @import("node.zig").INode;
5 | const findINodeFn = @import("node.zig").findFn;
6 | const lessThanFn = @import("node.zig").lessThanFn;
7 | const page = @import("page.zig");
8 | const util = @import("util.zig");
9 | const assert = util.assert;
10 | const consts = @import("consts.zig");
11 | const PgidType = consts.PgidType;
12 | const Tuple = consts.Tuple;
13 | const KeyPair = consts.KeyPair;
14 | const KeyValueRef = consts.KeyValueRef;
15 | const Error = @import("error.zig").Error;
16 |
17 | pub const Cursor = struct {
18 | _bucket: *Bucket,
19 | stack: std.ArrayList(ElementRef),
20 |
21 | allocator: std.mem.Allocator,
22 | arenaAllocator: ?std.heap.ArenaAllocator,
23 |
24 | const Self = @This();
25 |
26 | /// Initialize the cursor.
27 | pub fn init(_bt: *Bucket) Self {
28 | const allocator = _bt.getAllocator();
29 | return Cursor{
30 | ._bucket = _bt,
31 | .stack = std.ArrayList(ElementRef).init(allocator),
32 | .allocator = allocator,
33 | .arenaAllocator = null,
34 | };
35 | }
36 |
37 | /// Deinitialize the cursor.
38 | pub fn deinit(self: *Self) void {
39 | self.stack.deinit();
40 | if (self.arenaAllocator) |arenaAllocator| {
41 | arenaAllocator.deinit();
42 | }
43 | }
44 |
    /// Returns the bucket that this cursor was created from.
    pub fn bucket(self: *Self) *Bucket {
        return self._bucket;
    }
48 |
49 | pub fn first(self: *Self) KeyPair {
50 | assert(self._bucket.tx.?.db != null, "tx closed", .{});
51 | self.stack.resize(0) catch unreachable;
52 | const pNode = self._bucket.pageNode(self._bucket._b.?.root);
53 |
54 | {
55 | const ref = ElementRef{ .p = pNode.page, .node = pNode.node, .index = 0 };
56 | self.stack.append(ref) catch unreachable;
57 | _ = self._first();
58 | }
59 | // If we land on an empty page then move to the next value.
60 | // https://github.com/boltdb/bolt/issues/450
61 | if (self.getLastElementRef().?.count() == 0) {
62 | std.log.info("the last element count is 0, try to move to the next", .{});
63 | _ = self._next();
64 | }
65 | const keyValueRet = self.keyValue();
66 | if (keyValueRet.key == null) {
67 | return KeyPair.init(null, null);
68 | }
69 | // Return an error if current value is a bucket.
70 | if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
71 | return KeyPair.init(keyValueRet.key.?, null);
72 | }
73 | return KeyPair.init(keyValueRet.key.?, keyValueRet.value);
74 | }
75 |
76 | pub fn last(self: *Self) KeyPair {
77 | assert(self._bucket.tx.?.db != null, "tx closed", .{});
78 | self.stack.resize(0) catch unreachable;
79 | const pNode = self._bucket.pageNode(self._bucket._b.?.root);
80 | var ref = ElementRef{ .p = pNode.page, .node = pNode.node, .index = 0 };
81 | if (ref.count() > 0) {
82 | ref.index = ref.count() - 1;
83 | }
84 | self.stack.append(ref) catch unreachable;
85 | self._last();
86 | const keyValueRet = self.keyValue();
87 | if (keyValueRet.key == null) {
88 | return KeyPair.init(null, null);
89 | }
90 | // Return an error if current value is a bucket.
91 | if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
92 | return KeyPair.init(keyValueRet.key, null);
93 | }
94 | return KeyPair.init(keyValueRet.key, keyValueRet.value);
95 | }
96 |
97 | pub fn next(
98 | self: *Self,
99 | ) KeyPair {
100 | assert(self._bucket.tx.?.db != null, "tx closed", .{});
101 | const keyValueRet = self._next();
102 | if (keyValueRet.key == null) {
103 | return KeyPair.init(null, null);
104 | }
105 | // Return an error if current value is a bucket.
106 | if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
107 | return KeyPair.init(keyValueRet.key, null);
108 | }
109 | return KeyPair.init(keyValueRet.key, keyValueRet.value);
110 | }
111 |
112 | /// Moves the cursor to the next item in the bucket and returns its key and value.
113 | pub fn tryNext(self: *Self) ?KeyPair {
114 | const keyValueRet = self.next();
115 | if (keyValueRet.isNotFound()) {
116 | return null;
117 | }
118 | return keyValueRet;
119 | }
120 |
121 | pub fn prev(self: *Self) KeyPair {
122 | assert(self._bucket.tx.?.db != null, "tx closed", .{});
123 | // Attempt to move back one element until we're successful.
124 | // Move up the stack as we hit the beginning of each page in our stack.
125 | var i: isize = @as(isize, @intCast(self.stack.items.len)) - 1;
126 | while (i >= 0) : (i -= 1) {
127 | const elem = &self.stack.items[@as(usize, @intCast(i))];
128 | if (elem.index > 0) {
129 | elem.index -= 1;
130 | break;
131 | }
132 | self.stack.resize(@as(usize, @intCast(i))) catch unreachable;
133 | }
134 |
135 | // If we've hit the end then return nil.
136 | if (self.stack.items.len == 0) {
137 | return KeyPair.init(null, null);
138 | }
139 | // Move down the stack to find the last element of the last leaf under this branch.
140 | self._last();
141 |
142 | const keyValueRet = self.keyValue();
143 | if (keyValueRet.key == null) {
144 | return KeyPair.init(null, null);
145 | }
146 | if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
147 | return KeyPair.init(keyValueRet.key, null);
148 | }
149 | return KeyPair.init(keyValueRet.key, keyValueRet.value);
150 | }
151 |
152 | pub fn seek(self: *Self, seekKey: []const u8) KeyPair {
153 | var keyValueRet = self._seek(seekKey);
154 | // If we ended up after the last element of a page then move to the next one.
155 | const ref = self.getLastElementRef().?;
156 | if (ref.index >= ref.count()) {
157 | // the level page has remove all key?
158 | keyValueRet = self._next();
159 | }
160 | if (keyValueRet.key == null) {
161 | return KeyPair.init(null, null);
162 | } else if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
163 | return KeyPair.init(keyValueRet.key, null);
164 | }
165 | return KeyPair.init(keyValueRet.key, keyValueRet.value);
166 | }
167 |
168 | /// Returns the current key and value without moving the cursor.
169 | pub fn getKeyPair(self: *Self) ?KeyValueRef {
170 | const keyValueRet = self.keyValue();
171 | if (keyValueRet.key == null) {
172 | return null;
173 | }
174 | return keyValueRet;
175 | }
176 |
177 | pub fn delete(self: *Self) Error!void {
178 | assert(self._bucket.tx.?.db != null, "tx closed", .{});
179 | if (!self._bucket.tx.?.writable) {
180 | return Error.TxNotWriteable;
181 | }
182 | const keyValueRet = self.keyValue();
183 | // Return an error if current value is a bucket.
184 | if (keyValueRet.flag & consts.BucketLeafFlag != 0) {
185 | return Error.IncompactibleValue;
186 | }
187 |
188 | _ = self.getNode().?.del(keyValueRet.key.?);
189 | }
190 |
    /// Moves the cursor to a given key and returns it.
    /// If the key does not exist then the next key is used.
    pub fn _seek(self: *Self, seekKey: []const u8) KeyValueRef {
        assert(self._bucket.tx.?.db != null, "tx closed", .{});
        // Start from root page/node and traverse to correct page.
        self.stack.resize(0) catch unreachable;
        self.search(seekKey, self._bucket._b.?.root);
        const ref = self.getLastElementRef().?;
        // If the cursor is pointing to the end of page/node then return nil.
        // (When the key is absent, index can equal count even though count > 0.)
        if (ref.index >= ref.count()) {
            return KeyValueRef{ .key = null, .value = null, .flag = 0 };
        }
        // Leaf hit: return the element; bucket entries carry their flag.
        return self.keyValue();
    }
208 |
    /// Moves the cursor to the first leaf element under the last page in the stack.
    fn _first(self: *Self) void {
        while (true) {
            // Exit when we hit a leaf page.
            const ref = self.getLastElementRef().?;
            if (ref.isLeaf()) {
                // The stack now ends at a leaf whose index-0 entry is the first key.
                break;
            }
            // Keep adding pages pointing to the first element to the stack.
            var pgid: PgidType = 0;
            if (ref.node) |n| {
                pgid = n.inodes.items[ref.index].pgid;
            } else {
                assert(ref.index < ref.p.?.count, "the index is out of range, index: {}, count: {}", .{ ref.index, ref.p.?.count });
                pgid = ref.p.?.branchPageElementRef(ref.index).?.pgid;
            }
            const pNode = self._bucket.pageNode(pgid);
            self.stack.append(ElementRef{ .p = pNode.page, .node = pNode.node, .index = 0 }) catch unreachable;
            assert(self.getLastElementRef().?.index == 0, "the index is not 0, index: {}", .{self.getLastElementRef().?.index});
        }
    }
231 |
232 | // Moves the cursor to the last leaf element under that last page in the stack.
233 | fn _last(self: *Self) void {
234 | while (true) {
235 | // Exit when we hit a leaf page.
236 | const ref = self.getLastElementRef().?;
237 | if (ref.isLeaf()) {
238 | break;
239 | }
240 |
241 | // Keep adding pages pointing to the last element in the stack.
242 | var pgid: PgidType = 0;
243 | if (ref.node) |_node| {
244 | pgid = _node.pgid;
245 | } else {
246 | pgid = ref.p.?.branchPageElementRef(ref.index).?.pgid;
247 | }
248 |
249 | const pNode = self._bucket.pageNode(pgid);
250 | var nextRef = ElementRef{ .p = pNode.page, .node = pNode.node, .index = 0 };
251 | nextRef.index = nextRef.count() - 1;
252 | self.stack.append(nextRef) catch unreachable;
253 | }
254 | }
255 |
256 | /// Moves to the next leaf element and returns the key and value.
257 | /// If the cursor is at the last leaf element then it stays there and return null.
258 | pub fn _next(self: *Self) KeyValueRef {
259 | while (true) {
260 | // {
261 | // const elementRef = self.getLastElementRef().?;
262 | // if (elementRef.isLeaf() and elementRef.index == 0) {
263 | // const threshold = consts.calThreshold(self._bucket.fillPercent, self._bucket.tx.?.db.?.pageSize);
264 | // if (elementRef.p) |p| {
265 | // var pSize = page.Page.headerSize();
266 | // for (0..p.count) |index| {
267 | // const leafElement = p.leafPageElement(index).?;
268 | // pSize += page.LeafPageElement.headerSize() + leafElement.kSize + leafElement.vSize;
269 | // }
270 | // assert(pSize <= threshold, "the page size is greater than the threshold, page size: {d}, threshold: {d}, fillPercent: {d}, pgid: {d}, count: {d}, pageSize: {d}", .{ pSize, threshold, self._bucket.fillPercent, p.id, p.count, self._bucket.tx.?.db.?.pageSize });
271 | // }
272 | // }
273 | // }
274 | // Attempt to move over one element until we're successful.
275 | // Move up the stack as we hit the end of each page in our stack.
276 | var i: isize = @as(isize, @intCast(self.stack.items.len - 1));
277 | // std.log.info("the i is {}", .{i});
278 | while (i >= 0) : (i -= 1) {
279 | const elem = &self.stack.items[@as(usize, @intCast(i))];
280 | if ((elem.index + 1) < elem.count()) { // iterate the current inode elements
281 | elem.index += 1;
282 | break;
283 | }
284 | // pop the current page by index that same to pop the current inode from the stack.
285 | }
286 |
287 | // If we've hit the root page then stop and return. This will leave the
288 | // cursor on the last element of the past page.
289 | if (i == -1) {
290 | return KeyValueRef{ .key = null, .value = null, .flag = 0 };
291 | }
292 |
293 | // Otherwise start from where we left off in the stack and find the
294 | // first element of the first leaf page.
295 | self.stack.resize(@as(usize, @intCast(i + 1))) catch unreachable; // TODO
296 | assert(self.stack.items.len == (i + 1), "the stack is empty", .{});
297 | // Fix location
298 | self._first();
299 |
300 | // If this is an empty page then restart and move back up the stack.
301 | if (self.getLastElementRef().?.count() == 0) {
302 | continue;
303 | }
304 | return self.keyValue();
305 | }
306 | }
307 |
    /// Search recursively performs a binary search against a given page/node until it finds a given key.
    /// Pushes one ElementRef per level visited; leaf levels terminate via nsearch().
    pub fn search(self: *Self, key: []const u8, pgid: PgidType) void {
        const pNode = self._bucket.pageNode(pgid);
        const p = pNode.page;
        const n = pNode.node;
        // A materialized page must be either a branch or a leaf page.
        if (p != null and (p.?.flags & (consts.intFromFlags(.branch) | consts.intFromFlags(.leaf)) == 0)) {
            assert(false, "invalid page type, pgid: {}, flags: {}, page: {any}\n", .{ pgid, p.?.flags, p.? });
        }

        const e = ElementRef{ .p = p, .node = n };
        self.stack.append(e) catch unreachable;
        // If we're on a leaf page/node then find the specific node.
        if (e.isLeaf()) {
            // nsearch positions the leaf index at the first key >= `key`.
            self.nsearch(key);
            return;
        }
        // Prefer the in-memory node when present; otherwise use the page.
        if (n) |_node| {
            self.searchNode(key, _node);
            return;
        }
        assert(p.?.id == pgid, "the page id is not equal to the pgid, page id: {}, pgid: {}", .{ p.?.id, pgid });
        self.searchPage(key, p.?);
    }
332 |
    /// Returns the node that the cursor is currently positioned on,
    /// materializing nodes along the stack path when only pages are present.
    pub fn node(self: *Self) ?*Node {
        assert(self.stack.items.len > 0, "accessing a node with a zero-length cursor stack", .{});

        // If the top of the stack is a leaf node then just return it.
        const lastRef = self.getLastElementRef().?;
        if (lastRef.node != null and lastRef.node.?.isLeaf) {
            return lastRef.node;
        }
        // Start from root and traverse down the hierarchy.
        var n: ?*Node = null;
        if (self.stack.items[0].node != null) {
            n = self.stack.items[0].node;
        } else {
            // the root node is not in the stack, so we need to get the root node from the bucket.
            n = self._bucket.node(self.stack.items[0].p.?.id, null);
        }
        // Follow the recorded child index at every branch level.
        for (self.stack.items[0 .. self.stack.items.len - 1]) |ref| {
            assert(!n.?.isLeaf, "expected branch node", .{});
            n = n.?.childAt(ref.index).?;
        }

        assert(n.?.isLeaf, "expect leaf node", .{});
        return n;
    }
361 |
362 | // Search key from nodes.
363 | fn searchNode(self: *Self, key: []const u8, n: *const Node) void {
364 | // const printNodes = struct {
365 | // fn print(curNode: *const Node) void {
366 | // for (curNode.inodes.items, 0..) |iNode, i| {
367 | // const iKey = iNode.getKey().?;
368 | // std.log.debug("i={}, pgid: {d}, key={any}, len={}, iKey = {any}, len={}", .{ i, curNode.pgid, curNode.key.?, curNode.key.?.len, iKey, iKey.len });
369 | // }
370 | // }
371 | // }.print;
372 | // // _ = printNodes;
373 | // printNodes(n);
374 | assert(n.inodes.items.len > 0, "the node is empty", .{});
375 | var indexRef = n.searchInodes(key);
376 | if (!indexRef.exact) {
377 | indexRef.index -= 1;
378 | }
379 | // std.log.debug("find index: {}, current pgid: {d}, current node len: {}, next pgid: {d}", .{ indexRef.index, n.pgid, n.inodes.items.len, n.inodes.items[indexRef.index].pgid });
380 | // Recursively search to the next node.
381 | const lastEntry = self.getLastElementRef().?;
382 | lastEntry.index = indexRef.index;
383 | self.search(key, n.inodes.items[indexRef.index].pgid);
384 | }
385 |
386 | // Search key from pages
387 | fn searchPage(self: *Self, key: []const u8, p: *page.Page) void {
388 | assert(p.flags == consts.intFromFlags(.branch), "the page is not a branch page, page: {any}", .{p});
389 | // Binary search for the correct range.
390 | var elementRef = p.searchBranchElements(key);
391 | if (!elementRef.exact and elementRef.index > 0) {
392 | elementRef.index -= 1;
393 | }
394 | self.getLastElementRef().?.index = elementRef.index;
395 | // Recursively search to the next page.
396 | const nextPgid = p.branchPageElementRef(elementRef.index).?.pgid;
397 | self.search(key, nextPgid);
398 | }
399 |
400 | // Searches the leaf node on the top of the stack for a key
401 | fn nsearch(self: *Self, key: []const u8) void {
402 | const e = self.getLastElementRef().?;
403 | const p = e.p;
404 | const n = e.node;
405 |
406 | // If we have a node then search its inodes.
407 | if (n) |_node| {
408 | const index = std.sort.lowerBound(INode, _node.inodes.items, key, INode.lowerBoundFn);
409 | e.index = index;
410 | return;
411 | }
412 |
413 | // If we have a page then search its leaf elements.
414 | const index = p.?.searchLeafElements(key).index;
415 | e.index = index;
416 | }
417 |
418 | // get the key and value of the cursor.
419 | fn keyValue(self: *Self) KeyValueRef {
420 | const ref = self.getLastElementRef().?;
421 | if (ref.count() == 0 or ref.index >= ref.count()) {
422 | // 1: all key remove of tx, the page's keys are 0,
423 | // 2: index == count indicate not found the key.
424 | return KeyValueRef{ .key = null, .value = null, .flag = 0 };
425 | }
426 |
427 | // Retrieve value from node.
428 | if (ref.node) |refNode| {
429 | const inode = &refNode.inodes.items[ref.index];
430 | return KeyValueRef{ .key = inode.getKey(), .value = inode.getValue(), .flag = inode.flags };
431 | }
432 |
433 | // Or retrieve value from page.
434 | const elem = ref.p.?.leafPageElement(ref.index).?;
435 | return KeyValueRef{ .key = elem.key(), .value = elem.value(), .flag = elem.flags };
436 | }
437 |
    /// Returns the node that the cursor is currently positioned on.
    /// Walks the cursor stack from the root, materializing nodes as needed,
    /// and asserts the result is a leaf.
    fn getNode(self: *Self) ?*Node {
        assert(self.stack.items.len > 0, "accessing a node with a zero-length cursor stack", .{});

        // If the top of the stack is a leaf node then just return it.
        const latestElementRef = self.getLastElementRef().?;
        if (latestElementRef.node != null and latestElementRef.node.?.isLeaf) {
            return latestElementRef.node;
        }
        // Start from root and traverse down the hierarchy.
        var n = self.stack.items[0].node;
        if (n == null) {
            // The root frame only has a page reference; materialize its node so
            // we can walk children in memory.
            // assert(self.stack.items[0].p.?.id > 1, "the page id is not valid, id: {}", .{self.stack.items[0].p.?.id});
            n = self._bucket.node(self.stack.items[0].p.?.id, null);
            std.log.warn("the node is null, so it is the root node at this bucket, pgid: {}", .{self.stack.items[0].p.?.id});
        }
        // find the node from the stack from the top to the bottom.
        // Each frame's index selects which child to descend into; all frames
        // except the last must therefore be branch nodes.
        for (self.stack.items[0..(self.stack.items.len - 1)]) |ref| {
            assert(!n.?.isLeaf, "expected branch node", .{});
            n = n.?.childAt(ref.index);
        }

        assert(n.?.isLeaf, "expect leaf node", .{});
        return n;
    }
463 |
464 | // get the last element reference of the stack.
465 | fn getLastElementRef(self: *Self) ?*ElementRef {
466 | if (self.stack.items.len == 0) {
467 | return null;
468 | }
469 | return &self.stack.items[self.stack.items.len - 1];
470 | }
471 |
472 | fn prettyPrint(self: *const Self) void {
473 | std.log.debug("\t----------- the cursor stack -----------\t", .{});
474 | std.log.debug("the boot root is {}", .{self._bucket._b.?.root});
475 | for (self.stack.items, 0..) |ref, i| {
476 | if (ref.node) |n| {
477 | std.log.debug("index: {}, is a node, pgid: {}, key index: {}", .{ i, n.pgid, ref.index });
478 | } else if (ref.p) |p| {
479 | std.log.debug("index: {}, is a page, id: {}, key index: {}", .{ i, p.id, ref.index });
480 | }
481 | }
482 | std.log.debug("\t----------------------------------------\t", .{});
483 | }
484 | };
485 |
// Represents a reference to an element on a given page/node.
const ElementRef = struct {
    // Backing page; may be null when the element lives on an in-memory node.
    p: ?*page.Page = null,
    // In-memory node; null when the element is served straight from a page
    // (e.g. in a read-only transaction).
    node: ?*Node = null,
    // Position of the referenced element within the page/node.
    index: usize = 0,

    // Heap-allocates a new element reference. Caller owns the returned pointer.
    fn init(allocator: std.mem.Allocator, index: usize, p: ?*page.Page, node: ?*Node) *ElementRef {
        const ref = allocator.create(ElementRef) catch unreachable;
        ref.* = ElementRef{ .p = p, .node = node, .index = index };
        return ref;
    }

    // Returns true if the element is a leaf element.
    inline fn isLeaf(self: *const ElementRef) bool {
        if (self.node) |node| return node.isLeaf;
        return self.p.?.flags & consts.intFromFlags(.leaf) != 0;
    }

    // returns the number of inodes or page elements.
    inline fn count(self: *const ElementRef) usize {
        if (self.node) |node| return node.inodes.items.len;
        return @as(usize, self.p.?.count);
    }

    // Returns the page id backing this element (node's pgid or page's id).
    inline fn pgid(self: *const ElementRef) PgidType {
        if (self.node) |node| return node.pgid;
        return self.p.?.id;
    }
};
529 |
--------------------------------------------------------------------------------
/src/cursor_test.zig:
--------------------------------------------------------------------------------
1 | const tests = @import("tests.zig");
2 | const TX = @import("tx.zig").TX;
3 | const consts = @import("consts.zig");
4 | const Error = @import("error.zig").Error;
5 | const std = @import("std");
6 | const Cursor = @import("cursor.zig").Cursor;
7 | const assert = @import("util.zig").assert;
8 | const DB = @import("db.zig").DB;
9 | const log = std.log.scoped(.cursor_test);
10 |
// Ensure that a cursor can return a reference to the bucket that created it.
test "Cursor_Bucket" {
    std.testing.log_level = .debug;
    var ctx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&ctx);
    const kvDB = ctx.db;

    const checkBucketRef = struct {
        fn update(trx: *TX) Error!void {
            const created = trx.createBucket("widgets") catch unreachable;
            var cur = created.cursor();
            defer cur.deinit();
            // The cursor must point back at the exact bucket that spawned it.
            const fromCursor = cur.bucket();
            std.debug.assert(@intFromPtr(created) == @intFromPtr(fromCursor));
        }
    }.update;
    try kvDB.update(checkBucketRef);
}
29 |
// Ensure that a cursor seeks to the exact key, the next key on an inexact
// match, the first key for an empty seek, null past the end, and a null value
// for sub-bucket keys.
test "Cursor_Seek" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(consts.KeyPair.init("foo", "0001"));
            try b.put(consts.KeyPair.init("bar", "0002"));
            try b.put(consts.KeyPair.init("baz", "0003"));
            _ = try b.createBucket("bkt");
        }
    }.update;
    try kvDB.update(updateFn);

    const viewFn = struct {
        fn view(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var cursor = b.cursor();
            defer cursor.deinit();
            // Exact match should go to the key.
            const kv = cursor.seek("bar");
            assert(std.mem.eql(u8, kv.key.?, "bar"), "the key should be 'bar'", .{});
            assert(std.mem.eql(u8, kv.value.?, "0002"), "the value should be '0002'", .{});

            // Inexact match should go to the next key.
            const kv2 = cursor.seek("bas");
            assert(std.mem.eql(u8, kv2.key.?, "baz"), "the key should be 'baz'", .{});
            assert(std.mem.eql(u8, kv2.value.?, "0003"), "the value should be '0003'", .{});

            // Low key should go to the first key.
            const kv3 = cursor.seek("");
            std.debug.assert(std.mem.eql(u8, kv3.key.?, "bar"));
            std.debug.assert(std.mem.eql(u8, kv3.value.?, "0002"));

            // High key should return no key.
            const kv4 = cursor.seek("zzz");
            std.debug.assert(kv4.key == null);
            std.debug.assert(kv4.value == null);

            // Buckets should return their key but no value.
            const kv5 = cursor.seek("bkt");
            std.debug.assert(std.mem.eql(u8, kv5.key.?, "bkt"));
            std.debug.assert(kv5.value == null);
        }
    }.view;
    try kvDB.view(viewFn);
}
79 |
// Ensure a cursor can delete keys while iterating, that deleting a sub-bucket
// key via the cursor fails with IncompactibleValue, and that the remaining key
// count matches expectations.
test "Cursor_Delete" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;

    const count = 1000;
    // Insert every other key between 0 and $count.
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            for (0..count) |i| {
                const key = try std.fmt.allocPrint(std.testing.allocator, "{0:0>10}", .{i});
                defer std.testing.allocator.free(key);
                const value = try std.fmt.allocPrint(std.testing.allocator, "{0:0>10}", .{count + i});
                defer std.testing.allocator.free(value);
                try b.put(consts.KeyPair.init(key, value));
            }
            _ = try b.createBucket("sub");
        }
    }.update;
    try kvDB.update(updateFn);

    const updateFn2 = struct {
        fn update(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var cursor = b.cursor();
            defer cursor.deinit();

            // Delete every key that sorts below count/2, verifying each
            // deletion is immediately visible through b.get.
            const key = try std.fmt.allocPrint(std.testing.allocator, "{0:0>10}", .{count / 2});
            defer std.testing.allocator.free(key);

            var keyPair = cursor.first();
            while (!keyPair.isNotFound()) {
                if (std.mem.order(u8, keyPair.key.?, key) == .lt) {
                    try cursor.delete();
                    const got = b.get(keyPair.key.?);
                    assert(got == null, "the key should be deleted, key: {s}", .{keyPair.key.?});
                    keyPair = cursor.next();
                    continue;
                }
                break;
            }
            // Deleting a sub-bucket key through the cursor must be rejected.
            _ = cursor.seek("sub");
            const err = cursor.delete();
            assert(err == Error.IncompactibleValue, "the error is not bucket not found error, err: {any}", .{err});
        }
    }.update;
    try kvDB.update(updateFn2);

    // The key-count check below assumes a 16KB page layout; skip otherwise.
    if (kvDB.pageSize != 16 * 1024) {
        std.debug.print("skipping test because page size is not 16KB, it page is: {d}\n", .{kvDB.pageSize});
        return;
    }
    const viewFn = struct {
        fn view(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            const got = b.get("0000000000");
            assert(got == null, "the key should be deleted, key: {s}", .{"0000000000"});
            const stats = b.stats();
            // Half the keys remain, plus the "sub" bucket entry.
            assert(stats.keyN == (count / 2 + 1), "the key number is invalid, pageSize: {d}, keyN: {d}, count: {d}", .{ trx.getDB().pageSize, stats.keyN, count / 2 + 1 });
        }
    }.view;
    try kvDB.view(viewFn);
}
145 |
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
// large number of keys. This test also checks that seek will always move
// forward to the next key.
//
// Related: https://github.com/boltdb/bolt/pull/187
test "Cursor_Seek_Large" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const count: i64 = 1000;
    const kvDB = testCtx.db;
    // Insert every other key between 0 and $count.
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            var i: i64 = 0;
            while (i < count) : (i += 100) {
                var j: i64 = i;
                while (j < i + 100) : (j += 2) {
                    // Keys are 8-byte big-endian integers so byte order matches
                    // numeric order.
                    const key = std.testing.allocator.alloc(u8, 8) catch unreachable;
                    std.mem.writeInt(i64, key[0..8], j, .big);
                    const value = std.testing.allocator.alloc(u8, 100) catch unreachable;
                    try b.put(consts.KeyPair.init(key, value));
                    std.testing.allocator.free(key);
                    std.testing.allocator.free(value);
                }
            }
        }
    }.update;
    try kvDB.update(updateFn);

    const viewFn = struct {
        fn view(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var cursor = b.cursor();
            defer cursor.deinit();
            var keyPair = cursor.first();
            for (0..count) |i| {
                var seek: [8]u8 = undefined;
                const keyNum: i64 = @intCast(i);
                std.mem.writeInt(i64, seek[0..8], keyNum, .big);
                keyPair = cursor.seek(seek[0..]);
                // The last seek is beyond the end of the range so
                // it should return nil.
                if (i == count - 1) {
                    assert(keyPair.isNotFound(), "the key should be not found, key: {s}", .{seek});
                    continue;
                }
                // Otherwise we should seek to the exact key or the next key.
                const num = std.mem.readInt(i64, keyPair.key.?[0..8], .big);
                if (i % 2 == 0) {
                    assert(num == i, "the key should be seeked to the exact key or the next key, i: {d}, key: {any}, num: {d}", .{ i, seek, num });
                } else {
                    assert(num == i + 1, "the key should be seeked to the next key({d}), i: {d}, key: {any}, num: {d}", .{ i + 1, i, seek, num });
                }
            }
        }
    }.view;
    try kvDB.view(viewFn);
}
206 |
// Ensure that a cursor can iterate over an empty bucket without error.
test "Cursor_Iterate_EmptyBucket" {
    std.testing.log_level = .err;
    var ctx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&ctx);
    const kvDB = ctx.db;
    const createEmptyBucket = struct {
        fn update(trx: *TX) Error!void {
            _ = try trx.createBucket("widgets");
        }
    }.update;
    try kvDB.update(createEmptyBucket);

    const iterateEmpty = struct {
        fn view(trx: *TX) Error!void {
            const widgets = trx.getBucket("widgets") orelse unreachable;
            var cur = widgets.cursor();
            defer cur.deinit();
            // first() on an empty bucket must report "not found" immediately.
            const kv = cur.first();
            assert(kv.isNotFound(), "the key should be not found", .{});
        }
    }.view;
    try kvDB.view(iterateEmpty);
}
231 |
// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
test "Cursor_EmptyBucketReverse" {
    std.testing.log_level = .err;
    var ctx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&ctx);
    const kvDB = ctx.db;
    const createEmptyBucket = struct {
        fn update(trx: *TX) Error!void {
            _ = try trx.createBucket("widgets");
        }
    }.update;
    try kvDB.update(createEmptyBucket);

    const reverseIterateEmpty = struct {
        fn view(trx: *TX) Error!void {
            const widgets = trx.getBucket("widgets") orelse unreachable;
            var cur = widgets.cursor();
            defer cur.deinit();
            // last() on an empty bucket must report "not found" immediately.
            const kv = cur.last();
            assert(kv.isNotFound(), "the key should be not found", .{});
        }
    }.view;
    try kvDB.view(reverseIterateEmpty);
}
256 |
// Ensure that a Tx cursor can iterate over a single root with a couple elements.
// Keys come back in sorted order regardless of insertion order, and iterating
// past the end keeps returning "not found".
test "Cursor_Iterate_Leaf" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(consts.KeyPair.init("baz", ""));
            try b.put(consts.KeyPair.init("foo", &[_]u8{0}));
            try b.put(consts.KeyPair.init("bar", &[_]u8{1}));
        }
    }.update;
    try kvDB.update(updateFn);

    // NOTE: cleanup is manual (no defer); a failing assert panics before it runs.
    const trx = try kvDB.begin(false);
    const bt = trx.getBucket("widgets");
    assert(bt != null, "the bucket should not be null", .{});
    var c = bt.?.cursor();
    const keyPair = c.first();
    assert(std.mem.eql(u8, keyPair.key.?, "bar"), "the key should be 'bar'", .{});
    assert(std.mem.eql(u8, keyPair.value.?, &[_]u8{1}), "the value should be [1]", .{});

    const kv = c.next();
    assert(std.mem.eql(u8, kv.key.?, "baz"), "the key should be 'baz'", .{});
    assert(std.mem.eql(u8, kv.value.?, &[_]u8{}), "the value should be []", .{});

    const kv2 = c.next();
    assert(std.mem.eql(u8, kv2.key.?, "foo"), "the key should be 'foo'", .{});
    assert(std.mem.eql(u8, kv2.value.?, &[_]u8{0}), "the value should be [0]", .{});

    const kv3 = c.next();
    assert(kv3.isNotFound(), "the key should be not found", .{});

    // A second next() past the end must also stay "not found".
    const kv4 = c.next();
    assert(kv4.isNotFound(), "the key should be not found", .{});
    c.deinit();
    try trx.rollbackAndDestroy();
}
297 |
// Ensure that a cursor can reverse iterate over a single root with a couple elements.
// Keys come back in reverse-sorted order, and iterating before the start keeps
// returning "not found".
test "Cursor_LeafRootReverse" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(consts.KeyPair.init("baz", ""));
            try b.put(consts.KeyPair.init("foo", &[_]u8{0}));
            try b.put(consts.KeyPair.init("bar", &[_]u8{1}));
        }
    }.update;
    try kvDB.update(updateFn);

    // NOTE: cleanup is manual (no defer); a failing assert panics before it runs.
    const trx = try kvDB.begin(false);
    const bt = trx.getBucket("widgets");
    assert(bt != null, "the bucket should not be null", .{});
    var c = bt.?.cursor();
    const keyPair = c.last();
    assert(std.mem.eql(u8, keyPair.key.?, "foo"), "the key should be 'foo'", .{});
    assert(std.mem.eql(u8, keyPair.value.?, &[_]u8{0}), "the value should be [0]", .{});

    const kv2 = c.prev();
    assert(std.mem.eql(u8, kv2.key.?, "baz"), "the key should be 'baz'", .{});
    assert(std.mem.eql(u8, kv2.value.?, &[_]u8{}), "the value should be []", .{});

    const kv = c.prev();
    assert(std.mem.eql(u8, kv.key.?, "bar"), "the key should be 'bar'", .{});
    assert(std.mem.eql(u8, kv.value.?, &[_]u8{1}), "the value should be [1]", .{});

    const kv3 = c.prev();
    assert(kv3.isNotFound(), "the key should be not found", .{});

    // A second prev() before the start must also stay "not found".
    const kv4 = c.prev();
    assert(kv4.isNotFound(), "the key should be not found", .{});
    c.deinit();

    try trx.rollbackAndDestroy();
}
339 |
// Ensure that a Tx cursor can restart from the beginning.
// Calling first() again after advancing must rewind the cursor to the lowest key.
test "Cursor_Restart" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(consts.KeyPair.init("bar", ""));
            try b.put(consts.KeyPair.init("foo", ""));
        }
    }.update;
    try kvDB.update(updateFn);

    const trx = try kvDB.begin(false);
    const bt = trx.getBucket("widgets");
    assert(bt != null, "the bucket should not be null", .{});
    var c = bt.?.cursor();
    const keyPair = c.first();
    assert(std.mem.eql(u8, keyPair.key.?, "bar"), "the key should be 'bar'", .{});

    const keyPair2 = c.next();
    assert(std.mem.eql(u8, keyPair2.key.?, "foo"), "the key should be 'foo'", .{});

    // Restart: first() must rewind to the smallest key again.
    const keyPair3 = c.first();
    assert(std.mem.eql(u8, keyPair3.key.?, "bar"), "the key should be 'bar'", .{});

    const keyPair4 = c.next();
    assert(std.mem.eql(u8, keyPair4.key.?, "foo"), "the key should be 'foo'", .{});

    c.deinit();
    try trx.rollbackAndDestroy();
}
374 |
// Ensure that a cursor can skip over empty pages that have been deleted.
// Inserts 1000 keys, deletes the first 600 in the same tx as iteration, and
// checks the cursor still visits exactly the 400 survivors.
test "Cursor_First_EmptyPages" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // Create 1000 keys in the "widgets" bucket.
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            var key: [8]u8 = undefined;
            for (0..1000) |i| {
                const keyNum: i64 = @intCast(i);
                std.mem.writeInt(i64, key[0..8], keyNum, .big);
                try b.put(consts.KeyPair.init(key[0..8], ""));
                @memset(key[0..8], 0);
            }
        }
    }.update;
    try kvDB.update(updateFn);

    // Delete half the keys and then try to iterate.
    const updateFn2 = struct {
        fn update(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var key: [8]u8 = undefined;
            for (0..600) |i| {
                const keyNum: i64 = @intCast(i);
                std.mem.writeInt(i64, key[0..8], keyNum, .big);
                try b.delete(key[0..8]);
                @memset(key[0..8], 0);
            }
            var c = b.cursor();
            defer c.deinit();
            var n: usize = 0;
            var keyPair = c.first();
            while (!keyPair.isNotFound()) {
                keyPair = c.next();
                n += 1;
            }
            assert(n == 400, "the number of keys should be 400, but got {d}", .{n});
        }
    }.update;
    try kvDB.update(updateFn2);
}
420 |
// Ensure that a Tx can iterate over all elements in a bucket.
// Property-style test: insert random items, then verify forward iteration
// yields them in sorted order with matching values.
test "Cursor_QuickCheck" {
    std.testing.log_level = .err;

    const f = struct {
        fn quickCheck(allocator: std.mem.Allocator, size: usize) !void {
            var q = tests.Quick.init(allocator);
            q.maxItems = size;
            // q.maxKeySize = 10;
            // q.maxValueSize = 10;
            _ = try q.generate(allocator);
            defer q.deinit();

            std.debug.print("QuickCheck passed for size {d}.\n", .{size});
            var testCtx = try tests.setup(allocator);
            defer tests.teardown(&testCtx);
            const kvDB = testCtx.db;
            // Bulk insert all generated items in one write transaction.
            {
                const trx = try kvDB.begin(true);
                const b = try trx.createBucket("widgets");
                for (q.items.items) |item| {
                    try b.put(consts.KeyPair.init(item.key, item.value));
                }

                try trx.commitAndDestroy();
            }
            q.sort();

            // Iterate over all items and check consistency.
            {
                const trx = try kvDB.begin(false);
                const b = trx.getBucket("widgets") orelse unreachable;
                var cursor = b.cursor();
                var keyPair = cursor.first();
                for (q.items.items) |item| {
                    assert(std.mem.eql(u8, keyPair.key.?, item.key), "the key should be {s}", .{item.key});
                    assert(std.mem.eql(u8, keyPair.value.?, item.value), "the value should be {s}", .{item.value});
                    keyPair = cursor.next();
                }
                cursor.deinit();
                try trx.rollbackAndDestroy();
            }
        }
    }.quickCheck;
    try f(std.testing.allocator, 500);
}
467 |
// Property-style test: insert random items, then verify reverse iteration
// yields the keys in reverse-sorted order.
test "Cursor_QuickCheck_Reverse" {
    std.testing.log_level = .err;
    const f = struct {
        fn quickCheckReverse(allocator: std.mem.Allocator, size: usize) !void {
            var q = tests.Quick.init(allocator);
            q.maxItems = size;
            _ = try q.generate(allocator);
            defer q.deinit();
            var testCtx = try tests.setup(allocator);
            defer tests.teardown(&testCtx);
            const kvDB = testCtx.db;
            // Bulk insert all values.
            {
                const trx = kvDB.begin(true) catch unreachable;
                const b = trx.createBucket("widgets") catch unreachable;
                for (q.items.items) |item| {
                    try b.put(consts.KeyPair.init(item.key, item.value));
                }
                try trx.commitAndDestroy();
            }

            // Sort test data.
            q.reverse();

            // Iterate over all items and check consistency.
            {
                const trx = kvDB.begin(false) catch unreachable;
                const b = trx.getBucket("widgets") orelse unreachable;
                var cursor = b.cursor();
                var keyPair = cursor.last();
                for (q.items.items) |item| {
                    assert(std.mem.eql(u8, keyPair.key.?, item.key), "the key should be {s}", .{item.key});
                    keyPair = cursor.prev();
                }
                cursor.deinit();
                try trx.rollbackAndDestroy();
            }
        }
    }.quickCheckReverse;
    try f(std.testing.allocator, 500);
}
510 |
// Ensure that a Tx cursor can iterate over subbuckets.
// Sub-bucket keys must come back in sorted order with null values.
test "Cursor_QuickCheck_BucketsOnly" {
    std.testing.log_level = .err;
    var db = tests.setup(std.testing.allocator) catch unreachable;
    defer tests.teardown(&db);
    const kvDB = db.db;
    const bucket_names = [_][]const u8{ "foo", "bar", "baz" };
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            for (bucket_names) |name| {
                _ = try b.createBucket(name);
            }
        }
    }.update;
    try kvDB.update(updateFn);

    // Sub-buckets iterate in sorted key order, not insertion order.
    const expected_bucket_names = [_][]const u8{ "bar", "baz", "foo" };
    const viewFn = struct {
        fn view(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var cursor = b.cursor();
            defer cursor.deinit();
            var keyPair = cursor.first();
            var i: usize = 0;
            while (!keyPair.isNotFound()) {
                assert(std.mem.eql(u8, keyPair.key.?, expected_bucket_names[i]), "the key should be {s}", .{expected_bucket_names[i]});
                assert(keyPair.value == null, "the value should be null", .{});
                keyPair = cursor.next();
                i += 1;
            }
            assert(i == bucket_names.len, "the number of keys should be {d}, but got {d}", .{ bucket_names.len, i });
        }
    }.view;
    try kvDB.view(viewFn);
}
548 |
// Ensure that a Tx cursor can reverse iterate over subbuckets.
// Sub-bucket keys must come back in reverse-sorted order.
test "Cursor_QuickCheck_BucketsOnly_Reverse" {
    std.testing.log_level = .err;
    var db = tests.setup(std.testing.allocator) catch unreachable;
    defer tests.teardown(&db);
    const kvDB = db.db;
    const bucket_names = [_][]const u8{ "foo", "bar", "baz" };
    // Reverse of the sorted order (bar, baz, foo).
    const expected_bucket_names = [_][]const u8{ "foo", "baz", "bar" };
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            const b = try trx.createBucket("widgets");
            for (bucket_names) |name| {
                _ = try b.createBucket(name);
            }
        }
    }.update;
    try kvDB.update(updateFn);

    const viewFn = struct {
        fn view(trx: *TX) Error!void {
            const b = trx.getBucket("widgets") orelse unreachable;
            var cursor = b.cursor();
            defer cursor.deinit();
            var keyPair = cursor.last();
            var i: usize = 0;
            while (!keyPair.isNotFound()) {
                assert(std.mem.eql(u8, keyPair.key.?, expected_bucket_names[i]), "the key should be {s}={s}", .{ expected_bucket_names[i], keyPair.key.? });
                keyPair = cursor.prev();
                i += 1;
            }
            assert(i == bucket_names.len, "the number of keys should be {d}, but got {d}", .{ bucket_names.len, i });
        }
    }.view;
    try kvDB.view(viewFn);
}
586 |
// Example: forward-iterate a bucket with a cursor, printing each key/value.
test "ExampleCursor" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // Start a read-write transaction.
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            // Create a new bucket.
            const b = trx.createBucket("animals") catch unreachable;

            // Insert data into a bucket.
            try b.put(consts.KeyPair.init("dog", "fun"));
            try b.put(consts.KeyPair.init("cat", "lame"));
            try b.put(consts.KeyPair.init("liger", "awesome"));

            // Create a cursor for iteration.
            var c = b.cursor();
            defer c.deinit();

            // Iterate over the bucket.
            var keyPair = c.first();
            while (!keyPair.isNotFound()) {
                std.debug.print("A {s} is {s}.\n", .{ keyPair.key.?, keyPair.value.? });
                // Do something with keyPair.
                keyPair = c.next();
            }
        }
    }.update;
    try kvDB.update(updateFn);
}
618 |
// Example: reverse-iterate a bucket with a cursor, printing each key/value.
test "ExampleCursor_reverse" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const updateFn = struct {
        fn update(trx: *TX) Error!void {
            // Create a new bucket.
            const b = trx.createBucket("animals") catch unreachable;

            // Insert data into a bucket.
            try b.put(consts.KeyPair.init("dog", "fun"));
            try b.put(consts.KeyPair.init("cat", "lame"));
            try b.put(consts.KeyPair.init("liger", "awesome"));

            // Create a cursor for iteration.
            var c = b.cursor();
            defer c.deinit();
            // Iterate over items in reverse sorted key order. This starts
            // from the last key/value pair and updates the k/v variables to
            // the previous key/value on each iteration.
            //
            // The loop finishes at the beginning of the cursor when a nil key
            // is returned.
            var keyPair = c.last();
            while (!keyPair.isNotFound()) {
                std.debug.print("A {s} is {s}.\n", .{ keyPair.key.?, keyPair.value.? });
                keyPair = c.prev();
            }
        }
    }.update;
    try kvDB.update(updateFn);
    // Output:
    // A liger is awesome.
    // A dog is fun.
    // A cat is lame.
}
656 |
--------------------------------------------------------------------------------
/src/error.zig:
--------------------------------------------------------------------------------
/// Error type
/// Shared error set for the whole key/value store: database, transaction,
/// bucket, mmap, allocation and file-IO failures.
pub const Error = error{
    // Below are the database errors
    DatabaseNotOpen,
    DatabaseOpen,

    Invalid,
    VersionMismatch,
    CheckSum,
    Timeout,
    // Below are the transaction errors
    TxNotWriteable,
    TxClosed,
    DatabaseReadOnly,

    // Below are the bucket errors
    BucketNotFound,
    BucketExists,
    BucketNameRequired,
    KeyRequired,
    KeyTooLarge,
    ValueTooLarge,
    // NOTE(review): misspelling of "IncompatibleValue" — kept as-is because
    // callers reference this name; renaming would break the public API.
    IncompactibleValue,

    // Below are the mmap errors
    MMapTooLarge,

    // Memory allocation error
    OutOfMemory,
    // Consistency check
    NotPassConsistencyCheck,
    // File IO error
    FileIOError,
    // For Test
    ManagedTxCommitNotAllowed,
    ManagedTxRollbackNotAllowed,
};
38 |
--------------------------------------------------------------------------------
/src/freelist.zig:
--------------------------------------------------------------------------------
1 | const page = @import("page.zig");
2 | const std = @import("std");
3 | const tx = @import("tx.zig");
4 | const Error = @import("error.zig").Error;
5 | const consts = @import("consts.zig");
6 | const PgidType = consts.PgidType;
7 | const assert = @import("assert.zig").assert;
8 | const TxId = consts.TxId;
9 | const Page = page.Page;
10 | const log = std.log.scoped(.BoltFreeList);
11 |
12 | // FreeList represents a list of all pages that are available for allcoation.
13 | // It also tracks pages that have been freed but are still in use by open transactions.
14 | pub const FreeList = struct {
15 | // all free and available free page ids.
16 | ids: std.ArrayList(PgidType),
17 | // mapping of soon-to-be free page ids by tx.
18 | pending: std.AutoHashMap(consts.TxId, std.ArrayList(PgidType)),
19 | // fast lookup of all free and pending pgae ids.
20 | cache: std.AutoHashMap(PgidType, bool),
21 |
22 | allocator: std.mem.Allocator,
23 |
24 | const Self = @This();
25 |
26 | /// init freelist
27 | pub fn init(allocator: std.mem.Allocator) *Self {
28 | const f = allocator.create(Self) catch unreachable;
29 | f.ids = std.ArrayList(PgidType).init(allocator);
30 | f.pending = std.AutoHashMap(TxId, std.ArrayList(PgidType)).init(allocator);
31 | f.cache = std.AutoHashMap(PgidType, bool).init(allocator);
32 | f.allocator = allocator;
33 | return f;
34 | }
35 |
36 | /// deinit freelist
37 | pub fn deinit(self: *Self) void {
38 | // log.info("deinit freelist", .{});
39 | defer self.allocator.destroy(self);
40 | var itr = self.pending.iterator();
41 | while (itr.next()) |entry| {
42 | if (@import("builtin").is_test) {
43 | log.info("free pending, txid: {}, ids: {any}", .{ entry.key_ptr.*, entry.value_ptr.items });
44 | }
45 | entry.value_ptr.deinit();
46 | }
47 | self.pending.deinit();
48 | self.cache.deinit();
49 | self.ids.deinit();
50 | }
51 |
52 | /// Return the size of the page after serlialization.
53 | pub fn size(self: *Self) usize {
54 | var n: usize = self.count();
55 | if (n >= 0xFFFF) {
56 | // The first elements will be used to store the count. See freelist.write.
57 | n += 1;
58 | }
59 | return Page.headerSize() + @sizeOf(PgidType) * n;
60 | }
61 |
    /// Returns count of pages on the freelist.
    /// Total = free pages (reusable now) + pending pages (freed but possibly
    /// still visible to an open transaction).
    pub fn count(self: *Self) usize {
        return self.freeCount() + self.pendingCount();
    }
66 |
67 | /// Returns count of free pages.
68 | pub fn freeCount(self: *Self) usize {
69 | return self.ids.items.len;
70 | }
71 |
72 | /// Returns count of pending pages.
73 | pub fn pendingCount(self: *Self) usize {
74 | var pageCount: usize = 0;
75 | var itr = self.pending.valueIterator();
76 | while (itr.next()) |valuePtr| {
77 | pageCount += valuePtr.items.len;
78 | }
79 | return pageCount;
80 | }
81 |
82 | /// Copies into dst a list of all free ids and all pending ids in one sorted list.
83 | pub fn copyAll(self: *Self, dst: []PgidType) void {
84 | var array = std.ArrayList(PgidType).initCapacity(self.allocator, self.pendingCount()) catch unreachable;
85 | defer array.deinit();
86 | var itr = self.pending.valueIterator();
87 | while (itr.next()) |entries| {
88 | array.appendSlice(entries.items) catch unreachable;
89 | }
90 | std.mem.sort(PgidType, array.items, {}, std.sort.asc(PgidType));
91 | Self.mergeSortedArray(dst, self.ids.items, array.items);
92 | }
93 |
94 | /// Returns the starting page id of a contiguous list of pages of a given size.
95 | pub fn allocate(self: *Self, n: usize) PgidType {
96 | if (self.ids.items.len == 0) {
97 | return 0;
98 | }
99 |
100 | var initial: usize = 0;
101 | var previd: usize = 0;
102 | for (self.ids.items, 0..) |id, i| {
103 | assert(id > 1, "invalid page({}) allocation", .{id});
104 | // Reset initial page if this is not contigous.
105 | if (previd == 0 or (id - previd) != 1) {
106 | initial = id;
107 | }
108 | previd = id;
109 | // If we found a contignous block then remove it and return it.
110 | if ((id - initial) + 1 == @as(PgidType, n)) {
111 | const beforeCount = self.ids.items.len;
112 | const beforeIds = self.allocator.alloc(PgidType, self.ids.items.len) catch unreachable;
113 | std.mem.copyForwards(PgidType, beforeIds, self.ids.items);
114 | // If we're allocating off the beginning then take the fast path
115 | // and just adjust then existing slice. This will use extra memory
116 | // temporarilly but then append() in free() will realloc the slice
117 | // as is necessary.
118 | if (i + 1 == n) {
119 | std.mem.copyForwards(PgidType, self.ids.items[0..], self.ids.items[i + 1 ..]);
120 | self.ids.resize(self.ids.items.len - i - 1) catch unreachable;
121 | } else {
122 | std.mem.copyForwards(PgidType, self.ids.items[i - n + 1 ..], self.ids.items[(i + 1)..]);
123 | self.ids.resize(self.ids.items.len - n) catch unreachable;
124 | }
125 | assert(beforeCount == (n + self.ids.items.len), "beforeCount == n + self.ids.items.len, beforeCount: {d}, n: {d}, self.ids.items.len: {d}", .{ beforeCount, n, self.ids.items.len });
126 | // Remove from the free cache.
127 | for (0..n) |ii| {
128 | const have = self.cache.remove(initial + ii);
129 | if (!@import("builtin").is_test) {
130 | assert(have, "page {} not found in cache", .{initial + ii});
131 | }
132 | }
133 | const afterCount = self.ids.items.len;
134 | assert(beforeCount == (n + afterCount), "{} != {}", .{ beforeCount, afterCount });
135 | if (@import("builtin").is_test) {
136 | log.debug("allocate a new page from freelist, pgid: {d}, n: {d}, ids from {any} change to {any}", .{ initial, n, beforeIds, self.ids.items });
137 | }
138 | self.allocator.free(beforeIds);
139 | return initial;
140 | }
141 | }
142 | return 0;
143 | }
144 |
145 | /// Releases a page and its overflow for a given transaction id.
146 | /// If the page is already free then a panic will occur.
147 | pub fn free(self: *Self, txid: TxId, p: *const Page) !void {
148 | assert(p.id > 1, "can not free 0 or 1 page", .{});
149 | // Free page and all its overflow pages.
150 | const ids = try self.pending.getOrPutValue(txid, std.ArrayList(PgidType).init(self.allocator));
151 | for (p.id..(p.id + p.overflow + 1)) |id| {
152 | // Add to the freelist and cache.
153 | try self.cache.putNoClobber(id, true);
154 | try ids.value_ptr.append(id);
155 | }
156 | // log.debug("after free a page, txid: {}, pending ids: {any}", .{ txid, ids.value_ptr.items });
157 | }
158 |
159 | /// Moves all page ids for a transaction id (or older) to the freelist.
160 | pub fn release(self: *Self, txid: TxId) !void {
161 | if (!@import("builtin").is_test) {
162 | assert(self.pending.count() <= 1, "pending count should be less than 1", .{});
163 | }
164 | var arrayIDs = std.ArrayList(PgidType).init(self.allocator);
165 | defer arrayIDs.deinit();
166 | var itr = self.pending.iterator();
167 | while (itr.next()) |entry| {
168 | if (entry.key_ptr.* <= txid) {
169 | // Move transaction's pending pages to the available freelist.
170 | // Don't remove from the cache since the page is still free.
171 | try arrayIDs.appendSlice(entry.value_ptr.items);
172 | entry.value_ptr.deinit();
173 | const have = self.pending.remove(entry.key_ptr.*);
174 | assert(have, "sanity check", .{});
175 | }
176 | }
177 | // Sort the array
178 | std.mem.sort(PgidType, arrayIDs.items, {}, std.sort.asc(PgidType));
179 | var array = try std.ArrayList(PgidType).initCapacity(self.allocator, arrayIDs.items.len + self.ids.items.len);
180 | defer array.deinit();
181 | try array.appendNTimes(0, arrayIDs.items.len + self.ids.items.len);
182 | assert(array.items.len == (arrayIDs.items.len + self.ids.items.len), "array.items.len == (arrayIDs.items.len + self.ids.items.len)", .{});
183 | // log.info("Release a tx's pages, before merge:\t {any} <= [{any}, {any}]", .{ array.items, arrayIDs.items, self.ids.items });
184 | Self.mergeSortedArray(array.items, arrayIDs.items, self.ids.items);
185 | try self.ids.resize(0);
186 | try self.ids.appendSlice(array.items);
187 | assert(self.ids.items.len == array.items.len, "self.ids.items.len == array.items.len", .{});
188 | // log.info("Release a tx's pages, after merge:\t {any}", .{self.ids.items});
189 | }
190 |
191 | /// Removes the pages from a given pending tx.
192 | pub fn rollback(self: *Self, txid: TxId) void {
193 | // Remove page ids from cache.
194 | if (self.pending.get(txid)) |pendingIds| {
195 | for (pendingIds.items) |id| {
196 | _ = self.cache.remove(id);
197 | }
198 | pendingIds.deinit();
199 | // Remove pages from pending list.
200 | _ = self.pending.remove(txid);
201 | }
202 | }
203 |
204 | /// Returns whether a given page is in the free list.
205 | pub fn freed(self: *Self, pgid: PgidType) bool {
206 | return self.cache.contains(pgid);
207 | }
208 |
209 | /// Initializes the freelist from a freelist page.
210 | pub fn read(self: *Self, p: *Page) void {
211 | // If the page.count is at the max u16 value (64k) then it's considered
212 | // an overflow and the size of the freelist is stored as the first elment.
213 | var _count = @as(usize, p.count);
214 | var idx: usize = 0;
215 | if (_count == 0xFFFF) {
216 | idx = 1;
217 | _count = p.freelistPageOverWithCountElements().?[0];
218 | }
219 |
220 | // Copy the list of page ids from the freelist.
221 | if (_count == 0) {
222 | self.ids.resize(0) catch unreachable;
223 | } else {
224 | const ids = p.freelistPageOverWithCountElements().?;
225 | self.ids.appendSlice(ids[idx.._count]) catch unreachable;
226 | // Make sure they're sorted
227 | std.mem.sortUnstable(PgidType, self.ids.items, {}, std.sort.asc(PgidType));
228 | }
229 | // Rebuild the page cache.
230 | self.reindex();
231 | }
232 |
233 | /// Writes the page ids onto a freelist page. All free and pending ids are
234 | /// saved to disk since in the event of a program crash, all pending ids will
235 | /// become free.
236 | pub fn write(self: *Self, p: *Page) Error!void {
237 | // Combine the old free pgids and pgids waiting on an open transaction.
238 | //
239 | // Update the header flag.
240 | p.flags |= consts.intFromFlags(.freeList);
241 |
242 | // The page.Count can only hold up to 64k elements so if we overflow that
243 | // number then we handle it by putting the size in the first element.
244 | const lenids = self.count();
245 | if (lenids == 0) {
246 | p.count = @as(u16, @intCast(lenids));
247 | } else if (lenids < 0xFFFF) {
248 | p.count = @as(u16, @intCast(lenids));
249 | self.copyAll(p.freelistPageElements().?);
250 | } else {
251 | p.count = @as(u16, 0xFFFF);
252 | const overflow = p.freelistPageOverWithCountElements().?;
253 | overflow[0] = @as(u64, lenids);
254 | p.overflow = @as(u32, @intCast(lenids));
255 | self.copyAll(overflow[1..]);
256 | }
257 | // log.info("𓃠 after write freelist to page, pgid: {}, ids: {any}", .{ p.id, p.freelistPageElements().? });
258 | }
259 |
260 | /// Reads the freelist from a page and filters out pending itmes.
261 | pub fn reload(self: *Self, p: *Page) void {
262 | self.read(p);
263 |
264 | // Build a cache of only pending pages.
265 | var pagaeCahe = std.AutoHashMap(PgidType, bool).init(self.allocator);
266 | var vitr = self.pending.valueIterator();
267 |
268 | while (vitr.next()) |pendingIDs| {
269 | for (pendingIDs.items) |pendingID| {
270 | pagaeCahe.put(pendingID, true) catch unreachable;
271 | }
272 | }
273 |
274 | // Check each page in the freelist and build a new available freelist.
275 | // with any pages not in the pending lists.
276 | var a = std.ArrayList(PgidType).init(self.allocator);
277 | defer a.deinit();
278 | for (self.ids.items) |id| {
279 | if (!pagaeCahe.contains(id)) {
280 | a.append(id) catch unreachable;
281 | }
282 | }
283 |
284 | self.ids.appendSlice(a.items) catch unreachable;
285 |
286 | // Once the available list is rebuilt then rebuild the free cache so that
287 | // it includes the available and pending free pages.
288 | self.reindex();
289 | }
290 |
291 | // Rebuilds the free cache based on available and pending free list.
292 | fn reindex(self: *Self) void {
293 | self.cache.clearAndFree();
294 | for (self.ids.items) |id| {
295 | self.cache.put(id, true) catch unreachable;
296 | }
297 |
298 | var itr = self.pending.valueIterator();
299 | while (itr.next()) |entry| {
300 | for (entry.items) |id| {
301 | self.cache.put(id, true) catch unreachable;
302 | }
303 | }
304 | }
305 |
306 | /// Merge two sorted arrays into a third array.
307 | fn mergeSortedArray(dst: []PgidType, a: []const PgidType, b: []const PgidType) void {
308 | const size1 = a.len;
309 | const size2 = b.len;
310 | var i: usize = 0;
311 | var j: usize = 0;
312 | var index: usize = 0;
313 |
314 | while (i < size1 and j < size2) {
315 | if (a[i] <= b[j]) {
316 | dst[index] = a[i];
317 | i += 1;
318 | } else {
319 | dst[index] = b[j];
320 | j += 1;
321 | }
322 | index += 1;
323 | }
324 | if (i < size1) {
325 | std.mem.copyForwards(PgidType, dst[index..], a[i..]);
326 | }
327 | if (j < size2) {
328 | std.mem.copyForwards(PgidType, dst[index..], b[j..]);
329 | }
330 | }
331 |
332 | /// Format freelist to string with _allocator.
333 | pub fn string(self: *Self, _allocator: std.mem.Allocator) []u8 {
334 | var buf = std.ArrayList(u8).init(_allocator);
335 | defer buf.deinit();
336 | const writer = buf.writer();
337 |
338 | {
339 | writer.print("pending:", .{}) catch unreachable;
340 | var itr = self.pending.iterator();
341 | while (itr.next()) |entry| {
342 | writer.print(" [txid: {any}, pages: {any}], ", .{ entry.key_ptr.*, entry.value_ptr.items }) catch unreachable;
343 | }
344 | writer.print("\n", .{}) catch unreachable;
345 | }
346 | const pendingIds = self.pending.keyIterator().items;
347 | writer.print("count: {}, freeCount: {}, pendingCount: {}, pendingKeys: {any}", .{ self.count(), self.freeCount(), self.pendingCount(), pendingIds[0..] }) catch unreachable;
348 | return buf.toOwnedSlice() catch unreachable;
349 | }
350 | };
351 |
352 | // // Ensure that a page is added to a transaction's freelist.
353 | // test "Freelist_free" {
354 | // var freelist = FreeList.init(std.testing.allocator);
355 | // defer freelist.deinit();
356 | // try freelist.free(100, &page.Page{ .id = 100, .count = 1, .overflow = 0, .flags = 0 });
357 | // const pending = freelist.pending.get(100).?.items;
358 | // assert(pending.len == 1, "pending.items.len == 1", .{});
359 | // assert(pending[0] == 100, "pending.items[0].id == 100", .{});
360 | // }
361 |
362 | // // Ensure that a page and its overflow is added to a transaction's freelist.
363 | // test "Freelist_free_overflow" {
364 | // var freelist = FreeList.init(std.testing.allocator);
365 | // defer freelist.deinit();
366 | // try freelist.free(100, &.{ .id = 12, .overflow = 3 });
367 | // const pending = freelist.pending.get(100).?.items;
368 | // assert(pending.len == 4, "pending.items.len == 4", .{});
369 | // assert(pending[0] == 12, "pending.items[0].id == 12", .{});
370 | // assert(pending[1] == 13, "pending.items[1].id == 13", .{});
371 | // assert(pending[2] == 14, "pending.items[2].id == 14", .{});
372 | // assert(pending[3] == 15, "pending.items[3].id == 15", .{});
373 | // }
374 |
375 | // // Ensure that a transaction's free pages can be released.
376 | // test "Freelist_release" {
377 | // var freelist = FreeList.init(std.testing.allocator);
378 | // defer freelist.deinit();
379 | // try freelist.free(100, &.{ .id = 12, .overflow = 1 });
380 | // try freelist.free(100, &.{ .id = 9 });
381 | // try freelist.free(102, &.{ .id = 39 });
382 | // try freelist.release(100);
383 | // try freelist.release(101);
384 | // assert(std.mem.eql(u64, freelist.ids.items, &.{ 9, 12, 13 }), "freelist.ids.items == [9, 12, 13]", .{});
385 | // try freelist.release(102);
386 | // assert(std.mem.eql(u64, freelist.ids.items, &.{ 9, 12, 13, 39 }), "freelist.ids.items == [9, 12, 13, 39]", .{});
387 | // }
388 |
389 | // // Ensure that a freelist can find contiguous blocks of pages.
390 | // test "Freelist_allocate" {
391 | // std.testing.log_level = .debug;
392 | // var freelist = FreeList.init(std.testing.allocator);
393 | // try freelist.ids.appendSlice(&[_]PgidType{ 3, 4, 5, 6, 7, 9, 12, 13, 18 });
394 | // defer freelist.deinit();
395 | // var pid = freelist.allocate(3);
396 | // assert(pid == 3, "freelist.allocate(3) == 3, pid: {d}", .{pid});
397 | // pid = freelist.allocate(1);
398 | // assert(pid == 6, "freelist.allocate(1) == 6, pid: {d}", .{pid});
399 | // pid = freelist.allocate(3);
400 | // assert(pid == 0, "freelist.allocate(3) == 0, pid: {d}", .{pid});
401 | // pid = freelist.allocate(2);
402 | // assert(pid == 12, "freelist.allocate(2) == 12, pid: {d}", .{pid});
403 | // pid = freelist.allocate(1);
404 | // assert(pid == 7, "freelist.allocate(1) == 7, pid: {d}", .{pid});
405 | // assert(std.mem.eql(u64, freelist.ids.items, &[_]PgidType{ 9, 18 }), "freelist.ids == {any}", .{freelist.ids.items});
406 | // pid = freelist.allocate(1);
407 | // assert(pid == 9, "freelist.allocate(1) == 9, pid: {d}", .{pid});
408 | // pid = freelist.allocate(1);
409 | // assert(pid == 18, "freelist.allocate(1) == 18, pid: {d}", .{pid});
410 | // pid = freelist.allocate(1);
411 | // assert(pid == 0, "freelist.allocate(1) == 0, pid: {d}", .{pid});
412 | // assert(freelist.ids.items.len == 0, "freelist.ids.items.len == 0", .{});
413 | // }
414 |
415 | // // Ensure that a freelist can deserialize from a freelist page.
416 | // test "Freelist_read" {
417 | // var buf: [consts.PageSize]u8 = [_]u8{0} ** consts.PageSize;
418 | // var p = page.Page.init(buf[0..]);
419 | // p.flags = consts.intFromFlags(.freeList);
420 | // p.count = 2;
421 |
422 | // // Insert 2 page ids.
423 | // const ptr = p.freelistPageElements().?;
424 | // ptr[0] = 23;
425 | // ptr[1] = 50;
426 |
427 | // // Deserialize page into a freelist.
428 | // var freelist = FreeList.init(std.testing.allocator);
429 | // defer freelist.deinit();
430 | // freelist.read(p);
431 |
432 | // // Ensure that there are two page ids in the freelist.
433 | // assert(freelist.ids.items.len == 2, "freelist.ids.items.len == 2", .{});
434 | // assert(freelist.ids.items[0] == 23, "freelist.ids.items[0] == 23", .{});
435 | // assert(freelist.ids.items[1] == 50, "freelist.ids.items[1] == 50", .{});
436 | // }
437 |
438 | // // Ensure that a freelist can serialize into a freelist page.
439 | // test "Freelist_write" {
440 | // std.testing.log_level = .err;
441 | // // Create a freelist and write it to a page.
442 | // var buf: [consts.PageSize]u8 = [_]u8{0} ** consts.PageSize;
443 | // var freelist = FreeList.init(std.testing.allocator);
444 | // defer freelist.deinit();
445 | // try freelist.ids.appendSlice(&.{ 12, 39 });
446 |
447 | // var c100 = std.ArrayList(PgidType).init(std.testing.allocator);
448 | // c100.appendSlice(&.{ 28, 11 }) catch unreachable;
449 | // var c101 = std.ArrayList(PgidType).init(std.testing.allocator);
450 | // c101.appendSlice(&.{3}) catch unreachable;
451 | // try freelist.pending.put(100, c100);
452 | // try freelist.pending.put(101, c101);
453 |
454 | // const p = page.Page.init(buf[0..]);
455 | // try freelist.write(p);
456 | // // Read the page back out.
457 | // var freelist2 = FreeList.init(std.testing.allocator);
458 | // defer freelist2.deinit();
459 | // freelist2.read(p);
460 |
461 | // // Ensure that the freelist is correct.
462 | // // All pages should be present and in reverse order.
463 | // assert(std.mem.eql(PgidType, freelist2.ids.items, &.{ 3, 11, 12, 28, 39 }), "freelist2.ids.items == {any}", .{freelist2.ids.items});
464 | // }
465 |
466 | // // test "meta" {
467 | // // var gpa = std.heap.GeneralPurposeAllocator(.{}){}; // instantiate allocator
468 | // // const galloc = gpa.allocator(); // retrieves the created allocator.
469 | // // var ff = FreeList.init(galloc);
470 | // // defer ff.deinit();
471 | // // _ = ff.size();
472 | // // _ = ff.count();
473 | // // const fCount = ff.freeCount();
474 | // // _ = ff.pendingCount();
475 | // // ff.copyAll(&.{});
476 | // // const i = ff.allocate(100);
477 | // // try ff.release(1);
478 | // // ff.rollback(1);
479 | // // _ = ff.freed(200);
480 | // // // ff.reload(20);
481 | // // ff.reindex();
482 | // // try ff.cache.put(1000, true);
483 | // // std.debug.print("What the fuck {d} {d}, {?}\n", .{ fCount, i, ff.cache.getKey(1000) });
484 |
485 | // // const a = [_]page.PgidType{ 1, 3, 4, 5 };
486 | // // const b = [_]page.PgidType{ 0, 2, 6, 7, 120 };
487 | // // var array = [_]page.PgidType{0} ** (a.len + b.len);
488 | // // FreeList.merge_sorted_array(array[0..], a[0..], b[0..]);
489 | // // std.debug.print("after merge!\n", .{});
490 | // // for (array) |n| {
491 | // // std.debug.print("{},", .{n});
492 | // // }
493 | // // std.debug.print("\n", .{});
494 | // // var arr = try std.ArrayList(page.PgidType).initCapacity(std.heap.page_allocator, 100);
495 | // // defer arr.deinit();
496 | // // }
497 |
498 | // // test "freelist" {
499 | // // var flist = FreeList.init(std.testing.allocator);
500 | // // defer flist.deinit();
501 |
502 | // // var ids = std.ArrayList(page.PgidType).initCapacity(std.testing.allocator, 0) catch unreachable;
503 | // // for (0..29) |i| {
504 | // // const pid = @as(u64, i);
505 | // // ids.append(pid) catch unreachable;
506 | // // }
507 | // // defer ids.deinit();
508 | // // std.mem.copyForwards(u64, ids.items[0..20], ids.items[10..12]);
509 | // // ids.resize(2) catch unreachable;
510 | // // std.debug.print("{any}\n", .{ids.items});
511 | // // }
512 |
513 | // // test "freelist" {
514 | // // const buf = try std.testing.allocator.alloc(u8, 7 * consts.PageSize);
515 | // // @memset(buf, 0);
516 | // // const p = Page.init(buf);
517 | // // p.overflow = 7;
518 | // // p.id = 26737;
519 | // // p.flags = 16;
520 | // // p.count = 13368;
521 | // // p.overflow = 6;
522 | // // std.log.info("freelistPageElements, ptr: {d}", .{p.ptrInt()});
523 | // // defer std.testing.allocator.free(buf);
524 | // // const ids = p.freelistPageElements().?;
525 | // // // std.debug.print("{any}\n", .{ids});
526 | // // }
527 |
--------------------------------------------------------------------------------
/src/gc.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const Page = @import("page.zig").Page;
3 | const assert = @import("util.zig").assert;
4 |
/// A simple garbage collector that frees registered byte slices when triggered.
pub const GC = struct {
    /// One registered slice together with the allocator that owns it.
    const Entry = struct {
        allocator: std.mem.Allocator,
        bytes: []u8,
    };

    // Map from slice base address to its entry. Using a single named Entry type
    // here and in init avoids declaring two distinct anonymous struct types.
    slices: std.AutoHashMap(u64, Entry),
    allocator: std.mem.Allocator,

    const Self = @This();

    /// Initializes the GC with a given allocator (used for the internal map).
    pub fn init(allocator: std.mem.Allocator) Self {
        return .{
            .slices = std.AutoHashMap(u64, Entry).init(allocator),
            .allocator = allocator,
        };
    }

    /// Heap-allocates a new GC. Caller must release it with `destroy`.
    pub fn create(allocator: std.mem.Allocator) *Self {
        const self = allocator.create(Self) catch unreachable;
        self.* = Self.init(allocator);
        return self;
    }

    /// Frees all registered slices. The internal map releases its backing
    /// memory via clearAndFree inside trigger().
    pub fn deinit(self: *Self) void {
        self.trigger();
    }

    /// Deinitializes the GC and then frees the GC object itself.
    pub fn destroy(self: *Self) void {
        self.deinit();
        self.allocator.destroy(self);
    }

    /// Registers `bytes` (owned by `allocator`) to be freed on the next trigger.
    /// Re-registering the same base pointer is a no-op.
    pub fn add(self: *Self, allocator: std.mem.Allocator, bytes: []u8) !void {
        const ptr = @intFromPtr(bytes.ptr);
        const entry = try self.slices.getOrPut(ptr);
        if (!entry.found_existing) {
            entry.value_ptr.* = .{ .allocator = allocator, .bytes = bytes };
        }
    }

    /// Takes ownership of `list`'s buffer and registers it for collection.
    /// The caller must not use or deinit `list` afterwards.
    pub fn addArrayList(self: *Self, list: std.ArrayList(u8)) !void {
        // Work on a mutable copy: toOwnedSlice requires a mutable receiver and
        // Zig function parameters are immutable.
        var owned = list;
        const bytes = try owned.toOwnedSlice();
        try self.add(list.allocator, bytes);
    }

    /// Triggers the GC to free all registered slices and empty the map.
    pub fn trigger(self: *Self) void {
        var itr = self.slices.iterator();
        while (itr.next()) |entry| {
            entry.value_ptr.allocator.free(entry.value_ptr.bytes);
        }
        self.slices.clearAndFree();
    }
};
69 |
/// A pool of pages that can be reused.
pub const PagePool = struct {
    free: std.ArrayList(*Page),
    arena: std.heap.ArenaAllocator,
    lock: std.Thread.Mutex, // Guards the free list and the counters.
    pageSize: usize = 0,
    allocSize: usize = 0,

    const Self = @This();

    /// Creates a pool whose pages are `pageSize` bytes, backed by `allocator`.
    pub fn init(allocator: std.mem.Allocator, pageSize: usize) @This() {
        return .{
            .arena = std.heap.ArenaAllocator.init(allocator),
            .free = std.ArrayList(*Page).init(allocator),
            .lock = .{},
            .pageSize = pageSize,
        };
    }

    /// Releases the free list and every page the arena ever allocated.
    pub fn deinit(self: *Self) void {
        self.lock.lock();
        defer self.lock.unlock();
        self.free.deinit();
        self.arena.deinit();
    }

    /// Hands out a page from the free list, or allocates a fresh zeroed one
    /// when the pool is empty.
    pub fn new(self: *Self) !*Page {
        self.lock.lock();
        defer self.lock.unlock();
        if (self.free.popOrNull()) |recycled| {
            return recycled;
        }
        const buffer = try self.arena.allocator().alloc(u8, self.pageSize);
        @memset(buffer, 0);
        self.allocSize += buffer.len;
        return Page.init(buffer);
    }

    /// Returns a page to the pool, zeroing its memory first.
    pub fn delete(self: *Self, p: *Page) void {
        const buffer = p.asSlice();
        assert(buffer.len == self.pageSize, "page size mismatch", .{});
        @memset(buffer, 0);
        self.lock.lock();
        defer self.lock.unlock();
        self.free.append(p) catch unreachable;
    }

    /// Returns the total number of bytes the pool has allocated so far.
    pub fn getAllocSize(self: *Self) usize {
        self.lock.lock();
        defer self.lock.unlock();
        return self.allocSize;
    }
};
121 |
122 | // test "Page Pool" {
123 | // const consts = @import("consts.zig");
124 | // var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
125 | // defer arena.deinit();
126 |
127 | // for (0..100000) |i| {
128 | // _ = i; // autofix
129 | // const allocator = arena.allocator();
130 | // var pagePool = PagePool.init(allocator, consts.PageSize);
131 |
132 | // for (0..10000) |_| {
133 | // const p = try pagePool.new();
134 | // pagePool.delete(p);
135 | // }
136 | // pagePool.deinit();
137 | // _ = arena.reset(.free_all);
138 | // std.Thread.sleep(10 * std.time.ms_per_min);
139 | // }
140 | // }
141 |
142 | // test "GC" {
143 | // var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
144 | // defer arena.deinit();
145 | // var bytes = [5]u8{ 0, 0, 0, 0, 0 };
146 | // for (0..100) |i| {
147 | // _ = i; // autofix
148 | // const allocator = arena.allocator();
149 | // _ = allocator.dupe(u8, bytes[0..]) catch unreachable;
150 |
151 | // for (0..100) |j| {
152 | // _ = j; // autofix
153 | // var arenaAllocator = std.heap.ArenaAllocator.init(allocator);
154 | // for (0..100) |k| {
155 | // _ = k; // autofix
156 | // _ = arenaAllocator.allocator().dupe(u8, bytes[0..]) catch unreachable;
157 | // }
158 | // arenaAllocator.deinit();
159 | // }
160 | // }
161 | // }
162 |
--------------------------------------------------------------------------------
/src/main.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
/// Demo entry point: exercises the public Database API end to end
/// (open, put/get, update/view, cursors, nested buckets, stats, sync).
pub fn main() !void {
    const db = @import("namespace.zig");
    std.testing.log_level = .debug;
    var gpa = std.heap.GeneralPurposeAllocator(.{}).init;
    // Run the GPA leak check when main returns; without this deinit the
    // allocator's leak detection never executes. Declared before the database
    // defer so it runs last (defers are LIFO).
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    var database = try db.Database.open(allocator, "boltdb.tmp", null, db.defaultOptions);
    defer database.close() catch unreachable;
    // Create a bucket inside a writable transaction.
    try struct {
        fn exec(_database: *db.Database) db.Error!void {
            var trans = try _database.begin(true);
            defer trans.commit() catch unreachable;
            var bucket = try trans.createBucketIfNotExists("user");
            try bucket.put("hello", "word");
        }
    }.exec(&database);

    // Get a bucket inside a read-only transaction.
    try struct {
        fn exec(_database: *db.Database) db.Error!void {
            var trans = try _database.begin(false);
            defer trans.rollback() catch unreachable;
            var bucket = trans.bucket("user").?;
            const value = bucket.get("hello").?;
            std.log.info("hello value is {s}", .{value});
        }
    }.exec(&database);

    // Managed transactions: update / view / viewWithContext.
    {
        try database.update(struct {
            fn exec(trans: *db.Transaction) db.Error!void {
                var bucket = try trans.createBucketIfNotExists("user");
                try bucket.put("baz", "bar");
                const stats = trans.stats();
                std.log.info("transaction's stats: {any}", .{stats});
            }
        }.exec);

        try database.view(struct {
            fn view(trans: *db.Transaction) db.Error!void {
                var bucket = trans.bucket("user").?;
                const value = bucket.get("baz").?;
                std.log.info("baz value is {s}", .{value});
            }
        }.view);

        try database.viewWithContext({}, struct {
            fn view(_: void, trans: *db.Transaction) db.Error!void {
                var bucket = trans.bucket("user").?;
                const value = bucket.get("baz").?;
                std.log.info("baz value is {s}", .{value});
            }
        }.view);
    }

    // Iterate the whole database with a cursor.
    {
        try struct {
            fn exec(_database: *db.Database) db.Error!void {
                var trans = try _database.begin(false);
                defer trans.rollback() catch unreachable;
                var cursor = trans.cursor();
                defer cursor.deinit();
                var keyPair = cursor.first();
                while (!keyPair.isNotFound()) {
                    if (keyPair.isBucket()) {
                        std.log.info("iterator DB: this is a bucket: {s}", .{keyPair.key.?});
                    } else {
                        std.log.info("iterator DB: key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                    }
                    keyPair = cursor.next();
                }
            }
        }.exec(&database);
    }
    // Nested buckets, cursor seek/delete, sequences, and stats.
    {
        try struct {
            fn exec(_database: *db.Database) db.Error!void {
                var trans = try _database.begin(true);
                defer trans.commit() catch unreachable;
                var bucket = trans.bucket("user").?;
                // (Removed a stale log that announced creating bucket "address";
                // the bucket actually created here is "date".)
                var newBucket = try bucket.createBucketIfNotExists("date");
                std.log.info("Create a new bucket: {s}", .{"date"});
                var _allocator = std.heap.ArenaAllocator.init(_database.allocator());
                defer _allocator.deinit();
                const onceAllocator = _allocator.allocator();
                const value = std.fmt.allocPrint(onceAllocator, "{d}", .{std.time.timestamp()}) catch unreachable;
                try newBucket.put("laos", "Deloin");
                var cursor = bucket.cursor();
                defer cursor.deinit();
                var keyPair = cursor.first();
                while (!keyPair.isNotFound()) {
                    if (keyPair.isBucket()) {
                        std.log.info("iterator Bucket: this is a bucket: {s}", .{keyPair.key.?});
                    } else {
                        std.log.info("iterator Bucket: key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                    }
                    keyPair = cursor.next();
                }

                try bucket.put("dol", value);
                keyPair = cursor.seek("dol");
                if (keyPair.isNotFound()) {
                    std.log.info("not found key: {s}", .{"dol"});
                } else {
                    try cursor.delete();
                    std.log.info("delete key: {s}, value: {s}", .{ keyPair.key.?, keyPair.value.? });
                }
                const lastKeyPair = cursor.last();
                std.log.info("last key: {s}, value: {s}", .{ lastKeyPair.key.?, lastKeyPair.value.? });
                const prevKeyPair = cursor.prev();
                std.log.info("prev key: {s}", .{prevKeyPair.key.?});
                const cursorBucket = cursor.bucket();
                std.log.info("cursor's bucket: {any}", .{cursorBucket.stats()});

                try bucket.setSequence(100);
                const seq = try bucket.nextSequence();
                std.log.info("seq: {d}", .{seq});
                const root = bucket.root();
                std.log.info("root: {d}", .{root});
                const stats = trans.stats();
                std.log.info("transaction's stats: {any}", .{stats});
                const bucket_stats = bucket.stats();
                std.log.info("bucket's stats: {any}", .{bucket_stats});
                const writable = bucket.writable();
                std.log.info("bucket's writable: {}", .{writable});
                for (0..100) |i| {
                    try newBucket.put(std.fmt.allocPrint(onceAllocator, "{d}", .{i}) catch unreachable, "value");
                }

                std.log.info("Bucket forEach:", .{});
                try bucket.forEach(struct {
                    fn exec(_: *const db.Bucket, key: []const u8, _value: ?[]const u8) db.Error!void {
                        if (_value == null) {
                            std.log.info("this is a bucket, bucket name: {s}", .{key});
                        } else {
                            std.log.info("key: {s}, value: {s}", .{ key, _value.? });
                        }
                    }
                }.exec);

                std.log.info("Bucket forEach with Context", .{});
                var forEachCount: usize = 0;
                try bucket.forEachWithContext(&forEachCount, struct {
                    fn exec(ctxRef: *usize, _: *const db.Bucket, key: []const u8, _value: ?[]const u8) db.Error!void {
                        ctxRef.* += 1;
                        if (_value == null) {
                            std.log.info("this is a bucket, bucket name: {s}", .{key});
                        } else {
                            std.log.info("key: {s}, value: {s}", .{ key, _value.? });
                        }
                    }
                }.exec);
                std.log.info("forEachCount: {d}", .{forEachCount});
            }
        }.exec(&database);
    }

    // Database-level introspection.
    {
        const path = database.path();
        std.log.info("database's path: {s}", .{path});
        const str = database.string(allocator);
        defer allocator.free(str);
        std.log.info("database's string: {s}", .{str});
        const isReadOnly = database.isReadOnly();
        std.log.info("database's isReadOnly: {}", .{isReadOnly});
        try database.sync();
    }
}
174 |
--------------------------------------------------------------------------------
/src/mutex.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const Semaphore = std.Thread.Semaphore;
3 |
/// A mutex that can be shared between threads.
/// Built on a binary semaphore: one permit means unlocked, zero means held.
pub const Mutex = struct {
    sem: Semaphore = Semaphore{ .permits = 1 },

    const Self = @This();

    /// Returns a fresh, unlocked mutex.
    pub fn init() Mutex {
        return Mutex{};
    }

    /// Blocks until the mutex is acquired.
    pub fn lock(self: *Self) void {
        self.sem.wait();
    }

    /// Releases the mutex so another waiter may proceed.
    pub fn unlock(self: *Self) void {
        self.sem.post();
    }

    /// Attempts to acquire the mutex, giving up with error.Timeout after
    /// `timeout_ns` nanoseconds.
    pub fn tryLock(self: *Self, timeout_ns: usize) error{Timeout}!void {
        return self.sem.timedWait(timeout_ns);
    }
};
28 |
--------------------------------------------------------------------------------
/src/namespace.zig:
--------------------------------------------------------------------------------
1 | const page = @import("page.zig");
2 | const tx = @import("tx.zig");
3 | const db = @import("db.zig");
4 | const std = @import("std");
5 | const consts = @import("consts.zig");
6 | const DB = db.DB;
7 | const InnerBucket = @import("bucket.zig").Bucket;
8 | const cursor = @import("cursor.zig");
9 | pub const Error = @import("error.zig").Error;
10 | pub const Stats = db.Stats;
11 | pub const BucketStats = @import("bucket.zig").BucketStats;
12 | pub const TxStats = tx.TxStats;
13 | pub const PageInfo = page.PageInfo;
14 | pub const Options = consts.Options;
15 | pub const defaultOptions = consts.defaultOptions;
16 |
/// A bucket is a collection of key-value pairs.
pub const Bucket = struct {
    _bt: *@import("bucket.zig").Bucket,
    const Self = @This();

    /// Retrieves a nested bucket by name.
    /// Returns null if the bucket does not exist.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn bucket(self: Self, name: []const u8) ?Self {
        if (self._bt.getBucket(name)) |bt| {
            return .{ ._bt = bt };
        }
        return null;
    }

    /// Creates a new bucket at the given key and returns the new bucket.
    /// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn createBucket(self: Self, key: []const u8) Error!Bucket {
        const bt = try self._bt.createBucket(key);
        return .{ ._bt = bt };
    }

    /// Creates a new bucket if it doesn't already exist and returns a reference to it.
    /// Returns an error if the bucket name is blank, or if the bucket name is too long.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn createBucketIfNotExists(self: Self, key: []const u8) Error!Bucket {
        const bt = try self._bt.createBucketIfNotExists(key);
        return .{ ._bt = bt };
    }

    /// Deletes a bucket at the given key.
    /// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
    pub fn deleteBucket(self: *Self, key: []const u8) Error!void {
        return self._bt.deleteBucket(key);
    }

    /// Retrieves the value for a key in the bucket.
    /// Returns null if the key does not exist or if the key is a nested bucket.
    /// The returned value is only valid for the life of the transaction.
    pub fn get(self: *Self, key: []const u8) ?[]u8 {
        return self._bt.get(key);
    }

    /// Sets the value for a key in the bucket.
    /// If the key exists then its previous value will be overwritten.
    /// The supplied value must remain valid for the life of the transaction.
    /// Returns an error if the bucket was created from a read-only transaction,
    /// if the key is a bucket, if the key is too large, or if the value is too large.
    pub fn put(self: *Self, key: []const u8, value: []const u8) Error!void {
        const keyPair = consts.KeyPair{ .key = key, .value = value };
        return self._bt.put(keyPair);
    }

    /// Removes a key from the bucket.
    /// If the key does not exist then nothing is done and no error is returned.
    /// Returns an error if the bucket was created from a read-only transaction.
    /// TODO: add bool return indicate the key is deleted or not.
    pub fn delete(self: *Self, key: []const u8) Error!void {
        return self._bt.delete(key);
    }

    /// Creates a cursor associated with the bucket.
    /// The cursor is only valid as long as the transaction is open.
    /// Do not use a cursor after the transaction is closed.
    pub fn cursor(self: *Self) Cursor {
        return .{ ._cursor = self._bt.cursor() };
    }

    /// Updates the sequence number for the bucket.
    pub fn setSequence(self: *Self, v: u64) Error!void {
        return self._bt.setSequence(v);
    }

    /// Returns an autoincrementing integer for the bucket.
    pub fn nextSequence(self: *Self) Error!u64 {
        return self._bt.nextSequence();
    }

    /// Returns the root of the bucket.
    pub fn root(self: *const Self) u64 {
        return self._bt.root();
    }

    /// Returns stats on the bucket.
    pub fn stats(self: *const Self) BucketStats {
        return self._bt.stats();
    }

    /// Returns a newly allocated Transaction wrapper around the tx that owns this bucket.
    /// The wrapper is destroyed by its own commit()/rollback() methods.
    pub fn transaction(self: *Self) *Transaction {
        const allocator = self._bt.getAllocator();
        const trans = allocator.create(Transaction) catch unreachable;
        trans._tx = self._bt.getTx().?;
        // Bug fix: the `allocator` field was previously left undefined, so the
        // later `self.allocator.?.destroy(self)` in commit()/rollback() read
        // uninitialized memory. Record the allocator that created the wrapper.
        trans.allocator = allocator;
        return trans;
    }

    /// Returns whether the bucket is writable.
    pub fn writable(self: *const Self) bool {
        return self._bt.tx.?.writable;
    }

    /// Like forEachWithContext, but without a user context.
    pub fn forEach(self: *Self, func: fn (bt: *Bucket, key: []const u8, value: ?[]const u8) Error!void) Error!void {
        return self.forEachWithContext({}, struct {
            fn f(_: void, bt: *Bucket, key: []const u8, value: ?[]const u8) Error!void {
                return func(bt, key, value);
            }
        }.f);
    }

    /// Executes a function for each key/value pair in a bucket (a null value means the key is a nested bucket).
    /// If the provided function returns an error then the iteration is stopped and
    /// the error is returned to the caller. The provided function must not modify
    /// the bucket; this will result in undefined behavior.
    pub fn forEachWithContext(self: *Self, context: anytype, func: fn (ctx: @TypeOf(context), bt: *Bucket, key: []const u8, value: ?[]const u8) Error!void) Error!void {
        return self._bt.forEachContext(context, struct {
            fn f(ctx: @TypeOf(context), bt: *InnerBucket, keyPair: *const consts.KeyPair) Error!void {
                // Wrap the inner bucket so the callback sees the public type.
                var btRef = Bucket{ ._bt = bt };
                return func(ctx, &btRef, keyPair.key.?, keyPair.value);
            }
        }.f);
    }
};
141 |
/// A transaction is a read-write managed transaction.
pub const Transaction = struct {
    // Set when the wrapper was heap-allocated (Database.begin / Bucket.transaction);
    // null for stack wrappers created inside managed update()/view() callbacks,
    // where manual commit/rollback is documented to panic.
    allocator: ?std.mem.Allocator,
    _tx: *tx.TX,

    /// Writes all changes to disk and updates the meta page.
    /// Returns an error if a disk write error occurs, or if commit is
    /// called on a read-only transaction.
    /// The wrapper destroys itself on both the success and the error path,
    /// so `self` must not be used afterwards.
    pub fn commit(self: *Transaction) Error!void {
        _ = self._tx.commitAndDestroy() catch |err| {
            self.allocator.?.destroy(self);
            return err;
        };
        self.allocator.?.destroy(self);
    }

    /// Rolls back the transaction and destroys the transaction.
    /// The wrapper destroys itself on both paths, so `self` must not be
    /// used afterwards.
    pub fn rollback(self: *Transaction) Error!void {
        // Bug fix: this was `_ = return self._tx...`, which is a syntax error;
        // the discard and the error propagation are now expressed separately.
        _ = self._tx.rollbackAndDestroy() catch |err| {
            self.allocator.?.destroy(self);
            return err;
        };
        self.allocator.?.destroy(self);
    }

    /// Retrieves a bucket by name.
    /// Returns null if the bucket does not exist.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn bucket(self: *Transaction, name: []const u8) ?Bucket {
        if (self._tx.getBucket(name)) |bt| {
            return .{ ._bt = bt };
        }
        return null;
    }

    /// Creates a new bucket.
    /// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn createBucket(self: *Transaction, name: []const u8) Error!Bucket {
        const bt = try self._tx.createBucket(name);
        return .{ ._bt = bt };
    }

    /// Creates a new bucket if it doesn't already exist.
    /// Returns an error if the bucket name is blank, or if the bucket name is too long.
    /// The bucket instance is only valid for the lifetime of the transaction.
    pub fn createBucketIfNotExists(self: *Transaction, name: []const u8) Error!Bucket {
        const bt = try self._tx.createBucketIfNotExists(name);
        return .{ ._bt = bt };
    }

    /// Deletes a bucket.
    /// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
    pub fn deleteBucket(self: *Transaction, name: []const u8) Error!void {
        return self._tx.deleteBucket(name);
    }

    /// Returns the database that the transaction is associated with.
    pub fn database(self: *Transaction) Database {
        return Database{ ._db = self._tx.db };
    }

    /// Returns the ID of the transaction.
    pub fn id(self: *const Transaction) u64 {
        return self._tx.getID();
    }

    /// Returns the size of the transaction.
    pub fn size(self: *const Transaction) usize {
        return self._tx.getSize();
    }

    /// Returns true if the transaction is writable.
    pub fn writable(self: *const Transaction) bool {
        return self._tx.writable;
    }

    /// Returns the stats of the transaction.
    pub fn stats(self: *const Transaction) TxStats {
        return self._tx.getStats();
    }

    /// Check performs several consistency checks on the database for this transaction.
    /// An error is returned if any inconsistency is found.
    ///
    /// It can be safely run concurrently on a writable transaction. However, this
    /// incurs a high cost for large databases and databases with a lot of subbuckets
    /// because of caching. This overhead can be removed if running on a read-only
    /// transaction. However, this is not safe to execute with other writer
    /// transactions at the same time.
    pub fn check(self: *Transaction) Error!void {
        return self._tx.check();
    }

    // copy writes the entire database to a writer.
    // This function exists for backwards compatibility.
    //
    // Deprecated; Use writeTo() instead.
    pub fn copy(self: *Transaction) Error!void {
        return self._tx.copy();
    }

    /// Writes the entire database to a writer.
    /// Returns the number of bytes written.
    pub fn writeTo(self: *Transaction, writer: anytype) Error!usize {
        return self._tx.writeToAnyWriter(writer);
    }

    /// Returns a reference to the page with a given id.
    /// If page has been written to then a temporary buffered page is returned.
    pub fn page(self: *Transaction, _id: u64) !?PageInfo {
        return self._tx.getPageInfo(_id);
    }

    /// Adds a handler function to be executed after the transaction successfully commits.
    /// `onCtx` is forwarded as the first argument of `f`.
    pub fn onCommit(self: *Transaction, onCtx: *anyopaque, f: fn (
        ?*anyopaque,
        *Transaction,
    ) void) void {
        self._tx.onCommit(onCtx, struct {
            fn exec(ctx: *anyopaque, trans: *tx.TX) void {
                // Bug fix: the previous version discarded the callback's ctx
                // parameter and captured the runtime `onCtx` from the enclosing
                // scope (nested functions cannot capture runtime values), and
                // took the address of a `const` wrapper where `f` requires a
                // mutable *Transaction. The ctx now flows through the parameter.
                var _trans = Transaction{ .allocator = null, ._tx = trans };
                return f(ctx, &_trans);
            }
        }.exec);
    }

    /// Creates a cursor associated with the root bucket.
    pub fn cursor(self: *Transaction) Cursor {
        return .{ ._cursor = self._tx.cursor() };
    }
};
273 |
/// A database is the main entry point for interacting with BoltDB.
pub const Database = struct {
    _db: *DB,
    const Self = @This();

    /// Creates and opens a database at the given path.
    /// If the file does not exist then it will be created automatically.
    /// Passing in null options will cause Bolt to open the database with the default options.
    pub fn open(_allocator: std.mem.Allocator, filePath: []const u8, fileMode: ?std.fs.File.Mode, options: Options) !Self {
        const _db = try DB.open(_allocator, filePath, fileMode, options);
        return .{ ._db = _db };
    }

    /// Closes the database and releases all associated resources.
    pub fn close(self: *Self) !void {
        return self._db.close();
    }

    /// Returns the path to the currently open database file.
    pub fn path(self: *const Self) []const u8 {
        return self._db.path();
    }

    /// Returns the string representation of the database.
    /// Caller owns the returned slice and must free it with `_allocator`.
    pub fn string(self: *const Self, _allocator: std.mem.Allocator) []const u8 {
        return self._db.string(_allocator);
    }

    /// Syncs the database file to disk.
    pub fn sync(self: *Self) Error!void {
        try self._db.sync();
    }

    // Begin starts a new transaction.
    // Multiple read-only transactions can be used concurrently but only one write transaction
    // can be used at a time. Starting multiple write transactions will cause the calls to block
    // and be serialized until the current write transaction finishes.
    //
    // Transactions should not be dependent on one another. Opening a read
    // transaction and a write transaction in the same goroutine can cause the
    // writer to deadlock because the database periodically needs to re-map itself
    // as it grows and it cannot do that while a read transaction is open.
    //
    // If a long running read transaction (for example, a snapshot transaction) is
    // needed, you might want to set DB.initialMmapSize to a large enough value to
    // avoid potential blocking of the write transaction.
    //
    // *IMPORTANT*: You must close read-only transactions after you are finished or
    // else the database will not reclaim old pages.
    pub fn begin(self: *Self, writable: bool) Error!*Transaction {
        const innerTrans = try self._db.begin(writable);
        // NOTE(review): allocation failure aborts here because `Error` does not
        // carry OutOfMemory — confirm whether it should propagate instead.
        const trans = self._db.allocator.create(Transaction) catch unreachable;
        trans._tx = innerTrans;
        trans.allocator = self._db.allocator;
        return trans;
    }

    /// Executes a function within the context of a read-write managed transaction.
    /// If no error is returned from the function then the transaction is committed.
    /// If an error is returned then the entire transaction is rolled back.
    /// Any error that is returned from the function or returned from the commit is
    /// returned from the update() method.
    ///
    /// Attempting to manually commit or rollback within the function will cause a panic.
    pub fn update(self: *Self, execFn: fn (self: *Transaction) Error!void) Error!void {
        return self._db.update(struct {
            fn exec(innerTrans: *tx.TX) Error!void {
                var trans = Transaction{ .allocator = null, ._tx = innerTrans };
                return execFn(&trans);
            }
        }.exec);
    }

    /// Executes a function within the context of a read-write managed transaction.
    pub fn updateWithContext(self: *Self, context: anytype, execFn: fn (ctx: @TypeOf(context), self: *Transaction) Error!void) Error!void {
        return self._db.updateWithContext(context, struct {
            fn exec(_ctx: @TypeOf(context), trans: *tx.TX) Error!void {
                // Bug fix: the previous version discarded the ctx parameter and
                // captured the runtime `context` from the enclosing scope (nested
                // functions cannot capture runtime values), and passed the address
                // of a `const` wrapper where a *Transaction is required. This now
                // mirrors viewWithContext() below.
                var _trans = Transaction{ .allocator = null, ._tx = trans };
                return execFn(_ctx, &_trans);
            }
        }.exec);
    }

    /// Executes a function within the context of a managed read-only transaction.
    /// Any error that is returned from the function is returned from the view() method.
    ///
    /// Attempting to manually rollback within the function will cause a panic.
    pub fn view(self: *Self, func: fn (self: *Transaction) Error!void) Error!void {
        return self._db.view(struct {
            fn exec(innerTrans: *tx.TX) Error!void {
                var trans = Transaction{ ._tx = innerTrans, .allocator = null };
                return func(&trans);
            }
        }.exec);
    }

    /// Executes a function within the context of a managed read-only transaction.
    pub fn viewWithContext(self: *Self, context: anytype, func: fn (ctx: @TypeOf(context), self: *Transaction) Error!void) Error!void {
        return self._db.viewWithContext(context, struct {
            fn exec(_ctx: @TypeOf(context), innerTrans: *tx.TX) Error!void {
                var trans = Transaction{ .allocator = null, ._tx = innerTrans };
                return func(_ctx, &trans);
            }
        }.exec);
    }

    /// Returns true if the database is read-only.
    pub fn isReadOnly(self: *const Self) bool {
        return self._db.isReadOnly();
    }

    /// Returns the allocator of the database.
    pub fn allocator(self: *const Self) std.mem.Allocator {
        return self._db.allocator;
    }
};
386 |
/// An iterator that walks every key/value pair of a bucket in sorted key order.
/// Nested buckets appear with a null value.
///
/// A cursor is obtained from a transaction and is valid only while that
/// transaction is open; the keys and values it yields share the transaction's
/// lifetime.
///
/// Mutating data mid-traversal may invalidate the cursor and make it return
/// unexpected keys and/or values — reposition the cursor after any write.
pub const Cursor = struct {
    _cursor: cursor.Cursor,

    pub const KeyPair = consts.KeyPair;

    /// Releases the resources held by the underlying cursor.
    pub fn deinit(self: *Cursor) void {
        self._cursor.deinit();
    }

    /// Returns the bucket this cursor was created from.
    pub fn bucket(self: *Cursor) Bucket {
        return .{ ._bt = self._cursor.bucket() };
    }

    /// Positions the cursor on the first item and returns its key/value.
    /// Both are null when the bucket is empty.
    pub fn first(self: *Cursor) KeyPair {
        return self._cursor.first();
    }

    /// Advances the cursor and returns the next key/value.
    /// Both are null once the end of the bucket is reached.
    pub fn next(self: *Cursor) KeyPair {
        return self._cursor.next();
    }

    /// Positions the cursor on the last item and returns its key/value.
    /// Both are null when the bucket is empty.
    pub fn last(self: *Cursor) KeyPair {
        return self._cursor.last();
    }

    /// Steps the cursor back and returns the previous key/value.
    /// Both are null once the beginning of the bucket is reached.
    pub fn prev(self: *Cursor) KeyPair {
        return self._cursor.prev();
    }

    /// Removes the current key/value under the cursor from its bucket.
    /// Fails if the current entry is a bucket or the transaction is not writable.
    pub fn delete(self: *Cursor) Error!void {
        return self._cursor.delete();
    }

    /// Moves the cursor to the given key (or, if absent, to the next key after
    /// it) and returns the pair; the key is null when no key follows.
    /// The returned key and value are only valid for the life of the transaction.
    pub fn seek(self: *Cursor, key: []const u8) KeyPair {
        return self._cursor.seek(key);
    }
};
453 |
--------------------------------------------------------------------------------
/src/node_test.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const tests = @import("tests.zig");
3 | const Node = @import("node.zig").Node;
4 | const bucket = @import("bucket.zig");
5 | const tx = @import("tx.zig");
6 | const DB = @import("db.zig");
7 | const consts = @import("consts.zig");
8 | const assert = @import("assert.zig").assert;
9 |
// Ensure that a node can insert a key/value.
test "Node_put" {
    std.testing.log_level = .warn;
    // Spin up a temporary database and a writable transaction so the node can
    // be attached to a real bucket.
    var testContext = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testContext);
    const db = testContext.db;
    const trx = try db.begin(true);
    defer trx.rollbackAndDestroy() catch {};
    const b = try trx.createBucketIfNotExists("test");
    const node = Node.init(testContext.allocator);
    node.bucket = b;
    defer node.deinit();
    // Create mutable buffers for values
    var val2 = [_]u8{'2'};
    var val0 = [_]u8{'0'};
    var val1 = [_]u8{'1'};
    var val3 = [_]u8{'3'};

    // Insert keys out of order; the assertions below show the node keeps its
    // inodes sorted by key.
    _ = node.put("baz", "baz", &val2, 0, 0);
    _ = node.put("foo", "foo", &val0, 0, 0);
    _ = node.put("bar", "bar", &val1, 0, 0);
    // Re-putting "foo" must overwrite the old value and record the new flags,
    // not add a fourth inode.
    _ = node.put("foo", "foo", &val3, 0, consts.intFromFlags(.leaf));
    assert(node.inodes.items.len == 3, "it should have 3 inodes, but got {d}", .{node.inodes.items.len});

    // Keys come back in sorted order with the latest values.
    assert(std.mem.eql(u8, node.inodes.items[0].key.?, "bar"), "key should be bar", .{});
    assert(std.mem.eql(u8, node.inodes.items[0].value.?, "1"), "value should be 1", .{});
    assert(std.mem.eql(u8, node.inodes.items[1].key.?, "baz"), "key should be baz", .{});
    assert(std.mem.eql(u8, node.inodes.items[1].value.?, "2"), "value should be 2", .{});
    assert(std.mem.eql(u8, node.inodes.items[2].key.?, "foo"), "key should be foo", .{});
    assert(std.mem.eql(u8, node.inodes.items[2].value.?, "3"), "value should be 3", .{});
    assert(node.inodes.items[2].flags == consts.intFromFlags(.leaf), "flags should be leaf", .{});
}
42 |
// Ensure that a node can deserialize from a leaf page.
test "Node_read_LeafPage" {
    const Page = @import("page.zig").Page;
    std.testing.log_level = .warn;

    // A zeroed page-sized buffer acts as the raw page.
    var buffer = [_]u8{0} ** consts.PageSize;
    @memset(buffer[0..], 0);
    const p = Page.init(&buffer);
    p.count = 2;
    p.flags = consts.intFromFlags(.leaf);
    // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
    // Element 0: pos 32 from the element header reaches the data region
    // (2 headers * 16 bytes), key "bar" (3) + value "fooz" (4).
    p.leafPageElementPtr(0).* = .{
        .flags = 0,
        .pos = 32,
        .kSize = 3,
        .vSize = 4,
    };
    // Element 1 sits 16 bytes in, so pos 23 lands at data offset 39
    // (16 + 23), i.e. right after "barfooz": key "helloworld" (10) + "bye" (3).
    p.leafPageElementPtr(1).* = .{
        .flags = 0,
        .pos = 23,
        .kSize = 10,
        .vSize = 3,
    };
    // Write data for the nodes at the end.
    var data = p.leafPageElementDataPtr();
    std.mem.copyForwards(u8, data[0..7], "barfooz");
    std.mem.copyForwards(u8, data[7..20], "helloworldbye");

    // Deserialize page into a leaf.
    var node = Node.init(std.testing.allocator);
    defer node.deinit();
    node.read(p);

    // Check that there are two inodes with correct data.
    assert(node.inodes.items.len == 2, "it should have 2 inodes, but got {d}", .{node.inodes.items.len});
    assert(node.isLeaf, "it should be a leaf", .{});
    assert(std.mem.eql(u8, node.inodes.items[0].key.?, "bar"), "key should be bar", .{});
    assert(std.mem.eql(u8, node.inodes.items[0].value.?, "fooz"), "value should be fooz, but got {s}", .{node.inodes.items[0].value.?});
    assert(std.mem.eql(u8, node.inodes.items[1].key.?, "helloworld"), "key should be helloworld", .{});
    assert(std.mem.eql(u8, node.inodes.items[1].value.?, "bye"), "value should be bye", .{});
}
84 |
// Ensure that a node can serialize into a leaf page.
test "Node_write_LeafPage" {
    const Page = @import("page.zig").Page;
    std.testing.log_level = .err;
    var testContext = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testContext);
    const db = testContext.db;
    const trx = try db.begin(true);
    defer trx.rollbackAndDestroy() catch {};
    const b = try trx.createBucketIfNotExists("test");
    const node = Node.init(testContext.allocator);
    node.bucket = b;
    node.isLeaf = true;
    defer node.deinit();
    // Create mutable buffers for values
    var val1 = [_]u8{ 'q', 'u', 'e' };
    var val2 = [_]u8{ 'l', 'a', 'k', 'e' };
    var val3 = [_]u8{ 'j', 'o', 'h', 'n', 's', 'o', 'n' };
    // Keys are inserted out of order; the readback below shows they come back
    // sorted after a write/read round trip.
    _ = node.put("susy", "susy", &val1, 0, 0);
    _ = node.put("ricki", "ricki", &val2, 0, 0);
    _ = node.put("john", "john", &val3, 0, 0);
    // Write it to a page.
    var buffer = [_]u8{0} ** consts.PageSize;
    @memset(buffer[0..], 0);
    const p = Page.init(&buffer);

    _ = node.write(p);

    // Read the page back in.
    const n2 = Node.init(std.testing.allocator);
    defer n2.deinit();
    n2.read(p);
    // Check that the two pages are the same.
    assert(n2.inodes.items.len == 3, "it should have 3 inodes, but got {d}", .{n2.inodes.items.len});
    assert(std.mem.eql(u8, n2.inodes.items[0].key.?, "john"), "key should be john", .{});
    assert(std.mem.eql(u8, n2.inodes.items[0].value.?, "johnson"), "value should be johnson", .{});
    assert(std.mem.eql(u8, n2.inodes.items[1].key.?, "ricki"), "key should be ricki", .{});
    assert(std.mem.eql(u8, n2.inodes.items[1].value.?, "lake"), "value should be lake", .{});
    assert(std.mem.eql(u8, n2.inodes.items[2].key.?, "susy"), "key should be susy", .{});
    assert(std.mem.eql(u8, n2.inodes.items[2].value.?, "que"), "value should be que", .{});
}
126 |
// Ensure that a node can split into appropriate subgroups.
// NOTE(review): the split call itself is commented out below, so this test
// currently only exercises put(); confirm whether it should be re-enabled.
test "Node_split" {
    // NOTE(review): .debug is noisier than the other tests here (.warn/.err) —
    // confirm this is intentional.
    std.testing.log_level = .debug;
    var testContext = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testContext);
    const db = testContext.db;
    const trx = try db.begin(true);
    defer trx.rollbackAndDestroy() catch {};
    const b = try trx.createBucketIfNotExists("test");
    const node = Node.init(testContext.allocator);
    node.bucket = b;
    defer node.deinit();
    _ = node.put("00000001", "00000001", toValue("0123456701234567"), 0, 0);
    _ = node.put("00000002", "00000002", toValue("0123456701234567"), 0, 0);
    _ = node.put("00000003", "00000003", toValue("0123456701234567"), 0, 0);
    _ = node.put("00000004", "00000004", toValue("0123456701234567"), 0, 0);
    _ = node.put("00000005", "00000005", toValue("0123456701234567"), 0, 0);
    // Split between 2 & 3.
    // _ = node.split(100);
}
147 |
/// Casts away const so a string literal can be handed to APIs that take a
/// mutable slice. Callers in this file never mutate the result.
fn toValue(key: []const u8) []u8 {
    return @constCast(key);
}
151 |
--------------------------------------------------------------------------------
/src/page.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const db = @import("db.zig");
3 | const consts = @import("consts.zig");
4 | const PgidType = consts.PgidType;
5 | const PgIds = consts.PgIds;
6 | const util = @import("util.zig");
7 |
8 | /// A page.
9 | pub const Page = struct {
10 | // The page identifier.
11 | id: PgidType align(1) = 0,
12 | // The page flags.
13 | flags: u16 align(1) = 0,
14 | // The number of elements in the page.
15 | count: u16 align(1) = 0,
16 | // the number of overflow page
17 | overflow: u32 align(1) = 0,
18 |
19 | const Self = @This();
20 | // the size of this, but why align(4)?
21 | // pub const headerSize: usize = 16; // Has some bug if use @sizeOf(Page), when other file reference it;
22 |
    /// Initializes a page from a slice of bytes.
    /// The returned pointer aliases the slice's memory; the slice must be
    /// suitably aligned for Page (the @alignCast re-checks this in safe builds).
    pub fn init(slice: []u8) *Page {
        const ptr: *Page = @ptrCast(@alignCast(slice));
        return ptr;
    }
28 |
    /// Deinitializes a page by freeing its backing memory.
    /// `allocator` must be the allocator that created the buffer; the freed
    /// length is derived from the overflow count via asSlice().
    pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
        const ptr = self.asSlice();
        allocator.free(ptr);
    }
34 |
    /// Returns the size of the page header (the Page struct itself).
    pub inline fn headerSize() usize {
        return @sizeOf(Self);
    }
39 |
40 | /// Returns the type of the page.
41 | pub fn typ(self: *const Self) []const u8 {
42 | if (self.flags & consts.intFromFlags(.branch) != 0) {
43 | return "branch";
44 | } else if (self.flags & consts.intFromFlags(.leaf) != 0) {
45 | return "leaf";
46 | } else if (self.flags & consts.intFromFlags(.meta) != 0) {
47 | return "meta";
48 | } else if (self.flags & consts.intFromFlags(.freeList) != 0) {
49 | return "freelist";
50 | } else {
51 | return "unkown";
52 | }
53 | }
54 |
    /// Returns whether the page is a leaf page (the leaf flag bit is set).
    pub fn isLeaf(self: *const Self) bool {
        return self.flags & consts.intFromFlags(.leaf) != 0;
    }
59 |
60 | /// Returns a pointer to the metadata section of the page.
61 | pub fn meta(self: *Self) *db.Meta {
62 | const ptr: usize = self.getDataPtrInt();
63 | const _meta: *db.Meta = @ptrFromInt(ptr);
64 | return _meta;
65 | }
66 |
    // Retrieves the branch element at `index`.
    // Returns null when the index is out of range for this page's count.
    pub fn branchPageElement(self: *Self, index: usize) ?*BranchPageElement {
        if (self.count <= index) {
            return null;
        }
        // Branch elements are laid out back-to-back right after the page header.
        const basePtr = self.getDataPtrInt() + index * BranchPageElement.headerSize();
        const dPtr: *BranchPageElement = @ptrFromInt(basePtr);

        return dPtr;
    }
79 |
    /// Casts an opaque pointer to the given pointer type `T`,
    /// re-checking alignment in safe builds.
    pub fn opaqPtrTo(_: *Self, ptr: ?*anyopaque, comptime T: type) T {
        return @ptrCast(@alignCast(ptr));
    }
84 |
    /// Const variant of branchPageElement: returns the branch element at
    /// `index`, or null when the index is out of range for this page's count.
    pub fn branchPageElementRef(self: *const Self, index: usize) ?*const BranchPageElement {
        if (self.count <= index) {
            return null;
        }
        const basePtr = self.getDataPtrInt() + index * BranchPageElement.headerSize();
        const dPtr: *BranchPageElement = @ptrFromInt(basePtr);
        return dPtr;
    }
95 |
    /// Retrieves the leaf element at `index`.
    /// Returns null when the index is out of range for this page's count.
    pub fn leafPageElement(self: *Self, index: usize) ?*LeafPageElement {
        if (self.count <= index) {
            return null;
        }
        const ptr = self.getDataPtrInt() + index * LeafPageElement.headerSize();
        const dPtr: *LeafPageElement = @ptrFromInt(ptr);
        return dPtr;
    }
105 |
    /// Const variant of leafPageElement: returns the leaf element at `index`,
    /// or null when the index is out of range for this page's count.
    pub fn leafPageElementRef(self: *const Self, index: usize) ?*const LeafPageElement {
        if (self.count <= index) {
            return null;
        }
        const ptr = self.getDataPtrIntRef() + index * LeafPageElement.headerSize();
        const dPtr: *const LeafPageElement = @ptrFromInt(ptr);
        return dPtr;
    }
115 |
116 | /// Returns the pointer of index's leaf elements
117 | pub fn leafPageElementPtr(self: *Self, index: usize) *LeafPageElement {
118 | if (self.count <= index) {
119 | return undefined;
120 | }
121 | const ptr = self.getDataPtrInt() + index * LeafPageElement.headerSize();
122 | const dPtr: *LeafPageElement = @ptrFromInt(ptr);
123 | return dPtr;
124 | }
125 |
126 | /// Retrives a list of leaf nodes.
127 | pub fn leafPageElements(self: *Self) ?[]LeafPageElement {
128 | if (self.count == 0) {
129 | return null;
130 | }
131 | const firstPtr = self.leafPageElementPtr(0);
132 | var elements: [*]LeafPageElement = @ptrCast(firstPtr);
133 | return elements[0..self.count];
134 | }
135 |
    /// Returns a pointer to the data region that follows the leaf element
    /// headers (where the keys and values are stored).
    pub fn leafPageElementDataPtr(self: *Self) [*]u8 {
        const ptr = self.getDataPtrIntRef() + self.count * LeafPageElement.headerSize();
        const slice = @as([*]u8, @ptrFromInt(ptr));
        return slice;
    }
142 |
143 | /// Retrives a list of freelist page elements.
144 | pub fn freelistPageElements(self: *Self) ?[]PgidType {
145 | const ptr = self.getDataPtrInt();
146 | const aligned_ptr = std.mem.alignForward(usize, ptr, @alignOf(PgidType));
147 | const firstPtr: *PgidType = @ptrFromInt(aligned_ptr);
148 | var elements: [*]PgidType = @ptrCast(firstPtr);
149 | return elements[0..self.count];
150 | }
151 |
152 | /// Retrives a list of freelist page elements.
153 | pub fn freelistPageOverWithCountElements(self: *Self) ?[]PgidType {
154 | const ptr = self.getDataPtrInt();
155 | const aligned_ptr = std.mem.alignForward(usize, ptr, @alignOf(PgidType));
156 | const firstPtr: *PgidType = @ptrFromInt(aligned_ptr);
157 | var elements: [*]PgidType = @ptrCast(firstPtr);
158 | if (self.count == 0xFFFF) {
159 | const overflowCount = elements[0..1][0];
160 | return elements[0..@as(usize, overflowCount + 1)];
161 | } else {
162 | return elements[0..@as(usize, self.count)];
163 | }
164 | }
165 |
    /// Returns the address of the page header itself as an integer.
    pub fn ptrInt(self: *const Self) usize {
        return @intFromPtr(self);
    }
170 |
    /// Returns the address of the page data: the first byte after the
    /// fixed-size page header.
    pub fn getDataPtrInt(self: *const Self) usize {
        const ptr = @intFromPtr(self);
        return ptr + Self.headerSize();
    }
176 |
    /// Returns the address of the page data (first byte after the header).
    /// NOTE(review): byte-for-byte identical to `getDataPtrInt`; kept as a
    /// separate name, presumably for const-reference call sites — confirm
    /// whether the duplication is intentional.
    pub fn getDataPtrIntRef(self: *const Self) usize {
        const ptr = @intFromPtr(self);
        return ptr + Self.headerSize();
    }
182 |
    /// Returns a byte slice of the page data.
    /// NOTE: if the page is inline page, the slice's len maybe lt one page size.
    pub fn asSlice(self: *Self) []u8 {
        const slice: [*]u8 = @ptrCast(self);
        // The span covers this page plus its `overflow` continuation pages.
        const pageNum = self.overflow + 1;
        return slice[0..@as(usize, pageNum * consts.PageSize)];
    }
190 |
191 | /// find the key in the leaf page elements, if found, return the index and exact, if not found, return the position of the first element that is greater than the key
192 | pub fn searchLeafElements(self: *const Self, key: []const u8) struct { index: usize, exact: bool } {
193 | var left: usize = 0;
194 | var right: usize = self.count;
195 | // std.log.debug("searchLeafElements: {s}, count: {d}", .{ key, self.count });
196 | while (left < right) {
197 | const mid = left + (right - left) / 2;
198 | const element = self.leafPageElementRef(mid).?;
199 | const cmp = std.mem.order(u8, element.key(), key);
200 | switch (cmp) {
201 | .eq => return .{ .index = mid, .exact = true },
202 | .lt => left = mid + 1,
203 | .gt => right = mid,
204 | }
205 | }
206 | // if not found, return the position of the first element that is greater than the key
207 | return .{ .index = left, .exact = false };
208 | }
209 |
210 | /// find the key in the branch page elements, if found, return the index and exact, if not found, return the position of the first element that is greater than the key
211 | pub fn searchBranchElements(self: *const Self, key: []const u8) struct { index: usize, exact: bool } {
212 | var left: usize = 0;
213 | var right: usize = self.count;
214 | while (left < right) {
215 | const mid = left + (right - left) / 2;
216 | const element = self.branchPageElementRef(mid).?;
217 | const cmp = std.mem.order(u8, element.key(), key);
218 | switch (cmp) {
219 | .eq => return .{ .index = mid, .exact = true },
220 | .lt => left = mid + 1,
221 | .gt => right = mid,
222 | }
223 | }
224 | return .{ .index = left, .exact = false };
225 | }
226 | };
227 |
/// A branch page element.
///
/// |pageHeader| --> |element0|, |element1|, ... --> |key1| --> |key2| --> ...
/// `pos` is the byte offset of this element's key, measured from the start of
/// the element itself.
pub const BranchPageElement = struct {
    pos: u32 align(1) = 0,
    kSize: u32 align(1) = 0,
    pgid: PgidType align(1) = 0,

    const Self = @This();

    /// Returns the size of the branch page element header.
    pub inline fn headerSize() usize {
        return @sizeOf(Self);
    }

    /// Returns a byte slice of the node key.
    /// The key lives `pos` bytes past the element start; a const many-pointer
    /// suffices for a read-only view (no `@constCast` needed).
    pub fn key(self: *const Self) []const u8 {
        const base: [*]const u8 = @ptrCast(self);
        return base[self.pos .. self.pos + self.kSize];
    }
};
249 |
/// A leaf page element.
///
/// |pageHeader| --> |element0|, |element1|, ... --> |key1, value1| --> |key2, value2| --> ...
/// `pos` is the byte offset of this element's key, measured from the start of
/// the element itself; the value follows the key immediately.
pub const LeafPageElement = struct {
    flags: u32 align(1) = 0,
    pos: u32 align(1) = 0,
    kSize: u32 align(1) = 0,
    vSize: u32 align(1) = 0,

    const Self = @This();

    /// Returns the size of the leaf page element header.
    pub inline fn headerSize() usize {
        return @sizeOf(Self);
    }

    /// Returns a byte slice of the node key (read-only view, no cast away
    /// of constness needed).
    pub fn key(self: *const Self) []const u8 {
        const base: [*]const u8 = @ptrCast(self);
        return base[self.pos .. self.pos + self.kSize];
    }

    /// Returns a byte slice of the node value.
    /// NOTE: returns a *mutable* slice from a const receiver — callers write
    /// through it (see the page tests), hence the `@constCast`.
    pub fn value(self: *const Self) []u8 {
        const base: [*]u8 = @ptrCast(@constCast(self));
        const start = self.pos + self.kSize;
        return base[start .. start + self.vSize];
    }

    /// Pretty print the leaf page element.
    pub fn pretty(self: *const Self) void {
        std.log.debug("key: {s}, value: {s}", .{ self.key(), self.value() });
    }
};
284 |
/// PageInfo represents human readable information about a page.
pub const PageInfo = struct {
    id: PgidType = 0, // page id
    typ: []const u8 = "", // page type name (e.g. branch/leaf/meta/freelist)
    count: isize = 0, // number of elements on the page
    over_flow_count: isize = 0, // number of overflow pages following this one
};
292 |
/// Returns the sorted union of a and b.
/// NOTE(review): ownership is not uniform — when either input is empty the
/// *other input slice is returned as-is* (aliased, no allocation); otherwise a
/// fresh slice is allocated from `allocator` and the caller owns it. Confirm
/// call sites handle both cases before freeing the result.
pub fn merge(allocator: std.mem.Allocator, a: PgIds, b: PgIds) PgIds {
    // Return the opposite if one is nil.
    if (a.len == 0) {
        return b;
    }
    if (b.len == 0) {
        return a;
    }
    const merged = allocator.alloc(PgidType, a.len + b.len) catch unreachable;
    mergePgIds(merged, a, b);
    return merged;
}
306 |
// Copies the sorted union of a and b into dst.
// If dst is too small, it panics.
//
// Fixes over the previous version: (1) the empty-input fast paths copied the
// *empty* slice into dst, leaving dst untouched — they now copy the non-empty
// one; (2) `merged` was declared `const` and then incremented, which does not
// compile; (3) the lead/follow swap read `lead` after overwriting it. The
// body is now a plain two-way merge, which also avoids depending on the
// version-sensitive `std.sort.upperBound` signature.
fn mergePgIds(dst: PgIds, a: PgIds, b: PgIds) void {
    if (dst.len < (a.len + b.len)) {
        @panic("mergepids bad len");
    }

    // Copy in the opposite slice if one is nil.
    if (a.len == 0) {
        std.mem.copyForwards(PgidType, dst, b);
        return;
    }
    if (b.len == 0) {
        std.mem.copyForwards(PgidType, dst, a);
        return;
    }

    var i: usize = 0; // next unread element of a
    var j: usize = 0; // next unread element of b
    var merged: usize = 0; // next write position in dst

    // Standard stable two-way merge: take the smaller head each step.
    while (i < a.len and j < b.len) {
        if (b[j] < a[i]) {
            dst[merged] = b[j];
            j += 1;
        } else {
            dst[merged] = a[i];
            i += 1;
        }
        merged += 1;
    }

    // Append whatever is left in either input (at most one is non-empty).
    std.mem.copyForwards(PgidType, dst[merged..], a[i..]);
    merged += a.len - i;
    std.mem.copyForwards(PgidType, dst[merged..], b[j..]);
}
359 |
/// std.sort comparator: ascending order over page ids.
fn lessThanPid(context: void, lhs: PgidType, rhs: PgidType) bool {
    _ = context;
    return lhs < rhs;
}
364 |
test "page struct" {
    // Arena-backed allocation: everything is freed at once by area.deinit().
    var area = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer area.deinit();
    const allocator = area.allocator();
    const page = Page{ .id = 1, .flags = 2, .count = 1, .overflow = 1 };
    _ = page;
    const page_size = consts.PageSize;
    const slice = allocator.alloc(u8, page_size) catch unreachable;
    @memset(slice, 0);
    // Meta
    {
        std.debug.print("Test Meta\n", .{});
        // page1 and page2 view the SAME backing slice, so writes through
        // page2 (id, flags, meta fields) must be visible through page1.
        var page1 = Page.init(slice);
        var page2 = Page.init(slice);
        page2.id = 200;
        page2.flags = consts.intFromFlags(.leaf);
        page2.meta().*.version = 1;
        page2.meta().*.version = 2;
        try std.testing.expectEqual(page1.meta().*.version, 2);
        try std.testing.expectEqual(page1.meta().*.version, page2.meta().*.version);
        try std.testing.expectEqual(page1.flags, page2.flags);
    }
    @memset(slice, 0);
    // Branch
    {
        std.debug.print("Test Branch\n", .{});
        // Write 10 branch elements through mutable pointers, then read them
        // back through the const accessor.
        const pageRef = Page.init(slice);
        pageRef.count = 10;
        for (0..10) |i| {
            const branch = pageRef.branchPageElement(i);
            branch.?.pos = @as(u32, @intCast(i * 9 + 300));
            branch.?.kSize = @as(u32, @intCast(i + 1));
            branch.?.pgid = @as(u64, i + 2);
        }
        for (0..10) |i| {
            const branch = pageRef.branchPageElementRef(i);
            std.debug.print("{} {}\n", .{ branch.?, branch.?.pgid });
        }
    }
    @memset(slice, 0);
    std.debug.print("-------------------------------page size {}-----------\n", .{page_size});
    // Leaf
    {
        const pageRef = Page.init(slice);
        pageRef.count = 10;
        const n: usize = @as(usize, pageRef.count);
        // Element headers grow from the left (leftPos), key/value payloads
        // are packed from the right end of the page (rightPos) moving down.
        var leftPos = pageRef.getDataPtrInt();
        var rightPos: usize = @intFromPtr(slice.ptr) + page_size - 1;
        // store it
        for (0..n) |i| {
            const leaf = pageRef.leafPageElement(i).?;
            leaf.flags = 0;
            leaf.kSize = @as(u32, @intCast(i + 1));
            leaf.vSize = @as(u32, @intCast(i + 2));
            const kvSize = leaf.kSize + leaf.vSize;

            // pos is relative to the element's own address.
            leaf.pos = @as(u32, @intCast(rightPos - leftPos)) - kvSize;
            std.debug.assert(leaf.pos == pageRef.leafPageElement(i).?.pos);
            leftPos += LeafPageElement.headerSize();
            rightPos -= @as(usize, kvSize);
            const keyConst = leaf.key();
            var key: []u8 = @constCast(keyConst);
            for (0..key.len) |index| {
                key[index] = @as(u8, @intCast(index + 'B'));
            }
            const value = leaf.value();
            for (0..value.len) |index| {
                value[index] = @as(u8, @intCast(index + 'E'));
            }
        }
        const leafElements = pageRef.leafPageElements();
        for (leafElements.?) |leaf| {
            std.debug.print("{?}\n", .{leaf});
        }
    }
}
441 |
// Informational only: prints the sizes/alignments the on-disk layout relies on.
test "check alignments" {
    std.debug.print("\n", .{});
    std.debug.print("Page alignment: {}\n", .{@alignOf(Page)});
    std.debug.print("Page size: {}\n", .{@sizeOf(Page)});
    std.debug.print("PgidType alignment: {}\n", .{@alignOf(PgidType)});
    std.debug.print("u16 alignment: {}\n", .{@alignOf(u16)});
    std.debug.print("u32 alignment: {}\n", .{@alignOf(u32)});
}
450 |
--------------------------------------------------------------------------------
/src/pretty_table.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
/// A terminal foreground color.
pub const Color = enum {
    Red,
    Green,
    Yellow,
    Blue,
    Magenta,
    Cyan,
    White,

    /// Map the color to its ANSI SGR foreground escape sequence.
    pub fn ansiCode(self: Color) []const u8 {
        switch (self) {
            .Red => return "\x1b[31m",
            .Green => return "\x1b[32m",
            .Yellow => return "\x1b[33m",
            .Blue => return "\x1b[34m",
            .Magenta => return "\x1b[35m",
            .Cyan => return "\x1b[36m",
            .White => return "\x1b[37m",
        }
    }
};
26 |
/// ANSI escape sequence that resets all terminal color/attribute state.
pub const ResetColor = "\x1b[0m";
29 |
/// A fixed-column-width ASCII table printed to stdout with a colored header.
/// All header and cell strings are duplicated into the table's allocator and
/// freed by `deinit`; only `name` is borrowed and must outlive the table.
pub const Table = struct {
    headers: std.ArrayList([]const u8),
    rows: std.ArrayList(std.ArrayList([]const u8)),
    columnWidth: usize,
    allocator: std.mem.Allocator,
    headerColor: Color,
    name: []const u8,

    /// Init a table. `name` is borrowed, not copied.
    pub fn init(allocator: std.mem.Allocator, columnWidth: usize, headerColor: Color, name: []const u8) @This() {
        return .{
            .headers = std.ArrayList([]const u8).init(allocator),
            .rows = std.ArrayList(std.ArrayList([]const u8)).init(allocator),
            .columnWidth = columnWidth,
            .allocator = allocator,
            .headerColor = headerColor,
            .name = name,
        };
    }

    /// Deinit a table: frees every duplicated header/cell string and the lists.
    pub fn deinit(self: *@This()) void {
        for (self.headers.items) |header| {
            self.allocator.free(header);
        }
        self.headers.deinit();
        for (self.rows.items) |row| {
            for (row.items) |cell| {
                self.allocator.free(cell);
            }
            row.deinit();
        }
        self.rows.deinit();
    }

    /// Add a tuple of strings as column headers (each string is duplicated).
    pub fn addHeader(self: *@This(), comptime header: anytype) !void {
        inline for (header) |cell| {
            const cp = try self.allocator.dupe(u8, cell);
            try self.headers.append(cp);
        }
    }

    /// Add a row to the table. Integer and bool cells are formatted to text;
    /// everything else is printed with "{s}". All cell strings end up owned
    /// by the table.
    pub fn addRow(self: *@This(), row: anytype) !void {
        var rowList = std.ArrayList([]const u8).init(self.allocator);
        inline for (row) |cell| {
            const cellStr = switch (@TypeOf(cell)) {
                u64, usize, i64, isize, u32, i32, u16, i16, u8, i8 => try std.fmt.allocPrint(self.allocator, "{d}", .{cell}),
                bool => try std.fmt.allocPrint(self.allocator, "{s}", .{if (cell) "true" else "false"}),
                else => try std.fmt.allocPrint(self.allocator, "{s}", .{cell}),
            };
            try rowList.append(cellStr);
        }
        try self.rows.append(rowList);
    }

    /// Print the table (name banner, colored header row, data rows) to stdout.
    pub fn print(self: @This()) !void {
        const writer = std.io.getStdOut().writer();

        // calculate the total width of the table: columns + separators.
        const totalWidth = self.columnWidth * self.headers.items.len + self.headers.items.len + 1;
        const nameLen = self.name.len;
        const leftPadding = if (totalWidth > nameLen) (totalWidth - nameLen) / 2 else 0;
        const rightPadding = if (totalWidth > nameLen + leftPadding) totalWidth - nameLen - leftPadding else 0;

        try writer.writeByteNTimes('-', leftPadding);
        try writer.print(" {s} ", .{self.name});
        try writer.writeByteNTimes('-', rightPadding);
        try writer.print("\n", .{});

        // print the top separator
        try self.printSeparator(writer);

        // print the header (with color)
        try writer.print("{s}", .{self.headerColor.ansiCode()});
        try self.printRow(writer, self.headers.items);
        try writer.print("{s}\n", .{ResetColor});

        // print the separator between the header and the data
        try self.printSeparator(writer);

        // print the data rows
        for (self.rows.items) |row| {
            try self.printRow(writer, row.items);
            try writer.print("\n", .{});
        }

        // print the bottom separator
        try self.printSeparator(writer);
    }

    /// Write a `+----+----+` separator line matching the column layout.
    fn printSeparator(self: @This(), writer: anytype) !void {
        try writer.writeByte('+');
        for (self.headers.items) |_| {
            try writer.writeByteNTimes('-', self.columnWidth);
            try writer.writeByte('+');
        }
        try writer.print("\n", .{});
    }

    /// Write one `|cell|cell|` line, centering short cells and truncating
    /// long ones with an ellipsis.
    fn printRow(self: @This(), writer: anytype, row: []const []const u8) !void {
        try writer.writeByte('|');
        for (row) |cell| {
            var cellLen: usize = cell.len;
            if (cellLen > self.columnWidth) {
                cellLen = self.columnWidth;
            }
            const padding = if (cellLen < self.columnWidth) (self.columnWidth - cellLen) / 2 else 0;
            try writer.writeByteNTimes(' ', padding);
            if (cell.len > self.columnWidth) {
                // Truncate with an ellipsis. Guard against columnWidth < 3:
                // `columnWidth - 3` would underflow usize and panic/UB.
                if (self.columnWidth > 3) {
                    try writer.print("{s}...", .{cell[0 .. self.columnWidth - 3]});
                } else {
                    try writer.print("{s}", .{cell[0..self.columnWidth]});
                }
            } else {
                try writer.print("{s}", .{cell});
                try writer.writeByteNTimes(' ', self.columnWidth - cellLen - padding);
            }
            try writer.writeByte('|');
        }
    }
};
152 |
--------------------------------------------------------------------------------
/src/root.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const testing = std.testing;
3 |
/// Exported (C-ABI) addition of two 32-bit signed integers.
export fn add(a: i32, b: i32) i32 {
    const sum: i32 = b + a;
    return sum;
}
7 |
// Sanity check for the exported `add`, with debug-level test logging enabled.
test "basic add functionality" {
    std.testing.log_level = .debug;
    std.log.warn("run test", .{});
    try testing.expect(add(3, 7) == 10);
}
13 |
// Reference the package's test files so `zig test src/root.zig` (as run in
// CI) discovers and executes all of their test blocks.
test {
    _ = @import("cursor_test.zig");
    _ = @import("node_test.zig");
    _ = @import("bucket_test.zig");
    _ = @import("tx_test.zig");
    _ = @import("page.zig");
}
21 |
--------------------------------------------------------------------------------
/src/tests.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const db = @import("db.zig");
3 | const DB = db.DB;
4 | const node = @import("node.zig");
5 | const consts = @import("consts.zig");
6 | const Error = @import("error.zig").Error;
7 |
// A tuple of two values.
pub const Tuple = struct {
    /// Build a pair type with fields `first` and `second`.
    pub fn t2(comptime First: type, comptime Second: type) type {
        return struct {
            first: First,
            second: Second,
        };
    }

    /// Build a triple type with fields `first`, `second` and `third`.
    pub fn t3(comptime First: type, comptime Second: type, comptime Third: type) type {
        return struct {
            first: First,
            second: Second,
            third: Third,
        };
    }
};
24 |
/// A test context bundling an allocator with the opened database.
pub const TestContext = struct {
    allocator: std.mem.Allocator,
    db: *DB,

    /// Allocates `bufSize` usize slots and fills them with a shuffled
    /// permutation via `randomBuf`. Caller frees the returned slice.
    pub fn generateBytes(self: @This(), bufSize: usize) []usize {
        const out = self.allocator.alloc(usize, bufSize) catch unreachable;
        randomBuf(out);
        return out;
    }

    /// Allocates a `bufferSize`-byte slice with every byte set to `c`.
    /// Caller frees the returned slice.
    pub fn repeat(self: @This(), c: u8, bufferSize: usize) []u8 {
        const out = self.allocator.alloc(u8, bufferSize) catch unreachable;
        for (out) |*b| b.* = c;
        return out;
    }
};
41 |
/// Setup a test context.
/// Opens a fresh writable DB on a temp file with a large initial mmap size so
/// tests rarely need to grow the mapping.
pub fn setup(allocator: std.mem.Allocator) !TestContext {
    var options = consts.defaultOptions;
    options.readOnly = false;
    options.initialMmapSize = 100000 * consts.PageSize;
    return setupWithOptions(allocator, options);
}
49 |
/// Setup a test context with custom options.
/// Creates a temp file, opens a DB on its path, and returns the context.
/// NOTE(review): the TmpDir handle inside createTmpFile's result is dropped
/// here, so this path never calls cleanup on the temp directory — confirm
/// teardown/CI handles the leftover directory.
pub fn setupWithOptions(allocator: std.mem.Allocator, options: consts.Options) !TestContext {
    const tmpFile = createTmpFile();
    const filePath = tmpFile.path(allocator);
    // The DB reopens the file by path; the creation handle can be closed.
    defer tmpFile.file.close();
    defer allocator.free(filePath);
    const kvDB = DB.open(allocator, filePath, null, options) catch unreachable;
    return TestContext{ .allocator = allocator, .db = kvDB };
}
59 |
/// Teardown a test context: closes the DB and deletes its backing file.
/// The path is duplicated before close, presumably because closing the DB
/// frees the memory behind `db.path()` — verify against DB.close.
pub fn teardown(ctx: *TestContext) void {
    std.log.debug("teardown", .{});
    const path = ctx.allocator.dupe(u8, ctx.db.path()) catch unreachable;
    ctx.db.close() catch unreachable;
    std.fs.cwd().deleteFile(path) catch unreachable;
    std.log.debug("delete dirty file: {s}\n", .{path});
    ctx.allocator.free(path);
}
69 |
/// Teardown a test context without deleting the database file.
/// Useful for tests that reopen the same file afterwards.
pub fn teardownNotDeleteDB(ctx: *TestContext) void {
    std.log.debug("teardown", .{});
    ctx.db.close() catch unreachable;
}
75 |
/// Fill `buf` with the values 0..buf.len-1, then shuffle them (Fisher-Yates,
/// deterministically seeded with the buffer length).
/// Handles empty and single-element buffers: the previous version computed
/// `buf.len - 1` unconditionally, which underflows usize for an empty slice.
pub fn randomBuf(buf: []usize) void {
    for (buf, 0..) |*slot, i| {
        slot.* = i;
    }
    // Nothing to shuffle for 0 or 1 elements (and `len - 1` would underflow
    // on an empty buffer).
    if (buf.len < 2) return;
    var prng = std.Random.DefaultPrng.init(buf.len);
    const random = prng.random();
    var i: usize = buf.len - 1;
    while (i > 0) : (i -= 1) {
        const j = random.intRangeAtMost(usize, 0, i);
        std.mem.swap(usize, &buf[i], &buf[j]);
    }
}
89 |
/// Reports whether the process environment contains ZIG_TEST_MODEL=long.
/// POSIX-only: reads std.os.environ directly.
pub fn isLongModel() bool {
    for (std.os.environ) |entry| {
        const text = std.mem.span(entry); // null-terminated C string -> slice
        if (!std.mem.startsWith(u8, text, "ZIG_TEST_MODEL")) continue;
        std.log.warn("env: {s}", .{text});
        if (std.mem.endsWith(u8, text, "ZIG_TEST_MODEL=long")) {
            return true;
        }
    }
    return false;
}
104 |
/// Create a temporary file named "bolt.db.tmp" inside a fresh std.testing
/// tmp dir. The returned struct owns both the file handle and the TmpDir.
pub fn createTmpFile() struct {
    file: std.fs.File,
    tmpDir: std.testing.TmpDir,

    /// Close the file and remove the temporary directory tree.
    pub fn deinit(self: *@This()) void {
        self.file.close();
        self.tmpDir.cleanup();
    }

    /// Returns the absolute path of the temp file; caller frees the result.
    /// NOTE(review): hard-codes the ".zig-cache/tmp" layout that
    /// std.testing.tmpDir happens to use — verify against the targeted Zig
    /// version if this breaks.
    pub fn path(self: @This(), allocator: std.mem.Allocator) []const u8 {
        // First get the relative path
        const relative_path = std.fmt.allocPrint(allocator, ".zig-cache/tmp/{s}/{s}", .{ self.tmpDir.sub_path, "bolt.db.tmp" }) catch unreachable;
        defer allocator.free(relative_path);

        // Convert to absolute path
        const absolute_path = std.fs.cwd().realpathAlloc(allocator, relative_path) catch unreachable;
        return absolute_path;
        // const name = std.fmt.allocPrint(allocator, ".zig-cache/tmp/{s}/{s}", .{ self.tmpDir.sub_path, "bolt.db.tmp" }) catch unreachable;
        // return name;
    }
} {
    var tmpDir = std.testing.tmpDir(.{});
    const file = tmpDir.dir.createFile("bolt.db.tmp", .{}) catch unreachable;
    return .{
        .file = file,
        .tmpDir = tmpDir,
    };
}
134 |
/// Create a file named `name` inside a fresh std.testing tmp dir and return
/// the open handle.
/// NOTE(review): the TmpDir handle is discarded, so the directory is never
/// cleaned up and its descriptor leaks — acceptable in tests, but confirm.
pub fn createFile(name: []const u8) std.fs.File {
    var tmpDir = std.testing.tmpDir(.{});
    return tmpDir.dir.createFile(name, .{}) catch unreachable;
}
140 |
/// Get a temporary file path (defaults to "bolt.db.tmp" when `name` is null).
/// The returned string is allocated with std.testing.allocator; caller frees.
/// NOTE(review): the TmpDir handle is discarded (never cleaned up), and only
/// the sub_path is used, so the file itself is not created here.
pub fn getTmpFilePath(name: ?[]const u8) []const u8 {
    const tmpDir = std.testing.tmpDir(.{});
    if (name) |n| {
        return std.fmt.allocPrint(std.testing.allocator, "{s}/{s}", .{ tmpDir.sub_path, n }) catch unreachable;
    } else {
        return std.fmt.allocPrint(std.testing.allocator, "{s}/bolt.db.tmp", .{tmpDir.sub_path}) catch unreachable;
    }
}
150 |
/// testing/quick defaults to 5 iterations and a random seed.
/// You can override these settings from the command line:
/// -quick.count The number of iterations to perform.
/// -quick.seed The seed to use for randomizing.
/// -quick.maxitems The maximum number of items to insert into a DB.
/// -quick.maxksize The maximum size of a key.
/// -quick.maxvsize The maximum size of a value.
pub const Quick = struct {
    count: usize = 5,
    seed: u64 = 0,
    maxItems: usize = 10000,
    maxKeySize: usize = 1024,
    maxValueSize: usize = 1024,
    // Stays undefined until `generate` is called.
    items: std.ArrayList(TestDataItem) = undefined,
    allocator: std.mem.Allocator,

    /// Initialize a Quick instance with a time-based seed.
    pub fn init(allocator: std.mem.Allocator) Quick {
        return Quick{
            .allocator = allocator,
            .seed = @intCast(std.time.microTimestamp()),
            .items = undefined,
        };
    }

    /// Free the generated items. Requires a prior successful `generate`.
    pub fn deinit(self: *@This()) void {
        for (self.items.items) |item| {
            self.allocator.free(item.key);
            self.allocator.free(item.value);
        }
        self.items.deinit();
    }

    /// Generate a set of test data and take ownership of it in `self.items`.
    pub fn generate(self: *@This(), allocator: std.mem.Allocator) !std.ArrayList(TestDataItem) {
        var randItems = try RevTestData.generate(allocator, self);
        const slice = try randItems.toOwnedSlice();
        self.items = std.ArrayList(TestDataItem).fromOwnedSlice(self.allocator, slice);
        return self.items;
    }

    /// Sort the items by key (ascending, lexicographic).
    pub fn sort(self: *@This()) void {
        std.mem.sort(TestDataItem, self.items.items, {}, struct {
            fn lessThan(_: void, lhs: TestDataItem, rhs: TestDataItem) bool {
                return std.mem.lessThan(u8, lhs.key, rhs.key);
            }
        }.lessThan);
    }

    /// Sort the items by key in descending order.
    pub fn reverse(self: *@This()) void {
        self.sort();
        std.mem.reverse(TestDataItem, self.items.items);
    }

    /// Run `travel` against `context` up to the configured iteration count,
    /// collecting every error it returns. With a null `config`, a default
    /// Config (100 iterations) is used.
    pub fn checkWithContext(self: *@This(), context: anytype, config: ?Config, comptime travel: fn (@TypeOf(context)) Error!void) std.ArrayList(Error) {
        // Function parameters are immutable in Zig, so the null case is
        // handled with a local default rather than assigning to `config`
        // (the previous version did `config = .{...}`, which cannot compile).
        const cfg: Config = config orelse Config{};
        const maxCount = cfg.getMaxCount();

        var errors = std.ArrayList(Error).init(self.allocator);
        for (0..maxCount) |_| {
            travel(context) catch |err| {
                errors.append(err) catch unreachable;
            };
        }
        return errors;
    }
};
229 |
/// Iteration/randomness configuration for Quick checks.
pub const Config = struct {
    // MaxCount sets the maximum number of iterations.
    // If zero, MaxCountScale is used.
    maxCount: usize = 0,
    // MaxCountScale is a non-negative scale factor applied to the
    // default maximum.
    // A count of zero implies the default, which is usually 100
    // but can be set by the -quickchecks flag.
    maxCountScale: f64 = 1.0,
    maxKeySize: usize = 1024,
    // Rand specifies a source of random numbers.
    // If nil, a default pseudo-random source will be used.
    rand: ?std.Random.Xoshiro256 = null,

    /// Returns a Random interface over `rand`, or over a default PRNG
    /// seeded with 0.
    /// NOTE(review): both branches bind the returned std.Random to PRNG state
    /// that lives on this call's stack (`r` is a by-value capture, and the
    /// DefaultPrng in the else branch is a temporary), so the returned
    /// interface may point at dead memory once this function returns —
    /// confirm callers, or rework to take the PRNG by pointer.
    pub fn getRand(self: @This()) std.Random {
        if (self.rand) |r| {
            return r.random();
        } else {
            return std.Random.DefaultPrng.init(0).random();
        }
    }

    /// Effective iteration count: explicit `maxCount` wins; otherwise
    /// `maxCountScale * 100`, falling back to 100.
    pub fn getMaxCount(self: @This()) usize {
        if (self.maxCount == 0) {
            if (self.maxCountScale != 0) {
                const count: f64 = self.maxCountScale * 100.0;
                return @as(usize, @intFromFloat(count));
            }
            return 100;
        } else {
            return self.maxCount;
        }
    }
};
264 |
/// A test data item.
/// Both slices are allocated by the generator and freed by `Quick.deinit`.
pub const TestDataItem = struct {
    key: []u8,
    value: []u8,
};
270 |
/// A test data generator.
pub const RevTestData = struct {
    const Self = @This();

    /// Generate between 1 and `q.maxItems` items with unique random keys and
    /// random values, all allocated from `allocator` (caller owns them).
    pub fn generate(
        allocator: std.mem.Allocator,
        q: *const Quick,
    ) !std.ArrayList(TestDataItem) {
        var prng = std.Random.DefaultPrng.init(q.seed);
        const random = prng.random();
        const n = random.intRangeAtMost(usize, 1, q.maxItems);
        var items = std.ArrayList(TestDataItem).init(allocator);
        try items.appendNTimes(TestDataItem{ .key = undefined, .value = undefined }, n);
        // Tracks already-used keys to guarantee uniqueness. The map borrows
        // the key slices that end up owned by `items`, so only the map
        // structure itself is deinitialized here.
        var used = std.StringHashMap(bool).init(allocator);
        defer used.deinit();
        for (0..items.items.len) |i| {
            while (true) {
                const randBytes = try Self.randByteSlice(allocator, random, 1, q.maxKeySize);
                const got = try used.getOrPut(randBytes);
                if (got.found_existing) {
                    // Duplicate key: discard and retry.
                    allocator.free(randBytes);
                    continue;
                } else {
                    got.value_ptr.* = true;
                    items.items[i].key = randBytes;
                }
                break;
            }

            const randBytes = try Self.randByteSlice(allocator, random, 1, q.maxValueSize);
            items.items[i].value = randBytes;
        }
        return items;
    }

    /// Allocate a buffer of uniform random bytes whose length is drawn from
    /// [minSize, maxSize]. Caller owns the result.
    fn randByteSlice(allocator: std.mem.Allocator, random: std.Random, minSize: usize, maxSize: usize) ![]u8 {
        const n = random.intRangeAtMost(usize, minSize, maxSize);
        const b = try allocator.alloc(u8, n);
        // std.Random.bytes fills the whole slice uniformly — same
        // distribution as the previous per-byte intRangeAtMost loop.
        random.bytes(b);
        return b;
    }
};
316 |
/// A copy writer.
/// Adapts an ArrayList(u8) into a std.io.Writer that appends everything
/// written into `buffer`.
pub const CopyWriter = struct {
    buffer: std.ArrayList(u8),
    /// Append bytes to the buffer. Always consumes the full slice.
    pub fn appendWriter(self: *CopyWriter, bytes: []const u8) error{OutOfMemory}!usize {
        try self.buffer.appendSlice(bytes);
        return bytes.len;
    }
    /// Get a writer backed by this struct's buffer.
    pub fn writer(self: *CopyWriter) std.io.Writer(*CopyWriter, error{OutOfMemory}, appendWriter) {
        return .{ .context = self };
    }
};
330 |
--------------------------------------------------------------------------------
/src/tx_test.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const tx = @import("tx.zig");
3 | const tests = @import("tests.zig");
4 | const Error = @import("error.zig").Error;
5 | const assert = @import("util.zig").assert;
6 | const consts = @import("consts.zig");
7 | const PageSize = consts.PageSize;
8 | const defaultOptions = consts.defaultOptions;
9 | const KeyPair = consts.KeyPair;
10 |
// Ensure that committing a closed transaction returns an error.
test "Tx_Commit_ErrTxClosed" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    {
        const trx = try kvDB.begin(true);
        defer trx.destroy();
        _ = try trx.createBucket("foo");

        try trx.commit();
        // A second commit must fail with TxClosed. expectError also fails the
        // test if the commit unexpectedly succeeds — the previous `catch`
        // form silently passed on that path.
        try std.testing.expectError(Error.TxClosed, trx.commit());
    }
    {
        const trx = try kvDB.begin(true);
        _ = try trx.createBucketIfNotExists("foo");
        try trx.commitAndDestroy();
    }
    {
        const trx = try kvDB.begin(true);
        defer trx.destroy();
        _ = try trx.createBucketIfNotExists("foo");
        try trx.commit();
    }
}
39 |
// Ensure that rolling back a closed transaction returns an error.
test "Tx_Rollback_ErrTxClosed" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    {
        const trx = try kvDB.begin(true);
        defer trx.destroy();
        try trx.rollback();
        // A second rollback must fail with TxClosed; expectError also fails
        // the test if it unexpectedly succeeds (the old `catch` form did not).
        try std.testing.expectError(Error.TxClosed, trx.rollback());
    }

    {
        const trx = try kvDB.begin(true);
        try trx.rollbackAndDestroy();
    }
}
60 |
// Ensure that committing a read-only transaction returns an error.
test "Tx_Commit_ErrTxNotWritable" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const trx = try kvDB.begin(false);
    defer trx.rollbackAndDestroy() catch unreachable;
    // Commit on a read-only tx must fail; expectError also fails the test if
    // it unexpectedly succeeds (the old `catch` form did not).
    try std.testing.expectError(Error.TxNotWriteable, trx.commit());
}
73 |
// Ensure that a transaction can retrieve a cursor on the root bucket.
test "Tx_Cursor" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            _ = try trx.createBucket("widgets");
            _ = try trx.createBucket("woojits");
            var cursor = trx.cursor();
            defer cursor.deinit();
            // The root cursor iterates bucket names in sorted order, then
            // yields a null key once exhausted.
            var keyPair = cursor.first();
            assert(std.mem.eql(u8, keyPair.key.?, "widgets"), "expected key 'widgets'", .{});
            keyPair = cursor.next();
            assert(std.mem.eql(u8, keyPair.key.?, "woojits"), "expected key 'woojits'", .{});
            keyPair = cursor.next();
            assert(keyPair.key == null, "expected nil key", .{});
        }
    }.exec);
}
95 |
// Ensure that creating a bucket with a read-only transaction returns an error.
test "Tx_CreateBucket_ErrTxNotWritable" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    try kvDB.view(struct {
        fn exec(trx: *tx.TX) Error!void {
            // createBucket must fail on a read-only tx. The if/else capture
            // also catches the case where it unexpectedly succeeds — the
            // previous bare `catch` silently passed on that path.
            if (trx.createBucket("widgets")) |_| {
                assert(false, "expected error TxNotWriteable", .{});
            } else |err| {
                assert(err == Error.TxNotWriteable, "expected error TxNotWriteable", .{});
            }
        }
    }.exec);
}
110 |
// Ensure that creating a bucket on a closed transaction returns an error.
test "Tx_CreateBucket_ErrTxClosed" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const trx = try kvDB.begin(true);
    defer trx.destroy();
    try trx.commit();
    // createBucket after commit must fail with TxClosed; expectError also
    // fails the test if it unexpectedly succeeds (the old `catch` did not).
    try std.testing.expectError(Error.TxClosed, trx.createBucket("widgets"));
}
124 |
// Ensure that a Tx can retrieve a bucket.
test "Tx_Bucket" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            // A bucket created inside the same transaction is immediately
            // visible to getBucket.
            _ = try trx.createBucket("widgets");
            const bucket = trx.getBucket("widgets");
            assert(bucket != null, "expected bucket 'widgets'", .{});
        }
    }.exec);
}
139 |
// Looking up a key that was never written returns null.
test "Tx_Get_NotFound" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            const bkt = try t.createBucket("widgets");
            try bkt.put(KeyPair.init("foo", "bar"));
            assert(bkt.get("no_such_key") == null, "expected nil value", .{});
        }
    }.run);
}
155 |
// A bucket created in one transaction is visible to a later transaction.
test "Tx_CreateBucket" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    // Create the bucket inside a writable transaction.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            _ = try t.createBucket("widgets");
        }
    }.run);
    // Read the bucket through a separate transaction.
    try database.view(struct {
        fn run(t: *tx.TX) Error!void {
            assert(t.getBucket("widgets") != null, "expected bucket 'widgets'", .{});
        }
    }.run);
}
175 |
// createBucketIfNotExists is idempotent: the second call with the same name
// succeeds and the bucket is visible afterwards.
test "Tx_CreateBucketIfNotExists" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            _ = try t.createBucketIfNotExists("widgets");
            _ = try t.createBucketIfNotExists("widgets");
        }
    }.run);

    // Read the bucket through a separate transaction.
    try database.view(struct {
        fn run(t: *tx.TX) Error!void {
            assert(t.getBucket("widgets") != null, "expected bucket 'widgets'", .{});
        }
    }.run);
}
197 |
// An empty bucket name is rejected by createBucketIfNotExists.
test "Tx_CreateBucketIfNotExists_ErrBucketNameRequired" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            if (t.createBucketIfNotExists("")) |_| {} else |err| {
                assert(err == Error.BucketNameRequired, "expected error BucketNameRequired", .{});
            }
        }
    }.run);
}
212 |
// Creating the same bucket twice must fail with BucketExists.
test "Tx_CreateBucket_ErrBucketExists" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    // First creation succeeds.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            _ = try t.createBucket("widgets");
        }
    }.run);
    // Second creation of the same name must fail.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            if (t.createBucket("widgets")) |_| {} else |err| {
                assert(err == Error.BucketExists, "expected error BucketExists", .{});
            }
        }
    }.run);
}
232 |
// An empty bucket name is rejected by createBucket.
test "Tx_CreateBucket_ErrBucketNameRequired" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            if (t.createBucket("")) |_| {} else |err| {
                assert(err == Error.BucketNameRequired, "expected error BucketNameRequired", .{});
            }
        }
    }.run);
}
247 |
// Deleting a bucket removes it and all of its keys; re-creating it starts empty.
test "Tx_DeleteBucket" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    // Seed a bucket with one key.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            const bkt = try t.createBucket("widgets");
            try bkt.put(KeyPair.init("foo", "bar"));
        }
    }.run);

    // Delete the bucket and make sure it is gone.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            try t.deleteBucket("widgets");
            assert(t.getBucket("widgets") == null, "expected nil bucket", .{});
        }
    }.run);

    // Re-create the bucket and make sure the old value did not survive.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            const bkt = try t.createBucket("widgets");
            assert(bkt.get("foo") == null, "expected nil value", .{});
        }
    }.run);
}
279 |
// Deleting a bucket on an already-committed (closed) transaction must fail.
test "Tx_DeleteBucket_ErrTxClosed" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    const wtx = try database.begin(true);
    defer wtx.destroy();
    try wtx.commit();
    wtx.deleteBucket("widgets") catch |err| {
        assert(err == Error.TxClosed, "expected error TxClosed", .{});
    };
}
293 |
// A read-only transaction must refuse to delete a bucket.
test "Tx_DeleteBucket_ErrTxNotWritable" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.view(struct {
        fn run(t: *tx.TX) Error!void {
            t.deleteBucket("widgets") catch |err| {
                assert(err == Error.TxNotWriteable, "expected error TxNotWriteable", .{});
            };
        }
    }.run);
}
308 |
// Ensure that deleting a bucket that doesn't exist reports BucketNotFound
// and leaves the transaction usable.
test "Tx_DeleteBucket_NotFound" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // Fix: `update` yields void on success, so the former `_ = try ...`
    // discard was meaningless; every sibling test calls `try kvDB.update(...)`.
    try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            trx.deleteBucket("widgets") catch |err| {
                assert(err == Error.BucketNotFound, "expected error BucketNotFound", .{});
            };
        }
    }.exec);
}
323 |
// forEach with a visitor that never fails must itself not fail.
test "Tx_ForEach_NoError" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            const bkt = try t.createBucket("widgets");
            try bkt.put(KeyPair.init("foo", "bar"));
            // No-op visitor: forEach must complete cleanly.
            try t.forEach(struct {
                fn visit(_: []const u8) Error!void {}
            }.visit);
        }
    }.run);
}
341 |
// An error returned by the forEach visitor must propagate unchanged.
test "Tx_ForEach_WithError" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            const bkt = try t.createBucket("widgets");
            try bkt.put(KeyPair.init("foo", "bar"));
            t.forEach(struct {
                fn visit(_: []const u8) Error!void {
                    return Error.NotPassConsistencyCheck;
                }
            }.visit) catch |err| {
                assert(err == Error.NotPassConsistencyCheck, "expected error NotPassConsistencyCheck", .{});
            };
        }
    }.run);
}
362 |
// A handler registered with onCommit runs exactly once after a successful commit.
test "Tx_OnCommit" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;

    const Counter = struct {
        commitCount: usize = 0,
    };
    var counter = Counter{};
    try database.updateWithContext(&counter, struct {
        fn run(c: *Counter, t: *tx.TX) Error!void {
            // The handler receives the counter back as an opaque pointer.
            t.onCommit(@as(*anyopaque, @ptrCast(@alignCast(c))), struct {
                fn onCommit(raw: ?*anyopaque, _: *tx.TX) void {
                    const self = @as(*Counter, @ptrCast(@alignCast(raw)));
                    self.commitCount += 1;
                }
            }.onCommit);
        }
    }.run);
    try std.testing.expectEqual(@as(usize, 1), counter.commitCount);
}
387 |
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
test "Tx_OnCommit_Rollback" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    const Context = struct {
        x: usize = 0,
    };
    var ctx = Context{};
    // If this callback ever runs, ctx.x becomes non-zero and the final
    // assertion fails. (Fix: removed a leftover `std.log.err("callback", ...)`
    // debug line that polluted error-level output on every run.)
    const OnCommit = struct {
        fn callback(_ctxPtr: ?*anyopaque, _: *tx.TX) void {
            const argPtr = @as(*Context, @ptrCast(@alignCast(_ctxPtr)));
            argPtr.x += 1;
        }
    };
    // Returning an error from the update fn forces a rollback, so the
    // registered commit handler must never fire.
    kvDB.updateWithContext(&ctx, struct {
        fn exec(context: *Context, trx: *tx.TX) Error!void {
            trx.onCommit(context, OnCommit.callback);
            _ = try trx.createBucket("widgets");
            return Error.NotPassConsistencyCheck;
        }
    }.exec) catch |err| {
        assert(err == Error.NotPassConsistencyCheck, "expected error NotPassConsistencyCheck", .{});
    };
    assert(ctx.x == 0, "expected ctx.x to be 0", .{});
}
416 |
// Ensure that the database can be copied to a file path and the copy opens
// with identical contents.
test "Tx_CopyFile" {
    std.testing.log_level = .err;
    var tmpDir = tests.createTmpFile();
    defer tmpDir.deinit();

    {
        var testCtx = try tests.setup(std.testing.allocator);
        const kvDB = testCtx.db;
        const Context = struct {
            fp: std.fs.File,
        };
        var ctx = Context{ .fp = tmpDir.file };
        // Populate the source database.
        _ = try kvDB.update(struct {
            fn exec(trx: *tx.TX) Error!void {
                const b = try trx.createBucket("widgets");
                try b.put(KeyPair.init("foo", "bar"));
                try b.put(KeyPair.init("baz", "bat"));
            }
        }.exec);

        // Stream a consistent snapshot of the database into the temp file.
        _ = try kvDB.viewWithContext(&ctx, struct {
            fn exec(copyCtx: *Context, trx: *tx.TX) Error!void {
                var writer = tx.FileWriter.init(copyCtx.fp);
                _ = try trx.writeToAnyWriter(&writer);
            }
        }.exec);
        // Fix: removed a dead `originPath` dupe/free pair — the duplicated
        // path was never read, and on an early setup error the deferred free
        // would have been handed a string literal.
        tests.teardownNotDeleteDB(&testCtx);
    }
    {
        // Re-open the copy and verify its contents.
        var options = defaultOptions;
        options.readOnly = false;
        options.initialMmapSize = 100000 * PageSize;
        const filePath = tmpDir.path(std.testing.allocator);
        defer std.testing.allocator.free(filePath);
        const kvDB = try @import("db.zig").DB.open(std.testing.allocator, filePath, null, options);
        defer kvDB.close() catch unreachable;
        try kvDB.view(struct {
            fn exec(trx: *tx.TX) Error!void {
                const b = trx.getBucket("widgets");
                assert(b != null, "expected bucket 'widgets'", .{});
                const foo = b.?.get("foo");
                assert(std.mem.eql(u8, foo.?, "bar"), "expected 'bar'", .{});
                const baz = b.?.get("baz");
                assert(std.mem.eql(u8, baz.?, "bat"), "expected 'bat'", .{});
            }
        }.exec);
    }
}
469 |
// Ensure that Copy handles write errors right.
// The destination writer below fails once its running total reaches two
// pages, so the copy must abort and surface FileIOError.
test "Tx_CopyFile_Error_Meta" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // NOTE(review): `after` is populated with 3 * pageSize below, but the
    // exec callback discards its context (`_: *Context`) and the inner
    // writer hard-codes its own threshold — this field looks vestigial;
    // confirm and remove.
    const Context = struct {
        after: usize = 0,
    };
    var ctx = Context{ .after = 3 * kvDB.pageSize };

    // Seed the database with one bucket and two keys.
    _ = try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(KeyPair.init("foo", "bar"));
            try b.put(consts.KeyPair.init("baz", "bat"));
        }
    }.exec);
    // Copy the database into the failing writer and capture the result.
    const err = kvDB.viewWithContext(&ctx, struct {
        fn exec(_: *Context, trx: *tx.TX) Error!void {
            var tmpDir = tests.createTmpFile();
            defer tmpDir.deinit();

            // File-backed writer that injects a failure as soon as the total
            // bytes written reaches two pages (`>=`: it fails on the write
            // that completes the second page).
            const streamWriter = struct {
                after: usize = 0, // running total of bytes written so far
                fp: std.fs.File,
                const Self = @This();
                fn init(fp: std.fs.File) Self {
                    return Self{ .after = 0, .fp = fp };
                }

                // Write all bytes to the file, and return the number of bytes written.
                pub fn writeAll(self: *Self, bytes: []const u8) Error!usize {
                    self.fp.writeAll(bytes) catch {
                        return Error.FileIOError;
                    };
                    self.after += bytes.len;
                    if (self.after >= 2 * consts.PageSize) {
                        return Error.FileIOError;
                    }
                    return bytes.len;
                }
            };
            var writer = streamWriter.init(tmpDir.file);
            _ = try trx.writeToAnyWriter(&writer);
        }
    }.exec);
    // The injected failure must surface from the copy.
    std.debug.assert(err == Error.FileIOError);
}
519 |
// Ensure that Copy handles write errors right.
// Unlike the _Meta variant above, this writer only fails once the total
// written STRICTLY exceeds two pages (`>`), so the failure is injected
// after the leading pages have already been written successfully.
test "Tx_CopyFile_Error_Normal" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // Seed the database with one bucket and two keys.
    try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            const b = try trx.createBucket("widgets");
            try b.put(KeyPair.init("foo", "bar"));
            try b.put(KeyPair.init("baz", "bat"));
        }
    }.exec);

    // Copy the database into the failing writer and capture the result.
    const err = kvDB.view(struct {
        fn exec(trx: *tx.TX) Error!void {
            var tmpDir = tests.createTmpFile();
            defer tmpDir.deinit();

            // File-backed writer that injects a failure once the running
            // total of bytes written exceeds two pages.
            const streamWriter = struct {
                after: usize = 0, // running total of bytes written so far
                fp: std.fs.File,
                const Self = @This();
                fn init(fp: std.fs.File) Self {
                    return Self{ .after = 0, .fp = fp };
                }

                // Write all bytes to the file, and return the number of bytes written.
                pub fn writeAll(self: *Self, bytes: []const u8) Error!usize {
                    self.fp.writeAll(bytes) catch {
                        return Error.FileIOError;
                    };
                    self.after += bytes.len;
                    std.log.info("has written {d} bytes, page size: {d}", .{ self.after, consts.PageSize });
                    if (self.after > 2 * consts.PageSize) {
                        return Error.FileIOError;
                    }
                    return bytes.len;
                }
            };
            var writer = streamWriter.init(tmpDir.file);
            _ = try trx.writeToAnyWriter(&writer);
        }
    }.exec);
    // The injected failure must surface from the copy.
    std.debug.assert(err == Error.FileIOError);
}
566 |
// A rolled-back write is discarded: the previously committed value survives.
test "ExampleTx_Rollback" {
    std.testing.log_level = .err;
    var env = try tests.setup(std.testing.allocator);
    defer tests.teardown(&env);
    const database = env.db;
    // Create the bucket.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            _ = try t.createBucket("widgets");
        }
    }.run);
    // Seed a value for the key.
    try database.update(struct {
        fn run(t: *tx.TX) Error!void {
            try t.getBucket("widgets").?.put(KeyPair.init("foo", "bar"));
        }
    }.run);
    // Overwrite the key, then roll back so the write never lands.
    {
        const wtx = try database.begin(true);
        try wtx.getBucket("widgets").?.put(KeyPair.init("foo", "baz"));
        try wtx.rollbackAndDestroy();
    }
    // The original value must still be there.
    try database.view(struct {
        fn run(t: *tx.TX) Error!void {
            const got = t.getBucket("widgets").?.get("foo");
            assert(got != null, "expected 'foo' to be set", .{});
            assert(std.mem.eql(u8, got.?, "bar"), "expected 'bar'", .{});
        }
    }.run);
}
602 |
// Copy the database to another file and verify the COPY contains the data.
test "ExampleTx_CopyFile" {
    std.testing.log_level = .err;
    var testCtx = try tests.setup(std.testing.allocator);
    defer tests.teardown(&testCtx);
    const kvDB = testCtx.db;
    // Create a bucket and a key.
    try kvDB.update(struct {
        fn exec(trx: *tx.TX) Error!void {
            _ = try trx.createBucket("widgets");
            const b = trx.getBucket("widgets").?;
            try b.put(KeyPair.init("foo", "bar"));
        }
    }.exec);
    // Copy the database to another file.
    var tmpDir = tests.createTmpFile();
    try kvDB.updateWithContext(tmpDir.file, struct {
        fn exec(ctx: std.fs.File, trx: *tx.TX) Error!void {
            var writer = tx.FileWriter.init(ctx);
            _ = try trx.writeToAnyWriter(&writer);
        }
    }.exec);
    const filePath = tmpDir.path(testCtx.allocator);
    defer tmpDir.deinit();
    defer testCtx.allocator.free(filePath);
    // Open the copied file as its own database.
    const db = try @import("db.zig").DB.open(testCtx.allocator, filePath, null, defaultOptions);
    defer db.close() catch unreachable;
    // Fix: read through the COPY (`db`), not the source (`kvDB`) — the old
    // code never actually verified the copied file.
    try db.view(struct {
        fn exec(trx: *tx.TX) Error!void {
            const b = trx.getBucket("widgets").?;
            const foo = b.get("foo");
            assert(foo != null, "expected 'foo' to be set", .{});
            assert(std.mem.eql(u8, foo.?, "bar"), "expected 'bar'", .{});
        }
    }.exec);
}
639 |
--------------------------------------------------------------------------------
/src/util.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
/// Asserts that `ok` is true. If not, prints the formatted message to stderr
/// and aborts via `@panic`.
///
/// Fix: the previous version ended with `std.debug.assert(ok)`, which lowers
/// to `unreachable` — undefined behavior (not a guaranteed crash) in
/// ReleaseFast/ReleaseSmall, so a failed assertion could be silently ignored.
/// `@panic` crashes in every build mode.
pub inline fn assert(ok: bool, comptime fmt: []const u8, args: anytype) void {
    if (ok) {
        return;
    }
    std.debug.print(fmt ++ "\n", args);
    @panic("assertion failure");
}
11 |
/// Panic the program with the formatted message.
///
/// Delegates to `std.debug.panic`, which formats the message and invokes the
/// panic handler directly. The previous hand-rolled version heap-allocated
/// the message (itself panicking via `catch unreachable` on OOM) and
/// `defer`red a free that could never execute — `@panic` does not run defers.
pub inline fn panicFmt(comptime fmt: []const u8, args: anytype) noreturn {
    std.debug.panic(fmt, args);
}
20 |
/// Reports whether the compile target's OS is Windows.
pub inline fn isWindows() bool {
    return @import("builtin").os.tag == .windows;
}
26 |
/// Reports whether the compile target's OS is Linux.
pub inline fn isLinux() bool {
    return @import("builtin").os.tag == .linux;
}
32 |
/// Reports whether the compile target's OS is a Darwin family member (macOS, iOS, …).
pub inline fn isMacOS() bool {
    return @import("builtin").os.tag.isDarwin();
}
38 |
/// Largest size, in bytes, that the data file may be memory-mapped to.
///
/// On 64-bit targets this stays at 4 GiB (`1 << 32`, unchanged). Fix: that
/// constant does not fit in a 32-bit `usize`, so 32-bit targets would fail to
/// compile; they are capped at 2 GiB instead. The branch condition is
/// comptime-known, so only the taken branch is analyzed.
pub inline fn maxMapSize() usize {
    if (@bitSizeOf(usize) >= 64) {
        return 1 << 32;
    }
    return 1 << 31;
}
43 |
/// Memory-map `fileSize` bytes of the file as a shared mapping.
/// The mapping is read-only unless `writeable` is set.
pub fn mmap(fp: std.fs.File, fileSize: u64, writeable: bool) ![]u8 {
    const prot: u32 = if (writeable)
        std.posix.PROT.READ | std.posix.PROT.WRITE
    else
        std.posix.PROT.READ;
    return try std.posix.mmap(null, fileSize, prot, .{ .TYPE = .SHARED }, fp.handle, 0);
}
53 |
/// Unmap a region previously returned by `mmap`. Only Linux and Darwin
/// targets are supported; anything else panics.
pub fn munmap(ptr: []u8) void {
    if (!(isLinux() or isMacOS())) {
        @panic("not support the os");
    }
    // The slice came from posix.mmap, so it is page aligned.
    const aligned: []align(std.mem.page_size) const u8 = @alignCast(ptr);
    std.posix.munmap(aligned);
}
63 |
/// Binary search over `items` using `compareFn(context, item)`.
/// On a hit, returns the matching index with `exact = true`; otherwise
/// returns the insertion position (the index of the first element ordered
/// after the key) with `exact = false`. An empty slice yields `{0, false}`.
pub fn binarySearch(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime compareFn: fn (@TypeOf(context), T) std.math.Order,
) struct { index: usize, exact: bool } {
    var lo: usize = 0;
    var hi: usize = items.len;
    // Classic half-open [lo, hi) bisection; terminates with lo == hi at the
    // insertion point when no element compares equal.
    while (lo < hi) {
        const mid = lo + (hi - lo) / 2;
        switch (compareFn(context, items[mid])) {
            .eq => return .{ .index = mid, .exact = true },
            .lt => lo = mid + 1,
            .gt => hi = mid,
        }
    }
    return .{ .index = lo, .exact = false };
}
88 |
--------------------------------------------------------------------------------