├── .circleci
│   └── config.yml
├── .gitignore
├── LICENSE
├── README.md
├── collection.go
├── collection_test.go
├── const.go
├── dal.go
├── dal_test.go
├── db.go
├── db_test.go
├── expected
│   ├── TestCreateDalIncorrectMagicNumber
│   ├── TestCreateDalWithExistingFile
│   ├── TestDeserializeCollection
│   ├── TestDeserializeWithChildNodes
│   ├── TestDeserializeWithoutChildNodes
│   ├── TestFreelistDeserialize
│   ├── TestFreelistSerialize
│   ├── TestMetaDeserialize
│   ├── TestMetaSerialize
│   ├── TestSerializeCollection
│   ├── TestSerializeWithChildNodes
│   └── TestSerializeWithoutChildNodes
├── freelist.go
├── freelist_test.go
├── go.mod
├── go.sum
├── main.go
├── meta.go
├── meta_test.go
├── node.go
├── node_test.go
├── testutils.go
├── tx.go
└── tx_test.go
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Golang CircleCI 2.0 configuration file
2 | version: 2
3 | jobs:
4 |   build:
5 |     docker:
6 |       - image: circleci/golang:1.17
7 |     working_directory: /go/src/github.com/amit-davidson/btree
8 |     resource_class: small
9 |     steps:
10 |       - checkout
11 |       - run:
12 |           name: Run tests
13 |           command: |
14 |             go test -v ./...
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | *.a
3 | *.so
4 | .idea
5 | .vscode
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LibraDB
2 |
3 | [](http://golang.org)
4 | [](http://golang.org)
5 | [](https://lbesson.mit-license.org/)
6 | [](http://makeapullrequest.com)
7 | [](https://app.circleci.com/pipelines/github/amit-davidson/LibraDB)
8 |
9 | LibraDB is a simple, persistent key/value store written in pure Go. The project aims to provide a simple yet
10 | complete working example of a database. If you're interested in databases, I encourage you to start here.
11 |
12 | This database accompanies my [blog post](https://betterprogramming.pub/build-a-nosql-database-from-the-scratch-in-1000-lines-of-code-8ed1c15ed924) on how to write a database from scratch.
13 |
14 | ## Installing
15 |
16 | To start using LibraDB, install Go and run `go get`:
17 |
18 | ```sh
19 | go get -u github.com/amit-davidson/LibraDB
20 | ```
21 |
22 | ## Basic usage
23 | ```go
24 | package main
25 |
26 | import "github.com/amit-davidson/LibraDB"
27 |
28 | func main() {
29 | path := "libra.db"
30 | db, _ := LibraDB.Open(path, LibraDB.DefaultOptions)
31 |
32 | tx := db.WriteTx()
33 | name := []byte("test")
34 | collection, _ := tx.CreateCollection(name)
35 |
36 | key, value := []byte("key1"), []byte("value1")
37 | _ = collection.Put(key, value)
38 |
39 | _ = tx.Commit()
40 | }
41 | ```
42 | ## Transactions
43 | Read-only and read-write transactions are supported. LibraDB allows multiple read transactions or one read-write
44 | transaction at the same time. Transactions are goroutine-safe.
45 |
46 | LibraDB has a single isolation level: [Serializable](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable).
47 | In simpler terms, transactions are executed one after another rather than concurrently. This is the highest isolation level.
48 |
49 | ### Read-write transactions
50 |
51 | ```go
52 | tx := db.WriteTx()
53 | ...
54 | if err := tx.Commit(); err != nil {
55 | return err
56 | }
57 | ```
58 | ### Read-only transactions
59 | ```go
60 | tx := db.ReadTx()
61 | ...
62 | if err := tx.Commit(); err != nil {
63 | return err
64 | }
65 | ```
66 |
67 | ## Collections
68 | Collections are a grouping of key-value pairs. Collections are used to organize and quickly access data, as each
69 | collection is a B-Tree by itself. All keys in a collection must be unique.
70 | ```go
71 | tx := db.WriteTx()
72 | collection, err := tx.CreateCollection([]byte("test"))
73 | if err != nil {
74 | return err
75 | }
76 | _ = tx.Commit()
77 | ```
78 |
79 | ### Auto generating ID
80 | The `Collection.ID()` function returns an integer to be used as a unique identifier for key/value pairs.
81 | ```go
82 | tx := db.WriteTx()
83 | collection, err := tx.GetCollection([]byte("test"))
84 | if err != nil {
85 | return err
86 | }
87 | id := collection.ID()
88 | _ = tx.Commit()
89 | ```
90 | ## Key-Value Pairs
91 | Key/value pairs reside inside collections. CRUD operations are possible using the methods `Collection.Put`,
92 | `Collection.Find`, and `Collection.Remove`, as shown below.
93 | ```go
94 | tx := db.WriteTx()
95 | collection, err := tx.GetCollection([]byte("test"))
96 | if err != nil {
97 | return err
98 | }
99 |
100 | key, value := []byte("key1"), []byte("value1")
101 | if err := collection.Put(key, value); err != nil {
102 | return err
103 | }
104 | item, err := collection.Find(key)
105 | if err != nil {
106 | 	return err
107 | }
108 | if err := collection.Remove(item.key); err != nil {
109 | return err
110 | }
111 | _ = tx.Commit()
112 | ```
113 |
--------------------------------------------------------------------------------
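
The CRUD snippets in the README above assume an enclosing function that can `return err`. A minimal self-contained program stitching them together, sketched only from the API shown in the README (error handling collapsed to `log.Fatal` for brevity):

```go
package main

import (
	"log"

	"github.com/amit-davidson/LibraDB"
)

func main() {
	db, err := LibraDB.Open("libra.db", LibraDB.DefaultOptions)
	if err != nil {
		log.Fatal(err)
	}

	tx := db.WriteTx()
	collection, err := tx.CreateCollection([]byte("test"))
	if err != nil {
		log.Fatal(err)
	}

	key, value := []byte("key1"), []byte("value1")
	if err := collection.Put(key, value); err != nil {
		log.Fatal(err)
	}

	// Find returns a nil item when the key is absent.
	item, err := collection.Find(key)
	if err != nil {
		log.Fatal(err)
	}
	if item == nil {
		log.Fatal("key1 not found")
	}

	if err := collection.Remove(key); err != nil {
		log.Fatal(err)
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```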
/collection.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | )
7 |
8 | type Collection struct {
9 | name []byte
10 | root pgnum
11 | counter uint64
12 |
13 | // associated transaction
14 | tx *tx
15 |
16 | }
17 |
18 | func newCollection(name []byte, root pgnum) *Collection {
19 | return &Collection{
20 | name: name,
21 | root: root,
22 | }
23 | }
24 |
25 | func newEmptyCollection() *Collection {
26 | return &Collection{}
27 | }
28 |
29 | func (c *Collection) ID() uint64 {
30 | if !c.tx.write {
31 | return 0
32 | }
33 |
34 | id := c.counter
35 | c.counter += 1
36 | return id
37 | }
38 |
39 | func (c *Collection) serialize() *Item {
40 | b := make([]byte, collectionSize)
41 | leftPos := 0
42 | binary.LittleEndian.PutUint64(b[leftPos:], uint64(c.root))
43 | leftPos += pageNumSize
44 | binary.LittleEndian.PutUint64(b[leftPos:], c.counter)
45 | leftPos += counterSize
46 | return newItem(c.name, b)
47 | }
48 |
49 | func (c *Collection) deserialize(item *Item) {
50 | c.name = item.key
51 |
52 | if len(item.value) != 0 {
53 | leftPos := 0
54 | c.root = pgnum(binary.LittleEndian.Uint64(item.value[leftPos:]))
55 | leftPos += pageNumSize
56 |
57 | c.counter = binary.LittleEndian.Uint64(item.value[leftPos:])
58 | leftPos += counterSize
59 | }
60 | }
61 |
62 | // Put adds a key to the tree. It finds the correct node and the insertion index and adds the item. When performing the
63 | // search, the ancestors are returned as well. This way we can iterate over them to check which nodes were modified and
64 | // rebalance by splitting them accordingly. If the root has too many items, then a new root of a new layer is
65 | // created and the created nodes from the split are added as children.
66 | func (c *Collection) Put(key []byte, value []byte) error {
67 | if !c.tx.write {
68 | return writeInsideReadTxErr
69 | }
70 |
71 | i := newItem(key, value)
72 |
73 | // On first insertion the root node does not exist, so it should be created
74 | var root *Node
75 | var err error
76 | if c.root == 0 {
77 | root = c.tx.writeNode(c.tx.newNode([]*Item{i}, []pgnum{}))
78 | c.root = root.pageNum
79 | return nil
80 | } else {
81 | root, err = c.tx.getNode(c.root)
82 | if err != nil {
83 | return err
84 | }
85 | }
86 |
87 | // Find the path to the node where the insertion should happen
88 | insertionIndex, nodeToInsertIn, ancestorsIndexes, err := root.findKey(i.key, false)
89 | if err != nil {
90 | return err
91 | }
92 |
93 | // If key already exists
94 | if nodeToInsertIn.items != nil && insertionIndex < len(nodeToInsertIn.items) && bytes.Equal(nodeToInsertIn.items[insertionIndex].key, key) {
95 | nodeToInsertIn.items[insertionIndex] = i
96 | } else {
97 | // Add item to the leaf node
98 | nodeToInsertIn.addItem(i, insertionIndex)
99 | }
100 | nodeToInsertIn.writeNode(nodeToInsertIn)
101 |
102 | ancestors, err := c.getNodes(ancestorsIndexes)
103 | if err != nil {
104 | return err
105 | }
106 |
107 | // Rebalance the nodes all the way up, starting from the node one before the last and excluding the root.
108 | for i := len(ancestors) - 2; i >= 0; i-- {
109 | pnode := ancestors[i]
110 | node := ancestors[i+1]
111 | nodeIndex := ancestorsIndexes[i+1]
112 | if node.isOverPopulated() {
113 | pnode.split(node, nodeIndex)
114 | }
115 | }
116 |
117 | // Handle root
118 | rootNode := ancestors[0]
119 | if rootNode.isOverPopulated() {
120 | newRoot := c.tx.newNode([]*Item{}, []pgnum{rootNode.pageNum})
121 | newRoot.split(rootNode, 0)
122 |
123 | // commit newly created root
124 | newRoot = c.tx.writeNode(newRoot)
125 |
126 | c.root = newRoot.pageNum
127 | }
128 |
129 | return nil
130 | }
131 |
132 | // Find returns an item based on the given key by performing a binary search.
133 | func (c *Collection) Find(key []byte) (*Item, error) {
134 | n, err := c.tx.getNode(c.root)
135 | if err != nil {
136 | return nil, err
137 | }
138 |
139 | index, containingNode, _, err := n.findKey(key, true)
140 | if err != nil {
141 | return nil, err
142 | }
143 | if index == -1 {
144 | return nil, nil
145 | }
146 | return containingNode.items[index], nil
147 | }
148 |
149 | // Remove removes a key from the tree. It finds the correct node and the index to remove the item from and removes it.
150 | // When performing the search, the ancestors are returned as well. This way we can iterate over them to check which
151 | // nodes were modified and rebalance by rotating or merging the unbalanced nodes. Rotation is done first. If the
152 | siblings don't have enough items, then merging occurs. If the root is without items after a merge, then the root is
153 | // removed and the tree is one level shorter.
154 | func (c *Collection) Remove(key []byte) error {
155 | if !c.tx.write {
156 | return writeInsideReadTxErr
157 | }
158 |
159 | // Find the path to the node where the deletion should happen
160 | rootNode, err := c.tx.getNode(c.root)
161 | if err != nil {
162 | return err
163 | }
164 |
165 | removeItemIndex, nodeToRemoveFrom, ancestorsIndexes, err := rootNode.findKey(key, true)
166 | if err != nil {
167 | return err
168 | }
169 |
170 | if removeItemIndex == -1 {
171 | return nil
172 | }
173 |
174 | if nodeToRemoveFrom.isLeaf() {
175 | nodeToRemoveFrom.removeItemFromLeaf(removeItemIndex)
176 | } else {
177 | affectedNodes, err := nodeToRemoveFrom.removeItemFromInternal(removeItemIndex)
178 | if err != nil {
179 | return err
180 | }
181 | ancestorsIndexes = append(ancestorsIndexes, affectedNodes...)
182 | }
183 |
184 | ancestors, err := c.getNodes(ancestorsIndexes)
185 | if err != nil {
186 | return err
187 | }
188 |
189 | // Rebalance the nodes all the way up, starting from the node one before the last and excluding the root.
190 | for i := len(ancestors) - 2; i >= 0; i-- {
191 | pnode := ancestors[i]
192 | node := ancestors[i+1]
193 | if node.isUnderPopulated() {
194 | err = pnode.rebalanceRemove(node, ancestorsIndexes[i+1])
195 | if err != nil {
196 | return err
197 | }
198 | }
199 | }
200 |
201 | rootNode = ancestors[0]
202 | // If the root is left without items after rebalancing, its only child becomes the new root and the old root isn't saved.
203 | if len(rootNode.items) == 0 && len(rootNode.childNodes) > 0 {
204 | c.root = ancestors[1].pageNum
205 | }
206 |
207 | return nil
208 | }
209 |
210 | // getNodes returns a list of nodes based on their indexes (the breadcrumbs) from the root
211 | //          p
212 | //        /   \
213 | //       a     b
214 | //      / \   / \
215 | //     c   d e   f
216 | // For [0,1,0] -> p,b,e
217 | func (c *Collection) getNodes(indexes []int) ([]*Node, error) {
218 | root, err := c.tx.getNode(c.root)
219 | if err != nil {
220 | return nil, err
221 | }
222 |
223 | nodes := []*Node{root}
224 | child := root
225 | for i := 1; i < len(indexes); i++ {
226 | child, _ = c.tx.getNode(child.childNodes[indexes[i]])
227 | nodes = append(nodes, child)
228 | }
229 | return nodes, nil
230 | }
231 |
--------------------------------------------------------------------------------
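
The serialize/deserialize pair above defines the collection's on-disk representation: the item value holds the root page number followed by the item counter, keyed by the collection name. A round-trip sketch in the style of the existing tests (a hypothetical test, not part of the repository; it has to live inside package LibraDB because the fields and methods are unexported):

```go
package LibraDB

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Serialize a collection into an Item and decode it back, checking that the
// name (the item key), root page number and counter survive the round trip.
func Test_CollectionSerializeRoundTrip(t *testing.T) {
	in := &Collection{name: []byte("users"), root: 3, counter: 7}

	item := in.serialize()

	out := newEmptyCollection()
	out.deserialize(item)

	assert.Equal(t, in.name, out.name)
	assert.Equal(t, in.root, out.root)
	assert.Equal(t, in.counter, out.counter)
}
```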
/collection_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "os"
7 | "testing"
8 | )
9 |
10 | func Test_GetAndCreateCollection(t *testing.T) {
11 | db, cleanFunc := createTestDB(t)
12 | defer cleanFunc()
13 |
14 | tx := db.WriteTx()
15 |
16 | collectionName := testCollectionName
17 | createdCollection, err := tx.CreateCollection(collectionName)
18 | require.NoError(t, err)
19 |
20 | err = tx.Commit()
21 | require.NoError(t, err)
22 |
23 | tx = db.ReadTx()
24 | actual, err := tx.GetCollection(collectionName)
25 | require.NoError(t, err)
26 |
27 | err = tx.Commit()
28 | require.NoError(t, err)
29 |
30 | expected := newEmptyCollection()
31 | expected.root = createdCollection.root
32 | expected.counter = 0
33 | expected.name = collectionName
34 |
35 | areCollectionsEqual(t, expected, actual)
36 | }
37 |
38 | func Test_GetCollectionDoesntExist(t *testing.T) {
39 | db, cleanFunc := createTestDB(t)
40 | defer cleanFunc()
41 |
42 | tx := db.ReadTx()
43 | collection, err := tx.GetCollection([]byte("name1"))
44 | require.NoError(t, err)
45 |
46 | assert.Nil(t, collection)
47 | }
48 |
49 | func Test_CreateCollectionPutItem(t *testing.T) {
50 | db, cleanFunc := createTestDB(t)
51 | defer cleanFunc()
52 |
53 | tx := db.WriteTx()
54 |
55 | collectionName := testCollectionName
56 | createdCollection, err := tx.CreateCollection(collectionName)
57 | require.NoError(t, err)
58 |
59 | newKey := []byte("0")
60 | newVal := []byte("1")
61 | err = createdCollection.Put(newKey, newVal)
62 | require.NoError(t, err)
63 |
64 | item, err := createdCollection.Find(newKey)
65 | require.NoError(t, err)
66 |
67 | assert.Equal(t, newKey, item.key)
68 | assert.Equal(t, newVal, item.value)
69 | }
70 |
71 | func Test_DeleteCollection(t *testing.T) {
72 | db, cleanFunc := createTestDB(t)
73 | defer cleanFunc()
74 |
75 | tx := db.WriteTx()
76 |
77 | collectionName := testCollectionName
78 | createdCollection, err := tx.CreateCollection(collectionName)
79 | require.NoError(t, err)
80 |
81 | err = tx.Commit()
82 | require.NoError(t, err)
83 |
84 | tx = db.WriteTx()
85 | actual, err := tx.GetCollection(collectionName)
86 | require.NoError(t, err)
87 |
88 | areCollectionsEqual(t, createdCollection, actual)
89 |
90 | err = tx.DeleteCollection(createdCollection.name)
91 | require.NoError(t, err)
92 |
93 | actualAfterRemoval, err := tx.GetCollection(collectionName)
94 | require.NoError(t, err)
95 | assert.Nil(t, actualAfterRemoval)
96 |
97 | err = tx.Commit()
98 | require.NoError(t, err)
99 | }
100 |
101 | func Test_DeleteItem(t *testing.T) {
102 | db, cleanFunc := createTestDB(t)
103 | defer cleanFunc()
104 |
105 | tx := db.WriteTx()
106 |
107 | collectionName := testCollectionName
108 | createdCollection, err := tx.CreateCollection(collectionName)
109 | require.NoError(t, err)
110 |
111 | newKey := []byte("0")
112 | newVal := []byte("1")
113 | err = createdCollection.Put(newKey, newVal)
114 | require.NoError(t, err)
115 |
116 | item, err := createdCollection.Find(newKey)
117 | require.NoError(t, err)
118 |
119 | assert.Equal(t, newKey, item.key)
120 | assert.Equal(t, newVal, item.value)
121 |
122 | err = createdCollection.Remove(item.key)
123 | require.NoError(t, err)
124 |
125 | item, err = createdCollection.Find(newKey)
126 | require.NoError(t, err)
127 |
128 | assert.Nil(t, item)
129 | }
130 | func TestSerializeCollection(t *testing.T) {
131 | expectedCollectionValue, err := os.ReadFile(getExpectedResultFileName(t.Name()))
132 | require.NoError(t, err)
133 |
134 | expected := &Item{
135 | key: []byte("collection1"),
136 | value: expectedCollectionValue,
137 | }
138 |
139 | collection := &Collection{
140 | name: []byte("collection1"),
141 | root: 1,
142 | counter: 1,
143 | }
144 |
145 | actual := collection.serialize()
146 | assert.Equal(t, expected, actual)
147 | }
148 |
149 | func TestDeserializeCollection(t *testing.T) {
150 | expectedCollectionValue, err := os.ReadFile(getExpectedResultFileName(t.Name()))
151 |
152 | expected := &Collection{
153 | name: []byte("collection1"),
154 | root: 1,
155 | counter: 1,
156 | }
157 |
158 | collection := &Item{
159 | key: []byte("collection1"),
160 | value: expectedCollectionValue,
161 | }
162 | actual := newEmptyCollection()
163 | actual.deserialize(collection)
164 |
165 | require.NoError(t, err)
166 | assert.Equal(t, expected, actual)
167 | }
168 |
--------------------------------------------------------------------------------
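
The `getExpectedResultFileName` helper used by the serialization tests lives in `testutils.go`, which is not shown in this listing. Judging by the `expected/` directory in the tree, whose files are named after the tests, it presumably maps a test name to its golden file; a hypothetical sketch under that assumption:

```go
package LibraDB

import "path/filepath"

// getExpectedResultFileName resolves a test name (t.Name()) to its golden
// file under ./expected — an assumption based on the directory layout, since
// testutils.go itself is not included above.
func getExpectedResultFileName(name string) string {
	return filepath.Join("expected", name)
}
```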
/const.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import "errors"
4 |
5 | const (
6 | magicNumberSize = 4
7 | counterSize = 4
8 | nodeHeaderSize = 3
9 |
10 | collectionSize = 16
11 | pageNumSize = 8
12 | )
13 |
14 | var writeInsideReadTxErr = errors.New("can't perform a write operation inside a read transaction")
--------------------------------------------------------------------------------
/dal.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 | )
8 |
9 | type pgnum uint64
10 |
11 | type Options struct {
12 | pageSize int
13 |
14 | MinFillPercent float32
15 | MaxFillPercent float32
16 | }
17 |
18 | var DefaultOptions = &Options{
19 | MinFillPercent: 0.5,
20 | MaxFillPercent: 0.95,
21 | }
22 |
23 | type page struct {
24 | num pgnum
25 | data []byte
26 | }
27 |
28 | type dal struct {
29 | pageSize int
30 | minFillPercent float32
31 | maxFillPercent float32
32 | file *os.File
33 |
34 | *meta
35 | *freelist
36 | }
37 |
38 | func newDal(path string, options *Options) (*dal, error) {
39 | dal := &dal{
40 | meta: newEmptyMeta(),
41 | pageSize: options.pageSize,
42 | minFillPercent: options.MinFillPercent,
43 | maxFillPercent: options.MaxFillPercent,
44 | }
45 |
46 | // exists
47 | if _, err := os.Stat(path); err == nil {
48 | dal.file, err = os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
49 | if err != nil {
50 | _ = dal.close()
51 | return nil, err
52 | }
53 |
54 | meta, err := dal.readMeta()
55 | if err != nil {
56 | return nil, err
57 | }
58 | dal.meta = meta
59 |
60 | freelist, err := dal.readFreelist()
61 | if err != nil {
62 | return nil, err
63 | }
64 | dal.freelist = freelist
65 | // doesn't exist
66 | } else if errors.Is(err, os.ErrNotExist) {
67 | // init freelist
68 | dal.file, err = os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
69 | if err != nil {
70 | _ = dal.close()
71 | return nil, err
72 | }
73 |
74 | dal.freelist = newFreelist()
75 | dal.freelistPage = dal.getNextPage()
76 | _, err := dal.writeFreelist()
77 | if err != nil {
78 | return nil, err
79 | }
80 |
81 | // init root
82 | collectionsNode, err := dal.writeNode(NewNodeForSerialization([]*Item{}, []pgnum{}))
83 | if err != nil {
84 | return nil, err
85 | }
86 | dal.root = collectionsNode.pageNum
87 |
88 | // write meta page
 89 | 		if _, err = dal.writeMeta(dal.meta); err != nil { return nil, err }
 90 | 	} else { // another error from os.Stat
91 | return nil, err
92 | }
93 | return dal, nil
94 | }
95 |
 96 | // getSplitIndex should be called when performing a rebalance after an item is removed. It checks if a node can spare an
 97 | // element, and if it can, it returns the index where the split should happen. Otherwise -1 is returned.
98 | func (d *dal) getSplitIndex(node *Node) int {
99 | size := 0
100 | size += nodeHeaderSize
101 |
102 | for i := range node.items {
103 | size += node.elementSize(i)
104 |
105 | 		// if the accumulated size is already big enough (above the minimum threshold) and we haven't reached the
106 | 		// last item, the node can spare an element, so split right after the current one
107 | if float32(size) > d.minThreshold() && i < len(node.items)-1 {
108 | return i + 1
109 | }
110 | }
111 |
112 | return -1
113 | }
114 |
115 | func (d *dal) maxThreshold() float32 {
116 | return d.maxFillPercent * float32(d.pageSize)
117 | }
118 |
119 | func (d *dal) isOverPopulated(node *Node) bool {
120 | return float32(node.nodeSize()) > d.maxThreshold()
121 | }
122 |
123 | func (d *dal) minThreshold() float32 {
124 | return d.minFillPercent * float32(d.pageSize)
125 | }
126 |
127 | func (d *dal) isUnderPopulated(node *Node) bool {
128 | return float32(node.nodeSize()) < d.minThreshold()
129 | }
130 |
131 | func (d *dal) close() error {
132 | if d.file != nil {
133 | err := d.file.Close()
134 | if err != nil {
135 | return fmt.Errorf("could not close file: %s", err)
136 | }
137 | d.file = nil
138 | }
139 |
140 | return nil
141 | }
142 |
143 | func (d *dal) allocateEmptyPage() *page {
144 | return &page{
145 | data: make([]byte, d.pageSize, d.pageSize),
146 | }
147 | }
148 |
149 | func (d *dal) readPage(pageNum pgnum) (*page, error) {
150 | p := d.allocateEmptyPage()
151 |
152 | offset := int(pageNum) * d.pageSize
153 | _, err := d.file.ReadAt(p.data, int64(offset))
154 | if err != nil {
155 | return nil, err
156 | }
157 | 	return p, nil
158 | }
159 |
160 | func (d *dal) writePage(p *page) error {
161 | offset := int64(p.num) * int64(d.pageSize)
162 | _, err := d.file.WriteAt(p.data, offset)
163 | return err
164 | }
165 |
166 | func (d *dal) getNode(pageNum pgnum) (*Node, error) {
167 | p, err := d.readPage(pageNum)
168 | if err != nil {
169 | return nil, err
170 | }
171 | node := NewEmptyNode()
172 | node.deserialize(p.data)
173 | node.pageNum = pageNum
174 | return node, nil
175 | }
176 |
177 | func (d *dal) writeNode(n *Node) (*Node, error) {
178 | p := d.allocateEmptyPage()
179 | if n.pageNum == 0 {
180 | p.num = d.getNextPage()
181 | n.pageNum = p.num
182 | } else {
183 | p.num = n.pageNum
184 | }
185 |
186 | p.data = n.serialize(p.data)
187 |
188 | err := d.writePage(p)
189 | if err != nil {
190 | return nil, err
191 | }
192 | return n, nil
193 | }
194 |
195 | func (d *dal) deleteNode(pageNum pgnum) {
196 | d.releasePage(pageNum)
197 | }
198 |
199 | func (d *dal) readFreelist() (*freelist, error) {
200 | p, err := d.readPage(d.freelistPage)
201 | if err != nil {
202 | return nil, err
203 | }
204 |
205 | freelist := newFreelist()
206 | freelist.deserialize(p.data)
207 | return freelist, nil
208 | }
209 |
210 | func (d *dal) writeFreelist() (*page, error) {
211 | p := d.allocateEmptyPage()
212 | p.num = d.freelistPage
213 | d.freelist.serialize(p.data)
214 |
215 | err := d.writePage(p)
216 | if err != nil {
217 | return nil, err
218 | }
219 | d.freelistPage = p.num
220 | return p, nil
221 | }
222 |
223 | func (d *dal) writeMeta(meta *meta) (*page, error) {
224 | p := d.allocateEmptyPage()
225 | p.num = metaPageNum
226 | meta.serialize(p.data)
227 |
228 | err := d.writePage(p)
229 | if err != nil {
230 | return nil, err
231 | }
232 | return p, nil
233 | }
234 |
235 | func (d *dal) readMeta() (*meta, error) {
236 | p, err := d.readPage(metaPageNum)
237 | if err != nil {
238 | return nil, err
239 | }
240 |
241 | meta := newEmptyMeta()
242 | meta.deserialize(p.data)
243 | return meta, nil
244 | }
245 |
--------------------------------------------------------------------------------
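The fill thresholds in dal.go are plain fractions of the page size: with a 4096-byte page and the default options, a node is over-populated above 0.95 * 4096 ≈ 3891 bytes and under-populated below 0.5 * 4096 = 2048 bytes. A standalone sketch of the same split-index scan over element sizes (the page size and element sizes here are hypothetical, for illustration only):

package main

import "fmt"

// getSplitIndex mirrors dal.getSplitIndex: accumulate element sizes, and once
// the total passes the minimum threshold while at least one element is still
// left to spare, split right after the current element. -1 means the node
// can't spare an element.
func getSplitIndex(elementSizes []int, minThreshold float32) int {
	size := 3 // nodeHeaderSize
	for i, elemSize := range elementSizes {
		size += elemSize
		if float32(size) > minThreshold && i < len(elementSizes)-1 {
			return i + 1
		}
	}
	return -1
}

func main() {
	pageSize, minFillPercent := 4096, float32(0.5)
	sizes := []int{900, 900, 900, 900} // hypothetical element sizes in bytes
	fmt.Println(getSplitIndex(sizes, minFillPercent*float32(pageSize))) // 3
}
--------------------------------------------------------------------------------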
/dal_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "os"
7 | "testing"
8 | )
9 |
10 | func createTestDAL(t *testing.T) (*dal, func()) {
11 | fileName := getTempFileName()
12 | dal, err := newDal(fileName, &Options{
13 | pageSize: testPageSize,
14 | })
15 | require.NoError(t, err)
16 |
17 | cleanFunc := func() {
18 | err = dal.close()
19 | require.NoError(t, err)
20 | err = os.Remove(fileName)
21 | require.NoError(t, err)
22 | }
23 | return dal, cleanFunc
24 | }
25 |
26 | func TestCreateAndGetNode(t *testing.T) {
27 | dal, cleanFunc := createTestDAL(t)
28 | defer cleanFunc()
29 |
30 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
31 | var childNodes []pgnum
32 |
33 | expectedNode, err := dal.writeNode(NewNodeForSerialization(items, childNodes))
34 | require.NoError(t, err)
35 |
36 | actualNode, err := dal.getNode(expectedNode.pageNum)
37 | require.NoError(t, err)
38 |
39 | assert.Equal(t, expectedNode, actualNode)
40 | }
41 |
42 | func TestDeleteNode(t *testing.T) {
43 | dal, cleanFunc := createTestDAL(t)
44 | defer cleanFunc()
45 |
46 | var items []*Item
47 | var childNodes []pgnum
48 |
49 | node, err := dal.writeNode(NewNodeForSerialization(items, childNodes))
50 | require.NoError(t, err)
51 | assert.Equal(t, node.pageNum, dal.maxPage)
52 |
53 | dal.deleteNode(node.pageNum)
54 |
55 | assert.Equal(t, dal.releasedPages, []pgnum{node.pageNum})
56 | assert.Equal(t, node.pageNum, dal.maxPage)
57 | }
58 |
59 | func TestDeleteNodeAndReusePage(t *testing.T) {
60 | dal, cleanFunc := createTestDAL(t)
61 | defer cleanFunc()
62 |
63 | var items []*Item
64 | var childNodes []pgnum
65 |
66 | node, err := dal.writeNode(NewNodeForSerialization(items, childNodes))
67 | require.NoError(t, err)
68 | assert.Equal(t, node.pageNum, dal.maxPage)
69 |
70 | dal.deleteNode(node.pageNum)
71 |
72 | assert.Equal(t, dal.releasedPages, []pgnum{node.pageNum})
73 | assert.Equal(t, node.pageNum, dal.maxPage)
74 |
75 | newNode, err := dal.writeNode(NewNodeForSerialization(items, childNodes))
76 | require.NoError(t, err)
77 | assert.Equal(t, dal.releasedPages, []pgnum{})
78 | assert.Equal(t, newNode.pageNum, dal.maxPage)
79 | }
80 |
81 | func TestCreateDalWithNewFile(t *testing.T) {
82 | dal, cleanFunc := createTestDAL(t)
83 | defer cleanFunc()
84 |
85 | metaPage, err := dal.readMeta()
86 | require.NoError(t, err)
87 |
88 | freelistPageNum := pgnum(1)
89 | rootPageNum := pgnum(2)
90 | assert.Equal(t, freelistPageNum, metaPage.freelistPage)
91 | assert.Equal(t, rootPageNum, metaPage.root)
92 |
93 | assert.Equal(t, freelistPageNum, dal.freelistPage)
94 | assert.Equal(t, rootPageNum, dal.root)
95 | }
96 |
97 | func TestCreateDalWithExistingFile(t *testing.T) {
98 | // Make sure file exists
99 | _, err := os.Stat(getExpectedResultFileName(t.Name()))
100 | require.NoError(t, err)
101 |
102 | dal, cleanFunc := createTestDAL(t)
103 | defer cleanFunc()
104 |
105 | metaPage, err := dal.readMeta()
106 | require.NoError(t, err)
107 |
108 | freelistPageNum := pgnum(1)
109 | rootPageNum := pgnum(2)
110 | assert.Equal(t, freelistPageNum, metaPage.freelistPage)
111 | assert.Equal(t, rootPageNum, metaPage.root)
112 |
113 | assert.Equal(t, freelistPageNum, dal.freelistPage)
114 | assert.Equal(t, rootPageNum, dal.root)
115 | }
116 |
--------------------------------------------------------------------------------
/db.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "os"
5 | "sync"
6 | )
7 |
8 | type DB struct {
9 | rwlock sync.RWMutex // Allows only one writer at a time
10 | *dal
11 | }
12 |
13 | func Open(path string, options *Options) (*DB, error) {
14 | options.pageSize = os.Getpagesize()
15 | dal, err := newDal(path, options)
16 | if err != nil {
17 | return nil, err
18 | }
19 |
20 | db := &DB{
21 | sync.RWMutex{},
22 | dal,
23 | }
24 |
25 | return db, nil
26 | }
27 |
28 | func (db *DB) Close() error {
29 | return db.close()
30 | }
31 |
32 | func (db *DB) ReadTx() *tx {
33 | db.rwlock.RLock()
34 | return newTx(db, false)
35 | }
36 |
37 | func (db *DB) WriteTx() *tx {
38 | db.rwlock.Lock()
39 | return newTx(db, true)
40 | }
--------------------------------------------------------------------------------
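The sync.RWMutex gives single-writer, multi-reader semantics at the whole-DB level: WriteTx takes the exclusive lock, ReadTx takes the shared one. A usage sketch, assuming it runs inside the LibraDB package and that Commit releases the transaction's lock (tx.go appears later in this dump); errors are ignored for brevity, and "example.db" is a hypothetical file name:

db, _ := Open("example.db", DefaultOptions)

// Only one write transaction may be open at a time; a second WriteTx would
// block until the exclusive lock is released.
wtx := db.WriteTx()
c, _ := wtx.CreateCollection([]byte("users"))
_ = c.Put([]byte("id1"), []byte("alice"))
_ = wtx.Commit() // commits and releases the write lock

// Read transactions take the shared lock, so many can run concurrently.
rtx := db.ReadTx()
users, _ := rtx.GetCollection([]byte("users"))
item, _ := users.Find([]byte("id1"))
_ = item
_ = rtx.Commit() // releases the read lock
_ = db.Close()
--------------------------------------------------------------------------------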
/db_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "testing"
7 | )
8 |
9 | func TestDB_CreateCollectionPutItem(t *testing.T) {
10 | db, err := Open(getTempFileName(), &Options{MinFillPercent: 0.5, MaxFillPercent: 1.0})
11 | require.NoError(t, err)
12 |
13 | tx := db.WriteTx()
14 | collectionName := testCollectionName
15 | createdCollection, err := tx.CreateCollection(collectionName)
16 | require.NoError(t, err)
17 |
18 | newKey := []byte("0")
19 | newVal := []byte("1")
20 | err = createdCollection.Put(newKey, newVal)
21 | require.NoError(t, err)
22 |
23 | item, err := createdCollection.Find(newKey)
24 | require.NoError(t, err)
25 |
26 | assert.Equal(t, newKey, item.key)
27 | assert.Equal(t, newVal, item.value)
28 |
29 | err = tx.Commit()
30 | require.NoError(t, err)
31 | }
32 |
33 | func TestDB_WritersDontBlockReaders(t *testing.T) {
34 | t.Skip()
35 | db, err := Open(getTempFileName(), &Options{MinFillPercent: 0.5, MaxFillPercent: 1.0})
36 | require.NoError(t, err)
37 |
38 | tx := db.WriteTx()
39 | collectionName := testCollectionName
40 | createdCollection, err := tx.CreateCollection(collectionName)
41 | require.NoError(t, err)
42 |
43 | newKey := []byte("0")
44 | newVal := []byte("1")
45 | err = createdCollection.Put(newKey, newVal)
46 | require.NoError(t, err)
47 |
48 | item, err := createdCollection.Find(newKey)
49 | require.NoError(t, err)
50 |
51 | assert.Equal(t, newKey, item.key)
52 | assert.Equal(t, newVal, item.value)
53 |
54 | err = tx.Commit()
55 | require.NoError(t, err)
56 |
57 | // Now open a write tx and try to read while that tx is open
58 | holdingTx := db.WriteTx()
59 |
60 | readTx := db.ReadTx()
61 |
62 | collection, err := readTx.GetCollection(createdCollection.name)
63 | areCollectionsEqual(t, createdCollection, collection)
64 |
65 | err = readTx.Commit()
66 | require.NoError(t, err)
67 |
68 | err = holdingTx.Commit()
69 | require.NoError(t, err)
70 | }
71 |
72 | func TestDB_ReadersDontSeeUncommittedChanges(t *testing.T) {
73 | t.Skip()
74 | db, err := Open(getTempFileName(), &Options{MinFillPercent: 0.5, MaxFillPercent: 1.0})
75 | require.NoError(t, err)
76 |
77 | tx := db.WriteTx()
78 | collectionName := testCollectionName
79 | createdCollection, err := tx.CreateCollection(collectionName)
80 | require.NoError(t, err)
81 |
82 | err = tx.Commit()
83 | require.NoError(t, err)
84 |
85 | tx2 := db.WriteTx()
86 | createdCollection, err = tx2.GetCollection(createdCollection.name)
87 | require.NoError(t, err)
88 |
89 | newKey := createItem("0")
90 | newVal := createItem("1")
91 | err = createdCollection.Put(newKey, newVal)
92 | require.NoError(t, err)
93 |
94 | readTx := db.ReadTx()
95 |
96 | collection, err := readTx.GetCollection(createdCollection.name)
97 | areCollectionsEqual(t, createdCollection, collection)
98 |
99 | item, err := collection.Find(newKey)
100 | require.NoError(t, err)
101 | assert.Nil(t, item)
102 |
103 | err = readTx.Commit()
104 | require.NoError(t, err)
105 |
106 | err = tx2.Commit()
107 | require.NoError(t, err)
108 | }
109 |
110 | func TestDB_DeleteItem(t *testing.T) {
111 | db, err := Open(getTempFileName(), &Options{MinFillPercent: testMinPercentage, MaxFillPercent: testMaxPercentage})
112 | require.NoError(t, err)
113 |
114 | tx := db.WriteTx()
115 |
116 | collectionName := testCollectionName
117 | createdCollection, err := tx.CreateCollection(collectionName)
118 | require.NoError(t, err)
119 |
120 | newKey := []byte("0")
121 | newVal := []byte("1")
122 | err = createdCollection.Put(newKey, newVal)
123 | require.NoError(t, err)
124 |
125 | item, err := createdCollection.Find(newKey)
126 | require.NoError(t, err)
127 |
128 | assert.Equal(t, newKey, item.key)
129 | assert.Equal(t, newVal, item.value)
130 |
131 | err = createdCollection.Remove(item.key)
132 | require.NoError(t, err)
133 |
134 | item, err = createdCollection.Find(newKey)
135 | require.NoError(t, err)
136 |
137 | assert.Nil(t, item)
138 |
139 | err = tx.Commit()
140 | require.NoError(t, err)
141 | }
142 |
--------------------------------------------------------------------------------
/expected/TestCreateDalIncorrectMagicNumber:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/expected/TestCreateDalWithExistingFile:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestCreateDalWithExistingFile
--------------------------------------------------------------------------------
/expected/TestDeserializeCollection:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/expected/TestDeserializeWithChildNodes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestDeserializeWithChildNodes
--------------------------------------------------------------------------------
/expected/TestDeserializeWithoutChildNodes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestDeserializeWithoutChildNodes
--------------------------------------------------------------------------------
/expected/TestFreelistDeserialize:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/expected/TestFreelistSerialize:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/expected/TestMetaDeserialize:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestMetaDeserialize
--------------------------------------------------------------------------------
/expected/TestMetaSerialize:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestMetaSerialize
--------------------------------------------------------------------------------
/expected/TestSerializeCollection:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/expected/TestSerializeWithChildNodes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestSerializeWithChildNodes
--------------------------------------------------------------------------------
/expected/TestSerializeWithoutChildNodes:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amit-davidson/LibraDB/4a154c8cb27ec9968cc9f150a39edf51cacb1b19/expected/TestSerializeWithoutChildNodes
--------------------------------------------------------------------------------
/freelist.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import "encoding/binary"
4 |
5 | // metaPage is the maximum pgnum that is reserved by the db for its own purposes. For now, only page 0 is used as the
6 | // header page, which means all other page numbers are free for data.
7 | const metaPage = 0
8 |
9 | // freelist manages the free and used pages.
10 | type freelist struct {
11 | // maxPage holds the latest page num allocated. releasedPages holds all the ids that were released during
12 | 	// delete. New page ids are first taken from releasedPages to avoid growing the file. If it's empty, then
13 | 	// maxPage is incremented and a new page is created, thus increasing the file size.
14 | maxPage pgnum
15 | releasedPages []pgnum
16 | }
17 |
18 | func newFreelist() *freelist {
19 | return &freelist{
20 | maxPage: metaPage,
21 | releasedPages: []pgnum{},
22 | }
23 | }
24 |
25 | // getNextPage returns a page id for writing. New page ids are first taken from releasedPages to avoid growing
26 | // the file. If it's empty, then maxPage is incremented and a new page is created, thus increasing the file size.
27 | func (fr *freelist) getNextPage() pgnum {
28 | if len(fr.releasedPages) != 0 {
29 | // Take the last element and remove it from the list
30 | pageID := fr.releasedPages[len(fr.releasedPages)-1]
31 | fr.releasedPages = fr.releasedPages[:len(fr.releasedPages)-1]
32 | return pageID
33 | }
34 | fr.maxPage += 1
35 | return fr.maxPage
36 | }
37 |
38 | func (fr *freelist) releasePage(page pgnum) {
39 | fr.releasedPages = append(fr.releasedPages, page)
40 | }
41 |
42 | func (fr *freelist) serialize(buf []byte) []byte {
43 | pos := 0
44 |
45 | binary.LittleEndian.PutUint16(buf[pos:], uint16(fr.maxPage))
46 | pos += 2
47 |
48 | // released pages count
49 | binary.LittleEndian.PutUint16(buf[pos:], uint16(len(fr.releasedPages)))
50 | pos += 2
51 |
52 | for _, page := range fr.releasedPages {
53 | binary.LittleEndian.PutUint64(buf[pos:], uint64(page))
54 | pos += pageNumSize
55 |
56 | }
57 | return buf
58 | }
59 |
60 | func (fr *freelist) deserialize(buf []byte) {
61 | pos := 0
62 | fr.maxPage = pgnum(binary.LittleEndian.Uint16(buf[pos:]))
63 | pos += 2
64 |
65 | // released pages count
66 | releasedPagesCount := int(binary.LittleEndian.Uint16(buf[pos:]))
67 | pos += 2
68 |
69 | for i := 0; i < releasedPagesCount; i++ {
70 | fr.releasedPages = append(fr.releasedPages, pgnum(binary.LittleEndian.Uint64(buf[pos:])))
71 | pos += pageNumSize
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
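getNextPage prefers recycled page numbers over growing the file: released ids are popped LIFO, and only when the pool is empty does maxPage advance. A minimal in-memory sketch of that behaviour, assuming it runs inside the LibraDB package (no disk involved):

fr := newFreelist() // maxPage starts at metaPage (0)

p1 := fr.getNextPage() // 1: the pool is empty, so maxPage grows
p2 := fr.getNextPage() // 2
fr.releasePage(p1)     // page 1 goes back to the pool

p3 := fr.getNextPage() // 1 again: reused instead of growing the file
p4 := fr.getNextPage() // 3: the pool is empty again, so maxPage grows
_, _, _, _ = p1, p2, p3, p4
--------------------------------------------------------------------------------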
/freelist_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "os"
7 | "testing"
8 | )
9 |
10 | func TestFreelistSerialize(t *testing.T) {
11 | freelist := newFreelist()
12 | freelist.maxPage = 5
13 | freelist.releasedPages = []pgnum{1, 2, 3}
14 | actual := freelist.serialize(make([]byte, testPageSize, testPageSize))
15 |
16 | expected, err := os.ReadFile(getExpectedResultFileName(t.Name()))
17 | require.NoError(t, err)
18 |
19 | assert.Equal(t, expected, actual)
20 | }
21 |
22 | func TestFreelistDeserialize(t *testing.T) {
23 | freelist, err := os.ReadFile(getExpectedResultFileName(t.Name()))
24 | actual := newFreelist()
25 | actual.deserialize(freelist)
26 | require.NoError(t, err)
27 |
28 | expected := newFreelist()
29 | expected.maxPage = 5
30 | expected.releasedPages = []pgnum{1, 2, 3}
31 |
32 | assert.Equal(t, expected, actual)
33 | }
34 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/amit-davidson/LibraDB
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/google/uuid v1.3.0
7 | github.com/stretchr/testify v1.7.0
8 | )
9 |
10 | require (
11 | github.com/davecgh/go-spew v1.1.0 // indirect
12 | github.com/pmezard/go-difflib v1.0.0 // indirect
13 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
14 | )
15 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
5 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
6 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
7 | github.com/jcelliott/lumber v0.0.0-20160324203708-dd349441af25 h1:EFT6MH3igZK/dIVqgGbTqWVvkZ7wJ5iGN03SVtvvdd8=
8 | github.com/jcelliott/lumber v0.0.0-20160324203708-dd349441af25/go.mod h1:sWkGw/wsaHtRsT9zGQ/WyJCotGWG/Anow/9hsAcBWRw=
9 | github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975 h1:zm/Rb2OsnLWCY88Njoqgo4X6yt/lx3oBNWhepX0AOMU=
10 | github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975/go.mod h1:4Mct/lWCFf1jzQTTAaWtOI7sXqmG+wBeiBfT4CxoaJk=
11 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
12 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
13 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
14 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
15 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
16 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
17 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
18 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
19 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
20 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
21 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
22 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | func LibraDB() {
4 | path := "libra.db"
5 | db, _ := Open(path, DefaultOptions)
6 |
7 | tx := db.WriteTx()
8 | name := []byte("test")
9 | collection, _ := tx.CreateCollection(name)
10 |
11 | key, value := []byte("key1"), []byte("value1")
12 | _ = collection.Put(key, value)
13 |
14 | _ = tx.Commit()
15 | }
--------------------------------------------------------------------------------
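The same API covers reads and deletes as well; a sketch that could be appended inside LibraDB() above, reusing its db, name, and key variables (errors still ignored for brevity; Find returns a nil item for a missing key, as the tests in this dump show):

tx2 := db.WriteTx()
col, _ := tx2.GetCollection(name)

item, _ := col.Find(key) // nil item means the key doesn't exist
if item != nil {
	_ = col.Remove(key)
}

_ = tx2.Commit()
_ = db.Close()
--------------------------------------------------------------------------------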
/meta.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import "encoding/binary"
4 |
5 | const (
6 | magicNumber uint32 = 0xD00DB00D
7 | metaPageNum = 0
8 | )
9 |
10 | // meta is the meta page of the db
11 | type meta struct {
12 | 	// The database has a root collection that holds all the collections in the database. It is called root, and the
13 | 	// root property of meta holds the page number of the root of that collections collection. The keys are the
14 | 	// collection names and the values are the page numbers of each collection's root. Once the collection
15 | 	// and its root page are located, a search inside the collection can be made.
16 | root pgnum
17 | freelistPage pgnum
18 | }
19 |
20 | func newEmptyMeta() *meta {
21 | return &meta{}
22 | }
23 |
24 | func (m *meta) serialize(buf []byte) {
25 | pos := 0
26 | binary.LittleEndian.PutUint32(buf[pos:], magicNumber)
27 | pos += magicNumberSize
28 |
29 | binary.LittleEndian.PutUint64(buf[pos:], uint64(m.root))
30 | pos += pageNumSize
31 |
32 | binary.LittleEndian.PutUint64(buf[pos:], uint64(m.freelistPage))
33 | pos += pageNumSize
34 | }
35 |
36 | func (m *meta) deserialize(buf []byte) {
37 | pos := 0
38 | magicNumberRes := binary.LittleEndian.Uint32(buf[pos:])
39 | pos += magicNumberSize
40 |
41 | if magicNumberRes != magicNumber {
42 | panic("The file is not a libra db file")
43 | }
44 |
45 | m.root = pgnum(binary.LittleEndian.Uint64(buf[pos:]))
46 | pos += pageNumSize
47 |
48 | m.freelistPage = pgnum(binary.LittleEndian.Uint64(buf[pos:]))
49 | pos += pageNumSize
50 | }
51 |
--------------------------------------------------------------------------------
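On disk the meta page uses just the first 20 bytes of the page: a 4-byte magic number, then two 8-byte little-endian page numbers (root and freelist). A standalone sketch of the layout written by serialize (root = 3 and freelist = 4 are arbitrary example values):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// bytes 0..3   magic number (little-endian uint32)
	// bytes 4..11  root page number (little-endian uint64)
	// bytes 12..19 freelist page number (little-endian uint64)
	buf := make([]byte, 20)
	binary.LittleEndian.PutUint32(buf[0:], 0xD00DB00D)
	binary.LittleEndian.PutUint64(buf[4:], 3)  // root
	binary.LittleEndian.PutUint64(buf[12:], 4) // freelist page

	fmt.Printf("magic=%#x root=%d freelist=%d\n",
		binary.LittleEndian.Uint32(buf[0:]),
		binary.LittleEndian.Uint64(buf[4:]),
		binary.LittleEndian.Uint64(buf[12:]))
}
--------------------------------------------------------------------------------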
/meta_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "os"
7 | "testing"
8 | )
9 |
10 | func TestMetaSerialize(t *testing.T) {
11 | meta := newEmptyMeta()
12 | meta.root = 3
13 | meta.freelistPage = 4
14 | actual := make([]byte, testPageSize, testPageSize)
15 | meta.serialize(actual)
16 |
17 | expected, err := os.ReadFile(getExpectedResultFileName(t.Name()))
18 | require.NoError(t, err)
19 |
20 | assert.Equal(t, expected, actual)
21 | }
22 |
23 | func TestCreateDalIncorrectMagicNumber(t *testing.T) {
24 | actualMetaBytes, err := os.ReadFile(getExpectedResultFileName(t.Name()))
25 | require.NoError(t, err)
26 | actualMeta := newEmptyMeta()
27 | assert.Panics(t, func() {
28 | actualMeta.deserialize(actualMetaBytes)
29 | })
30 | }
31 |
32 | func TestMetaDeserialize(t *testing.T) {
33 | actualMetaBytes, err := os.ReadFile(getExpectedResultFileName(t.Name()))
34 | actualMeta := newEmptyMeta()
35 | actualMeta.deserialize(actualMetaBytes)
36 | require.NoError(t, err)
37 |
38 | expectedMeta := newEmptyMeta()
39 | expectedMeta.root = 3
40 | expectedMeta.freelistPage = 4
41 |
42 | assert.Equal(t, expectedMeta, actualMeta)
43 | }
--------------------------------------------------------------------------------
/node.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | )
7 |
8 | type Item struct {
9 | key []byte
10 | value []byte
11 | }
12 |
13 | type Node struct {
14 | // associated transaction
15 | tx *tx
16 |
17 | pageNum pgnum
18 | items []*Item
19 | childNodes []pgnum
20 | }
21 |
22 | func NewEmptyNode() *Node {
23 | return &Node{}
24 | }
25 |
26 | // NewNodeForSerialization creates a new node only with the properties that are relevant when saving to the disk
27 | func NewNodeForSerialization(items []*Item, childNodes []pgnum) *Node {
28 | return &Node{
29 | items: items,
30 | childNodes: childNodes,
31 | }
32 | }
33 |
34 | func newItem(key []byte, value []byte) *Item {
35 | return &Item{
36 | key: key,
37 | value: value,
38 | }
39 | }
40 |
41 | func isLast(index int, parentNode *Node) bool {
42 | return index == len(parentNode.items)
43 | }
44 |
45 | func isFirst(index int) bool {
46 | return index == 0
47 | }
48 |
49 | func (n *Node) isLeaf() bool {
50 | return len(n.childNodes) == 0
51 | }
52 |
53 | func (n *Node) writeNode(node *Node) *Node {
54 | return n.tx.writeNode(node)
55 | }
56 |
57 | func (n *Node) writeNodes(nodes ...*Node) {
58 | for _, node := range nodes {
59 | n.writeNode(node)
60 | }
61 | }
62 |
63 | func (n *Node) getNode(pageNum pgnum) (*Node, error) {
64 | return n.tx.getNode(pageNum)
65 | }
66 |
67 | // isOverPopulated checks if the node size is bigger than the maximum fill threshold of the page.
68 | func (n *Node) isOverPopulated() bool {
69 | return n.tx.db.isOverPopulated(n)
70 | }
71 |
72 | // canSpareAnElement checks if the node size is big enough to populate a page after giving away one item.
73 | func (n *Node) canSpareAnElement() bool {
74 | splitIndex := n.tx.db.getSplitIndex(n)
75 | if splitIndex == -1 {
76 | return false
77 | }
78 | return true
79 | }
80 |
81 | // isUnderPopulated checks if the node size is smaller than the minimum fill threshold of the page.
82 | func (n *Node) isUnderPopulated() bool {
83 | return n.tx.db.isUnderPopulated(n)
84 | }
85 |
86 | func (n *Node) serialize(buf []byte) []byte {
87 | leftPos := 0
88 | rightPos := len(buf) - 1
89 |
90 | // Add page header: isLeaf, key-value pairs count, node num
91 | // isLeaf
92 | isLeaf := n.isLeaf()
93 | var bitSetVar uint64
94 | if isLeaf {
95 | bitSetVar = 1
96 | }
97 | buf[leftPos] = byte(bitSetVar)
98 | leftPos += 1
99 |
100 | // key-value pairs count
101 | binary.LittleEndian.PutUint16(buf[leftPos:], uint16(len(n.items)))
102 | leftPos += 2
103 |
104 | 	// We use slotted pages for storing data in the page. It means the actual keys and values (the cells) are appended
105 | 	// to the right of the page, whereas the offsets have a fixed size and are appended from the left.
106 | 	// It's easier to preserve the logical order (alphabetical in the case of a b-tree) using the fixed-size metadata
107 | 	// and pointer arithmetic. Using the data itself is harder as it varies in size.
108 |
109 | // Page structure is:
110 | // ----------------------------------------------------------------------------------
111 | // | Page | key-value / child node key-value | key-value |
112 | // | Header | offset / pointer offset .... | data ..... |
113 | // ----------------------------------------------------------------------------------
114 |
115 | for i := 0; i < len(n.items); i++ {
116 | item := n.items[i]
117 | if !isLeaf {
118 | childNode := n.childNodes[i]
119 |
120 | // Write the child page as a fixed size of 8 bytes
121 | binary.LittleEndian.PutUint64(buf[leftPos:], uint64(childNode))
122 | leftPos += pageNumSize
123 | }
124 |
125 | klen := len(item.key)
126 | vlen := len(item.value)
127 |
128 | // write offset
129 | offset := rightPos - klen - vlen - 2
130 | binary.LittleEndian.PutUint16(buf[leftPos:], uint16(offset))
131 | leftPos += 2
132 |
133 | rightPos -= vlen
134 | copy(buf[rightPos:], item.value)
135 |
136 | rightPos -= 1
137 | buf[rightPos] = byte(vlen)
138 |
139 | rightPos -= klen
140 | copy(buf[rightPos:], item.key)
141 |
142 | rightPos -= 1
143 | buf[rightPos] = byte(klen)
144 | }
145 |
146 | if !isLeaf {
147 | // Write the last child node
148 | lastChildNode := n.childNodes[len(n.childNodes)-1]
149 | // Write the child page as a fixed size of 8 bytes
150 | binary.LittleEndian.PutUint64(buf[leftPos:], uint64(lastChildNode))
151 | }
152 |
153 | return buf
154 | }
155 |
156 | func (n *Node) deserialize(buf []byte) {
157 | leftPos := 0
158 |
159 | // Read header
160 | isLeaf := uint16(buf[0])
161 |
162 | itemsCount := int(binary.LittleEndian.Uint16(buf[1:3]))
163 | leftPos += 3
164 |
165 | // Read body
166 | for i := 0; i < itemsCount; i++ {
167 | if isLeaf == 0 { // False
168 | pageNum := binary.LittleEndian.Uint64(buf[leftPos:])
169 | leftPos += pageNumSize
170 |
171 | n.childNodes = append(n.childNodes, pgnum(pageNum))
172 | }
173 |
174 | // Read offset
175 | offset := binary.LittleEndian.Uint16(buf[leftPos:])
176 | leftPos += 2
177 |
178 | klen := uint16(buf[int(offset)])
179 | offset += 1
180 |
181 | key := buf[offset : offset+klen]
182 | offset += klen
183 |
184 | vlen := uint16(buf[int(offset)])
185 | offset += 1
186 |
187 | value := buf[offset : offset+vlen]
188 | offset += vlen
189 | n.items = append(n.items, newItem(key, value))
190 | }
191 |
192 | if isLeaf == 0 { // False
193 | // Read the last child node
194 | pageNum := pgnum(binary.LittleEndian.Uint64(buf[leftPos:]))
195 | n.childNodes = append(n.childNodes, pageNum)
196 | }
197 | }
198 |
199 | // elementSize returns the size of the key-value pair at a given index plus the size of one child page number
200 | // (pageNumSize is counted even for leaf nodes). It's assumed i < len(n.items).
201 | func (n *Node) elementSize(i int) int {
202 | size := 0
203 | size += len(n.items[i].key)
204 | size += len(n.items[i].value)
205 | size += pageNumSize // 8 is the pgnum size
206 | return size
207 | }
208 |
209 | // nodeSize returns the node's size in bytes
210 | func (n *Node) nodeSize() int {
211 | size := 0
212 | size += nodeHeaderSize
213 |
214 | for i := range n.items {
215 | size += n.elementSize(i)
216 | }
217 |
218 | 	// Add the last child page number
219 | size += pageNumSize // 8 is the pgnum size
220 | return size
221 | }
222 |
223 | // findKey searches for a key inside the tree. Once the key is found, the parent node and the correct index are returned
224 | // so the key itself can be accessed in the following way parent[index]. A list of the node ancestors (not including the
225 | // node itself) is also returned.
226 | // If the key isn't found, there are 2 options. If exact is true, the caller expects the key to exist, so -1 and a
227 | // nil node are returned. If exact is false, then findKey is used to locate where a new key should be inserted, so
228 | // the insertion position is returned.
229 | func (n *Node) findKey(key []byte, exact bool) (int, *Node, []int, error) {
230 | ancestorsIndexes := []int{0} // index of root
231 | index, node, err := findKeyHelper(n, key, exact, &ancestorsIndexes)
232 | if err != nil {
233 | return -1, nil, nil, err
234 | }
235 | return index, node, ancestorsIndexes, nil
236 | }
237 |
238 | func findKeyHelper(node *Node, key []byte, exact bool, ancestorsIndexes *[]int) (int, *Node, error) {
239 | wasFound, index := node.findKeyInNode(key)
240 | if wasFound {
241 | return index, node, nil
242 | }
243 |
244 | if node.isLeaf() {
245 | if exact {
246 | return -1, nil, nil
247 | }
248 | return index, node, nil
249 | }
250 |
251 | *ancestorsIndexes = append(*ancestorsIndexes, index)
252 | nextChild, err := node.getNode(node.childNodes[index])
253 | if err != nil {
254 | return -1, nil, err
255 | }
256 | return findKeyHelper(nextChild, key, exact, ancestorsIndexes)
257 | }
258 |
259 | // findKeyInNode iterates over all the items and searches for the key. If the key is found, its index is returned. If
260 | // it isn't found, the index where it should have been inserted is returned (the first index whose item is greater).
261 | func (n *Node) findKeyInNode(key []byte) (bool, int) {
262 | for i, existingItem := range n.items {
263 | res := bytes.Compare(existingItem.key, key)
264 | if res == 0 { // Keys match
265 | return true, i
266 | }
267 |
268 | // The key is bigger than the previous item, so it doesn't exist in the node, but may exist in child nodes.
269 | if res == 1 {
270 | return false, i
271 | }
272 | }
273 |
274 | 	// The key is bigger than all the items, which means it belongs at the last index.
275 | return false, len(n.items)
276 | }
277 |
278 | func (n *Node) addItem(item *Item, insertionIndex int) int {
279 | if len(n.items) == insertionIndex { // nil or empty slice or after last element
280 | n.items = append(n.items, item)
281 | return insertionIndex
282 | }
283 |
284 | n.items = append(n.items[:insertionIndex+1], n.items[insertionIndex:]...)
285 | n.items[insertionIndex] = item
286 | return insertionIndex
287 | }
288 |
289 | // split rebalances the tree after an insertion. The modified node has to be checked to make sure it
290 | // didn't exceed the maximum number of elements. If it did, then it has to be split and rebalanced. The transformation
291 | // is depicted in the graph below. If it's not a leaf node, then the children have to be moved as well, as shown.
292 | // This may leave the parent unbalanced by having too many items, so rebalancing has to be checked for all the ancestors.
293 | // The split is performed in a for loop to support splitting a node more than once. (Though in practice it's used only once.)
294 | // n n
295 | // 3 3,6
296 | // / \ ------> / | \
297 | // a modifiedNode a modifiedNode newNode
298 | // 1,2 4,5,6,7,8 1,2 4,5 7,8
299 | func (n *Node) split(nodeToSplit *Node, nodeToSplitIndex int) {
300 | 	// getSplitIndex returns the first index where the minimum amount of bytes needed to populate a page is reached,
301 | 	// plus one, so the split happens one index after that point.
302 | splitIndex := nodeToSplit.tx.db.getSplitIndex(nodeToSplit)
303 |
304 | middleItem := nodeToSplit.items[splitIndex]
305 | var newNode *Node
306 |
307 | if nodeToSplit.isLeaf() {
308 | newNode = n.writeNode(n.tx.newNode(nodeToSplit.items[splitIndex+1:], []pgnum{}))
309 | nodeToSplit.items = nodeToSplit.items[:splitIndex]
310 | } else {
311 | newNode = n.writeNode(n.tx.newNode(nodeToSplit.items[splitIndex+1:], nodeToSplit.childNodes[splitIndex+1:]))
312 | nodeToSplit.items = nodeToSplit.items[:splitIndex]
313 | nodeToSplit.childNodes = nodeToSplit.childNodes[:splitIndex+1]
314 | }
315 | n.addItem(middleItem, nodeToSplitIndex)
316 | 	if len(n.childNodes) == nodeToSplitIndex+1 { // If the split node was the last child, simply append the new node
317 | 		n.childNodes = append(n.childNodes, newNode.pageNum)
318 | 	} else { // Otherwise insert it in the middle and shift the following children forward
319 | n.childNodes = append(n.childNodes[:nodeToSplitIndex+1], n.childNodes[nodeToSplitIndex:]...)
320 | n.childNodes[nodeToSplitIndex+1] = newNode.pageNum
321 | }
322 |
323 | n.writeNodes(n, nodeToSplit)
324 | }
325 |
326 | // rebalanceRemove rebalances the tree after a remove operation. This can be done either by rotating to the right,
327 | // rotating to the left, or by merging. First, the sibling nodes are checked to see if one of them can spare an
328 | // element. If neither can, then the node is merged with one of its siblings. This may leave
329 | // the parent unbalanced by having too few items, so rebalancing has to be checked for all the ancestors.
330 | func (n *Node) rebalanceRemove(unbalancedNode *Node, unbalancedNodeIndex int) error {
331 | pNode := n
332 |
333 | // Right rotate
334 | if unbalancedNodeIndex != 0 {
335 | leftNode, err := n.getNode(pNode.childNodes[unbalancedNodeIndex-1])
336 | if err != nil {
337 | return err
338 | }
339 | if leftNode.canSpareAnElement() {
340 | rotateRight(leftNode, pNode, unbalancedNode, unbalancedNodeIndex)
341 | n.writeNodes(leftNode, pNode, unbalancedNode)
342 | return nil
343 | }
344 | }
345 |
346 | 	// Left rotate
347 | if unbalancedNodeIndex != len(pNode.childNodes)-1 {
348 | rightNode, err := n.getNode(pNode.childNodes[unbalancedNodeIndex+1])
349 | if err != nil {
350 | return err
351 | }
352 | if rightNode.canSpareAnElement() {
353 | rotateLeft(unbalancedNode, pNode, rightNode, unbalancedNodeIndex)
354 | n.writeNodes(unbalancedNode, pNode, rightNode)
355 | return nil
356 | }
357 | }
358 |
359 | 	// The merge function merges a given node into its left sibling. So by default, we merge the unbalanced node
360 | 	// into the node to its left. In the case where the unbalanced node is the leftmost, we have to swap the merge
361 | 	// parameters, so the unbalanced node's right sibling is merged into the unbalanced node instead.
362 | if unbalancedNodeIndex == 0 {
363 | rightNode, err := n.getNode(n.childNodes[unbalancedNodeIndex+1])
364 | if err != nil {
365 | return err
366 | }
367 |
368 | return pNode.merge(rightNode, unbalancedNodeIndex+1)
369 | }
370 |
371 | return pNode.merge(unbalancedNode, unbalancedNodeIndex)
372 | }
373 |
374 | // removeItemFromLeaf removes an item from a leaf node. It means there is no handling of child nodes.
375 | func (n *Node) removeItemFromLeaf(index int) {
376 | n.items = append(n.items[:index], n.items[index+1:]...)
377 | n.writeNode(n)
378 | }
379 |
380 | func (n *Node) removeItemFromInternal(index int) ([]int, error) {
381 | // Take element before inorder (The biggest element from the left branch), put it in the removed index and remove
382 | // it from the original node. Track in affectedNodes any nodes in the path leading to that node. It will be used
383 | // in case the tree needs to be rebalanced.
384 | // p
385 | // /
386 | // ..
387 | // / \
388 | // .. a
389 |
390 | affectedNodes := make([]int, 0)
391 | affectedNodes = append(affectedNodes, index)
392 |
393 | // Starting from its left child, descend to the rightmost descendant.
394 | aNode, err := n.getNode(n.childNodes[index])
395 | if err != nil {
396 | return nil, err
397 | }
398 |
399 | for !aNode.isLeaf() {
400 | 		traversingIndex := len(aNode.childNodes) - 1
401 | aNode, err = aNode.getNode(aNode.childNodes[traversingIndex])
402 | if err != nil {
403 | return nil, err
404 | }
405 | affectedNodes = append(affectedNodes, traversingIndex)
406 | }
407 |
408 | // Replace the item that should be removed with the item before inorder which we just found.
409 | n.items[index] = aNode.items[len(aNode.items)-1]
410 | aNode.items = aNode.items[:len(aNode.items)-1]
411 | n.writeNodes(n, aNode)
412 |
413 | return affectedNodes, nil
414 | }
415 |
416 | func rotateRight(aNode, pNode, bNode *Node, bNodeIndex int) {
417 | // p p
418 | // 4 3
419 | // / \ ------> / \
420 | // a b (unbalanced) a b (unbalanced)
421 | // 1,2,3 5 1,2 4,5
422 |
423 | // Get last item and remove it
424 | aNodeItem := aNode.items[len(aNode.items)-1]
425 | aNode.items = aNode.items[:len(aNode.items)-1]
426 |
427 | // Get item from parent node and assign the aNodeItem item instead
428 | pNodeItemIndex := bNodeIndex - 1
429 | if isFirst(bNodeIndex) {
430 | pNodeItemIndex = 0
431 | }
432 | pNodeItem := pNode.items[pNodeItemIndex]
433 | pNode.items[pNodeItemIndex] = aNodeItem
434 |
435 | // Assign parent item to b and make it first
436 | bNode.items = append([]*Item{pNodeItem}, bNode.items...)
437 |
438 | 	// If it's an inner node then move the children as well.
439 | if !aNode.isLeaf() {
440 | childNodeToShift := aNode.childNodes[len(aNode.childNodes)-1]
441 | aNode.childNodes = aNode.childNodes[:len(aNode.childNodes)-1]
442 | bNode.childNodes = append([]pgnum{childNodeToShift}, bNode.childNodes...)
443 | }
444 | }
445 |
446 | func rotateLeft(aNode, pNode, bNode *Node, bNodeIndex int) {
447 | // p p
448 | // 2 3
449 | // / \ ------> / \
450 | // a(unbalanced) b a(unbalanced) b
451 | // 1 3,4,5 1,2 4,5
452 |
453 | // Get first item and remove it
454 | bNodeItem := bNode.items[0]
455 | bNode.items = bNode.items[1:]
456 |
457 | // Get item from parent node and assign the bNodeItem item instead
458 | pNodeItemIndex := bNodeIndex
459 | if isLast(bNodeIndex, pNode) {
460 | pNodeItemIndex = len(pNode.items) - 1
461 | }
462 | pNodeItem := pNode.items[pNodeItemIndex]
463 | pNode.items[pNodeItemIndex] = bNodeItem
464 |
465 | // Assign parent item to a and make it last
466 | aNode.items = append(aNode.items, pNodeItem)
467 |
468 | 	// If it's an inner node then move the children as well.
469 | if !bNode.isLeaf() {
470 | childNodeToShift := bNode.childNodes[0]
471 | bNode.childNodes = bNode.childNodes[1:]
472 | aNode.childNodes = append(aNode.childNodes, childNodeToShift)
473 | }
474 | }
475 |
476 | func (n *Node) merge(bNode *Node, bNodeIndex int) error {
477 | // p p
478 | // 3,5 5
479 | // / | \ ------> / \
480 | // a b c a c
481 | // 1,2 4 6,7 1,2,3,4 6,7
482 | aNode, err := n.getNode(n.childNodes[bNodeIndex-1])
483 | if err != nil {
484 | return err
485 | }
486 |
487 | // Take the item from the parent, remove it and add it to the unbalanced node
488 | pNodeItem := n.items[bNodeIndex-1]
489 | n.items = append(n.items[:bNodeIndex-1], n.items[bNodeIndex:]...)
490 | aNode.items = append(aNode.items, pNodeItem)
491 |
492 | aNode.items = append(aNode.items, bNode.items...)
493 | n.childNodes = append(n.childNodes[:bNodeIndex], n.childNodes[bNodeIndex+1:]...)
494 | if !aNode.isLeaf() {
495 | aNode.childNodes = append(aNode.childNodes, bNode.childNodes...)
496 | }
497 | n.writeNodes(aNode, n)
498 | n.tx.deleteNode(bNode)
499 | return nil
500 | }
--------------------------------------------------------------------------------
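The slotted-page arithmetic in serialize/deserialize is easiest to see with a single leaf cell: the fixed-size offset slot grows from the left, the cell grows from the right, and from the recorded offset a reader walks forward over key length, key, value length, value. A standalone sketch mirroring that arithmetic (the 64-byte page and the key/value are made up for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	key, value := []byte("key1"), []byte("val-1")
	buf := make([]byte, 64) // a tiny hypothetical page
	leftPos, rightPos := 0, len(buf)-1

	// Write the 2-byte offset slot on the left...
	offset := rightPos - len(key) - len(value) - 2
	binary.LittleEndian.PutUint16(buf[leftPos:], uint16(offset))

	// ...and the cell on the right: value, value length, key, key length.
	rightPos -= len(value)
	copy(buf[rightPos:], value)
	rightPos--
	buf[rightPos] = byte(len(value))
	rightPos -= len(key)
	copy(buf[rightPos:], key)
	rightPos--
	buf[rightPos] = byte(len(key))

	// Reading jumps to the offset and walks forward through the cell.
	pos := binary.LittleEndian.Uint16(buf[leftPos:])
	klen := uint16(buf[pos])
	pos++
	k := buf[pos : pos+klen]
	pos += klen
	vlen := uint16(buf[pos])
	pos++
	v := buf[pos : pos+vlen]
	fmt.Printf("%s=%s\n", k, v) // key1=val-1
}
--------------------------------------------------------------------------------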
/node_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "bytes"
5 | "github.com/stretchr/testify/assert"
6 | "github.com/stretchr/testify/require"
7 | "os"
8 | "strconv"
9 | "testing"
10 | )
11 |
12 | func Test_AddSingle(t *testing.T) {
13 | db, cleanFunc := createTestDB(t)
14 | defer cleanFunc()
15 |
16 | tx := db.WriteTx()
17 | collection, err := tx.CreateCollection(testCollectionName)
18 | require.NoError(t, err)
19 |
20 | value := createItem("0")
21 | err = collection.Put(value, value)
22 | require.NoError(t, err)
23 |
24 | err = tx.Commit()
25 | require.NoError(t, err)
26 |
27 | expectedDB, expectedCleanFunc := createTestDB(t)
28 | defer expectedCleanFunc()
29 |
30 | expectedTx := expectedDB.WriteTx()
31 |
32 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("0"), []pgnum{}))
33 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
34 | require.NoError(t, err)
35 |
36 | err = expectedTx.Commit()
37 | require.NoError(t, err)
38 |
39 | areTreesEqual(t, expectedCollection, collection)
40 | }
41 |
42 | func Test_RemoveFromRootSingleElement(t *testing.T) {
43 | db, cleanFunc := createTestDB(t)
44 | defer cleanFunc()
45 |
46 | tx := db.WriteTx()
47 | collection, err := tx.CreateCollection(testCollectionName)
48 | require.NoError(t, err)
49 |
50 | value := createItem("0")
51 | err = collection.Put(value, value)
52 | require.NoError(t, err)
53 |
54 | err = collection.Remove(value)
55 | require.NoError(t, err)
56 |
57 | err = tx.Commit()
58 | require.NoError(t, err)
59 |
60 | expectedDBAfterRemoval, cleanFuncAfterRemoval := createTestDB(t)
61 | defer cleanFuncAfterRemoval()
62 |
63 | expectedTxAfterRemoval := expectedDBAfterRemoval.WriteTx()
64 |
65 | expectedRootAfterRemoval := expectedTxAfterRemoval.writeNode(expectedTxAfterRemoval.newNode([]*Item{}, []pgnum{}))
66 |
67 | expectedCollectionAfterRemoval, err := expectedTxAfterRemoval.createCollection(newCollection(testCollectionName, expectedRootAfterRemoval.pageNum))
68 |
69 | err = expectedTxAfterRemoval.Commit()
70 | require.NoError(t, err)
71 |
72 | areTreesEqual(t, expectedCollectionAfterRemoval, collection)
73 | }
74 |
75 | func Test_AddMultiple(t *testing.T) {
76 | db, cleanFunc := createTestDB(t)
77 | defer cleanFunc()
78 |
79 | tx := db.WriteTx()
80 | collection, err := tx.CreateCollection(testCollectionName)
81 | require.NoError(t, err)
82 |
83 | numOfElements := mockNumberOfElements
84 | for i := 0; i < numOfElements; i++ {
85 | val := createItem(strconv.Itoa(i))
86 | err = collection.Put(val, val)
87 | require.NoError(t, err)
88 | }
89 | err = tx.Commit()
90 | require.NoError(t, err)
91 |
92 | // Tree is balanced
93 | expected, expectedCleanFunc := createTestMockTree(t)
94 | defer expectedCleanFunc()
95 | areTreesEqual(t, expected, collection)
96 | }
97 |
98 | func Test_AddAndRebalanceSplit(t *testing.T) {
99 | db, cleanFunc := createTestDB(t)
100 | defer cleanFunc()
101 |
102 | tx := db.WriteTx()
103 | child0 := tx.writeNode(tx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
104 |
105 | child1 := tx.writeNode(tx.newNode(createItems("5", "6", "7", "8"), []pgnum{}))
106 |
107 | root := tx.writeNode(tx.newNode(createItems("4"), []pgnum{child0.pageNum, child1.pageNum}))
108 |
109 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
110 | require.NoError(t, err)
111 |
112 | val := createItem("9")
113 | err = collection.Put(val, val)
114 | require.NoError(t, err)
115 |
116 | err = tx.Commit()
117 | require.NoError(t, err)
118 |
119 | expectedTestDB, expectedCleanFunc := createTestDB(t)
120 | defer expectedCleanFunc()
121 |
122 | testTx := expectedTestDB.WriteTx()
123 |
124 | expectedChild0 := testTx.writeNode(testTx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
125 |
126 | expectedChild1 := testTx.writeNode(testTx.newNode(createItems("5", "6"), []pgnum{}))
127 |
128 | expectedChild2 := testTx.writeNode(testTx.newNode(createItems("8", "9"), []pgnum{}))
129 |
130 | expectedRoot := testTx.writeNode(testTx.newNode(createItems("4", "7"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum}))
131 |
132 | expectedCollection, err := testTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
133 | require.NoError(t, err)
134 |
135 | err = testTx.Commit()
136 | require.NoError(t, err)
137 |
138 | // Tree is balanced
139 | areTreesEqual(t, expectedCollection, collection)
140 | }
141 |
142 | func Test_SplitAndMerge(t *testing.T) {
143 | db, cleanFunc := createTestDB(t)
144 | defer cleanFunc()
145 |
146 | tx := db.WriteTx()
147 | child0 := tx.writeNode(tx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
148 |
149 | child1 := tx.writeNode(tx.newNode(createItems("5", "6", "7", "8"), []pgnum{}))
150 |
151 | root := tx.writeNode(tx.newNode(createItems("4"), []pgnum{child0.pageNum, child1.pageNum}))
152 |
153 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
154 | require.NoError(t, err)
155 |
156 | val := createItem("9")
157 | err = collection.Put(val, val)
158 | require.NoError(t, err)
159 |
160 | err = tx.Commit()
161 | require.NoError(t, err)
162 |
163 | expectedTestDB, expectedCleanFunc := createTestDB(t)
164 | defer expectedCleanFunc()
165 |
166 | testTx := expectedTestDB.WriteTx()
167 |
168 | expectedChild0 := testTx.writeNode(testTx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
169 |
170 | expectedChild1 := testTx.writeNode(testTx.newNode(createItems("5", "6"), []pgnum{}))
171 |
172 | expectedChild2 := testTx.writeNode(testTx.newNode(createItems("8", "9"), []pgnum{}))
173 |
174 | expectedRoot := testTx.writeNode(testTx.newNode(createItems("4", "7"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum}))
175 |
176 | expectedCollection, err := testTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
177 | require.NoError(t, err)
178 |
179 | // Tree is balanced
180 | areTreesEqual(t, expectedCollection, collection)
181 |
182 | err = testTx.Commit()
183 | require.NoError(t, err)
184 |
185 | removeTx := db.WriteTx()
186 | 	collection, err = removeTx.GetCollection(collection.name)
187 | require.NoError(t, err)
188 |
189 | err = collection.Remove(val)
190 | require.NoError(t, err)
191 |
192 | err = removeTx.Commit()
193 | require.NoError(t, err)
194 |
195 | expectedDBAfterRemoval, expectedDBCleanFunc := createTestDB(t)
196 | defer expectedDBCleanFunc()
197 |
198 | expectedTxAfterRemoval := expectedDBAfterRemoval.WriteTx()
199 | expectedChild0AfterRemoval := expectedTxAfterRemoval.writeNode(expectedTxAfterRemoval.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
200 |
201 | expectedChild1AfterRemoval := expectedTxAfterRemoval.writeNode(expectedTxAfterRemoval.newNode(createItems("5", "6", "7", "8"), []pgnum{}))
202 |
203 | expectedRootAfterRemoval := expectedTxAfterRemoval.writeNode(expectedTxAfterRemoval.newNode(createItems("4"), []pgnum{expectedChild0AfterRemoval.pageNum, expectedChild1AfterRemoval.pageNum}))
204 |
205 | expectedCollectionAfterRemoval, err := expectedTxAfterRemoval.createCollection(newCollection(testCollectionName, expectedRootAfterRemoval.pageNum))
206 | require.NoError(t, err)
207 |
208 | err = expectedTxAfterRemoval.Commit()
209 | require.NoError(t, err)
210 |
211 | areTreesEqual(t, expectedCollectionAfterRemoval, collection)
212 | }
213 |
214 | func Test_RemoveFromRootWithoutRebalance(t *testing.T) {
215 | db, cleanFunc := createTestDB(t)
216 | defer cleanFunc()
217 |
218 | tx := db.WriteTx()
219 | collection, err := tx.CreateCollection(testCollectionName)
220 | require.NoError(t, err)
221 |
222 | numOfElements := mockNumberOfElements
223 | for i := 0; i < numOfElements; i++ {
224 | val := createItem(strconv.Itoa(i))
225 | err = collection.Put(val, val)
226 | require.NoError(t, err)
227 | }
228 |
229 | // Remove an element
230 | err = collection.Remove(createItem("7"))
231 | require.NoError(t, err)
232 |
233 | err = tx.Commit()
234 | require.NoError(t, err)
235 |
236 | expectedDB, expectedCleanFunc := createTestDB(t)
237 | defer expectedCleanFunc()
238 |
239 | expectedTestTx := expectedDB.WriteTx()
240 |
241 | expectedChild0 := expectedTestTx.writeNode(expectedTestTx.newNode(createItems("0", "1"), []pgnum{}))
242 |
243 | expectedChild1 := expectedTestTx.writeNode(expectedTestTx.newNode(createItems("3", "4"), []pgnum{}))
244 |
245 | expectedChild2 := expectedTestTx.writeNode(expectedTestTx.newNode(createItems("6", "8", "9"), []pgnum{}))
246 |
247 | expectedRoot := expectedTestTx.writeNode(expectedTestTx.newNode(createItems("2", "5"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum}))
248 |
249 | expectedCollectionAfterRemoval, err := expectedTestTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
250 | require.NoError(t, err)
251 |
252 | err = expectedTestTx.Commit()
253 | require.NoError(t, err)
254 |
255 | areTreesEqual(t, expectedCollectionAfterRemoval, collection)
256 | }
257 |
258 | func Test_RemoveFromRootAndRotateLeft(t *testing.T) {
259 | db, cleanFunc := createTestDB(t)
260 | defer cleanFunc()
261 |
262 | tx := db.WriteTx()
263 |
264 | child0 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
265 |
266 | child1 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
267 |
268 | child2 := tx.writeNode(tx.newNode(createItems("6", "7", "8"), []pgnum{}))
269 |
270 | root := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child0.pageNum, child1.pageNum, child2.pageNum}))
271 |
272 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
273 | require.NoError(t, err)
274 |
275 | // Remove an element
276 | err = collection.Remove(createItem("5"))
277 | require.NoError(t, err)
278 |
279 | err = tx.Commit()
280 | require.NoError(t, err)
281 |
282 | expectedDB, expectedCleanFunc := createTestDB(t)
283 | defer expectedCleanFunc()
284 |
285 | expectedTx := expectedDB.WriteTx()
286 |
287 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1"), []pgnum{}))
288 |
289 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("3", "4"), []pgnum{}))
290 |
291 | expectedChild2 := expectedTx.writeNode(expectedTx.newNode(createItems("7", "8"), []pgnum{}))
292 |
293 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("2", "6"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum}))
294 |
295 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
296 | require.NoError(t, err)
297 |
298 | err = expectedTx.Commit()
299 | require.NoError(t, err)
300 |
301 | areTreesEqual(t, expectedCollection, collection)
302 | }
303 |
304 | func Test_RemoveFromRootAndRotateRight(t *testing.T) {
305 | db, cleanFunc := createTestDB(t)
306 | defer cleanFunc()
307 |
308 | tx := db.WriteTx()
309 |
310 | child0 := tx.writeNode(tx.newNode(createItems("0", "1", "2"), []pgnum{}))
311 |
312 | child1 := tx.writeNode(tx.newNode(createItems("4", "5"), []pgnum{}))
313 |
314 | child2 := tx.writeNode(tx.newNode(createItems("7", "8"), []pgnum{}))
315 |
316 | root := tx.writeNode(tx.newNode(createItems("3", "6"), []pgnum{child0.pageNum, child1.pageNum, child2.pageNum}))
317 |
318 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
319 | require.NoError(t, err)
320 |
321 | // Remove an element
322 | err = collection.Remove(createItem("6"))
323 | require.NoError(t, err)
324 |
325 | err = tx.Commit()
326 | require.NoError(t, err)
327 |
328 | expectedDB, expectedCleanFunc := createTestDB(t)
329 | defer expectedCleanFunc()
330 |
331 | expectedTx := expectedDB.WriteTx()
332 |
333 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1"), []pgnum{}))
334 |
335 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("3", "4"), []pgnum{}))
336 |
337 | expectedChild2 := expectedTx.writeNode(expectedTx.newNode(createItems("7", "8"), []pgnum{}))
338 |
339 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("2", "5"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum}))
340 |
341 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
342 | require.NoError(t, err)
343 |
344 | err = expectedTx.Commit()
345 | require.NoError(t, err)
346 |
347 | areTreesEqual(t, expectedCollection, collection)
348 | }
349 |
350 | // Test_RemoveFromRootAndRebalanceMergeToUnbalanced tests when the unbalanced node is the most left one so the
351 | // merge has to happen from the right node into the unbalanced node
352 | func Test_RemoveFromRootAndRebalanceMergeToUnbalanced(t *testing.T) {
353 | db, cleanFunc := createTestDB(t)
354 | defer cleanFunc()
355 |
356 | tx := db.WriteTx()
357 |
358 | child0 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
359 |
360 | child1 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
361 |
362 | child2 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
363 |
364 | root := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child0.pageNum, child1.pageNum, child2.pageNum}))
365 |
366 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
367 | require.NoError(t, err)
368 |
369 | // Remove an element
370 | err = collection.Remove(createItem("2"))
371 | require.NoError(t, err)
372 |
373 | err = tx.Commit()
374 | require.NoError(t, err)
375 |
376 | expectedDB, expectedCleanFunc := createTestDB(t)
377 | defer expectedCleanFunc()
378 |
379 | expectedTx := expectedDB.WriteTx()
380 |
381 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1", "3", "4"), []pgnum{}))
382 |
383 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
384 |
385 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("5"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
386 |
387 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
388 | require.NoError(t, err)
389 |
390 | err = expectedTx.Commit()
391 | require.NoError(t, err)
392 |
393 | areTreesEqual(t, expectedCollection, collection)
394 | }
395 |
396 | // Test_RemoveFromRootAndRebalanceMergeFromUnbalanced tests the case where the unbalanced node is not the leftmost
397 | // one, so the merge has to happen from the unbalanced node into its left sibling
398 | func Test_RemoveFromRootAndRebalanceMergeFromUnbalanced(t *testing.T) {
399 | db, cleanFunc := createTestDB(t)
400 | defer cleanFunc()
401 |
402 | tx := db.WriteTx()
403 |
404 | child0 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
405 |
406 | child1 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
407 |
408 | child2 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
409 |
410 | root := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child0.pageNum, child1.pageNum, child2.pageNum}))
411 |
412 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
413 | require.NoError(t, err)
414 |
415 | // Remove an element
416 | err = collection.Remove(createItem("5"))
417 | require.NoError(t, err)
418 |
419 | err = tx.Commit()
420 | require.NoError(t, err)
421 |
422 | expectedDB, expectedCleanFunc := createTestDB(t)
423 | defer expectedCleanFunc()
424 |
425 | expectedTx := expectedDB.WriteTx()
426 |
427 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
428 |
429 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
430 |
431 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("4"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
432 |
433 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
434 | require.NoError(t, err)
435 |
436 | err = expectedTx.Commit()
437 | require.NoError(t, err)
438 |
439 | // Compare the resulting tree with the expected tree
440 | areTreesEqual(t, expectedCollection, collection)
441 | }
442 |
443 | func Test_RemoveFromInnerNodeAndRotateLeft(t *testing.T) {
444 | db, cleanFunc := createTestDB(t)
445 | defer cleanFunc()
446 |
447 | tx := db.WriteTx()
448 |
449 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
450 |
451 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
452 |
453 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
454 |
455 | child0 := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
456 |
457 | child10 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
458 |
459 | child11 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
460 |
461 | child12 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
462 |
463 | child13 := tx.writeNode(tx.newNode(createItems("i", "j"), []pgnum{}))
464 |
465 | child1 := tx.writeNode(tx.newNode(createItems("b", "e", "h"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum, child13.pageNum}))
466 |
467 | root := tx.writeNode(tx.newNode(createItems("8"), []pgnum{child0.pageNum, child1.pageNum}))
468 |
469 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
470 | require.NoError(t, err)
471 |
472 | // Remove an element
473 | err = collection.Remove(createItem("5"))
474 | require.NoError(t, err)
475 |
476 | err = tx.Commit()
477 | require.NoError(t, err)
478 |
479 | expectedDB, expectedCleanFunc := createTestDB(t)
480 | defer expectedCleanFunc()
481 |
482 | expectedTx := expectedDB.WriteTx()
483 |
484 | expectedChild00 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
485 |
486 | expectedChild01 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
487 |
488 | expectedChild02 := expectedTx.writeNode(expectedTx.newNode(createItems("9", "a"), []pgnum{}))
489 |
490 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("4", "8"), []pgnum{expectedChild00.pageNum, expectedChild01.pageNum, expectedChild02.pageNum}))
491 |
492 | expectedChild10 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "d"), []pgnum{}))
493 |
494 | expectedChild11 := expectedTx.writeNode(expectedTx.newNode(createItems("f", "g"), []pgnum{}))
495 |
496 | expectedChild12 := expectedTx.writeNode(expectedTx.newNode(createItems("i", "j"), []pgnum{}))
497 |
498 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("e", "h"), []pgnum{expectedChild10.pageNum, expectedChild11.pageNum, expectedChild12.pageNum}))
499 |
500 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("b"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
501 |
502 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
503 | require.NoError(t, err)
504 |
505 | err = expectedTx.Commit()
506 | require.NoError(t, err)
507 |
508 | // Compare the resulting tree with the expected tree
509 | areTreesEqual(t, expectedCollection, collection)
510 | }
511 |
512 | func Test_RemoveFromInnerNodeAndRotateRight(t *testing.T) {
513 | db, cleanFunc := createTestDB(t)
514 | defer cleanFunc()
515 |
516 | tx := db.WriteTx()
517 |
518 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
519 |
520 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
521 |
522 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
523 |
524 | child03 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
525 |
526 | child0 := tx.writeNode(tx.newNode(createItems("2", "5", "8"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum, child03.pageNum}))
527 |
528 | child10 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
529 |
530 | child11 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
531 |
532 | child12 := tx.writeNode(tx.newNode(createItems("i", "j"), []pgnum{}))
533 |
534 | child1 := tx.writeNode(tx.newNode(createItems("e", "h"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
535 |
536 | root := tx.writeNode(tx.newNode(createItems("b"), []pgnum{child0.pageNum, child1.pageNum}))
537 |
538 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
539 | require.NoError(t, err)
540 |
541 | // Remove an element
542 | err = collection.Remove(createItem("e"))
543 | require.NoError(t, err)
544 |
545 | err = tx.Commit()
546 | require.NoError(t, err)
547 |
548 | expectedDB, expectedCleanFunc := createTestDB(t)
549 | defer expectedCleanFunc()
550 |
551 | expectedTx := expectedDB.WriteTx()
552 |
553 | expectedChild00 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1"), []pgnum{}))
554 |
555 | expectedChild01 := expectedTx.writeNode(expectedTx.newNode(createItems("3", "4"), []pgnum{}))
556 |
557 | expectedChild02 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
558 |
559 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("2", "5"), []pgnum{expectedChild00.pageNum, expectedChild01.pageNum, expectedChild02.pageNum}))
560 |
561 | expectedChild10 := expectedTx.writeNode(expectedTx.newNode(createItems("9", "a"), []pgnum{}))
562 |
563 | expectedChild11 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "d", "f", "g"), []pgnum{}))
564 |
565 | expectedChild12 := expectedTx.writeNode(expectedTx.newNode(createItems("i", "j"), []pgnum{}))
566 |
567 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("b", "h"), []pgnum{expectedChild10.pageNum, expectedChild11.pageNum, expectedChild12.pageNum}))
568 |
569 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("8"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
570 |
571 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
572 | require.NoError(t, err)
573 |
574 | err = expectedTx.Commit()
575 | require.NoError(t, err)
576 |
577 | // Compare the resulting tree with the expected tree
578 | areTreesEqual(t, expectedCollection, collection)
579 | }
580 |
581 | func Test_RemoveFromInnerNodeAndUnion(t *testing.T) {
582 | db, cleanFunc := createTestDB(t)
583 | defer cleanFunc()
584 |
585 | tx := db.WriteTx()
586 |
587 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
588 |
589 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
590 |
591 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
592 |
593 | child0 := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
594 |
595 | child10 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
596 |
597 | child11 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
598 |
599 | child12 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
600 |
601 | child1 := tx.writeNode(tx.newNode(createItems("b", "e"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
602 |
603 | root := tx.writeNode(tx.newNode(createItems("8"), []pgnum{child0.pageNum, child1.pageNum}))
604 |
605 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
606 | require.NoError(t, err)
607 |
608 | // Remove an element
609 | err = collection.Remove(createItem("2"))
610 | require.NoError(t, err)
611 |
612 | err = tx.Commit()
613 | require.NoError(t, err)
614 |
615 | expectedDB, expectedCleanFunc := createTestDB(t)
616 | defer expectedCleanFunc()
617 |
618 | expectedTx := expectedDB.WriteTx()
619 |
620 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1", "3", "4"), []pgnum{}))
621 |
622 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
623 |
624 | expectedChild2 := expectedTx.writeNode(expectedTx.newNode(createItems("9", "a"), []pgnum{}))
625 |
626 | expectedChild3 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "d"), []pgnum{}))
627 |
628 | expectedChild4 := expectedTx.writeNode(expectedTx.newNode(createItems("f", "g"), []pgnum{}))
629 |
630 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("5", "8", "b", "e"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum, expectedChild3.pageNum, expectedChild4.pageNum}))
631 |
632 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
633 | require.NoError(t, err)
634 |
635 | err = expectedTx.Commit()
636 | require.NoError(t, err)
637 |
638 | // Compare the resulting tree with the expected tree
639 | areTreesEqual(t, expectedCollection, collection)
640 | }
641 |
642 | func Test_RemoveFromLeafAndRotateLeft(t *testing.T) {
643 | db, cleanFunc := createTestDB(t)
644 | defer cleanFunc()
645 |
646 | tx := db.WriteTx()
647 |
648 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
649 |
650 | child01 := tx.writeNode(tx.newNode(createItems("3", "4", "5"), []pgnum{}))
651 |
652 | child02 := tx.writeNode(tx.newNode(createItems("7", "8"), []pgnum{}))
653 |
654 | child0 := tx.writeNode(tx.newNode(createItems("2", "6"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
655 |
656 | child10 := tx.writeNode(tx.newNode(createItems("a", "b"), []pgnum{}))
657 |
658 | child11 := tx.writeNode(tx.newNode(createItems("d", "e"), []pgnum{}))
659 |
660 | child12 := tx.writeNode(tx.newNode(createItems("g", "h"), []pgnum{}))
661 |
662 | child1 := tx.writeNode(tx.newNode(createItems("c", "f"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
663 |
664 | root := tx.writeNode(tx.newNode(createItems("9"), []pgnum{child0.pageNum, child1.pageNum}))
665 |
666 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
667 | require.NoError(t, err)
668 |
669 | // Remove an element
670 | err = collection.Remove(createItem("1"))
671 | require.NoError(t, err)
672 |
673 | err = tx.Commit()
674 | require.NoError(t, err)
675 |
676 | expectedDB, expectedCleanFunc := createTestDB(t)
677 | defer expectedCleanFunc()
678 |
679 | expectedTx := expectedDB.WriteTx()
680 |
681 | expectedChild00 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "2"), []pgnum{}))
682 |
683 | expectedChild01 := expectedTx.writeNode(expectedTx.newNode(createItems("4", "5"), []pgnum{}))
684 |
685 | expectedChild02 := expectedTx.writeNode(expectedTx.newNode(createItems("7", "8"), []pgnum{}))
686 |
687 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("3", "6"), []pgnum{expectedChild00.pageNum, expectedChild01.pageNum, expectedChild02.pageNum}))
688 |
689 | expectedChild10 := expectedTx.writeNode(expectedTx.newNode(createItems("a", "b"), []pgnum{}))
690 |
691 | expectedChild11 := expectedTx.writeNode(expectedTx.newNode(createItems("d", "e"), []pgnum{}))
692 |
693 | expectedChild12 := expectedTx.writeNode(expectedTx.newNode(createItems("g", "h"), []pgnum{}))
694 |
695 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "f"), []pgnum{expectedChild10.pageNum, expectedChild11.pageNum, expectedChild12.pageNum}))
696 |
697 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("9"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
698 |
699 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
700 | require.NoError(t, err)
701 |
702 | err = expectedTx.Commit()
703 | require.NoError(t, err)
704 |
705 | // Compare the resulting tree with the expected tree
706 | areTreesEqual(t, expectedCollection, collection)
707 | }
708 |
709 | func Test_RemoveFromLeafAndRotateRight(t *testing.T) {
710 | db, cleanFunc := createTestDB(t)
711 | defer cleanFunc()
712 |
713 | tx := db.WriteTx()
714 |
715 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
716 |
717 | child01 := tx.writeNode(tx.newNode(createItems("3", "4", "5"), []pgnum{}))
718 |
719 | child02 := tx.writeNode(tx.newNode(createItems("7", "8"), []pgnum{}))
720 |
721 | child0 := tx.writeNode(tx.newNode(createItems("2", "6"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
722 |
723 | child10 := tx.writeNode(tx.newNode(createItems("a", "b"), []pgnum{}))
724 |
725 | child11 := tx.writeNode(tx.newNode(createItems("d", "e"), []pgnum{}))
726 |
727 | child12 := tx.writeNode(tx.newNode(createItems("g", "h"), []pgnum{}))
728 |
729 | child1 := tx.writeNode(tx.newNode(createItems("c", "f"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
730 |
731 | root := tx.writeNode(tx.newNode(createItems("9"), []pgnum{child0.pageNum, child1.pageNum}))
732 |
733 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
734 | require.NoError(t, err)
735 |
736 | // Remove an element
737 | err = collection.Remove(createItem("8"))
738 | require.NoError(t, err)
739 |
740 | err = tx.Commit()
741 | require.NoError(t, err)
742 |
743 | expectedDB, expectedCleanFunc := createTestDB(t)
744 | defer expectedCleanFunc()
745 |
746 | expectedTx := expectedDB.WriteTx()
747 |
748 | expectedChild00 := expectedTx.writeNode(expectedTx.newNode(createItems("0", "1"), []pgnum{}))
749 |
750 | expectedChild01 := expectedTx.writeNode(expectedTx.newNode(createItems("3", "4"), []pgnum{}))
751 |
752 | expectedChild02 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
753 |
754 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("2", "5"), []pgnum{expectedChild00.pageNum, expectedChild01.pageNum, expectedChild02.pageNum}))
755 |
756 | expectedChild10 := expectedTx.writeNode(expectedTx.newNode(createItems("a", "b"), []pgnum{}))
757 |
758 | expectedChild11 := expectedTx.writeNode(expectedTx.newNode(createItems("d", "e"), []pgnum{}))
759 |
760 | expectedChild12 := expectedTx.writeNode(expectedTx.newNode(createItems("g", "h"), []pgnum{}))
761 |
762 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "f"), []pgnum{expectedChild10.pageNum, expectedChild11.pageNum, expectedChild12.pageNum}))
763 |
764 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("9"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum}))
765 |
766 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
767 | require.NoError(t, err)
768 |
769 | err = expectedTx.Commit()
770 | require.NoError(t, err)
771 |
772 | // Compare the resulting tree with the expected tree
773 | areTreesEqual(t, expectedCollection, collection)
774 | }
775 |
776 | func Test_RemoveFromLeafAndUnion(t *testing.T) {
777 | db, cleanFunc := createTestDB(t)
778 | defer cleanFunc()
779 |
780 | tx := db.WriteTx()
781 |
782 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
783 |
784 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
785 |
786 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
787 |
788 | child0 := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
789 |
790 | child10 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
791 |
792 | child11 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
793 |
794 | child12 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
795 |
796 | child1 := tx.writeNode(tx.newNode(createItems("b", "e"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
797 |
798 | root := tx.writeNode(tx.newNode(createItems("8"), []pgnum{child0.pageNum, child1.pageNum}))
799 |
800 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
801 | require.NoError(t, err)
802 |
803 | // Remove an element
804 | err = collection.Remove(createItem("0"))
805 | require.NoError(t, err)
806 |
807 | err = tx.Commit()
808 | require.NoError(t, err)
809 |
810 | expectedDB, expectedCleanFunc := createTestDB(t)
811 | defer expectedCleanFunc()
812 |
813 | expectedTx := expectedDB.WriteTx()
814 |
815 | expectedChild0 := expectedTx.writeNode(expectedTx.newNode(createItems("1", "2", "3", "4"), []pgnum{}))
816 |
817 | expectedChild1 := expectedTx.writeNode(expectedTx.newNode(createItems("6", "7"), []pgnum{}))
818 |
819 | expectedChild2 := expectedTx.writeNode(expectedTx.newNode(createItems("9", "a"), []pgnum{}))
820 |
821 | expectedChild3 := expectedTx.writeNode(expectedTx.newNode(createItems("c", "d"), []pgnum{}))
822 |
823 | expectedChild4 := expectedTx.writeNode(expectedTx.newNode(createItems("f", "g"), []pgnum{}))
824 |
825 | expectedRoot := expectedTx.writeNode(expectedTx.newNode(createItems("5", "8", "b", "e"), []pgnum{expectedChild0.pageNum, expectedChild1.pageNum, expectedChild2.pageNum, expectedChild3.pageNum, expectedChild4.pageNum}))
826 |
827 | expectedCollection, err := expectedTx.createCollection(newCollection(testCollectionName, expectedRoot.pageNum))
828 | require.NoError(t, err)
829 |
830 | err = expectedTx.Commit()
831 | require.NoError(t, err)
832 |
833 | // Compare the resulting tree with the expected tree
834 | areTreesEqual(t, expectedCollection, collection)
835 | }
836 |
837 | func Test_FindNode(t *testing.T) {
838 | db, cleanFunc := createTestDB(t)
839 | defer cleanFunc()
840 |
841 | tx := db.WriteTx()
842 |
843 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
844 |
845 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
846 |
847 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
848 |
849 | child0 := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
850 |
851 | child10 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
852 |
853 | child11 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
854 |
855 | child12 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
856 |
857 | child1 := tx.writeNode(tx.newNode(createItems("b", "e"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
858 |
859 | root := tx.writeNode(tx.newNode(createItems("8"), []pgnum{child0.pageNum, child1.pageNum}))
860 |
861 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
862 | require.NoError(t, err)
863 |
864 | err = tx.Commit()
865 | require.NoError(t, err)
866 |
867 | // Item found
868 | expectedVal := createItem("c")
869 | expectedItem := newItem(expectedVal, expectedVal)
870 | item, err := collection.Find(expectedVal)
871 | require.NoError(t, err)
872 | assert.Equal(t, expectedItem, item)
873 |
874 | // Item not found
875 | expectedVal = createItem("h")
876 | item, err = collection.Find(expectedVal)
877 | require.NoError(t, err)
878 | assert.Nil(t, item)
879 | }
880 |
881 | func Test_UpdateNode(t *testing.T) {
882 | db, cleanFunc := createTestDB(t)
883 | defer cleanFunc()
884 |
885 | tx := db.WriteTx()
886 |
887 | child00 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
888 |
889 | child01 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
890 |
891 | child02 := tx.writeNode(tx.newNode(createItems("6", "7"), []pgnum{}))
892 |
893 | child0 := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child00.pageNum, child01.pageNum, child02.pageNum}))
894 |
895 | child10 := tx.writeNode(tx.newNode(createItems("9", "a"), []pgnum{}))
896 |
897 | child11 := tx.writeNode(tx.newNode(createItems("c", "d"), []pgnum{}))
898 |
899 | child12 := tx.writeNode(tx.newNode(createItems("f", "g"), []pgnum{}))
900 |
901 | child1 := tx.writeNode(tx.newNode(createItems("b", "e"), []pgnum{child10.pageNum, child11.pageNum, child12.pageNum}))
902 |
903 | root := tx.writeNode(tx.newNode(createItems("8"), []pgnum{child0.pageNum, child1.pageNum}))
904 |
905 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
906 | require.NoError(t, err)
907 |
908 | err = tx.Commit()
909 | require.NoError(t, err)
910 |
911 | tx2 := db.WriteTx()
912 | collection, err = tx2.GetCollection(collection.name)
913 | require.NoError(t, err)
914 |
915 | // Item found
916 | expectedVal := createItem("c")
917 | expectedItem := newItem(expectedVal, expectedVal)
918 | item, err := collection.Find(expectedVal)
919 | require.NoError(t, err)
920 | assert.Equal(t, expectedItem, item)
921 |
922 | // Item updated successfully
923 | newValue := createItem("f")
924 | err = collection.Put(expectedVal, newValue)
925 | require.NoError(t, err)
926 |
927 | item, err = collection.Find(expectedVal)
928 | require.NoError(t, err)
929 | assert.Equal(t, newValue, item.value)
930 |
931 | err = tx2.Commit()
932 | require.NoError(t, err)
933 | }
934 |
935 | func TestSerializeWithoutChildNodes(t *testing.T) {
936 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
937 | var childNodes []pgnum
938 | node := &Node{
939 | items: items,
940 | childNodes: childNodes,
941 | }
942 |
943 | actual := node.serialize(make([]byte, testPageSize))
944 |
945 | expectedPage, err := os.ReadFile(getExpectedResultFileName(t.Name()))
946 | require.NoError(t, err)
947 | assert.Equal(t, 0, bytes.Compare(actual, expectedPage))
948 | }
949 |
950 | func TestDeserializeWithoutChildNodes(t *testing.T) {
951 | page, err := os.ReadFile(getExpectedResultFileName(t.Name()))
952 | require.NoError(t, err)
953 |
954 | actualNode := NewEmptyNode()
955 | actualNode.deserialize(page)
956 |
957 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
958 | var childNodes []pgnum
959 | expectedNode := &Node{
960 | items: items,
961 | childNodes: childNodes,
962 | }
963 |
964 | assert.Equal(t, expectedNode, actualNode)
965 | }
966 |
967 | func TestSerializeWithChildNodes(t *testing.T) {
968 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
969 | childNodes := []pgnum{1, 2, 3}
970 | node := &Node{
971 | items: items,
972 | childNodes: childNodes,
973 | }
974 |
975 | actual := node.serialize(make([]byte, testPageSize))
976 |
977 | expectedPage, err := os.ReadFile(getExpectedResultFileName(t.Name()))
978 | require.NoError(t, err)
979 | assert.Equal(t, 0, bytes.Compare(actual, expectedPage))
980 | }
981 |
982 | func TestDeserializeWithChildNodes(t *testing.T) {
983 | page, err := os.ReadFile(getExpectedResultFileName(t.Name()))
984 | require.NoError(t, err)
985 |
986 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
987 | childNodes := []pgnum{1, 2, 3}
988 | expectedNode := &Node{
989 | items: items,
990 | childNodes: childNodes,
991 | }
992 |
993 | actualNode := NewEmptyNode()
994 | actualNode.deserialize(page)
995 | assert.Equal(t, expectedNode, actualNode)
996 | }
997 |
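998 | // TestSerializeDeserializeRoundTrip is a small illustrative sketch (a round-trip variant of the four tests above):
999 | // serializing a node into a page and deserializing that page back should reproduce the same items and child nodes,
1000 | // without relying on the fixtures under expected/. It assumes only that serialize does not mutate the node itself.
1001 | func TestSerializeDeserializeRoundTrip(t *testing.T) {
1002 | items := []*Item{newItem([]byte("key1"), []byte("val1")), newItem([]byte("key2"), []byte("val2"))}
1003 | childNodes := []pgnum{1, 2, 3}
1004 | node := &Node{
1005 | items: items,
1006 | childNodes: childNodes,
1007 | }
1008 |
1009 | // Serialize into a fresh page, then deserialize that page into an empty node.
1010 | page := node.serialize(make([]byte, testPageSize))
1011 | actualNode := NewEmptyNode()
1012 | actualNode.deserialize(page)
1013 |
1014 | assert.Equal(t, items, actualNode.items)
1015 | assert.Equal(t, childNodes, actualNode.childNodes)
1016 | }
1017 |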
--------------------------------------------------------------------------------
/testutils.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "github.com/google/uuid"
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | "os"
10 | "testing"
11 | )
12 |
13 | const (
14 | // The tests are designed so the min number of items in a node should be around 2 and the max around 4 (1 is not
15 | // enough, 5 is too much). The sizes are adjusted accordingly so those numbers hold in all tests.
16 | testPageSize = 4096
17 | testMinPercentage = 0.2
18 | testMaxPercentage = 0.55
19 | testValSize = 255
20 |
21 | mockNumberOfElements = 10
22 | expectedFolderPath = "expected"
23 | )
24 |
25 | var (
26 | testCollectionName = []byte("test1")
27 | )
28 |
29 | func createTestDB(t *testing.T) (*DB, func()) {
30 | db, err := Open(getTempFileName(), &Options{MinFillPercent: testMinPercentage, MaxFillPercent: testMaxPercentage})
31 | require.NoError(t, err)
32 |
33 | return db, func() {
34 | _ = db.Close()
35 | }
36 | }
37 |
38 | func areCollectionsEqual(t *testing.T, c1, c2 *Collection) {
39 | assert.Equal(t, c1.name, c2.name)
40 | assert.Equal(t, c1.root, c2.root)
41 | assert.Equal(t, c1.counter, c2.counter)
42 | }
43 |
44 | func areTreesEqual(t *testing.T, t1, t2 *Collection) {
45 | t1Root, err := t1.tx.getNode(t1.root)
46 | require.NoError(t, err)
47 |
48 | t2Root, err := t2.tx.getNode(t2.root)
49 | require.NoError(t, err)
50 |
51 | areTreesEqualHelper(t, t1Root, t2Root)
52 | }
53 |
54 | func areNodesEqual(t *testing.T, n1, n2 *Node) {
55 | for i := 0; i < len(n1.items); i++ {
56 | assert.Equal(t, n1.items[i].key, n2.items[i].key)
57 | assert.Equal(t, n1.items[i].value, n2.items[i].value)
58 | }
59 | }
60 |
61 | func areTreesEqualHelper(t *testing.T, n1, n2 *Node) {
62 | require.Equal(t, len(n1.items), len(n2.items))
63 | require.Equal(t, len(n1.childNodes), len(n2.childNodes))
64 | areNodesEqual(t, n1, n2)
65 | // Exit condition: leaf node, where len(n1.childNodes) == 0
66 | for i := 0; i < len(n1.childNodes); i++ {
67 | node1, err := n1.getNode(n1.childNodes[i])
68 | require.NoError(t, err)
69 | node2, err := n2.getNode(n2.childNodes[i])
70 | require.NoError(t, err)
71 | areTreesEqualHelper(t, node1, node2)
72 | }
73 | }
74 | func createTestMockTree(t *testing.T) (*Collection, func()) {
75 | db, cleanFunc := createTestDB(t)
76 |
77 | tx := db.WriteTx()
78 |
79 | child0 := tx.writeNode(tx.newNode(createItems("0", "1"), []pgnum{}))
80 |
81 | child1 := tx.writeNode(tx.newNode(createItems("3", "4"), []pgnum{}))
82 |
83 | child2 := tx.writeNode(tx.newNode(createItems("6", "7", "8", "9"), []pgnum{}))
84 |
85 | root := tx.writeNode(tx.newNode(createItems("2", "5"), []pgnum{child0.pageNum, child1.pageNum, child2.pageNum}))
86 |
87 | expectedCollection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
88 | require.NoError(t, err)
89 |
90 | err = tx.Commit()
91 | require.NoError(t, err)
92 |
93 | return expectedCollection, cleanFunc
94 | }
95 |
96 | func getExpectedResultFileName(name string) string {
97 | return fmt.Sprintf("%s%c%s", expectedFolderPath, os.PathSeparator, name)
98 | }
99 |
100 | func getTempFileName() string {
101 | id := uuid.New()
102 | return fmt.Sprintf("%s%c%s", os.TempDir(), os.PathSeparator, id)
103 | }
104 |
105 | func memset(buf []byte, count int) []byte {
106 | return bytes.Repeat(buf, count)
107 | }
108 |
109 | // createItem creates an item by memsetting a fixed-size buf (255 bytes) with the given value. The fixed size is used
110 | // so that in all tests the minimum number of items in a node is 2 and the maximum is 4. This keeps the rebalance
111 | // tests uniform.
112 | func createItem(key string) []byte {
113 | keyBuf := memset([]byte(key), testValSize)
114 | return keyBuf
115 | }
116 |
117 | func createItems(keys ...string) []*Item {
118 | items := make([]*Item, 0)
119 | for _, key := range keys {
120 | keyBuf := memset([]byte(key), testValSize)
121 | items = append(items, newItem(keyBuf, keyBuf))
122 | }
123 | return items
124 | }
125 |
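126 | // Back-of-the-envelope check for the constants above (illustrative, ignoring per-item metadata): each item created
127 | // by createItem carries a 255-byte key and a 255-byte value, roughly 510 bytes in total. testMaxPercentage*testPageSize
128 | // = 0.55*4096 ≈ 2252 bytes, which fits 4 such items but not 5, while testMinPercentage*testPageSize = 0.2*4096 ≈ 819
129 | // bytes is larger than a single item, so a node that drops to 1 item falls below the minimum and gets rebalanced.
130 |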
--------------------------------------------------------------------------------
/tx.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | type tx struct {
4 | dirtyNodes map[pgnum]*Node
5 | pagesToDelete []pgnum
6 |
7 | // New pages allocated during the transaction; they will be released if Rollback is called.
8 | allocatedPageNums []pgnum
9 |
10 | write bool
11 |
12 | db *DB
13 | }
14 |
15 | func newTx(db *DB, write bool) *tx {
16 | return &tx{
17 | map[pgnum]*Node{},
18 | make([]pgnum, 0),
19 | make([]pgnum, 0),
20 | write,
21 | db,
22 | }
23 | }
24 |
25 | func (tx *tx) newNode(items []*Item, childNodes []pgnum) *Node {
26 | node := NewEmptyNode()
27 | node.items = items
28 | node.childNodes = childNodes
29 | node.pageNum = tx.db.getNextPage()
30 | node.tx = tx
31 |
32 | node.tx.allocatedPageNums = append(node.tx.allocatedPageNums, node.pageNum)
33 | return node
34 | }
35 |
36 | func (tx *tx) getNode(pageNum pgnum) (*Node, error) {
37 | if node, ok := tx.dirtyNodes[pageNum]; ok {
38 | return node, nil
39 | }
40 |
41 | node, err := tx.db.getNode(pageNum)
42 | if err != nil {
43 | return nil, err
44 | }
45 | node.tx = tx
46 | return node, nil
47 | }
48 |
49 | func (tx *tx) writeNode(node *Node) *Node {
50 | tx.dirtyNodes[node.pageNum] = node
51 | node.tx = tx
52 | return node
53 | }
54 |
55 | func (tx *tx) deleteNode(node *Node) {
56 | tx.pagesToDelete = append(tx.pagesToDelete, node.pageNum)
57 | }
58 |
59 | func (tx *tx) Rollback() {
60 | if !tx.write {
61 | tx.db.rwlock.RUnlock()
62 | return
63 | }
64 |
65 | tx.dirtyNodes = nil
66 | tx.pagesToDelete = nil
67 | for _, pageNum := range tx.allocatedPageNums {
68 | tx.db.freelist.releasePage(pageNum)
69 | }
70 | tx.allocatedPageNums = nil
71 | tx.db.rwlock.Unlock()
72 | }
73 |
74 | func (tx *tx) Commit() error {
75 | if !tx.write {
76 | tx.db.rwlock.RUnlock()
77 | return nil
78 | }
79 |
80 | for _, node := range tx.dirtyNodes {
81 | _, err := tx.db.writeNode(node)
82 | if err != nil {
83 | return err
84 | }
85 | }
86 |
87 | for _, pageNum := range tx.pagesToDelete {
88 | tx.db.deleteNode(pageNum)
89 | }
90 | _, err := tx.db.writeFreelist()
91 | if err != nil {
92 | return err
93 | }
94 |
95 | tx.dirtyNodes = nil
96 | tx.pagesToDelete = nil
97 | tx.allocatedPageNums = nil
98 | tx.db.rwlock.Unlock()
99 | return nil
100 | }
101 |
102 | // This will be used for implementing COW. The idea is to mark all the dirty collections, then, for each collection,
103 | // traverse its dirty nodes in post order and commit the child pages. Then take the new page numbers, assign them
104 | // to the parent, save it as well, and so on. After all the collections are written, rewrite the root node with the
105 | // new collection roots. Rewrite the freelist with the newly allocated pages. Finally, rewrite the meta page, so the
106 | // new root node takes effect.
107 | // COW gives us atomicity, as the new pages cannot be seen until the root node is written. This way, in case of a
108 | // failure or a rollback, no harm is done as nothing was committed to the database. The commented-out commitNode and
109 | // saveDirtyNodes below sketch this flow.
110 | //func (tx *tx) commitNode(node *Node) error {
111 | // oldPageNum := node.num
112 | // node.num = 0
113 | //
114 | // newNode, err := tx.db.writeNode(node)
115 | // if err != nil {
116 | // return err
117 | // }
118 | // tx.committedNodes[oldPageNum] = newNode.num
119 | // tx.deleteNode(node)
120 | // return nil
121 | //}
122 | //
123 | //// saveDirtyNodes saves the tree in post order. Post order is used since child pages are written to disk and given
124 | //// new page ids first; only then can the parent node be updated with the new page numbers of its child nodes.
125 | //func (tx *tx) saveDirtyNodes(node *Node) error {
126 | // if len(node.childNodes) == 0 {
127 | // return tx.commitNode(node)
128 | // }
129 | //
130 | // for i, childNodePgid := range node.childNodes {
131 | // if childNode, ok := tx.dirtyNodes[childNodePgid]; ok {
132 | // err := tx.saveDirtyNodes(childNode)
133 | // if err != nil {
134 | // return err
135 | // }
136 | // }
137 | // node.childNodes[i] = tx.committedNodes[childNodePgid]
138 | // }
139 | //
140 | // return tx.commitNode(node)
141 | //}
142 |
143 | func (tx *tx) getRootCollection() *Collection {
144 | rootCollection := newEmptyCollection()
145 | rootCollection.root = tx.db.root
146 | rootCollection.tx = tx
147 | return rootCollection
148 | }
149 |
150 | func (tx *tx) GetCollection(name []byte) (*Collection, error) {
151 | rootCollection := tx.getRootCollection()
152 | item, err := rootCollection.Find(name)
153 | if err != nil {
154 | return nil, err
155 | }
156 |
157 | if item == nil {
158 | return nil, nil
159 | }
160 |
161 | collection := newEmptyCollection()
162 | collection.deserialize(item)
163 | collection.tx = tx
164 | return collection, nil
165 | }
166 |
167 | func (tx *tx) CreateCollection(name []byte) (*Collection, error) {
168 | if !tx.write {
169 | return nil, writeInsideReadTxErr
170 | }
171 |
172 | newCollectionPage, err := tx.db.writeNode(NewEmptyNode())
173 | if err != nil {
174 | return nil, err
175 | }
176 |
177 | newCollection := newEmptyCollection()
178 | newCollection.name = name
179 | newCollection.root = newCollectionPage.pageNum
180 | return tx.createCollection(newCollection)
181 | }
182 |
183 | func (tx *tx) DeleteCollection(name []byte) error {
184 | if !tx.write {
185 | return writeInsideReadTxErr
186 | }
187 |
188 | rootCollection := tx.getRootCollection()
189 |
190 | return rootCollection.Remove(name)
191 |
192 | }
193 |
194 | func (tx *tx) createCollection(collection *Collection) (*Collection, error) {
195 | collection.tx = tx
196 | collectionBytes := collection.serialize()
197 |
198 | rootCollection := tx.getRootCollection()
199 | err := rootCollection.Put(collection.name, collectionBytes.value)
200 | if err != nil {
201 | return nil, err
202 | }
203 |
204 | return collection, nil
205 | }
206 |
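207 | // Locking note: Commit and Rollback are the only places db.rwlock is released (RUnlock for a read tx, Unlock for a
208 | // write tx); the matching lock is taken when the transaction is opened (db.ReadTx and db.WriteTx in db.go). Every
209 | // transaction must therefore end with exactly one call to Commit or Rollback, as the tests in tx_test.go do.
210 |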
--------------------------------------------------------------------------------
/tx_test.go:
--------------------------------------------------------------------------------
1 | package LibraDB
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "github.com/stretchr/testify/require"
6 | "sync"
7 | "testing"
8 | )
9 |
10 | func TestTx_CreateCollection(t *testing.T) {
11 | db, cleanFunc := createTestDB(t)
12 | defer cleanFunc()
13 |
14 | tx := db.WriteTx()
15 | collection, err := tx.CreateCollection(testCollectionName)
16 | require.NoError(t, err)
17 |
18 | err = tx.Commit()
19 | require.NoError(t, err)
20 |
21 | tx = db.ReadTx()
22 | actualCollection, err := tx.GetCollection(collection.name)
23 | require.NoError(t, err)
24 |
25 | err = tx.Commit()
26 | require.NoError(t, err)
27 |
28 | areCollectionsEqual(t, collection, actualCollection)
29 | }
30 |
31 | func TestTx_CreateCollectionReadTx(t *testing.T) {
32 | db, cleanFunc := createTestDB(t)
33 | defer cleanFunc()
34 |
35 | tx := db.ReadTx()
36 | collection, err := tx.CreateCollection(testCollectionName)
37 | require.Error(t, err)
38 | require.Nil(t, collection)
39 |
40 | require.NoError(t, tx.Commit())
41 | }
42 |
43 | func TestTx_OpenMultipleReadTxSimultaneously(t *testing.T) {
44 | db, cleanFunc := createTestDB(t)
45 | defer cleanFunc()
46 |
47 | tx1 := db.ReadTx()
48 | tx2 := db.ReadTx()
49 |
50 | collection1, err := tx1.GetCollection(testCollectionName)
51 | require.NoError(t, err)
52 | require.Nil(t, collection1)
53 |
54 | collection2, err := tx2.GetCollection(testCollectionName)
55 | require.NoError(t, err)
56 | require.Nil(t, collection2)
57 |
58 | require.NoError(t, tx1.Commit())
59 | require.NoError(t, tx2.Commit())
60 | }
61 |
62 | // TestTx_OpenReadAndWriteTxSimultaneously validates that read and write tx don't run simultaneously. It first starts
63 | // a read tx, then once the lock is acquired, a write transaction is started. Once the read tx finishes, the lock is
64 | // released, and the write tx starts executing. Once the lock is acquired again, a read tx is triggered.
65 | // The nesting of the functions makes sure the transactions are fired only after the lock is acquired, to avoid race
66 | // conditions.
67 | // We expect the first read tx (tx1) not to see the changes done by the write tx (tx2) even though it queries the
68 | // database after the write tx was triggered (because of the lock).
69 | // And again, even though tx3 was triggered before tx2 committed its changes, it doesn't query the database yet since
70 | // the lock is already acquired. It queries the database only after tx2 makes its changes and releases the lock.
71 | func TestTx_OpenReadAndWriteTxSimultaneously(t *testing.T) {
72 | db, cleanFunc := createTestDB(t)
73 | defer cleanFunc()
74 |
75 | wg := sync.WaitGroup{}
76 |
77 | tx1 := db.ReadTx()
78 |
79 | // Start write tx only after the lock was acquired by the read tx
80 | wg.Add(1)
81 | go func() {
82 | tx2 := db.WriteTx()
83 |
84 | // Start read tx only after the lock was acquired by the write tx
85 | wg.Add(1)
86 | go func() {
87 | tx3 := db.ReadTx()
88 |
89 | collection3, err := tx3.GetCollection(testCollectionName)
90 | require.NoError(t, err)
91 | require.Equal(t, testCollectionName, collection3.name)
92 |
93 | require.NoError(t, tx3.Commit())
94 | wg.Done()
95 | }()
96 |
97 | // The write tx makes its changes and releases the lock when it commits.
98 | _, err := tx2.CreateCollection(testCollectionName)
99 | require.NoError(t, err)
100 |
101 | require.NoError(t, tx2.Commit())
102 | wg.Done()
103 | }()
104 |
105 | collection1, err := tx1.GetCollection(testCollectionName)
106 | require.NoError(t, err)
107 | require.Nil(t, collection1)
108 |
109 | require.NoError(t, tx1.Commit())
110 |
111 | wg.Wait()
112 | }
113 |
114 | func TestTx_Rollback(t *testing.T) {
115 | db, cleanFunc := createTestDB(t)
116 | defer cleanFunc()
117 |
118 | tx := db.WriteTx()
119 | child0 := tx.writeNode(tx.newNode(createItems("0", "1", "2", "3"), []pgnum{}))
120 |
121 | child1 := tx.writeNode(tx.newNode(createItems("5", "6", "7", "8"), []pgnum{}))
122 |
123 | root := tx.writeNode(tx.newNode(createItems("4"), []pgnum{child0.pageNum, child1.pageNum}))
124 |
125 | collection, err := tx.createCollection(newCollection(testCollectionName, root.pageNum))
126 | require.NoError(t, err)
127 |
128 | err = tx.Commit()
129 | require.NoError(t, err)
130 |
131 | assert.Len(t, tx.db.freelist.releasedPages, 0)
132 |
133 | // Try to add 9 but then perform a rollback, so it won't be saved
134 | tx2 := db.WriteTx()
135 |
136 | collection, err = tx2.GetCollection(collection.name)
137 | require.NoError(t, err)
138 |
139 | val := createItem("9")
140 | err = collection.Put(val, val)
141 | require.NoError(t, err)
142 |
143 | tx2.Rollback()
144 |
145 | // 9 should not exist since a rollback was performed. A new page should be added to the released page ids though,
146 | // since a split occurred and a new node page was allocated, but later released.
147 | assert.Len(t, tx2.db.freelist.releasedPages, 1)
148 | tx3 := db.ReadTx()
149 |
150 | collection, err = tx3.GetCollection(collection.name)
151 | require.NoError(t, err)
152 |
153 | // Item not found
154 | expectedVal := createItem("9")
155 | item, err := collection.Find(expectedVal)
156 | require.NoError(t, err)
157 | assert.Nil(t, item)
158 |
159 | err = tx3.Commit()
160 | require.NoError(t, err)
161 |
162 | assert.Len(t, tx3.db.freelist.releasedPages, 1)
163 | }
164 |
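165 | // TestTx_PutAndFindAcrossTx is a minimal end-to-end sketch of the flow the tests above exercise: create a collection
166 | // and put an item inside one write tx, commit, then read the item back in a separate read tx. The test name and the
167 | // key "0" are arbitrary; it assumes Put works on a freshly created collection just as it does in Test_UpdateNode.
168 | func TestTx_PutAndFindAcrossTx(t *testing.T) {
169 | db, cleanFunc := createTestDB(t)
170 | defer cleanFunc()
171 |
172 | // Write tx: create the collection and insert a single item.
173 | tx := db.WriteTx()
174 | collection, err := tx.CreateCollection(testCollectionName)
175 | require.NoError(t, err)
176 |
177 | val := createItem("0")
178 | err = collection.Put(val, val)
179 | require.NoError(t, err)
180 |
181 | err = tx.Commit()
182 | require.NoError(t, err)
183 |
184 | // Read tx: the committed item should be visible.
185 | tx2 := db.ReadTx()
186 | collection, err = tx2.GetCollection(testCollectionName)
187 | require.NoError(t, err)
188 |
189 | item, err := collection.Find(val)
190 | require.NoError(t, err)
191 | assert.Equal(t, val, item.value)
192 |
193 | err = tx2.Commit()
194 | require.NoError(t, err)
195 | }
196 |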
--------------------------------------------------------------------------------