├── .github
│   └── workflows
│       └── node.js.yml
├── .gitignore
├── .prettierrc.yaml
├── LICENSES
│   ├── CC0-1.0.txt
│   ├── LGPL-3.0-only.txt
│   └── Unlicense.txt
├── README.md
├── bench-flumelog.patch
├── bench-flumelog.patch.license
├── compaction.js
├── compat.js
├── errors.js
├── index.js
├── package.json
├── package.json.license
├── record.js
├── stream.js
└── test
    ├── bad-offset.js
    ├── basic.js
    ├── bench.js
    ├── bench2.js
    ├── compaction.js
    ├── delete.js
    ├── fix-buggy-write.js
    ├── fix-concurrency-write-drain-bug.js
    ├── idempotent-resume.js
    ├── jacob.js
    ├── stream-abort.js
    ├── stream-pausable.js
    ├── stream.js
    └── stress-test.js
/.github/workflows/node.js.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | #
3 | # SPDX-License-Identifier: Unlicense
4 |
5 | # This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node
6 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions
7 |
8 | name: CI
9 |
10 | on:
11 | push:
12 | branches: [master]
13 | pull_request:
14 | branches: [master]
15 |
16 | jobs:
17 | licenses:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - uses: actions/checkout@v2
21 | - name: REUSE Compliance Check
22 | uses: fsfe/reuse-action@v1
23 |
24 | test:
25 | runs-on: ubuntu-latest
26 | timeout-minutes: 10
27 |
28 | strategy:
29 | matrix:
30 | node-version: [12.x, 14.x, 16.x]
31 |
32 | steps:
33 | - uses: actions/checkout@v2
34 | - name: Use Node.js ${{ matrix.node-version }}
35 | uses: actions/setup-node@v1
36 | with:
37 | node-version: ${{ matrix.node-version }}
38 | - run: npm install
39 | - name: npm test
40 | run: npm test
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | #
3 | # SPDX-License-Identifier: Unlicense
4 |
5 | node_modules
6 | package-lock.json
7 | pnpm-lock.yaml
8 | .nyc_output
9 | coverage
--------------------------------------------------------------------------------
/.prettierrc.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | # SPDX-FileCopyrightText: 2021 Andre 'Staltz' Medeiros
3 | #
4 | # SPDX-License-Identifier: Unlicense
5 |
6 | semi: false
7 | singleQuote: true
8 |
--------------------------------------------------------------------------------
/LICENSES/CC0-1.0.txt:
--------------------------------------------------------------------------------
1 | Creative Commons Legal Code
2 |
3 | CC0 1.0 Universal
4 |
5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
12 | HEREUNDER.
13 |
14 | Statement of Purpose
15 |
16 | The laws of most jurisdictions throughout the world automatically confer
17 | exclusive Copyright and Related Rights (defined below) upon the creator
18 | and subsequent owner(s) (each and all, an "owner") of an original work of
19 | authorship and/or a database (each, a "Work").
20 |
21 | Certain owners wish to permanently relinquish those rights to a Work for
22 | the purpose of contributing to a commons of creative, cultural and
23 | scientific works ("Commons") that the public can reliably and without fear
24 | of later claims of infringement build upon, modify, incorporate in other
25 | works, reuse and redistribute as freely as possible in any form whatsoever
26 | and for any purposes, including without limitation commercial purposes.
27 | These owners may contribute to the Commons to promote the ideal of a free
28 | culture and the further production of creative, cultural and scientific
29 | works, or to gain reputation or greater distribution for their Work in
30 | part through the use and efforts of others.
31 |
32 | For these and/or other purposes and motivations, and without any
33 | expectation of additional consideration or compensation, the person
34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she
35 | is an owner of Copyright and Related Rights in the Work, voluntarily
36 | elects to apply CC0 to the Work and publicly distribute the Work under its
37 | terms, with knowledge of his or her Copyright and Related Rights in the
38 | Work and the meaning and intended legal effect of CC0 on those rights.
39 |
40 | 1. Copyright and Related Rights. A Work made available under CC0 may be
41 | protected by copyright and related or neighboring rights ("Copyright and
42 | Related Rights"). Copyright and Related Rights include, but are not
43 | limited to, the following:
44 |
45 | i. the right to reproduce, adapt, distribute, perform, display,
46 | communicate, and translate a Work;
47 | ii. moral rights retained by the original author(s) and/or performer(s);
48 | iii. publicity and privacy rights pertaining to a person's image or
49 | likeness depicted in a Work;
50 | iv. rights protecting against unfair competition in regards to a Work,
51 | subject to the limitations in paragraph 4(a), below;
52 | v. rights protecting the extraction, dissemination, use and reuse of data
53 | in a Work;
54 | vi. database rights (such as those arising under Directive 96/9/EC of the
55 | European Parliament and of the Council of 11 March 1996 on the legal
56 | protection of databases, and under any national implementation
57 | thereof, including any amended or successor version of such
58 | directive); and
59 | vii. other similar, equivalent or corresponding rights throughout the
60 | world based on applicable law or treaty, and any national
61 | implementations thereof.
62 |
63 | 2. Waiver. To the greatest extent permitted by, but not in contravention
64 | of, applicable law, Affirmer hereby overtly, fully, permanently,
65 | irrevocably and unconditionally waives, abandons, and surrenders all of
66 | Affirmer's Copyright and Related Rights and associated claims and causes
67 | of action, whether now known or unknown (including existing as well as
68 | future claims and causes of action), in the Work (i) in all territories
69 | worldwide, (ii) for the maximum duration provided by applicable law or
70 | treaty (including future time extensions), (iii) in any current or future
71 | medium and for any number of copies, and (iv) for any purpose whatsoever,
72 | including without limitation commercial, advertising or promotional
73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
74 | member of the public at large and to the detriment of Affirmer's heirs and
75 | successors, fully intending that such Waiver shall not be subject to
76 | revocation, rescission, cancellation, termination, or any other legal or
77 | equitable action to disrupt the quiet enjoyment of the Work by the public
78 | as contemplated by Affirmer's express Statement of Purpose.
79 |
80 | 3. Public License Fallback. Should any part of the Waiver for any reason
81 | be judged legally invalid or ineffective under applicable law, then the
82 | Waiver shall be preserved to the maximum extent permitted taking into
83 | account Affirmer's express Statement of Purpose. In addition, to the
84 | extent the Waiver is so judged Affirmer hereby grants to each affected
85 | person a royalty-free, non transferable, non sublicensable, non exclusive,
86 | irrevocable and unconditional license to exercise Affirmer's Copyright and
87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the
88 | maximum duration provided by applicable law or treaty (including future
89 | time extensions), (iii) in any current or future medium and for any number
90 | of copies, and (iv) for any purpose whatsoever, including without
91 | limitation commercial, advertising or promotional purposes (the
92 | "License"). The License shall be deemed effective as of the date CC0 was
93 | applied by Affirmer to the Work. Should any part of the License for any
94 | reason be judged legally invalid or ineffective under applicable law, such
95 | partial invalidity or ineffectiveness shall not invalidate the remainder
96 | of the License, and in such case Affirmer hereby affirms that he or she
97 | will not (i) exercise any of his or her remaining Copyright and Related
98 | Rights in the Work or (ii) assert any associated claims and causes of
99 | action with respect to the Work, in either case contrary to Affirmer's
100 | express Statement of Purpose.
101 |
102 | 4. Limitations and Disclaimers.
103 |
104 | a. No trademark or patent rights held by Affirmer are waived, abandoned,
105 | surrendered, licensed or otherwise affected by this document.
106 | b. Affirmer offers the Work as-is and makes no representations or
107 | warranties of any kind concerning the Work, express, implied,
108 | statutory or otherwise, including without limitation warranties of
109 | title, merchantability, fitness for a particular purpose, non
110 | infringement, or the absence of latent or other defects, accuracy, or
111 | the present or absence of errors, whether or not discoverable, all to
112 | the greatest extent permissible under applicable law.
113 | c. Affirmer disclaims responsibility for clearing rights of other persons
114 | that may apply to the Work or any use thereof, including without
115 | limitation any person's Copyright and Related Rights in the Work.
116 | Further, Affirmer disclaims responsibility for obtaining any necessary
117 | consents, permissions or other rights required for any use of the
118 | Work.
119 | d. Affirmer understands and acknowledges that Creative Commons is not a
120 | party to this document and has no duty or obligation with respect to
121 | this CC0 or use of the Work.
122 |
--------------------------------------------------------------------------------
/LICENSES/LGPL-3.0-only.txt:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 |
6 | Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
7 |
8 | This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below.
9 |
10 | 0. Additional Definitions.
11 |
12 | As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License.
13 |
14 | "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below.
15 |
16 | An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library.
17 |
18 | A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version".
19 |
20 | The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version.
21 |
22 | The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work.
23 |
24 | 1. Exception to Section 3 of the GNU GPL.
25 | You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL.
26 |
27 | 2. Conveying Modified Versions.
28 | If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version:
29 |
30 | a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or
31 |
32 | b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy.
33 |
34 | 3. Object Code Incorporating Material from Library Header Files.
35 | The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following:
36 |
37 | a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License.
38 |
39 | b) Accompany the object code with a copy of the GNU GPL and this license document.
40 |
41 | 4. Combined Works.
42 | You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following:
43 |
44 | a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License.
45 |
46 | b) Accompany the Combined Work with a copy of the GNU GPL and this license document.
47 |
48 | c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document.
49 |
50 | d) Do one of the following:
51 |
52 | 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.
53 |
54 | 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version.
55 |
56 | e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.)
57 |
58 | 5. Combined Libraries.
59 | You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following:
60 |
61 | a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License.
62 |
63 | b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.
64 |
65 | 6. Revised Versions of the GNU Lesser General Public License.
66 | The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
67 |
68 | Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation.
69 |
70 | If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall
71 | apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library.
72 |
--------------------------------------------------------------------------------
/LICENSES/Unlicense.txt:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 |
3 | Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means.
4 |
5 | In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and
6 | successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law.
7 |
8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9 |
10 | For more information, please refer to <https://unlicense.org>
11 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
6 |
7 | # Async append only log
8 |
9 | This module is heavily inspired by [flumelog-aligned-offset]. It is an
10 | attempt to implement the same concept but in a simpler fashion, making
11 | it easier to reason about the code. A log is the lowest part of the
12 | SSB stack, so it should be extremely stable while still maintaining good
13 | performance.
14 |
15 | A log consists of a number of `blocks`, that contain a number of
16 | `record`s. A `record` is simply its `length`, as a 16-bit unsigned
17 | integer, followed by the `data` bytes. A record must be in one and
18 | only one block, which means there probably will be some empty space at
19 | the end of a block. Blocks are always written in full.
20 |
21 | ```
22 | <block>
23 |   <record>
24 |     <length: UInt16LE>
25 |     <data: Bytes>
26 |   </record>*
27 | </block>*
28 | ```
29 |
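Reading a record back is therefore just a matter of decoding the 16-bit
length header and slicing out that many data bytes. The following is a
minimal illustrative sketch (the `readRecord` helper is hypothetical, not
part of this module's API):

```js
// Hypothetical helper: parse one record at a given offset inside a block.
function readRecord(blockBuf, offsetInBlock) {
  const length = blockBuf.readUInt16LE(offsetInBlock) // 16-bit length header
  const dataStart = offsetInBlock + 2
  const data = blockBuf.slice(dataStart, dataStart + length)
  return { data, recordSize: 2 + length } // header + data bytes
}
```
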
30 | In contrast to flumelog-aligned-offset there is no additional `length`
31 | after the `data` in a `record` and no pointer at the end of a
32 | `block`. These were there to be able to iterate over the log in
33 | reverse, but I have never seen the need for that.
34 |
35 | Writing to the log is always async. Note that this is different from
36 | [flumelog-offset] and [flumelog-aligned-offset]. The `since`
37 | observable will be updated once the data is written. The `onDrain`
38 | callback can be used to know when data has been written if
39 | needed. Streaming will only emit values that have been written to
40 | storage. This is to ensure that a view will never get ahead of the
41 | main log and thus end up in a bad state if the system crashes before
42 | data is written. Note that `get`, however, will return values that have
43 | not yet been written to disk.
44 |
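For example, here is a minimal sketch (using the APIs documented below) of
appending a record and waiting until it has been flushed to storage:

```js
log.append(Buffer.from('hello'), (err, offset) => {
  if (err) throw err
  // The record may not be persisted yet, but `get(offset)` already works.
  log.onDrain(() => {
    // Now the record is on disk and `log.since.value` reflects its offset.
    console.log('latest written offset:', log.since.value)
  })
})
```
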
45 | This module is not compatible with flume without a wrapper around
46 | `stream`, because it uses the same terminology as [JITDB] and [ssb-db2]:
47 | `offset` refers to the byte position of a record, rather than `seq`.
48 |
49 | ## API
50 |
51 | ### Open the log
52 |
53 | ```js
54 | const OffsetLog = require('async-append-only-log')
55 |
56 | const log = OffsetLog('/path/to/log.file', {
57 | /**
58 | * Size of the block, in bytes.
59 | *
60 | * DEFAULT: 65536
61 | */
62 | blockSize: 1024,
63 |
64 | /**
65 | * Conversion layer as an object of the shape `{encode, decode}`,
66 | * where `encode` defines a function (item)=>buffer when writing to disk
67 | * and `decode` defines a function (buffer)=>item, where `item` is what
68 | * you will directly interact with using async-append-only-log's APIs.
69 | * For JSON, use `flumecodec/json`.
70 | *
71 | * DEFAULT: `{encode: x => x, decode: x => x}`
72 | */
73 | codec: { encode, decode },
74 |
75 | /**
76 | * Amount of time to wait between writes, in milliseconds.
77 | *
78 | * DEFAULT: 250
79 | */
80 | writeTimeout: 100,
81 |
82 | /**
83 | * A function that takes a record's buffer and should return a boolean
84 | * indicating whether the record is "valid". Implement this to ensure the
85 | * record is not corrupted. When the log is loaded, all records in the latest
86 | * block will be checked using this.
87 | *
88 | * DEFAULT: (recordBuffer) => true
89 | */
90 | validateRecord: (recordBuffer) => {
91 | // ...
92 | },
93 | })
94 | ```
95 |
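For example, to store JSON objects you could pass `flumecodec/json` as the
codec (a sketch, assuming `flumecodec` is installed as a dependency):

```js
const OffsetLog = require('async-append-only-log')
const jsonCodec = require('flumecodec/json')

// Items appended to this log are serialized as JSON on disk and decoded
// back into objects when read or streamed.
const jsonLog = OffsetLog('/path/to/json-log.file', { codec: jsonCodec })
```
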
96 | ### Write a single record
97 |
98 | ```js
99 | log.append(item, (err, offset) => {
100 | // ...
101 | })
102 | ```
103 |
104 | ### Write several records
105 |
106 | ```js
107 | log.append([item1, item2, item3], (err, offset3) => {
108 | // ...
109 | })
110 | ```
111 |
112 | ### Write several records, either all fail or all succeed
113 |
114 | ```js
115 | log.appendTransaction([item1, item2, item3], (err, offset3) => {
116 | // ...
117 | })
118 | ```
119 |
120 | ### Wait for all ongoing appends to be flushed to disk
121 |
122 | ```js
123 | log.onDrain(() => {
124 | // ...
125 | })
126 | ```
127 |
128 | ### Scan all records as a `push-stream`
129 |
130 | ```js
131 | log.stream(opts).pipe(sink)
132 | ```
133 |
134 | Where
135 |
136 | ```js
137 | opts = { live, offsets, values, limit, gte, gt }
138 | ```
139 |
140 | - `live` is a boolean indicating that you're interested only in records added
141 | after streaming began. DEFAULT: `false`
142 | - `offsets` is a boolean indicating you're interested in knowing the offset for each record streamed to the sink. DEFAULT: `true`
143 | - `values` is a boolean indicating you're interested in getting the data buffer for each record streamed to the sink. DEFAULT: `true`
144 | - `limit` is a number indicating how many records you want from the stream, after which the stream will close. DEFAULT: `0` which **means unlimited**
145 | - `gte` and `gt` and other opts are specific to [ltgt]
146 |
147 | ```js
148 | sink = { paused, write, end }
149 | ```
150 |
151 | `sink` is from [push-stream]
152 |
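A minimal sketch of consuming the stream with [push-stream]'s `drain` sink
(assuming `push-stream` is installed):

```js
const push = require('push-stream')

log.stream({ offsets: true, values: true, live: false }).pipe(
  push.drain(
    function write(record) {
      // With both `offsets` and `values` enabled, each item has the shape
      // { offset, value }. `value` is null for deleted records.
      console.log(record.offset, record.value)
    },
    function end(err) {
      if (err) throw err
      console.log('done scanning the log')
    }
  )
)
```
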
153 | ### Read a record
154 |
155 | ```js
156 | log.get(offset, (err, item) => {
157 | // ...
158 | })
159 | ```
160 |
161 | ### Delete a record
162 |
163 | In practice, this will just overwrite the record with zero bytes.
164 |
165 | ```js
166 | log.del(offset, (err) => {
167 | // ...
168 | })
169 | ```
170 |
171 | ### Wait for all ongoing deletes to be flushed to disk
172 |
173 | ```js
174 | log.onDeletesFlushed(() => {
175 | // ...
176 | })
177 | ```
178 |
179 | ### Keep track of the most recent record
180 |
181 | As an [obz] observable:
182 |
183 | ```js
184 | log.since((offset) => {
185 | // ...
186 | })
187 | ```
188 |
189 | ### Get statistics on deleted records
190 |
191 | Among other things, this is useful for knowing how much storage space you could
192 | save by running compaction, to eliminate deleted records.
193 |
194 | ```js
195 | log.stats((err, stats) => {
196 | console.log(stats)
197 | // { totalBytes, deletedBytes }
198 | })
199 | ```
200 |
201 | ### Compact the log (remove deleted records)
202 |
203 | ```js
204 | log.compact((err) => {
205 | // This callback will be called once, when the compaction is done.
206 | })
207 | ```
208 |
209 | Note that this functionality is currently not available when running in a
210 | browser.
211 |
212 | ### Track progress of compactions
213 |
214 | As an [obz] observable:
215 |
216 | ```js
217 | log.compactionProgress((progress) => {
218 | console.log(progress)
219 | // {
220 | // startOffset,
221 | // compactedOffset,
222 | // unshiftedOffset,
223 | // percent,
224 | // done,
225 | // sizeDiff,
226 | // holesFound,
227 | // }
228 | })
229 | ```
230 |
231 | Where
232 |
233 | - `startOffset`: the starting point for compaction. All offsets smaller than
234 | this have been left untouched by the compaction algorithm.
235 | - `compactedOffset`: all records up until this point have been compacted so far.
236 | - `unshiftedOffset`: offset for the first record that hasn't yet been "moved"
237 | to previous slots. Tracking this allows you to see the algorithm proceeding.
238 | - `percent`: a number between 0 and 1 to indicate the progress of compaction.
239 | - `done`: a boolean indicating whether compaction is ongoing (`false`) or done
240 | (`true`).
241 | - `sizeDiff`: number of bytes freed after compaction is finished. Only available
242 | if `done` is `true`.
243 | - `holesFound`: number of deleted records that were found while compaction was
244 | ongoing. Only available if `done` is `true`.
245 |
246 | ### Close the log
247 |
248 | ```js
249 | log.close((err) => {
250 | // ...
251 | })
252 | ```
253 |
254 | ## Benchmarks
255 |
256 | Running [bench-flumelog] reveals the following numbers.
257 | async-append-only-log is faster than regular flumelog-offset in all
258 | categories. The most important numbers are append (used for onboarding)
259 | and stream (used for building indexes). Flumelog-aligned-offset is not
260 | included in the benchmarks, as it writes every message synchronously,
261 | rendering the results invalid.
262 |
263 | ```
264 |
265 | async-append-only-log:
266 |
267 | name, ops/second, mb/second, ops, total-mb, seconds
268 | append, 923964.807, 138.002, 4620748, 690.149, 5.001
269 | stream, 1059075.865, 158.182, 4620748, 690.149, 4.363
270 | stream no cache, 1102803.818, 164.713, 4620748, 690.149, 4.19
271 | stream10, 2540947.641, 379.51, 12714902, 1899.068, 5.004
272 | random, 39715.656, 5.931, 198618, 29.664, 5.001
273 |
274 | flumelog offset:
275 |
276 | name, ops/second, mb/second, ops, total-mb, seconds
277 | append, 306180.037, 45.74, 3064556, 457.817, 10.009
278 | stream, 294511.348, 43.997, 2945408, 440.017, 10.001
279 | stream no cache, 327724.949, 48.959, 3064556, 457.817, 9.351
280 | stream10, 452973.302, 67.67, 4530186, 676.776, 10.001
281 | random, 28774.712, 4.298, 287891, 43.008, 10.005
282 |
283 | ```
284 |
285 | To run the benchmarks, the small `bench-flumelog.patch` needs to be
286 | applied.
287 |
288 | For more real-world benchmarks, see the [JITDB] results available as [jitdb-results].
289 |
290 | [push-stream]: https://github.com/push-stream/push-stream
291 | [flumelog-aligned-offset]: https://github.com/flumedb/flumelog-aligned-offset/
292 | [flumelog-offset]: https://github.com/flumedb/flumelog-offset/
293 | [bench-flumelog]: https://github.com/flumedb/bench-flumelog
294 | [jitdb]: https://github.com/ssb-ngi-pointer/jitdb/
295 | [ltgt]: https://github.com/dominictarr/ltgt
296 | [jitdb-results]: https://github.com/arj03/jitdb/blob/master/bench.txt
297 | [ssb-db2]: https://github.com/ssb-ngi-pointer/ssb-db2/
298 | [obz]: https://www.npmjs.com/package/obz
299 |
--------------------------------------------------------------------------------
/bench-flumelog.patch:
--------------------------------------------------------------------------------
1 | --- node_modules/bench-flumelog/index.js~ 1985-10-26 09:15:00.000000000 +0100
2 | +++ node_modules/bench-flumelog/index.js 2020-09-22 00:28:03.493468776 +0200
3 | @@ -65,8 +65,10 @@
4 | }, function (err) {
5 | if(err && err != true) throw err
6 | var time = (Date.now() - start)/1000
7 | - print('append', c/time, (total/MB)/time, c, total/MB, time)
8 | - next2()
9 | + log.onDrain(() => {
10 | + print('append', c/time, (total/MB)/time, c, total/MB, time)
11 | + next2()
12 | + })
13 | })
14 | )
15 | }
16 |
--------------------------------------------------------------------------------
/bench-flumelog.patch.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 |
3 | SPDX-License-Identifier: Unlicense
--------------------------------------------------------------------------------
/compaction.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2022 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | const RAF = require('polyraf')
6 | const fs = require('fs')
7 | const Obv = require('obz')
8 | const push = require('push-stream')
9 | const mutexify = require('mutexify')
10 | const debug = require('debug')('async-append-only-log')
11 | const Record = require('./record')
12 |
13 | function getStateFilename(logFilename) {
14 | return logFilename + '.compaction'
15 | }
16 |
17 | function stateFileExists(logFilename) {
18 | return fs.existsSync(getStateFilename(logFilename))
19 | }
20 |
21 | const NO_TRUNCATE = 0xffffffff
22 |
23 | /**
24 | * This file has state describing the continuation of the compaction algorithm.
25 | *
26 | * - bytes 0..3: UInt32LE for the version of this file format
27 | * smallest version is 1.
28 | * - bytes 4..7: UInt32LE for the startOffset, usually the start of some block
29 | * - bytes 8..11: UInt32LE for block index where to perform truncation
30 | * where 0xFFFFFFFF means no truncation to-be-done yet
31 | * - bytes 12..15: UInt32LE for the blockIndex to-be-compacted
32 | * - bytes 16..19: UInt32LE for the 1st unshifted record's offset
33 | * - bytes 20..(20+blockSize-1): blockBuf containing the 1st unshifted record
34 | */
35 | function PersistentState(logFilename, blockSize) {
36 | const raf = RAF(getStateFilename(logFilename))
37 | const writeLock = mutexify()
38 | const stateFileSize = 4 + 4 + 4 + 4 + 4 + blockSize
39 |
40 | function load(cb) {
41 | raf.stat(function onRAFStatDone(err, stat) {
42 | const fileSize = !err && stat ? stat.size : -1
43 | if (fileSize <= 0) {
44 | const state = {
45 | version: 1,
46 | startOffset: 0,
47 | truncateBlockIndex: NO_TRUNCATE,
48 | compactedBlockIndex: 0,
49 | unshiftedOffset: 0,
50 | unshiftedBlockBuffer: null,
51 | initial: true,
52 | }
53 | cb(null, state)
54 | } else {
55 | raf.read(0, stateFileSize, function onFirstRAFReadDone(err, buf) {
56 | if (err) return cb(err)
57 | const state = {
58 | version: buf.readUInt32LE(0),
59 | startOffset: buf.readUInt32LE(4),
60 | truncateBlockIndex: buf.readUInt32LE(8),
61 | compactedBlockIndex: buf.readUInt32LE(12),
62 | unshiftedOffset: buf.readUInt32LE(16),
63 | unshiftedBlockBuf: buf.slice(20),
64 | initial: false,
65 | }
66 | cb(null, state)
67 | })
68 | }
69 | })
70 | }
71 |
72 | function save(state, cb) {
73 | const buf = Buffer.alloc(stateFileSize)
74 | buf.writeUInt32LE(state.version, 0)
75 | buf.writeUInt32LE(state.startOffset, 4)
76 | buf.writeUInt32LE(state.truncateBlockIndex, 8)
77 | buf.writeUInt32LE(state.compactedBlockIndex, 12)
78 | buf.writeUInt32LE(state.unshiftedOffset, 16)
79 | state.unshiftedBlockBuf.copy(buf, 20)
80 | writeLock((unlock) => {
81 | raf.write(0, buf, function onRafWriteDone(err) {
82 | if (err) return unlock(cb, err)
83 | if (raf.fd) {
84 | fs.fsync(raf.fd, function onFsyncDone(err) {
85 | if (err) unlock(cb, err)
86 | else unlock(cb, null, state)
87 | })
88 | } else unlock(cb, null, state)
89 | })
90 | })
91 | }
92 |
93 | function destroy(cb) {
94 | if (stateFileExists(logFilename)) {
95 | raf.close(function onRAFClosed(err) {
96 | if (err) return cb(err)
97 | fs.unlink(raf.filename, cb)
98 | })
99 | } else {
100 | cb()
101 | }
102 | }
103 |
104 | return {
105 | load,
106 | save,
107 | destroy,
108 | }
109 | }
110 |
111 | /**
112 | * Compaction is the process of removing deleted records from the log by
113 | * rewriting blocks in the log *in situ*, moving ("shifting") subsequent records
114 | * to earlier slots, to fill the spaces left by the deleted records.
115 | *
116 | * The compaction algorithm is, at a high level:
117 | * - Keep track of some state, comprised of:
118 | * - compactedBlockIndex: blockIndex of the current block being compacted.
119 | * all blocks before this have already been compacted. This state always
120 | * increases, never decreases.
121 | * - unshiftedOffset: offset of the first unshifted record in the log, that
122 | * is, the first record that has not been shifted to earlier slots. This
123 | * offset is greater or equal to the compacted block's start offset, and
124 | * may be either in the same block as the compacted block, or even in a much
125 | * later block. This state always increases, never decreases.
126 | * - unshiftedBlockBuf: the block containing the first unshifted record
127 | * - Save the state to disk
128 | * - Compact one block at a time, in increasing order of blockIndex
129 | * - When a block is compacted, the state file is updated
130 | * - Once all blocks have been compacted, delete the state file
131 | */
132 | function Compaction(log, onDone) {
133 | const persistentState = PersistentState(log.filename, log.blockSize)
134 | const progress = Obv() // for the unshifted offset
135 | let startOffset = 0
136 | let version = 0
137 | let holesFound = 0
138 | let hasHoles = true // assume true
139 |
140 | let compactedBlockIndex = -1
141 | let compactedBlockBuf = null
142 | let compactedOffset = 0
143 | let compactedBlockIdenticalToUnshifted = true
144 |
145 | let unshiftedBlockIndex = 0
146 | let unshiftedBlockBuf = null
147 | let unshiftedOffset = 0
148 |
149 | let truncateBlockIndex = NO_TRUNCATE
150 |
151 | loadPersistentState(function onCompactionStateLoaded2(err) {
152 | if (err) return onDone(err)
153 | if (truncateBlockIndex !== NO_TRUNCATE) {
154 | truncateAndBeDone()
155 | } else {
156 | compactedBlockIndex -= 1 // because it'll be incremented very soon
157 | compactNextBlock()
158 | }
159 | })
160 |
161 | function loadPersistentState(cb) {
162 | persistentState.load(function onCompactionStateLoaded(err, state) {
163 | if (err) return cb(err)
164 | if (state.version !== 1) return cb(new Error('unsupported state version'))
165 | version = state.version
166 | startOffset = state.startOffset
167 | truncateBlockIndex = state.truncateBlockIndex
168 | compactedBlockIndex = state.compactedBlockIndex
169 | unshiftedOffset = state.unshiftedOffset
170 | unshiftedBlockBuf = state.unshiftedBlockBuf
171 | unshiftedBlockIndex = Math.floor(state.unshiftedOffset / log.blockSize)
172 | if (state.initial) {
173 | findStateFromLog(function foundStateFromLog(err, state) {
174 | if (err) return cb(err)
175 | compactedBlockIndex = state.compactedBlockIndex
176 | startOffset = compactedBlockIndex * log.blockSize
177 | unshiftedOffset = state.unshiftedOffset
178 | unshiftedBlockBuf = state.unshiftedBlockBuf
179 | unshiftedBlockIndex = Math.floor(unshiftedOffset / log.blockSize)
180 | savePersistentState(cb)
181 | })
182 | } else {
183 | cb()
184 | }
185 | })
186 | }
187 |
188 | function savePersistentState(cb) {
189 | if (!unshiftedBlockBuf) {
190 | loadUnshiftedBlock(saveIt)
191 | } else {
192 | saveIt()
193 | }
194 |
195 | function saveIt() {
196 | persistentState.save(
197 | {
198 | version,
199 | startOffset,
200 | truncateBlockIndex,
201 | compactedBlockIndex,
202 | unshiftedOffset,
203 | unshiftedBlockBuf,
204 | },
205 | cb
206 | )
207 | }
208 | }
209 |
210 | function findStateFromLog(cb) {
211 | findFirstDeletedOffset(function gotFirstDeleted(err, holeOffset) {
212 | if (err) return cb(err)
213 | if (holeOffset === -1) {
214 | compactedBlockIndex = Math.floor(log.since.value / log.blockSize)
215 | hasHoles = false
216 | stop()
217 | return
218 | }
219 | const blockStart = holeOffset - (holeOffset % log.blockSize)
220 | const blockIndex = Math.floor(holeOffset / log.blockSize)
221 | findNonDeletedOffsetGTE(blockStart, function gotNonDeleted(err, offset) {
222 | if (err) return cb(err)
223 | if (offset === -1) {
224 | compactedBlockIndex = Math.floor((holeOffset - 1) / log.blockSize)
225 | stop()
226 | return
227 | }
228 | holesFound = offset > holeOffset ? 1 : 0
229 | const state = {
230 | compactedBlockIndex: blockIndex,
231 | unshiftedOffset: offset,
232 | unshiftedBlockBuf: null,
233 | }
234 | cb(null, state)
235 | })
236 | })
237 | }
238 |
239 | function findFirstDeletedOffset(cb) {
240 | log.stream({ offsets: true, values: true }).pipe(
241 | push.drain(
242 | function sinkToFindFirstDeleted(record) {
243 | if (record.value === null) {
244 | cb(null, record.offset)
245 | return false // abort push.drain
246 | }
247 | },
248 | function sinkEndedLookingForDeleted() {
249 | cb(null, -1)
250 | }
251 | )
252 | )
253 | }
254 |
255 | function findNonDeletedOffsetGTE(gte, cb) {
256 | log.stream({ gte, offsets: true, values: true }).pipe(
257 | push.drain(
258 | function sinkToFindNonDeleted(record) {
259 | if (record.value !== null) {
260 | cb(null, record.offset)
261 | return false // abort push.drain
262 | }
263 | },
264 | function sinkEndedLookingForNonDeleted() {
265 | cb(null, -1)
266 | }
267 | )
268 | )
269 | }
270 |
271 | function continueCompactingBlock() {
272 | while (true) {
273 | // Fetch the unshifted block, if necessary
274 | if (!unshiftedBlockBuf) {
275 | loadUnshiftedBlock(continueCompactingBlock)
276 | return
277 | }
278 | // When all records have been shifted (thus end of log), stop compacting
279 | if (unshiftedBlockIndex === -1) {
280 | saveCompactedBlock(function onCompactedBlockSaved(err) {
281 | if (err) return onDone(err)
282 | stop()
283 | })
284 | return
285 | }
286 | const [unshiftedDataBuf, unshiftedRecSize] = getUnshiftedRecord()
287 | // Get a non-deleted unshifted record, if necessary
288 | if (!unshiftedDataBuf) {
289 | holesFound += 1
290 | goToNextUnshifted()
291 | continue
292 | }
293 | const compactedBlockStart = compactedBlockIndex * log.blockSize
294 | const offsetInCompactedBlock = compactedOffset - compactedBlockStart
295 | // Proceed to compact the next block if this block is full
296 | if (log.hasNoSpaceFor(unshiftedDataBuf, offsetInCompactedBlock)) {
297 | saveCompactedBlock()
298 | setImmediate(compactNextBlock)
299 | return
300 | }
301 |
302 | if (
303 | compactedBlockIndex !== unshiftedBlockIndex ||
304 | compactedOffset !== unshiftedOffset
305 | ) {
306 | compactedBlockIdenticalToUnshifted = false
307 | }
308 |
309 | // Copy record to new compacted block
310 | Record.write(compactedBlockBuf, offsetInCompactedBlock, unshiftedDataBuf)
311 | goToNextUnshifted()
312 | compactedOffset += unshiftedRecSize
313 | }
314 | }
315 |
316 | function saveCompactedBlock(cb) {
317 | if (compactedBlockIdenticalToUnshifted) {
318 | if (cb) cb()
319 | } else {
320 | const blockIndex = compactedBlockIndex
321 | log.overwrite(blockIndex, compactedBlockBuf, function onOverwritten(err) {
322 | if (err && cb) cb(err)
323 | else if (err) return onDone(err)
324 | else {
325 | debug('compacted block %d', blockIndex)
326 | if (cb) cb()
327 | }
328 | })
329 | }
330 | }
331 |
332 | function loadUnshiftedBlock(cb) {
333 | const blockStart = unshiftedBlockIndex * log.blockSize
334 | log.getBlock(blockStart, function onBlockLoaded(err, blockBuf) {
335 | if (err) return onDone(err)
336 | unshiftedBlockBuf = blockBuf
337 | cb()
338 | })
339 | }
340 |
341 | function getUnshiftedRecord() {
342 | const [, dataBuf, recSize] = log.getDataNextOffset(
343 | unshiftedBlockBuf,
344 | unshiftedOffset,
345 | true
346 | )
347 | return [dataBuf, recSize]
348 | }
349 |
350 | function goToNextUnshifted() {
351 | let [nextOffset] = log.getDataNextOffset(
352 | unshiftedBlockBuf,
353 | unshiftedOffset,
354 | true
355 | )
356 | if (nextOffset === -1) {
357 | unshiftedBlockIndex = -1
358 | } else if (nextOffset === 0) {
359 | unshiftedBlockIndex += 1
360 | unshiftedBlockBuf = null
361 | unshiftedOffset = unshiftedBlockIndex * log.blockSize
362 | } else {
363 | unshiftedOffset = nextOffset
364 | }
365 | }
366 |
367 | function compactNextBlock() {
368 | compactedBlockIndex += 1
369 | compactedBlockBuf = Buffer.alloc(log.blockSize)
370 | compactedOffset = compactedBlockIndex * log.blockSize
371 | compactedBlockIdenticalToUnshifted = true
372 | progress.set(calculateProgressStats())
373 | savePersistentState(function onCompactionStateSaved(err) {
374 | if (err) return onDone(err)
375 | continueCompactingBlock()
376 | })
377 | }
378 |
379 | function calculateProgressStats() {
380 | const percent =
381 | (unshiftedOffset - startOffset) / (log.since.value - startOffset)
382 | return {
383 | startOffset,
384 | compactedOffset,
385 | unshiftedOffset,
386 | percent,
387 | }
388 | }
389 |
390 | function stop() {
391 | compactedBlockBuf = null
392 | unshiftedBlockBuf = null
393 | truncateBlockIndex = compactedBlockIndex
394 | const state = {
395 | version,
396 | startOffset,
397 | truncateBlockIndex,
398 | compactedBlockIndex: 0,
399 | unshiftedOffset: 0,
400 | unshiftedBlockBuf: Buffer.alloc(0),
401 | }
402 | persistentState.save(state, function onTruncateStateSaved(err) {
403 | if (err) return onDone(err)
404 | truncateAndBeDone()
405 | })
406 | }
407 |
408 | function truncateAndBeDone() {
409 | if (truncateBlockIndex === NO_TRUNCATE) {
410 | return onDone(new Error('Cannot truncate log yet'))
411 | }
412 | log.truncate(truncateBlockIndex, function onTruncatedLog(err, sizeDiff) {
413 | if (err) return onDone(err)
414 | persistentState.destroy(function onStateDestroyed(err) {
415 | if (err) return onDone(err)
416 | if (sizeDiff === 0 && hasHoles) {
417 | // Truncation did not make the log smaller but it did rewrite the log.
418 | // So report 1 byte as a way of saying that compaction filled holes.
419 | onDone(null, { sizeDiff: 1, holesFound })
420 | } else {
421 | onDone(null, { sizeDiff, holesFound })
422 | }
423 | })
424 | })
425 | }
426 |
427 | return {
428 | progress,
429 | }
430 | }
431 |
432 | Compaction.stateFileExists = stateFileExists
433 |
434 | module.exports = Compaction
435 |
--------------------------------------------------------------------------------
/compat.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | var toPull = require('push-stream-to-pull-stream/source')
6 |
7 | module.exports = function toCompat(log) {
8 | log.onWrite = log.since.set
9 |
10 | var _stream = log.stream
11 | log.stream = function (opts) {
12 | var stream = _stream.call(log, opts)
13 | return toPull(stream)
14 | }
15 | return log
16 | }
17 |
--------------------------------------------------------------------------------
/errors.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2022 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | class ErrorWithCode extends Error {
6 | constructor(message, code) {
7 | super(message)
8 | this.code = code
9 | }
10 | }
11 |
12 | function nanOffsetErr(offset) {
13 | return new ErrorWithCode(
14 | `Offset ${offset} is not a number`,
15 | 'ERR_AAOL_INVALID_OFFSET'
16 | )
17 | }
18 |
19 | function negativeOffsetErr(offset) {
20 | return new ErrorWithCode(
21 | `Offset ${offset} is negative`,
22 | 'ERR_AAOL_INVALID_OFFSET'
23 | )
24 | }
25 |
26 | function outOfBoundsOffsetErr(offset, logSize) {
27 | return new ErrorWithCode(
28 | `Offset ${offset} is beyond log size ${logSize}`,
29 | 'ERR_AAOL_OFFSET_OUT_OF_BOUNDS'
30 | )
31 | }
32 |
33 | function deletedRecordErr() {
34 | return new ErrorWithCode('Record has been deleted', 'ERR_AAOL_DELETED_RECORD')
35 | }
36 |
37 | function delDuringCompactErr() {
38 | return new Error('Cannot delete while compaction is in progress')
39 | }
40 |
41 | function compactWithMaxLiveStreamErr() {
42 | return new Error(
43 | 'Compaction cannot run if there are live streams ' +
44 | 'configured with opts.lt or opts.lte'
45 | )
46 | }
47 |
48 | function appendLargerThanBlockErr() {
49 | return new Error('Data to be appended is larger than block size')
50 | }
51 |
52 | function appendTransactionWantsArrayErr() {
53 | return new Error('appendTransaction expects first argument to be an array')
54 | }
55 |
56 | function unexpectedTruncationErr() {
57 | return new Error(
58 | 'truncate() is trying to *increase* the log size, ' +
59 | 'which is totally unexpected. ' +
60 | 'There may be a logic bug in async-append-only-log'
61 | )
62 | }
63 |
64 | module.exports = {
65 | ErrorWithCode,
66 | nanOffsetErr,
67 | negativeOffsetErr,
68 | outOfBoundsOffsetErr,
69 | deletedRecordErr,
70 | delDuringCompactErr,
71 | compactWithMaxLiveStreamErr,
72 | appendLargerThanBlockErr,
73 | appendTransactionWantsArrayErr,
74 | unexpectedTruncationErr,
75 | }
76 |
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | const Cache = require('@alloc/quick-lru')
6 | const RAF = require('polyraf')
7 | const Obv = require('obz')
8 | const AtomicFile = require('atomic-file-rw')
9 | const debounce = require('lodash.debounce')
10 | const isBufferZero = require('is-buffer-zero')
11 | const debug = require('debug')('async-append-only-log')
12 | const fs = require('fs')
13 | const mutexify = require('mutexify')
14 |
15 | const {
16 | deletedRecordErr,
17 | nanOffsetErr,
18 | negativeOffsetErr,
19 | outOfBoundsOffsetErr,
20 | delDuringCompactErr,
21 | appendLargerThanBlockErr,
22 | appendTransactionWantsArrayErr,
23 | unexpectedTruncationErr,
24 | compactWithMaxLiveStreamErr,
25 | } = require('./errors')
26 | const Stream = require('./stream')
27 | const Record = require('./record')
28 | const Compaction = require('./compaction')
29 |
30 | /**
31 | * The "End of Block" is a special field used to mark the end of a block, and
32 | * in practice it's like a Record header "length" field, with the value 0.
33 | * In most cases, the end region of a block will have a larger length than this,
34 | * but we want to guarantee there is at *least* this many bytes at the end.
35 | */
36 | const EOB = {
37 | SIZE: Record.HEADER_SIZE,
38 | asNumber: 0,
39 | }
40 |
41 | const DEFAULT_BLOCK_SIZE = 65536
42 | const DEFAULT_CODEC = { encode: (x) => x, decode: (x) => x }
43 | const DEFAULT_WRITE_TIMEOUT = 250
44 | const DEFAULT_VALIDATE = () => true
45 |
46 | const COMPACTION_PROGRESS_EMIT_INTERVAL = 1000
47 |
48 | module.exports = function AsyncAppendOnlyLog(filename, opts) {
49 | const cache = new Cache({ maxSize: 1024 }) // This is potentially 64 MiB!
50 | const raf = RAF(filename)
51 | const statsFilename = filename + '.stats'
52 | const blockSize = (opts && opts.blockSize) || DEFAULT_BLOCK_SIZE
53 | const codec = (opts && opts.codec) || DEFAULT_CODEC
54 | const writeTimeout = (opts && opts.writeTimeout) || DEFAULT_WRITE_TIMEOUT
55 | const validateRecord = (opts && opts.validateRecord) || DEFAULT_VALIDATE
56 | let self
57 |
58 | const waitingLoad = []
59 | const waitingDrain = new Map() // blockIndex -> []
60 | const waitingFlushDelete = []
61 | const blocksToBeWritten = new Map() // blockIndex -> { blockBuf, offset }
62 | const blocksWithDeletables = new Map() // blockIndex -> blockBuf
63 | let flushingDelete = false
64 | let writingBlockIndex = -1
65 |
66 | let latestBlockBuf = null
67 | let latestBlockIndex = null
68 | let nextOffsetInBlock = null
69 | let deletedBytes = 0
70 | const since = Obv() // offset of last written record
71 | let compaction = null
72 | const compactionProgress = Obv()
73 | if (typeof window !== 'undefined') {
74 | // fs sync not working in browser
75 | compactionProgress.set({ percent: 1, done: true, sizeDiff: 0 })
76 | } else {
77 | compactionProgress.set(
78 | Compaction.stateFileExists(filename)
79 | ? { percent: 0, done: false }
80 | : { percent: 1, done: true, sizeDiff: 0 }
81 | )
82 | }
83 |
84 | const waitingCompaction = []
85 |
86 | onLoad(function maybeResumeCompaction() {
87 | // fs sync not working in browser
88 | if (typeof window !== 'undefined') return
89 |
90 | if (Compaction.stateFileExists(filename)) {
91 | compact(function onCompactDone(err) {
92 | if (err) throw err
93 | })
94 | }
95 | })()
96 |
97 | AtomicFile.readFile(statsFilename, 'utf8', function statsUp(err, json) {
98 | if (err) {
99 | debug('error loading stats: %s', err.message)
100 | deletedBytes = 0
101 | } else {
102 | try {
103 | const stats = JSON.parse(json)
104 | deletedBytes = stats.deletedBytes
105 | } catch (err) {
106 | debug('error parsing stats: %s', err.message)
107 | deletedBytes = 0
108 | }
109 | }
110 |
111 | raf.stat(function onRAFStatDone(err, stat) {
112 | if (err) debug('failed to stat ' + filename, err)
113 |
114 | const fileSize = stat ? stat.size : -1
115 |
116 | if (fileSize <= 0) {
117 | debug('empty file')
118 | latestBlockBuf = Buffer.alloc(blockSize)
119 | latestBlockIndex = 0
120 | nextOffsetInBlock = 0
121 | cache.set(0, latestBlockBuf)
122 | since.set(-1)
123 | while (waitingLoad.length) waitingLoad.shift()()
124 | } else {
125 | const blockStart = fileSize - blockSize
126 | loadLatestBlock(blockStart, function onLoadedLatestBlock(err) {
127 | if (err) throw err
128 | debug('opened file, since: %d', since.value)
129 | while (waitingLoad.length) waitingLoad.shift()()
130 | })
131 | }
132 | })
133 | })
134 |
135 | function loadLatestBlock(blockStart, cb) {
136 | raf.read(blockStart, blockSize, function onRAFReadLastDone(err, blockBuf) {
137 | if (err) return cb(err)
138 | getLastGoodRecord(
139 | blockBuf,
140 | blockStart,
141 | function gotLastGoodRecord(err, offsetInBlock) {
142 | if (err) return cb(err)
143 | latestBlockBuf = blockBuf
144 | latestBlockIndex = blockStart / blockSize
145 | const recSize = Record.readSize(blockBuf, offsetInBlock)
146 | nextOffsetInBlock = offsetInBlock + recSize
147 | since.set(blockStart + offsetInBlock)
148 | cb()
149 | }
150 | )
151 | })
152 | }
153 |
154 | function getOffsetInBlock(offset) {
155 | return offset % blockSize
156 | }
157 |
158 | function getBlockStart(offset) {
159 | return offset - getOffsetInBlock(offset)
160 | }
161 |
162 | function getNextBlockStart(offset) {
163 | return getBlockStart(offset) + blockSize
164 | }
165 |
166 | function getBlockIndex(offset) {
167 | return getBlockStart(offset) / blockSize
168 | }
169 |
170 | const writeLock = mutexify()
171 |
172 | function writeWithFSync(blockStart, blockBuf, successValue, cb) {
173 | writeLock(function onWriteLockReleased(unlock) {
174 | raf.write(blockStart, blockBuf, function onRAFWriteDone(err) {
175 | if (err) return unlock(cb, err)
176 |
177 | if (raf.fd) {
178 | fs.fsync(raf.fd, function onFSyncDone(err) {
179 | if (err) unlock(cb, err)
180 | else unlock(cb, null, successValue)
181 | })
182 | } else unlock(cb, null, successValue)
183 | })
184 | })
185 | }
186 |
187 | function truncateWithFSync(newSize, cb) {
188 | writeLock(function onWriteLockReleasedForTruncate(unlock) {
189 | raf.del(newSize, Infinity, function onRAFDeleteDone(err) {
190 | if (err) return unlock(cb, err)
191 |
192 | if (raf.fd) {
193 | fs.fsync(raf.fd, function onFSyncDoneForTruncate(err) {
194 | if (err) unlock(cb, err)
195 | else unlock(cb, null)
196 | })
197 | } else unlock(cb, null)
198 | })
199 | })
200 | }
201 |
202 | function fixBlock(blockBuf, badOffsetInBlock, blockStart, successValue, cb) {
203 | debug('found invalid record at %d, fixing last block', badOffsetInBlock)
204 | blockBuf.fill(0, badOffsetInBlock, blockSize)
205 | writeWithFSync(blockStart, blockBuf, successValue, cb)
206 | }
207 |
208 | function getLastGoodRecord(blockBuf, blockStart, cb) {
209 | let lastGoodOffset = 0
210 | for (let offsetInRecord = 0; offsetInRecord < blockSize; ) {
211 | const length = Record.readDataLength(blockBuf, offsetInRecord)
212 | if (length === EOB.asNumber) break
213 | const [dataBuf, recSize] = Record.read(blockBuf, offsetInRecord)
214 | const isLengthCorrupt = offsetInRecord + recSize > blockSize
215 | const isDataCorrupt = !isBufferZero(dataBuf) && !validateRecord(dataBuf)
216 | if (isLengthCorrupt || isDataCorrupt) {
217 | fixBlock(blockBuf, offsetInRecord, blockStart, lastGoodOffset, cb)
218 | return
219 | }
220 | lastGoodOffset = offsetInRecord
221 | offsetInRecord += recSize
222 | }
223 |
224 | cb(null, lastGoodOffset)
225 | }
226 |
227 | function getBlock(offset, cb) {
228 | const blockIndex = getBlockIndex(offset)
229 |
230 | if (cache.has(blockIndex)) {
231 | debug('getting offset %d from cache', offset)
232 | const cachedBlockBuf = cache.get(blockIndex)
233 | cb(null, cachedBlockBuf)
234 | } else {
235 | debug('getting offset %d from disc', offset)
236 | const blockStart = getBlockStart(offset)
237 | raf.read(blockStart, blockSize, function onRAFReadDone(err, blockBuf) {
238 | cache.set(blockIndex, blockBuf)
239 | cb(err, blockBuf)
240 | })
241 | }
242 | }
243 |
244 | function get(offset, cb) {
245 | const logSize = latestBlockIndex * blockSize + nextOffsetInBlock
246 | if (typeof offset !== 'number') return cb(nanOffsetErr(offset))
247 | if (isNaN(offset)) return cb(nanOffsetErr(offset))
248 | if (offset < 0) return cb(negativeOffsetErr(offset))
249 | if (offset >= logSize) return cb(outOfBoundsOffsetErr(offset, logSize))
250 |
251 | getBlock(offset, function gotBlock(err, blockBuf) {
252 | if (err) return cb(err)
253 | const [dataBuf] = Record.read(blockBuf, getOffsetInBlock(offset))
254 | if (isBufferZero(dataBuf)) return cb(deletedRecordErr())
255 | cb(null, codec.decode(dataBuf))
256 | })
257 | }
258 |
259 | // nextOffset can take 3 values:
260 | // -1: end of log
261 | // 0: need a new block
262 | // >0: next record within block
263 | function getDataNextOffset(blockBuf, offset, asRaw = false) {
264 | const offsetInBlock = getOffsetInBlock(offset)
265 | const [dataBuf, recSize] = Record.read(blockBuf, offsetInBlock)
266 | const nextLength = Record.readDataLength(blockBuf, offsetInBlock + recSize)
267 |
268 | let nextOffset
269 | if (nextLength === EOB.asNumber) {
270 | if (getNextBlockStart(offset) > since.value) nextOffset = -1
271 | else nextOffset = 0
272 | } else {
273 | nextOffset = offset + recSize
274 | }
275 |
276 | if (isBufferZero(dataBuf)) return [nextOffset, null, recSize]
277 | else return [nextOffset, asRaw ? dataBuf : codec.decode(dataBuf), recSize]
278 | }
279 |
280 | function del(offset, cb) {
281 | if (compaction) {
282 | cb(delDuringCompactErr())
283 | return
284 | }
285 | const blockIndex = getBlockIndex(offset)
286 | if (blocksToBeWritten.has(blockIndex)) {
287 | onDrain(function delAfterDrained() {
288 | del(offset, cb)
289 | })
290 | return
291 | }
292 |
293 | if (blocksWithDeletables.has(blockIndex)) {
294 | const blockBuf = blocksWithDeletables.get(blockIndex)
295 | gotBlockForDelete(null, blockBuf)
296 | } else {
297 | getBlock(offset, gotBlockForDelete)
298 | }
299 | function gotBlockForDelete(err, blockBuf) {
300 | if (err) return cb(err)
301 | const actualBlockBuf = blocksWithDeletables.get(blockIndex) || blockBuf
302 | Record.overwriteWithZeroes(actualBlockBuf, getOffsetInBlock(offset))
303 | deletedBytes += Record.readSize(actualBlockBuf, getOffsetInBlock(offset))
304 | blocksWithDeletables.set(blockIndex, actualBlockBuf)
305 | scheduleFlushDelete()
306 | cb()
307 | }
308 | }
309 |
310 | function hasNoSpaceFor(dataBuf, offsetInBlock) {
311 | return offsetInBlock + Record.size(dataBuf) + EOB.SIZE > blockSize
312 | }
313 |
314 | const scheduleFlushDelete = debounce(flushDelete, writeTimeout)
315 |
316 | function flushDelete() {
317 | if (blocksWithDeletables.size === 0) {
318 | for (const cb of waitingFlushDelete) cb()
319 | waitingFlushDelete.length = 0
320 | return
321 | }
322 | const blockIndex = blocksWithDeletables.keys().next().value
323 | const blockStart = blockIndex * blockSize
324 | const blockBuf = blocksWithDeletables.get(blockIndex)
325 | blocksWithDeletables.delete(blockIndex)
326 | flushingDelete = true
327 |
328 | writeWithFSync(blockStart, blockBuf, null, function flushedDelete(err) {
329 | saveStats(function onSavedStats(statsErr) {
330 | if (statsErr) debug('error saving stats: %s', statsErr.message)
331 | flushingDelete = false
332 | if (err) {
333 | for (const cb of waitingFlushDelete) cb(err)
334 | waitingFlushDelete.length = 0
335 | return
336 | }
337 | flushDelete() // next
338 | })
339 | })
340 | }
341 |
342 | function onDeletesFlushed(cb) {
343 | if (flushingDelete || blocksWithDeletables.size > 0) {
344 | waitingFlushDelete.push(cb)
345 | } else cb()
346 | }
347 |
348 | function appendSingle(data) {
349 | let encodedData = codec.encode(data)
350 | if (typeof encodedData === 'string') encodedData = Buffer.from(encodedData)
351 |
352 | if (Record.size(encodedData) + EOB.SIZE > blockSize)
353 | throw appendLargerThanBlockErr()
354 |
355 | if (hasNoSpaceFor(encodedData, nextOffsetInBlock)) {
356 | const nextBlockBuf = Buffer.alloc(blockSize)
357 | latestBlockBuf = nextBlockBuf
358 | latestBlockIndex += 1
359 | nextOffsetInBlock = 0
360 | debug("data doesn't fit current block, creating new")
361 | }
362 |
363 | Record.write(latestBlockBuf, nextOffsetInBlock, encodedData)
364 | cache.set(latestBlockIndex, latestBlockBuf) // update cache
365 | const offset = latestBlockIndex * blockSize + nextOffsetInBlock
366 | blocksToBeWritten.set(latestBlockIndex, {
367 | blockBuf: latestBlockBuf,
368 | offset,
369 | })
370 | nextOffsetInBlock += Record.size(encodedData)
371 | scheduleWrite()
372 | debug('data inserted at offset %d', offset)
373 | return offset
374 | }
375 |
376 | function append(data, cb) {
377 | if (compaction) {
378 | waitingCompaction.push(() => append(data, cb))
379 | return
380 | }
381 |
382 | if (Array.isArray(data)) {
383 | let offset = 0
384 | for (let i = 0, length = data.length; i < length; ++i)
385 | offset = appendSingle(data[i])
386 |
387 | cb(null, offset)
388 | } else cb(null, appendSingle(data))
389 | }
390 |
391 | function appendTransaction(dataArray, cb) {
392 | if (!Array.isArray(dataArray)) {
393 | return cb(appendTransactionWantsArrayErr())
394 | }
395 | if (compaction) {
396 | waitingCompaction.push(() => appendTransaction(dataArray, cb))
397 | return
398 | }
399 |
400 | let size = 0
401 | const encodedDataArray = dataArray.map((data) => {
402 | let encodedData = codec.encode(data)
403 | if (typeof encodedData === 'string')
404 | encodedData = Buffer.from(encodedData)
405 | size += Record.size(encodedData)
406 | return encodedData
407 | })
408 |
409 | size += EOB.SIZE
410 |
411 | if (size > blockSize) return cb(appendLargerThanBlockErr())
412 |
413 | if (nextOffsetInBlock + size > blockSize) {
414 | // doesn't fit
415 | const nextBlockBuf = Buffer.alloc(blockSize)
416 | latestBlockBuf = nextBlockBuf
417 | latestBlockIndex += 1
418 | nextOffsetInBlock = 0
419 | debug("data doesn't fit current block, creating new")
420 | }
421 |
422 | const offsets = []
423 | for (const encodedData of encodedDataArray) {
424 | Record.write(latestBlockBuf, nextOffsetInBlock, encodedData)
425 | cache.set(latestBlockIndex, latestBlockBuf) // update cache
426 | const offset = latestBlockIndex * blockSize + nextOffsetInBlock
427 | offsets.push(offset)
428 | blocksToBeWritten.set(latestBlockIndex, {
429 | blockBuf: latestBlockBuf,
430 | offset,
431 | })
432 | nextOffsetInBlock += Record.size(encodedData)
433 | debug('data inserted at offset %d', offset)
434 | }
435 |
436 | scheduleWrite()
437 |
438 | return cb(null, offsets)
439 | }
440 |
441 | const scheduleWrite = debounce(write, writeTimeout)
442 |
443 | function write() {
444 | if (blocksToBeWritten.size === 0) return
445 | const blockIndex = blocksToBeWritten.keys().next().value
446 | const blockStart = blockIndex * blockSize
447 | const { blockBuf, offset } = blocksToBeWritten.get(blockIndex)
448 | blocksToBeWritten.delete(blockIndex)
449 |
450 | debug(
451 | 'writing block of size: %d, to offset: %d',
452 | blockBuf.length,
453 | blockIndex * blockSize
454 | )
455 | writingBlockIndex = blockIndex
456 | writeWithFSync(blockStart, blockBuf, null, function onBlockWritten(err) {
457 | const drainsBefore = (waitingDrain.get(blockIndex) || []).slice(0)
458 | writingBlockIndex = -1
459 | if (err) {
460 | debug('failed to write block %d', blockIndex)
461 | throw err
462 | } else {
463 | since.set(offset)
464 |
465 | // write values to live streams
466 | for (const stream of self.streams) {
467 | if (stream.live) stream.liveResume()
468 | }
469 |
470 | debug(
471 | 'draining the waiting queue for %d, items: %d',
472 | blockIndex,
473 | drainsBefore.length
474 | )
475 | for (let i = 0; i < drainsBefore.length; ++i) drainsBefore[i]()
476 |
477 | // the resumed streams might have added more to waiting
478 | let drainsAfter = waitingDrain.get(blockIndex) || []
479 | if (drainsBefore.length === drainsAfter.length)
480 | waitingDrain.delete(blockIndex)
481 | else if (drainsAfter.length === 0) waitingDrain.delete(blockIndex)
482 | else
483 | waitingDrain.set(
484 | blockIndex,
485 | waitingDrain.get(blockIndex).slice(drainsBefore.length)
486 | )
487 |
488 | write() // next!
489 | }
490 | })
491 | }
492 |
493 | function onStreamsDone(cb) {
494 | if ([...self.streams].every((stream) => stream.cursor === since.value)) {
495 | return cb()
496 | }
497 | const interval = setInterval(function checkIfStreamsDone() {
498 | for (const stream of self.streams) {
499 | if (stream.cursor < since.value) return
500 | }
501 | clearInterval(interval)
502 | cb()
503 | }, 100)
504 | if (interval.unref) interval.unref()
505 | }
506 |
507 | function overwrite(blockIndex, blockBuf, cb) {
508 | cache.set(blockIndex, blockBuf)
509 | const blockStart = blockIndex * blockSize
510 | writeWithFSync(blockStart, blockBuf, null, cb)
511 | }
512 |
513 | function truncate(newLatestBlockIndex, cb) {
514 | if (newLatestBlockIndex > latestBlockIndex) {
515 | return cb(unexpectedTruncationErr())
516 | }
517 | if (newLatestBlockIndex === latestBlockIndex) {
518 | const blockStart = latestBlockIndex * blockSize
519 | loadLatestBlock(blockStart, function onTruncateLoadedLatestBlock1(err) {
520 | if (err) cb(err)
521 | else cb(null, 0)
522 | })
523 | return
524 | }
525 | const size = (latestBlockIndex + 1) * blockSize
526 | const newSize = (newLatestBlockIndex + 1) * blockSize
527 | for (let i = newLatestBlockIndex + 1; i <= latestBlockIndex; ++i) {
528 | cache.delete(i)
529 | }
530 | truncateWithFSync(newSize, function onTruncateWithFSyncDone(err) {
531 | if (err) return cb(err)
532 | const blockStart = newSize - blockSize
533 | loadLatestBlock(blockStart, function onTruncateLoadedLatestBlock2(err) {
534 | if (err) return cb(err)
535 | const sizeDiff = size - newSize
536 | cb(null, sizeDiff)
537 | })
538 | })
539 | }
540 |
541 | function stats(cb) {
542 | if (since.value == null) {
543 | since((totalBytes) => {
544 | cb(null, { totalBytes: Math.max(0, totalBytes), deletedBytes })
545 | return false
546 | })
547 | } else {
548 | cb(null, { totalBytes: Math.max(0, since.value), deletedBytes })
549 | }
550 | }
551 |
552 | function saveStats(cb) {
553 | const stats = JSON.stringify({ deletedBytes })
554 | AtomicFile.writeFile(statsFilename, stats, 'utf8', cb)
555 | }
556 |
557 | function compact(cb) {
558 | if (compaction) {
559 | debug('compaction already in progress')
560 | waitingCompaction.push(cb)
561 | return
562 | }
563 | for (const stream of self.streams) {
564 | if (stream.live && (stream.max || stream.max_inclusive)) {
565 | return cb(compactWithMaxLiveStreamErr())
566 | }
567 | }
568 | onStreamsDone(function startCompactAfterStreamsDone() {
569 | onDrain(function startCompactAfterDrain() {
570 | onDeletesFlushed(function startCompactAfterDeletes() {
571 | if (compactionProgress.value.done) {
572 | compactionProgress.set({ percent: 0, done: false })
573 | }
574 | compaction = new Compaction(self, (err, stats) => {
575 | compaction = null
576 | if (err) return cb(err)
577 | deletedBytes = 0
578 | saveStats(function onSavedStatsAfterCompaction(err) {
579 | if (err)
580 | debug('error saving stats after compaction: %s', err.message)
581 | })
582 | for (const stream of self.streams) {
583 | if (stream.live) stream.postCompactionReset(since.value)
584 | }
585 | compactionProgress.set({ ...stats, percent: 1, done: true })
586 | for (const callback of waitingCompaction) callback()
587 | waitingCompaction.length = 0
588 | cb()
589 | })
590 | let prevUpdate = 0
591 | compaction.progress((stats) => {
592 | const now = Date.now()
593 | if (now - prevUpdate > COMPACTION_PROGRESS_EMIT_INTERVAL) {
594 | prevUpdate = now
595 | compactionProgress.set({ ...stats, done: false })
596 | }
597 | })
598 | })
599 | })
600 | })
601 | }
602 |
603 | function close(cb) {
604 | onDrain(function closeAfterHavingDrained() {
605 | onDeletesFlushed(function closeAfterDeletesFlushed() {
606 | for (const stream of self.streams) stream.abort(true)
607 | self.streams.clear()
608 | raf.close(cb)
609 | })
610 | })
611 | }
612 |
613 | function onLoad(fn) {
614 | return function waitForLogLoaded(...args) {
615 | if (latestBlockBuf === null) waitingLoad.push(fn.bind(null, ...args))
616 | else fn(...args)
617 | }
618 | }
619 |
620 | function onDrain(fn) {
621 | if (compaction) {
622 | waitingCompaction.push(fn)
623 | return
624 | }
625 | if (blocksToBeWritten.size === 0 && writingBlockIndex === -1) fn()
626 | else {
627 | const latestBlockIndex =
628 | blocksToBeWritten.size > 0
629 | ? last(blocksToBeWritten.keys())
630 | : writingBlockIndex
631 | const drains = waitingDrain.get(latestBlockIndex) || []
632 | drains.push(fn)
633 | waitingDrain.set(latestBlockIndex, drains)
634 | }
635 | }
636 |
637 | function last(iterable) {
638 | let res = null
639 | for (let x of iterable) res = x
640 | return res
641 | }
642 |
643 | return (self = {
644 | // Public API:
645 | get: onLoad(get),
646 | del: onLoad(del),
647 | append: onLoad(append),
648 | appendTransaction: onLoad(appendTransaction),
649 | close: onLoad(close),
650 | onDrain: onLoad(onDrain),
651 | onDeletesFlushed: onLoad(onDeletesFlushed),
652 | compact: onLoad(compact),
653 | since,
654 | stats,
655 | compactionProgress,
656 | stream(opts) {
657 | const stream = new Stream(self, opts)
658 | self.streams.add(stream)
659 | return stream
660 | },
661 |
662 | // Internals needed by ./compaction.js:
663 | filename,
664 | blockSize,
665 | overwrite,
666 | truncate,
667 | hasNoSpaceFor,
668 | // Internals needed by ./stream.js:
669 | onLoad,
670 | getNextBlockStart,
671 | getDataNextOffset,
672 | getBlock,
673 | streams: new Set(),
674 | })
675 | }
676 |
--------------------------------------------------------------------------------
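
A minimal usage sketch of the public API assembled above (append, get, onDrain, close), mirroring how the tests later in this repository call the module. The package name is taken from package.json below; the file path and blockSize here are arbitrary example values, not defaults.

// Sketch only: exercises the API object returned above, in the style of test/basic.js.
const Log = require('async-append-only-log') // package name from package.json below

const log = Log('/tmp/aaol-example.log', { blockSize: 64 * 1024 })

log.append(Buffer.from('hello'), (err, offset) => {
  if (err) throw err
  // get() and friends are wrapped in onLoad(), so calls queue up until the log has loaded
  log.get(offset, (err, buf) => {
    if (err) throw err
    console.log(buf.toString()) // 'hello'
    // onDrain() fires once all scheduled block writes have been flushed to disk
    log.onDrain(() => log.close(() => {}))
  })
})
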
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "async-append-only-log",
3 | "description": "An async append only log",
4 | "version": "4.3.10",
5 | "homepage": "https://github.com/ssb-ngi-pointer/async-append-only-log",
6 | "repository": {
7 | "type": "git",
8 | "url": "https://github.com/ssb-ngi-pointer/async-append-only-log"
9 | },
10 | "files": [
11 | "*.js",
12 | "package.json.license",
13 | "LICENSES/*"
14 | ],
15 | "dependencies": {
16 | "@alloc/quick-lru": "^5.2.0",
17 | "atomic-file-rw": "^0.3.0",
18 | "debug": "^4.2.0",
19 | "is-buffer-zero": "^1.0.0",
20 | "lodash.debounce": "^4.0.8",
21 | "looper": "^4.0.0",
22 | "ltgt": "^2.2.1",
23 | "mutexify": "^1.3.1",
24 | "obz": "^1.1.0",
25 | "polyraf": "^1.1.0",
26 | "push-stream": "^11.0.0",
27 | "push-stream-to-pull-stream": "^1.0.3"
28 | },
29 | "devDependencies": {
30 | "bench-flumelog": "^2.0.0",
31 | "bipf": "^1.4.0",
32 | "cont": "^1.0.3",
33 | "flumecodec": "0.0.1",
34 | "flumedb": "^2.1.8",
35 | "flumeview-level": "^4.0.4",
36 | "husky": "^4.3.0",
37 | "nyc": "^15.1.0",
38 | "prettier": "^2.5.1",
39 | "pretty-quick": "^3.1.3",
40 | "promisify-tuple": "^1.2.0",
41 | "pull-stream": "^3.6.14",
42 | "tap-arc": "^0.3.2",
43 | "tape": "^5.0.1",
44 | "test-flumeview-index": "^2.3.1",
45 | "too-hot": "^1.0.0"
46 | },
47 | "scripts": {
48 | "format-code": "prettier --write \"*.js\" \"test/*.js\"",
49 | "format-code-staged": "pretty-quick --staged --pattern \"*.js\" --pattern \"(test|compat|indexes|operators)/*.js\"",
50 | "test": "tape test/*.js | tap-arc --bail",
51 | "coverage": "nyc --reporter=lcov npm run test"
52 | },
53 | "husky": {
54 | "hooks": {
55 | "pre-commit": "npm run format-code-staged"
56 | }
57 | },
58 | "author": "Anders Rune Jensen ",
59 | "contributors": [
60 | "Andre Staltz "
61 | ],
62 | "license": "LGPL-3.0"
63 | }
64 |
--------------------------------------------------------------------------------
/package.json.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 |
3 | SPDX-License-Identifier: Unlicense
--------------------------------------------------------------------------------
/record.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2022 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | /*
6 | Binary format for a Record:
7 |
8 | <record>
9 | <dataLength: UInt16LE>
10 | <dataBuf: Arbitrary Bytes>
11 | </record>
12 |
13 | The "Header" is the first two bytes for the dataLength.
14 | */
15 |
16 | const HEADER_SIZE = 2 // uint16
17 |
18 | function size(dataBuf) {
19 | return HEADER_SIZE + dataBuf.length
20 | }
21 |
22 | function readDataLength(blockBuf, offsetInBlock) {
23 | return blockBuf.readUInt16LE(offsetInBlock)
24 | }
25 |
26 | function readSize(blockBuf, offsetInBlock) {
27 | const dataLength = readDataLength(blockBuf, offsetInBlock)
28 | return HEADER_SIZE + dataLength
29 | }
30 |
31 | function read(blockBuf, offsetInBlock) {
32 | const dataLength = readDataLength(blockBuf, offsetInBlock)
33 | const dataStart = offsetInBlock + HEADER_SIZE
34 | const dataBuf = blockBuf.slice(dataStart, dataStart + dataLength)
35 | const size = HEADER_SIZE + dataLength
36 | return [dataBuf, size]
37 | }
38 |
39 | function write(blockBuf, offsetInBlock, dataBuf) {
40 | blockBuf.writeUInt16LE(dataBuf.length, offsetInBlock) // write dataLength
41 | dataBuf.copy(blockBuf, offsetInBlock + HEADER_SIZE) // write dataBuf
42 | }
43 |
44 | function overwriteWithZeroes(blockBuf, offsetInBlock) {
45 | const dataLength = readDataLength(blockBuf, offsetInBlock)
46 | const dataStart = offsetInBlock + HEADER_SIZE
47 | const dataEnd = dataStart + dataLength
48 | blockBuf.fill(0, dataStart, dataEnd)
49 | }
50 |
51 | module.exports = {
52 | HEADER_SIZE,
53 | size,
54 | readDataLength,
55 | readSize,
56 | read,
57 | write,
58 | overwriteWithZeroes,
59 | }
60 |
--------------------------------------------------------------------------------
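
A worked example of the record layout documented above, using only the functions exported by record.js: a record is a two-byte little-endian dataLength header followed by the data bytes, so a 5-byte payload occupies 7 bytes of a block. The 64-byte block here is an arbitrary choice for illustration.

// Sketch: round-trip one record through a small block buffer.
const Record = require('./record') // path assumed relative to this file

const blockBuf = Buffer.alloc(64)
const dataBuf = Buffer.from('hello') // 5 bytes of payload

Record.write(blockBuf, 0, dataBuf) // block now starts with [0x05, 0x00] + 'hello'
console.log(Record.readDataLength(blockBuf, 0)) // 5
console.log(Record.readSize(blockBuf, 0)) // 7 (HEADER_SIZE 2 + dataLength 5)

const [readBack, recSize] = Record.read(blockBuf, 0)
console.log(readBack.toString(), recSize) // 'hello' 7

Record.overwriteWithZeroes(blockBuf, 0) // zeroes the payload but keeps the header,
console.log(Record.read(blockBuf, 0)[0]) // so this prints a 5-byte buffer of zeroes
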
/stream.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: LGPL-3.0-only
4 |
5 | const ltgt = require('ltgt')
6 | const looper = require('looper')
7 |
8 | module.exports = Stream
9 |
10 | const BLOCK_STATE = Object.freeze({
11 | GET_NEXT_BLOCK: 0,
12 | END_OF_STREAM: 1,
13 | PAUSED: 2,
14 | })
15 |
16 | const STREAM_STATE = Object.freeze({
17 | INITIALIZING: 0,
18 | LOADED: 1,
19 | RUNNING: 2,
20 | PAUSED: 3,
21 | ENDED: 4,
22 | })
23 |
24 | function Stream(log, opts) {
25 | opts = opts || {}
26 |
27 | this.log = log
28 |
29 | // configs
30 | this.live = !!opts.live
31 | this.offsets = opts.offsets !== false
32 | this.values = opts.values !== false
33 | this.sizes = opts.sizes === true
34 | this.limit = opts.limit || 0
35 |
36 | this.state = STREAM_STATE.INITIALIZING
37 |
38 | this.min = ltgt.lowerBound(opts, null)
39 | if (ltgt.lowerBoundInclusive(opts)) this.min_inclusive = this.min
40 |
41 | this.max = ltgt.upperBound(opts, null)
42 | if (ltgt.upperBoundInclusive(opts)) this.max_inclusive = this.max
43 |
44 | // this is properly initialized when this.log is ready
45 | this.cursor = -1
46 |
47 | // used together with limit
48 | this.count = 0
49 |
50 | // used for live (new values) & gt
51 | this.skip_next = false
52 |
53 | // needed in _ready
54 | this.opts = opts
55 |
56 | this._resumeCallback = this._resumeCallback.bind(this)
57 | this._resume = this._resume.bind(this)
58 |
59 | this.log.onLoad(this._ready.bind(this))()
60 | }
61 |
62 | Stream.prototype._ready = function _ready() {
63 | // note: cursor defaults to the lower bound given in opts, or zero.
64 | this.cursor = ltgt.lowerBound(this.opts, 0)
65 |
66 | if (this.cursor < 0) this.cursor = 0
67 |
68 | if (this.opts.gt >= 0) this.skip_next = true
69 |
70 | if (this.cursor === 0 && this.log.since.value === -1) {
71 | if (!this.live) this.state = STREAM_STATE.ENDED
72 | else this.state = STREAM_STATE.INITIALIZING // still not ready
73 | } else this.state = STREAM_STATE.LOADED
74 |
75 | this.resume()
76 | }
77 |
78 | Stream.prototype._writeToSink = function _writeToSink(value, size) {
79 | const offset = this.cursor
80 |
81 | const o = this.offsets
82 | const v = this.values
83 | const s = this.sizes
84 | if (o && v && s) this.sink.write({ offset, value, size })
85 | else if (o && v) this.sink.write({ offset, value })
86 | else if (o && s) this.sink.write({ offset, size })
87 | else if (v && s) this.sink.write({ value, size })
88 | else if (o) this.sink.write(offset)
89 | else if (v) this.sink.write(value)
90 | else if (s) this.sink.write(size)
91 | else this.sink.write(offset)
92 | }
93 |
94 | // returns a new BLOCK_STATE
95 | Stream.prototype._handleBlock = function _handleBlock(blockBuf) {
96 | while (true) {
97 | if (this.sink.paused || this.sink.ended) return BLOCK_STATE.PAUSED
98 |
99 | const [offset, value, size] = this.log.getDataNextOffset(
100 | blockBuf,
101 | this.cursor
102 | )
103 |
104 | if (this.skip_next) {
105 | this.skip_next = false
106 |
107 | if (offset > 0) {
108 | this.cursor = offset
109 | continue
110 | } else if (offset === 0) return BLOCK_STATE.GET_NEXT_BLOCK
111 | else if (offset === -1) return BLOCK_STATE.END_OF_STREAM
112 | }
113 |
114 | this.count++
115 |
116 | const o = this.cursor
117 |
118 | if (
119 | (this.min === null || this.min < o || this.min_inclusive === o) &&
120 | (this.max === null || this.max > o || this.max_inclusive === o)
121 | ) {
122 | this._writeToSink(value, size)
123 |
124 | if (offset > 0) this.cursor = offset
125 | else if (offset === 0) return BLOCK_STATE.GET_NEXT_BLOCK
126 | else if (offset === -1) return BLOCK_STATE.END_OF_STREAM
127 |
128 | if (this.limit > 0 && this.count >= this.limit)
129 | return BLOCK_STATE.END_OF_STREAM
130 | } else return BLOCK_STATE.END_OF_STREAM
131 | }
132 | }
133 |
134 | Stream.prototype._resume = function _resume() {
135 | if (this.state === STREAM_STATE.ENDED) {
136 | if (this.sink && !this.sink.ended) this.abort()
137 | return
138 | }
139 |
140 | if (this.state === STREAM_STATE.INITIALIZING) return // not ready yet
141 |
142 | if (!this.sink || this.sink.paused) {
143 | this.state = STREAM_STATE.PAUSED
144 | return
145 | }
146 |
147 | this.state = STREAM_STATE.RUNNING
148 |
149 | this.log.getBlock(this.cursor, this._resumeCallback)
150 | }
151 |
152 | Stream.prototype._resumeCallback = function _resumeCallback(err, block) {
153 | if (err) {
154 | console.error(err)
155 | return
156 | }
157 |
158 | const blockState = this._handleBlock(block)
159 | if (blockState === BLOCK_STATE.GET_NEXT_BLOCK) {
160 | this.cursor = this.log.getNextBlockStart(this.cursor)
161 | this._next()
162 | } else if (blockState === BLOCK_STATE.PAUSED) {
163 | this.state = STREAM_STATE.PAUSED
164 | } else if (blockState === BLOCK_STATE.END_OF_STREAM) {
165 | if (!this.live) this.abort()
166 | else {
167 | this.state = STREAM_STATE.PAUSED
168 | this.skip_next = true
169 | }
170 | }
171 | }
172 |
173 | Stream.prototype.resume = function resume() {
174 | if (this.state === STREAM_STATE.RUNNING) return
175 |
176 | this._next = looper(this._resume)
177 | this._next()
178 | }
179 |
180 | Stream.prototype.liveResume = function liveResume() {
181 | if (this.state === STREAM_STATE.INITIALIZING) this.state = STREAM_STATE.LOADED
182 |
183 | this.resume()
184 | }
185 |
186 | Stream.prototype.postCompactionReset = function postCompactionReset(offset) {
187 | this.cursor = Math.min(offset, this.cursor)
188 | this.min = null
189 | this.min_inclusive = null
190 | }
191 |
192 | Stream.prototype.abort = function abort(err) {
193 | this.state = STREAM_STATE.ENDED
194 | this.log.streams.delete(this)
195 | if (!this.sink.ended && this.sink.end) {
196 | this.sink.ended = true
197 | this.sink.end(err === true ? null : err)
198 | }
199 | }
200 |
201 | Stream.prototype.pipe = require('push-stream/pipe')
202 |
--------------------------------------------------------------------------------
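
A hedged sketch of how a Stream is typically consumed: the options above (offsets, values, sizes, live, limit, plus ltgt bounds such as gt) decide what _writeToSink emits, and the tests below drive it through push-stream's pipe. Here `log` is assumed to be an already-opened log instance.

// Sketch only, following the patterns used in the tests below.
const push = require('push-stream')

// With offsets and values both enabled (the defaults), the sink receives { offset, value } objects.
log.stream({ gt: 0, limit: 10 }).pipe(
  push.collect((err, items) => {
    if (err) throw err
    for (const { offset, value } of items) console.log(offset, value)
  })
)

// With offsets disabled, only the decoded values are emitted, as in test/compaction.js.
log.stream({ offsets: false }).pipe(push.drain((value) => console.log(value)))
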
/test/bad-offset.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var Log = require('../')
8 |
9 | tape('NaN', function (t) {
10 | var file = '/tmp/dsf-test-bad-offset.log'
11 | try {
12 | fs.unlinkSync(file)
13 | } catch (_) {}
14 | var db = Log(file, { blockSize: 2 * 1024 })
15 |
16 | var msg1 = Buffer.from('testing')
17 |
18 | db.append(msg1, function (err, offset1) {
19 | if (err) throw err
20 | t.equal(offset1, 0)
21 | db.get(NaN, function (err, b) {
22 | t.ok(err)
23 | t.match(err.message, /Offset NaN is not a number/, err.message)
24 | t.equals(err.code, 'ERR_AAOL_INVALID_OFFSET')
25 | db.close(t.end)
26 | })
27 | })
28 | })
29 |
30 | tape('-1', function (t) {
31 | var file = '/tmp/dsf-test-bad-offset.log'
32 | try {
33 | fs.unlinkSync(file)
34 | } catch (_) {}
35 | var db = Log(file, { blockSize: 2 * 1024 })
36 |
37 | var msg2 = Buffer.from('testing')
38 |
39 | db.append(msg2, function (err, offset1) {
40 | if (err) throw err
41 | t.equal(offset1, 0)
42 | db.get(-1, function (err, b) {
43 | t.ok(err)
44 | t.match(err.message, /Offset -1 is negative/, err.message)
45 | t.equals(err.code, 'ERR_AAOL_INVALID_OFFSET')
46 | db.close(t.end)
47 | })
48 | })
49 | })
50 |
51 | tape('out of bounds', function (t) {
52 | var file = '/tmp/dsf-test-bad-offset.log'
53 | try {
54 | fs.unlinkSync(file)
55 | } catch (_) {}
56 | var db = Log(file, { blockSize: 2 * 1024 })
57 |
58 | var msg2 = Buffer.from('testing')
59 |
60 | db.append(msg2, function (err, offset1) {
61 | if (err) throw err
62 | t.equal(offset1, 0)
63 | db.get(10240, function (err, b) {
64 | t.ok(err)
65 | t.match(err.message, /Offset 10240 is beyond log size/, err.message)
66 | t.equals(err.code, 'ERR_AAOL_OFFSET_OUT_OF_BOUNDS')
67 | db.close(t.end)
68 | })
69 | })
70 | })
71 |
--------------------------------------------------------------------------------
/test/basic.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var Log = require('../')
8 |
9 | tape('basic binary', function (t) {
10 | var file = '/tmp/dsf-test-basic-binary.log'
11 | try {
12 | fs.unlinkSync(file)
13 | } catch (_) {}
14 | var db = Log(file, { blockSize: 2 * 1024 })
15 |
16 | var msg1 = Buffer.from('testing')
17 | var msg2 = Buffer.from('testing2')
18 |
19 | db.append(msg1, function (err, offset1) {
20 | if (err) throw err
21 | t.equal(offset1, 0)
22 | db.append(msg2, function (err, offset2) {
23 | if (err) throw err
24 | db.get(offset1, function (err, b) {
25 | if (err) throw err
26 | t.equal(b.toString(), msg1.toString())
27 |
28 | db.get(offset2, function (err, b2) {
29 | if (err) throw err
30 | t.equal(b2.toString(), msg2.toString())
31 |
32 | db.close(t.end)
33 | })
34 | })
35 | })
36 | })
37 | })
38 |
39 | var json1 = { text: 'testing' }
40 | var json2 = { test: 'testing2' }
41 |
42 | tape('basic json', function (t) {
43 | var file = '/tmp/dsf-test-basic-json.log'
44 | try {
45 | fs.unlinkSync(file)
46 | } catch (_) {}
47 | var db = Log(file, {
48 | blockSize: 2 * 1024,
49 | codec: require('flumecodec/json'),
50 | })
51 |
52 | db.append(json1, function (err, offset1) {
53 | if (err) throw err
54 | t.equal(offset1, 0)
55 | db.append(json2, function (err, offset2) {
56 | if (err) throw err
57 | db.get(offset1, function (err, buf) {
58 | if (err) throw err
59 | t.deepEqual(buf, json1)
60 |
61 | db.get(offset2, function (err, buf) {
62 | if (err) throw err
63 | t.deepEqual(buf, json2)
64 |
65 | db.close(t.end)
66 | })
67 | })
68 | })
69 | })
70 | })
71 |
72 | tape('basic json re-read', function (t) {
73 | var file = '/tmp/dsf-test-basic-json.log'
74 | var db = Log(file, {
75 | blockSize: 2 * 1024,
76 | codec: require('flumecodec/json'),
77 | })
78 |
79 | db.onDrain(() => {
80 | t.equal(db.since.value, 20)
81 | db.get(0, function (err, buf) {
82 | if (err) throw err
83 | t.deepEqual(buf, json1)
84 |
85 | db.get(20, function (err, buf) {
86 | if (err) throw err
87 | t.deepEqual(buf, json2)
88 |
89 | db.close(t.end)
90 | })
91 | })
92 | })
93 | })
94 |
95 | tape('basic transaction', function (t) {
96 | var file = '/tmp/dsf-test-basic-transaction-json.log'
97 | try {
98 | fs.unlinkSync(file)
99 | } catch (_) {}
100 | var db = Log(file, {
101 | blockSize: 2 * 1024,
102 | codec: require('flumecodec/json'),
103 | })
104 |
105 | db.appendTransaction([json1, json2], function (err, offsets) {
106 | if (err) throw err
107 | t.equal(offsets[0], 0)
108 | db.get(offsets[0], function (err, buf) {
109 | if (err) throw err
110 | t.deepEqual(buf, json1)
111 |
112 | db.get(offsets[1], function (err, buf) {
113 | if (err) throw err
114 | t.deepEqual(buf, json2)
115 |
116 | db.close(t.end)
117 | })
118 | })
119 | })
120 | })
121 |
122 | tape('transaction fail', function (t) {
123 | var file = '/tmp/dsf-test-transaction-tail-json.log'
124 | try {
125 | fs.unlinkSync(file)
126 | } catch (_) {}
127 | var db = Log(file, {
128 | blockSize: 25,
129 | codec: require('flumecodec/json'),
130 | })
131 |
132 | db.appendTransaction([json1, json2], function (err, offsets) {
133 | t.equal(
134 | err.message,
135 | 'Data to be appended is larger than block size',
136 | 'fails on too much data'
137 | )
138 | db.close(t.end)
139 | })
140 | })
141 |
--------------------------------------------------------------------------------
/test/bench.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | const os = require('os')
6 | const fs = require('fs')
7 | const path = require('path')
8 |
9 | const pull = require('pull-stream')
10 | const cont = require('cont')
11 | const Flume = require('flumedb')
12 | const Index = require('flumeview-level')
13 |
14 | const Log = require('../')
15 | const toCompat = require('../compat')
16 |
17 | console.log('name, ops, ops/second, seconds')
18 | function Timer(name) {
19 | var start = Date.now()
20 | return function (ops) {
21 | var seconds = (Date.now() - start) / 1000
22 | console.log([name, ops, ops / seconds, seconds].join(', '))
23 | }
24 | }
25 |
26 | function initialize(db, N, _, cb) {
27 | var data = []
28 | for (var i = 0; i < N; i++)
29 | data.push({
30 | key: '#' + i,
31 | value: {
32 | foo: Math.random(),
33 | bar: Date.now(),
34 | },
35 | })
36 |
37 | db.append(data, function (err, offset) {
38 | if (err) throw err
39 | //wait until the view is consistent!
40 | var remove = db.index.since(function (v) {
41 | if (v < offset) return
42 | remove()
43 | cb(null, N)
44 | })
45 | })
46 | }
47 |
48 | function ordered_para(db, N, _, cb) {
49 | //ordered reads
50 | var n = 0
51 | for (var i = 0; i < N; i++) {
52 | db.index.get('#' + i, next)
53 | }
54 |
55 | function next(err, v) {
56 | if (err) return cb(err)
57 | if (++n === N) cb(null, N)
58 | }
59 | }
60 |
61 | function ordered_series(db, N, _, cb) {
62 | //ordered reads
63 | var n = 0,
64 | i = 0
65 | ;(function _next() {
66 | var key = '#' + i++
67 | db.index.get(key, function (err, msg) {
68 | if (msg.key !== key) return cb('benchmark failed: incorrect key returned')
69 | if (i === N) cb(null, N)
70 | else setImmediate(_next)
71 | })
72 | })(0)
73 | }
74 |
75 | function random_series(db, N, _, cb) {
76 | ;(function get(i) {
77 | if (i >= N) return cb(null, N)
78 |
79 | db.index.get('#' + ~~(Math.random() * N), function (err, value) {
80 | if (err) return cb(err)
81 | setImmediate(function () {
82 | get(i + 1)
83 | })
84 | })
85 | })(0)
86 | }
87 |
88 | function random_para(db, N, _, cb) {
89 | var n = 0
90 | for (var i = 0; i < N; i++) db.index.get('#' + ~~(Math.random() * N), next)
91 |
92 | function next(err, value) {
93 | if (err && n >= 0) {
94 | n = -1
95 | cb(err)
96 | } else if (++n === N) cb(null, N)
97 | }
98 | }
99 |
100 | function random_ranges(db, N, makeOpts, cb) {
101 | if (!db.index.read) return cb(new Error('not supported'))
102 | ;(function get(i) {
103 | if (i >= N) return cb(null, N)
104 |
105 | pull(
106 | db.index.read(makeOpts('#' + ~~(Math.random() * N))),
107 | pull.collect(function (err, ary) {
108 | if (err) return cb(err)
109 | setImmediate(function () {
110 | get(i + ary.length)
111 | })
112 | })
113 | )
114 | })(0)
115 | }
116 |
117 | function limit10(key) {
118 | return { gt: key, limit: 10, keys: false }
119 | }
120 |
121 | function create(dir, seed) {
122 | if (!fs.existsSync(dir)) fs.mkdirSync(dir)
123 |
124 | var raf = Log(dir + '/aligned.log', {
125 | blockSize: 1024 * 64,
126 | codec: require('flumecodec/json'),
127 | })
128 |
129 | return Flume(toCompat(raf)).use(
130 | 'index',
131 | Index(1, function (msg) {
132 | return [msg.key]
133 | })
134 | )
135 | }
136 |
137 | var seed = Date.now()
138 | var dir = path.join(os.tmpdir(), `test-async-flumelog-bench-index-${seed}`)
139 | var db = create(dir, seed)
140 | var N = 50e3
141 |
142 | function refresh() {
143 | return function (cb) {
144 | db.close(function () {
145 | db = create(dir, seed)
146 | var start = Date.now()
147 | var rm = db.index.since(function (msg) {
148 | if (msg === db.since.value) {
149 | console.error('reload', Date.now() - start)
150 | rm()
151 | cb()
152 | }
153 | })
154 | })
155 | }
156 | }
157 |
158 | function run(name, benchmark, opts) {
159 | return function (cb) {
160 | var t = Timer(name)
161 | benchmark(db, N, opts, function (err, n) {
162 | t(err || n)
163 | cb()
164 | })
165 | }
166 | }
167 |
168 | cont.series(
169 | [
170 | run('append', initialize),
171 | run('ordered_para', ordered_para),
172 | run('random_para', random_para),
173 | run('ordered_series', ordered_series),
174 | run('random_series', random_series),
175 | refresh(),
176 | run('ordered_para (cool)', ordered_para),
177 | run('ordered_para (warm)', ordered_para),
178 | refresh(),
179 | run('random_para (cool)', random_para),
180 | run('random_para (warm)', random_para),
181 | refresh(),
182 | run('ordered_series (cool)', ordered_series),
183 | run('ordered_series (warm)', ordered_series),
184 | refresh(),
185 | run('random_series (cool)', random_series),
186 | run('random_series (warm)', random_series),
187 | refresh(),
188 | run('random-ranges', random_ranges, limit10),
189 | ].filter(Boolean)
190 | )(function () {
191 | db.close()
192 | })
193 |
--------------------------------------------------------------------------------
/test/bench2.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var FlumeLog = require('../')
6 | var codec = require('flumecodec')
7 | var toCompat = require('../compat')
8 |
9 | var file = '/tmp/bench-async-flumelog.log'
10 | try {
11 | require('fs').unlinkSync(file)
12 | } catch (_) {}
13 |
14 | require('bench-flumelog')(
15 | function () {
16 | var log = FlumeLog(file, {
17 | block: 1024 * 64,
18 | })
19 | return toCompat(log)
20 | },
21 | null,
22 | null,
23 | function (obj) {
24 | return obj
25 | }
26 | )
27 |
--------------------------------------------------------------------------------
/test/compaction.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2022 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | const tape = require('tape')
6 | const fs = require('fs')
7 | const push = require('push-stream')
8 | const run = require('promisify-tuple')
9 | const timer = require('util').promisify(setTimeout)
10 | const Log = require('../')
11 |
12 | const hexCodec = {
13 | encode(num) {
14 | const hex = num.toString(16)
15 | const len = hex.length % 2 === 0 ? hex.length : hex.length + 1
16 | return Buffer.from(hex.padStart(len, '0'), 'hex')
17 | },
18 | decode(buf) {
19 | return parseInt(buf.toString('hex'), 16)
20 | },
21 | }
22 |
23 | tape('compact a log that does not have holes', async (t) => {
24 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
25 | const log = Log(file, { blockSize: 10 })
26 |
27 | const [, stats] = await run(log.stats)()
28 | t.equals(stats.totalBytes, 0, 'stats.totalBytes (1)')
29 | t.equals(stats.deletedBytes, 0, 'stats.deletedBytes (1)')
30 |
31 | const buf1 = Buffer.from('first')
32 | const buf2 = Buffer.from('second')
33 |
34 | const [, offset1] = await run(log.append)(buf1)
35 | const [, offset2] = await run(log.append)(buf2)
36 | await run(log.onDrain)()
37 | t.pass('append two records')
38 |
39 | const [, stats2] = await run(log.stats)()
40 | t.equals(stats2.totalBytes, 10, 'stats.totalBytes (2)')
41 | t.equals(stats2.deletedBytes, 0, 'stats.deletedBytes (2)')
42 |
43 | const progressArr = []
44 | log.compactionProgress((stats) => {
45 | progressArr.push(stats)
46 | })
47 |
48 | const [err] = await run(log.compact)()
49 | await run(log.onDrain)()
50 | t.error(err, 'no error when compacting')
51 |
52 | t.deepEquals(
53 | progressArr,
54 | [
55 | { percent: 1, done: true, sizeDiff: 0 },
56 | { percent: 0, done: false },
57 | { percent: 1, done: true, sizeDiff: 0, holesFound: 0 },
58 | ],
59 | 'progress events'
60 | )
61 |
62 | await new Promise((resolve) => {
63 | log.stream({ offsets: false }).pipe(
64 | push.collect((err, ary) => {
65 | t.error(err, 'no error when streaming compacted log')
66 | t.deepEqual(ary, [buf1, buf2], 'both records exist')
67 | resolve()
68 | })
69 | )
70 | })
71 |
72 | await run(log.close)()
73 | t.end()
74 | })
75 |
76 | tape('compact waits for old log.streams to end', async (t) => {
77 | t.timeoutAfter(20000)
78 | const BLOCKSIZE = 100
79 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
80 | const log = Log(file, {
81 | blockSize: BLOCKSIZE,
82 | codec: hexCodec,
83 | })
84 |
85 | const RECORDS = 50000
86 | const COMPACT_AT_RECORD = 200
87 | const PERCENTAGE = ((100 * COMPACT_AT_RECORD) / RECORDS).toFixed(1)
88 | const records = Array.from({ length: RECORDS }, (_, i) => i + 1)
89 |
90 | await run(log.append)(records)
91 | await run(log.onDrain)()
92 | t.pass(`appended ${RECORDS} records`)
93 |
94 | await run(log.del)(RECORDS * 0.9 * 4)
95 | await run(log.del)(RECORDS * 0.9 * 4 + 4)
96 | await run(log.del)(RECORDS * 0.9 * 4 + 8)
97 | await run(log.onDeletesFlushed)()
98 | t.pass(`deleted 3 records`)
99 |
100 | const [, stats] = await run(log.stats)()
101 | t.equals(stats.totalBytes, 208060, 'stats.totalBytes (1)')
102 | t.equals(stats.deletedBytes, 12, 'stats.deletedBytes (1)')
103 |
104 | await run(log.close)()
105 |
106 | const log2 = Log(file, {
107 | blockSize: BLOCKSIZE,
108 | codec: hexCodec,
109 | })
110 | t.pass('close and reopen log')
111 |
112 | const [err, stats2] = await run(log2.stats)()
113 | t.error(err, 'no error when getting stats')
114 | t.equals(stats2.totalBytes, 208060, 'stats.totalBytes (2)')
115 | t.equals(stats2.deletedBytes, 12, 'stats.deletedBytes (2)')
116 |
117 | let compactionStarted
118 | log2.compactionProgress((stats) => {
119 | if (!stats.done) {
120 | compactionStarted = true
121 | return false // stop tracking compactionProgress
122 | }
123 | })
124 |
125 | await new Promise((resolve) => {
126 | log2.stream({ gt: BLOCKSIZE, live: true, offsets: true }).pipe(
127 | push.drain((record) => {
128 | if (record.value === COMPACT_AT_RECORD) {
129 | t.pass(`start compact at ${PERCENTAGE}% of the log scan (old part)`)
130 | log2.compact((err) => {
131 | t.true(compactionStarted, 'compaction had started')
132 | t.error(err, 'compacted just completed')
133 | resolve()
134 | })
135 | }
136 | if (record.value === RECORDS) {
137 | t.pass('log scan (old part) ended')
138 | t.false(compactionStarted, 'compaction should not have started yet')
139 | }
140 | })
141 | )
142 | })
143 |
144 | const [, stats3] = await run(log2.stats)()
145 | t.equals(stats3.totalBytes, 208048, 'stats.totalBytes (3)')
146 | t.equals(stats3.deletedBytes, 0, 'stats.deletedBytes (3)')
147 |
148 | await run(log2.close)()
149 |
150 | t.end()
151 | })
152 |
153 | tape('delete first record, compact, stream', async (t) => {
154 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
155 | const log = Log(file, { blockSize: 10 })
156 |
157 | const buf1 = Buffer.from('first')
158 | const buf2 = Buffer.from('second')
159 |
160 | const [, offset1] = await run(log.append)(buf1)
161 | const [, offset2] = await run(log.append)(buf2)
162 | await run(log.onDrain)()
163 | t.pass('append two records')
164 |
165 | await run(log.del)(offset1)
166 | await run(log.onDeletesFlushed)()
167 | t.pass('delete first record')
168 |
169 | const [err] = await run(log.compact)()
170 | await run(log.onDrain)()
171 | t.error(err, 'no error when compacting')
172 |
173 | await new Promise((resolve) => {
174 | log.stream({ offsets: false }).pipe(
175 | push.collect((err, ary) => {
176 | t.error(err, 'no error when streaming compacted log')
177 | t.deepEqual(ary, [buf2], 'only second record exists')
178 | resolve()
179 | })
180 | )
181 | })
182 |
183 | await run(log.close)()
184 | t.end()
185 | })
186 |
187 | tape('delete last record, compact, stream', async (t) => {
188 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
189 | const log = Log(file, { blockSize: 10 })
190 |
191 | const buf1 = Buffer.from('first')
192 | const buf2 = Buffer.from('second')
193 | const buf3 = Buffer.from('third')
194 |
195 | const [, offset1] = await run(log.append)(buf1)
196 | const [, offset2] = await run(log.append)(buf2)
197 | const [, offset3] = await run(log.append)(buf3)
198 | await run(log.onDrain)()
199 | t.pass('append three records')
200 |
201 | await run(log.del)(offset3)
202 | await run(log.onDeletesFlushed)()
203 | t.pass('delete third record')
204 |
205 | await new Promise((resolve) => {
206 | log.stream({ offsets: false }).pipe(
207 | push.collect((err, ary) => {
208 | t.error(err, 'no error when streaming log')
209 | t.deepEqual(ary, [buf1, buf2, null], 'all blocks')
210 | resolve()
211 | })
212 | )
213 | })
214 |
215 | const [err] = await run(log.compact)()
216 | await run(log.onDrain)()
217 | t.error(err, 'no error when compacting')
218 |
219 | await new Promise((resolve) => {
220 | log.stream({ offsets: false }).pipe(
221 | push.collect((err, ary) => {
222 | t.error(err, 'no error when streaming compacted log')
223 | t.deepEqual(ary, [buf1, buf2], 'last block truncated away')
224 | resolve()
225 | })
226 | )
227 | })
228 |
229 | await run(log.close)()
230 | t.end()
231 | })
232 |
233 | tape('shift many blocks', async (t) => {
234 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
235 | const log = Log(file, {
236 | blockSize: 11, // fits 3 records of size 3 plus EOB of size 2
237 | codec: hexCodec,
238 | })
239 |
240 | await run(log.append)(
241 | [
242 | // block 0
243 | [0x11, 0x22, 0x33], // offsets: 0, 3, 6
244 | // block 1
245 | [0x44, 0x55, 0x66], // offsets: 11+0, 11+3, 11+6
246 | // block 2
247 | [0x77, 0x88, 0x99], // offsets: 22+0, 22+3, 22+6
248 | // block 3
249 | [0xaa, 0xbb, 0xcc], // offsets: 33+0, 33+3, 33+6
250 | // block 4
251 | [0xdd, 0xee, 0xff], // offsets: 44+0, 44+3, 44+6
252 | ].flat()
253 | )
254 | t.pass('appended records')
255 |
256 | await run(log.del)(11 + 3)
257 | await run(log.del)(11 + 6)
258 | await run(log.del)(33 + 3)
259 | await run(log.onDeletesFlushed)()
260 | t.pass('deleted some records in the middle')
261 |
262 | await new Promise((resolve) => {
263 | log.stream({ offsets: false }).pipe(
264 | push.collect((err, ary) => {
265 | t.deepEqual(
266 | ary,
267 | [
268 | // block 0
269 | [0x11, 0x22, 0x33],
270 | // block 1
271 | [0x44, null, null],
272 | // block 2
273 | [0x77, 0x88, 0x99],
274 | // block 3
275 | [0xaa, null, 0xcc],
276 | // block 4
277 | [0xdd, 0xee, 0xff],
278 | ].flat(),
279 | 'log has 5 blocks and some holes'
280 | )
281 | resolve()
282 | })
283 | )
284 | })
285 |
286 | const progressArr = []
287 | log.compactionProgress((stats) => {
288 | progressArr.push(stats)
289 | })
290 |
291 | t.equals(log.since.value, 44 + 6, 'since before compaction')
292 |
293 | const [err] = await run(log.compact)()
294 | await run(log.onDrain)()
295 | t.error(err, 'no error when compacting')
296 |
297 | t.equals(log.since.value, 33 + 6, 'since after compaction')
298 |
299 | t.deepEquals(
300 | progressArr,
301 | [
302 | {
303 | sizeDiff: 0,
304 | percent: 1,
305 | done: true,
306 | },
307 | {
308 | percent: 0,
309 | done: false,
310 | },
311 | {
312 | startOffset: 11,
313 | compactedOffset: 11,
314 | unshiftedOffset: 11,
315 | percent: 0,
316 | done: false,
317 | },
318 | {
319 | sizeDiff: 11, // the log is now 1 block shorter
320 | holesFound: 3,
321 | percent: 1,
322 | done: true,
323 | },
324 | ],
325 | 'progress events'
326 | )
327 |
328 | await new Promise((resolve) => {
329 | log.stream({ offsets: false }).pipe(
330 | push.collect((err, ary) => {
331 | t.error(err, 'no error when streaming compacted log')
332 | t.deepEqual(
333 | ary,
334 | [
335 | // block 0
336 | [0x11, 0x22, 0x33],
337 | // block 1
338 | [0x44, 0x77, 0x88],
339 | // block 2
340 | [0x99, 0xaa, 0xcc],
341 | // block 3
342 | [0xdd, 0xee, 0xff],
343 | ].flat(),
344 | 'log has 4 blocks and no holes, except in the last block'
345 | )
346 | resolve()
347 | })
348 | )
349 | })
350 |
351 | await run(log.close)()
352 | t.end()
353 | })
354 |
355 | tape('cannot read truncated regions of the log', async (t) => {
356 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
357 | const log = Log(file, { blockSize: 11, codec: hexCodec })
358 |
359 | await run(log.append)(
360 | [
361 | // block 0
362 | [0x11, 0x22, 0x33], // offsets: 0, 3, 6
363 | // block 1
364 | [0x44, 0x55, 0x66], // offsets: 11+0, 11+3, 11+6
365 | // block 2
366 | [0x77, 0x88, 0x99], // offsets: 22+0, 22+3, 22+6
367 | ].flat()
368 | )
369 | t.pass('appended records')
370 |
371 | await run(log.del)(11 + 3)
372 | await run(log.del)(11 + 6)
373 | await run(log.del)(22 + 0)
374 | await run(log.onDeletesFlushed)()
375 | t.pass('delete some records')
376 |
377 | await new Promise((resolve) => {
378 | log.stream({ offsets: false }).pipe(
379 | push.collect((err, ary) => {
380 | t.deepEqual(
381 | ary,
382 | [
383 | // block 0
384 | [0x11, 0x22, 0x33],
385 | // block 1
386 | [0x44, null, null],
387 | // block 2
388 | [null, 0x88, 0x99],
389 | ].flat(),
390 | 'log has some holes'
391 | )
392 | resolve()
393 | })
394 | )
395 | })
396 |
397 | const [err] = await run(log.compact)()
398 | await run(log.onDrain)()
399 | t.error(err, 'no error when compacting')
400 |
401 | await new Promise((resolve) => {
402 | log.stream({ offsets: false }).pipe(
403 | push.collect((err, ary) => {
404 | t.deepEqual(
405 | ary,
406 | [
407 | // block 0
408 | [0x11, 0x22, 0x33],
409 | // block 1
410 | [0x44, 0x88, 0x99],
411 | ].flat(),
412 | 'log has no holes'
413 | )
414 | resolve()
415 | })
416 | )
417 | })
418 |
419 | const [err2, item] = await run(log.get)(22 + 3) // outdated offset for 0x88
420 | t.ok(err2)
421 | t.equals(err2.code, 'ERR_AAOL_OFFSET_OUT_OF_BOUNDS')
422 | t.notEquals(item, 0x88)
423 |
424 | await run(log.close)()
425 | t.end()
426 | })
427 |
428 | tape('compact handling last deleted record on last block', async (t) => {
429 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
430 | const log = Log(file, {
431 | blockSize: 11, // fits 3 records of size 3 plus EOB of size 2
432 | codec: hexCodec,
433 | })
434 |
435 | await run(log.append)(
436 | [
437 | // block 0
438 | [0x11, 0x22, 0x33], // offsets: 0, 3, 6
439 | // block 1
440 | [0x44, 0x55, 0x66], // offsets: 11+0, 11+3, 11+6
441 | // block 2
442 | [0x77, 0x88, 0x99], // offsets: 22+0, 22+3, 22+6
443 | ].flat()
444 | )
445 | t.pass('appended records')
446 |
447 | await run(log.del)(11 + 3)
448 | await run(log.del)(22 + 6)
449 | await run(log.onDeletesFlushed)()
450 | t.pass('deleted some records in the middle')
451 |
452 | await new Promise((resolve) => {
453 | log.stream({ offsets: false }).pipe(
454 | push.collect((err, ary) => {
455 | t.deepEqual(
456 | ary,
457 | [
458 | // block 0
459 | [0x11, 0x22, 0x33],
460 | // block 1
461 | [0x44, null, 0x66],
462 | // block 2
463 | [0x77, 0x88, null],
464 | ].flat(),
465 | 'log has 3 blocks and some holes'
466 | )
467 | resolve()
468 | })
469 | )
470 | })
471 |
472 | t.equals(log.since.value, 22 + 6, 'since before compaction')
473 |
474 | const [err] = await run(log.compact)()
475 | await run(log.onDrain)()
476 | t.error(err, 'no error when compacting')
477 |
478 | t.equals(log.since.value, 22 + 0, 'since after compaction')
479 |
480 | await new Promise((resolve) => {
481 | log.stream({ offsets: false }).pipe(
482 | push.collect((err, ary) => {
483 | t.error(err, 'no error when streaming compacted log')
484 | t.deepEqual(
485 | ary,
486 | [
487 | // block 0
488 | [0x11, 0x22, 0x33],
489 | // block 1
490 | [0x44, 0x66, 0x77],
491 | // block 2
492 | [0x88],
493 | ].flat(),
494 | 'log has 3 blocks'
495 | )
496 | resolve()
497 | })
498 | )
499 | })
500 |
501 | await run(log.close)()
502 | t.end()
503 | })
504 |
505 | tape('compact handling holes of different sizes', async (t) => {
506 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
507 | const log = Log(file, {
508 | blockSize: 14, // fits 4 records of size 3 plus EOB of size 2
509 | codec: hexCodec,
510 | })
511 |
512 | await run(log.append)(
513 | [
514 | // block 0
515 | [0x11, 0x2222, 0x33], // offsets: 0, 3, 9
516 | // block 1
517 | [0x4444, 0x55, 0x66], // offsets: 14+0, 14+6, 14+9
518 | // block 2
519 | [0x77, 0x88, 0x99, 0xaa], // offsets: 28+0, 28+3, 28+6, 28+9
520 | ].flat()
521 | )
522 | t.pass('appended records')
523 |
524 | await run(log.del)(3)
525 | await run(log.del)(14 + 0)
526 | await run(log.onDeletesFlushed)()
527 | t.pass('deleted some records in the middle')
528 |
529 | await new Promise((resolve) => {
530 | log.stream({ offsets: false }).pipe(
531 | push.collect((err, ary) => {
532 | t.deepEqual(
533 | ary,
534 | [
535 | // block 0
536 | [0x11, null, 0x33],
537 | // block 1
538 | [null, 0x55, 0x66],
539 | // block 2
540 | [0x77, 0x88, 0x99, 0xaa],
541 | ].flat(),
542 | 'log has 3 blocks and some holes'
543 | )
544 | resolve()
545 | })
546 | )
547 | })
548 |
549 | const [err] = await run(log.compact)()
550 | await run(log.onDrain)()
551 | t.error(err, 'no error when compacting')
552 |
553 | await new Promise((resolve) => {
554 | log.stream({ offsets: false }).pipe(
555 | push.collect((err, ary) => {
556 | t.error(err, 'no error when streaming compacted log')
557 | t.deepEqual(
558 | ary,
559 | [
560 | // block 0
561 | [0x11, 0x33, 0x55, 0x66],
562 | // block 1
563 | [0x77, 0x88, 0x99, 0xaa],
564 | ].flat(),
565 | 'log has 2 blocks'
566 | )
567 | resolve()
568 | })
569 | )
570 | })
571 |
572 | await run(log.close)()
573 | t.end()
574 | })
575 |
576 | tape('startOffset is correct', async (t) => {
577 | t.timeoutAfter(6000)
578 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
579 | const log = Log(file, { blockSize: 9, codec: hexCodec })
580 |
581 | await run(log.append)(
582 | [
583 | // block 0
584 | [0x11, 0x22], // offsets: 0, 3
585 | // block 1
586 | [0x33, 0x44], // offsets: 9+0, 9+3
587 | ].flat()
588 | )
589 | await run(log.onDrain)()
590 | t.pass('append four records')
591 |
592 | await run(log.del)(0)
593 | await run(log.onDeletesFlushed)()
594 | t.pass('delete 1st record')
595 |
596 | const progressArr = []
597 | log.compactionProgress((stats) => {
598 | progressArr.push(stats)
599 | })
600 |
601 | const [err] = await run(log.compact)()
602 | t.error(err, 'no error when compacting')
603 |
604 | t.deepEquals(
605 | progressArr,
606 | [
607 | {
608 | sizeDiff: 0,
609 | percent: 1,
610 | done: true,
611 | },
612 | {
613 | percent: 0,
614 | done: false,
615 | },
616 | {
617 | startOffset: 0,
618 | compactedOffset: 0,
619 | unshiftedOffset: 3,
620 | percent: 0.25,
621 | done: false,
622 | },
623 | {
624 | sizeDiff: 1,
625 | holesFound: 1,
626 | percent: 1,
627 | done: true,
628 | },
629 | ],
630 | 'progress events'
631 | )
632 |
633 | await new Promise((resolve) => {
634 | log.stream({ offsets: false }).pipe(
635 | push.collect((err, ary) => {
636 | t.error(err, 'no error when streaming compacted log')
637 | t.deepEqual(
638 | ary,
639 | [
640 | // block 0
641 | [0x22, 0x33],
642 | // block 1
643 | [0x44],
644 | ].flat(),
645 | 'log has 2 blocks'
646 | )
647 | resolve()
648 | })
649 | )
650 | })
651 |
652 | await run(log.close)()
653 | t.end()
654 | })
655 |
656 | tape('recovers from crash just after persisting state', async (t) => {
657 | t.timeoutAfter(6000)
658 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
659 | let log = Log(file, { blockSize: 9, codec: hexCodec })
660 | t.pass('suppose the log has blockSize 9')
661 | t.pass('suppose we had blocks: [null, 0x22] and [0x33, 0x44]')
662 |
663 | await run(log.append)(
664 | [
665 | // block 0
666 | [0x22, 0x33], // offsets: 0, 3
667 | // block 1
668 | [0x33, 0x44], // offsets: 9+0, 9+3
669 | ].flat()
670 | )
671 | await run(log.close)()
672 | t.pass('suppose compaction was in progress: [0x22, 0x33] and [0x33, 0x44]')
673 |
674 | const version = [1, 0, 0, 0] // uint32LE
675 | const startOffset = [0, 0, 0, 0] // uint32LE
676 | const truncateBlockIndex = [255, 255, 255, 255] //uint32LE
677 | const compactingBlockIndex = [1, 0, 0, 0] // uint32LE
678 | const unshiftedOffset = [9 + 3, 0, 0, 0] // uint32LE
679 | const unshiftedBlock = [
680 | [1, 0, 0x33],
681 | [1, 0, 0x44],
682 | [0, 0, 0],
683 | ].flat()
684 | await fs.promises.writeFile(
685 | file + '.compaction',
686 | Buffer.from([
687 | ...version,
688 | ...startOffset,
689 | ...truncateBlockIndex,
690 | ...compactingBlockIndex,
691 | ...unshiftedOffset,
692 | ...unshiftedBlock,
693 | ])
694 | )
695 | t.pass('suppose compaction file: blockIndex 1, unshifted 12, [0x33, 0x44]')
696 | t.true(fs.existsSync(file + '.compaction'), 'compaction file exists')
697 |
698 | log = Log(file, { blockSize: 9, codec: hexCodec })
699 | t.pass('start log, compaction should autostart')
700 |
701 | const progressArr = []
702 | log.compactionProgress((stats) => {
703 | progressArr.push(stats)
704 | })
705 |
706 | await timer(1000)
707 |
708 | t.deepEquals(
709 | progressArr,
710 | [
711 | {
712 | percent: 0,
713 | done: false,
714 | },
715 | {
716 | startOffset: 0,
717 | compactedOffset: 9,
718 | unshiftedOffset: 12,
719 | percent: 1,
720 | done: false,
721 | },
722 | {
723 | sizeDiff: 1,
724 | holesFound: 0,
725 | percent: 1,
726 | done: true,
727 | },
728 | ],
729 | 'progress events'
730 | )
731 |
732 | await new Promise((resolve) => {
733 | log.stream({ offsets: false }).pipe(
734 | push.collect((err, ary) => {
735 | t.error(err, 'no error when streaming compacted log')
736 | t.deepEqual(
737 | ary,
738 | [
739 | // block 0
740 | [0x22, 0x33],
741 | // block 1
742 | [0x44],
743 | ].flat(),
744 | 'log has 2 blocks'
745 | )
746 | resolve()
747 | })
748 | )
749 | })
750 |
751 | t.false(fs.existsSync(file + '.compaction'), 'compaction file is autodeleted')
752 |
753 | await run(log.close)()
754 | t.end()
755 | })
756 |
757 | tape('recovers from crash just after persisting block', async (t) => {
758 | t.timeoutAfter(6000)
759 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
760 | let log = Log(file, { blockSize: 9, codec: hexCodec })
761 | t.pass('suppose the log has blockSize 9')
762 | t.pass('suppose we had blocks: [null, 0x22] and [0x33, 0x44]')
763 |
764 | await run(log.append)(
765 | [
766 | // block 0
767 | [0x22, 0x33], // offsets: 0, 3
768 | // block 1
769 | [0x33, 0x44], // offsets: 9+0, 9+3
770 | ].flat()
771 | )
772 | await run(log.close)()
773 | t.pass('suppose compaction was in progress: [0x22, 0x33] and [0x33, 0x44]')
774 |
775 | const version = [1, 0, 0, 0] // uint32LE
776 | const startOffset = [0, 0, 0, 0] // uint32LE
777 | const truncateBlockIndex = [255, 255, 255, 255] // uint32LE
778 | const compactingBlockIndex = [0, 0, 0, 0] // uint32LE
779 | const unshiftedOffset = [0, 0, 0, 0] // uint32LE
780 | const unshiftedBlock = [
781 | [2, 0, 0, 0], // deleted. used to be [2, 0, 0x11, 0x11]
782 | [1, 0, 0x22],
783 | [0, 0],
784 | ].flat()
785 | await fs.promises.writeFile(
786 | file + '.compaction',
787 | Buffer.from([
788 | ...version,
789 | ...startOffset,
790 | ...truncateBlockIndex,
791 | ...compactingBlockIndex,
792 | ...unshiftedOffset,
793 | ...unshiftedBlock,
794 | ])
795 | )
796 | t.pass('suppose compaction file: blockIndex 0, unshifted 0, [null, 0x22]')
797 |
798 | log = Log(file, { blockSize: 9, codec: hexCodec })
799 | t.pass('start log, compaction should autostart')
800 |
801 | await timer(1000)
802 |
803 | await new Promise((resolve) => {
804 | log.stream({ offsets: false }).pipe(
805 | push.collect((err, ary) => {
806 | t.error(err, 'no error when streaming compacted log')
807 | t.deepEqual(
808 | ary,
809 | [
810 | // block 0
811 | [0x22, 0x33],
812 | // block 1
813 | [0x44],
814 | ].flat(),
815 | 'log has 2 blocks'
816 | )
817 | resolve()
818 | })
819 | )
820 | })
821 |
822 | await run(log.close)()
823 | t.end()
824 | })
825 |
826 | tape('restarts from crash just before truncating log', async (t) => {
827 | t.timeoutAfter(6000)
828 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
829 | let log = Log(file, { blockSize: 9, codec: hexCodec })
830 | t.pass('suppose the log has blockSize 9')
831 | t.pass('suppose we had blocks: [null, 0x22], [null, 0x44] and [0x55, 0x66]')
832 |
833 | await run(log.append)(
834 | [
835 | // block 0
836 | [0x22, 0x44], // offsets: 0, 3
837 | // block 1
838 | [0x55, 0x66], // offsets: 9+0, 9+3
839 | // block 2
840 | [0x55, 0x66], // offsets: 18+0, 18+3
841 | ].flat()
842 | )
843 | await run(log.close)()
844 | t.pass('suppose compaction ready: [0x22, 0x44], [0x55, 0x66], [0x55, 0x66]')
845 |
846 | const version = [1, 0, 0, 0] // uint32LE
847 | const startOffset = [0, 0, 0, 0] // uint32LE
848 | const truncateBlockIndex = [1, 0, 0, 0] //uint32LE
849 | const compactingBlockIndex = [0, 0, 0, 0] // uint32LE
850 | const unshiftedOffset = [0, 0, 0, 0] // uint32LE
851 | const unshiftedBlock = [0, 0, 0, 0, 0, 0, 0, 0, 0]
852 | await fs.promises.writeFile(
853 | file + '.compaction',
854 | Buffer.from([
855 | ...version,
856 | ...startOffset,
857 | ...truncateBlockIndex,
858 | ...compactingBlockIndex,
859 | ...unshiftedOffset,
860 | ...unshiftedBlock,
861 | ])
862 | )
863 | t.pass('suppose compaction file: truncateBlockIndex 1')
864 |
865 | log = Log(file, { blockSize: 9, codec: hexCodec })
866 | t.pass('start log, compaction should autostart')
867 |
868 | await timer(1000)
869 |
870 | await new Promise((resolve) => {
871 | log.stream({ offsets: false }).pipe(
872 | push.collect((err, ary) => {
873 | t.error(err, 'no error when streaming compacted log')
874 | t.deepEqual(
875 | ary,
876 | [
877 | // block 0
878 | [0x22, 0x44],
879 | // block 1
880 | [0x55, 0x66],
881 | ].flat(),
882 | 'truncated to: [0x22, 0x44], [0x55, 0x66]'
883 | )
884 | resolve()
885 | })
886 | )
887 | })
888 |
889 | await run(log.close)()
890 | t.end()
891 | })
892 |
893 | tape('append during compaction is postponed', async (t) => {
894 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
895 | const log = Log(file, { blockSize: 10 })
896 |
897 | const buf1 = Buffer.from('first')
898 | const buf2 = Buffer.from('second')
899 | const buf3 = Buffer.from('third')
900 |
901 | const [, offset1] = await run(log.append)(buf1)
902 | const [, offset2] = await run(log.append)(buf2)
903 | await run(log.onDrain)()
904 | t.pass('append two records')
905 |
906 | await run(log.del)(offset1)
907 | await run(log.onDeletesFlushed)()
908 | t.pass('delete first record')
909 |
910 | let appendDone = false
911 | let compactDone = false
912 | log.compact((err) => {
913 | t.error(err, 'no error when compacting')
914 | t.false(appendDone, 'compact was done before append')
915 | compactDone = true
916 | })
917 | const [err, offset3] = await run(log.append)(buf3)
918 | appendDone = true
919 | t.error(err, 'no error when appending')
920 | t.equal(offset3, 10, 'append wrote "third" on the 2nd block')
921 | t.true(compactDone, 'compaction was done by the time append is done')
922 | await run(log.onDrain)()
923 |
924 | await new Promise((resolve) => {
925 | log.stream({ offsets: false }).pipe(
926 | push.collect((err, ary) => {
927 | t.error(err, 'no error when streaming compacted log')
928 | t.deepEqual(ary, [buf2, buf3], 'only 2nd and 3rd records exist')
929 | resolve()
930 | })
931 | )
932 | })
933 |
934 | await run(log.close)()
935 | t.end()
936 | })
937 |
938 | tape('appendTransaction during compaction is postponed', async (t) => {
939 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
940 | const log = Log(file, { blockSize: 10 })
941 |
942 | const buf1 = Buffer.from('first')
943 | const buf2 = Buffer.from('second')
944 | const buf3 = Buffer.from('third')
945 |
946 | const [, offset1] = await run(log.append)(buf1)
947 | const [, offset2] = await run(log.append)(buf2)
948 | await run(log.onDrain)()
949 | t.pass('append two records')
950 |
951 | await run(log.del)(offset1)
952 | await run(log.onDeletesFlushed)()
953 | t.pass('delete first record')
954 |
955 | let appendTransactionDone = false
956 | let compactDone = false
957 | log.compact((err) => {
958 | t.error(err, 'no error when compacting')
959 | t.false(appendTransactionDone, 'compact was done before appendTransaction')
960 | compactDone = true
961 | })
962 | const [err, offset3] = await run(log.appendTransaction)([buf3])
963 | appendTransactionDone = true
964 | t.error(err, 'no error when appending')
965 | t.deepEquals(offset3, [10], 'appendTransaction wrote "third" on 2nd block')
966 |   t.true(compactDone, 'compaction was done by the time appendTransaction is done')
967 | await run(log.onDrain)()
968 |
969 | await new Promise((resolve) => {
970 | log.stream({ offsets: false }).pipe(
971 | push.collect((err, ary) => {
972 | t.error(err, 'no error when streaming compacted log')
973 | t.deepEqual(ary, [buf2, buf3], 'only 2nd and 3rd records exist')
974 | resolve()
975 | })
976 | )
977 | })
978 |
979 | await run(log.close)()
980 | t.end()
981 | })
982 |
983 | tape('del during compaction is forbidden', async (t) => {
984 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
985 | const log = Log(file, { blockSize: 10 })
986 |
987 | const buf1 = Buffer.from('first')
988 | const buf2 = Buffer.from('second')
989 |
990 | const [, offset1] = await run(log.append)(buf1)
991 | const [, offset2] = await run(log.append)(buf2)
992 | await run(log.onDrain)()
993 | t.pass('append two records')
994 |
995 | await run(log.del)(offset1)
996 | await run(log.onDeletesFlushed)()
997 | t.pass('delete first record')
998 |
999 | let compactDone = false
1000 | log.compact((err) => {
1001 | t.error(err, 'no error when compacting')
1002 | compactDone = true
1003 | })
1004 | const [err, offset3] = await run(log.del)(10)
1005 | t.ok(err, 'del is forbidden')
1006 | t.match(err.message, /Cannot delete/)
1007 | t.notOk(offset3)
1008 |
1009 | await new Promise((resolve) => {
1010 | const interval = setInterval(() => {
1011 | if (compactDone) {
1012 | clearInterval(interval)
1013 | resolve()
1014 | }
1015 | }, 100)
1016 | })
1017 |
1018 | await new Promise((resolve) => {
1019 | log.stream({ offsets: false }).pipe(
1020 | push.collect((err, ary) => {
1021 | t.error(err, 'no error when streaming compacted log')
1022 | t.deepEqual(ary, [buf2], 'only 2nd record exists')
1023 | resolve()
1024 | })
1025 | )
1026 | })
1027 |
1028 | await run(log.close)()
1029 | t.end()
1030 | })
1031 |
1032 | tape('there can only be one compact at a time', async (t) => {
1033 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
1034 | const log = Log(file, { blockSize: 10 })
1035 |
1036 | const buf1 = Buffer.from('first')
1037 | const buf2 = Buffer.from('second')
1038 |
1039 | const [, offset1] = await run(log.append)(buf1)
1040 | const [, offset2] = await run(log.append)(buf2)
1041 | await run(log.onDrain)()
1042 | t.pass('append two records')
1043 |
1044 | await run(log.del)(offset1)
1045 | await run(log.onDeletesFlushed)()
1046 | t.pass('delete first record')
1047 |
1048 | let compact1Done = false
1049 | let compact2Done = false
1050 | log.compact((err) => {
1051 | t.error(err, 'no error when compacting')
1052 | t.true(compact2Done, '2nd compact cb has been called already')
1053 | compact1Done = true
1054 | })
1055 | log.compact((err) => {
1056 | t.error(err, 'no error when compacting')
1057 | t.false(compact1Done, '1st compact cb has not been called yet')
1058 | compact2Done = true
1059 | })
1060 | await run(log.onDrain)()
1061 | t.true(compact1Done, 'compact 1 done')
1062 | t.true(compact2Done, 'compact 2 done')
1063 |
1064 | await new Promise((resolve) => {
1065 | log.stream({ offsets: false }).pipe(
1066 | push.collect((err, ary) => {
1067 | t.error(err, 'no error when streaming compacted log')
1068 | t.deepEqual(ary, [buf2], 'only second record exists')
1069 | resolve()
1070 | })
1071 | )
1072 | })
1073 |
1074 | await run(log.close)()
1075 | t.end()
1076 | })
1077 |
1078 | tape('live streams post-compaction', async (t) => {
1079 | const file = '/tmp/compaction-test_' + Date.now() + '.log'
1080 | const log = Log(file, {
1081 | blockSize: 11, // fits 3 records of size 3 plus EOB of size 2
1082 | codec: hexCodec,
1083 | })
1084 |
1085 | await run(log.append)(
1086 | [
1087 | // block 0
1088 | [0x11, 0x22, 0x33], // offsets: 0, 3, 6
1089 | // block 1
1090 | [0x44, 0x55, 0x66], // offsets: 11+0, 11+3, 11+6
1091 | // block 2
1092 | [0x77, 0x88, 0x99], // offsets: 22+0, 22+3, 22+6
1093 | ].flat()
1094 | )
1095 | t.pass('appended records')
1096 |
1097 | let liveStreamFoundAA = false
1098 | log.stream({ gt: 22 + 6, offsets: false, old: false, live: true }).pipe({
1099 | paused: false,
1100 | write(hex) {
1101 | t.equal(hex, 0xaa)
1102 | liveStreamFoundAA = true
1103 | },
1104 | end() {},
1105 | })
1106 |
1107 | await run(log.del)(11 + 3)
1108 | await run(log.del)(22 + 6)
1109 | await run(log.onDeletesFlushed)()
1110 | t.pass('deleted some records in the middle')
1111 |
1112 | const [err] = await run(log.compact)()
1113 | await run(log.onDrain)()
1114 | t.error(err, 'no error when compacting')
1115 |
1116 | await run(log.append)(0xaa)
1117 | t.pass('appended new record')
1118 |
1119 | await timer(1000)
1120 |
1121 | t.true(liveStreamFoundAA, 'live stream found new record')
1122 |
1123 | await run(log.close)()
1124 | t.end()
1125 | })
1126 |
--------------------------------------------------------------------------------
/test/delete.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var pify = require('util').promisify
8 | var push = require('push-stream')
9 | var Log = require('../')
10 |
11 | var msg1 = Buffer.from(
12 | 'hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world hello world'
13 | )
14 | var msg2 = Buffer.from(
15 | 'hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db hello offset db'
16 | )
17 | var msg3 = Buffer.from(
18 | 'hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db hello offsetty db'
19 | )
20 |
21 | tape('simple', function (t) {
22 | var file = '/tmp/fao-test_del.log'
23 | try {
24 | fs.unlinkSync(file)
25 | } catch (_) {}
26 | var db = Log(file, { blockSize: 2 * 1024 })
27 |
28 | db.append(msg1, function (err, offset1) {
29 | if (err) throw err
30 | t.equal(offset1, 0)
31 | db.append(msg2, function (err, offset2) {
32 | if (err) throw err
33 | db.append(msg3, function (err, offset3) {
34 | if (err) throw err
35 | t.ok(offset3 > offset2)
36 | db.get(offset1, function (err, buf) {
37 | if (err) throw err
38 | t.equal(buf.toString(), msg1.toString())
39 |
40 | db.get(offset2, function (err, buf) {
41 | if (err) throw err
42 | t.equal(buf.toString(), msg2.toString())
43 |
44 | db.get(offset3, function (err, buf) {
45 | if (err) throw err
46 | t.equal(buf.toString(), msg3.toString())
47 |
48 | db.del(offset3, function (err) {
49 | t.error(err)
50 |
51 | db.onDeletesFlushed(() => {
52 | db.get(offset3, function (err, deletedBuf) {
53 | t.ok(err)
54 | t.equal(err.message, 'Record has been deleted')
55 | t.equal(err.code, 'ERR_AAOL_DELETED_RECORD')
56 | // write changes
57 | db.onDrain(t.end)
58 | })
59 | })
60 | })
61 | })
62 | })
63 | })
64 | })
65 | })
66 | })
67 | })
68 |
69 | tape('deleted records are never invalid (validateRecord)', function (t) {
70 | var file = '/tmp/fao-test_del_invalid.log'
71 | try {
72 | fs.unlinkSync(file)
73 | } catch (_) {}
74 | var opts = {
75 | blockSize: 2 * 1024,
76 | codec: {
77 | encode(msg) {
78 | return Buffer.from(JSON.stringify(msg), 'utf8')
79 | },
80 | decode(buf) {
81 | return JSON.parse(buf.toString('utf8'))
82 | },
83 | },
84 | validateRecord(buf) {
85 | try {
86 | JSON.parse(buf.toString('utf8'))
87 | return true
88 | } catch {
89 | return false
90 | }
91 | },
92 | }
93 | var db = Log(file, opts)
94 |
95 | db.append({ text: 'm0' }, function (err, offset1) {
96 | if (err) throw err
97 | db.append({ text: 'm1' }, function (err, offset2) {
98 | if (err) throw err
99 | db.append({ text: 'm2' }, function (err, offset3) {
100 | if (err) throw err
101 |
102 | db.del(offset2, function (err) {
103 | t.error(err)
104 |
105 | db.onDeletesFlushed(() => {
106 | db.close(() => {
107 | var db2 = Log(file, opts)
108 |
109 | db2.stream({ offsets: false }).pipe(
110 | push.collect((err, ary) => {
111 | t.error(err)
112 | t.deepEqual(ary, [{ text: 'm0' }, null, { text: 'm2' }])
113 | db2.close(t.end)
114 | })
115 | )
116 | })
117 | })
118 | })
119 | })
120 | })
121 | })
122 | })
123 |
124 | tape('simple reread', function (t) {
125 | var file = '/tmp/fao-test_del.log'
126 | var db = Log(file, { blockSize: 2 * 1024 })
127 |
128 | var offset1 = 0
129 | var offset2 = msg1.length + 2
130 | var offset3 = msg1.length + 2 + msg2.length + 2
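    | // These offsets assume each record is framed with a 2-byte length prefix,
    | // so the next record starts at previous offset + previous length + 2.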
131 |
132 | db.get(offset1, function (err, buf) {
133 | if (err) throw err
134 | t.equal(buf.toString(), msg1.toString())
135 |
136 | db.get(offset2, function (err, buf) {
137 | if (err) throw err
138 | t.equal(buf.toString(), msg2.toString())
139 |
140 | db.get(offset3, function (err) {
141 | t.ok(err)
142 | t.equal(err.message, 'Record has been deleted')
143 | t.equal(err.code, 'ERR_AAOL_DELETED_RECORD')
144 |
145 | db.del(offset2, function (err) {
146 | t.error(err)
147 |
148 | db.onDeletesFlushed(() => {
149 | db.get(offset2, function (err, deletedBuf) {
150 | t.ok(err)
151 | t.equal(err.message, 'Record has been deleted')
152 | t.equal(err.code, 'ERR_AAOL_DELETED_RECORD')
153 | // write changes
154 | db.close(t.end)
155 | })
156 | })
157 | })
158 | })
159 | })
160 | })
161 | })
162 |
163 | tape('simple reread 2', function (t) {
164 | var file = '/tmp/fao-test_del.log'
165 | var db = Log(file, { blockSize: 2 * 1024 })
166 |
167 | db.get(0, function (err, buf) {
168 | if (err) throw err
169 | t.equal(buf.toString(), msg1.toString())
170 |
171 | db.get(msg1.length + 2, function (err, deletedBuf) {
172 | console.log(deletedBuf)
173 | t.ok(err)
174 | t.equal(err.message, 'Record has been deleted')
175 | t.equal(err.code, 'ERR_AAOL_DELETED_RECORD')
176 |
177 | db.close(t.end)
178 | })
179 | })
180 | })
181 |
182 | tape('stream delete', function (t) {
183 | var file = '/tmp/offset-test_' + Date.now() + '.log'
184 | var db = Log(file, { blockSize: 64 * 1024 })
185 |
186 | var buf2 = Buffer.from('hello offset db')
187 |
188 | db.append(Buffer.from('hello world'), function (err, offset1) {
189 | if (err) throw err
190 | db.append(buf2, function (err, offset2) {
191 | if (err) throw err
192 | db.del(offset1, function (err) {
193 | t.error(err)
194 | db.onDrain(() => {
195 | db.onDeletesFlushed(() => {
196 | db.stream({ offsets: false }).pipe(
197 | push.collect((err, ary) => {
198 | t.notOk(err)
199 | t.deepEqual(ary, [null, buf2])
200 | db.close(t.end)
201 | })
202 | )
203 | })
204 | })
205 | })
206 | })
207 | })
208 | })
209 |
210 | tape('delete many', async (t) => {
211 | t.timeoutAfter(60e3)
212 | const file = '/tmp/aaol-test-delete-many' + Date.now() + '.log'
213 | const log = Log(file, { blockSize: 64 * 1024 })
214 |
215 | const TOTAL = 100000
216 | const offsets = []
217 | const logAppend = pify(log.append)
218 | console.time('append ' + TOTAL)
219 | for (let i = 0; i < TOTAL; i += 1) {
220 | const offset = await logAppend(Buffer.from(`hello ${i}`))
221 | offsets.push(offset)
222 | }
223 | t.pass('appended records')
224 | console.timeEnd('append ' + TOTAL)
225 |
226 | await pify(log.onDrain)()
227 |
228 | const logDel = pify(log.del)
229 | console.time('delete ' + TOTAL)
230 | for (let i = 0; i < TOTAL; i += 2) {
231 | await logDel(offsets[i])
232 | }
233 | console.timeEnd('delete ' + TOTAL)
234 | t.pass('deleted messages')
235 |
236 | await pify(log.onDeletesFlushed)()
237 |
238 | await new Promise((resolve) => {
239 | log.stream({ offsets: false }).pipe(
240 | push.collect((err, ary) => {
241 | t.error(err, 'no error on streaming')
242 | for (let i = 0; i < TOTAL; i += 1) {
243 | if (i % 2 === 0) {
244 | if (ary[i] !== null) t.fail('record ' + i + ' should be deleted')
245 | } else {
246 | if (ary[i] === null) t.fail('record ' + i + ' should be present')
247 | }
248 | }
249 | resolve()
250 | })
251 | )
252 | })
253 |
254 | await pify(log.close)()
255 | t.end()
256 | })
257 |
--------------------------------------------------------------------------------
/test/fix-buggy-write.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var push = require('push-stream')
8 | var Offset = require('../')
9 |
10 | var file = '/tmp/ds-test_restart.log'
11 |
12 | var msg1 = { text: 'hello world hello world' }
13 | var msg2 = { text: 'hello world hello world 2' }
14 |
15 | tape('simple', function (t) {
16 | try {
17 | fs.unlinkSync(file)
18 | } catch (_) {}
19 | var db = Offset(file, {
20 | block: 16 * 1024,
21 | codec: require('flumecodec/json'),
22 | })
23 |
24 | db.append(msg1, function (err, offset1) {
25 | if (err) throw err
26 | t.equal(offset1, 0)
27 | db.append(msg2, function (err, offset2) {
28 | if (err) throw err
29 | t.equal(offset2, 36)
30 |
31 | db.onDrain(() => {
32 | db.stream({ offsets: false }).pipe(
33 | push.collect((err, ary) => {
34 | t.deepEqual(ary, [msg1, msg2])
35 | t.end()
36 | })
37 | )
38 | })
39 | })
40 | })
41 | })
42 |
43 | tape('simple reread', function (t) {
44 | var db = Offset(file, {
45 | block: 16 * 1024,
46 | codec: require('flumecodec/json'),
47 | })
48 |
49 | db.onDrain(() => {
50 | db.stream({ offsets: false }).pipe(
51 | push.collect((err, ary) => {
52 | t.deepEqual(ary, [msg1, msg2])
53 | t.end()
54 | })
55 | )
56 | })
57 | })
58 |
--------------------------------------------------------------------------------
/test/fix-concurrency-write-drain-bug.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | const tape = require('tape')
6 | const fs = require('fs')
7 | const Offset = require('../')
8 |
9 | const file = '/tmp/ds-test_drain_since.log'
10 |
11 | const msg1 = { text: 'hello world hello world' }
12 |
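    | // Presumably a regression test for a write/drain race: with writeTimeout: 1,
    | // onDrain could fire before `since` was updated, so after draining we expect
    | // db.since.value to already equal the offset of the appended record (0 here).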
13 | tape('check since after drain', async (t) => {
14 | for (var i = 0; i < 1000; ++i) {
15 | try {
16 | fs.unlinkSync(file + i)
17 | } catch (_) {}
18 | const db = Offset(file + i, {
19 | block: 16 * 1024,
20 | writeTimeout: 1,
21 | codec: require('flumecodec/json'),
22 | })
23 |
24 | await new Promise((resolve, reject) => {
25 | db.onDrain(() => {
26 | db.append(msg1, (err, offset1) => {
27 |           if (err) return reject(err)
28 |
29 | setTimeout(() => {
30 | db.onDrain(() => {
31 | if (db.since.value !== 0) {
32 |               t.fail('since was not set after drain')
33 | }
34 | resolve()
35 | })
36 | }, 1)
37 | })
38 | })
39 | })
40 | }
41 | t.end()
42 | })
43 |
--------------------------------------------------------------------------------
/test/idempotent-resume.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var Log = require('../')
8 |
9 | const filename = '/tmp/dsf-idempotent-resume.log'
10 |
11 | try {
12 | fs.unlinkSync(filename)
13 | } catch (_) {}
14 | var log = Log(filename, { blockSize: 64 * 1024 })
15 |
16 | function Buf(fill, length) {
17 | var b = Buffer.alloc(length)
18 | b.fill(fill)
19 | return b
20 | }
21 |
22 | const TOTAL_RECORDS = 300_000
23 | const getRecordLength = (i) => 1 + (i % 500)
24 |
25 | tape('populate', function (t) {
26 | const records = Array(TOTAL_RECORDS)
27 | .fill(null)
28 | .map((x, i) => Buf(0x10, getRecordLength(i)))
29 | log.append(records, () => {
30 | log.onDrain(() => {
31 | t.end()
32 | })
33 | })
34 | })
35 |
36 | tape('a second resume() on the same stream is idempotent', function (t) {
37 | const stream = log.stream({ offsets: false })
38 |
39 | // The pipe causes the 1st resume to happen
40 | let i = 0
41 | stream.pipe({
42 | paused: false,
43 | offsets: false,
44 | write(buf) {
45 | const expected = getRecordLength(i)
46 | const actual = buf.length
47 | if (actual !== expected) {
48 | t.fail(`${i}-th record has ${actual} bytes, expected ${expected}`)
49 | process.exit(1) // otherwise the test will keep spamming many `t.fail`
50 | }
51 | i += 1
52 | },
53 | end() {
54 | t.equals(i, TOTAL_RECORDS)
55 | t.end()
56 | },
57 | })
58 |
59 | // This is the 2nd resume
60 | stream.resume()
61 | })
62 |
63 | tape('close', function (t) {
64 | t.equal(log.streams.size, 0, 'no open streams')
65 | log.close(() => {
66 | t.end()
67 | })
68 | })
69 |
--------------------------------------------------------------------------------
/test/jacob.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | const tape = require('tape')
6 | const fs = require('fs')
7 | const bipf = require('bipf')
8 | const RAF = require('polyraf')
9 | const Log = require('../')
10 |
11 | function toBIPF(msg) {
12 | const len = bipf.encodingLength(msg)
13 | const buf = Buffer.alloc(len)
14 | bipf.encode(msg, buf, 0)
15 | return buf
16 | }
17 |
18 | tape('corrupt message', function (t) {
19 | var file = '/tmp/jacob.log'
20 | try {
21 | fs.unlinkSync(file)
22 | } catch (_) {}
23 | var db = Log(file, { blockSize: 64 * 1024 })
24 |
25 | var bipf1 = toBIPF({ text: 'testing' })
26 | var bipf2 = toBIPF({ bool: true, test: 'testing2' })
27 | bipf2[7] = '!' // corrupt the message
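    | // (assigning a string to a Buffer index coerces to NaN, which is stored as 0,
    | // so one byte of the BIPF encoding is zeroed without changing its length)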
28 |
29 | db.append(bipf1, function (err, offset1) {
30 | if (err) throw err
31 | db.append(bipf2, function (err, offset2) {
32 | if (err) throw err
33 |
34 | db.close(t.end)
35 | })
36 | })
37 | })
38 |
39 | tape('corrupt message re-read without validation', function (t) {
40 | var file = '/tmp/jacob.log'
41 | var db = Log(file, { blockSize: 64 * 1024 })
42 |
43 | db.onDrain(() => {
44 | var result = []
45 |
46 | db.stream({ offsets: false }).pipe({
47 | paused: false,
48 | write: function (e) {
49 | result.push(e)
50 | },
51 | end: function () {
52 | // because these are just buffers we won't see the corruption
53 | t.equal(result.length, 2)
54 | db.close(t.end)
55 | },
56 | })
57 | })
58 | })
59 |
60 | tape('corrupt message re-read with validation', function (t) {
61 | var file = '/tmp/jacob.log'
62 | var db = Log(file, {
63 | blockSize: 64 * 1024,
64 | validateRecord: (d) => {
65 | try {
66 | bipf.decode(d, 0)
67 | return true
68 | } catch (ex) {
69 | return false
70 | }
71 | },
72 | })
73 |
74 | db.onDrain(() => {
75 | var result = []
76 |
77 | db.stream({ offsets: false }).pipe({
78 | paused: false,
79 | write: function (e) {
80 | result.push(e)
81 | },
82 | end: function () {
83 | t.equal(result.length, 1)
84 | db.close(t.end)
85 | },
86 | })
87 | })
88 | })
89 |
90 | tape('length corruption', function (t) {
91 | let file = '/tmp/jacob-length.log'
92 | try {
93 | fs.unlinkSync(file)
94 | } catch (_) {}
95 |
96 | var raf = RAF(file)
97 | let block = Buffer.alloc(64 * 1024)
98 |
99 | const bipf1 = toBIPF({ text: 'testing' })
100 | const bipf2 = toBIPF({ bool: true, test: 'testing2' })
101 |
102 | block.writeUInt16LE(bipf1.length, 0)
103 | bipf1.copy(block, 2)
104 | block.writeUInt16LE(65534, 2 + bipf1.length)
105 | bipf2.copy(block, 2 + bipf1.length + 2)
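    | // Hand-crafted block layout: [u16LE length][record][u16LE length][record];
    | // 65534 is a deliberately bogus length so the second record can't be read back.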
106 |
107 | raf.write(0, block, (err) => {
108 |     raf.close(t.end)
109 | })
110 | })
111 |
112 | tape('length re-read without validation', function (t) {
113 | var file = '/tmp/jacob-length.log'
114 | var db = Log(file, {
115 | blockSize: 64 * 1024,
116 | })
117 |
118 | db.onDrain(() => {
119 | var result = []
120 |
121 | db.stream({ offsets: false }).pipe({
122 | paused: false,
123 | write: function (e) {
124 | result.push(e)
125 | },
126 | end: function () {
127 | t.equal(result.length, 1)
128 |
129 | // append a fixed record
130 | const bipf2 = toBIPF({ bool: true, test: 'testing2' })
131 | db.append(bipf2, function (err) {
132 | t.error(err)
133 | db.close(t.end)
134 | })
135 | },
136 | })
137 | })
138 | })
139 |
140 | tape('length re-read with validation', function (t) {
141 | var file = '/tmp/jacob-length.log'
142 | var db = Log(file, {
143 | blockSize: 64 * 1024,
144 | validateRecord: (d) => {
145 | try {
146 | bipf.decode(d, 0)
147 | return true
148 | } catch (ex) {
149 | return false
150 | }
151 | },
152 | })
153 |
154 | db.onDrain(() => {
155 | var result = []
156 |
157 | db.stream({ offsets: false }).pipe({
158 | paused: false,
159 | write: function (e) {
160 | result.push(e)
161 | },
162 | end: function () {
163 | t.equal(result.length, 2)
164 | db.close(t.end)
165 | },
166 | })
167 | })
168 | })
169 |
--------------------------------------------------------------------------------
/test/stream-abort.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2022 Andre 'Staltz' Medeiros
2 | //
3 | // SPDX-License-Identifier: CC0-1.0
4 |
5 | const tape = require('tape')
6 | const fs = require('fs')
7 | const toPull = require('push-stream-to-pull-stream/source')
8 | const pull = require('pull-stream')
9 | const Log = require('../')
10 |
11 | const filename = '/tmp/aaol-abort-live-pull-stream.log'
12 |
13 | try {
14 | fs.unlinkSync(filename)
15 | } catch (_) {}
16 | const log = Log(filename, { blockSize: 64 * 1024 })
17 |
18 | const msg1 = Buffer.alloc(10).fill(0x10)
19 | const msg2 = Buffer.alloc(20).fill(0x20)
20 | const msg3 = Buffer.alloc(30).fill(0x30)
21 |
22 | tape('abort live push-stream-to-pull-stream should not end with err', (t) => {
23 | t.plan(8)
24 | log.append(msg1, (err) => {
25 | t.error(err, 'no err to append msg1')
26 | log.append(msg2, (err) => {
27 | t.error(err, 'no err to append msg2')
28 | const expected = [msg1, msg2, msg3]
29 | const logPushStream = log.stream({ live: true, offsets: false })
30 | const logPullStream = toPull(logPushStream)
31 | pull(
32 | logPullStream,
33 | pull.drain(
34 | (buf) => {
35 | t.deepEqual(buf, expected.shift())
36 | if (expected.length === 0) {
37 | log.close(() => {
38 | t.pass('closed AAOL')
39 | })
40 | }
41 | },
42 | (err) => {
43 | t.error(err, 'no err when pull.draining')
44 | }
45 | )
46 | )
47 | })
48 | })
49 | log.append(msg3, (err) => {
50 | t.error(err, 'no err to append msg3')
51 | })
52 | })
53 |
--------------------------------------------------------------------------------
/test/stream-pausable.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var push = require('push-stream')
8 | var Log = require('../')
9 |
10 | const filename = '/tmp/dsf-test-stream-pause.log'
11 |
12 | try {
13 | fs.unlinkSync(filename)
14 | } catch (_) {}
15 | var log = Log(filename, { blockSize: 64 * 1024 })
16 |
17 | function Buf(fill, length) {
18 | var b = Buffer.alloc(length)
19 | b.fill(fill)
20 | return b
21 | }
22 |
23 | var msg1 = Buf(0x10, 100)
24 | tape('populate', function (t) {
25 | let i = 0
26 | ;(function next() {
27 | log.append(msg1, function (err) {
28 | i++
29 | if (i < 1000) next()
30 | else {
31 | log.onDrain(() => {
32 | log.stream({ offsets: false }).pipe(
33 | push.collect((err, ary) => {
34 | t.equal(ary.length, 1000)
35 | t.end()
36 | })
37 | )
38 | })
39 | }
40 | })
41 | })()
42 | })
43 |
44 | tape('pausable', function (t) {
45 | let sink
46 | let i = 0
47 | t.timeoutAfter(50000)
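    | // push-stream contract exercised here: while sink.paused is true the source
    | // must not call write(); once unpaused, the sink calls sink.source.resume()
    | // to pull the remaining records.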
48 | log.stream({ offsets: false }).pipe(
49 | (sink = {
50 | paused: false,
51 | write: function (buf) {
52 |       if (sink.paused) t.fail('should not write to sink while it is paused')
53 | if (buf.compare(msg1) !== 0) t.fail('record does not match v1')
54 |
55 | sink.paused = true
56 | setTimeout(() => {
57 | sink.paused = false
58 | sink.source.resume()
59 | }, 1)
60 | },
61 | end: function () {
62 | t.end()
63 | },
64 | })
65 | )
66 | })
67 |
68 | tape('close', function (t) {
69 | t.equal(log.streams.size, 0, 'no open streams')
70 | log.stream({ offsets: false }).pipe({
71 | paused: false,
72 | write: function () {},
73 | end: function () {
74 | t.end()
75 | },
76 | })
77 | log.close(() => {})
78 | })
79 |
--------------------------------------------------------------------------------
/test/stream.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | var tape = require('tape')
6 | var fs = require('fs')
7 | var push = require('push-stream')
8 | var Log = require('../')
9 |
10 | const filename = '/tmp/dsf-test-stream.log'
11 |
12 | try {
13 | fs.unlinkSync(filename)
14 | } catch (_) {}
15 | var log = Log(filename, { blockSize: 64 * 1024 })
16 |
17 | function Buf(fill, length) {
18 | var b = Buffer.alloc(length)
19 | b.fill(fill)
20 | return b
21 | }
22 |
23 | tape('empty', function (t) {
24 | log.stream({ offsets: false }).pipe({
25 | paused: false,
26 | write: function () {
27 | throw new Error('should be empty')
28 | },
29 | end: t.end,
30 | })
31 | })
32 |
33 | var msg1 = Buf(0x10, 10)
34 | tape('single', function (t) {
35 | log.append(msg1, function (err) {
36 | t.notOk(err)
37 | log.onDrain(() => {
38 | log.stream({ offsets: false }).pipe(
39 | push.collect((err, ary) => {
40 | t.notOk(err)
41 | t.deepEqual(ary, [msg1])
42 | t.end()
43 | })
44 | )
45 | })
46 | })
47 | })
48 |
49 | tape('single live pausable', function (t) {
50 | t.timeoutAfter(500)
51 | let i = 0
52 | let sink
53 | log.stream({ offsets: false, live: true }).pipe(
54 | (sink = {
55 | paused: false,
56 | write: function (buf) {
57 | t.deepEqual(buf, msg1)
58 | t.equal(i, 0)
59 | sink.paused = true
60 | setTimeout(() => {
61 | sink.paused = false
62 | sink.source.resume()
63 | })
64 | i++
65 | },
66 | end: function () {
67 | t.fail('should not end live stream')
68 | },
69 | })
70 | )
71 | setTimeout(t.end, 300)
72 | })
73 |
74 | tape('single, reload', function (t) {
75 | log = Log(filename, { blockSize: 64 * 1024 })
76 | log.stream({ offsets: false }).pipe(
77 | push.collect((err, ary) => {
78 | t.notOk(err)
79 | t.deepEqual(ary, [msg1])
80 | t.end()
81 | })
82 | )
83 | })
84 |
85 | var msg2 = Buf(0x20, 20)
86 | tape('second', function (t) {
87 | log.append(msg2, function (err) {
88 | t.notOk(err)
89 | log.onDrain(() => {
90 | log.stream({ offsets: false }).pipe(
91 | push.collect((err, ary) => {
92 | t.notOk(err)
93 | t.deepEqual(ary, [msg1, msg2])
94 | t.end()
95 | })
96 | )
97 | })
98 | })
99 | })
100 |
101 | var msg3 = Buf(0x30, 30)
102 | tape('live', function (t) {
103 | const expected = [msg1, msg2, msg3]
104 | const logStream = log.stream({ live: true, offsets: false })
105 | logStream.pipe({
106 | paused: false,
107 | write(buf) {
108 | t.deepEqual(buf, expected.shift())
109 | if (expected.length === 0) {
110 | logStream.abort()
111 | t.end()
112 | }
113 | },
114 | end() {},
115 | })
116 | log.append(msg3, function (err) {})
117 | })
118 |
119 | tape('offsets', function (t) {
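    | // expected offsets assume the 2-byte length prefix per record:
    | // msg1 (10 bytes) at 0, msg2 at 10 + 2, msg3 at 10 + 2 + 20 + 2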
120 | log.stream({ offsets: true }).pipe(
121 | push.collect((err, ary) => {
122 | t.notOk(err)
123 | t.deepEqual(ary, [
124 | { offset: 0, value: msg1 },
125 | { offset: 10 + 2, value: msg2 },
126 | { offset: 10 + 2 + 20 + 2, value: msg3 },
127 | ])
128 | t.end()
129 | })
130 | )
131 | })
132 |
133 | tape('push.drain', function (t) {
134 | const expected = [0, 12]
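    | // Returning false from the drain op below (when x === 12) is expected to abort
    | // the stream, so the record at offset 34 and the end callback should never fire.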
135 |
136 | log.stream({ offsets: true, values: false }).pipe(
137 | push.drain((x) => {
138 | t.true(expected.length > 0)
139 | t.equals(x, expected.shift())
140 | if (x === 12) return false
141 | if (x === 34) t.fail('should not receive more values after abort')
142 | }, (err) => {
143 | t.fail('end should not be called')
144 | })
145 | )
146 |
147 | setTimeout(() => {
148 | t.equals(expected.length, 0)
149 | t.end()
150 | }, 1000)
151 | })
152 |
153 | tape('pausable', function (t) {
154 | let i = 0
155 | let sink
156 | log.stream({ offsets: false }).pipe(
157 | (sink = {
158 | paused: false,
159 | write: function (buf) {
160 |       if (sink.paused) t.fail('should not write to sink while it is paused')
161 |
162 | if (i === 0) {
163 | t.deepEqual(buf, msg1, 'msg1')
164 | sink.paused = true
165 | setTimeout(() => {
166 | sink.paused = false
167 | sink.source.resume()
168 | }, 100)
169 | }
170 | if (i === 1) {
171 | t.deepEqual(buf, msg2, 'msg2')
172 | }
173 | if (i === 2) {
174 | t.deepEqual(buf, msg3, 'msg3')
175 | }
176 | i++
177 | },
178 | end: function () {
179 | t.end()
180 | },
181 | })
182 | )
183 | })
184 |
185 | tape('limit', function (t) {
186 | log.stream({ offsets: false, limit: 1 }).pipe(
187 | push.collect((err, ary) => {
188 | t.notOk(err)
189 | t.deepEqual(ary, [msg1])
190 | t.end()
191 | })
192 | )
193 | })
194 |
195 | tape('limit gte', function (t) {
196 | log.stream({ offsets: false, gte: 12, limit: 1 }).pipe(
197 | push.collect((err, ary) => {
198 | t.notOk(err)
199 | t.deepEqual(ary, [msg2])
200 | t.end()
201 | })
202 | )
203 | })
204 |
205 | tape('gte', function (t) {
206 | log.stream({ offsets: false, gte: 12 }).pipe(
207 | push.collect((err, ary) => {
208 | t.notOk(err)
209 | t.deepEqual(ary, [msg2, msg3])
210 | t.end()
211 | })
212 | )
213 | })
214 |
215 | tape('gt', function (t) {
216 | log.stream({ offsets: false, gt: 12 }).pipe(
217 | push.collect((err, ary) => {
218 | t.notOk(err)
219 | t.deepEqual(ary, [msg3])
220 | t.end()
221 | })
222 | )
223 | })
224 |
225 | tape('gt 0', function (t) {
226 | log.stream({ offsets: false, gt: 0 }).pipe(
227 | push.collect((err, ary) => {
228 | t.notOk(err)
229 | t.deepEqual(ary, [msg2, msg3])
230 | t.end()
231 | })
232 | )
233 | })
234 |
235 | tape('gt -1', function (t) {
236 | log.stream({ offsets: false, gt: -1 }).pipe(
237 | push.collect((err, ary) => {
238 | t.notOk(err)
239 | t.deepEqual(ary, [msg1, msg2, msg3])
240 | t.end()
241 | })
242 | )
243 | })
244 |
245 | tape('live gt', function (t) {
246 | const msg4 = Buf(0x40, 40)
247 | const logStream = log.stream({
248 | live: true,
249 | offsets: false,
250 | gt: 10 + 2 + 20 + 2,
251 | })
252 | logStream.pipe({
253 | paused: false,
254 | write(buf) {
255 | t.deepEqual(buf, msg4)
256 | logStream.abort()
257 | t.end()
258 | },
259 | end() {},
260 | })
261 | log.append(msg4, function (err) {})
262 | })
263 |
264 | tape('live gt -1', function (t) {
265 | var msg5 = Buf(0x50, 50)
266 | var msg6 = Buf(0x50, 60)
267 |
268 | const filename1 = '/tmp/dsf-test-stream-1.log'
269 | try {
270 | fs.unlinkSync(filename1)
271 | } catch (_) {}
272 | var newLog = Log(filename1, { blockSize: 64 * 1024 })
273 |
274 | const logStream = newLog.stream({ live: true, offsets: false, gt: -1 })
275 | const expected = [msg5, msg6]
276 | const sink = {
277 | paused: false,
278 | write(buf) {
279 | t.deepEquals(buf, expected.shift())
280 | if (expected.length === 0) {
281 | logStream.abort()
282 | t.end()
283 | }
284 | },
285 | end() {},
286 | }
287 | logStream.pipe(sink)
288 |
289 | setTimeout(() => {
290 | sink.paused = true
291 | logStream.resume()
292 | sink.paused = false
293 | logStream.resume()
294 | newLog.append(msg5, function (err) {})
295 | newLog.append(msg6, function (err) {})
296 | }, 100)
297 | })
298 |
299 | tape('double live', function (t) {
300 | const filename = '/tmp/dsf-test-stream-2.log'
301 |
302 | try {
303 | fs.unlinkSync(filename)
304 | } catch (_) {}
305 | var log = Log(filename, { blockSize: 64 * 1024 })
306 |
307 | var i = 0
308 |
309 | log.stream({ live: true, offsets: false }).pipe({
310 | paused: false,
311 | write: function (buf) {
312 | if (i === 0) {
313 | log.append(Buf(0x20, 20), function (err) {})
314 | ++i
315 | } else t.end()
316 | },
317 | })
318 |
319 | log.append(Buf(0x10, 10), function (err) {})
320 | })
321 |
322 | tape('close', function (t) {
323 | t.equal(log.streams.size, 0, 'no open streams')
324 | log.stream({ offsets: false }).pipe({
325 | paused: false,
326 | write: function () {},
327 | end: function () {
328 | t.end()
329 | },
330 | })
331 | log.close(() => {})
332 | })
333 |
--------------------------------------------------------------------------------
/test/stress-test.js:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: 2021 Anders Rune Jensen
2 | //
3 | // SPDX-License-Identifier: Unlicense
4 |
5 | const tape = require('tape')
6 | const fs = require('fs')
7 | const Log = require('../')
8 | const TooHot = require('too-hot')
9 |
10 | const items = 10e3
11 |
12 | function randomIntFromInterval(min, max) {
13 | return Math.floor(Math.random() * (max - min + 1) + min)
14 | }
15 |
16 | function randomStr(length) {
17 | let result = ''
18 | const characters =
19 | 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
20 | const charactersLength = characters.length
21 | for (let i = 0; i < length; ++i)
22 | result += characters.charAt(Math.floor(Math.random() * charactersLength))
23 | return result
24 | }
25 |
26 | for (var run = 0; run < 10; ++run) {
27 | tape('basic stress', function (t) {
28 | const filename = '/tmp/async-flumelog-basic-stress.log'
29 | const blockSize = randomIntFromInterval(12 * 1024, 64 * 1024)
30 |
31 | try {
32 | fs.unlinkSync(filename)
33 | } catch (_) {}
34 | var db = Log(filename, {
35 | blockSize,
36 | codec: require('flumecodec/json'),
37 | })
38 |
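    | // Wrap db.stream so every sink write is throttled by too-hot: when the event
    | // loop is overloaded, the sink pauses itself and resumes once the too-hot
    | // promise settles, stressing the log's pause/resume path under load.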
39 | const originalStream = db.stream
40 | db.stream = function (opts) {
41 | const tooHot = TooHot({ ceiling: 50, wait: 100, maxPause: Infinity })
42 | const s = originalStream(opts)
43 | const originalPipe = s.pipe.bind(s)
44 | s.pipe = function pipe(o) {
45 | let originalWrite = o.write
46 | o.write = (record) => {
47 | const hot = tooHot()
48 | if (hot && !s.sink.paused) {
49 | s.sink.paused = true
50 | hot.then(() => {
51 | originalWrite(record)
52 | s.sink.paused = false
53 | s.resume()
54 | })
55 | } else {
56 | originalWrite(record)
57 | }
58 | }
59 | return originalPipe(o)
60 | }
61 | return s
62 | }
63 |
64 | var data = []
65 | for (var i = 0; i < items; i++) {
66 |       var o = {
67 | key: '#' + i,
68 | value: {
69 | s: randomStr(randomIntFromInterval(100, 8000)),
70 | foo: Math.random(),
71 | bar: Date.now(),
72 | },
73 | }
74 | if (i % 10 === 0) o.value.baz = randomIntFromInterval(1, 1500)
75 | if (i % 3 === 0) o.value.cat = randomIntFromInterval(1, 1500)
76 | if (i % 2 === 0) o.value.hat = randomIntFromInterval(1, 1500)
77 | data.push(o)
78 | }
79 |
80 | db.append(data, function (err, offset) {
81 | var remove = db.since(function (v) {
82 | if (v < offset) return
83 | remove()
84 |
85 | var result1 = []
86 | var stream1Done = false,
87 | stream2Done = false
88 |
89 | db.stream({ offsets: false }).pipe({
90 | paused: false,
91 | write: function (value) {
92 | result1.push(value)
93 | },
94 | end: function () {
95 | t.equal(result1.length, data.length)
96 | //t.deepEqual(data, result)
97 | if (stream2Done) db.close(t.end)
98 | else stream1Done = true
99 | },
100 | })
101 |
102 | var result2 = []
103 |
104 | db.stream({ offsets: false }).pipe({
105 | paused: false,
106 | write: function (value) {
107 | result2.push(value)
108 | },
109 | end: function () {
110 | t.equal(result2.length, data.length)
111 | //t.deepEqual(data, result)
112 | if (stream1Done) db.close(t.end)
113 | else stream2Done = true
114 | },
115 | })
116 | })
117 | })
118 | })
119 | }
120 |
121 | for (var run = 0; run < 10; ++run) {
122 | tape('live stress', function (t) {
123 | const filename = '/tmp/async-flumelog-live-stress.log'
124 |
125 | try {
126 | fs.unlinkSync(filename)
127 | } catch (_) {}
128 | var db = Log(filename, {
129 | blockSize: 64 * 1024,
130 | writeTimeout: 10,
131 | codec: require('flumecodec/json'),
132 | })
133 |
134 | const originalStream = db.stream
135 | db.stream = function (opts) {
136 | const tooHot = TooHot({ ceiling: 90, wait: 100, maxPause: Infinity })
137 | const s = originalStream(opts)
138 | const originalPipe = s.pipe.bind(s)
139 | s.pipe = function pipe(o) {
140 | let originalWrite = o.write.bind(o)
141 | o.write = (record) => {
142 | const hot = tooHot()
143 | if (hot && !s.sink.paused) {
144 | //console.log("Hot in here", hot)
145 | s.sink.paused = true
146 | hot.then(() => {
147 | originalWrite(record)
148 | s.sink.paused = false
149 | s.resume()
150 | })
151 | } else {
152 | originalWrite(record)
153 | }
154 | }
155 | return originalPipe(o)
156 | }
157 | return s
158 | }
159 |
160 | var sink = {
161 | paused: false,
162 | array: [],
163 | write(rec) {
164 | this.array.push(rec)
165 | },
166 | end() {
167 | throw new Error('live stream should not end')
168 | },
169 | }
170 | db.stream({ live: true, offsets: false }).pipe(sink)
171 |
172 | var data = [],
173 | latestOffset = 0
174 | for (var i = 0; i < items; i++) {
175 | const d = {
176 | key: '#' + i,
177 | value: {
178 | foo: Math.random(),
179 | bar: Date.now(),
180 | },
181 | }
182 | data.push(d)
183 | db.append(d, function (err, offset) {
184 | if (offset > latestOffset) latestOffset = offset
185 | })
186 | }
187 |
188 | function checkStreamDone() {
189 | if (sink.array.length === data.length) {
190 | t.deepEqual(sink.array, data)
191 | t.end()
192 | } else setTimeout(checkStreamDone, 200)
193 | }
194 |
195 | var remove = db.since(function (offset) {
196 | if (offset < latestOffset) return
197 | if (remove) remove()
198 | // this is crazy, db.since is set first, then streams are
199 | // resumed. So we need to wait for the stream to resume and
200 | // finish before we can check that we got everything
201 | setTimeout(checkStreamDone, 200)
202 | })
203 | })
204 | }
205 |
206 | for (var run = 0; run < 10; ++run) {
207 | tape('resume stress', function (t) {
208 | const filename = '/tmp/async-flumelog-live-stress.log'
209 |
210 | try {
211 | fs.unlinkSync(filename)
212 | } catch (_) {}
213 | var db = Log(filename, {
214 | blockSize: 64 * 1024,
215 | writeTimeout: 10,
216 | codec: require('flumecodec/json'),
217 | })
218 |
219 | const originalStream = db.stream
220 | db.stream = function (opts) {
221 | const tooHot = TooHot({ ceiling: 90, wait: 100, maxPause: Infinity })
222 | const s = originalStream(opts)
223 | const originalPipe = s.pipe.bind(s)
224 | s.pipe = function pipe(o) {
225 | let originalWrite = o.write.bind(o)
226 | o.write = (record) => {
227 | const hot = tooHot()
228 | if (hot && !s.sink.paused) {
229 | //console.log("Hot in here", hot)
230 | s.sink.paused = true
231 | hot.then(() => {
232 | originalWrite(record)
233 | s.sink.paused = false
234 | s.resume()
235 | })
236 | } else {
237 | originalWrite(record)
238 | }
239 | }
240 | return originalPipe(o)
241 | }
242 | return s
243 | }
244 |
245 | var sink = {
246 | paused: false,
247 | array: [],
248 | write(rec) {
249 | this.array.push(rec)
250 | },
251 | end() {
252 | throw new Error('live stream should not end')
253 | },
254 | }
255 | const stream = db.stream({ live: true, offsets: false })
256 | stream.pipe(sink)
257 |
258 | var data = [],
259 | latestOffset = 0
260 | for (var i = 0; i < items; i++) {
261 | const d = {
262 | key: '#' + i,
263 | value: {
264 | foo: Math.random(),
265 | bar: Date.now(),
266 | },
267 | }
268 | data.push(d)
269 | db.append(d, function (err, offset) {
270 | if (offset > latestOffset) latestOffset = offset
271 | })
272 | }
273 |
274 | function checkStreamDone() {
275 | stream.resume() // stress test this
276 |
277 | if (sink.array.length === data.length) {
278 | t.deepEqual(sink.array, data)
279 | t.end()
280 | } else setTimeout(checkStreamDone, randomIntFromInterval(50, 200))
281 | }
282 |
283 | var remove = db.since(function (offset) {
284 | if (offset < latestOffset) return
285 | if (remove) remove()
286 | // this is crazy, db.since is set first, then streams are
287 | // resumed. So we need to wait for the stream to resume and
288 | // finish before we can check that we got everything
289 | setTimeout(checkStreamDone, 200)
290 | })
291 | })
292 | }
293 |
--------------------------------------------------------------------------------