├── .gitignore
├── .jsdoc.json
├── .jshintrc
├── .nvmrc
├── .travis.yml
├── LICENSE
├── README.md
├── bin
│   └── kfs.js
├── doc
│   ├── about.md
│   ├── cli.md
│   ├── img
│   │   ├── FlushTimePerTestRunByDriveType.png
│   │   ├── fileSizeVsExecutionTimeUsingFlush.png
│   │   ├── kfs-vs-vanilla-two-sided-test-128.png
│   │   ├── kfs-vs-vanilla-two-sided-test.png
│   │   ├── mean-by-operation-and-db-hdd-128.png
│   │   ├── mean-by-operation-and-db-hdd.png
│   │   ├── mean-by-operation-and-db-ssd-128.png
│   │   ├── mean-by-operation-and-db-ssd.png
│   │   ├── performance-test-overview-kfs-vs-vanilla-hdd-128.png
│   │   ├── performance-test-overview-kfs-vs-vanilla-hdd.png
│   │   ├── performance-test-overview-kfs-vs-vanilla-ssd-128.png
│   │   ├── performance-test-overview-kfs-vs-vanilla-ssd.png
│   │   ├── sd-by-operation-and-db-hdd-128.png
│   │   ├── sd-by-operation-and-db-hdd.png
│   │   ├── sd-by-operation-and-db-ssd-128.png
│   │   ├── sd-by-operation-and-db-ssd.png
│   │   ├── varying_cLevel.png
│   │   └── xor-metric-distribution.png
│   ├── index.json
│   ├── kfs.md
│   └── performance-testing.md
├── index.js
├── lib
│   ├── b-table.js
│   ├── block-stream.js
│   ├── constants.js
│   ├── read-stream.js
│   ├── s-bucket.js
│   ├── utils.js
│   └── write-stream.js
├── package.json
├── perf
│   ├── index.js
│   ├── read-speed.js
│   ├── unlink-speed.js
│   └── write-speed.js
└── test
    ├── b-table.integration.js
    ├── b-table.unit.js
    ├── block-stream.unit.js
    ├── read-stream.unit.js
    ├── s-bucket.integration.js
    ├── s-bucket.unit.js
    ├── utils.unit.js
    └── write-stream.unit.js
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 |
6 | # Runtime data
7 | pids
8 | *.pid
9 | *.seed
10 |
11 | # Directory for instrumented libs generated by jscoverage/JSCover
12 | lib-cov
13 |
14 | # Coverage directory used by tools like istanbul
15 | coverage
16 |
17 | # nyc test coverage
18 | .nyc_output
19 |
20 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
21 | .grunt
22 |
23 | # node-waf configuration
24 | .lock-wscript
25 |
26 | # Compiled binary addons (http://nodejs.org/api/addons.html)
27 | build/Release
28 |
29 | # Dependency directories
30 | node_modules
31 | jspm_packages
32 |
33 | # Optional npm cache directory
34 | .npm
35 |
36 | # Optional REPL history
37 | .node_repl_history
38 |
39 | jsdoc
40 | *.swp
41 |
--------------------------------------------------------------------------------
/.jsdoc.json:
--------------------------------------------------------------------------------
1 | {
2 | "opts": {
3 | "template": "node_modules/ink-docstrap/template"
4 | },
5 | "templates": {
6 | "systemName": "KFS",
7 | "copyright": "Copyright 2016 Storj Labs, Inc",
8 | "includeDate": false,
9 | "navType": "vertical",
10 | "theme": "storj",
11 | "logoFile": "img/storj-kfs.svg",
12 | "linenums": false,
13 | "collapseSymbols": false,
14 | "inverseNav": false,
15 | "outputSourceFiles": true,
16 | "outputSourcePath": true,
17 | "dateFormat": "",
18 | "sort": "longname",
19 | "syntaxTheme": "dark"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/.jshintrc:
--------------------------------------------------------------------------------
1 | {
2 | "laxbreak": true,
3 | "bitwise": false,
4 | "browser": true,
5 | "camelcase": false,
6 | "curly": true,
7 | "devel": false,
8 | "eqeqeq": true,
9 | "esnext": true,
10 | "freeze": true,
11 | "immed": true,
12 | "indent": 2,
13 | "latedef": false,
14 | "newcap": false,
15 | "noarg": true,
16 | "node": true,
17 | "noempty": true,
18 | "nonew": true,
19 | "quotmark": "single",
20 | "regexp": true,
21 | "smarttabs": false,
22 | "strict": true,
23 | "trailing": true,
24 | "undef": true,
25 | "unused": true,
26 | "maxparams": 4,
27 | "maxstatements": 14,
28 | "maxcomplexity": 6,
29 | "maxdepth": 3,
30 | "maxlen": 80,
31 | "multistr": true,
32 | "predef": [
33 | "after",
34 | "afterEach",
35 | "before",
36 | "beforeEach",
37 | "describe",
38 | "exports",
39 | "it",
40 | "module",
41 | "require"
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/.nvmrc:
--------------------------------------------------------------------------------
1 | 6.9.1
2 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 | - "6.9.1"
4 | after_script:
5 | - npm run coverage
6 | - cat ./coverage/lcov.info | ./node_modules/.bin/coveralls
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <http://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <http://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <http://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | _**Notice**: Development on this repo is deprecated as we continue our v3 rearchitecture. Please see https://github.com/storj/storj for ongoing v3 development._
2 |
3 | KFS (Kademlia File Store)
4 | =========================
5 |
6 | [](https://travis-ci.org/Storj/kfs)
7 | [](https://coveralls.io/r/Storj/kfs)
8 | [](https://www.npmjs.com/package/kfs)
9 | [](https://raw.githubusercontent.com/storj/kfs/master/LICENSE)
10 |
11 | The KFS system describes a method for managing the storage layer of nodes on
12 | the [Storj Network](https://storj.io) by creating a sharded local database
13 | where content-addressable data is placed in a shard using the same routing
14 | metric and algorithm used by the Kademlia distributed hash table.
15 |
16 | Be sure to read about the
17 | [motivation and how it works](https://storj.github.io/kfs/tutorial-about.html)!
18 |
19 | Quick Start
20 | -----------
21 |
22 | Install the `kfs` package using [Node Package Manager].
23 |
24 | ```
25 | npm install kfs --save
26 | ```
27 |
28 | This will install `kfs` as a dependency of your own project, used as shown
29 | below. See the [documentation](https://storj.github.io/kfs/) for in-depth
30 | usage details, or install globally to use the `kfs` command line utility.
31 |
32 | ```
33 | const kfs = require('kfs');
34 | const store = kfs('path/to/store');
35 |
36 | store.writeFile('some key', Buffer.from('some data'), (err) => {
37 | console.log(err || 'File written to store!');
38 | });
39 | ```
40 |
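If installed globally, the same store can be driven from the shell. A quick
sketch using the commands defined in `bin/kfs.js` (the `-d` flag selects the
database path):

```
kfs -d path/to/store write "some key" ./some-file
kfs -d path/to/store read "some key" > ./some-file-copy
kfs -d path/to/store stat --human
```
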
41 | License
42 | -------
43 |
44 | KFS - A Local File Storage System Inspired by Kademlia
45 | Copyright (C) 2016 Storj Labs, Inc
46 |
47 | This program is free software: you can redistribute it and/or modify
48 | it under the terms of the GNU General Public License as published by
49 | the Free Software Foundation, either version 3 of the License, or
50 | (at your option) any later version.
51 |
52 | This program is distributed in the hope that it will be useful,
53 | but WITHOUT ANY WARRANTY; without even the implied warranty of
54 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55 | GNU General Public License for more details.
56 |
57 | You should have received a copy of the GNU General Public License
58 | along with this program. If not, see [http://www.gnu.org/licenses/].
59 |
60 | [Kademlia]: https://en.wikipedia.org/wiki/Kademlia "Kademlia"
61 | [Storj Network]: https://storj.io "Storj Labs"
62 | [LevelDB]: http://leveldb.org/ "LevelDB"
63 | [distance]: https://en.wikipedia.org/wiki/Kademlia#Routing_tables
64 | [Node Package Manager]: https://npmjs.org "Node Package Manager"
65 | [documentation]: http://bookch.in/kfs/ "Package Documentation"
66 |
--------------------------------------------------------------------------------
/bin/kfs.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | 'use strict';
4 |
5 | const kfs = require('..');
6 | const program = require('commander');
7 | const path = require('path');
8 | const fs = require('fs');
9 | const {homedir} = require('os');
10 | const async = require('async');
11 |
12 | const HOME = homedir();
13 | const DEFAULT_DB = path.join(HOME, '.kfs', 'default');
14 |
15 | function _openDatabase(callback) {
16 |   let db;
17 |
18 |   try {
19 |     db = kfs(program.db);
20 |   } catch (err) {
21 |     return callback(err);
22 |   }
23 |
24 |   callback(null, db);
25 | }
26 |
27 | function _writeFileToDatabase(fileKey, filePath) {
28 |   _openDatabase((err, db) => {
29 |     if (err) {
30 |       process.stderr.write('[error] ' + err.message);
31 |       process.exit(1);
32 |     }
33 |
34 |     if (filePath) {
35 |       if (!kfs.utils.fileDoesExist(filePath)) {
36 |         process.stderr.write('[error] ' + 'File does not exist');
37 |         process.exit(1);
38 |       }
39 |
40 |       const fileBuffer = fs.readFileSync(filePath);
41 |
42 |       db.writeFile(fileKey, fileBuffer, (err) => {
43 |         if (err) {
44 |           process.stderr.write('[error] ' + err.message);
45 |           process.exit(1);
46 |         }
47 |
48 |         process.exit(0);
49 |       });
50 |     } else {
51 |       db.createWriteStream(fileKey, (err, writableStream) => {
52 |         if (err) {
53 |           process.stderr.write('[error] ' + err.message);
54 |           process.exit(1);
55 |         }
56 |
57 |         writableStream.on('error', (err) => {
58 |           process.stderr.write('[error] ' + err.message);
59 |           process.exit(1);
60 |         });
61 |
62 |         writableStream.on('finish', () => {
63 |           process.exit(0);
64 |         });
65 |
66 |         process.stdin.pipe(writableStream);
67 |       });
68 |     }
69 |   });
70 | }
71 |
72 | function _readFileFromDatabase(fileKey, outPath) {
73 |   _openDatabase((err, db) => {
74 |     if (err) {
75 |       process.stderr.write('[error] ' + err.message);
76 |       process.exit(1);
77 |     }
78 |
79 |     db.createReadStream(fileKey, (err, readableStream) => {
80 |       if (err) {
81 |         process.stderr.write('[error] ' + err.message);
82 |         process.exit(1);
83 |       }
84 |
85 |       readableStream.on('error', (err) => {
86 |         process.stderr.write('[error] ' + err.message);
87 |         process.exit(1);
88 |       });
89 |
90 |       readableStream.on('end', () => {
91 |         process.exit(0);
92 |       });
93 |
94 |       if (outPath) {
95 |         const writeStream = fs.createWriteStream(outPath);
96 |
97 |         writeStream.on('error', (err) => {
98 |           process.stderr.write('[error] ' + err.message);
99 |           process.exit(1);
100 |         });
101 |
102 |         writeStream.on('finish', () => {
103 |           process.exit(0);
104 |         });
105 |
106 |         readableStream.pipe(writeStream);
107 |       } else {
108 |         readableStream.pipe(process.stdout);
109 |       }
110 |     });
111 |   });
112 | }
113 |
114 | function _unlinkFileFromDatabase(fileKey) {
115 |   _openDatabase((err, db) => {
116 |     if (err) {
117 |       process.stderr.write('[error] ' + err.message);
118 |       process.exit(1);
119 |     }
120 |
121 |     db.unlink(fileKey, (err) => {
122 |       if (err) {
123 |         process.stderr.write('[error] ' + err.message);
124 |         process.exit(1);
125 |       }
126 |
127 |       db.flush((err) => {
128 |         if (err) {
129 |           process.stderr.write('[error] ' + err.message);
130 |           process.exit(1);
131 |         }
132 |
133 |         process.exit(0);
134 |       });
135 |     });
136 |   });
137 | }
138 |
139 | function _statDatabase(keyOrIndex, opts) {
140 |   _openDatabase((err, db) => {
141 |     if (err) {
142 |       process.stderr.write('[error] ' + err.message);
143 |       process.exit(1);
144 |     }
145 |
146 |     if (!keyOrIndex) {
147 |       db.stat(_statCallback);
148 |     } else {
149 |       db.stat(keyOrIndex, _statCallback);
150 |     }
151 |
152 |     function _statCallback(err, stats) {
153 |       if (err) {
154 |         process.stderr.write('[error] ' + err.message);
155 |         process.exit(1);
156 |       }
157 |
158 |       let spacing = 2;
159 |
160 |       stats = stats.map(function(sBucketStats) {
161 |         let perc = sBucketStats.sBucketStats.size /
162 |           (sBucketStats.sBucketStats.size + sBucketStats.sBucketStats.free);
163 |
164 |         sBucketStats.sBucketStats.perc = (perc * 100).toFixed(2);
165 |
166 |         if (opts.human) {
167 |           sBucketStats.sBucketStats.size = kfs.utils.toHumanReadableSize(
168 |             sBucketStats.sBucketStats.size
169 |           );
170 |         }
171 |
172 |         let sizeOutLength = sBucketStats.sBucketStats.size.toString().length;
173 |         spacing = spacing < sizeOutLength ? sizeOutLength + 1 : spacing;
174 |
175 |         return sBucketStats;
176 |       });
177 |
178 |       stats.forEach((sBucketStats) => {
179 |         process.stdout.write(
180 |           kfs.utils.createSbucketNameFromIndex(sBucketStats.sBucketIndex) +
181 |           '\t' +
182 |           sBucketStats.sBucketStats.size +
183 |           Array(
184 |             spacing + 1 - sBucketStats.sBucketStats.size.toString().length
185 |           ).join(' ') +
186 |           '(' + sBucketStats.sBucketStats.perc + '%)' +
187 |           '\n'
188 |         );
189 |       });
190 |
191 |       process.exit(0);
192 |     }
193 |   });
194 | }
195 |
196 | function _listItemsInDatabase(bucketIndex, env) {
197 |   _openDatabase((err, db) => {
198 |     if (err) {
199 |       process.stderr.write('[error] ' + err.message);
200 |       process.exit(1);
201 |     }
202 |
203 |     bucketIndex = !isNaN(bucketIndex)
204 |       ? Number(bucketIndex)
205 |       : bucketIndex;
206 |
207 |     db.list(bucketIndex, (err, keys) => {
208 |       if (err) {
209 |         process.stderr.write('[error] ' + err.message);
210 |         process.exit(1);
211 |       }
212 |
213 |       keys.forEach((result) => {
214 |         process.stdout.write(
215 |           result.baseKey + '\t' +
216 |           (env.human ?
217 |             '~' + kfs.utils.toHumanReadableSize(result.approximateSize) :
218 |             '~' + result.approximateSize) +
219 |           '\n'
220 |         );
221 |       });
222 |       process.exit(0);
223 |     });
224 |   });
225 | }
226 |
227 | function _showHelp() {
228 |   program.help();
229 | }
230 |
231 | program
232 |   .version(require('../package').version)
233 |   .option(
234 |     '-d, --db <db_path>',
235 |     'path the kfs database to use (default: ' + DEFAULT_DB + ')',
236 |     DEFAULT_DB
237 |   );
238 |
239 | program
240 |   .command('write <file_key> [file_path]')
241 |   .description('write the file to the database (or read from stdin)')
242 |   .action(_writeFileToDatabase);
243 |
244 | program
245 |   .command('read <file_key> [file_path]')
246 |   .description('read the file from the database (or write to stdout)')
247 |   .action(_readFileFromDatabase);
248 |
249 | program
250 |   .command('unlink <file_key>')
251 |   .description('unlink (delete) the file from the database')
252 |   .action(_unlinkFileFromDatabase);
253 |
254 | program
255 |   .command('list <bucket_index_or_file_key>')
256 |   .option('-h, --human', 'print human readable format')
257 |   .description('list all of the file keys in the given bucket')
258 |   .action(_listItemsInDatabase);
259 |
260 | program
261 |   .command('stat [bucket_index_or_file_key]')
262 |   .option('-h, --human', 'print human readable format')
263 |   .description('get the free and used space for the database')
264 |   .action(_statDatabase);
265 |
266 | program
267 |   .command('*')
268 |   .description('print usage information to the console')
269 |   .action(_showHelp);
270 |
271 | program.parse(process.argv);
272 |
273 | if (process.argv.length < 3) {
274 |   program.help();
275 | }
276 |
--------------------------------------------------------------------------------
/doc/about.md:
--------------------------------------------------------------------------------
1 | The Storj network consists of a number of distributed peers who provide
2 | storage capacity for lease to others. In its current implementation, these
3 | nodes store encrypted shards and their associated metadata in a [LevelDB].
4 | LevelDB provides a number of features that make it desirable for this use
5 | case; these include its lexicographically sorted keys providing fast lookups
6 | for content-addressable values, fast and efficient compression, and perhaps
7 | most notably its portability which allows the Storj software to run on a
8 | wide range of hardware including dated or underpowered computers.
9 |
10 | However, due to the nature of LevelDB's design and its implementation in
11 | the Storj software, its performance suffers after the size of the database
12 | exceeds approximately 100GiB. This impact is larger on lower-end systems and
13 | can also vary based on the type of disk in use. These performance issues seem
14 | to arise from LevelDB's compaction mechanism (which is an otherwise desirable
15 | feature). In addition to the cost of compaction, LevelDB blocks reads and
16 | writes during this process, which causes storage nodes to become effectively
17 | offline until the process completes.
18 |
19 | These properties indicate that if the size of a single database can be given an
20 | upper bound, then the cost of compaction can be significantly reduced to an
21 | acceptable level. Furthermore, with a single database, if one level becomes
22 | corrupted, deleted, or otherwise inaccessible, the entire database may become
23 | unusable and unrecoverable. For these reasons, the KFS system seeks to create
24 | a series of size-capped databases where data is stored in a given "shard"
25 | based on a deterministic metric to ensure a sufficiently random and even
26 | spread to bound the cost of compaction, to reduce the impact of corruption, and
27 | to completely eliminate the need to maintain an index or state machine to
28 | efficiently lookup stored data.
29 |
30 | ### S-Buckets and Routing
31 |
32 | KFS requires that there be a reference identifier, which can be any arbitrary
33 | `R` bit key. This can be randomly generated upon creation of the database or
34 | derived from some other application or protocol specific information. In the
35 | Storj network, nodes are addressed with a 160 bit node identifier derived from
36 | the public portion of an ECDSA key pair. This *Reference ID* is used to
37 | calculate the database shard or *S-Bucket* to which a given piece of data
38 | belongs. Collectively, these S-Buckets form the *B-Table*.
39 |
40 | In KFS, there are a total of `B` S-Buckets, numbered `0`-`B-1`. To determine
41 | which bucket a piece of raw binary data belongs in, calculate the [distance]
42 | between the first byte of the hash of the data and the first byte of the
43 | reference ID. This is to say that if the distance between those bytes is 137,
44 | then the raw binary data should be stored in S-Bucket 137. An S-Bucket has a
45 | fixed size, `S`, in bytes. This means that a KFS database has a maximum size of
46 | `B * S` bytes. Once an S-Bucket is full, no more data can be placed in it. Once
47 | a KFS database is full, another should be created using a new Reference ID.
48 | Given the default constants, KFS databases are capped at a maximum of 8TiB each.
49 |
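For illustration, a minimal sketch of this routing step (SHA-256 stands in
here for whatever content hash the application uses; only the first byte of
each identifier matters):

```
const { createHash } = require('crypto');

// referenceId: Buffer of R/8 bytes; data: Buffer of raw binary data.
// Returns the S-Bucket index (0 through B-1) the data belongs in.
function getSBucketIndex(referenceId, data) {
  const hash = createHash('sha256').update(data).digest();

  return referenceId[0] ^ hash[0]; // Kademlia XOR distance on first bytes
}
```
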
50 | ### Keying Data by Chunks
51 |
52 | To optimize the efficiency of reads and writes in KFS, data is stored in `C`
53 | sized chunks (or less), keyed by the full content's hash, followed by a
54 | space and a numerical index. This is performed to ensure that key/value pairs
55 | are small and that reading and writing data to and from an S-Bucket is done
56 | sequentially, allowing for efficient streaming of data both in and out of
57 | the S-Bucket.
58 |
59 | Since LevelDB sorts items lexicographically, keys for data chunks should be
60 | strings and consist of:
61 |
62 | ```
63 | Hexadecimal(Hash) + ' ' + 00000N
64 | ```
65 |
66 | The number of preceding zeroes in the numerical index should be set such that
67 | an S-Bucket that contains only a single file split into `C` sized chunks can
68 | still be read sequentially from the database. Using the default constants
69 | would make the highest number index 262144, so the number of leading zeroes
70 | should be less than or equal to five.
71 |
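A minimal sketch of this key construction, assuming the default constants
(the `createItemKey` name is ours):

```
const S = 34359738368; // default S-Bucket size in bytes (32GiB)
const C = 131072;      // default chunk size in bytes (128KiB)

// Zero-pad the chunk index so that keys sort lexicographically
// in the order the chunks should be read.
function createItemKey(fileKeyHex, index) {
  const width = Math.floor(S / C).toString().length; // 6 with the defaults
  return `${fileKeyHex} ${index.toString().padStart(width, '0')}`;
}

createItemKey('adc83b19e793491b1c6ea0fd8b46cd9f32e592fc', 7);
// => 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc 000007'
```
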
72 | ### Ad-Hoc S-Bucket Initialization
73 |
74 | Given the low cost of creating and opening a LevelDB, it is not necessary to
75 | create all `B` S-Buckets at once. Instead, an S-Bucket can be created the first
76 | time data is to be stored inside of it. Additionally, S-Buckets can be opened
77 | and closed as needed, eliminating the potential overhead of opening a large
78 | number of file descriptors. Operations on a given S-Bucket should be added to
79 | a queue that, once drained, may trigger a close of the S-Bucket's underlying
80 | database, as in the sketch below.
81 |
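A rough sketch of that lifecycle (the `LazyBucketPool` class and the
`openBucketDatabase` helper are hypothetical, not part of any KFS API):

```
// Hypothetical sketch: buckets open on first use and close once their
// operation queue drains, keeping the open file descriptor count low.
class LazyBucketPool {
  constructor() {
    this.buckets = new Map();
  }

  run(index, operation, done) {
    let bucket = this.buckets.get(index);
    if (!bucket) {
      bucket = { db: openBucketDatabase(index), pending: 0 }; // assumed helper
      this.buckets.set(index, bucket);
    }
    bucket.pending++;
    operation(bucket.db, (err, result) => {
      if (--bucket.pending === 0) {
        this.buckets.delete(index);
        bucket.db.close(); // queue drained, so the close may be triggered
      }
      done(err, result);
    });
  }
}
```
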
82 | Kademlia's metric for determining distance is defined as the result of
83 | the XOR operation on a set of bits interpreted as an integer. As such, for
84 | two randomly generated sets of bits, the result is uniformly distributed.
85 | Therefore the XOR distance between the pseudo-random first bytes of the
86 | reference ID and the hash gives every bucket an equal chance of being selected.
87 |
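This is easy to check empirically; the quick sketch below (assuming Node.js)
tallies the XOR of ten million pairs of random bytes:

```
const crypto = require('crypto');
const bins = new Array(256).fill(0);

// Tally the XOR distance for ten million random byte pairs.
for (let i = 0; i < 1e7; i++) {
  const [a, b] = crypto.randomBytes(2);
  bins[a ^ b]++;
}

// Every bin should land near the theoretical value of 1e7 / 256 (about 39062).
console.log(Math.min(...bins), Math.max(...bins));
```
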
88 | Below is the frequency distribution plotted with ten million simulated calculations.
89 | As expected, the distribution is uniform (the red dotted line indicates the theoretical
90 | value each bin should have):
91 |
92 | 
93 |
94 | Even with a uniform distribution, as the node reaches capacity, some buckets will fill sooner than others.
95 | Offers that would be sorted into these buckets should be declined and relayed to other nodes.
96 |
97 | Constants
98 | ---------
99 |
100 | | Name | Description | Default |
101 | |------|------------------------------------|-----------------------|
102 | | B | Number of columns in the B-table | 256 |
103 | | S | Size (in bytes) of an S-Bucket | 34359738368 (32GiB) |
104 | | C | Size (in bytes) of a file chunk | 131072 |
105 | | R | Number of bits in the Reference ID | 160 |
106 |
107 | Considerations Specific to Storj
108 | --------------------------------
109 |
110 | * Storj farmers receive contracts for data shards that are already close to
111 | their own Node ID. To improve S-Bucket distribution, it may be desirable to
112 | double hash the data or otherwise force a degree of randomness before
113 | selecting an S-Bucket for storage.
114 | * The use of KFS in the Storj network creates an upper limit to how much data
115 | can be stored by a given Node ID (or identity). This encourages farmers to
116 | operate multiple nodes with different identities which lends itself to better
117 | network integration.
118 | * The use of HD (hierarchical deterministic) private keys could allow a single
119 | farmer identity to assume multiple Reference IDs, thus eliminating the limit.
120 | * KFS does not track or store metadata about the contents of an S-Bucket, which
121 | in the context of the Storj network would include contracts and other special
122 | information related to a piece of data. Applications should handle this via
123 | their own means.
124 |
125 |
126 |
--------------------------------------------------------------------------------
/doc/cli.md:
--------------------------------------------------------------------------------
1 | KFS comes bundled with a handy command line interface for dealing with your
2 | databases. You can access this tool by installing the package globally:
3 |
4 | ```
5 | npm install -g kfs
6 | ```
7 |
8 | Once the installation completes, you can use the `kfs` command. To see usage
9 | information, run `kfs --help`:
10 |
11 | ```
12 | Usage: kfs [options] [command]
13 |
14 |
15 | Commands:
16 |
17 | write [file_path] write the file to the database (or read from stdin)
18 | read [file_path] read the file from the database (or write to stdout)
19 | unlink unlink (delete) the file from the database
20 | list [options] list all of the file keys in the given bucket
21 | stat [options] [bucket_index_or_file_key] get the free and used space for the database
22 | compact trigger a compaction of all database buckets
23 | * print usage information to the console
24 |
25 | Options:
26 |
27 | -h, --help output usage information
28 | -V, --version output the version number
29 | -d, --db path the kfs database to use (default: /home/bookchin/.kfs/default)
30 | ```
31 |
32 | ### Writing a File To KFS
33 |
34 | There are two ways to write a file to a KFS database:
35 |
36 | 1. Supplying an optional path to an existing file
37 | 2. Reading from STDIN
38 |
39 | To write a file that exists on the file system already, just supply its path:
40 |
41 | ```
42 | kfs write somefilekey /path/to/my/file.bin
43 | ```
44 |
45 | To have the CLI read from STDIN, just pipe the output of another program to it:
46 |
47 | ```
48 | cat /path/to/my/file.bin | kfs write somefilekey
49 | ```
50 |
51 | If an error is encountered, the process will terminate and write the error
52 | message to STDERR.
53 |
54 | ### Reading a File From KFS
55 |
56 | There are two ways to read a file from a KFS database:
57 |
58 | 1. Supplying a path to write the output
59 | 2. Writing to STDOUT
60 |
61 | To read a file from a KFS database and write it to disk, just supply an output path:
62 |
63 | ```
64 | kfs read somefilekey /path/to/write/file.webm
65 | ```
66 |
67 | To have the CLI write to STDOUT, just pipe the output to another program:
68 |
69 | ```
70 | kfs read somefilekey | mplayer -
71 | ```
72 |
73 | If an error is encountered, the process will terminate and write the error
74 | message to STDERR.
75 |
76 | ### Unlinking a File From KFS
77 |
78 | To unlink a file (mark it for deletion), simply provide the file key:
79 |
80 | ```
81 | kfs unlink somefilekey
82 | ```
83 |
84 | If an error is encountered, the process will terminate and write the error
85 | message to STDERR.
86 |
87 | ### Getting Stats for a KFS
88 |
89 | You can see the amount of space available for a given file key:
90 |
91 | ```
92 | kfs stat somefilekey
93 | 246.s 34359738368
94 | ```
95 |
96 | This writes the S-bucket index and the number of bytes available to STDOUT.
97 | You can also view this in a human readable form with the `-h` option:
98 |
99 | ```
100 | kfs stat somefilekey -h
101 | 246.s 32.0 GiB
102 | ```
103 |
--------------------------------------------------------------------------------
/doc/img/FlushTimePerTestRunByDriveType.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/FlushTimePerTestRunByDriveType.png
--------------------------------------------------------------------------------
/doc/img/fileSizeVsExecutionTimeUsingFlush.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/fileSizeVsExecutionTimeUsingFlush.png
--------------------------------------------------------------------------------
/doc/img/kfs-vs-vanilla-two-sided-test-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/kfs-vs-vanilla-two-sided-test-128.png
--------------------------------------------------------------------------------
/doc/img/kfs-vs-vanilla-two-sided-test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/kfs-vs-vanilla-two-sided-test.png
--------------------------------------------------------------------------------
/doc/img/mean-by-operation-and-db-hdd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/mean-by-operation-and-db-hdd-128.png
--------------------------------------------------------------------------------
/doc/img/mean-by-operation-and-db-hdd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/mean-by-operation-and-db-hdd.png
--------------------------------------------------------------------------------
/doc/img/mean-by-operation-and-db-ssd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/mean-by-operation-and-db-ssd-128.png
--------------------------------------------------------------------------------
/doc/img/mean-by-operation-and-db-ssd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/mean-by-operation-and-db-ssd.png
--------------------------------------------------------------------------------
/doc/img/performance-test-overview-kfs-vs-vanilla-hdd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/performance-test-overview-kfs-vs-vanilla-hdd-128.png
--------------------------------------------------------------------------------
/doc/img/performance-test-overview-kfs-vs-vanilla-hdd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/performance-test-overview-kfs-vs-vanilla-hdd.png
--------------------------------------------------------------------------------
/doc/img/performance-test-overview-kfs-vs-vanilla-ssd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/performance-test-overview-kfs-vs-vanilla-ssd-128.png
--------------------------------------------------------------------------------
/doc/img/performance-test-overview-kfs-vs-vanilla-ssd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/performance-test-overview-kfs-vs-vanilla-ssd.png
--------------------------------------------------------------------------------
/doc/img/sd-by-operation-and-db-hdd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/sd-by-operation-and-db-hdd-128.png
--------------------------------------------------------------------------------
/doc/img/sd-by-operation-and-db-hdd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/sd-by-operation-and-db-hdd.png
--------------------------------------------------------------------------------
/doc/img/sd-by-operation-and-db-ssd-128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/sd-by-operation-and-db-ssd-128.png
--------------------------------------------------------------------------------
/doc/img/sd-by-operation-and-db-ssd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/sd-by-operation-and-db-ssd.png
--------------------------------------------------------------------------------
/doc/img/varying_cLevel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/varying_cLevel.png
--------------------------------------------------------------------------------
/doc/img/xor-metric-distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/storj-archived/kfs/64ee6098c52e8d07dc9af6691e56ac06b9da7be9/doc/img/xor-metric-distribution.png
--------------------------------------------------------------------------------
/doc/index.json:
--------------------------------------------------------------------------------
1 | {
2 | "kfs": {
3 | "title": "Programmatic Usage"
4 | },
5 | "cli": {
6 | "title": "Command Line Interface"
7 | },
8 | "performance-testing": {
9 | "title": "Performance Testing the Changes"
10 | },
11 | "about": {
12 | "title": "Motivation and Mechanics"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/doc/kfs.md:
--------------------------------------------------------------------------------
1 | This tutorial covers everything you need to know about using KFS within your
2 | application. KFS is based on LevelDB, an embedded key-value store, but the
3 | interface for interacting with a KFS is focused on the storage and retrieval
4 | of files and arbitrary binary streams.
5 |
6 | ### Getting Started
7 |
8 | To create and open a new KFS database (or open an existing one), simply
9 | require the module and create a {@link Btable} object:
10 |
11 | ```
12 | const kfs = require('kfs');
13 | const myDataStore = kfs('/path/to/database.kfs');
14 | ```
15 |
16 | That's it! Your data store is ready to use.
17 |
18 | ### Check if a File Exists
19 |
20 | To check if a file exists at a given key, use the {@link Btable#exists} method:
21 |
22 | ```
23 | const some160bitKey = 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc';
24 |
25 | myDataStore.exists(some160bitKey, (err, exists) => {
26 | console.log('The file ' + (exists ? 'DOES' : 'DOES NOT') + ' exist!');
27 | });
28 | ```
29 |
30 | ### Check if a File Can Be Stored
31 |
32 | To check the available space for a file at a given key, use the
33 | {@link Btable#stat} method (it returns an array of bucket stats):
34 |
35 | ```
36 | const fileSizeInBytes = 4096;
37 |
38 | myDataStore.stat(some160bitKey, (err, result) => {
39 | if (err) {
40 |     return; // handle error
41 | }
42 |
43 |   let enoughFreeSpace = result[0].sBucketStats.free > fileSizeInBytes;
44 |
45 | console.log('There ' + (enoughFreeSpace ? 'IS': 'IS NOT') + ' enough space!');
46 | });
47 | ```
48 |
49 | ### Write a File to the Data Store
50 |
51 | To write a raw buffer to the data store, use the {@link Btable#writeFile}
52 | method:
53 |
54 | ```
55 | const myFileBuffer = Buffer.from([/* ... */]);
56 |
57 | myDataStore.writeFile(some160bitKey, myFileBuffer, (err) => {
58 | console.log('File ' + (err ? 'WAS NOT' : 'WAS') + ' written!');
59 | });
60 | ```
61 |
62 | ### Read a File from the Data Store
63 |
64 | To read a file into memory from the data store, use the {@link Btable#readFile}
65 | method:
66 |
67 | ```
68 | myDataStore.readFile(some160bitKey, (err, fileBuffer) => {
69 | console.log(err || fileBuffer);
70 | });
71 | ```
72 |
73 | ### Remove a File from the Data Store
74 |
75 | To remove a file from the data store, use the {@link Btable#unlink} method:
76 |
77 | ```
78 | myDataStore.unlink(some160bitKey, (err) => {
79 | console.log('The file ' + (err ? 'WAS NOT' : 'WAS') + ' removed!');
80 | });
81 | ```
82 |
83 | ### Use the Streaming Interfaces
84 |
85 | When reading or writing larger files, you may not wish to buffer everything
86 | into memory. In these cases, use the {@link Btable#createReadStream} and
87 | {@link Btable#createWriteStream} methods:
88 |
89 | ```
90 | myDataStore.createReadStream(some160bitKey, (err, readableStream) => {
91 | if (err) {
92 |     return; // handle error
93 | }
94 |
95 | readableStream.on('data', (chunk) => {
96 | console.log('Got chunk:', chunk);
97 | });
98 |
99 | readableStream.on('end', () => {
100 | console.log('All chunks read!');
101 | });
102 |
103 | readableStream.on('error', (err) => {
104 | console.log('Failed to read file:', err.message);
105 | });
106 | });
107 | ```
108 |
109 | ```
110 | myDataStore.createWriteStream(some160bitKey, (err, writableStream) => {
111 | if (err) {
112 |     return; // handle error
113 | }
114 |
115 | writableStream.on('finish', () => {
116 | console.log('All chunks written!');
117 | });
118 |
119 | writableStream.on('error', (err) => {
120 | console.log('Failed to write file:', err.message);
121 | });
122 |
123 | writableStream.write(Buffer.from([/* ... */]));
124 | writableStream.write(Buffer.from([/* ... */]));
125 | writableStream.write(Buffer.from([/* ... */]));
126 | writableStream.end();
127 | });
128 | ```
129 |
--------------------------------------------------------------------------------
/doc/performance-testing.md:
--------------------------------------------------------------------------------
1 | One major hypothesis of this project is that KFS enhances performance over
2 | the use of a standard LevelDB instance. This is due to the nature of how
3 | KFS bounds the cost of LevelDB's compaction mechanism by sharding a data set
4 | over a series of size-capped LevelDB instances. A set of performance tests
5 | were run on a standard LevelDB along with our version which leverages KFS.
6 | This is a short summary of our findings and their implications.
7 |
8 | ### Experiment Design
9 |
10 | A series of one hundred trials were run in sequential order.
11 | Each trial consisted of measuring the execution time for a complete read,
12 | write, and unlink (delete) operation on file sizes of 8, 16, 32, 64, 128, 256
13 | and 512 MiB. Keeping in mind that files are split into discrete 128KiB key/value
14 | pairs, keyed by a hash of the content of the entire file, this means that the
15 | actual number of read/write/delete operations is equal to the size of the file
16 | divided by 128KiB (a 512 MiB file, for example, maps to 4096 discrete operations).
17 |
18 | Of particular note is that each sequential test run adds approximately 1GiB to
19 | the full size of the database (since unlinks only tombstone entries). Our
20 | number of trials is consistent with our assertion that LevelDB's performance
21 | degrades significantly after the size of the database exceeds 100GiB.
22 |
23 | This experiment was conducted for both a vanilla (standard) LevelDB and a
24 | version using the KFS protocol. In addition, we ran the experiment using
25 | a hard disk drive (HDD) and solid state drive (SSD).
26 |
27 | ### Results
28 |
29 | An overview plot displaying the execution time by file size and operation for
30 | each trial indicates some difference between KFS and a vanilla LevelDB. At a
31 | high level it appears vanilla LevelDB had a higher variance across many
32 | categories. It is our belief that this variance is due to compaction triggering
33 | in LevelDB as the size of the single instance grows quickly.
34 |
35 | Since data is spread in a uniform fashion across a series of LevelDBs in KFS,
36 | this compaction triggering happens less frequently and has a much smaller
37 | impact.
38 |
39 | ---
40 |
41 | 
42 |
43 | 
44 |
45 | ---
46 |
47 | Upon closer inspection, the data shows that the mean execution
48 | time is lower for KFS in every category. As for variance, the story is a bit more
49 | complicated. On SSD, vanilla LevelDB has much greater variance than KFS for writes and
50 | unlinks but is more consistent for reads. On HDD, both KFS and vanilla show greater
51 | variance, but again KFS performs more consistently on writes and unlinks.
52 |
53 | ---
54 |
55 | Mean execution time comparison for SSD and HDD.
56 |
57 | 
58 |
59 | 
60 |
61 | Standard deviation execution time comparison for SSD and HDD.
62 |
63 | 
64 |
65 | 
66 |
67 |
68 | ---
69 |
70 | We ran two-sided
71 | [significance tests](http://www.stat.yale.edu/Courses/1997-98/101/sigtest.htm)
72 | on each combination of operation and file size with a p-value cut-off at .05.
73 | For reads at the 8, 16, 32, 64, 128 and 256 MiB file sizes, along with unlinks at 64 MiB, we were
74 | unable to reject the null hypothesis. In other words, we are unable to suggest that KFS
75 | performs better than a vanilla LevelDB in those scenarios. For the rest, we did achieve
76 | a 95% confidence level. This suggests that our measurements are not the
77 | result of a statistical fluke and that KFS introduces a meaningful change for those operations
78 | and file sizes. Please note that any confidence level of 100% is an artifact of rounding;
79 | a p-value of exactly 0 is theoretically impossible.
80 |
81 | ---
82 |
83 | 
84 |
85 | ### Conclusion
86 |
87 | While p-values should not be followed blindly, the data does indicate that
88 | the KFS protocol gives statistically significant gains in speed and consistency.
89 |
90 | To reproduce the data generated for these tests:
91 |
92 | * Clone this git repository
93 | * Make sure you have Node.js and NPM installed
94 | * Run `npm install` from the project root directory
95 | * Run `npm run benchmark [iterations] [path_to_write_results]`
96 |
97 | You can set the path to the database to create using the `KFS_PERF_DIR`
98 | environment variable for testing on different types of drives. If no path to
99 | write results to is specified, they will be written to stdout.
100 |
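For example, to run one hundred trials against a drive mounted at a particular
path and write the results to a file (both paths here are placeholders):

```
KFS_PERF_DIR=/mnt/ssd npm run benchmark 100 /tmp/results.json
```
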
101 | If you want to experiment with chunk size (discrete key/value pairs associated
102 | with a file), modify the `C` constant in `lib/constants.js`.
103 |
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @module kfs
3 | */
4 |
5 | 'use strict';
6 |
7 | const Btable = require('./lib/b-table');
8 |
9 | /**
10 | * Returns a constructed {@link Btable}
11 | * @function
12 | * @param {string} path - Path to the KFS store
13 | * @param {object} [options] - {@link Btable} options
14 | */
15 | module.exports = (path, opts) => new Btable(path, opts);
16 |
17 | /** {@link Btable} */
18 | module.exports.Btable = Btable;
19 |
20 | /** {@link Sbucket} */
21 | module.exports.Sbucket = require('./lib/s-bucket');
22 |
23 | /** {@link BlockStream} */
24 | module.exports.BlockStream = require('./lib/block-stream');
25 |
26 | /** {@link ReadableFileStream} */
27 | module.exports.ReadableFileStream = require('./lib/read-stream');
28 |
29 | /** {@link WritableFileStream} */
30 | module.exports.WritableFileStream = require('./lib/write-stream');
31 |
32 | /** {@link module:kfs/constants} */
33 | module.exports.constants = require('./lib/constants');
34 |
35 | /** {@link module:kfs/utils} */
36 | module.exports.utils = require('./lib/utils');
37 |
--------------------------------------------------------------------------------
/lib/b-table.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const merge = require('merge');
4 | const {EventEmitter} = require('events');
5 | const fs = require('fs');
6 | const mkdirp = require('mkdirp');
7 | const utils = require('./utils');
8 | const constants = require('./constants');
9 | const Sbucket = require('./s-bucket');
10 | const path = require('path');
11 | const assert = require('assert');
12 | const async = require('async');
13 |
14 |
15 | /**
16 | * Represents the primary interface for the KFS file store
17 | */
18 | class Btable extends EventEmitter {
19 |
20 | static get RID_FILENAME() {
21 | return 'r.id';
22 | }
23 |
24 | static get DEFAULTS() {
25 | return {
26 | referenceId: null,
27 | maxTableSize: constants.S * constants.B,
28 | sBucketOpts: {}
29 | };
30 | }
31 |
32 | /**
33 | * Constructs series of {@link Sbucket}s composing a sharded table
34 | * @constructor
35 | * @param {String} tablePath - The path to the directory to store the table
36 | * @param {Object} [options]
37 | * @param {String} [options.referenceId] - R bit hex reference ID
38 | * @param {Number} [options.maxTableSize] - Max bytes to cap the database
39 | * @param {Object} [options.sBucketOpts] - Options to pass to Sbucket
40 | */
41 | constructor(tablePath, options) {
42 | super();
43 |
44 | this._options = merge(Btable.DEFAULTS, options);
45 | this._rid = utils.createReferenceId(this._options.referenceId);
46 | this._sBuckets = {};
47 | this._tablePath = utils.coerceTablePath(tablePath);
48 | this._maxTableSize = this._options.maxTableSize;
49 | this._options.sBucketOpts.maxSize = this._maxTableSize / constants.B;
50 |
51 | this._open();
52 | }
53 |
54 | /**
55 | * Opens the Btable, creating it if it does not exist
56 | * @private
57 | */
58 | _open() {
59 | if (!utils.fileDoesExist(this._tablePath)) {
60 | this._initBtableDirectory();
61 | } else {
62 | this._validateTablePath();
63 | }
64 |
65 |     this._rid = Buffer.from(fs.readFileSync(
66 | path.join(this._tablePath, Btable.RID_FILENAME),
67 | { encoding: 'hex' }
68 | ), 'hex');
69 | }
70 |
71 | /**
72 | * Initializes a new KFS database (B-table directory)
73 | * @private
74 | */
75 | _initBtableDirectory() {
76 | mkdirp.sync(this._tablePath);
77 | fs.writeFileSync(
78 | path.join(this._tablePath, Btable.RID_FILENAME),
79 | this._rid,
80 | { encoding: 'hex' }
81 | );
82 | }
83 |
84 | /**
85 | * Validates a path to a directory as a KFS instance
86 | * @private
87 | */
88 | _validateTablePath() {
89 | const dirStats = fs.statSync(this._tablePath);
90 |
91 | assert(dirStats.isDirectory(), 'Table path is not a directory');
92 |
93 | const requiredPaths = [Btable.RID_FILENAME];
94 | const dirContents = fs.readdirSync(this._tablePath);
95 |
96 | for (let pathName of requiredPaths) {
97 | assert(dirContents.indexOf(pathName) !== -1,
98 | 'Table path is not a valid KFS instance');
99 | }
100 | }
101 |
102 | /**
103 | * Determine the {@link Sbucket} index for a given key
104 | * @private
105 | * @param {String} key - The data key to route
106 | * @returns {Number}
107 | */
108 | _getSbucketIndexForKey(key) {
109 |     return this._rid[0] ^ Buffer.from(utils.hashKey(key), 'hex')[0];
110 | }
111 |
112 | /**
113 | * Get the {@link Sbucket} for the supplied index
114 | * @private
115 | * @param {Number} sBucketIndex - The index for the desired bucket
116 | * @returns {Sbucket}
117 | */
118 | _getSbucketAtIndex(sBucketIndex) {
119 | assert(sBucketIndex < constants.B, 'Index must not be greater than B');
120 | assert(sBucketIndex > -1, 'Index must be greater than or equal to 0');
121 |
122 | if (this._sBuckets[sBucketIndex]) {
123 | return this._sBuckets[sBucketIndex];
124 | }
125 |
126 | this._sBuckets[sBucketIndex] = new Sbucket(
127 | path.join(this._tablePath,
128 | utils.createSbucketNameFromIndex(sBucketIndex)),
129 | this._options.sBucketOpts
130 | );
131 |
132 | this._sBuckets[sBucketIndex].removeAllListeners('idle');
133 | this._sBuckets[sBucketIndex].once('idle', () => {
134 | this._sBuckets[sBucketIndex].close();
135 | });
136 |
137 | return this._sBuckets[sBucketIndex];
138 | }
139 |
140 | /**
141 | * Get the {@link Sbucket} for the given key
142 | * @private
143 | * @param {String} key - The key that maps to a {@link Sbucket}
144 | * @param {Btable~_getSbucketForKeyCallback}
145 | */
146 | _getSbucketForKey(key, callback) {
147 | const sIndex = typeof key === 'number'
148 | ? key
149 | : this._getSbucketIndexForKey(key);
150 | const sBucket = this._getSbucketAtIndex(sIndex);
151 |
152 | if (sBucket.readyState !== Sbucket.OPENED) {
153 | return sBucket.open((err) => {
154 | if (err) {
155 | return callback(err);
156 | }
157 |
158 | callback(null, sBucket, sIndex);
159 | });
160 | }
161 |
162 | callback(null, sBucket, sIndex);
163 | }
164 | /**
165 | * @private
166 | * @callback Btable~_getSbucketForKeyCallback
167 | * @param {Error} [error]
168 | * @param {Sbucket} sBucket
169 | */
170 |
171 | /**
172 | * Lists the created {@link Sbucket}s and their sizes
173 | * @param {String|Number} [keyOrIndex] - Optional bucket index or file key
174 | * @param {Btable~statCallback}
175 | */
176 | stat(keyOrIndex, callback) {
177 | const self = this;
178 |
179 | if (typeof keyOrIndex === 'function') {
180 | callback = keyOrIndex;
181 | keyOrIndex = null;
182 | }
183 |
184 | if (keyOrIndex) {
185 | return _getStat(keyOrIndex, (err, stats) => {
186 | callback(err, stats ? [stats] : undefined);
187 | });
188 | }
189 |
190 | let sBuckets = fs.readdirSync(this._tablePath)
191 | .filter((name) => name !== Btable.RID_FILENAME)
192 | .map((sBucketName) => parseInt(sBucketName))
193 | .filter((sBucketIndex) => {
194 | return !Number.isNaN(sBucketIndex) && typeof sBucketIndex === 'number';
195 | });
196 |
197 | function _getStat(sBucketIndex, done) {
198 | self._getSbucketForKey(sBucketIndex, (err, sBucket, sIndex) => {
199 | if (err) {
200 | return done(err);
201 | }
202 |
203 | sBucket.stat((err, stats) => {
204 | if (err) {
205 | return done(err);
206 | }
207 |
208 | done(null, {
209 | sBucketIndex: sIndex,
210 | sBucketStats: stats
211 | });
212 | });
213 | });
214 | }
215 |
216 | async.mapLimit(sBuckets, 3, _getStat, callback);
217 | }
218 | /**
219 | * @callback Btable~statCallback
220 | * @param {Error} [error]
221 | * @param {Object[]} sBuckets
222 | * @param {String} sBuckets[].sBucketIndex - The index of the S-bucket
223 | * @param {Object} sBuckets[].sBucketStats
224 | * @param {Number} sBuckets[].sBucketStats.used - Space used in the bucket
225 | * @param {Number} sBuckets[].sBucketStats.free - Space free in the bucket
226 | */
227 |
228 | /**
229 | * Lists the file keys in the given bucket
230 | * @param {Number|String} keyOrIndex - The bucket index of a file key
231 | * @param {Sbucket~listCallback}
232 | */
233 | list(keyOrIndex, callback) {
234 | var key = typeof keyOrIndex === 'number'
235 | ? keyOrIndex
236 | : utils.coerceKey(keyOrIndex);
237 |
238 | this._getSbucketForKey(key, (err, sBucket) => {
239 | if (err) {
240 | return callback(err);
241 | }
242 |
243 | sBucket.list(callback);
244 | });
245 | }
246 |
247 | /**
248 | * Check if a file exists at the supplied key
249 | * @param {String} key - The key to check for existence
250 | * @param {Sbucket~existsCallback}
251 | */
252 | exists(key, callback) {
253 | this._getSbucketForKey(key, (err, sBucket) => {
254 | if (err) {
255 | return callback(err);
256 | }
257 |
258 | sBucket.exists(key, callback);
259 | });
260 | }
261 |
262 | /**
263 | * Unlinks the data for the given key
264 | * @param {String} key - The key to unlink data from
265 | * @param {Sbucket~unlinkCallback}
266 | */
267 | unlink(key, callback) {
268 | this._getSbucketForKey(key, (err, sBucket) => {
269 | if (err) {
270 | return callback(err);
271 | }
272 |
273 | sBucket.unlink(key, callback);
274 | });
275 | }
276 |
277 | /**
278 | * Performs a flush on each S-bucket in the table to free any dead space
279 | * @param {Btable~flushCallback}
280 | */
281 | flush(callback) {
282 | async.eachSeries(Object.keys(this._sBuckets), (k, next) => {
283 | this._getSbucketForKey(parseInt(k), (err, sBucket) => {
284 |         if (err) { return next(err); } sBucket.flush(next);
285 | });
286 | }, callback);
287 | }
288 | /**
289 | * @callback Btable~flushCallback
290 | * @param {Error|null} error
291 | */
292 |
293 | /**
294 | * Reads the data at the supplied key into a buffer
295 | * @param {String} key - The key for the data to read
296 | * @param {Sbucket~readFileCallback}
297 | */
298 | readFile(key, callback) {
299 | this._getSbucketForKey(key, (err, sBucket) => {
300 | if (err) {
301 | return callback(err);
302 | }
303 |
304 | sBucket.readFile(key, callback);
305 | });
306 | }
307 |
308 | /**
309 | * Creates a readable stream of the data at the given key
310 | * @param {String} key - The key for the data read
311 | * @param {Btable~createReadStreamCallback}
312 | */
313 | createReadStream(key, callback) {
314 | this._getSbucketForKey(key, (err, sBucket) => {
315 | if (err) {
316 | return callback(err);
317 | }
318 |
319 | callback(null, sBucket.createReadStream(key));
320 | });
321 | }
322 | /**
323 | * @callback Btable~createReadStreamCallback
324 | * @param {Error} [error]
325 | * @param {ReadableStream} readStream
326 | */
327 |
328 | /**
329 | * Writes the given buffer to the key
330 | * @param {String} key - The key to write the data to
331 | * @param {Buffer} buffer - The raw buffer to write to the key
332 | * @param {Sbucket~writeFileCallback}
333 | */
334 | writeFile(key, buffer, callback) {
335 | this._getSbucketForKey(key, (err, sBucket) => {
336 | if (err) {
337 | return callback(err);
338 | }
339 |
340 | sBucket.writeFile(key, buffer, callback);
341 | });
342 | }
343 |
344 | /**
345 | * Creates a writable stream to the given key
346 | * @param {String} key - The key to write the data to
347 | * @param {Btable~createWriteStreamCallback}
348 | */
349 | createWriteStream(key, callback) {
350 | this._getSbucketForKey(key, (err, sBucket) => {
351 | if (err) {
352 | return callback(err);
353 | }
354 |
355 | callback(null, sBucket.createWriteStream(key));
356 | });
357 | }
358 | /**
359 | * @callback Btable~createWriteStreamCallback
360 | * @param {Error} [error]
361 | * @param {WritableStream} writeStream
362 | */
363 |
364 | }
365 |
366 | module.exports = Btable;
367 |
--------------------------------------------------------------------------------
/lib/block-stream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const {Transform: TransformStream} = require('readable-stream');
4 | const merge = require('merge');
5 |
6 | /**
7 | * Transforms the input stream into an output stream of N-sized chunks
8 | */
9 | class BlockStream extends TransformStream {
10 |
11 | static get DEFAULTS() {
12 | return {
13 | padLastChunk: false
14 | };
15 | }
16 |
17 | /**
18 | * @constructor
19 | * @param {Object} [options]
20 | * @param {Sbucket} [options.sBucket] - The S-bucket for chunks allocation
21 | * @param {Boolean} [options.padLastChunk=false] - Pad last chunk with zeros
22 | */
23 | constructor(options) {
24 | super();
25 | options = merge(BlockStream.DEFAULTS, options);
26 | this._addPadding = options.padLastChunk;
27 | this._bufferLength = 0;
28 | this._offset = 0;
29 | this._inputQueue = [];
30 | this._sBucket = options.sBucket;
31 | }
32 |
33 | /**
34 | * Triggered when data is available
35 | * @event BlockStream#data
36 | * @param {Buffer} chunk
37 | */
38 |
39 | /**
40 | * Triggered when the stream is ended
41 | * @event BlockStream#end
42 | */
43 |
44 | /**
45 | * Implements the transform method
46 | * @private
47 | */
48 | _transform(bytes, encoding, callback) {
49 | this._addToBuffer(bytes);
50 | this._drainInternalBuffer();
51 | callback(null);
52 | }
53 |
54 | /**
55 | * Implements the flush method
56 | * @private
57 | */
58 | _flush(callback) {
59 | if(this._bufferLength === 0) {
60 | return callback(null);
61 | }
62 | const chunk = (this._addPadding &&
63 | this._sBucket._chunkSize !== this._bufferLength)
64 | ? ((this._sBucket._chunkFree.length > 0)
65 | ? this._sBucket._chunkFree.shift()
66 | : Buffer.allocUnsafe(this._sBucket._chunkSize))
67 | .fill(0, this._bufferLength)
68 | : Buffer.allocUnsafe(this._bufferLength);
69 |
70 | var i = 0;
71 | while(this._bufferLength > 0) {
72 | const input = this._inputQueue.shift();
73 | const k = (input.length - this._offset);
74 | input.copy(chunk, i, this._offset);
75 | this._offset = 0;
76 | i += k;
77 | this._bufferLength -= k;
78 | }
79 | this.push(chunk);
80 | this._sBucket._chunkFree.splice(0, this._sBucket._chunkFree.length);
81 | callback(null);
82 | }
83 |
84 | /**
85 | * Drains the internal buffer
86 | * @private
87 | */
88 | _drainInternalBuffer() {
89 | const self = this;
90 |
91 | function _transformChunk(chunk, j) {
92 | var i = 0;
93 | while (i < self._sBucket._chunkSize) {
94 | const input = self._inputQueue.shift();
95 | const k = (input.length - self._offset);
96 | if (j >= k) {
97 | input.copy(chunk, i, self._offset);
98 | self._offset = 0;
99 | i += k;
100 | j -= k;
101 | } else {
102 | input.copy(chunk, i, self._offset, self._offset + j);
103 | self._inputQueue.unshift(input);
104 | self._offset += j;
105 | i += j;
106 | }
107 | }
108 | }
109 |
110 | while (this._bufferLength >= this._sBucket._chunkSize) {
111 | const chunk = (this._sBucket._chunkFree.length > 0)
112 | ? this._sBucket._chunkFree.shift()
113 | : Buffer.allocUnsafe(this._sBucket._chunkSize);
114 | _transformChunk(chunk, this._sBucket._chunkSize);
115 | this.push(chunk);
116 | this._bufferLength -= this._sBucket._chunkSize;
117 | }
118 | }
119 |
120 | /**
121 | * Adds the bytes to the internal buffer
122 | * @private
123 | */
124 | _addToBuffer(bytes) {
125 | this._inputQueue.push(bytes);
126 | this._bufferLength += bytes.length;
127 | }
128 |
129 | }
130 |
131 | module.exports = BlockStream;
132 |
--------------------------------------------------------------------------------
/lib/constants.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @module kfs/constants
3 | */
4 |
5 | 'use strict';
6 |
7 | module.exports = Object.freeze({
8 | /** @constant {Number} R - Number of bits in Reference ID */
9 | R: 160,
10 | /** @constant {Number} C - Number of bytes in a file chunk */
11 | C: 131072,
12 | /** @constant {Number} S - Number of bytes in a {@link Sbucket} */
13 | S: 32 * (1024 * 1024 * 1024),
14 | /** @constant {Number} B - Number of columns in a {@link Btable} */
15 | B: 256,
16 | /** @constant {String} HASH - OpenSSL id for key hashing algorithm */
17 | HASH: 'rmd160',
18 | /** @constant {Number} SBUCKET_IDLE - Time to wait before idle event */
19 | SBUCKET_IDLE: 60000
20 | });
21 |
--------------------------------------------------------------------------------
/lib/read-stream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const {Readable: ReadableStream} = require('readable-stream');
4 | const utils = require('./utils');
5 |
6 | /**
7 | * Creates a readable stream of a file from a {@link Sbucket}
8 | */
9 | class ReadableFileStream extends ReadableStream {
10 |
11 | /**
12 | * @constructor
13 | * @param {Object} options
14 | * @param {Sbucket} options.sBucket
15 | * @param {String} options.fileKey
16 | */
17 | constructor(options) {
18 | super();
19 | this._sBucket = options.sBucket;
20 | this._fileKey = options.fileKey;
21 | this._index = 0;
22 | }
23 |
24 | /**
25 | * Triggered when data is available to read
26 | * @event ReadableFileStream#readable
27 | */
28 |
29 | /**
30 |    * Triggered when data is pushed through the stream
31 | * @event ReadableFileStream#data
32 | * @param {Buffer} bytes
33 | */
34 |
35 | /**
36 | * Triggered when no more data is available
37 | * @event ReadableFileStream#end
38 | */
39 |
40 | /**
41 | * Triggered if an error occurs
42 | * @event ReadableFileStream#error
43 | * @param {Error} error
44 | */
45 |
46 | /**
47 | * @private
48 | */
49 | _read() {
50 | const itemKey = utils.createItemKeyFromIndex(this._fileKey, this._index);
51 |
52 | this._sBucket._db.get(itemKey, (err, result) => {
53 | if (err) {
54 | if (utils.isNotFoundError(err)) {
55 | return this.push(null);
56 | } else {
57 | return this.emit('error', err);
58 | }
59 | }
60 |
61 | this._index++;
62 |       this.push(Buffer.from(result));
63 | });
64 | }
65 |
66 | /**
67 | * Destroys and aborts any reads for this stream
68 | * @param {Sbucket~unlinkCallback}
69 | */
70 | destroy(callback) {
71 | this._sBucket.unlink(this._fileKey, callback);
72 | }
73 |
74 | }
75 |
76 | module.exports = ReadableFileStream;
77 |
--------------------------------------------------------------------------------
/lib/s-bucket.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const merge = require('merge');
4 | const leveldown = require('leveldown');
5 | const {EventEmitter} = require('events');
6 | const constants = require('./constants');
7 | const utils = require('./utils');
8 | const WritableFileStream = require('./write-stream');
9 | const ReadableFileStream = require('./read-stream');
10 | const BlockStream = require('./block-stream');
11 | const async = require('async');
12 |
13 | /**
14 | * Capped LevelDB database within a {@link Btable}
15 | */
16 | class Sbucket extends EventEmitter {
17 |
18 | static get CLOSED() {
19 | return 4;
20 | }
21 |
22 | static get CLOSING() {
23 | return 3;
24 | }
25 |
26 | static get OPENED() {
27 | return 2;
28 | }
29 |
30 | static get OPENING() {
31 | return 1;
32 | }
33 |
34 | static get SIZE_START_KEY() {
35 | return '0';
36 | }
37 |
38 | static get SIZE_END_KEY() {
39 | return 'z';
40 | }
41 |
42 | static get DEFAULTS() {
43 | return {
44 | maxOpenFiles: 1000,
45 | compression: false,
46 | cacheSize: 8 * (1024 * 1024),
47 | createIfMissing: true,
48 | errorIfExists: false,
49 | writeBufferSize: 4 * (1024 * 1024),
50 | blockSize: 4096,
51 | blockRestartInterval: 16,
52 | maxSize: constants.S,
53 | chunkSize: constants.C
54 | };
55 | }
56 |
57 | /**
58 | * @constructor
59 | * @param {String} dbPath - The path to database on disk
60 | * @param {Object} [options] - Options to pass through to leveldown#open
61 | * @param {Number} [options.maxOpenFiles=1000]
62 | * @param {Boolean} [options.compression=false]
63 | * @param {Number} [options.cacheSize=8388608]
64 | * @param {Boolean} [options.createIfMissing=true]
65 | * @param {Boolean} [options.errorIfExists=false]
66 | * @param {Number} [options.writeBufferSize=4194304]
67 | * @param {Number} [options.blockSize=4096]
68 | * @param {Number} [options.blockRestartInterval=16]
69 | */
70 | constructor(dbPath, options) {
71 | super();
72 | this.setMaxListeners(Infinity);
73 | this._dbPath = dbPath;
74 | this._options = merge(Sbucket.DEFAULTS, options);
75 | this._db = leveldown(dbPath);
76 | this._pendingOperations = 0;
77 | this._maxSize = this._options.maxSize;
78 | this.readyState = Sbucket.CLOSED;
79 | this._chunkFree = [];
80 | this._chunkSize = this._options.chunkSize;
81 | }
82 |
83 | /**
84 | * Triggered when the underlying database opens
85 | * @event Sbucket#open
86 | */
87 |
88 | /**
89 | * Triggered when the underlying database closes
90 | * @event Sbucket#close
91 | */
92 |
93 | /**
94 | * Triggered when there are no more pending operations
95 | * @event Sbucket#idle
96 | */
97 |
98 | /**
99 | * Triggered when the bucket is locked for flushing
100 | * @event Sbucket#locked
101 | */
102 |
103 | /**
104 | * Triggered when the bucket is unlocked
105 | * @event Sbucket#unlocked
106 | */
107 |
108 | /**
109 | * Opens the underlying database
110 | * @fires Sbucket#open
111 | * @param {Sbucket~openCallback}
112 | */
113 | open(callback=utils.noop) {
114 | const self = this;
115 |
116 | function _open() {
117 | self.readyState = Sbucket.OPENING;
118 | self._db.open(self._options, function(err) {
119 | if (err) {
120 | return self.emit('error', err);
121 | }
122 |
123 | self.readyState = Sbucket.OPENED;
124 | self.emit('open');
125 | self._idleCheckInterval = setInterval(
126 | () => self._checkIdleState(),
127 | constants.SBUCKET_IDLE
128 | );
129 | });
130 | }
131 |
132 | function _onError(err) {
133 | self.removeListener('open', _onOpen);
134 | callback(err);
135 | }
136 |
137 | function _onOpen() {
138 | self.removeListener('error', _onError);
139 | callback(null);
140 | }
141 |
142 | this.once('open', _onOpen).once('error', _onError);
143 |
144 | if (this.readyState === Sbucket.OPENED) {
145 | return this.emit('open');
146 | }
147 |
148 | if (this.readyState === Sbucket.OPENING) {
149 | return;
150 | }
151 |
152 | if (this.readyState === Sbucket.CLOSING) {
153 | return this.once('close', _open);
154 | }
155 |
156 | _open();
157 | }
158 | /**
159 | * @callback Sbucket~openCallback
160 | * @param {Error} [error]
161 | */
162 |
163 | /**
164 | * Closes the underlying database
165 | * @fires Sbucket#close
166 | * @param {Sbucket~closeCallback}
167 | */
168 | close(callback=utils.noop) {
169 | const self = this;
170 |
171 | function _close() {
172 | self.readyState = Sbucket.CLOSING;
173 | self._db.close(function(err) {
174 | if (err) {
175 | return self.emit('error', err);
176 | }
177 |
178 | self.readyState = Sbucket.CLOSED;
179 | self.emit('close');
180 | clearInterval(self._idleCheckInterval);
181 | });
182 | }
183 |
184 | function _onError(err) {
185 | self.removeListener('close', _onClose);
186 | callback(err);
187 | }
188 |
189 | function _onClose() {
190 | self.removeListener('error', _onError);
191 | callback(null);
192 | }
193 |
194 | this.once('close', _onClose).once('error', _onError);
195 |
196 | if (this.readyState === Sbucket.CLOSED) {
197 | return this.emit('close');
198 | }
199 |
200 | if (this.readyState === Sbucket.CLOSING) {
201 | return;
202 | }
203 |
204 | if (this.readyState === Sbucket.OPENING) {
205 | return this.once('open', _close);
206 | }
207 |
208 | _close();
209 | }
210 | /**
211 | * @callback Sbucket~closeCallback
212 | * @param {Error} [error]
213 | */
214 |
215 | /**
216 | * Determines if the file is already stored in the db
217 | * @param {String} key - The key for the file stored
218 | * @param {Sbucket~existsCallback}
219 | */
220 | exists(key, callback) {
221 | this._incPendingOps();
222 | this._db.get(utils.createItemKeyFromIndex(key, 0), (err) => {
223 | this._decPendingOps();
224 | callback(null, !err);
225 | });
226 | }
227 | /**
228 | * @callback Sbucket~existsCallback
229 | * @param {Error} [error]
230 | * @param {Boolean} fileDoesExist
231 | */
232 |
233 | /**
234 | * Deletes the file chunks from the database
235 | * @param {String} key - The key for the file stored
236 | * @param {Sbucket~unlinkCallback}
237 | */
238 | unlink(key, callback) {
239 | const self = this;
240 | let index = 0;
241 |
242 | function _del(index, callback) {
243 | const itemKey = utils.createItemKeyFromIndex(key, index);
244 |
245 | self._db.get(itemKey, function(err) {
246 | index++;
247 |
248 | if (!err) {
249 | self._db.del(itemKey, () => _del(index, callback));
250 | } else if (utils.isNotFoundError(err)) {
251 | self._decPendingOps();
252 | callback(null);
253 | } else {
254 | self._decPendingOps();
255 | callback(err);
256 | }
257 | });
258 | }
259 |
260 | this._incPendingOps();
261 | _del(index, callback);
262 | }
263 | /**
264 | * @callback Sbucket~unlinkCallback
265 | * @param {Error} [error]
266 | */
267 |
268 | /**
269 | * Reads the file at the given key into a buffer
270 | * @param {String} key - The key for the file to read
271 | * @param {Sbucket~readFileCallback}
272 | */
273 | readFile(key, callback) {
274 |     let fileBuffer = Buffer.alloc(0);
275 | const readStream = this.createReadStream(key);
276 |
277 | readStream.on('data', (data) => {
278 | fileBuffer = Buffer.concat([fileBuffer, data]);
279 | });
280 |
281 | readStream.on('end', () => {
282 | this._decPendingOps();
283 | callback(null, fileBuffer);
284 | });
285 |
286 | readStream.on('error', (err) => {
287 | this._decPendingOps();
288 | readStream.removeAllListeners();
289 | callback(err);
290 | });
291 |
292 | this._incPendingOps();
293 | }
294 | /**
295 | * @callback Sbucket~readFileCallback
296 | * @param {Error} [error]
297 | * @param {Buffer} fileBuffer
298 | */
299 |
300 | /**
301 | * Writes the buffer to the given key
302 | * @param {String} key - The key for the file to write
303 | * @param {Buffer} buffer - The data to write to the given key
304 | * @param {Sbucket~writeFileCallback}
305 | */
306 | writeFile(key, buffer, callback) {
307 | const self = this;
308 | const writeStream = this.createWriteStream(key);
309 | let whichSlice = 0;
310 |
311 | function _writeFileSlice() {
312 | var startIndex = whichSlice * self._options.chunkSize;
313 | var endIndex = startIndex + self._options.chunkSize;
314 | var bufferSlice = buffer.slice(startIndex, endIndex);
315 |
316 | if (bufferSlice.length === 0) {
317 | return writeStream.end();
318 | }
319 |
320 | whichSlice++;
321 | writeStream.write(bufferSlice);
322 | _writeFileSlice();
323 | }
324 |
325 | writeStream.on('finish', () => {
326 | this._decPendingOps();
327 | callback(null);
328 | });
329 |
330 | writeStream.on('error', (err) => {
331 | this._decPendingOps();
332 | writeStream.removeAllListeners();
333 | callback(err);
334 | });
335 |
336 | this._incPendingOps();
337 | this.unlink(key, _writeFileSlice);
338 | }
339 | /**
340 | * @callback Sbucket~writeFileCallback
341 | * @param {Error} [error]
342 | */
343 |
344 | /**
345 | * Returns a readable stream of the file at the given key
346 | * @param {String} key - The key for the file to read
347 | * @returns {ReadableFileStream}
348 | */
349 | createReadStream(key) {
350 | const rs = new ReadableFileStream({
351 | sBucket: this,
352 | fileKey: key
353 | });
354 |
355 | this._incPendingOps();
356 | rs.on('end', () => this._decPendingOps());
357 |
358 | return rs;
359 | }
360 |
361 | /**
362 | * Returns a writable stream for a file at the given key
363 | * @param {String} key - The key for the file to read
364 | * @returns {WritableFileStream}
365 | */
366 | createWriteStream(key) {
367 | const bs = new BlockStream({
368 | padLastChunk: false,
369 | sBucket: this
370 | });
371 | const ws = new WritableFileStream({
372 | sBucket: this,
373 | fileKey: key
374 | });
375 |
376 |     // NB: Expose the underlying writable stream's #destroy method
377 | bs.destroy = (cb) => ws.destroy(cb);
378 |
379 | this._incPendingOps();
380 | bs.pipe(ws).on('finish', () => this._decPendingOps());
381 |
382 | return bs;
383 | }
384 |
385 | /**
386 | * Get stats for this bucket
387 | * @param {Sbucket~statCallback}
388 | */
389 | stat(callback) {
390 | const [start, end] = [Sbucket.SIZE_START_KEY, Sbucket.SIZE_END_KEY];
391 |
392 | this._incPendingOps();
393 | this._db.approximateSize(start, end, (err, size) => {
394 | this._decPendingOps();
395 |
396 | if (err) {
397 | return callback(err);
398 | }
399 |
400 | callback(null, {
401 | size: size,
402 | free: this._maxSize - size
403 | });
404 | });
405 | }
406 | /**
407 | * @callback Sbucket~statCallback
408 | * @param {Error} [error]
409 | * @param {Object} bucketStats
410 | * @param {Number} bucketStats.size - The used space in bytes
411 | * @param {Number} bucketStats.free - The free space left in bytes
412 | */
413 |
414 | /**
415 | * Get a list of file keys in the bucket and their approximate size
416 | * @param {Sbucket~listCallback}
417 | */
418 | list(callback) {
419 | const self = this;
420 | const iterator = this._db.iterator({
421 | gte: Sbucket.SIZE_START_KEY,
422 | lte: Sbucket.SIZE_END_KEY,
423 | values: false,
424 | keyAsBuffer: false
425 | });
426 | const keys = {};
427 | let currentResult = null;
428 |
429 | function _test() {
430 | return currentResult === null;
431 | }
432 |
433 | function _accumulateKey(next) {
434 | iterator.next((err, key) => {
435 | if (err) {
436 | return next(err);
437 | }
438 |
439 | if (!key) {
440 | currentResult = null;
441 | return next();
442 | }
443 |
444 | currentResult = key.split(' ')[0];
445 | keys[currentResult] = keys[currentResult]
446 | ? keys[currentResult] + self._options.chunkSize
447 | : self._options.chunkSize;
448 | next();
449 | });
450 | }
451 |
452 | this._incPendingOps();
453 | async.doUntil(_accumulateKey, _test, (err) => {
454 | this._decPendingOps();
455 |
456 | if (err) {
457 | return callback(err);
458 | }
459 |
460 | var results = [];
461 |
462 | for (var key in keys) {
463 | results.push({
464 | baseKey: key,
465 | approximateSize: keys[key]
466 | });
467 | }
468 |
469 | callback(null, results);
470 | });
471 | }
472 | /**
473 | * @callback Sbucket~listCallback
474 | * @param {Error} [error]
475 | * @param {Object[]} results
476 | * @param {String} results.baseKey
477 | * @param {Number} results.approximateSize
478 | */
479 |
480 | /**
481 | * Trigger a compaction for the S-bucket
482 | * @param {Sbucket~flushCallback}
483 | */
484 | flush(callback) {
485 | this._db.compactRange(Sbucket.SIZE_START_KEY, Sbucket.SIZE_END_KEY,
486 | callback);
487 | }
488 | /**
489 | * @callback Sbucket~flushCallback
490 | * @param {Error|null} error
491 | */
492 |
493 | /**
494 | * Increments the pending operations counter
495 | * @private
496 | */
497 | _incPendingOps() {
498 | this._pendingOperations++;
499 | }
500 |
501 | /**
502 | * Decrements the pending operations counter
503 | * @private
504 | * @fires Sbucket#idle
505 | */
506 | _decPendingOps() {
507 | this._pendingOperations--;
508 | setImmediate(() => this._checkIdleState());
509 | }
510 |
511 | /**
512 | * Emits the idle event if state is idle
513 | * @private
514 | */
515 | _emitIfStateIsIdle() {
516 | if (this._pendingOperations === 0) {
517 | this.emit('idle');
518 | return true;
519 | }
520 |
521 | return false;
522 | }
523 |
524 | /**
525 | * Checks the idle state and triggers a timeout for emitting the idle event
526 | * @private
527 | * @returns {Boolean} hasNoPendingOperations
528 | */
529 | _checkIdleState() {
530 | if (this._pendingOperations !== 0) {
531 | return false;
532 | }
533 |
534 | setTimeout(() => this._emitIfStateIsIdle(), constants.SBUCKET_IDLE);
535 | return true;
536 | }
537 |
538 | }
539 |
540 | module.exports = Sbucket;
541 |
--------------------------------------------------------------------------------
/lib/utils.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @module kfs/utils
3 | */
4 |
5 | 'use strict';
6 |
7 | const assert = require('assert');
8 | const constants = require('./constants');
9 | const fs = require('fs');
10 | const crypto = require('crypto');
11 | const path = require('path');
12 |
13 | /**
14 | * A stubbed noop function
15 | */
16 | module.exports.noop = function() {};
17 |
18 | /**
19 | * Tests if the string is a valid key
20 | * @param {String} key - The file key
21 | * @returns {Boolean}
22 | */
23 | module.exports.isValidKey = function(key) {
24 | let keyBuffer;
25 |
26 | try {
27 |     keyBuffer = Buffer.from(key, 'hex');
28 | } catch (err) {
29 | return false;
30 | }
31 |
32 | return keyBuffer.length === (constants.R / 8);
33 | };
34 |
35 | /**
36 | * Hashes the given key
37 | * @param {String} key - The file key
38 | * @returns {String}
39 | */
40 | module.exports.hashKey = function(key) {
41 | if (module.exports.isValidKey(key)) {
42 | return key;
43 | }
44 |
45 | return crypto.createHash(constants.HASH).update(key).digest('hex');
46 | };
47 |
48 | /**
49 | * Coerces input into a valid file key
50 | * @param {String} key - The file key
51 | * @returns {String}
52 | */
53 | module.exports.coerceKey = function(key) {
54 | if (!module.exports.isValidKey(key)) {
55 | return module.exports.hashKey(key);
56 | }
57 |
58 | return key;
59 | };
60 |
61 | /**
62 | * Get the key name for a data hash + index
63 | * @param {String} key - Hash of the data
64 | * @param {Number} index - The index of the file chunk
65 | * @returns {String}
66 | */
67 | module.exports.createItemKeyFromIndex = function(key, index) {
68 | assert(typeof index === 'number', 'Invalid index supplied');
69 |
70 | const fileKey = module.exports.hashKey(key);
71 | const indexLength = Math.floor(constants.S / constants.C).toString().length;
72 | const indexString = index.toString();
73 |
74 | let itemIndex = '';
75 |
76 |   assert(Buffer.from(fileKey, 'hex').length * 8 === constants.R, 'Invalid key');
77 | assert(indexString.length <= indexLength, 'Index is out of bounds');
78 |
79 | for (var i = 0; i < indexLength - indexString.length; i++) {
80 | itemIndex += '0';
81 | }
82 |
83 | itemIndex += indexString;
84 |
85 | return `${fileKey} ${itemIndex}`;
86 | };
87 |
88 | /**
88 |  * Get the file name of an S-bucket based on its index
89 |  * @param {Number} sBucketIndex - The index of the bucket in the B-table
91 | * @returns {String}
92 | */
93 | module.exports.createSbucketNameFromIndex = function(sBucketIndex) {
94 | assert(typeof sBucketIndex === 'number', 'Invalid index supplied');
95 |
96 | const indexLength = constants.B.toString().length;
97 | const indexString = sBucketIndex.toString();
98 |
99 | let leadingZeroes = '';
100 |
101 | for (var i = 0; i < indexLength - indexString.length; i++) {
102 | leadingZeroes += '0';
103 | }
104 |
105 | return `${leadingZeroes}${indexString}.s`;
106 | };
107 |
108 | /**
109 | * Creates a random reference ID
110 | * @param {String} [rid] - An existing hex reference ID
111 | * @returns {String}
112 | */
113 | module.exports.createReferenceId = function(rid) {
114 | if (!rid) {
115 | rid = crypto.randomBytes(constants.R / 8).toString('hex');
116 | }
117 |
118 | assert(rid.length === 40, 'Invalid reference ID length');
119 |
120 | return Buffer.from(rid, 'hex');
121 | };
122 |
123 | /**
124 | * Check if the given path exists
125 | * @param {String} filePath
126 | * @returns {Boolean}
127 | */
128 | module.exports.fileDoesExist = function(filePath) {
129 | try {
130 | fs.statSync(filePath);
131 | } catch (err) {
132 | return false;
133 | }
134 |
135 | return true;
136 | };
137 |
138 | /**
139 | * Takes a number of bytes and outputs a human readable size
140 | * @param {Number} bytes - The number of bytes to make readable
141 | * @returns {String}
142 | */
143 | module.exports.toHumanReadableSize = function(bytes) {
144 | const thresh = 1024;
145 |
146 | if (Math.abs(bytes) < thresh) {
147 | return bytes + ' B';
148 | }
149 |
150 | const units = ['KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB'];
151 | let u = -1;
152 |
153 | do {
154 | bytes /= thresh;
155 | ++u;
156 | } while (Math.abs(bytes) >= thresh && u < units.length - 1);
157 |
158 | return `${bytes.toFixed(1)} ${units[u]}`;
159 | };
160 |
161 | /**
162 | * Ensures that the given path has a kfs extension
163 | * @param {String} tablePath - The path name to a kfs instance
164 | * @returns {String}
165 | */
166 | module.exports.coerceTablePath = function(tablePath) {
167 | if (path.extname(tablePath) !== '.kfs') {
168 | return `${tablePath}.kfs`;
169 | }
170 |
171 | return tablePath;
172 | };
173 |
174 | /**
175 | * Determines if the passed error object is a NotFound error
176 | * @param {Error} error
177 | * @returns {Boolean}
178 | */
179 | module.exports.isNotFoundError = function(error) {
180 | return error && error.message.indexOf('NotFound:') !== -1;
181 | };
182 |
--------------------------------------------------------------------------------
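A quick usage sketch for the utils module above (not part of the source tree). The output values are taken verbatim from test/utils.unit.js later in this dump; the constants module is assumed to hold the library defaults:

const utils = require('kfs/lib/utils');

// Non-hex input is coerced into a valid key by hashing it with constants.HASH:
utils.coerceKey('test');
// => '5e52fee47e6b070565f74372468cdc699de89107'

// Chunk keys are the (hashed) file key plus a zero-padded chunk index:
utils.createItemKeyFromIndex('adc83b19e793491b1c6ea0fd8b46cd9f32e592fc', 20);
// => 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc 000020'

// S-bucket directory names are zero-padded to the digit width of constants.B:
utils.createSbucketNameFromIndex(42);
// => '042.s'

utils.toHumanReadableSize(34359738368);
// => '32.0 GiB'
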
/lib/write-stream.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const {Writable: WritableStream} = require('readable-stream');
4 | const utils = require('./utils');
5 |
6 | /**
7 | * Creates a writable stream for storing a file in an {@link Sbucket}
8 | */
9 | class WritableFileStream extends WritableStream {
10 |
11 | /**
12 | * @constructor
13 | * @param {Object} options
14 | * @param {Sbucket} options.sBucket - The S-bucket this stream will write to
15 | * @param {String} options.fileKey - The key for the file to write to
16 | */
17 | constructor(options) {
18 | super();
19 | this._sBucket = options.sBucket;
20 | this._fileKey = options.fileKey;
21 | this._index = 0;
22 | }
23 |
24 | /**
25 | * Triggered if an error occurs
26 | * @event WritableFileStream#error
27 | * @param {Error} error
28 | */
29 |
30 | /**
31 | * Triggered when data is finished writing
32 | * @event WritableFileStream#finish
33 | */
34 |
35 | /**
36 | * @private
37 | */
38 | _write(bytes, encoding, callback) {
39 | const itemKey = utils.createItemKeyFromIndex(this._fileKey, this._index);
40 |
41 | this._sBucket._db.put(itemKey, bytes, (err) => {
42 |
43 |       if (bytes.length === this._sBucket._chunkSize) {
44 | this._sBucket._chunkFree.push(bytes);
45 | }
46 |
47 | if (err) {
48 | return callback(err);
49 | }
50 |
51 | this._index++;
52 | callback();
53 | });
54 | }
55 |
56 | /**
57 | * Destroys and aborts any writes for this stream
58 |    * @param {Sbucket~unlinkCallback} callback
59 | */
60 | destroy(callback) {
61 | this._sBucket.unlink(this._fileKey, callback);
62 | }
63 |
64 | }
65 |
66 | module.exports = WritableFileStream;
67 |
--------------------------------------------------------------------------------
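A WritableFileStream is normally obtained through Btable#createWriteStream rather than constructed directly. A minimal sketch that mirrors perf/write-speed.js below; the table path and input file are illustrative:

const fs = require('fs');
const kfs = require('kfs');

const db = kfs('/tmp/demo'); // coerceTablePath appends the '.kfs' extension
const key = kfs.utils.createReferenceId().toString('hex');

db.createWriteStream(key, function(err, writeStream) {
  if (err) {
    return console.error(err);
  }

  // each chunk is stored under an item key of the form
  // '<fileKey> <zero-padded index>' (see utils.createItemKeyFromIndex)
  fs.createReadStream('/tmp/example.dat')
    .pipe(writeStream)
    .on('error', console.error)
    .on('finish', function() {
      console.log('stored under key %s', key);
    });
});
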
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kfs",
3 | "version": "4.0.0",
4 | "description": "a kademlia inspired local file store based on leveldb ",
5 | "main": "index.js",
6 | "bin": {
7 | "kfs": "bin/kfs.js"
8 | },
9 | "scripts": {
10 | "test": "npm run testsuite && npm run linter",
11 | "testsuite": "mocha test/** --recursive",
12 | "linter": "jshint --config .jshintrc ./index.js ./lib ./test",
13 | "coverage": "istanbul cover _mocha -- --recursive",
14 | "generate-docs": "mkdir -p ./jsdoc && rm -r ./jsdoc && jsdoc index.js lib -r -R README.md -u ./doc -c .jsdoc.json --verbose -d ./jsdoc && mkdir -p jsdoc/doc/img && cp -r doc/img/* jsdoc/doc/img",
15 | "deploy-docs": "gh-pages -d jsdoc --repo git@github.com:Storj/kfs.git",
16 | "benchmark": "node perf/index.js exec"
17 | },
18 | "repository": {
19 | "type": "git",
20 | "url": "git+https://github.com/storj/kfs.git"
21 | },
22 | "keywords": [
23 | "kad",
24 | "kademlia",
25 | "leveldb",
26 | "levelup",
27 | "leveldown",
28 | "file",
29 | "store",
30 | "system"
31 | ],
32 | "author": "Gordon Hall ",
33 | "license": "GPL-3.0",
34 | "bugs": {
35 | "url": "https://github.com/storj/kfs/issues"
36 | },
37 | "homepage": "https://github.com/storj/kfs#readme",
38 | "dependencies": {
39 | "async": "^2.5.0",
40 | "commander": "^2.9.0",
41 | "leveldown": "^2.0.0",
42 | "merge": "^1.2.0",
43 | "mkdirp": "^0.5.1",
44 | "readable-stream": "^2.3.3"
45 | },
46 | "devDependencies": {
47 | "chai": "^3.5.0",
48 | "coveralls": "^2.11.12",
49 | "gh-pages": "^0.11.0",
50 | "ink-docstrap": "bookchin/docstrap",
51 | "istanbul": "^0.4.5",
52 | "jsdoc": "^3.4.0",
53 | "jshint": "^2.9.3",
54 | "memdown": "^1.2.0",
55 | "mocha": "^3.0.2",
56 | "noisegen": "^1.0.0",
57 | "proxyquire": "^1.7.10",
58 | "rimraf": "^2.5.4",
59 | "sinon": "^1.17.5"
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
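The benchmark script above runs perf/index.js with the exec argument. As the argv handling in the next file shows, an optional iteration count and results path may follow, and the KFS_PERF_DIR environment variable overrides the sandbox location. Illustrative invocations:

npm run benchmark
KFS_PERF_DIR=/mnt/ssd node perf/index.js exec 5 ./results.json
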
/perf/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var os = require('os');
4 | var path = require('path');
5 | var mkdirp = require('mkdirp');
6 | var rimraf = require('rimraf');
7 | var async = require('async');
8 | var kfs = require('..');
9 | var fs = require('fs');
10 |
11 | var runWriteSpeedBenchmark = require('./write-speed');
12 | var runReadSpeedBenchmark = require('./read-speed');
13 | var runUnlinkSpeedBenchmark = require('./unlink-speed');
14 |
15 | /**
16 | * Runs the performance benchmarks and passes results to callback
17 | * @param {Object} options
18 | * @param {String} options.tmpPath - Path to create the sandbox
19 | * @param {String} options.tablePath - Path to create the database
20 | * @param {Btable} options.bTable - The {@link Btable} instance to use
21 | * @param {Function} callback
22 | */
23 | module.exports = function(options, callback) {
24 | async.waterfall([
25 | runWriteSpeedBenchmark.bind(null, options),
26 | runReadSpeedBenchmark.bind(null, options),
27 | runUnlinkSpeedBenchmark.bind(null, options)
28 | ], callback);
29 | };
30 |
31 | /**
32 |  * Formats the write, read, unlink, and flush results into a single object
33 |  * @param {Object} wRes - The write benchmark results
34 |  * @param {Object} rRes - The read benchmark results
35 |  * @param {Object} uRes - The unlink benchmark results
36 |  */
37 | module.exports.formatResults = function(wRes, rRes, uRes, fRes) {
38 | return {
39 | writes: wRes,
40 | reads: rRes,
41 | unlinks: uRes,
42 | flush: fRes
43 | };
44 | };
45 |
46 | // NB: If we are running this as a script, go ahead and execute and print out
47 | if (process.argv[2] === 'exec') {
48 | var testsRun = 0;
49 |   var tests = parseInt(process.argv[3], 10) || 1;
50 | var resultsOut = process.argv[4];
51 | var referenceId = kfs.utils.createReferenceId().toString('hex');
52 |
53 | var TMP_PATH = path.join(
54 | process.env.KFS_PERF_DIR || os.tmpdir(),
55 | 'KFS_PERF_SANDBOX'
56 | );
57 | var TABLE_PATH = path.join(TMP_PATH, Date.now().toString());
58 |
59 | if (kfs.utils.fileDoesExist(TMP_PATH)) {
60 | rimraf.sync(TMP_PATH);
61 | }
62 |
63 | mkdirp.sync(TABLE_PATH);
64 |
65 | var bTable = kfs(TABLE_PATH, { referenceId: referenceId });
66 | var results = [];
67 |
68 | function runBenchmarkTests() {
69 | console.log('Running test %s', testsRun + 1);
70 |
71 | module.exports({
72 | tmpPath: TMP_PATH,
73 | tablePath: TABLE_PATH,
74 | bTable: bTable
75 | }, function(err, wResults, rResults, uResults, fResults) {
76 | if (err) {
77 | return console.error('Error running benchmarks:', err);
78 | }
79 |
80 | testsRun++;
81 |
82 | results.push(module.exports.formatResults(
83 | wResults,
84 | rResults,
85 | uResults,
86 | fResults
87 | ));
88 |
89 | if (testsRun < tests) {
90 | return runBenchmarkTests();
91 | }
92 |
93 |       console.log('Cleaning test environment...');
94 | rimraf.sync(TMP_PATH);
95 |
96 | if (resultsOut) {
97 | fs.writeFileSync(resultsOut, JSON.stringify(results));
98 | console.info('Results written to %s', resultsOut);
99 | } else {
100 | console.info(require('util').inspect(results, { depth: null }));
101 | }
102 | });
103 | }
104 |
105 | runBenchmarkTests();
106 | }
107 |
--------------------------------------------------------------------------------
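The runner above can also be driven programmatically. A hedged sketch, assuming the sandbox directory already exists (the exec branch arranges this with mkdirp) and with illustrative paths:

const kfs = require('kfs');
const runBenchmarks = require('./perf');

const tmpPath = '/tmp/KFS_PERF_SANDBOX';
const tablePath = tmpPath + '/db';

runBenchmarks({
  tmpPath: tmpPath,
  tablePath: tablePath,
  bTable: kfs(tablePath)
}, function(err, wRes, rRes, uRes, fRes) {
  if (err) {
    return console.error('Benchmarks failed:', err);
  }

  console.log(runBenchmarks.formatResults(wRes, rRes, uRes, fRes));
});
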
/perf/read-speed.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kfs = require('..');
4 | var async = require('async');
5 |
6 | module.exports = function(options, wResults, callback) {
7 | console.log('Starting read tests from previously written data...');
8 |
9 | var results = [];
10 | var database = options.bTable;
11 |
12 | async.eachOfSeries(wResults, function(writeResultItem, i, next) {
13 | database.createReadStream(writeResultItem.fileKey, function(err, stream) {
14 | if (err) {
15 | return next(err);
16 | }
17 |
18 | var bytesRead = 0;
19 | var time = 0;
20 | var timer = setInterval(function() { time += 10; }, 10);
21 |
22 | stream.on('error', function(err) {
23 | next(err);
24 | });
25 |
26 | stream.on('data', function(chunk) {
27 | bytesRead += chunk.length;
28 | });
29 |
30 | stream.on('end', function() {
31 | clearInterval(timer);
32 | results.push({
33 | msElapsed: time,
34 | fileKey: writeResultItem.fileKey,
35 | sBucketIndex: writeResultItem.sBucketIndex,
36 | fileSizeBytes: bytesRead
37 | });
38 | next();
39 | });
40 | });
41 | }, function(err) {
42 | if (err) {
43 | return callback(err);
44 | }
45 |
46 | callback(null, wResults, results);
47 | });
48 | };
49 |
--------------------------------------------------------------------------------
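Reading a file back follows the same callback-first pattern as the write path. A minimal sketch; the key placeholder stands in for one returned by a previous write:

const kfs = require('kfs');
const db = kfs('/tmp/demo');
const key = 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc'; // hypothetical file key

db.createReadStream(key, function(err, readStream) {
  if (err) {
    return console.error(err);
  }

  var bytesRead = 0;

  readStream.on('data', function(chunk) {
    bytesRead += chunk.length;
  });
  readStream.on('end', function() {
    console.log('read %d bytes', bytesRead);
  });
  readStream.on('error', console.error);
});
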
/perf/unlink-speed.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kfs = require('..');
4 | var async = require('async');
5 |
6 | module.exports = function(options, wResults, rResults, callback) {
7 | console.log('Unlinking (deleting) data written to database...');
8 |
9 | var uResults = [];
10 | var database = options.bTable;
11 | var totalSizeFlushed = 0;
12 |
13 | async.eachOfSeries(rResults, function(readResultItem, i, next) {
14 | var time = 0;
15 |     var timer = setInterval(function() { time += 10; }, 10);
16 |
17 | totalSizeFlushed += readResultItem.fileSizeBytes;
18 |
19 | database.unlink(readResultItem.fileKey, function(err) {
20 |       clearInterval(timer);
21 | 
22 |       if (err) {
23 |         return next(err);
24 |       }
25 | uResults.push({
26 | msElapsed: time,
27 | fileKey: readResultItem.fileKey,
28 | sBucketIndex: readResultItem.sBucketIndex,
29 | fileSizeBytes: readResultItem.fileSizeBytes
30 | });
31 | next();
32 | });
33 | }, function(err) {
34 | if (err) {
35 | return callback(err);
36 | }
37 |
38 | var time = 0;
39 |     var timer = setInterval(function() { time += 10; }, 10);
40 | 
41 |     console.log('Flushing (compacting) data unlinked from database...');
42 |     database.flush(function(err) {
43 |       clearInterval(timer);
44 | 
45 |       var fResults = {
46 |         msElapsed: time,
47 |         bytesFlushed: totalSizeFlushed
48 |       };
49 | 
50 |       callback(err, wResults, rResults, uResults, fResults);
51 |     });
52 |   });
53 | };
54 | 
--------------------------------------------------------------------------------
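Unlinking deletes a file's chunk keys, but disk space is generally only reclaimed once the underlying LevelDB is compacted, which is why the script above times Btable#flush separately. A sketch, with db and key as in the earlier examples:

db.unlink(key, function(err) {
  if (err) {
    return console.error(err);
  }

  // compact every s-bucket so the deleted chunks are actually reclaimed
  db.flush(function(err) {
    if (err) {
      return console.error(err);
    }

    console.log('unlinked and flushed');
  });
});
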
/perf/write-speed.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var kfs = require('..');
4 | var noisegen = require('noisegen');
5 | var fs = require('fs');
6 | var async = require('async');
7 | var path = require('path');
8 |
9 | module.exports = function(options, callback) {
10 | console.log('Generating some random files, hold on...');
11 |
12 | var results = [];
13 | var database = options.bTable;
14 | var index = 0;
15 |
16 | async.eachSeries([
17 | 8 * (1024 * 1024),
18 | 16 * (1024 * 1024),
19 | 32 * (1024 * 1024),
20 | 64 * (1024 * 1024),
21 | 128 * (1024 * 1024),
22 | 256 * (1024 * 1024),
23 | 512 * (1024 * 1024)
24 | ], function(numBytes, next) {
25 | console.log('Preparing %s byte file...', numBytes);
26 | var noise = noisegen({ length: numBytes });
27 | var testPath = path.join(options.tmpPath, index.toString() + '.dat');
28 |
29 | index++;
30 |
31 | if (!kfs.utils.fileDoesExist(testPath)) {
32 | var file = fs.createWriteStream(testPath);
33 | noise.pipe(file).on('error', next).on('finish', next);
34 | } else {
35 | next();
36 | }
37 |   }, function(err) {
38 |     if (err) { return callback(err); }
39 |     console.log('Test files prepared, writing to KFS...');
40 | async.eachSeries([
41 | '0.dat',
42 | '1.dat',
43 | '2.dat',
44 | '3.dat',
45 | '4.dat',
46 | '5.dat',
47 | '6.dat'
48 | ], function(testFileName, next) {
49 | var time = 0;
50 |       var timer = setInterval(function() { time += 10; }, 10);
51 | var key = kfs.utils.createReferenceId().toString('hex');
52 | var pathToTestFile = path.join(options.tmpPath, testFileName);
53 |
54 | database.createWriteStream(key, function(err, writeStream) {
55 |         if (err) {
56 |           clearInterval(timer);
57 |           return next(err);
58 |         }
59 | fs.createReadStream(
60 | pathToTestFile
61 | ).pipe(writeStream).on('error', function(err) {
62 | clearInterval(timer);
63 | next(err);
64 | }).on('finish', function() {
65 | clearInterval(timer);
66 | results.push({
67 | msElapsed: time,
68 | fileKey: key,
69 | sBucketIndex: database._getSbucketIndexForKey(key),
70 | fileSizeBytes: fs.statSync(pathToTestFile).size
71 | });
72 | next();
73 | });
74 | });
75 | }, function(err) {
76 | if (err) {
77 | return callback(err);
78 | }
79 |
80 | callback(null, results);
81 | });
82 | });
83 | };
84 |
--------------------------------------------------------------------------------
/test/b-table.integration.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var os = require('os');
4 | var path = require('path');
5 | var utils = require('../lib/utils');
6 | var Btable = require('../lib/b-table');
7 | var expect = require('chai').expect;
8 | var rimraf = require('rimraf');
9 | var mkdirp = require('mkdirp');
10 | var crypto = require('crypto');
11 | var sinon = require('sinon');
12 |
13 | describe('Btable/Integration', function() {
14 |
15 | var TMP_DIR = path.join(os.tmpdir(), 'KFS_SANDBOX');
16 | var TABLE_PATH = path.join(TMP_DIR, 'testdb-btable-integration');
17 | var db = null;
18 |
19 | before(function(done) {
20 | if (utils.fileDoesExist(TMP_DIR)) {
21 | rimraf.sync(TMP_DIR);
22 | }
23 | mkdirp(TMP_DIR, function() {
24 | db = new Btable(TABLE_PATH);
25 | done();
26 | });
27 | });
28 |
29 | describe('#writeFile', function() {
30 |
31 | it('should write the file to the database', function(done) {
32 |       var fileData = Buffer.from('hello kfs!');
33 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
34 | db.writeFile(fileHash, fileData, function(err) {
35 | expect(err).to.equal(null);
36 | done();
37 | });
38 | });
39 |
40 | it('should callback with error if cannot get bucket', function(done) {
41 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
42 | 1,
43 | new Error('Failed')
44 | );
45 |       db.writeFile('0000', Buffer.from([]), function(err) {
46 | _getSbucketForKey.restore();
47 | expect(err.message).to.equal('Failed');
48 | done();
49 | });
50 | });
51 |
52 | });
53 |
54 | describe('#createWriteStream', function() {
55 |
56 | it('should write the stream to the database', function(done) {
57 |       var fileData = Buffer.from('kfs hello!');
58 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
59 | db.createWriteStream(fileHash, function(err, writableStream) {
60 | expect(err).to.equal(null);
61 | writableStream.on('finish', done);
62 | writableStream.on('error', done);
63 | writableStream.write(fileData);
64 | writableStream.end();
65 | });
66 | });
67 |
68 | it('should callback with error if cannot get bucket', function(done) {
69 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
70 | 1,
71 | new Error('Failed')
72 | );
73 | db.createWriteStream('0000', function(err) {
74 | _getSbucketForKey.restore();
75 | expect(err.message).to.equal('Failed');
76 | done();
77 | });
78 | });
79 |
80 | });
81 |
82 | describe('#list', function() {
83 |
84 | it('should bubble errors from #_getSbucketForKey', function(done) {
85 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
86 | 1,
87 | new Error('Failed')
88 | );
89 | db.list('somekey', function(err) {
90 | _getSbucketForKey.restore();
91 | expect(err.message).to.equal('Failed');
92 | done();
93 | });
94 | });
95 |
96 | it('should call SBucket#list', function(done) {
97 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
98 | 1,
99 | null,
100 | {
101 | list: function(cb) {
102 | _getSbucketForKey.restore();
103 | cb();
104 | }
105 | }
106 | );
107 | db.list('somekey', done);
108 | });
109 |
110 | });
111 |
112 | describe('#flush', function() {
113 |
114 | it('should call Sbucket#flush for each bucket', function(done) {
115 | let flush = sinon.stub().callsArg(0);
116 | let _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
117 | 1,
118 | null,
119 | {
120 | flush: flush
121 | }
122 | );
123 | db.flush(() => {
124 | _getSbucketForKey.restore();
125 | expect(_getSbucketForKey.callCount).to.equal(2);
126 | expect(flush.callCount).to.equal(2);
127 | done();
128 | });
129 | });
130 |
131 | });
132 |
133 | describe('#stat', function() {
134 |
135 | it('should return the stats for existing buckets', function(done) {
136 | db.stat(function(err, results) {
137 | expect(results).to.have.lengthOf(2);
138 | done();
139 | });
140 | });
141 |
142 | it('should return the stats for the given bucket only', function(done) {
143 | db.stat('001.s', function(err, stats) {
144 | expect(stats).to.have.lengthOf(1);
145 | done();
146 | });
147 | });
148 |
149 | it('should bubble errors from _getSbucketForKey', function(done) {
150 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
151 | 1,
152 | new Error('Failed')
153 | );
154 | db.stat(function(err) {
155 | _getSbucketForKey.restore();
156 | expect(err.message).to.equal('Failed');
157 | done();
158 | });
159 | });
160 |
161 | it('should bubble errors from Sbucket#stat', function(done) {
162 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
163 | 1,
164 | null,
165 | { stat: sinon.stub().callsArgWith(0, new Error('Failed')) }
166 | );
167 | db.stat(function(err) {
168 | _getSbucketForKey.restore();
169 | expect(err.message).to.equal('Failed');
170 | done();
171 | });
172 |
173 | });
174 |
175 | });
176 |
177 | describe('#readFile', function() {
178 |
179 | it('should read the file from the database', function(done) {
180 |       var fileData = Buffer.from('hello kfs!');
181 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
182 | db.readFile(fileHash, function(err, result) {
183 | expect(err).to.equal(null);
184 | expect(Buffer.compare(result, fileData)).to.equal(0);
185 | done();
186 | });
187 | });
188 |
189 | it('should callback with error if cannot get bucket', function(done) {
190 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
191 | 1,
192 | new Error('Failed')
193 | );
194 | db.readFile('0000', function(err) {
195 | _getSbucketForKey.restore();
196 | expect(err.message).to.equal('Failed');
197 | done();
198 | });
199 | });
200 |
201 | });
202 |
203 | describe('#createReadStream', function() {
204 |
205 |     it('should read the file back from the database', function(done) {
206 |       var fileData = Buffer.from('kfs hello!');
207 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
208 | db.createReadStream(fileHash, function(err, readableStream) {
209 | expect(err).to.equal(null);
210 |       var data = Buffer.from([]);
211 | readableStream.on('data', function(chunk) {
212 | data = Buffer.concat([data, chunk]);
213 | });
214 | readableStream.on('end', function() {
215 | expect(Buffer.compare(fileData, data)).to.equal(0);
216 | done();
217 | });
218 | readableStream.on('error', done);
219 | });
220 | });
221 |
222 | it('should callback with error if cannot get bucket', function(done) {
223 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
224 | 1,
225 | new Error('Failed')
226 | );
227 | db.createReadStream('0000', function(err) {
228 | _getSbucketForKey.restore();
229 | expect(err.message).to.equal('Failed');
230 | done();
231 | });
232 | });
233 |
234 | });
235 |
236 | describe('#exists', function() {
237 |
238 |     it('should callback true for an existing key', function(done) {
239 |       var fileData = Buffer.from('hello kfs!');
240 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
241 | db.exists(fileHash, function(err, exists) {
242 | expect(exists).to.equal(true);
243 | done();
244 | });
245 | });
246 |
247 | it('should callback false for non-existent key', function(done) {
248 | var key = utils.createReferenceId().toString('hex');
249 | db.exists(key, function(err, exists) {
250 | expect(exists).to.equal(false);
251 | done();
252 | });
253 | });
254 |
255 | it('should callback with error if cannot get bucket', function(done) {
256 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
257 | 1,
258 | new Error('Failed')
259 | );
260 | db.exists('0000', function(err) {
261 | _getSbucketForKey.restore();
262 | expect(err.message).to.equal('Failed');
263 | done();
264 | });
265 | });
266 |
267 | });
268 |
269 | describe('#unlink', function() {
270 |
271 | it('should destroy the file from the database', function(done) {
272 |       var fileData = Buffer.from('hello kfs!');
273 | var fileHash = crypto.createHash('sha1').update(fileData).digest('hex');
274 | db.unlink(fileHash, function(err) {
275 | expect(err).to.equal(null);
276 | db.exists(fileHash, function(err, exists) {
277 | expect(exists).to.equal(false);
278 | done();
279 | });
280 | });
281 | });
282 |
283 | it('should callback with error if cannot get bucket', function(done) {
284 | var _getSbucketForKey = sinon.stub(db, '_getSbucketForKey').callsArgWith(
285 | 1,
286 | new Error('Failed')
287 | );
288 | db.unlink('0000', function(err) {
289 | _getSbucketForKey.restore();
290 | expect(err.message).to.equal('Failed');
291 | done();
292 | });
293 | });
294 |
295 | });
296 |
297 | after(function() {
298 | rimraf.sync(TMP_DIR);
299 | });
300 |
301 | });
302 |
303 |
--------------------------------------------------------------------------------
/test/b-table.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var Sbucket = require('../lib/s-bucket');
4 | var Btable = require('../lib/b-table');
5 | var expect = require('chai').expect;
6 | var sinon = require('sinon');
7 | var EventEmitter = require('events').EventEmitter;
8 | var utils = require('../lib/utils');
9 | var proxyquire = require('proxyquire');
10 |
11 | describe('Btable', function() {
12 |
13 | describe('@constructor', function() {
14 |
15 | var _open;
16 |
17 | before(function() {
18 | _open = sinon.stub(Btable.prototype, '_open');
19 | });
20 |
21 | it('should merge options with defaults', function() {
22 | var bTable = new Btable('some/path', { someOption: true });
23 | expect(bTable._options.someOption).to.equal(true);
24 | expect(bTable._options.referenceId).to.equal(null);
25 | });
26 |
27 | it('should coerce the table path to add extension', function() {
28 | var bTable = new Btable('some/path');
29 | expect(bTable._tablePath).to.equal('some/path.kfs');
30 | });
31 |
32 | it('should create a reference id', function() {
33 | var bTable = new Btable('');
34 | expect(Buffer.isBuffer(bTable._rid)).to.equal(true);
35 | });
36 |
37 | after(function() {
38 | _open.restore();
39 | });
40 |
41 | });
42 |
43 | describe('#_open', function() {
44 |
45 | var _initBtableDirectory;
46 | var _validateTablePath;
47 | var StubbedBtable = proxyquire('../lib/b-table', {
48 | fs: {
49 | readFileSync: sinon.stub().returns(
50 | utils.createReferenceId().toString('hex')
51 | )
52 | }
53 | });
54 |
55 | before(function() {
56 | _initBtableDirectory = sinon.stub(
57 | StubbedBtable.prototype,
58 | '_initBtableDirectory'
59 | );
60 | _validateTablePath = sinon.stub(
61 | StubbedBtable.prototype,
62 | '_validateTablePath'
63 | );
64 | });
65 |
66 |     it('should initialize the db if it does not exist', function(done) {
67 | /* jshint unused:false */
68 | var _fileDoesExist = sinon.stub(utils, 'fileDoesExist').returns(false);
69 | var bTable = new StubbedBtable('');
70 | _fileDoesExist.restore();
71 | setImmediate(function() {
72 |         expect(_initBtableDirectory.called).to.equal(true); done();
73 | });
74 | });
75 |
76 |     it('should validate the db if it does exist', function(done) {
77 | /* jshint unused:false */
78 | var _fileDoesExist = sinon.stub(utils, 'fileDoesExist').returns(true);
79 | var bTable = new StubbedBtable('');
80 | _fileDoesExist.restore();
81 | setImmediate(function() {
82 |         expect(_validateTablePath.called).to.equal(true); done();
83 | });
84 | });
85 |
86 | it('should throw an error if validating table path fails', function() {
87 | /* jshint unused:false */
88 | _validateTablePath.restore();
89 | _validateTablePath = sinon.stub(
90 | StubbedBtable.prototype,
91 | '_validateTablePath'
92 | ).throws(new Error('Failed'));
93 | var _fileDoesExist = sinon.stub(utils, 'fileDoesExist').returns(true);
94 | expect(function() {
95 | var bTable = new StubbedBtable('');
96 | }).to.throw(Error, 'Failed');
97 | _fileDoesExist.restore();
98 | });
99 |
100 | after(function() {
101 | _initBtableDirectory.restore();
102 | _validateTablePath.restore();
103 | });
104 |
105 | });
106 |
107 | describe('#_initBtableDirectory', function() {
108 |
109 | it('should throw error if mkdirp fails', function() {
110 | var StubbedBtable = proxyquire('../lib/b-table', {
111 | mkdirp: {
112 | sync: sinon.stub().throws(new Error('Failed'))
113 | }
114 | });
115 | expect(function() {
116 | StubbedBtable.prototype._initBtableDirectory.call({
117 | _tablePath: 'some/path.kfs'
118 | });
119 | }).to.throw(Error, 'Failed');
120 | });
121 |
122 | it('should write the reference id file and callback', function() {
123 | var _mkdirp = sinon.stub();
124 | var _writeFileSync = sinon.stub();
125 | var StubbedBtable = proxyquire('../lib/b-table', {
126 | mkdirp: {
127 | sync: _mkdirp
128 | },
129 | fs: {
130 | writeFileSync: _writeFileSync
131 | }
132 | });
133 | StubbedBtable.prototype._initBtableDirectory.call({
134 | _tablePath: 'some/path.kfs'
135 | });
136 | expect(_mkdirp.called).to.equal(true);
137 | expect(_writeFileSync.called).to.equal(true);
138 | });
139 |
140 | });
141 |
142 | describe('#_validateTablePath', function() {
143 |
144 | it('should throw error if not a directory', function() {
145 | var StubbedBtable = proxyquire('../lib/b-table', {
146 | fs: {
147 | statSync: sinon.stub().returns({
148 | isDirectory: sinon.stub().returns(false)
149 | })
150 | }
151 | });
152 | expect(function() {
153 | StubbedBtable.prototype._validateTablePath.call({
154 | _tablePath: 'some/path.kfs'
155 | });
156 | }).to.throw(Error, 'Table path is not a directory');
157 | });
158 |
159 | it('should throw error if not valid table', function() {
160 | var StubbedBtable = proxyquire('../lib/b-table', {
161 | fs: {
162 | statSync: sinon.stub().returns({
163 | isDirectory: sinon.stub().returns(true)
164 | }),
165 | readdirSync: sinon.stub().returns([])
166 | }
167 | });
168 | expect(function() {
169 | StubbedBtable.prototype._validateTablePath.call({
170 | _tablePath: 'some/path.kfs'
171 | });
172 | }).to.throw(Error, 'Table path is not a valid KFS instance');
173 | });
174 |
175 | it('should not throw if valid table', function() {
176 | var StubbedBtable = proxyquire('../lib/b-table', {
177 | fs: {
178 | statSync: sinon.stub().returns({
179 | isDirectory: sinon.stub().returns(true)
180 | }),
181 | readdirSync: sinon.stub().returns([Btable.RID_FILENAME])
182 | }
183 | });
184 | expect(function() {
185 | StubbedBtable.prototype._validateTablePath.call({
186 | _tablePath: 'some/path.kfs'
187 | });
188 | }).to.not.throw(Error);
189 | });
190 |
191 | });
192 |
193 | describe('#_getSbucketIndexForKey', function() {
194 |
195 | it('should return the correct xor value', function() {
196 | expect(
197 | Btable.prototype._getSbucketIndexForKey.call({
198 |           _rid: Buffer.from('00', 'hex')
199 | }, '00')
200 | ).to.equal(96);
201 | });
202 |
203 | });
204 |
205 | describe('#_getSbucketAtIndex', function() {
206 |
207 | it('should return the existing sBucket', function() {
208 | var _sBucket = new EventEmitter();
209 | var sBucket = Btable.prototype._getSbucketAtIndex.call({
210 | _sBuckets: { 0: _sBucket }
211 | }, 0);
212 | expect(sBucket).to.equal(_sBucket);
213 | });
214 |
215 | it('should create a new sBucket at the index', function() {
216 | var StubbedBtable = proxyquire('../lib/b-table', {
217 | './s-bucket': EventEmitter
218 | });
219 | var sBucket = StubbedBtable.prototype._getSbucketAtIndex.call({
220 | _sBuckets: {},
221 | _tablePath: 'some/path.kfs',
222 | _options: { sBucketOpts: {} }
223 | }, 0);
224 | expect(sBucket).to.be.instanceOf(EventEmitter);
225 | });
226 |
227 | it('should close the s-bucket on idle', function(done) {
228 | var StubbedBtable = proxyquire('../lib/b-table', {
229 | './s-bucket': EventEmitter
230 | });
231 | var sBucket = StubbedBtable.prototype._getSbucketAtIndex.call({
232 | _sBuckets: {},
233 | _tablePath: 'some/path.kfs',
234 | _options: { sBucketOpts: {} }
235 | }, 0);
236 | var _close = sinon.stub(sBucket, 'close');
237 | sBucket.emit('idle');
238 | setImmediate(() => {
239 | expect(_close.called).to.equal(true);
240 | done();
241 | });
242 | });
243 |
244 | });
245 |
246 | describe('#_getSbucketForKey', function() {
247 |
248 | it('should callback with the sBucket if opened', function(done) {
249 | var sBucket = new EventEmitter();
250 | sBucket.readyState = Sbucket.OPENED;
251 | var _getSbucketAtIndex = sinon.stub(
252 | Btable.prototype,
253 | '_getSbucketAtIndex'
254 | ).returns(sBucket);
255 | var _open = sinon.stub(Btable.prototype, '_open');
256 | var bTable = new Btable('');
257 | bTable._getSbucketForKey('0f0f', function(err, s) {
258 | _getSbucketAtIndex.restore();
259 | _open.restore();
260 | expect(s).to.equal(sBucket);
261 | done();
262 | });
263 | });
264 |
265 | it('should callback with error if opening sBucket fails', function(done) {
266 | var sBucket = new EventEmitter();
267 | sBucket.readyState = Sbucket.CLOSED;
268 | sBucket.open = sinon.stub().callsArgWith(0, new Error('Failed'));
269 | var _getSbucketAtIndex = sinon.stub(
270 | Btable.prototype,
271 | '_getSbucketAtIndex'
272 | ).returns(sBucket);
273 | var _open = sinon.stub(Btable.prototype, '_open');
274 | var bTable = new Btable('');
275 | bTable._getSbucketForKey('0f0f', function(err) {
276 | _getSbucketAtIndex.restore();
277 | _open.restore();
278 | expect(err.message).to.equal('Failed');
279 | done();
280 | });
281 | });
282 |
283 | it('should callback when sBucket is opened', function(done) {
284 | var sBucket = new EventEmitter();
285 | sBucket.readyState = Sbucket.CLOSED;
286 | sBucket.open = sinon.stub().callsArgWith(0);
287 | var _getSbucketAtIndex = sinon.stub(
288 | Btable.prototype,
289 | '_getSbucketAtIndex'
290 | ).returns(sBucket);
291 | var _open = sinon.stub(Btable.prototype, '_open');
292 | var bTable = new Btable('');
293 | bTable._getSbucketForKey('0f0f', function(err, s) {
294 | _getSbucketAtIndex.restore();
295 | _open.restore();
296 | expect(err).to.equal(null);
297 | expect(s).to.equal(sBucket);
298 | done();
299 | });
300 | });
301 |
302 | });
303 |
304 | });
305 |
--------------------------------------------------------------------------------
/test/block-stream.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var BlockStream = require('../lib/block-stream');
4 | var expect = require('chai').expect;
5 |
6 | describe('BlockStream', function() {
7 |
8 | describe('#_flush', function() {
9 |
10 | it('should pad the last chunk with zeros', function(done) {
11 | var bs = new BlockStream({
12 | sBucket: {
13 | _chunkFree: [],
14 |           _chunkSize: 12
15 | },
16 | padLastChunk: true
17 | });
18 | var buf = Buffer.from([]);
19 | bs.on('data', function(data) {
20 | buf = Buffer.concat([buf, data]);
21 | });
22 | bs.on('end', function() {
23 | expect(buf).to.have.lengthOf(24);
24 |         expect(Buffer.compare(buf.slice(18), Buffer.alloc(6))).to.equal(0);
25 | done();
26 | });
27 |       bs.write(Buffer.alloc(6, 1));
28 |       bs.write(Buffer.alloc(6, 1));
29 |       bs.write(Buffer.alloc(6, 1));
30 | bs.end();
31 | });
32 |
33 | });
34 |
35 | describe('#_transform', function() {
36 |
37 | it('should return N-sized chunks', function(done) {
38 | const bs = new BlockStream({
39 | sBucket: {
40 | _chunkFree: [],
41 |           _chunkSize: 12
42 | },
43 | padLastChunk: false
44 | });
45 | const ar = [];
46 | bs.on('data', data => ar.push(data));
47 | bs.once('end', function() {
48 | expect(Buffer.compare(ar.shift(), Buffer.from('ABCDEFGHIJKL')))
49 | .to
50 | .equal(0);
51 |
52 | expect(Buffer.compare(ar.shift(), Buffer.from('MNOPQRSTUVWX')))
53 | .to
54 | .equal(0);
55 |
56 | expect(Buffer.compare(ar.shift(), Buffer.from('YZ')))
57 | .to
58 | .equal(0);
59 |
60 | done();
61 | });
62 | bs.write('AB');
63 | bs.write('CDEFGHIJKL');
64 | bs.write('MNOPQRSTUVWXY');
65 | bs.write('Z');
66 | bs.end();
67 | });
68 |
69 | });
70 |
71 | });
72 |
--------------------------------------------------------------------------------
/test/read-stream.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var ReadableFileStream = require('../lib/read-stream');
4 | var sinon = require('sinon');
5 | var expect = require('chai').expect;
6 | var utils = require('../lib/utils');
7 |
8 | describe('ReadableFileStream', function() {
9 |
10 | describe('#_read', function() {
11 |
12 | it('should emit an error if get fails', function(done) {
13 | var rs = new ReadableFileStream({
14 | fileKey: utils.createReferenceId(),
15 | sBucket: {
16 | _db: {
17 | get: sinon.stub().callsArgWith(1, new Error('Failed'))
18 | }
19 | }
20 | });
21 | rs.on('error', function(err) {
22 | expect(err.message).to.equal('Failed');
23 | done();
24 | });
25 | rs.read();
26 | });
27 |
28 | });
29 |
30 | describe('#destroy', function() {
31 |
32 | it('should call Sbucket#unlink', function(done) {
33 | var _unlink = sinon.stub().callsArg(1);
34 | var rs = new ReadableFileStream({
35 | fileKey: utils.createReferenceId(),
36 | sBucket: {
37 | unlink: _unlink
38 | }
39 | });
40 | rs.destroy(done);
41 | });
42 |
43 | });
44 |
45 | });
46 |
--------------------------------------------------------------------------------
/test/s-bucket.integration.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var Sbucket = require('../lib/s-bucket');
4 | var expect = require('chai').expect;
5 | var sinon = require('sinon');
6 | var async = require('async');
7 | var os = require('os');
8 | var path = require('path');
9 | var utils = require('../lib/utils');
10 | var mkdirp = require('mkdirp');
11 | var rimraf = require('rimraf');
12 |
13 | describe('Sbucket/Integration', function() {
14 |
15 | var TMP_DIR = path.join(os.tmpdir(), 'KFS_SANDBOX');
16 | var BUCKET_PATH = path.join(TMP_DIR, 'testdb-sbucket-integration');
17 | var bucket = null;
18 |
19 | before(function(done) {
20 | if (utils.fileDoesExist(TMP_DIR)) {
21 | rimraf.sync(TMP_DIR);
22 | }
23 | mkdirp(TMP_DIR, function() {
24 | bucket = new Sbucket(BUCKET_PATH);
25 | bucket.open(done);
26 | });
27 | });
28 |
29 | describe('#list', function() {
30 |
31 | before(function(done) {
32 |       var file0 = Buffer.alloc(65536 * 2);
33 |       var file1 = Buffer.alloc(65536 * 2);
34 |       var file2 = Buffer.alloc(65536 * 4);
35 | var index = 0;
36 | async.eachSeries([file0, file1, file2], function(buf, next) {
37 | buf.fill(1);
38 | bucket.writeFile(index.toString(), buf, function(err) {
39 | index++;
40 | next(err);
41 | });
42 | }, done);
43 | });
44 |
45 | it('should list all of the files', function(done) {
46 | bucket.list(function(err, list) {
47 | expect(list).to.have.lengthOf(3);
48 | done();
49 | });
50 | });
51 |
52 |     it('should bubble errors from the iterator', function(done) {
53 | var _iterator = sinon.stub(bucket._db, 'iterator').returns({
54 | next: sinon.stub().callsArgWith(0, new Error('Failed'))
55 | });
56 | bucket.list(function(err) {
57 | _iterator.restore();
58 | expect(err.message).to.equal('Failed');
59 | done();
60 | });
61 | });
62 |
63 | });
64 |
65 | after(function() {
66 | rimraf.sync(TMP_DIR);
67 | });
68 |
69 | });
70 |
--------------------------------------------------------------------------------
/test/s-bucket.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var proxyquire = require('proxyquire');
4 | var Sbucket = proxyquire('../lib/s-bucket', { leveldown: require('memdown') });
5 | var expect = require('chai').expect;
6 | var sinon = require('sinon');
7 | var stream = require('readable-stream');
8 | var utils = require('../lib/utils');
9 |
10 | describe('Sbucket', function() {
11 |
12 | describe('#open', function() {
13 |
14 | it('should emit an error if open fails', function(done) {
15 | var sBucket = new Sbucket('');
16 | var _open = sinon.stub(
17 | sBucket._db,
18 | 'open'
19 | ).callsArgWith(1, new Error('Failed'));
20 | sBucket.open(function(err) {
21 | _open.restore();
22 | expect(err.message).to.equal('Failed');
23 | done();
24 | });
25 | });
26 |
27 | it('should not require a callback', function(done) {
28 | var sBucket = new Sbucket('');
29 | sBucket.on('open', done);
30 | setImmediate(function() {
31 | sBucket.open();
32 | });
33 | });
34 |
35 | it('should emit open if already opened', function(done) {
36 | var sBucket = new Sbucket('');
37 | sBucket.readyState = Sbucket.OPENED;
38 | sBucket.open(done);
39 | });
40 |
41 | it('should return if open in progress', function(done) {
42 | var sBucket = new Sbucket('');
43 | sBucket.readyState = Sbucket.OPENING;
44 | sBucket.open(done);
45 | setImmediate(function() {
46 | sBucket.emit('open');
47 | });
48 | });
49 |
50 | it('should wait until close if closing', function(done) {
51 | var sBucket = new Sbucket('');
52 | sBucket.readyState = Sbucket.CLOSING;
53 | sBucket.open(done);
54 | setImmediate(function() {
55 | sBucket.emit('close');
56 | });
57 | });
58 |
59 | });
60 |
61 | describe('#close', function() {
62 |
63 | it('should emit an error if close fails', function(done) {
64 | var sBucket = new Sbucket('');
65 | sBucket.readyState = Sbucket.OPENED;
66 | var _close = sinon.stub(
67 | sBucket._db,
68 | 'close'
69 | ).callsArgWith(0, new Error('Failed'));
70 | sBucket.close(function(err) {
71 | _close.restore();
72 | expect(err.message).to.equal('Failed');
73 | done();
74 | });
75 | });
76 |
77 | it('should not require a callback', function(done) {
78 | var sBucket = new Sbucket('');
79 | var _close = sinon.stub(
80 | sBucket._db,
81 | 'close'
82 | ).callsArgWith(0);
83 | sBucket.readyState = Sbucket.OPENED;
84 | sBucket.on('close', done);
85 | setImmediate(function() {
86 | sBucket.close();
87 | _close.restore();
88 | });
89 | });
90 |
91 | it('should emit close if already closed', function(done) {
92 | var sBucket = new Sbucket('');
93 | var _close = sinon.stub(
94 | sBucket._db,
95 | 'close'
96 | ).callsArgWith(0);
97 | sBucket.close(function() {
98 | _close.restore();
99 | done();
100 | });
101 | });
102 |
103 | it('should return if close in progress', function(done) {
104 | var sBucket = new Sbucket('');
105 |       var _close = sinon.stub(sBucket._db, 'close').callsArgWith(0);
106 | 
107 |       sBucket.readyState = Sbucket.CLOSING;
108 | 
109 | sBucket.close(done);
110 | setImmediate(function() {
111 | sBucket.emit('close');
112 | _close.restore();
113 | });
114 | });
115 |
116 | it('should wait until open if opening', function(done) {
117 | var sBucket = new Sbucket('');
118 |       var _close = sinon.stub(sBucket._db, 'close').callsArgWith(0);
119 | 
120 |       sBucket.readyState = Sbucket.OPENING;
121 | 
122 | sBucket.close(done);
123 | setImmediate(function() {
124 | sBucket.emit('open');
125 | _close.restore();
126 | });
127 | });
128 |
129 | });
130 |
131 | describe('#readFile', function() {
132 |
133 | it('should callback with error if read stream fails', function(done) {
134 | var sBucket = new Sbucket('test');
135 | var _rs = new stream.Readable({ read: utils.noop });
136 | var _createReadStream = sinon.stub(
137 | sBucket,
138 | 'createReadStream'
139 | ).returns(_rs);
140 | sBucket.readFile(utils.createReferenceId(), function(err) {
141 | _createReadStream.restore();
142 | expect(err.message).to.equal('Failed');
143 | done();
144 | });
145 | setImmediate(function() {
146 | _rs.emit('error', new Error('Failed'));
147 | });
148 | });
149 |
150 | });
151 |
152 | describe('#createWriteStream', function() {
153 |
154 | it('should return a write stream with a destroy method', function(done) {
155 | var sBucket = new Sbucket('test');
156 | var _unlink = sinon.stub(sBucket, 'unlink').callsArg(1);
157 | var writeStream = sBucket.createWriteStream(utils.createReferenceId());
158 | expect(typeof writeStream.destroy).to.equal('function');
159 | writeStream.destroy(() => {
160 | expect(_unlink.called).to.equal(true);
161 | done();
162 | });
163 | });
164 |
165 | });
166 |
167 | describe('#writeFile', function() {
168 |
169 | it('should callback with error if write stream fails', function(done) {
170 | var sBucket = new Sbucket('test');
171 | var _ws = new stream.Writable({ write: utils.noop });
172 | var _createWriteStream = sinon.stub(
173 | sBucket,
174 | 'createWriteStream'
175 | ).returns(_ws);
176 | sBucket.writeFile(
177 | utils.createReferenceId(),
178 |         Buffer.from('test'),
179 | function(err) {
180 | _createWriteStream.restore();
181 | expect(err.message).to.equal('Failed');
182 | done();
183 | }
184 | );
185 | setImmediate(function() {
186 | _ws.emit('error', new Error('Failed'));
187 | });
188 | });
189 |
190 | });
191 |
192 | describe('#stat', function() {
193 |
194 | it('should callback with error if fails to get size', function(done) {
195 | var sBucket = new Sbucket('test');
196 | var _approximateSize = sinon.stub(
197 | sBucket._db,
198 | 'approximateSize'
199 | ).callsArgWith(2, new Error('Failed'));
200 | sBucket.stat(function(err) {
201 | _approximateSize.restore();
202 | expect(err.message).to.equal('Failed');
203 | done();
204 | });
205 | });
206 |
207 | });
208 |
209 | describe('#flush', function() {
210 |
211 |     it('should compact the database', function(done) {
212 | var sBucket = new Sbucket('');
213 | var compactRange = sinon.stub(sBucket._db, 'compactRange').callsArg(2);
214 | sBucket.flush(() => {
215 | expect(compactRange.called).to.equal(true);
216 | done();
217 | });
218 | });
219 |
220 | it('should bubble errors', function(done) {
221 | var sBucket = new Sbucket('');
222 | var compactRange = sinon.stub(sBucket._db, 'compactRange').callsArgWith(
223 | 2, new Error('Failed')
224 | );
225 | sBucket.flush((err) => {
226 | compactRange.restore();
227 | expect(err.message).to.equal('Failed');
228 | done();
229 | });
230 | });
231 |
232 | });
233 |
234 | describe('#_checkIdleState', function() {
235 |
236 | it('should emit the idle event if idle for 60000ms', function(done) {
237 | var sBucket = new Sbucket('test');
238 | var clock = sinon.useFakeTimers();
239 | sBucket._checkIdleState();
240 | sBucket.once('idle', done);
241 | clock.tick(60000);
242 | clock.restore();
243 | });
244 |
245 | });
246 |
247 | describe('#_emitIfStateIsIdle', function() {
248 |
249 | it('should emit the idle event if idle', function() {
250 | var sBucket = new Sbucket('test');
251 | expect(sBucket._emitIfStateIsIdle()).to.equal(true);
252 | });
253 |
254 | it('should not emit the idle event if not idle', function() {
255 | var sBucket = new Sbucket('test');
256 | sBucket._incPendingOps();
257 | expect(sBucket._emitIfStateIsIdle()).to.equal(false);
258 | });
259 |
260 | });
261 |
262 | describe('#_incPendingOps', function() {
263 |
264 | it('should increment the _pendingOperations property', function() {
265 | var sBucket = new Sbucket('test');
266 | sBucket._incPendingOps();
267 | expect(sBucket._pendingOperations).to.equal(1);
268 | });
269 |
270 | });
271 |
272 | describe('#_decPendingOps', function() {
273 |
274 | it('should decrement the _pendingOperations property', function() {
275 | var sBucket = new Sbucket('test');
276 | sBucket._pendingOperations = 1;
277 | sBucket._decPendingOps();
278 | expect(sBucket._pendingOperations).to.equal(0);
279 | });
280 |
281 | });
282 |
283 | });
284 |
--------------------------------------------------------------------------------
/test/utils.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var expect = require('chai').expect;
4 | var utils = require('../lib/utils');
5 |
6 | describe('@module:kfs/utils', function() {
7 |
8 | describe('#noop', function() {
9 |
10 | it('should return undefined', function() {
11 | expect(utils.noop()).to.equal(undefined);
12 | });
13 |
14 | });
15 |
16 | describe('#isValidKey', function() {
17 |
18 | it('should return false for invalid key', function() {
19 | expect(utils.isValidKey({})).to.equal(false);
20 | });
21 |
22 | });
23 |
24 | describe('#toHumanReadableSize', function() {
25 |
26 |     it('should convert the bytes to a human readable size', function() {
27 | expect(utils.toHumanReadableSize(34359738368)).to.equal('32.0 GiB');
28 | });
29 |
30 | it('should return the bytes if lower than 1KiB', function() {
31 | expect(utils.toHumanReadableSize(1000)).to.equal('1000 B');
32 | });
33 |
34 | });
35 |
36 | describe('#coerceKey', function() {
37 |
38 | it('should return the valid key', function() {
39 |       var key = utils.createReferenceId().toString('hex');
40 | expect(utils.coerceKey(key)).to.equal(key);
41 | });
42 |
43 | it('should return the hash of the invalid key', function() {
44 | expect(utils.coerceKey('test')).to.equal(
45 | '5e52fee47e6b070565f74372468cdc699de89107'
46 | );
47 | });
48 |
49 | });
50 |
51 | describe('#createItemKeyFromIndex', function() {
52 |
53 | it('should return the correct item key', function() {
54 | var fileKey = 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc';
55 | var itemKey = utils.createItemKeyFromIndex(fileKey, 20);
56 | expect(itemKey).to.equal(
57 | 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc 000020'
58 | );
59 | });
60 |
61 | });
62 |
63 | describe('#createSbucketNameFromIndex', function() {
64 |
65 | it('should return the correct sBucket dirname', function() {
66 | expect(utils.createSbucketNameFromIndex(42)).to.equal('042.s');
67 | });
68 |
69 | });
70 |
71 | describe('#createReferenceId', function() {
72 |
73 | it('should generate a new reference id if none supplied', function() {
74 | expect(utils.createReferenceId()).to.have.lengthOf(20);
75 | });
76 |
77 | it('should return a hex buffer from the given rid', function() {
78 | var rid = utils.createReferenceId(
79 | 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc'
80 | );
81 | expect(Buffer.isBuffer(rid)).to.equal(true);
82 | expect(rid).to.have.lengthOf(20);
83 | expect(rid.toString('hex')).to.equal(
84 | 'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc'
85 | );
86 | });
87 |
88 | });
89 |
90 | describe('#fileDoesExist', function() {
91 |
92 | it('should return true if file exists', function() {
93 | expect(utils.fileDoesExist(__dirname)).to.equal(true);
94 | });
95 |
96 | it('should return false if file does not exist', function() {
97 | expect(utils.fileDoesExist(
98 | utils.createReferenceId().toString('hex')
99 | )).to.equal(false);
100 | });
101 |
102 | });
103 |
104 | describe('#coerceTablePath', function() {
105 |
106 | it('should add the .kfs extension if not supplied', function() {
107 | expect(utils.coerceTablePath('test')).to.equal('test.kfs');
108 | });
109 |
110 | it('should not add the .kfs extension if it is supplied', function() {
111 | expect(utils.coerceTablePath('test.kfs')).to.equal('test.kfs');
112 | });
113 |
114 | });
115 |
116 | });
117 |
--------------------------------------------------------------------------------
/test/write-stream.unit.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | var WritableFileStream = require('../lib/write-stream');
4 | var sinon = require('sinon');
5 | var expect = require('chai').expect;
6 | var utils = require('../lib/utils');
7 |
8 | describe('WritableFileStream', function() {
9 |
10 | describe('#_write', function() {
11 |
12 | it('should emit an error if put fails', function(done) {
13 | var ws = new WritableFileStream({
14 | fileKey: utils.createReferenceId(),
15 | sBucket: {
16 | _db: {
17 | put: sinon.stub().callsArgWith(2, new Error('Failed'))
18 | },
19 | _chunkFree: []
20 | }
21 | });
22 | ws.on('error', function(err) {
23 | expect(err.message).to.equal('Failed');
24 | done();
25 | });
26 |     ws.write(Buffer.from('test'));
27 | });
28 |
29 | });
30 |
31 | describe('#destroy', function() {
32 |
33 | it('should call Sbucket#unlink', function(done) {
34 | var _unlink = sinon.stub().callsArg(1);
35 | var ws = new WritableFileStream({
36 | fileKey: utils.createReferenceId(),
37 | sBucket: {
38 | unlink: _unlink
39 | }
40 | });
41 | ws.destroy(done);
42 | });
43 |
44 | });
45 |
46 | });
47 |
--------------------------------------------------------------------------------