├── .gitignore
├── .npmrc
├── .travis.yml
├── LICENSE
├── README.md
├── bench
│   ├── append.js
│   ├── bench-blocksize.js
│   ├── index.js
│   ├── init.js
│   ├── readUInt32BE.js
│   ├── readUInt48BE.js
│   ├── readUInt64BE.js
│   └── run-bench-blocksize.js
├── blocks.js
├── file.js
├── index.js
├── package.json
└── test
    ├── append.js
    ├── read-integers.js
    ├── reopen.js
    └── simple.js

/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | 
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | package-lock=false
2 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 |   - 0.6
4 |   - 0.8
5 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016 'Dominic Tarr'
2 | 
3 | Permission is hereby granted, free of charge,
4 | to any person obtaining a copy of this software and
5 | associated documentation files (the "Software"), to
6 | deal in the Software without restriction, including
7 | without limitation the rights to use, copy, modify,
8 | merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom
10 | the Software is furnished to do so,
11 | subject to the following conditions:
12 | 
13 | The above copyright notice and this permission notice
14 | shall be included in all copies or substantial portions of the Software.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
18 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
20 | ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # aligned-block-file
2 | 
3 | read and write to a file in a cache-friendly way by using aligned blocks.
4 | 
5 | This module provides an interface to read arbitrary buffers from a file,
6 | and manages the file as a series of aligned blocks. This allows you to write
7 | high performance binary file formats, where many reads do not necessarily
8 | mean many fs reads.
9 | 
10 | ## Blocks(filename, block_size, flags) => AlignedBlockFile
11 | 
12 | create an instance. `block_size` is the size of the underlying block;
13 | I suggest 1024 or 4096, or some multiple of your OS block size.
14 | `flags` is passed to [fs.open](http://devdocs.io/node/fs#fs_fs_open_path_flags_mode_callback).
15 | 
16 | ### abf.read(start, end, cb)
17 | 
18 | read a buffer from the file. If the range is already in the cache,
19 | `cb` will be called synchronously.
20 | 
21 | ### abf.readUInt32BE(start, cb)
22 | 
23 | read a UInt32BE from the file. (`cb` may be sync, if the buffer is already in cache)
24 | 
25 | ### abf.readUInt48BE(start, cb)
26 | 
27 | read a UInt48BE from the file. (`cb` may be sync, if the buffer is already in cache)
28 | 
29 | ### abf.readUInt64BE(start, cb)
30 | 
31 | read a UInt64BE from the file. Since javascript numbers are restricted to doubles,
32 | this can only represent the first 53 bits. Take care with your 53 bit int! If you use bitwise
33 | operations it will collapse back to 32 bits; you need to use `*2` instead of `<< 1`, etc.
34 | (`cb` may be sync, if the buffer is already in cache)
35 | 
36 | ### abf.size
37 | 
38 | an observable of the file's size.
39 | 
40 | ### abf.offset
41 | 
42 | an observable of the end of the file.
43 | 
44 | ### abf.append(buf, cb)
45 | 
46 | append `buf` to the file.
47 | 
48 | this must not be called again until the previous call has returned.
49 | updated values for size and offset will be triggered immediately before `cb` is called.
50 | 
51 | ### abf.truncate(length, cb)
52 | 
53 | shorten the file to `length`, removing anything after that point.
54 | 
55 | ## License
56 | 
57 | MIT
58 | 
59 | 
--------------------------------------------------------------------------------
/bench/append.js:
--------------------------------------------------------------------------------
1 | 
2 | module.exports = function (ABF) {
3 |   var _time = 0
4 |   function log (bytes, time, log) {
5 |     if(log || _time + 1e3 < time) {
6 |       _time = time
7 |       console.log(bytes/1e6, time/1e3, (bytes/1e6)/(time/1e3))
8 |     }
9 |   }
10 | 
11 |   require('./')(ABF, {
12 |     data: Buffer.alloc(1024*16),
13 |     time:10e3, size: 100e6,
14 |     onUpdate: log
15 |   }, function (err, size, time) {
16 |     if(err) throw err
17 |     log(size, time, true)
18 |   })
19 | 
20 | }
21 | 
22 | if(!module.parent && process.title != 'browser')
23 | 
24 | module.exports(require('../')(
25 |   '/tmp/bench_aligned-block-file/'+Date.now()+'.blocks'
26 |   , 1024, 'a+'))
27 | 
--------------------------------------------------------------------------------
/bench/bench-blocksize.js:
--------------------------------------------------------------------------------
1 | 
2 | var k = +process.argv[3] || 64
3 | var length = k*1024
4 | var blocks = require('../')(process.argv[2], length, 'r')
5 | 
6 | blocks.offset.once(function () {
7 |   var start = 0, i = 0
8 |   var ts = Date.now()
9 |   blocks.read(start, Math.min(start+length, blocks.offset.value), function next (err, buffer, bytes) {
10 |     if(err) throw err
11 |     ++i
12 |     if(buffer.length < length) {
13 |       console.log([k, Date.now()-ts, i].join(', '))
14 |       return
15 |     }
16 |     start += buffer.length
17 |     blocks.read(start, Math.min(start+length, blocks.offset.value), next)
18 |   })
19 | })
20 | 
21 | 
22 | 
23 | 
24 | 
--------------------------------------------------------------------------------
/bench/index.js:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | module.exports = function (blocks, opts, cb) {
4 | 
5 |   var start = Date.now()
6 |   blocks.offset.once(function (v) {
7 |     ;(function next () {
8 |       blocks.append(opts.data, function (err) {
9 |         if(err) return cb(err)
10 |         var time = Date.now()-start
11 |         opts.onUpdate && opts.onUpdate(blocks.offset.value, time)
12 |         if(time > opts.time || blocks.offset.value > opts.size)
13 |           cb(null, blocks.offset.value, time)
14 |         else
15 |           next()
16 |       })
17 |     })()
18 |   })
19 | }
20 | 
21 | 
22 | 
23 | 
24 | 
25 | 
--------------------------------------------------------------------------------
/bench/init.js:
--------------------------------------------------------------------------------
1 | var crypto = require('crypto')
2 | 
3 | module.exports = function (blocks, n, cb) {
4 |   ;(function next (i) {
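    // append n random 1kb blocks, one at a time; per the README, append()
    // must not be called again until the previous callback has fired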
5 | if(i == n) return cb(null, blocks) 6 | blocks.append(crypto.randomBytes(1024), function (err) { 7 | if(err) cb(err) 8 | else next(i + 1) 9 | }) 10 | })(0) 11 | } 12 | 13 | -------------------------------------------------------------------------------- /bench/readUInt32BE.js: -------------------------------------------------------------------------------- 1 | var init = require('./init') 2 | var looper = require('looper') 3 | 4 | module.exports = function (blocks, cb) { 5 | init(blocks, 100, function (err) { 6 | if(err) throw err 7 | 8 | var start = Date.now() 9 | var c = 0 10 | var next = looper(function () { 11 | var seconds = (Date.now() - start)/1000 12 | if(seconds > 1) 13 | return cb(null, c / seconds, c, seconds) 14 | var index = Math.min(Math.floor(Math.random()*blocks.size()), blocks.size() - 4) 15 | blocks.readUInt32BE(index, function (err, n) { 16 | if(err) return cb(err) 17 | c++ 18 | next() 19 | }) 20 | 21 | }) 22 | console.log('start loop') 23 | next() 24 | }) 25 | } 26 | 27 | if(!module.parent) { 28 | var blocks = require('../')('/tmp/bench-abf'+Date.now(), 1024, 'a+') 29 | module.exports(blocks, function (err, ps, ops, seconds) { 30 | console.log(err, ps, ops, seconds) 31 | }) 32 | } 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /bench/readUInt48BE.js: -------------------------------------------------------------------------------- 1 | var init = require('./init') 2 | var looper = require('looper') 3 | 4 | module.exports = function (blocks, cb) { 5 | init(blocks, 100, function (err) { 6 | if(err) throw err 7 | 8 | var start = Date.now() 9 | var c = 0 10 | var next = looper(function () { 11 | var seconds = (Date.now() - start)/1000 12 | if(seconds > 1) 13 | return cb(null, c / seconds, c, seconds) 14 | var index = Math.min(Math.floor(Math.random()*blocks.size()), blocks.size() - 6) 15 | blocks.readUInt48BE(index, function (err, n) { 16 | if(err) return cb(err) 17 | c++ 18 | next() 19 | }) 20 | 21 | }) 22 | console.log('start loop') 23 | next() 24 | }) 25 | } 26 | 27 | if(!module.parent) { 28 | var blocks = require('../')('/tmp/bench-abf'+Date.now(), 1024, 'a+') 29 | module.exports(blocks, function (err, ps, ops, seconds) { 30 | console.log(err, ps, ops, seconds) 31 | }) 32 | } 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /bench/readUInt64BE.js: -------------------------------------------------------------------------------- 1 | var looper = require('looper') 2 | 3 | function init(blocks, n, cb) { 4 | ;(function next (i) { 5 | if(i == n) return cb(null, blocks) 6 | blocks.append(Buffer.alloc(10), function (err) { 7 | if(err) cb(err) 8 | else next(i + 1) 9 | }) 10 | })(0) 11 | } 12 | 13 | module.exports = function (blocks, cb) { 14 | init(blocks, 100, function (err) { 15 | if(err) throw err 16 | 17 | var start = Date.now() 18 | var c = 0 19 | var next = looper(function () { 20 | var seconds = (Date.now() - start)/1000 21 | if(seconds > 1) 22 | return cb(null, c / seconds, c, seconds) 23 | var index = Math.min(Math.floor(Math.random()*blocks.size()), blocks.size() - 8) 24 | blocks.readUInt64BE(index, function (err, n) { 25 | if(err) return cb(err) 26 | c++ 27 | next() 28 | }) 29 | 30 | }) 31 | console.log('start loop') 32 | next() 33 | }) 34 | } 35 | 36 | if(!module.parent) { 37 | var blocks = require('../')('/tmp/bench-abf'+Date.now(), 1024, 'a+') 38 | module.exports(blocks, function (err, ps, ops, seconds) { 39 | console.log(err, ps, ops, seconds) 40 | 
})
41 | }
42 | 
43 | 
44 | 
45 | 
46 | 
--------------------------------------------------------------------------------
/bench/run-bench-blocksize.js:
--------------------------------------------------------------------------------
1 | 
2 | var cp = require('child_process')
3 | var i = 1
4 | console.log('blocksize(k), ms, blocks')
5 | ;(function next () {
6 |   if(i < 128) {
7 |     var proc = cp.spawn(process.execPath, [require.resolve('./bench-blocksize.js'), process.argv[2], i++])
8 |     proc.stdout.pipe(process.stdout)
9 |     proc.stderr.pipe(process.stderr)
10 |     //.stdout.pipe(process.stdout)
11 |     proc.on('exit', next)
12 |   }
13 | })()
14 | 
15 | 
--------------------------------------------------------------------------------
/blocks.js:
--------------------------------------------------------------------------------
1 | var fs = require('fs')
2 | var uint48be = require('uint48be')
3 | var int53 = require('int53')
4 | 
5 | /*
6 |   Represent a file, as a table of buffers.
7 |   copy from a range in the file into a buffer
8 |   (may cross buffer boundaries)
9 | 
10 |   Also, write into the file at any point.
11 |   always update the cached buffer after the write.
12 |   (always read a buffer before write, except for appending a new buffer)
13 | */
14 | 
15 | function assertInteger (a) {
16 |   if(!Number.isInteger(a))
17 |     throw new Error('expected positive integer, was:'+JSON.stringify(a))
18 | }
19 | 
20 | var Cache = require('hashlru')
21 | 
22 | module.exports = function (file, block_size, cache) {
23 |   var cbs = [], br, writing = 0
24 |   cache = cache || Cache(1000)
25 | 
26 |   function get(i, cb) {
27 |     var c = cache.get(i)
28 |     if(Buffer.isBuffer(c))
29 |       cb(null, c, block_size)
30 |     else if(Array.isArray(cbs[i]))
31 |       cbs[i].push(cb)
32 |     else {
33 |       cbs[i] = [cb]
34 |       file.get(i, function (err, buf, bytes_read) {
35 |         var cb = cbs[i]
36 |         cbs[i] = null
37 |         if(!err) cache.set(i, buf)
38 |         for (var j = 0; j < cb.length; ++j)
39 |           cb[j](err, buf, bytes_read)
40 |       })
41 |     }
42 |   }
43 | 
44 |   function read(start, end, cb) {
45 |     assertInteger(start);assertInteger(end)
46 |     //check if start & end are part of the same buffer
47 |     var i = Math.floor(start/block_size)
48 |     if(file && end > file.offset.value)
49 |       return cb(new Error('past end:'+start+'-'+end+' > '+file.offset.value), null, 0)
50 |     var bufs = []
51 |     ;(function next (i) {
52 |       var block_start = i*block_size
53 |       get(i, function (err, block, bytes_read) {
54 |         if(err) return cb(err)
55 |         //this is not right.
56 |         if(bytes_read === 0) return cb(new Error('past end'), null, bytes_read)
57 | 
58 |         var read_start = start - block_start
59 |         var read_end = Math.min(end - block_start, block_size)
60 |         bufs.push(block.slice(read_start, read_end))
61 |         start += (read_end - read_start)
62 | 
63 |         if (start < end) {
64 |           next(i+1)
65 |         } else {
66 |           var buffer = bufs.length == 1 ? bufs[0] : Buffer.concat(bufs)
67 |           if (!buffer.length)
68 |             return cb(new Error('read an empty buffer at:'+start + ' to ' + end + '\n'+
69 |               JSON.stringify({
70 |                 start: start, end: end, i:i,
71 |                 bytes_read: bytes_read,
72 |                 bufs: bufs
73 |               }))
74 |             )
75 |           cb(null, buffer, bytes_read)
76 |         }
77 |       })
78 |     })(i)
79 | 
80 |   }
81 | 
82 |   //start by reading the end of the last block.
83 |   //this must always be kept in memory.
84 | 
85 |   function readInteger(width, reader) {
86 |     return function (start, cb) {
87 |       var i = Math.floor(start/block_size)
88 |       var _i = start%block_size
89 | 
90 |       //if the integer aligns within a block,
91 |       //read directly and it's 3x faster.
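      //(illustrative example: with block_size 1024 and width 4, a read at
      // offset 1000 stays inside one block and takes this fast path, while a
      // read at offset 1022 spans two blocks and falls through to read() below)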
92 |       if(_i < block_size - width)
93 |         get(i, function (err, block) {
94 |           if(err) return cb(err)
95 |           var value = reader(block, start%block_size)
96 |           cb(null, value)
97 |         })
98 |       //but handle reads that overlap two blocks the easier way,
99 |       //instead of messing around with bitwise ops
100 |       else
101 |         read(start, start+width, function (err, buf, bytes_read) {
102 |           if(err) return cb(err)
103 |           var value = reader(buf, 0);
104 |           cb(isNaN(value) ? new Error('Number is too large') : null, value)
105 |         })
106 |     }
107 |   }
108 | 
109 |   return br = {
110 |     read: read,
111 |     readUInt32BE: readInteger(4, function(b, offset) {
112 |       return b.readUInt32BE(offset)
113 |     }),
114 |     readUInt48BE: readInteger(6, function(b, offset) {
115 |       return uint48be.decode(b, offset)
116 |     }),
117 |     readUInt64BE: readInteger(8, function(b, offset) {
118 |       // int53.readUInt64BE will throw if the number is too large
119 |       try {
120 |         return int53.readUInt64BE(b, offset)
121 |       } catch(err) {
122 |         return NaN;
123 |       }
124 |     }),
125 |     size: file && file.size,
126 |     offset: file && file.offset,
127 |     //starting to realize: what I really need is just a lib for
128 |     //relative copies between two arrays of buffers, with a given offset.
129 |     append: function (buf, cb) {
130 |       //write to the end of the file.
131 |       //if successful, copy into cache.
132 |       if(writing++) throw new Error('already appending to this file')
133 |       file.offset.once(function (_offset) {
134 | 
135 |         var start = _offset
136 |         var b_start = 0
137 |         var i = Math.floor(start/block_size)
138 |         if(i*block_size < _offset) //usually true, unless file length is a multiple of block_size
139 |           get(i, function (err) { //this will add the last block to the cache.
140 |             if(err) cb(err) //precaching the last block before the append failed
141 |             else next()
142 |           })
143 |         else next()
144 | 
145 |         function next () {
146 |           while(b_start < buf.length) { //start < _offset+buf.length) {
147 |             var block_start = i*block_size
148 |             var b = cache.get(i)
149 |             if(null == b) {
150 |               b = Buffer.alloc(block_size)
151 |               cache.set(i, b)
152 |             }
153 |             //including if set in above if...
154 |             if(Buffer.isBuffer(b)) {
155 |               var len = Math.min(block_size - (start - block_start), block_size)
156 |               buf.copy(b, start - block_start, b_start, b_start + len)
157 |               start += len
158 |               b_start += len
159 |             }
160 |             else if(Array.isArray(cbs[i]))
161 |               throw new Error('should never happen: new block should be initialized, before a read ever happens')
162 |             else {
163 |               start += block_size
164 |             }
165 | 
166 |             i++
167 |           }
168 | 
169 |           file.append(buf, function (err, offset) {
170 |             writing = 0
171 |             if(err) return cb(err)
172 |             cb(null, offset)
173 |           })
174 |         }
175 |       })
176 |     },
177 |     /**
178 |      * Writes a buffer directly to a position in the file.
179 |      * This wraps `file.write()` and removes the block from the cache after the
180 |      * file write finishes, so a stale copy is not re-cached during the write.
181 |      *
182 |      * @param {buffer} buf - the data to write to the file
183 |      * @param {number} pos - position in the file to write the buffer
184 |      * @param {function} cb - callback that returns any error as an argument
185 |      */
186 |     write: (buf, pos, cb) => {
187 |       const i = Math.floor(pos/block_size)
188 |       file.write(buf, pos, (err) => {
189 |         cache.remove(i)
190 |         cb(err)
191 |       })
192 |     },
193 |     //we aren't specifically clearing the buffers,
194 |     //but they should get updated anyway.
195 |     truncate: file ? file.truncate : function (len, cb) {
196 |       cb()
197 |     }
198 |   }
199 | }
200 | 
201 | 
--------------------------------------------------------------------------------
/file.js:
--------------------------------------------------------------------------------
1 | var fs = require('fs')
2 | var mkdirp = require('mkdirp')
3 | var Obv = require('obv')
4 | var path = require('path')
5 | const ReadWriteLock = require('rwlock');
6 | 
7 | module.exports = function (file, block_size, flags) {
8 |   flags = flags || 'r+'
9 |   var fd
10 |   var offset = Obv()
11 | 
12 |   // Positional read and write operations may be hazardous. We want to avoid:
13 |   //
14 |   // - Concurrent writes to the same part of the file.
15 |   // - Reading and writing from the same part of the file.
16 |   //
17 |   // It's likely that Node.js is handling this deeper in the stack with libuv
18 |   // operations like `pread()` and `pwrite()`, but they haven't explicitly
19 |   // committed to this behavior:
20 |   //
21 |   // https://github.com/nodejs/node/issues/18634#issuecomment-363981993
22 |   //
23 |   // > This is not safe on all platforms, no.
24 |   //
25 | 
26 |   const lock = new ReadWriteLock();
27 | 
28 |   mkdirp(path.dirname(file), function () {
29 |     //r+ opens the file for reading and writing, but errors if the file does not exist.
30 |     //to open the file for reading and writing without erroring when it does not exist,
31 |     //we need to open and close the file in append mode first.
32 |     fs.open(file, 'a', function (_, _fd) {
33 |       fs.close(_fd, function (_) {
34 |         fs.open(file, flags, function (err, _fd) {
35 |           fd = _fd
36 |           fs.stat(file, function (err, stat) {
37 |             offset.set(err ? 0 : stat.size)
38 |           })
39 |         })
40 |       })
41 |     })
42 |   })
43 | 
44 |   // This variable *only* tracks appends, not positional writes.
45 |   var appending = 0
46 | 
47 |   return {
48 |     get: function (i, cb) {
49 |       offset.once(function (_offset) {
50 |         lock.readLock((release) => {
51 |           var max = ~~(_offset / block_size)
52 |           if(i > max)
53 |             return cb(new Error('aligned-block-file/file.get: requested block index was greater than max, got:'+i+', expected less than or equal to:'+max))
54 | 
55 |           var buf = Buffer.alloc(block_size)
56 | 
57 |           fs.read(fd, buf, 0, block_size, i*block_size, function (err, bytes_read) {
58 |             release()
59 |             if(err) cb(err)
60 |             else if(
61 |               //if bytes_read is wrong
62 |               i < max &&
63 |               buf.length !== bytes_read &&
64 |               //unless this is the very last block and it is incomplete.
65 |               !((i*block_size + bytes_read) == offset.value)
66 |             )
67 |               cb(new Error(
68 |                 'aligned-block-file/file.get: did not read whole block, expected length:'+
69 |                 block_size+' but got:'+bytes_read
70 |               ))
71 |             else
72 |               cb(null, buf, bytes_read)
73 |           })
74 |         })
75 |       })
76 |     },
77 |     offset: offset,
78 |     size: function () { return offset.value },
79 |     append: function (buf, cb) {
80 |       if(appending++) throw new Error('already appending to this file')
81 |       offset.once(function (_offset) {
82 |         fs.write(fd, buf, 0, buf.length, _offset, function (err, written) {
83 |           appending = 0
84 |           if(err) return cb(err)
85 |           if(written !== buf.length) return cb(new Error('wrote less bytes than expected:'+written+', but wanted:'+buf.length))
86 |           offset.set(_offset+written)
87 |           cb(null, _offset+written)
88 |         })
89 |       })
90 |     },
91 |     /**
92 |      * Writes a buffer directly to a position in the file. The write uses the
93 |      * same file descriptor as reads and appends, and takes the write lock so
94 |      * that it cannot interleave with the positional reads in `get()`.
95 | * 96 | * @param {buffer} buf - the data to write to the file 97 | * @param {number} pos - position in the file to write the buffer 98 | * @param {function} cb - callback that returns any error as an argument 99 | */ 100 | write: (buf, pos, cb) => { 101 | if(flags !== 'r+') throw new Error('file opened with flags:'+flags+' refusing to write unless flags are:r+') 102 | offset.once((_offset) => { 103 | const endPos = pos + buf.length 104 | const isPastOffset = endPos > _offset 105 | 106 | if (isPastOffset) { 107 | return cb(new Error(`cannot write past offset: ${endPos} > ${_offset}`)) 108 | } 109 | 110 | lock.writeLock((release) => { 111 | fs.write(fd, buf, 0, buf.length, pos, (err, written) => { 112 | release() 113 | if (err == null && written !== buf.length) { 114 | cb(new Error('wrote less bytes than expected:'+written+', but wanted:'+buf.length)) 115 | } else { 116 | cb(err) 117 | } 118 | }) 119 | }) 120 | }) 121 | }, 122 | truncate: function (len, cb) { 123 | if(appending) throw new Error('already appending, cannot truncate') 124 | offset.once(function (_offset) { 125 | if(_offset <= len) return cb() 126 | fs.ftruncate(fd, len, function (err) { 127 | if(err) cb(err) 128 | else { 129 | offset.set(len) 130 | cb(null, offset.value) 131 | } 132 | }) 133 | }) 134 | } 135 | } 136 | } 137 | 138 | 139 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var File = require('./file') 2 | var Blocks = require('./blocks') 3 | 4 | module.exports = function (file, block_size, flags, cache) { 5 | return Blocks(File(file, block_size, flags), block_size, cache) 6 | } 7 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aligned-block-file", 3 | "description": "", 4 | "version": "1.2.2", 5 | "homepage": "https://github.com/flumedb/aligned-block-file", 6 | "repository": { 7 | "type": "git", 8 | "url": "git://github.com/flumedb/aligned-block-file.git" 9 | }, 10 | "dependencies": { 11 | "hashlru": "^2.1.0", 12 | "int53": "^1.0.0", 13 | "mkdirp": "^0.5.1", 14 | "obv": "^0.0.1", 15 | "rwlock": "^5.0.0", 16 | "uint48be": "^2.0.1" 17 | }, 18 | "devDependencies": { 19 | "looper": "^4.0.0", 20 | "tape": "^4.10.1" 21 | }, 22 | "scripts": { 23 | "test": "set -e; for t in test/*.js; do node $t; done" 24 | }, 25 | "author": "'Dominic Tarr' (dominictarr.com)", 26 | "license": "MIT" 27 | } 28 | -------------------------------------------------------------------------------- /test/append.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | var tape = require('tape') 5 | var Blocks = require('../blocks') 6 | var File = require('../file') 7 | 8 | var store = {} 9 | var cache = { 10 | get: function (i) { return store[i] }, 11 | set: function (i, v) { store[i] = v } 12 | } 13 | 14 | module.exports = function (reduce) { 15 | 16 | var filename = '/tmp/test_block-reader_'+Date.now() 17 | var blocks = reduce(null) 18 | 19 | var a = Buffer.alloc(32, 'a') 20 | var b = Buffer.alloc(32, 'b') 21 | var c = Buffer.alloc(32, 'c') 22 | var d = Buffer.alloc(32, 'd') 23 | var e = Buffer.alloc(24, 'e') 24 | var f = Buffer.alloc(64, 'f') 25 | 26 | tape('append one block', function (t) { 27 | blocks.append(a, function (err, offset) { 28 | if(err) throw err 29 | t.equal(offset, 32) 30 | blocks.read(0, 33, function (err, 
over, bytes) { 31 | t.ok(err) 32 | t.equal(bytes, 0) 33 | t.end() 34 | }) 35 | 36 | }) 37 | 38 | }) 39 | 40 | 41 | tape('append another block', function (t) { 42 | blocks = reduce(blocks) 43 | blocks.append(b, function (err, offset) { 44 | if(err) throw err 45 | t.equal(offset, 64) 46 | t.end() 47 | }) 48 | 49 | }) 50 | 51 | tape('append a half block', function (t) { 52 | blocks = reduce(blocks) 53 | blocks.append(c.slice(0, 16), function (err, offset) { 54 | if(err) throw err 55 | t.equal(offset, 80) 56 | t.end() 57 | }) 58 | 59 | }) 60 | 61 | tape('read last block', function (t) { 62 | blocks = reduce(blocks) 63 | blocks.read(64, 80, function (err, _c) { 64 | console.log(blocks.offset.value) 65 | if(err) throw err 66 | console.log(_c) 67 | t.deepEqual(_c.slice(0, 16), c.slice(0, 16)) 68 | t.end() 69 | }) 70 | }) 71 | 72 | tape('append another half block', function (t) { 73 | blocks = reduce(blocks) // Blocks(File(filename, 32, 'a+'), 32) 74 | blocks.append(c.slice(0, 16), function (err, offset) { 75 | if(err) throw err 76 | t.equal(offset, 96) 77 | 78 | blocks.read(64, 96, function (err, _c) { 79 | if(err) throw err 80 | console.log(_c) 81 | t.deepEqual(_c, c) 82 | t.end() 83 | }) 84 | 85 | }) 86 | }) 87 | 88 | tape('appending in parallel throws', function (t) { 89 | blocks = reduce(blocks) 90 | blocks.append(a, function (err, offset) { 91 | if(err) throw err 92 | t.equal(offset, 128) 93 | t.end() 94 | }) 95 | 96 | t.throws(function () { 97 | blocks.append(b, function (err, offset) { 98 | t.fail('should never be called') 99 | }) 100 | }) 101 | 102 | }) 103 | 104 | tape('read in parallel with append', function (t) { 105 | store = {} //clear the cache 106 | blocks.offset.once(function (o) { 107 | blocks.append(c, function (err, _o) { 108 | t.equal(160, _o) 109 | t.end() 110 | }) 111 | blocks.read(o, o+16, function (err, buf) { 112 | t.ok(err) 113 | }) 114 | }) 115 | 116 | }) 117 | 118 | 119 | tape('append half block, then overlapping block', function (t) { 120 | blocks = reduce(blocks) 121 | blocks.append(e, function (err, offset) { 122 | if(err) throw err 123 | t.equal(offset, 184) 124 | blocks.read(144, 176, function (err, data) { 125 | if(err) throw err 126 | console.log(err, data) 127 | store = {} 128 | blocks.append(f, function (err, offset) { 129 | blocks.read(176, 180, function (err, data) { 130 | if(err) throw err 131 | console.log(err, data) 132 | if(err) throw err 133 | t.equal(offset, 248) 134 | console.log(store) 135 | t.end() 136 | }) 137 | }) 138 | }) 139 | }) 140 | }) 141 | 142 | tape('truncate', function (t) { 143 | blocks = reduce(blocks) 144 | blocks.truncate(64, function (err, len) { 145 | if(err) throw err 146 | t.equal(blocks.offset.value, 64) 147 | t.equal(len, 64) 148 | 149 | blocks.read(0, 64, function (err, ab) { 150 | t.deepEqual(ab, Buffer.concat([a, b])) 151 | blocks.read(64, 96, function (err, _c, bytes) { 152 | t.ok(err) 153 | t.equal(bytes, 0) 154 | t.equal(_c, null) 155 | t.end() 156 | }) 157 | }) 158 | }) 159 | }) 160 | 161 | } 162 | 163 | if(!module.parent) { 164 | var filename = '/tmp/test_block-reader_'+Date.now() 165 | module.exports(function (b) { 166 | return b ? 
b : Blocks(File(filename, 32, 'a+'), 32) 167 | }) 168 | } 169 | 170 | 171 | 172 | 173 | 174 | 175 | -------------------------------------------------------------------------------- /test/read-integers.js: -------------------------------------------------------------------------------- 1 | var tape = require('tape') 2 | var Blocks = require('../blocks') 3 | 4 | function Cache () { 5 | var c = [] 6 | return { 7 | get: function (key) { return c[key] }, 8 | set: function (key, value) { c[key] = value } 9 | } 10 | } 11 | 12 | tape('readUInt32BE', function (t) { 13 | var cache = Cache() 14 | cache.set(0, new Buffer.from('00000002', 'hex')) 15 | cache.set(1, new Buffer.from('01000000', 'hex')) 16 | cache.set(2, new Buffer.from('ffffffff', 'hex')) 17 | cache.set(3, new Buffer.from('aaaaffff', 'hex')) 18 | cache.set(4, new Buffer.from('ffffaaaa', 'hex')) 19 | var blocks = Blocks(null, 4, cache) 20 | 21 | t.plan(4) 22 | test(0, 2) // aligned 23 | test(1, 513) //unaligned 24 | test(8, Math.pow(2,32)-1) // maxint 25 | test(14, Math.pow(2,32)-1) // maxint, unaligned 26 | 27 | function test(offset, expected) { 28 | blocks.readUInt32BE(offset, function (err, n) { 29 | if (err) throw err 30 | t.equal(n, expected) 31 | }) 32 | } 33 | }) 34 | 35 | tape('readUInt48BE', function (t) { 36 | var cache = Cache() 37 | cache.set(0, new Buffer.from('000000000002', 'hex')) 38 | cache.set(1, new Buffer.from('010000000000', 'hex')) 39 | cache.set(2, new Buffer.from('ffffffffffff', 'hex')) 40 | cache.set(3, new Buffer.from('aaaaaaffffff', 'hex')) 41 | cache.set(4, new Buffer.from('ffffffaaaaaa', 'hex')) 42 | var blocks = Blocks(null, 6, cache) 43 | 44 | t.plan(4) 45 | test(0, 2) // aligned 46 | test(1, 513) //unaligned 47 | test(12, Math.pow(2,48)-1) // maxint 48 | test(21, Math.pow(2,48)-1) // maxint, unaligned 49 | 50 | function test(offset, expected) { 51 | blocks.readUInt48BE(offset, function (err, n) { 52 | if (err) throw err 53 | t.equal(n, expected) 54 | }) 55 | } 56 | }) 57 | 58 | tape('readUInt64BE', function (t) { 59 | var cache = Cache() 60 | cache.set(0, new Buffer.from('0000000000000002', 'hex')) 61 | cache.set(1, new Buffer.from('0100000000000000', 'hex')) 62 | cache.set(2, new Buffer.from('001fffffffffffff', 'hex')) 63 | cache.set(3, new Buffer.from('aaaaaaaa001fffff', 'hex')) 64 | cache.set(4, new Buffer.from('ffffffffaaaaaaaa', 'hex')) 65 | var blocks = Blocks(null, 8, cache) 66 | 67 | t.plan(5) 68 | test(0, 2) // aligned 69 | test(1, 513) //unaligned 70 | test(16, Math.pow(2,53)-1) // maxint 71 | test(28, Math.pow(2,53)-1) // maxint, unaligned 72 | test(29, NaN, true) // overflow! 
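  // at offset 29 the eight bytes read span blocks 3 and 4 and decode to a value
  // larger than Number.MAX_SAFE_INTEGER (2^53 - 1), so int53 throws and
  // readUInt64BE reports the 'Number is too large' error with NaN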
73 | 
74 |   function test(offset, expected, expectError) {
75 |     blocks.readUInt64BE(offset, function (err, n) {
76 |       if (err && !expectError) throw err
77 |       if (!err && expectError) throw new Error('Expected error did not occur')
78 |       if (!err)
79 |         t.equal(n, expected)
80 |       else
81 |         t.equal(isNaN(n), true)
82 |     })
83 |   }
84 | })
85 | 
--------------------------------------------------------------------------------
/test/reopen.js:
--------------------------------------------------------------------------------
1 | var filename = '/tmp/test_block-reader_'+Date.now()
2 | var Blocks = require('../blocks')
3 | var File = require('../file')
4 | 
5 | require('./append')(function (b) {
6 |   return Blocks(File(filename, 32, 'a+'), 32)
7 | })
8 | 
--------------------------------------------------------------------------------
/test/simple.js:
--------------------------------------------------------------------------------
1 | var fs = require('fs')
2 | 
3 | var Blocks = require('../blocks')
4 | var File = require('../file')
5 | 
6 | var tape = require('tape')
7 | 
8 | var a = Buffer.alloc(32, 'a')
9 | var b = Buffer.alloc(32, 'b')
10 | var c = Buffer.alloc(32, 'c')
11 | 
12 | function Cache () {
13 |   var c = []
14 |   return {
15 |     get: function (key) { return c[key] },
16 |     set: function (key, value) { c[key] = value }
17 |   }
18 | }
19 | 
20 | tape('splice', function (t) {
21 |   var cache = Cache()
22 |   var bufs = Blocks(null, 32, cache)
23 |   cache.set(0, a)
24 |   cache.set(1, b)
25 |   cache.set(2, c)
26 |   bufs.offset = 96
27 | 
28 |   function test(start, end, expected) {
29 |     bufs.read(start, end, function (err, actual) {
30 |       if(err) throw err
31 |       t.deepEqual(actual, expected)
32 |     })
33 |   }
34 | 
35 |   test(0, 32, a)
36 |   test(32, 64, b)
37 |   test(64, 96, c)
38 | 
39 |   test(0, 64, Buffer.concat([a, b]))
40 |   test(32, 96, Buffer.concat([b, c]))
41 | 
42 |   var _a = a.slice(0, 16)
43 |   var _b = b.slice(0, 16)
44 |   var _c = c.slice(0, 16)
45 | 
46 |   test(16, 32 + 16, Buffer.concat([_a, _b]))
47 |   test(32 + 16, 64 + 16, Buffer.concat([_b, _c]))
48 | 
49 |   t.end()
50 | })
51 | 
52 | tape('read file', function (t) {
53 |   var file = '/tmp/test_block-reader_'+Date.now()
54 |   fs.appendFileSync(file, a)
55 |   fs.appendFileSync(file, b)
56 |   fs.appendFileSync(file, c)
57 |   var bufs = Blocks(File(file, 32, 'a+'), 32)
58 | 
59 |   t.plan(7)
60 | 
61 |   function test(start, end, expected) {
62 |     bufs.read(start, end, function (err, actual) {
63 |       if(err) throw err
64 |       t.deepEqual(actual, expected)
65 |     })
66 |   }
67 | 
68 |   test(0, 32, a)
69 |   test(32, 64, b)
70 |   test(64, 96, c)
71 | 
72 |   test(0, 64, Buffer.concat([a, b]))
73 |   test(32, 96, Buffer.concat([b, c]))
74 | 
75 |   var _a = a.slice(0, 16)
76 |   var _b = b.slice(0, 16)
77 |   var _c = c.slice(0, 16)
78 | 
79 |   test(16, 32 + 16, Buffer.concat([_a, _b]))
80 |   test(32 + 16, 64 + 16, Buffer.concat([_b, _c]))
81 | 
82 | })
83 | 
84 | tape('read empty file', function (t) {
85 |   var file = '/tmp/test_block-reader_'+Date.now()
86 |   var bufs = Blocks(File(file, 32, 'a+'), 32)
87 |   bufs.read(0, 32, function (err, buf, bytes_read) {
88 |     t.ok(err)
89 |     t.equal(bytes_read, 0)
90 |     t.end()
91 |   })
92 | })
93 | 
94 | tape('overwrite previous data', function (t) {
95 |   var file = '/tmp/test_block-reader_'+Date.now()
96 |   var bufs = Blocks(File(file, 32, 'r+'), 32)
97 |   bufs.append(a, function (err) {
98 |     t.error(err)
99 |     bufs.read(0, 32, function (err, bufA) {
100 |       t.error(err)
101 |       t.deepEqual(bufA, a)
102 |       bufs.write(b, 0, function (err) {
103 |         t.error(err)
104 |         bufs.read(0, 32, function
(err, bufB) { 105 | t.error(err) 106 | t.deepEqual(bufB, b) 107 | bufs.write(b, 1, function (err) { 108 | t.ok(err, 'error if writing past last offset') 109 | 110 | // let's make a race condition! 111 | // first we'll start writing... 112 | bufs.write(c, 0, function (err) { 113 | t.error(err) 114 | }) 115 | 116 | // and we'll start reading before it's done 117 | bufs.read(0, 32, (err, bufC) => { 118 | t.error(err) 119 | t.deepEqual(bufC, c) 120 | t.end() 121 | }) 122 | }) 123 | }) 124 | }) 125 | }) 126 | }) 127 | }) 128 | 129 | --------------------------------------------------------------------------------
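A minimal usage sketch of the API documented in README.md above (the file path
and block size here are arbitrary illustrative choices; as the README notes,
callbacks may fire synchronously when the requested blocks are already cached):

var Blocks = require('aligned-block-file')

// open (or create) a file and manage it as 1024-byte aligned blocks
var abf = Blocks('/tmp/example.blocks', 1024, 'a+')

abf.append(Buffer.from('hello world'), function (err, offset) {
  if(err) throw err
  // offset is the new end of the file; abf.size and abf.offset update just before cb
  abf.read(0, 5, function (err, buf) {
    if(err) throw err
    console.log(buf.toString()) // 'hello'
    // the integer readers take a byte offset into the file
    abf.readUInt32BE(0, function (err, n) {
      if(err) throw err
      console.log(n) // the first 4 bytes decoded as a big-endian uint32
    })
  })
})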