├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── index.js ├── package.json └── test └── test.js /.gitignore: -------------------------------------------------------------------------------- 1 | /coverage 2 | /node_modules 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "0.10" 4 | script: 5 | - "npm run test-travis" 6 | after_script: 7 | - "npm install coveralls@2 && cat ./coverage/lcov.info | ./node_modules/.bin/coveralls" 8 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ### 1.0.1 2 | 3 | * use `setImmediate` instead of `nextTick` 4 | 5 | ### 1.0.0 6 | 7 | * `new FdSlicer(fd, options)` must now be `fdSlicer.createFromFd(fd, options)` 8 | * fix behavior when `end` is 0. 9 | * fix `createWriteStream` when using `createFromBuffer` 10 | 11 | ### 0.4.0 12 | 13 | * add ability to create an FdSlicer instance from a Buffer 14 | 15 | ### 0.3.2 16 | 17 | * fix write stream and read stream destroy behavior 18 | 19 | ### 0.3.1 20 | 21 | * write stream: fix end option behavior 22 | 23 | ### 0.3.0 24 | 25 | * write stream emits 'progress' events 26 | * write stream supports 'end' option which causes the stream to emit an error 27 | if a maximum size is exceeded 28 | * improve documentation 29 | 30 | ### 0.2.1 31 | 32 | * Update pend dependency to latest bugfix version. 33 | 34 | ### 0.2.0 35 | 36 | * Add read and write functions 37 | 38 | ### 0.1.0 39 | 40 | * Add `autoClose` option and `ref()` and `unref()`. 
41 | 42 | ### 0.0.2 43 | 44 | * Add API documentation 45 | * read stream: create buffer at last possible moment 46 | 47 | ### 0.0.1 48 | 49 | * Initial release 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 Andrew Kelley 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation files 5 | (the "Software"), to deal in the Software without restriction, 6 | including without limitation the rights to use, copy, modify, merge, 7 | publish, distribute, sublicense, and/or sell copies of the Software, 8 | and to permit persons to whom the Software is furnished to do so, 9 | subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 18 | BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 19 | ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fd-slicer 2 | 3 | [![Build Status](https://travis-ci.org/andrewrk/node-fd-slicer.svg?branch=master)](https://travis-ci.org/andrewrk/node-fd-slicer) 4 | 5 | Safe `fs.ReadStream` and `fs.WriteStream` using the same fd. 6 | 7 | Let's say that you want to perform a parallel upload of a file to a remote 8 | server. 
To do this, we want to create multiple read streams. The first thing 9 | you might think of is to use the `{start: 0, end: 0}` API of 10 | `fs.createReadStream`. This gives you two choices: 11 | 12 | 0. Use the same file descriptor for all `fs.ReadStream` objects. 13 | 0. Open the file multiple times, resulting in a separate file descriptor 14 | for each read stream. 15 | 16 | Neither of these is an acceptable option. The first one is a severe bug, 17 | because the API docs for `fs.write` state: 18 | 19 | > Note that it is unsafe to use `fs.write` multiple times on the same file 20 | > without waiting for the callback. For this scenario, `fs.createWriteStream` 21 | > is strongly recommended. 22 | 23 | `fs.createWriteStream` will solve the problem if you only create one of them 24 | for the file descriptor, but it will exhibit this unsafety if you create 25 | multiple write streams per file descriptor. 26 | 27 | The second option suffers from a race condition. For each additional time the 28 | file is opened after the first, it is possible that the file is modified. So 29 | in our parallel uploading example, we might upload a corrupt file that never 30 | existed on the client's computer. 31 | 32 | This module solves this problem by providing `createReadStream` and 33 | `createWriteStream` that operate on a shared file descriptor and provides 34 | the convenient stream API while still allowing slicing and dicing. 35 | 36 | This module also gives you some additional power that the builtin 37 | `fs.createWriteStream` does not give you. These features are: 38 | 39 | * Emitting a 'progress' event on write. 40 | * Ability to set a maximum size and emit an error if this size is exceeded. 41 | * Ability to create an `FdSlicer` instance from a `Buffer`. This enables you 42 | to provide API for handling files as well as buffers using the same API. 
43 | 44 | ## Usage 45 | 46 | ```js 47 | var fdSlicer = require('fd-slicer'); 48 | var fs = require('fs'); 49 | 50 | fs.open("file.txt", 'r', function(err, fd) { 51 | if (err) throw err; 52 | var slicer = fdSlicer.createFromFd(fd); 53 | var firstPart = slicer.createReadStream({start: 0, end: 100}); 54 | var secondPart = slicer.createReadStream({start: 100}); 55 | var firstOut = fs.createWriteStream("first.txt"); 56 | var secondOut = fs.createWriteStream("second.txt"); 57 | firstPart.pipe(firstOut); 58 | secondPart.pipe(secondOut); 59 | }); 60 | ``` 61 | 62 | You can also create from a buffer: 63 | 64 | ```js 65 | var fdSlicer = require('fd-slicer'); 66 | var slicer = fdSlicer.createFromBuffer(someBuffer); 67 | var firstPart = slicer.createReadStream({start: 0, end: 100}); 68 | var secondPart = slicer.createReadStream({start: 100}); 69 | var firstOut = fs.createWriteStream("first.txt"); 70 | var secondOut = fs.createWriteStream("second.txt"); 71 | firstPart.pipe(firstOut); 72 | secondPart.pipe(secondOut); 73 | ``` 74 | 75 | ## API Documentation 76 | 77 | ### fdSlicer.createFromFd(fd, [options]) 78 | 79 | ```js 80 | var fdSlicer = require('fd-slicer'); 81 | fs.open("file.txt", 'r', function(err, fd) { 82 | if (err) throw err; 83 | var slicer = fdSlicer.createFromFd(fd); 84 | // ... 85 | }); 86 | ``` 87 | 88 | Make sure `fd` is a properly initialized file descriptor. If you want to 89 | use `createReadStream` make sure you open it for reading and if you want 90 | to use `createWriteStream` make sure you open it for writing. 91 | 92 | `options` is an optional object which can contain: 93 | 94 | * `autoClose` - if set to `true`, the file descriptor will be automatically 95 | closed once the last stream that references it is closed. Defaults to 96 | `false`. `ref()` and `unref()` can be used to increase or decrease the 97 | reference count, respectively. 
98 | 99 | ### fdSlicer.createFromBuffer(buffer, [options]) 100 | 101 | ```js 102 | var fdSlicer = require('fd-slicer'); 103 | var slicer = fdSlicer.createFromBuffer(someBuffer); 104 | // ... 105 | ``` 106 | 107 | `options` is an optional object which can contain: 108 | 109 | * `maxChunkSize` - A `Number` of bytes. see `createReadStream()`. 110 | If falsey, defaults to unlimited. 111 | 112 | #### Properties 113 | 114 | ##### fd 115 | 116 | The file descriptor passed in. `undefined` if created from a buffer. 117 | 118 | #### Methods 119 | 120 | ##### createReadStream(options) 121 | 122 | Available `options`: 123 | 124 | * `start` - Number. The offset into the file to start reading from. Defaults 125 | to 0. 126 | * `end` - Number. Exclusive upper bound offset into the file to stop reading 127 | from. 128 | * `highWaterMark` - Number. The maximum number of bytes to store in the 129 | internal buffer before ceasing to read from the underlying resource. 130 | Defaults to 16 KB. 131 | * `encoding` - String. If specified, then buffers will be decoded to strings 132 | using the specified encoding. Defaults to `null`. 133 | 134 | The ReadableStream that this returns has these additional methods: 135 | 136 | * `destroy(err)` - stop streaming. `err` is optional and is the error that 137 | will be emitted in order to cause the streaming to stop. Defaults to 138 | `new Error("stream destroyed")`. 139 | 140 | If `maxChunkSize` was specified (see `createFromBuffer()`), the read stream 141 | will provide chunks of at most that size. Normally, the read stream provides 142 | the entire range requested in a single chunk, but this can cause performance 143 | problems in some circumstances. 144 | See [thejoshwolfe/yauzl#87](https://github.com/thejoshwolfe/yauzl/issues/87). 145 | 146 | ##### createWriteStream(options) 147 | 148 | Available `options`: 149 | 150 | * `start` - Number. The offset into the file to start writing to. Defaults to 151 | 0. 152 | * `end` - Number. 
Exclusive upper bound offset into the file. If this offset 153 | is reached, the write stream will emit an 'error' event and stop functioning. 154 | In this situation, `err.code === 'ETOOBIG'`. Defaults to `Infinity`. 155 | * `highWaterMark` - Number. Buffer level when `write()` starts returning 156 | false. Defaults to 16KB. 157 | * `decodeStrings` - Boolean. Whether or not to decode strings into Buffers 158 | before passing them to `_write()`. Defaults to `true`. 159 | 160 | The WritableStream that this returns has these additional methods: 161 | 162 | * `destroy()` - stop streaming 163 | 164 | And these additional properties: 165 | 166 | * `bytesWritten` - number of bytes written to the stream 167 | 168 | And these additional events: 169 | 170 | * 'progress' - emitted when `bytesWritten` changes. 171 | 172 | ##### read(buffer, offset, length, position, callback) 173 | 174 | Equivalent to `fs.read`, but with concurrency protection. 175 | `callback` must be defined. 176 | 177 | ##### write(buffer, offset, length, position, callback) 178 | 179 | Equivalent to `fs.write`, but with concurrency protection. 180 | `callback` must be defined. 181 | 182 | ##### ref() 183 | 184 | Increase the `autoClose` reference count by 1. 185 | 186 | ##### unref() 187 | 188 | Decrease the `autoClose` reference count by 1. 189 | 190 | #### Events 191 | 192 | ##### 'error' 193 | 194 | Emitted if `fs.close` returns an error when auto closing. 195 | 196 | ##### 'close' 197 | 198 | Emitted when fd-slicer closes the file descriptor due to `autoClose`. Never 199 | emitted if created from a buffer. 
200 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | var fs = require('fs'); 2 | var util = require('util'); 3 | var stream = require('stream'); 4 | var Readable = stream.Readable; 5 | var Writable = stream.Writable; 6 | var PassThrough = stream.PassThrough; 7 | var Pend = require('pend'); 8 | var EventEmitter = require('events').EventEmitter; 9 | 10 | exports.createFromBuffer = createFromBuffer; 11 | exports.createFromFd = createFromFd; 12 | exports.BufferSlicer = BufferSlicer; 13 | exports.FdSlicer = FdSlicer; 14 | 15 | util.inherits(FdSlicer, EventEmitter); 16 | function FdSlicer(fd, options) { 17 | options = options || {}; 18 | EventEmitter.call(this); 19 | 20 | this.fd = fd; 21 | this.pend = new Pend(); 22 | this.pend.max = 1; 23 | this.refCount = 0; 24 | this.autoClose = !!options.autoClose; 25 | } 26 | 27 | FdSlicer.prototype.read = function(buffer, offset, length, position, callback) { 28 | var self = this; 29 | self.pend.go(function(cb) { 30 | fs.read(self.fd, buffer, offset, length, position, function(err, bytesRead, buffer) { 31 | cb(); 32 | callback(err, bytesRead, buffer); 33 | }); 34 | }); 35 | }; 36 | 37 | FdSlicer.prototype.write = function(buffer, offset, length, position, callback) { 38 | var self = this; 39 | self.pend.go(function(cb) { 40 | fs.write(self.fd, buffer, offset, length, position, function(err, written, buffer) { 41 | cb(); 42 | callback(err, written, buffer); 43 | }); 44 | }); 45 | }; 46 | 47 | FdSlicer.prototype.createReadStream = function(options) { 48 | return new ReadStream(this, options); 49 | }; 50 | 51 | FdSlicer.prototype.createWriteStream = function(options) { 52 | return new WriteStream(this, options); 53 | }; 54 | 55 | FdSlicer.prototype.ref = function() { 56 | this.refCount += 1; 57 | }; 58 | 59 | FdSlicer.prototype.unref = function() { 60 | var self = this; 61 | self.refCount -= 1; 62 | 63 | 
if (self.refCount > 0) return; 64 | if (self.refCount < 0) throw new Error("invalid unref"); 65 | 66 | if (self.autoClose) { 67 | fs.close(self.fd, onCloseDone); 68 | } 69 | 70 | function onCloseDone(err) { 71 | if (err) { 72 | self.emit('error', err); 73 | } else { 74 | self.emit('close'); 75 | } 76 | } 77 | }; 78 | 79 | util.inherits(ReadStream, Readable); 80 | function ReadStream(context, options) { 81 | options = options || {}; 82 | Readable.call(this, options); 83 | 84 | this.context = context; 85 | this.context.ref(); 86 | 87 | this.start = options.start || 0; 88 | this.endOffset = options.end; 89 | this.pos = this.start; 90 | this.destroyed = false; 91 | } 92 | 93 | ReadStream.prototype._read = function(n) { 94 | var self = this; 95 | if (self.destroyed) return; 96 | 97 | var toRead = Math.min(self._readableState.highWaterMark, n); 98 | if (self.endOffset != null) { 99 | toRead = Math.min(toRead, self.endOffset - self.pos); 100 | } 101 | if (toRead <= 0) { 102 | self.destroyed = true; 103 | self.push(null); 104 | self.context.unref(); 105 | return; 106 | } 107 | self.context.pend.go(function(cb) { 108 | if (self.destroyed) return cb(); 109 | var buffer = new Buffer(toRead); 110 | fs.read(self.context.fd, buffer, 0, toRead, self.pos, function(err, bytesRead) { 111 | if (err) { 112 | self.destroy(err); 113 | } else if (bytesRead === 0) { 114 | self.destroyed = true; 115 | self.push(null); 116 | self.context.unref(); 117 | } else { 118 | self.pos += bytesRead; 119 | self.push(buffer.slice(0, bytesRead)); 120 | } 121 | cb(); 122 | }); 123 | }); 124 | }; 125 | 126 | ReadStream.prototype.destroy = function(err) { 127 | if (this.destroyed) return; 128 | err = err || new Error("stream destroyed"); 129 | this.destroyed = true; 130 | this.emit('error', err); 131 | this.context.unref(); 132 | }; 133 | 134 | util.inherits(WriteStream, Writable); 135 | function WriteStream(context, options) { 136 | options = options || {}; 137 | Writable.call(this, options); 138 | 139 | 
this.context = context; 140 | this.context.ref(); 141 | 142 | this.start = options.start || 0; 143 | this.endOffset = (options.end == null) ? Infinity : +options.end; 144 | this.bytesWritten = 0; 145 | this.pos = this.start; 146 | this.destroyed = false; 147 | 148 | this.on('finish', this.destroy.bind(this)); 149 | } 150 | 151 | WriteStream.prototype._write = function(buffer, encoding, callback) { 152 | var self = this; 153 | if (self.destroyed) return; 154 | 155 | if (self.pos + buffer.length > self.endOffset) { 156 | var err = new Error("maximum file length exceeded"); 157 | err.code = 'ETOOBIG'; 158 | self.destroy(); 159 | callback(err); 160 | return; 161 | } 162 | self.context.pend.go(function(cb) { 163 | if (self.destroyed) return cb(); 164 | fs.write(self.context.fd, buffer, 0, buffer.length, self.pos, function(err, bytes) { 165 | if (err) { 166 | self.destroy(); 167 | cb(); 168 | callback(err); 169 | } else { 170 | self.bytesWritten += bytes; 171 | self.pos += bytes; 172 | self.emit('progress'); 173 | cb(); 174 | callback(); 175 | } 176 | }); 177 | }); 178 | }; 179 | 180 | WriteStream.prototype.destroy = function() { 181 | if (this.destroyed) return; 182 | this.destroyed = true; 183 | this.context.unref(); 184 | }; 185 | 186 | util.inherits(BufferSlicer, EventEmitter); 187 | function BufferSlicer(buffer, options) { 188 | EventEmitter.call(this); 189 | 190 | options = options || {}; 191 | this.refCount = 0; 192 | this.buffer = buffer; 193 | this.maxChunkSize = options.maxChunkSize || Number.MAX_SAFE_INTEGER; 194 | } 195 | 196 | BufferSlicer.prototype.read = function(buffer, offset, length, position, callback) { 197 | var end = position + length; 198 | var delta = end - this.buffer.length; 199 | var written = (delta > 0) ? 
delta : length; 200 | this.buffer.copy(buffer, offset, position, end); 201 | setImmediate(function() { 202 | callback(null, written); 203 | }); 204 | }; 205 | 206 | BufferSlicer.prototype.write = function(buffer, offset, length, position, callback) { 207 | buffer.copy(this.buffer, position, offset, offset + length); 208 | setImmediate(function() { 209 | callback(null, length, buffer); 210 | }); 211 | }; 212 | 213 | BufferSlicer.prototype.createReadStream = function(options) { 214 | options = options || {}; 215 | var readStream = new PassThrough(options); 216 | readStream.destroyed = false; 217 | readStream.start = options.start || 0; 218 | readStream.endOffset = options.end; 219 | // by the time this function returns, we'll be done. 220 | readStream.pos = readStream.endOffset || this.buffer.length; 221 | 222 | // respect the maxChunkSize option to slice up the chunk into smaller pieces. 223 | var entireSlice = this.buffer.slice(readStream.start, readStream.pos); 224 | var offset = 0; 225 | while (true) { 226 | var nextOffset = offset + this.maxChunkSize; 227 | if (nextOffset >= entireSlice.length) { 228 | // last chunk 229 | if (offset < entireSlice.length) { 230 | readStream.write(entireSlice.slice(offset, entireSlice.length)); 231 | } 232 | break; 233 | } 234 | readStream.write(entireSlice.slice(offset, nextOffset)); 235 | offset = nextOffset; 236 | } 237 | 238 | readStream.end(); 239 | readStream.destroy = function() { 240 | readStream.destroyed = true; 241 | }; 242 | return readStream; 243 | }; 244 | 245 | BufferSlicer.prototype.createWriteStream = function(options) { 246 | var bufferSlicer = this; 247 | options = options || {}; 248 | var writeStream = new Writable(options); 249 | writeStream.start = options.start || 0; 250 | writeStream.endOffset = (options.end == null) ? 
this.buffer.length : +options.end; 251 | writeStream.bytesWritten = 0; 252 | writeStream.pos = writeStream.start; 253 | writeStream.destroyed = false; 254 | writeStream._write = function(buffer, encoding, callback) { 255 | if (writeStream.destroyed) return; 256 | 257 | var end = writeStream.pos + buffer.length; 258 | if (end > writeStream.endOffset) { 259 | var err = new Error("maximum file length exceeded"); 260 | err.code = 'ETOOBIG'; 261 | writeStream.destroyed = true; 262 | callback(err); 263 | return; 264 | } 265 | buffer.copy(bufferSlicer.buffer, writeStream.pos, 0, buffer.length); 266 | 267 | writeStream.bytesWritten += buffer.length; 268 | writeStream.pos = end; 269 | writeStream.emit('progress'); 270 | callback(); 271 | }; 272 | writeStream.destroy = function() { 273 | writeStream.destroyed = true; 274 | }; 275 | return writeStream; 276 | }; 277 | 278 | BufferSlicer.prototype.ref = function() { 279 | this.refCount += 1; 280 | }; 281 | 282 | BufferSlicer.prototype.unref = function() { 283 | this.refCount -= 1; 284 | 285 | if (this.refCount < 0) { 286 | throw new Error("invalid unref"); 287 | } 288 | }; 289 | 290 | function createFromBuffer(buffer, options) { 291 | return new BufferSlicer(buffer, options); 292 | } 293 | 294 | function createFromFd(fd, options) { 295 | return new FdSlicer(fd, options); 296 | } 297 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fd-slicer", 3 | "version": "1.1.0", 4 | "description": "safely create multiple ReadStream or WriteStream objects from the same file descriptor", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "mocha --reporter spec --check-leaks", 8 | "test-cov": "istanbul cover node_modules/mocha/bin/_mocha -- --reporter dot --check-leaks test/test.js", 9 | "test-travis": "istanbul cover node_modules/mocha/bin/_mocha --report lcovonly -- --timeout 10000 --reporter 
spec --check-leaks test/test.js" 10 | }, 11 | "author": "Andrew Kelley ", 12 | "license": "MIT", 13 | "devDependencies": { 14 | "istanbul": "~0.3.3", 15 | "mocha": "~2.0.1", 16 | "stream-equal": "~0.1.5", 17 | "streamsink": "~1.2.0" 18 | }, 19 | "dependencies": { 20 | "pend": "~1.2.0" 21 | }, 22 | "directories": { 23 | "test": "test" 24 | }, 25 | "repository": { 26 | "type": "git", 27 | "url": "git://github.com/andrewrk/node-fd-slicer.git" 28 | }, 29 | "bugs": { 30 | "url": "https://github.com/andrewrk/node-fd-slicer/issues" 31 | }, 32 | "keywords": [ 33 | "createReadStream", 34 | "createWriteStream" 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /test/test.js: -------------------------------------------------------------------------------- 1 | var fdSlicer = require('../'); 2 | var fs = require('fs'); 3 | var crypto = require('crypto'); 4 | var path = require('path'); 5 | var streamEqual = require('stream-equal'); 6 | var assert = require('assert'); 7 | var Pend = require('pend'); 8 | var StreamSink = require('streamsink'); 9 | 10 | var describe = global.describe; 11 | var it = global.it; 12 | var before = global.before; 13 | var beforeEach = global.beforeEach; 14 | var after = global.after; 15 | 16 | var testBlobFile = path.join(__dirname, "test-blob.bin"); 17 | var testBlobFileSize = 20 * 1024 * 1024; 18 | var testOutBlobFile = path.join(__dirname, "test-blob-out.bin"); 19 | 20 | describe("FdSlicer", function() { 21 | before(function(done) { 22 | var out = fs.createWriteStream(testBlobFile); 23 | for (var i = 0; i < testBlobFileSize / 1024; i += 1) { 24 | out.write(crypto.pseudoRandomBytes(1024)); 25 | } 26 | out.end(); 27 | out.on('close', done); 28 | }); 29 | beforeEach(function() { 30 | try { 31 | fs.unlinkSync(testOutBlobFile); 32 | } catch (err) { 33 | } 34 | }); 35 | after(function() { 36 | try { 37 | fs.unlinkSync(testBlobFile); 38 | fs.unlinkSync(testOutBlobFile); 39 | } catch (err) { 40 | } 41 | }); 
42 | it("reads a 20MB file (autoClose on)", function(done) { 43 | fs.open(testBlobFile, 'r', function(err, fd) { 44 | if (err) return done(err); 45 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 46 | var actualStream = slicer.createReadStream(); 47 | var expectedStream = fs.createReadStream(testBlobFile); 48 | 49 | var pend = new Pend(); 50 | pend.go(function(cb) { 51 | slicer.on('close', cb); 52 | }); 53 | pend.go(function(cb) { 54 | streamEqual(expectedStream, actualStream, function(err, equal) { 55 | if (err) return done(err); 56 | assert.ok(equal); 57 | cb(); 58 | }); 59 | }); 60 | pend.wait(done); 61 | }); 62 | }); 63 | it("reads 4 chunks simultaneously", function(done) { 64 | fs.open(testBlobFile, 'r', function(err, fd) { 65 | if (err) return done(err); 66 | var slicer = fdSlicer.createFromFd(fd); 67 | var actualPart1 = slicer.createReadStream({start: testBlobFileSize * 0/4, end: testBlobFileSize * 1/4}); 68 | var actualPart2 = slicer.createReadStream({start: testBlobFileSize * 1/4, end: testBlobFileSize * 2/4}); 69 | var actualPart3 = slicer.createReadStream({start: testBlobFileSize * 2/4, end: testBlobFileSize * 3/4}); 70 | var actualPart4 = slicer.createReadStream({start: testBlobFileSize * 3/4, end: testBlobFileSize * 4/4}); 71 | var expectedPart1 = slicer.createReadStream({start: testBlobFileSize * 0/4, end: testBlobFileSize * 1/4}); 72 | var expectedPart2 = slicer.createReadStream({start: testBlobFileSize * 1/4, end: testBlobFileSize * 2/4}); 73 | var expectedPart3 = slicer.createReadStream({start: testBlobFileSize * 2/4, end: testBlobFileSize * 3/4}); 74 | var expectedPart4 = slicer.createReadStream({start: testBlobFileSize * 3/4, end: testBlobFileSize * 4/4}); 75 | var pend = new Pend(); 76 | pend.go(function(cb) { 77 | streamEqual(expectedPart1, actualPart1, function(err, equal) { 78 | assert.ok(equal); 79 | cb(err); 80 | }); 81 | }); 82 | pend.go(function(cb) { 83 | streamEqual(expectedPart2, actualPart2, function(err, equal) { 84 | 
assert.ok(equal); 85 | cb(err); 86 | }); 87 | }); 88 | pend.go(function(cb) { 89 | streamEqual(expectedPart3, actualPart3, function(err, equal) { 90 | assert.ok(equal); 91 | cb(err); 92 | }); 93 | }); 94 | pend.go(function(cb) { 95 | streamEqual(expectedPart4, actualPart4, function(err, equal) { 96 | assert.ok(equal); 97 | cb(err); 98 | }); 99 | }); 100 | pend.wait(function(err) { 101 | if (err) return done(err); 102 | fs.close(fd, done); 103 | }); 104 | }); 105 | }); 106 | 107 | it("writes a 20MB file (autoClose on)", function(done) { 108 | fs.open(testOutBlobFile, 'w', function(err, fd) { 109 | if (err) return done(err); 110 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 111 | var actualStream = slicer.createWriteStream(); 112 | var inStream = fs.createReadStream(testBlobFile); 113 | 114 | slicer.on('close', function() { 115 | var expected = fs.createReadStream(testBlobFile); 116 | var actual = fs.createReadStream(testOutBlobFile); 117 | 118 | streamEqual(expected, actual, function(err, equal) { 119 | if (err) return done(err); 120 | assert.ok(equal); 121 | done(); 122 | }); 123 | }); 124 | inStream.pipe(actualStream); 125 | }); 126 | }); 127 | 128 | it("writes 4 chunks simultaneously", function(done) { 129 | fs.open(testOutBlobFile, 'w', function(err, fd) { 130 | if (err) return done(err); 131 | var slicer = fdSlicer.createFromFd(fd); 132 | var actualPart1 = slicer.createWriteStream({start: testBlobFileSize * 0/4}); 133 | var actualPart2 = slicer.createWriteStream({start: testBlobFileSize * 1/4}); 134 | var actualPart3 = slicer.createWriteStream({start: testBlobFileSize * 2/4}); 135 | var actualPart4 = slicer.createWriteStream({start: testBlobFileSize * 3/4}); 136 | var in1 = fs.createReadStream(testBlobFile, {start: testBlobFileSize * 0/4, end: testBlobFileSize * 1/4}); 137 | var in2 = fs.createReadStream(testBlobFile, {start: testBlobFileSize * 1/4, end: testBlobFileSize * 2/4}); 138 | var in3 = fs.createReadStream(testBlobFile, {start: 
testBlobFileSize * 2/4, end: testBlobFileSize * 3/4}); 139 | var in4 = fs.createReadStream(testBlobFile, {start: testBlobFileSize * 3/4, end: testBlobFileSize * 4/4}); 140 | var pend = new Pend(); 141 | pend.go(function(cb) { 142 | actualPart1.on('finish', cb); 143 | }); 144 | pend.go(function(cb) { 145 | actualPart2.on('finish', cb); 146 | }); 147 | pend.go(function(cb) { 148 | actualPart3.on('finish', cb); 149 | }); 150 | pend.go(function(cb) { 151 | actualPart4.on('finish', cb); 152 | }); 153 | in1.pipe(actualPart1); 154 | in2.pipe(actualPart2); 155 | in3.pipe(actualPart3); 156 | in4.pipe(actualPart4); 157 | pend.wait(function() { 158 | fs.close(fd, function(err) { 159 | if (err) return done(err); 160 | var expected = fs.createReadStream(testBlobFile); 161 | var actual = fs.createReadStream(testOutBlobFile); 162 | streamEqual(expected, actual, function(err, equal) { 163 | if (err) return done(err); 164 | assert.ok(equal); 165 | done(); 166 | }); 167 | }); 168 | }); 169 | }); 170 | }); 171 | 172 | it("throws on invalid ref", function(done) { 173 | fs.open(testOutBlobFile, 'w', function(err, fd) { 174 | if (err) return done(err); 175 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 176 | assert.throws(function() { 177 | slicer.unref(); 178 | }, /invalid unref/); 179 | fs.close(fd, done); 180 | }); 181 | }); 182 | 183 | it("write stream emits error when max size exceeded", function(done) { 184 | fs.open(testOutBlobFile, 'w', function(err, fd) { 185 | if (err) return done(err); 186 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 187 | var ws = slicer.createWriteStream({start: 0, end: 1000}); 188 | ws.on('error', function(err) { 189 | assert.strictEqual(err.code, 'ETOOBIG'); 190 | slicer.on('close', done); 191 | }); 192 | ws.end(new Buffer(1001)); 193 | }); 194 | }); 195 | 196 | it("write stream does not emit error when max size not exceeded", function(done) { 197 | fs.open(testOutBlobFile, 'w', function(err, fd) { 198 | if (err) return 
done(err); 199 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 200 | var ws = slicer.createWriteStream({end: 1000}); 201 | slicer.on('close', done); 202 | ws.end(new Buffer(1000)); 203 | }); 204 | }); 205 | 206 | it("write stream start and end work together", function(done) { 207 | fs.open(testOutBlobFile, 'w', function(err, fd) { 208 | if (err) return done(err); 209 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 210 | var ws = slicer.createWriteStream({start: 1, end: 1000}); 211 | ws.on('error', function(err) { 212 | assert.strictEqual(err.code, 'ETOOBIG'); 213 | slicer.on('close', done); 214 | }); 215 | ws.end(new Buffer(1000)); 216 | }); 217 | }); 218 | 219 | it("write stream emits progress events", function(done) { 220 | fs.open(testOutBlobFile, 'w', function(err, fd) { 221 | if (err) return done(err); 222 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 223 | var ws = slicer.createWriteStream(); 224 | var progressEventCount = 0; 225 | var prevBytesWritten = 0; 226 | ws.on('progress', function() { 227 | progressEventCount += 1; 228 | assert.ok(ws.bytesWritten > prevBytesWritten); 229 | prevBytesWritten = ws.bytesWritten; 230 | }); 231 | slicer.on('close', function() { 232 | assert.ok(progressEventCount > 5); 233 | done(); 234 | }); 235 | for (var i = 0; i < 10; i += 1) { 236 | ws.write(new Buffer(16 * 1024 * 2)); 237 | } 238 | ws.end(); 239 | }); 240 | }); 241 | 242 | it("write stream unrefs when destroyed", function(done) { 243 | fs.open(testOutBlobFile, 'w', function(err, fd) { 244 | if (err) return done(err); 245 | var slicer = fdSlicer.createFromFd(fd, {autoClose: true}); 246 | var ws = slicer.createWriteStream(); 247 | slicer.on('close', done); 248 | ws.write(new Buffer(1000)); 249 | ws.destroy(); 250 | }); 251 | }); 252 | 253 | it("read stream unrefs when destroyed", function(done) { 254 | fs.open(testBlobFile, 'r', function(err, fd) { 255 | if (err) return done(err); 256 | var slicer = fdSlicer.createFromFd(fd, 
{autoClose: true}); 257 | var rs = slicer.createReadStream(); 258 | rs.on('error', function(err) { 259 | assert.strictEqual(err.message, "stream destroyed"); 260 | slicer.on('close', done); 261 | }); 262 | rs.destroy(); 263 | }); 264 | }); 265 | 266 | it("fdSlicer.read", function(done) { 267 | fs.open(testBlobFile, 'r', function(err, fd) { 268 | if (err) return done(err); 269 | var slicer = fdSlicer.createFromFd(fd); 270 | var outBuf = new Buffer(1024); 271 | slicer.read(outBuf, 0, 10, 0, function(err, bytesRead, buf) { 272 | assert.strictEqual(bytesRead, 10); 273 | fs.close(fd, done); 274 | }); 275 | }); 276 | }); 277 | 278 | it("fdSlicer.write", function(done) { 279 | fs.open(testOutBlobFile, 'w', function(err, fd) { 280 | if (err) return done(err); 281 | var slicer = fdSlicer.createFromFd(fd); 282 | slicer.write(new Buffer("blah\n"), 0, 5, 0, function() { 283 | if (err) return done(err); 284 | fs.close(fd, done); 285 | }); 286 | }); 287 | }); 288 | }); 289 | 290 | describe("BufferSlicer", function() { 291 | it("invalid ref", function() { 292 | var slicer = fdSlicer.createFromBuffer(new Buffer(16)); 293 | slicer.ref(); 294 | slicer.unref(); 295 | assert.throws(function() { 296 | slicer.unref(); 297 | }, /invalid unref/); 298 | }); 299 | it("read and write", function(done) { 300 | var buf = new Buffer("through the tangled thread the needle finds its way"); 301 | var slicer = fdSlicer.createFromBuffer(buf); 302 | var outBuf = new Buffer(1024); 303 | slicer.read(outBuf, 10, 11, 8, function(err) { 304 | if (err) return done(err); 305 | assert.strictEqual(outBuf.toString('utf8', 10, 21), "the tangled"); 306 | slicer.write(new Buffer("derp"), 0, 4, 7, function(err) { 307 | if (err) return done(err); 308 | assert.strictEqual(buf.toString('utf8', 7, 19), "derp tangled"); 309 | done(); 310 | }); 311 | }); 312 | }); 313 | it("createReadStream", function(done) { 314 | var str = "I never conquered rarely came, 16 just held such better days"; 315 | var buf = new Buffer(str); 
316 | var slicer = fdSlicer.createFromBuffer(buf); 317 | var inStream = slicer.createReadStream(); 318 | var sink = new StreamSink(); 319 | inStream.pipe(sink); 320 | sink.on('finish', function() { 321 | assert.strictEqual(sink.toString(), str); 322 | inStream.destroy(); 323 | done(); 324 | }); 325 | }); 326 | it("createWriteStream exceed buffer size", function(done) { 327 | var slicer = fdSlicer.createFromBuffer(new Buffer(4)); 328 | var outStream = slicer.createWriteStream(); 329 | outStream.on('error', function(err) { 330 | assert.strictEqual(err.code, 'ETOOBIG'); 331 | done(); 332 | }); 333 | outStream.write("hi!\n"); 334 | outStream.write("it warked\n"); 335 | outStream.end(); 336 | }); 337 | it("createWriteStream ok", function(done) { 338 | var buf = new Buffer(1024); 339 | var slicer = fdSlicer.createFromBuffer(buf); 340 | var outStream = slicer.createWriteStream(); 341 | outStream.on('finish', function() { 342 | assert.strictEqual(buf.toString('utf8', 0, "hi!\nit warked\n".length), "hi!\nit warked\n"); 343 | outStream.destroy(); 344 | done(); 345 | }); 346 | outStream.write("hi!\n"); 347 | outStream.write("it warked\n"); 348 | outStream.end(); 349 | }); 350 | }); 351 | --------------------------------------------------------------------------------