├── .gitignore ├── examples ├── .gitignore └── upload.js ├── .travis.yml ├── .editorconfig ├── .jshintignore ├── .jshintrc ├── package.json ├── LICENSE ├── CHANGELOG.md ├── lib └── s3-upload-stream.js ├── README.md └── tests └── test.js /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | *.swp 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | config.json 2 | video.mp4 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "0.10" 4 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | 7 | [*.js] 8 | indent_style = space 9 | indent_size = 2 10 | -------------------------------------------------------------------------------- /.jshintignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | extras/conversion/node_modules 3 | dashboard/dist 4 | dashboard/vendor 5 | dashboard/node_modules 6 | dashboard/test/coverage 7 | scripts 8 | extras 9 | logstash 10 | dashboard 11 | console 12 | -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "asi": false, 3 | "expr": true, 4 | "loopfunc": true, 5 | "curly": false, 6 | "unused": true, 7 | "evil": true, 8 | "white": true, 9 | "undef": true, 10 | "trailing": true, 11 | "browser": true, 12 | "predef": [ 13 | "$", 14 | "FormBot", 15 | "socket", 16 | "confirm", 17 | "it", 18 | "before", 19 | "describe", 20 | "alert", 21 | "require", 22 | "__dirname", 23 | "process", 24 | "exports", 25 | "console", 26 | "setImmediate", 27 | "Buffer", 28 | "module" 29 | ], 30 | "indent": 2 31 | } 32 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "s3-upload-stream", 3 | "description": "Writeable stream for uploading content of unknown size to S3 via the multipart API.", 4 | "version": "1.0.7", 5 | "author": { 6 | "name": "Nathan Peck", 7 | "email": "nathan@storydesk.com" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git://github.com/nathanpeck/s3-upload-stream.git" 12 | }, 13 | "devDependencies": { 14 | "mocha": "1.17.1", 15 | "aws-sdk": "2.0.17", 16 | "chai": "1.8.1" 17 | }, 18 | "dependencies": {}, 19 | "peerDependencies": { 20 | "aws-sdk": "2.x" 21 | }, 22 | "keywords": [ 23 | "aws", 24 | "s3", 25 | "upload", 26 | "pipe", 27 | "stream" 28 | ], 29 | "license": "MIT", 30 | "readmeFilename": "README.md", 31 | "main": "./lib/s3-upload-stream.js", 32 | "engines": { 33 | "node": ">=0.10.x" 34 | }, 35 | "scripts": { 36 | "test": "mocha -R spec -s 100 ./tests/test.js" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /examples/upload.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | var AWS = require('aws-sdk'), 3 | zlib = require('zlib'), 4 | 
fs = require('fs'); 5 | 6 | // Make sure AWS credentials are loaded. 7 | AWS.config.loadFromPath('./config.json'); 8 | 9 | // Initialize a stream client. 10 | var s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3()); 11 | 12 | // Create the streams 13 | var read = fs.createReadStream('./video.mp4'); 14 | var compress = zlib.createGzip(); 15 | var upload = s3Stream.upload({ 16 | "Bucket": "bucket", 17 | "Key": "video.mp4.gz" 18 | }); 19 | 20 | // Handle errors. 21 | upload.on('error', function (error) { 22 | console.log(error); 23 | }); 24 | 25 | // Handle progress. 26 | upload.on('part', function (details) { 27 | console.log(details); 28 | }); 29 | 30 | // Handle upload completion. 31 | upload.on('uploaded', function (details) { 32 | console.log(details); 33 | }); 34 | 35 | // Pipe the incoming filestream through compression, and up to S3. 36 | read.pipe(compress).pipe(upload); 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | (The MIT License) 2 | 3 | Copyright (c) 2013-2015 Nathan Peck 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | 'Software'), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 21 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Changelog [![Changelog Status](https://changelogs.md/img/changelog-check-green.svg)](https://changelogs.md/github/nathanpeck/s3-upload-stream/) 2 | ========= 3 | 4 | #### 1.0.6 (2014-10-20) 5 | 6 | Removing global state, and adding pause and resume functionality. 7 | 8 | #### 1.0.5 (2014-10-13) 9 | 10 | Changing how buffers are subdivided, in order to provide support for in browser operation. 11 | 12 | #### 1.0.4 (2014-10-13) 13 | 14 | Getting rid of the use of setImmeadiate. Also now the MPU is not initialized until data is actually received by the writable stream, and error checking verifies that data has actually been uploaded to S3 before trying to end the stream. This fixes an issue where empty incoming streams were causing errors to come back from S3 as the module was attempting to complete an empty MPU. 15 | 16 | #### 1.0.3 (2014-10-12) 17 | 18 | Scope changes for certain use cases. 19 | 20 | #### 1.0.2 (2014-09-26) 21 | 22 | Now emits a "finish" event, as well as the "uploaded" event, in order to adhere to Node.js writable stream spec. 
23 | 
24 | #### 1.0.1 (2014-09-26)
25 | 
26 | Fixed a usage error in the documentation and examples. The examples did not use the "new" keyword when creating the upload stream, so there were scope issues when doing parallel uploads. This has been clarified and corrected in the documentation and examples.
27 | 
28 | #### 1.0.0 (2014-09-15)
29 | 
30 | Major overhaul of the functional interface. Breaks compatibility with older versions of the module in favor of a cleaner, more streamlined approach. A migration guide for users of older versions of the module has been included in the documentation.
31 | 
32 | #### 0.6.2 (2014-08-31)
33 | 
34 | Upgrading the AWS SDK dependency to the latest version. Fixes issue #11.
35 | 
36 | #### 0.6.1 (2014-08-22)
37 | 
38 | * The internal event emitter wasn't set up properly, causing errors about the upload stream object not having the .emit and/or .once methods. This bug impacted versions 0.5.0 and 0.6.0. Fixes issue #10.
39 | 
40 | #### 0.6.0 (2014-08-15)
41 | 
42 | * Fix for a mismatch between the documented and actual behavior of the maxPartSize() and concurrentParts() options.
43 | * New feature: the part size and concurrent part helpers can now be chained.
44 | * *Warning, this version has a critical bug. It is recommended that you use 0.6.1 instead*
45 | 
46 | #### 0.5.0 (2014-08-11)
47 | 
48 | * Added client caching to reuse an existing S3 client rather than creating a new one for each upload. Fixes #6
49 | * Updated the maxPartSize to be a hard limit instead of a soft one so that the generated ETags are consistent with the reliable size of the uploaded parts. Fixes #7
50 | * Added this file. Fixes #8
51 | * New feature: concurrent part uploads. Now you can optionally enable concurrent part uploads if you wish to allow your application to drain the source stream more quickly and absorb some of the bottleneck when uploading to S3.
52 | * *Warning, this version has a critical bug. It is recommended that you use 0.6.1 instead*
53 | 
54 | #### 0.4.0 (2014-06-23)
55 | 
56 | * Now with better error handling. If an error occurs while uploading a part to S3 or completing a multipart upload, then the in-progress multipart upload will be aborted (to delete the uploaded parts from S3) and a more descriptive error message will be emitted instead of the raw error response from S3.
57 | 
58 | #### 0.3.0 (2014-05-06)
59 | 
60 | * Added tests using a stubbed out version of the Amazon S3 client. These tests ensure that the upload stream behaves properly, calls S3 correctly, and emits the proper events.
61 | * Added Travis integration.
62 | * Also fixed a bug in the functionality to dynamically adjust the part size.
63 | 
64 | #### 0.2.0 (2014-04-25)
65 | 
66 | * Fixed a race condition bug that occurred occasionally with streams very close to the 5 MB size threshold, where the multipart upload would be finalized on S3 prior to the last data buffer being flushed, resulting in the last part of the stream being cut off in the resulting S3 file. (Notice: If you are using an older version of this module I highly recommend upgrading to get this latest bugfix.)
67 | * Added a method for adjusting the part size dynamically.
68 | 
69 | #### 0.1.0 (2014-04-17)
70 | 
71 | * Code cleanups and stylistic goodness.
72 | * Made the connection parameters optional for those who are following Amazon's best practice of allowing the SDK to get AWS credentials from environment variables or IAM roles.
73 | 
74 | #### 0.0.3 (2013-12-25)
75 | 
76 | * Merged pull request #2 to fix an issue where the latest version of the AWS SDK required a strict type on the part number.
77 | 
78 | #### 0.0.2 (2013-08-01)
79 | 
80 | * Improved the documentation
81 | 
82 | #### 0.0.1 (2013-07-31)
83 | 
84 | * Initial release
85 | 
--------------------------------------------------------------------------------
/lib/s3-upload-stream.js:
--------------------------------------------------------------------------------
1 | var Writable = require('stream').Writable,
2 |     events = require("events");
3 | 
4 | // Set the S3 client to be used for this upload.
5 | function Client(client) {
6 |   if (this instanceof Client === false) {
7 |     return new Client(client);
8 |   }
9 | 
10 |   if (!client) {
11 |     throw new Error('Must configure an S3 client before attempting to create an S3 upload stream.');
12 |   }
13 | 
14 |   this.cachedClient = client;
15 | }
16 | 
17 | // Generate a writable stream which uploads to a file on S3.
18 | Client.prototype.upload = function (destinationDetails, sessionDetails) {
19 |   var cachedClient = this.cachedClient;
20 |   var e = new events.EventEmitter();
21 | 
22 |   if (!sessionDetails) sessionDetails = {};
23 | 
24 |   // Create the writable stream interface.
25 |   var ws = new Writable({
26 |     highWaterMark: 4194304 // 4 MB
27 |   });
28 | 
29 |   // Data pertaining to the overall upload.
30 |   // If resumable parts are passed in, they must be free of gaps.
31 |   var multipartUploadID = sessionDetails.UploadId ? sessionDetails.UploadId : null;
32 |   var partNumber = sessionDetails.Parts ? (sessionDetails.Parts.length + 1) : 1;
33 |   var partIds = sessionDetails.Parts || [];
34 |   var receivedSize = 0;
35 |   var uploadedSize = 0;
36 | 
37 |   // Light state management -
38 |   // started: used to fire the 'ready' event when resuming an existing upload
39 |   // paused: used to govern manual pause/resume
40 |   var started = false;
41 |   var paused = false;
42 | 
43 |   // Parts which are currently being uploaded to S3.
44 |   var pendingParts = 0;
45 |   var concurrentPartThreshold = 1;
46 | 
47 |   // Data pertaining to buffers we have received
48 |   var receivedBuffers = [];
49 |   var receivedBuffersLength = 0;
50 |   var partSizeThreshold = 5242880;
51 | 
52 |   // Set the maximum amount of data that we will keep in memory before flushing it to S3 as a part
53 |   // of the multipart upload
54 |   ws.maxPartSize = function (partSize) {
55 |     if (partSize < 5242880)
56 |       partSize = 5242880;
57 | 
58 |     partSizeThreshold = partSize;
59 |     return ws;
60 |   };
61 | 
62 |   ws.getMaxPartSize = function () {
63 |     return partSizeThreshold;
64 |   };
65 | 
66 |   // Set the maximum number of parts that we will upload to S3 concurrently as part
67 |   // of the multipart upload
68 |   ws.concurrentParts = function (parts) {
69 |     if (parts < 1)
70 |       parts = 1;
71 | 
72 |     concurrentPartThreshold = parts;
73 |     return ws;
74 |   };
75 | 
76 |   ws.getConcurrentParts = function () {
77 |     return concurrentPartThreshold;
78 |   };
79 | 
80 |   // Handler to receive data and upload it to S3.
81 |   ws._write = function (incomingBuffer, enc, next) {
82 |     // Pause/resume check #1 out of 2:
83 |     // Block incoming writes immediately on pause.
84 |     if (paused)
85 |       e.once('resume', write);
86 |     else
87 |       write();
88 | 
89 |     function write() {
90 |       absorbBuffer(incomingBuffer);
91 | 
92 |       if (receivedBuffersLength < partSizeThreshold)
93 |         return next(); // Ready to receive more data in _write.
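      // When we reach partSizeThreshold we do not call next() here. Instead uploadHandler is handed
      // the callback and only invokes it once the part upload has been started; if the concurrent-part
      // limit is reached, the stream is paused, or the multipart upload is still being created, the
      // callback is deferred. Withholding next() is what applies backpressure to the piped-in source.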
94 | 95 | // We need to upload some data 96 | uploadHandler(next); 97 | } 98 | }; 99 | 100 | // Ask the stream to pause - will allow existing 101 | // part uploads to complete first. 102 | ws.pause = function () { 103 | // if already mid-pause, this does nothing 104 | if (paused) return false; 105 | 106 | // if there's no active upload, this does nothing 107 | if (!started) return false; 108 | 109 | paused = true; 110 | // give caller how many parts are mid-upload 111 | ws.emit('pausing', pendingParts); 112 | 113 | // if there are no parts outstanding, declare the stream 114 | // paused and return currently sent part details. 115 | if (pendingParts === 0) 116 | notifyPaused(); 117 | 118 | // otherwise, the 'paused' event will get sent once the 119 | // last part finishes uploading. 120 | 121 | return true; 122 | }; 123 | 124 | // Lift the pause, and re-kick off the uploading. 125 | ws.resume = function () { 126 | // if we're not paused, this does nothing 127 | if (!paused) return false; 128 | 129 | paused = false; 130 | e.emit('resume'); // internal event 131 | ws.emit('resume'); // external event 132 | 133 | return true; 134 | }; 135 | 136 | // when pausing, return relevant pause state to client 137 | var notifyPaused = function () { 138 | ws.emit('paused', { 139 | UploadId: multipartUploadID, 140 | Parts: partIds, 141 | uploadedSize: uploadedSize 142 | }); 143 | }; 144 | 145 | // Concurrently upload parts to S3. 146 | var uploadHandler = function (next) { 147 | 148 | // If this is the first part, and we're just starting, 149 | // but we have a multipartUploadID, then we're beginning 150 | // a resume and can fire the 'ready' event externally. 151 | if (multipartUploadID && !started) 152 | ws.emit('ready', multipartUploadID); 153 | 154 | started = true; 155 | 156 | if (pendingParts < concurrentPartThreshold) { 157 | // Has the MPU been created yet? 158 | if (multipartUploadID) 159 | upload(); // Upload the part immediately. 160 | else { 161 | e.once('ready', upload); // Wait until multipart upload is initialized. 162 | createMultipartUpload(); 163 | } 164 | } 165 | else { 166 | // Block uploading (and receiving of more data) until we upload 167 | // some of the pending parts 168 | e.once('part', upload); 169 | } 170 | 171 | function upload() { 172 | 173 | // Pause/resume check #2 out of 2: 174 | // Block queued up parts until resumption. 175 | if (paused) 176 | e.once('resume', uploadNow); 177 | else 178 | uploadNow(); 179 | 180 | function uploadNow() { 181 | pendingParts++; 182 | flushPart(function (partDetails) { 183 | --pendingParts; 184 | e.emit('part'); // Internal event 185 | ws.emit('part', partDetails); // External event 186 | 187 | // if we're paused and this was the last outstanding part, 188 | // we can notify the caller that we're really paused now. 189 | if (paused && pendingParts === 0) 190 | notifyPaused(); 191 | }); 192 | next(); 193 | } 194 | } 195 | }; 196 | 197 | // Absorb an incoming buffer from _write into a buffer queue 198 | var absorbBuffer = function (incomingBuffer) { 199 | receivedBuffers.push(incomingBuffer); 200 | receivedBuffersLength += incomingBuffer.length; 201 | }; 202 | 203 | // Take a list of received buffers and return a combined buffer that is exactly 204 | // partSizeThreshold in size. 205 | var preparePartBuffer = function () { 206 | // Combine the buffers we've received and reset the list of buffers. 
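    // If more than partSizeThreshold bytes have accumulated, the surplus is sliced off below and
    // pushed back onto receivedBuffers, so the buffer handed to uploadPart is exactly partSizeThreshold bytes.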
207 | var combinedBuffer = Buffer.concat(receivedBuffers, receivedBuffersLength); 208 | receivedBuffers.length = 0; // Trick to reset the array while keeping the original reference 209 | receivedBuffersLength = 0; 210 | 211 | if (combinedBuffer.length > partSizeThreshold) { 212 | // The combined buffer is too big, so slice off the end and put it back in the array. 213 | var remainder = new Buffer(combinedBuffer.length - partSizeThreshold); 214 | combinedBuffer.copy(remainder, 0, partSizeThreshold); 215 | receivedBuffers.push(remainder); 216 | receivedBuffersLength = remainder.length; 217 | 218 | // Return the perfectly sized part. 219 | var uploadBuffer = new Buffer(partSizeThreshold); 220 | combinedBuffer.copy(uploadBuffer, 0, 0, partSizeThreshold); 221 | return uploadBuffer; 222 | } 223 | else { 224 | // It just happened to be perfectly sized, so return it. 225 | return combinedBuffer; 226 | } 227 | }; 228 | 229 | // Flush a part out to S3. 230 | var flushPart = function (callback) { 231 | var partBuffer = preparePartBuffer(); 232 | 233 | var localPartNumber = partNumber; 234 | partNumber++; 235 | receivedSize += partBuffer.length; 236 | cachedClient.uploadPart( 237 | { 238 | Body: partBuffer, 239 | Bucket: destinationDetails.Bucket, 240 | Key: destinationDetails.Key, 241 | UploadId: multipartUploadID, 242 | PartNumber: localPartNumber 243 | }, 244 | function (err, result) { 245 | if (err) 246 | abortUpload('Failed to upload a part to S3: ' + JSON.stringify(err)); 247 | else { 248 | uploadedSize += partBuffer.length; 249 | partIds[localPartNumber - 1] = { 250 | ETag: result.ETag, 251 | PartNumber: localPartNumber 252 | }; 253 | 254 | callback({ 255 | ETag: result.ETag, 256 | PartNumber: localPartNumber, 257 | receivedSize: receivedSize, 258 | uploadedSize: uploadedSize 259 | }); 260 | } 261 | } 262 | ); 263 | }; 264 | 265 | // Overwrite the end method so that we can hijack it to flush the last part and then complete 266 | // the multipart upload 267 | ws.originalEnd = ws.end; 268 | ws.end = function (Part, encoding, callback) { 269 | ws.originalEnd(Part, encoding, function afterDoneWithOriginalEnd() { 270 | if (Part) 271 | absorbBuffer(Part); 272 | 273 | // Upload any remaining data 274 | var uploadRemainingData = function () { 275 | if (receivedBuffersLength > 0) { 276 | uploadHandler(uploadRemainingData); 277 | return; 278 | } 279 | 280 | if (pendingParts > 0) { 281 | setTimeout(uploadRemainingData, 50); // Wait 50 ms for the pending uploads to finish before trying again. 282 | return; 283 | } 284 | 285 | completeUpload(); 286 | }; 287 | 288 | uploadRemainingData(); 289 | 290 | if (typeof callback == 'function') 291 | callback(); 292 | }); 293 | }; 294 | 295 | // Turn all the individual parts we uploaded to S3 into a finalized upload. 296 | var completeUpload = function () { 297 | // There is a possibility that the incoming stream was empty, therefore the MPU never started 298 | // and cannot be finalized. 299 | if (multipartUploadID) { 300 | cachedClient.completeMultipartUpload( 301 | { 302 | Bucket: destinationDetails.Bucket, 303 | Key: destinationDetails.Key, 304 | UploadId: multipartUploadID, 305 | MultipartUpload: { 306 | Parts: partIds 307 | } 308 | }, 309 | function (err, result) { 310 | if (err) 311 | abortUpload('Failed to complete the multipart upload on S3: ' + JSON.stringify(err)); 312 | else { 313 | // Emit both events for backwards compatibility, and to follow the spec. 
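          // 'uploaded' is this module's original completion event; 'finish' is the standard
          // writable stream event. Both receive the result of completeMultipartUpload.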
314 | ws.emit('uploaded', result); 315 | ws.emit('finish', result); 316 | started = false; 317 | } 318 | } 319 | ); 320 | } 321 | }; 322 | 323 | // When a fatal error occurs abort the multipart upload 324 | var abortUpload = function (rootError) { 325 | cachedClient.abortMultipartUpload( 326 | { 327 | Bucket: destinationDetails.Bucket, 328 | Key: destinationDetails.Key, 329 | UploadId: multipartUploadID 330 | }, 331 | function (abortError) { 332 | if (abortError) 333 | ws.emit('error', rootError + '\n Additionally failed to abort the multipart upload on S3: ' + abortError); 334 | else 335 | ws.emit('error', rootError); 336 | } 337 | ); 338 | }; 339 | 340 | var createMultipartUpload = function () { 341 | cachedClient.createMultipartUpload( 342 | destinationDetails, 343 | function (err, data) { 344 | if (err) 345 | ws.emit('error', 'Failed to create a multipart upload on S3: ' + JSON.stringify(err)); 346 | else { 347 | multipartUploadID = data.UploadId; 348 | ws.emit('ready', multipartUploadID); 349 | e.emit('ready'); // Internal event 350 | } 351 | } 352 | ); 353 | }; 354 | 355 | return ws; 356 | }; 357 | 358 | Client.globalClient = null; 359 | 360 | Client.client = function (options) { 361 | Client.globalClient = new Client(options); 362 | return Client.globalClient; 363 | }; 364 | 365 | Client.upload = function (destinationDetails, sessionDetails) { 366 | if (!Client.globalClient) { 367 | throw new Error('Must configure an S3 client before attempting to create an S3 upload stream.'); 368 | } 369 | return Client.globalClient.upload(destinationDetails, sessionDetails); 370 | }; 371 | 372 | module.exports = Client; 373 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## s3-upload-stream [![Build Status](https://travis-ci.org/nathanpeck/s3-upload-stream.svg)](https://travis-ci.org/nathanpeck/s3-upload-stream) [![Changelog Status](https://changelogs.md/img/changelog-check-green.svg)](https://changelogs.md/github/nathanpeck/s3-upload-stream/) 2 | 3 | A pipeable write stream which uploads to Amazon S3 using the multipart file upload API. 4 | 5 | **NOTE: This module is deprecated after the 2.1.0 release of the AWS SDK on Dec 9, 2014, which added [`S3.upload()`](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#upload-property). I highly recommend switching away from this module and using the official method supported by AWS.** 6 | 7 | [![NPM](https://nodei.co/npm/s3-upload-stream.png?downloads=true)](https://www.npmjs.org/package/s3-upload-stream) 8 | 9 | ### Changelog 10 | 11 | #### 1.0.6 (2014-10-20) 12 | 13 | Removing global state, and adding pause and resume functionality. 14 | 15 | [Historical Changelogs](CHANGELOG.md) 16 | 17 | ### Why use this stream? 18 | 19 | * This upload stream does not require you to know the length of your content prior to beginning uploading. Many other popular S3 wrappers such as [Knox](https://github.com/LearnBoost/knox) also allow you to upload streams to S3, but they require you to specify the content length. This is not always feasible. 20 | * By piping content to S3 via the multipart file upload API you can keep memory usage low even when operating on a stream that is GB in size. Many other libraries actually store the entire stream in memory and then upload it in one piece. 
This stream avoids high memory usage by flushing the stream to S3 in 5 MB parts, so that it only ever holds about 5 MB of the stream's data in memory at a time.
21 | * This package is designed to use the official Amazon SDK for Node.js, helping keep it small and efficient. For maximum flexibility you pass in the aws-sdk client yourself, allowing you to use a uniform version of the AWS SDK throughout your code base.
22 | * You can provide options for the upload call directly to do things like set server-side encryption, reduced redundancy storage, or the access level on the object, which some other similar streams are lacking.
23 | * Emits "part" events which expose the amount of incoming data received by the writable stream versus the amount of data that has been uploaded via the multipart API so far, allowing you to create a progress bar if that is a requirement.
24 | * Support for pausing and later resuming in-progress multipart uploads.
25 | 
26 | ### Limits
27 | 
28 | * The multipart upload API does not accept parts less than 5 MB in size. So although this stream emits "part" events which can be used to show progress, the progress is not very granular, as the events are only per part. By default this means that you will receive an event every 5 MB.
29 | * The Amazon SDK has a limit of 10,000 parts when doing a multipart upload. Since the part size is currently set to 5 MB this means that your stream will fail to upload if it contains more than 50 GB of data. This can be solved by using the 'stream.maxPartSize()' method of the writable stream to set the maximum size of an upload part, as documented below. By increasing this value you should be able to save streams that are many TB in size.
30 | 
31 | ## Example
32 | 
33 | ```js
34 | var AWS = require('aws-sdk'),
35 |     zlib = require('zlib'),
36 |     fs = require('fs');
37 | 
38 | // Make sure AWS credentials are loaded before the S3 client is created.
39 | AWS.config.loadFromPath('./config.json');
40 | var s3Stream = require('s3-upload-stream')(new AWS.S3());
41 | 
42 | // Create the streams
43 | var read = fs.createReadStream('/path/to/a/file');
44 | var compress = zlib.createGzip();
45 | var upload = s3Stream.upload({
46 |   "Bucket": "bucket-name",
47 |   "Key": "key-name"
48 | });
49 | 
50 | // Optional configuration
51 | upload.maxPartSize(20971520); // 20 MB
52 | upload.concurrentParts(5);
53 | 
54 | // Handle errors.
55 | upload.on('error', function (error) {
56 |   console.log(error);
57 | });
58 | 
59 | /* Handle progress. Example details object:
60 |    { ETag: '"f9ef956c83756a80ad62f54ae5e7d34b"',
61 |      PartNumber: 5,
62 |      receivedSize: 29671068,
63 |      uploadedSize: 29671068 }
64 | */
65 | upload.on('part', function (details) {
66 |   console.log(details);
67 | });
68 | 
69 | /* Handle upload completion. Example details object:
70 |    { Location: 'https://bucketName.s3.amazonaws.com/filename.ext',
71 |      Bucket: 'bucketName',
72 |      Key: 'filename.ext',
73 |      ETag: '"bf2acbedf84207d696c8da7dbb205b9f-5"' }
74 | */
75 | upload.on('uploaded', function (details) {
76 |   console.log(details);
77 | });
78 | 
79 | // Pipe the incoming filestream through compression, and up to S3.
80 | read.pipe(compress).pipe(upload);
81 | ```
82 | 
83 | ## Usage
84 | 
85 | Before uploading you must configure the S3 client for s3-upload-stream to use. Please note that this module has only been tested with AWS SDK 2.0 and greater.
86 | 
87 | This module does not include the AWS SDK itself. Rather you must require the AWS SDK in your own application code, instantiate an S3 client and then supply it to s3-upload-stream.
88 | 
89 | The main advantage of this is that, rather than being stuck with the version of the AWS SDK that ships with s3-upload-stream, you can ensure that s3-upload-stream uses whichever version of the SDK you want.
90 | 
91 | When setting up the S3 client, the recommended approach for credential management is to [set your AWS API keys using environment variables](http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/node-configuring.html) or [IAM roles](http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html).
92 | 
93 | If you are following this approach then you can configure the S3 client very simply:
94 | 
95 | ```js
96 | var AWS = require('aws-sdk'),
97 |     s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3());
98 | ```
99 | 
100 | However, some environments may require you to keep your credentials in a file, or hardcoded. In that case you can use the following form:
101 | 
102 | ```js
103 | var AWS = require('aws-sdk');
104 | 
105 | // Make sure AWS credentials are loaded using one of the following techniques
106 | AWS.config.loadFromPath('./config.json');
107 | AWS.config.update({accessKeyId: 'akid', secretAccessKey: 'secret'});
108 | 
109 | // Create a stream client.
110 | var s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3());
111 | ```
112 | 
113 | ### client.upload(destination)
114 | 
115 | Create an upload stream that will upload to the specified destination. The upload stream is returned immediately.
116 | 
117 | The destination details object lets you specify any of the [destination properties enumerated in the AWS S3 documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#createMultipartUpload-property).
118 | 
119 | __Example:__
120 | 
121 | ```js
122 | var AWS = require('aws-sdk'), fs = require('fs'),
123 |     s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3());
124 | 
125 | var read = fs.createReadStream('/path/to/a/file');
126 | var upload = s3Stream.upload({
127 |   Bucket: "bucket-name",
128 |   Key: "key-name",
129 |   ACL: "public-read",
130 |   StorageClass: "REDUCED_REDUNDANCY",
131 |   ContentType: "binary/octet-stream"
132 | });
133 | 
134 | read.pipe(upload);
135 | ```
136 | 
137 | ### client.upload(destination, [session])
138 | 
139 | Resume an incomplete multipart upload from a previous session by providing a `session` object containing the upload ID, plus the ETag and part number of each previously uploaded part. The `destination` details are the same as above.
140 | 
141 | __Example:__
142 | 
143 | ```js
144 | var AWS = require('aws-sdk'), fs = require('fs'),
145 |     s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3());
146 | 
147 | var read = fs.createReadStream('/path/to/a/file');
148 | var upload = s3Stream.upload(
149 |   {
150 |     Bucket: "bucket-name",
151 |     Key: "key-name",
152 |     ACL: "public-read",
153 |     StorageClass: "REDUCED_REDUNDANCY",
154 |     ContentType: "binary/octet-stream"
155 |   },
156 |   {
157 |     UploadId: "f1j2b47238f12984f71b2o8347f12",
158 |     Parts: [
159 |       {
160 |         ETag: "3k2j3h45t9v8aydgajsda",
161 |         PartNumber: 1
162 |       },
163 |       {
164 |         ETag: "kjgsdfg876sd8fgk3j44t",
165 |         PartNumber: 2
166 |       }
167 |     ]
168 |   }
169 | );
170 | 
171 | read.pipe(upload);
172 | ```
173 | 
174 | ## Stream Methods
175 | 
176 | The following methods can be called on the stream returned by `client.upload()`.
177 | 
178 | ### stream.pause()
179 | 
180 | Pause an active multipart upload stream.
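For example, a caller might pause an upload, persist the pause details, and resume later (a minimal sketch — `saveSession` is a hypothetical helper, and the event payloads match the descriptions below):

```js
var upload = s3Stream.upload({ "Bucket": "bucket-name", "Key": "key-name" });

upload.on('pausing', function (pendingParts) {
  console.log('Waiting on ' + pendingParts + ' in-flight parts before pausing');
});

upload.on('paused', function (session) {
  // session contains UploadId, Parts, and uploadedSize. Persist it to resume the
  // upload later, either with upload.resume() in this process or by passing it
  // to client.upload(destination, session) in a new session.
  saveSession(session); // hypothetical helper
});

// Sometime later, while the upload is still in progress:
upload.pause();
```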
181 | 182 | Calling `pause()` will immediately: 183 | 184 | * stop accepting data from an input stream, 185 | * stop submitting new parts for upload, and 186 | * emit a `pausing` event with the number of parts that are still mid-upload. 187 | 188 | When mid-upload parts are finished, a `paused` event will fire, including an object with `UploadId` and `Parts` data that can be used to resume an upload in a later session. 189 | 190 | ### stream.resume() 191 | 192 | Resume a paused multipart upload stream. 193 | 194 | Calling `resume()` will immediately: 195 | 196 | * resume accepting data from an input stream, 197 | * resume submitting new parts for upload, and 198 | * echo a `resume` event back to any listeners. 199 | 200 | It is safe to call `resume()` at any time after `pause()`. If the stream is between `pausing` and `paused`, then `resume()` will resume data flow and the `paused` event will not be fired. 201 | 202 | ### stream.maxPartSize(sizeInBytes) 203 | 204 | Used to adjust the maximum amount of stream data that will be buffered in memory prior to flushing. The lowest possible value, and default value, is 5 MB. It is not possible to set this value any lower than 5 MB due to Amazon S3 restrictions, but there is no hard upper limit. The higher the value you choose the more stream data will be buffered in memory before flushing to S3. 205 | 206 | The main reason for setting this to a higher value instead of using the default is if you have a stream with more than 50 GB of data, and therefore need larger part sizes in order to flush the entire stream while also staying within Amazon's upper limit of 10,000 parts for the multipart upload API. 207 | 208 | ```js 209 | var AWS = require('aws-sdk'), 210 | s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3()); 211 | 212 | var read = fs.createReadStream('/path/to/a/file'); 213 | var upload = s3Stream.upload({ 214 | "Bucket": "bucket-name", 215 | "Key": "key-name" 216 | }); 217 | 218 | upload.maxPartSize(20971520); // 20 MB 219 | 220 | read.pipe(upload); 221 | ``` 222 | 223 | ### stream.concurrentParts(numberOfParts) 224 | 225 | Used to adjust the number of parts that are concurrently uploaded to S3. By default this is just one at a time, to keep memory usage low and allow the upstream to deal with backpressure. However, in some cases you may wish to drain the stream that you are piping in quickly, and then issue concurrent upload requests to upload multiple parts. 226 | 227 | Keep in mind that total memory usage will be at least `maxPartSize` * `concurrentParts` as each concurrent part will be `maxPartSize` large, so it is not recommended that you set both `maxPartSize` and `concurrentParts` to high values, or your process will be buffering large amounts of data in its memory. 228 | 229 | ```js 230 | var AWS = require('aws-sdk'), 231 | s3Stream = require('../lib/s3-upload-stream.js')(new AWS.S3()); 232 | 233 | var read = fs.createReadStream('/path/to/a/file'); 234 | var upload = s3Stream.upload({ 235 | "Bucket": "bucket-name", 236 | "Key": "key-name" 237 | }); 238 | 239 | upload.concurrentParts(5); 240 | 241 | read.pipe(upload); 242 | ``` 243 | 244 | ### Tuning configuration of the AWS SDK 245 | 246 | The following configuration tuning can help prevent errors when using less reliable internet connections (such as 3G data if you are using Node.js on the Tessel) by causing the AWS SDK to detect upload timeouts and retry. 
247 | 248 | ```js 249 | var AWS = require('aws-sdk'); 250 | AWS.config.httpOptions = {timeout: 5000}; 251 | ``` 252 | 253 | ### Installation 254 | 255 | ``` 256 | npm install s3-upload-stream 257 | ``` 258 | 259 | ### Running Tests 260 | 261 | ``` 262 | npm test 263 | ``` 264 | 265 | ### License 266 | 267 | (The MIT License) 268 | 269 | Copyright (c) 2014-2015 Nathan Peck 270 | 271 | Permission is hereby granted, free of charge, to any person obtaining 272 | a copy of this software and associated documentation files (the 273 | 'Software'), to deal in the Software without restriction, including 274 | without limitation the rights to use, copy, modify, merge, publish, 275 | distribute, sublicense, and/or sell copies of the Software, and to 276 | permit persons to whom the Software is furnished to do so, subject to 277 | the following conditions: 278 | 279 | The above copyright notice and this permission notice shall be 280 | included in all copies or substantial portions of the Software. 281 | 282 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, 283 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 284 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 285 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 286 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 287 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 288 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 289 | -------------------------------------------------------------------------------- /tests/test.js: -------------------------------------------------------------------------------- 1 | var expect = require('chai').expect, 2 | fs = require('fs'), 3 | Writable = require('stream').Writable; 4 | 5 | // Define a stubbed out version of the AWS S3 Node.js client 6 | var AWSstub = { 7 | S3: function () { 8 | this.createMultipartUpload = function (details, callback) { 9 | // Make sure that this AWS function was called with the right parameters. 10 | expect(details).to.have.property('Bucket'); 11 | expect(details.Key).to.be.a('string'); 12 | 13 | expect(details).to.have.property('Key'); 14 | expect(details.Key).to.be.a('string'); 15 | 16 | if (details.Key == 'create-fail') { 17 | // Trigger a simulated error when a magic file name is used. 
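        // The stub uses magic key names throughout ('create-fail', 'upload-fail', 'complete-fail',
        // 'abort-fail') so that individual S3 calls can be made to fail on demand.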
18 | callback('Simulated failure from mocked API'); 19 | } 20 | else { 21 | callback(null, { 22 | UploadId: 'upload-id' 23 | }); 24 | } 25 | }; 26 | 27 | this.uploadPart = function (details, callback) { 28 | // Make sure that all the properties are there 29 | expect(details).to.have.property('Body'); 30 | expect(details.Body).to.be.instanceof(Buffer); 31 | 32 | expect(details).to.have.property('Bucket'); 33 | expect(details.Bucket).to.equal('test-bucket-name'); 34 | 35 | expect(details).to.have.property('Key'); 36 | expect(details.Key).to.be.a('string'); 37 | 38 | expect(details).to.have.property('UploadId'); 39 | expect(details.UploadId).to.contain('upload-id'); 40 | 41 | expect(details).to.have.property('PartNumber'); 42 | expect(details.PartNumber).to.be.an.integer; 43 | 44 | if (details.Key == 'upload-fail') { 45 | callback('Simulated failure from mocked API'); 46 | } 47 | else { 48 | // Return an ETag 49 | callback(null, { 50 | ETag: 'etag' 51 | }); 52 | } 53 | }; 54 | 55 | this.abortMultipartUpload = function (details, callback) { 56 | // Make sure that all the properties are there 57 | expect(details).to.have.property('Bucket'); 58 | expect(details.Bucket).to.equal('test-bucket-name'); 59 | 60 | expect(details).to.have.property('Key'); 61 | expect(details.Key).to.be.a('string'); 62 | 63 | expect(details).to.have.property('UploadId'); 64 | expect(details.UploadId).to.contain('upload-id'); 65 | 66 | if (details.Key == 'abort-fail') { 67 | // Trigger a simulated error when a magic file name is used. 68 | callback('Simulated failure from mocked API'); 69 | } 70 | else { 71 | callback(); 72 | } 73 | }; 74 | 75 | this.completeMultipartUpload = function (details, callback) { 76 | // Make sure that all the properties are there 77 | expect(details).to.have.property('Bucket'); 78 | expect(details.Bucket).to.equal('test-bucket-name'); 79 | 80 | expect(details).to.have.property('Key'); 81 | expect(details.Key).to.be.a('string'); 82 | 83 | expect(details).to.have.property('UploadId'); 84 | expect(details.UploadId).to.contain('upload-id'); 85 | 86 | expect(details).to.have.property('MultipartUpload'); 87 | expect(details.MultipartUpload).to.an.object; 88 | 89 | expect(details.MultipartUpload).to.have.property('Parts'); 90 | expect(details.MultipartUpload.Parts).to.an.array; 91 | 92 | details.MultipartUpload.Parts.forEach(function (partNumber) { 93 | expect(partNumber).to.be.an.integer; 94 | }); 95 | 96 | if (details.Key == 'complete-fail' || details.Key == 'abort-fail') { 97 | // Trigger a simulated error when a magic file name is used. 
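        // 'abort-fail' also fails here so that the stream goes on to call abortMultipartUpload,
        // which then fails as well, exercising the abort error path.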
98 | callback('Simulated failure from mocked API'); 99 | } 100 | else { 101 | callback(null, { 102 | ETag: 'etag' 103 | }); 104 | } 105 | }; 106 | } 107 | }; 108 | 109 | var s3StreamClient = require('../lib/s3-upload-stream.js')(new AWSstub.S3()); 110 | 111 | describe('Creating upload client', function () { 112 | describe('Without specifying an S3 client', function () { 113 | var uploadStream; 114 | 115 | it('should throw an error', function (done) { 116 | var BadStreamClient = require('../lib/s3-upload-stream.js'); 117 | 118 | try { 119 | uploadStream = BadStreamClient.upload({ 120 | "Bucket": "test-bucket-name", 121 | "Key": "test-file-name" 122 | }); 123 | 124 | done(); 125 | } 126 | catch (e) { 127 | done(); 128 | } 129 | }); 130 | }); 131 | 132 | describe('After specifying an S3 client', function () { 133 | var uploadStream; 134 | 135 | it('should return an instance of Writable stream', function () { 136 | var GoodStreamClient = require('../lib/s3-upload-stream.js')(new AWSstub.S3()); 137 | 138 | uploadStream = GoodStreamClient.upload({ 139 | "Bucket": "test-bucket-name", 140 | "Key": "test-file-name" 141 | }); 142 | 143 | uploadStream.on('error', function () { 144 | throw "Did not expect to receive an error"; 145 | }); 146 | 147 | expect(uploadStream).to.be.instanceof(Writable); 148 | }); 149 | }); 150 | }); 151 | 152 | describe('Stream Methods', function () { 153 | var uploadStream; 154 | 155 | before(function (done) { 156 | uploadStream = s3StreamClient.upload({ 157 | "Bucket": "test-bucket-name", 158 | "Key": "test-file-name" 159 | }); 160 | 161 | uploadStream.on('error', function () { 162 | throw "Did not expect to receive an error"; 163 | }); 164 | 165 | done(); 166 | }); 167 | 168 | it('writable stream should have a maxPartSize method', function () { 169 | expect(uploadStream.maxPartSize).to.be.a('function'); 170 | }); 171 | 172 | it('writable stream should have a concurrentParts method', function () { 173 | expect(uploadStream.concurrentParts).to.be.a('function'); 174 | }); 175 | 176 | describe('Setting max part size to a value greater than 5 MB', function () { 177 | it('max part size should be set to that value', function () { 178 | uploadStream.maxPartSize(20971520); 179 | expect(uploadStream.getMaxPartSize()).to.equal(20971520); 180 | }); 181 | }); 182 | 183 | describe('Setting max part size to a value less than 5 MB', function () { 184 | it('max part size should be set to 5 MB exactly', function () { 185 | uploadStream.maxPartSize(4242880); 186 | expect(uploadStream.getMaxPartSize()).to.equal(5242880); 187 | }); 188 | }); 189 | 190 | describe('Setting concurrent parts to number greater than 1', function () { 191 | it('concurrent parts should be set to that number', function () { 192 | uploadStream.concurrentParts(5); 193 | expect(uploadStream.getConcurrentParts()).to.equal(5); 194 | }); 195 | }); 196 | 197 | describe('Setting concurrent parts to number less than 1', function () { 198 | it('concurrent parts should be set to 1', function () { 199 | uploadStream.concurrentParts(-2); 200 | expect(uploadStream.getConcurrentParts()).to.equal(1); 201 | }); 202 | }); 203 | }); 204 | 205 | describe('Piping data into the writable upload stream', function () { 206 | var uploadStream; 207 | 208 | before(function (done) { 209 | uploadStream = s3StreamClient.upload({ 210 | "Bucket": "test-bucket-name", 211 | "Key": "test-file-name" 212 | }); 213 | 214 | uploadStream.on('error', function () { 215 | throw "Did not expect to receive an error"; 216 | }); 217 | 218 | done(); 219 | }); 220 | 
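  // This test pipes this test file itself through the stream; with the stubbed client the single
  // resulting part succeeds, so 'ready', 'part', and 'uploaded' should all fire.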
221 | it('should emit valid part and uploaded events', function (done) { 222 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 223 | 224 | var ready = false, part = false, uploaded = false; 225 | 226 | uploadStream.on('ready', function(uploadId) { 227 | ready = true; 228 | 229 | expect(uploadId).to.equal('upload-id'); 230 | 231 | if (ready & part & uploaded) 232 | done(); 233 | }); 234 | 235 | uploadStream.on('part', function (details) { 236 | part = true; 237 | 238 | expect(details).to.have.property('ETag'); 239 | expect(details.ETag).to.equal('etag'); 240 | 241 | expect(details).to.have.property('PartNumber'); 242 | expect(details.PartNumber).to.equal(1); 243 | 244 | expect(details).to.have.property('receivedSize'); 245 | expect(details.receivedSize).to.be.an.integer; 246 | 247 | expect(details).to.have.property('uploadedSize'); 248 | expect(details.uploadedSize).to.be.an.integer; 249 | 250 | if (ready & part & uploaded) 251 | done(); 252 | }); 253 | 254 | uploadStream.on('uploaded', function () { 255 | uploaded = true; 256 | 257 | if (ready & part & uploaded) 258 | done(); 259 | }); 260 | 261 | file.on('open', function () { 262 | file.pipe(uploadStream); 263 | }); 264 | 265 | file.on('error', function () { 266 | throw 'Error! Unable to open the file for reading'; 267 | }); 268 | }); 269 | }); 270 | 271 | 272 | /* 273 | Differences from normal creation: 274 | * Constructor passes multipartUploadId and part info 275 | * 'ready' event fires with given multipartUploadID 276 | * First sent part number should start 1 above those given 277 | 278 | ASSUMPTION: 279 | Parts are passed in without gaps. Part number is calculated 280 | based on array length, not at inspecting given part numbers. 281 | */ 282 | describe('Piping data into a resumed upload stream', function () { 283 | var uploadStream; 284 | 285 | before(function (done) { 286 | uploadStream = s3StreamClient.upload({ 287 | Bucket: "test-bucket-name", 288 | Key: "test-file-name" 289 | }, { 290 | // when 'ready' event fires, should have this ID 291 | UploadId: "this-tests-specific-upload-id", 292 | Parts: [ 293 | { 294 | PartNumber: 1, 295 | ETag: "etag-1" 296 | }, 297 | { 298 | PartNumber: 2, 299 | ETag: "etag-2" 300 | } 301 | ] 302 | }); 303 | 304 | uploadStream.on('error', function () { 305 | throw "Did not expect to receive an error"; 306 | }); 307 | 308 | done(); 309 | }); 310 | 311 | it('should emit valid part and uploaded events', function (done) { 312 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 313 | 314 | var ready = false, part = false, uploaded = false; 315 | 316 | uploadStream.on('ready', function(uploadId) { 317 | ready = true; 318 | 319 | expect(uploadId).to.equal('this-tests-specific-upload-id'); 320 | 321 | if (ready & part & uploaded) 322 | done(); 323 | }); 324 | 325 | uploadStream.on('part', function (details) { 326 | part = true; 327 | 328 | expect(details).to.have.property('ETag'); 329 | expect(details.ETag).to.equal('etag'); 330 | 331 | // part number should be one more than the highest given 332 | expect(details).to.have.property('PartNumber'); 333 | expect(details.PartNumber).to.equal(3); 334 | 335 | expect(details).to.have.property('receivedSize'); 336 | expect(details.receivedSize).to.be.an.integer; 337 | 338 | expect(details).to.have.property('uploadedSize'); 339 | expect(details.uploadedSize).to.be.an.integer; 340 | 341 | if (ready & part & uploaded) 342 | done(); 343 | }); 344 | 345 | uploadStream.on('uploaded', function () { 346 | uploaded = true; 347 | 348 | if 
(ready & part & uploaded) 349 | done(); 350 | }); 351 | 352 | file.on('open', function () { 353 | file.pipe(uploadStream); 354 | }); 355 | 356 | file.on('error', function () { 357 | throw 'Error! Unable to open the file for reading'; 358 | }); 359 | }); 360 | }); 361 | 362 | describe('S3 Error catching', function () { 363 | describe('Error creating multipart upload', function () { 364 | it('should emit an error', function (done) { 365 | var uploadStream = s3StreamClient.upload({ 366 | "Bucket": "test-bucket-name", 367 | "Key": "create-fail" 368 | }); 369 | 370 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 371 | 372 | uploadStream.on('error', function () { 373 | done(); 374 | }); 375 | 376 | file.on('open', function () { 377 | file.pipe(uploadStream); 378 | }); 379 | }); 380 | }); 381 | 382 | describe('Error uploading part', function () { 383 | var uploadStream; 384 | 385 | before(function (done) { 386 | uploadStream = s3StreamClient.upload({ 387 | "Bucket": "test-bucket-name", 388 | "Key": "upload-fail" 389 | }); 390 | done(); 391 | }); 392 | 393 | it('should emit an error', function (done) { 394 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 395 | 396 | uploadStream.on('error', function (err) { 397 | expect(err).to.be.a('string'); 398 | done(); 399 | }); 400 | 401 | file.on('open', function () { 402 | file.pipe(uploadStream); 403 | }); 404 | 405 | file.on('error', function () { 406 | throw 'Error! Unable to open the file for reading'; 407 | }); 408 | }); 409 | }); 410 | 411 | describe('Error completing upload', function () { 412 | var uploadStream; 413 | 414 | before(function (done) { 415 | uploadStream = s3StreamClient.upload({ 416 | "Bucket": "test-bucket-name", 417 | "Key": "complete-fail" 418 | }); 419 | done(); 420 | }); 421 | 422 | it('should emit an error', function (done) { 423 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 424 | 425 | uploadStream.on('error', function (err) { 426 | expect(err).to.be.a('string'); 427 | done(); 428 | }); 429 | 430 | file.on('open', function () { 431 | file.pipe(uploadStream); 432 | }); 433 | 434 | file.on('error', function () { 435 | throw 'Error! Unable to open the file for reading'; 436 | }); 437 | }); 438 | }); 439 | 440 | describe('Error aborting upload', function () { 441 | var uploadStream; 442 | 443 | before(function (done) { 444 | uploadStream = s3StreamClient.upload({ 445 | "Bucket": "test-bucket-name", 446 | "Key": "abort-fail" 447 | }); 448 | done(); 449 | }); 450 | 451 | it('should emit an error', function (done) { 452 | var file = fs.createReadStream(process.cwd() + '/tests/test.js'); 453 | 454 | uploadStream.on('error', function (err) { 455 | expect(err).to.be.a('string'); 456 | done(); 457 | }); 458 | 459 | file.on('open', function () { 460 | file.pipe(uploadStream); 461 | }); 462 | 463 | file.on('error', function () { 464 | throw 'Error! Unable to open the file for reading'; 465 | }); 466 | }); 467 | }); 468 | }); 469 | --------------------------------------------------------------------------------