├── .travis.yml ├── .gitignore ├── .jshintrc ├── scripts └── create_fixtures.sh ├── LICENSE ├── package.json ├── test └── test.js ├── Gruntfile.js ├── CHANGELOG.md ├── README.md └── tasks └── aws_s3.js /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | 3 | node_js: 4 | - "0.10" 5 | - "0.11" 6 | 7 | before_script: 8 | - npm install -g grunt-cli 9 | 10 | notifications: 11 | email: false -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | lib-cov 2 | *.seed 3 | *.log 4 | *.csv 5 | *.dat 6 | *.out 7 | *.pid 8 | *.gz 9 | 10 | pids 11 | logs 12 | results 13 | tmp 14 | 15 | npm-debug.log 16 | 17 | node_modules 18 | /test/local 19 | /test/fixtures 20 | 21 | .env 22 | 23 | .DS_Store 24 | -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "curly": true, 3 | "eqeqeq": true, 4 | "immed": true, 5 | "latedef": true, 6 | "newcap": true, 7 | "noarg": true, 8 | "sub": true, 9 | "undef": true, 10 | "boss": true, 11 | "eqnull": true, 12 | "node": true, 13 | "es5": true 14 | } 15 | -------------------------------------------------------------------------------- /scripts/create_fixtures.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function generateCopies { 3 | for i in `seq 2 $1`; do 4 | touch "$2 $i.txt" 5 | done 6 | } 7 | 8 | function generateFixtures { 9 | mkdir -p test/fixtures/upload/otters/$3 10 | touch "test/fixtures/upload/otters/$3/$2".txt 11 | touch "test/fixtures/upload/otters/$3/$2 copy".txt 12 | generateCopies $1 "test/fixtures/upload/otters/$3/$2 copy" 13 | } 14 | 15 | # $1 = number of elements 16 | # $2 = name of file 17 | # $3 = name of folder 18 | 19 | mkdir -p test/fixtures/upload/otters/{river,sea,updated} 20 | echo My favourite animal > test/fixtures/upload/otters/animal.txt 21 | generateFixtures 911 yay river 22 | generateFixtures 559 yo sea 23 | generateFixtures 911 yay updated 24 | generateFixtures 559 yo updated 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 Mathieu Triay 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "grunt-aws-s3", 3 | "description": "Interact with AWS S3 using the AWS SDK", 4 | "version": "2.0.2", 5 | "homepage": "https://github.com/MathieuLoutre/grunt-aws-s3", 6 | "author": { 7 | "name": "Mathieu Triay", 8 | "email": "mathieu.triay@gmail.com" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git://github.com/MathieuLoutre/grunt-aws-s3.git" 13 | }, 14 | "bugs": { 15 | "url": "https://github.com/MathieuLoutre/grunt-aws-s3/issues" 16 | }, 17 | "licenses": [ 18 | { 19 | "type": "MIT", 20 | "url": "https://github.com/MathieuLoutre/grunt-aws-s3/blob/master/LICENSE" 21 | } 22 | ], 23 | "engines": { 24 | "node": ">= 0.8.0" 25 | }, 26 | "scripts": { 27 | "pretest": "./scripts/create_fixtures.sh", 28 | "test": "grunt" 29 | }, 30 | "dependencies": { 31 | "aws-sdk": "2.0.x", 32 | "mime-types": "2.0.x", 33 | "lodash": "4.17.x", 34 | "async": "0.9.x", 35 | "progress": "1.1.x" 36 | }, 37 | "devDependencies": { 38 | "grunt-contrib-jshint": "~0.2.0", 39 | "grunt": "~0.4.0", 40 | "chai": "~1.7.2", 41 | "mocha": "~1.12.1", 42 | "grunt-mocha-test": "~0.10.2", 43 | "grunt-contrib-copy": "~0.4.1", 44 | "grunt-contrib-clean": "~0.5.0", 45 | "mock-aws-s3": "2.5.0" 46 | }, 47 | "peerDependencies": { 48 | "grunt": ">=0.4.0" 49 | }, 50 | "keywords": [ 51 | "gruntplugin", 52 | "aws", 53 | "s3", 54 | "sdk" 55 | ] 56 | } 57 | -------------------------------------------------------------------------------- /test/test.js: -------------------------------------------------------------------------------- 1 | var expect = require('chai').expect; 2 | var fs = require('fs'); 3 | 4 | // Gathered from http://stackoverflow.com/questions/5827612/node-js-fs-readdir-recursive-directory-search 5 | function walk (dir) { 6 | 7 | var results = []; 8 | var list = fs.readdirSync(dir); 9 | 10 | list.forEach(function (file) { 11 | 12 | file = dir + '/' + file; 13 | var stat = fs.statSync(file); 14 | 15 | if (stat && stat.isDirectory()) { 16 | results = results.concat(walk(file)); 17 | } 18 | else { 19 | results.push(file); 20 | } 21 | }); 22 | 23 | return results; 24 | } 25 | 26 | describe('S3', function () { 27 | 28 | it('should do what it is supposed to do', function (done) { 29 | 30 | var first = walk(__dirname + '/local/bucket/first'); 31 | var second = walk(__dirname + '/local/bucket/second'); 32 | var updated = walk(__dirname + '/local/bucket/first/otters/updated'); 33 | var backup = walk(__dirname + '/local/download/backup'); 34 | var third = walk(__dirname + '/local/bucket/third'); 35 | var fourth_bucket = walk(__dirname + '/local/bucket/fourth'); 36 | var fourth = walk(__dirname + '/local/download/fourth'); 37 | var fifth = walk(__dirname + '/local/download/fifth'); 38 | var fifth_bucket = walk(__dirname + '/local/bucket/fifth'); 39 | var copies = walk(__dirname + '/local/bucket/copies'); 40 | 41 | expect(first.length).to.equal(1473); 42 | expect(second.length).to.equal(1472); 43 | expect(updated.length).to.equal(0); 44 | expect(backup.length).to.equal(2945); 45 | expect(third.length).to.equal(912); 46 | 
expect(fourth_bucket.length).to.equal(2945); 47 | expect(fourth.length).to.equal(1472); 48 | expect(fifth.length).to.equal(560); 49 | expect(fifth_bucket.length).to.equal(2); 50 | expect(copies.length).to.equal(1473); 51 | 52 | done(); 53 | }); 54 | }); -------------------------------------------------------------------------------- /Gruntfile.js: -------------------------------------------------------------------------------- 1 | /* 2 | * grunt-aws-s3 3 | * https://github.com/MathieuLoutre/grunt-aws-s3 4 | * 5 | * Copyright (c) 2013 Mathieu Triay 6 | * Licensed under the MIT license. 7 | */ 8 | 9 | 'use strict'; 10 | 11 | module.exports = function (grunt) { 12 | 13 | grunt.registerTask('create_bucket', 'creates the bucket folder', function() { 14 | grunt.file.mkdir(__dirname + '/test/local/bucket'); 15 | }); 16 | 17 | // Project configuration. 18 | grunt.initConfig({ 19 | jshint: { 20 | all: [ 21 | 'tasks/*.js' 22 | ], 23 | options: { 24 | jshintrc: '.jshintrc', 25 | }, 26 | }, 27 | aws_s3: { 28 | test_local: { 29 | options: { 30 | bucket: __dirname + '/test/local/bucket', 31 | uploadConcurrency: 1, 32 | mock: true, 33 | stream: true 34 | }, 35 | files: [ 36 | {expand: true, cwd: "test/local/upload/", src: ['**'], dest: 'first/', stream: false}, 37 | {dest: '/', cwd: 'test/local/download/backup/', action: 'download', stream: false}, 38 | {dest: 'first/otters/updated/', action: 'delete'}, 39 | {dest: 'punk/', action: 'delete'}, 40 | {expand: true, cwd: "test/local/upload/otters/river/", src: ['**'], dest: 'second/', 41 | params: { 42 | Expires: 1893456000, 43 | CacheControl: 'public, max-age=864000', 44 | }}, 45 | {dest: 'otters/funk/', cwd: 'test/local/download/backup/', action: 'download'}, 46 | {expand: true, cwd: "test/local/upload/otters/updated/", src: ['**'], dest: 'second/', differential: true}, 47 | {expand: true, cwd: "test/local/upload/otters/updated/", src: ['**'], dest: 'third/'}, 48 | {dest: 'third/', action: 'delete', differential: true, cwd: "test/local/upload/otters/river/"}, 49 | {expand: true, cwd: "test/local/upload/", src: ['**'], dest: 'fourth/'}, 50 | {dest: 'fourth/otters/river/', cwd: 'test/local/download/fourth/', action: 'download'}, 51 | {dest: 'fourth/otters/updated/', cwd: 'test/local/download/fourth/', action: 'download', differential: true}, 52 | {dest: 'fourth/otters/updated/', cwd: 'test/local/download/fifth/', exclude: "**/yay*", action: 'download'}, 53 | {expand: true, cwd: "test/local/upload/otters/updated/", src: ['**'], dest: 'fifth/'}, 54 | {dest: 'fifth/', exclude: "**/*copy*", flipExclude: true, action: 'delete'}, 55 | {src: 'first/', dest: 'copies/', action: 'copy'}, 56 | ] 57 | }, 58 | test_live: { 59 | options: { 60 | bucket: 'grunt-aws-test-bucket', 61 | uploadConcurrency: 100, 62 | copyConcurrency: 100 63 | }, 64 | files: [ 65 | {expand: true, cwd: "test/local/upload/", src: ['otters/animal.txt'], dest: 'first/', stream: false}, 66 | ] 67 | }, 68 | }, 69 | mochaTest: { 70 | test: { 71 | options: { 72 | reporter: 'spec' 73 | }, 74 | src: ['test/*.js'] 75 | } 76 | }, 77 | clean: { 78 | test: ['test/local/**'] 79 | }, 80 | copy: { 81 | main: { 82 | files: [ 83 | {expand: true, cwd: 'test/fixtures/', src: ['**'], dest: 'test/local'}, 84 | ] 85 | } 86 | } 87 | }); 88 | 89 | // Actually load this plugin's task(s). 
90 | grunt.loadTasks('./tasks'); 91 | 92 | grunt.loadNpmTasks('grunt-contrib-jshint'); 93 | grunt.loadNpmTasks('grunt-mocha-test'); 94 | grunt.loadNpmTasks('grunt-contrib-clean'); 95 | grunt.loadNpmTasks('grunt-contrib-copy'); 96 | 97 | grunt.registerTask('default', ['clean', 'copy', 'create_bucket', 'aws_s3:test_local', 'mochaTest']); 98 | grunt.registerTask('test-live', ['clean', 'copy', 'aws_s3:test_live']); 99 | }; 100 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ### v2.0.2 4 | - Fix lodash contains and update tests (by @eugenioclrc) 5 | 6 | ### v2.0.1 7 | - Update lodash to latest version (by @ffflorian) 8 | 9 | ### v2.0.0 10 | - Support different compression algorithms, remove support for `gzipRename`, use `compressionRename` now (by @smeder) 11 | 12 | ### v1.0.0 13 | - Start semantic versioning 14 | - Warning when cwd used without expand by @stevemayhew 15 | - Update mocking module and fixed tests 16 | 17 | ### v0.14.5 18 | - Support for grunt 1.0.0 (goes from ~0.4.0 to >= 0.4.0) 19 | 20 | ### v0.14.4 21 | - add `s3ForcePathStyle` to params by @albert-lacki 22 | 23 | ### v0.14.3 24 | - auto generate fixtures by @frankcortes 25 | 26 | ### v0.14.2 27 | - add `awsProfile` option by @trioni 28 | 29 | ### v0.14.1 30 | - Fix gzip ContentEncoding and ContentType leaking to other files 31 | 32 | ### v0.14.0 33 | - Export uploaded files to grunt config (an idea from @srlmproductions) 34 | 35 | ### v0.13.1 36 | - Fix ACL on copy by @rayd 37 | 38 | ### v0.13.0 39 | - Fix tests 40 | - new `overwrite` option to prevent overwriting existing files (based on @nickjackson's PR) 41 | - update mock library 42 | 43 | ### v0.12.3 44 | - Add warning when no dest is defined 45 | 46 | ### v0.12.2 47 | - make differential work with gzipRename by @dedsm 48 | 49 | ### v0.12.1 50 | - gzipRename option to change extension of gzip files as they are uploaded 51 | 52 | ### v0.12.0 53 | - Add basic gzip support 54 | 55 | ### v0.11.1 56 | - Fix url encoding for copy action by @ahageali 57 | 58 | ### v0.11.0 59 | - Support for copy action 60 | 61 | ### v0.10.4 62 | - Fix encoding in mime type (w/ @jeantil) 63 | 64 | ### v0.10.3 65 | - Use correct method in mime type lib by @takeno 66 | 67 | ### v0.10.1 68 | - Fix charset with new mime package by @jeantil 69 | 70 | ### v0.10.0 71 | - Endpoint option. 72 | - Drop support for 0.8.x. 
73 | - Fix uploading an empty dir #40 74 | - Unify dest base format #44 75 | - Fix uploading no files #46 76 | 77 | ### v0.9.4 78 | - Add session token env variable and option by @azaharakis 79 | 80 | ### v0.9.3 81 | - Progress bar (by @seth-admittedly) 82 | - Fix #36, differential didn't work at root after `0.9.0` 83 | 84 | ### v0.9.2 85 | - Unpublished because of incomplete fix for #36 86 | 87 | ### v0.9.1 88 | - Add signatureVersion option (by @ivanzhaowy) 89 | 90 | ### v0.9.0 91 | - ListObjects for dest instead of the whole bucket by @royra 92 | - Update AWS SDK to 2.0.0 to fix #30 93 | 94 | ### v0.8.6 95 | - Don't check for credentials to allow IAM use by @joshuaspence 96 | 97 | ### v0.8.5 98 | - New option to display changes only 99 | 100 | ### v0.8.4 101 | - Fix bug in setImmediate support detection (was using the shim even on Node 0.10.x) 102 | 103 | ### v0.8.2-3 104 | - Unpublished because the attempt to better detect setImmediate was faulty 105 | 106 | ### v0.8.1 107 | - Shim setImmediate to support Node 0.8.x 108 | 109 | ### v0.8.0 110 | - If a directory is found during download, it will be skipped (and won't create empty dirs). This happened only when an empty directory had been created manually on S3 (by @nicolindemann) 111 | - Use a glob pattern to exclude files when downloading a folder (with @nicolindemann) 112 | - Change dot colour depending on whether the object has been downloaded/uploaded 113 | - Exclude option for delete 114 | - Refactor 115 | - Stream option (with @craigloftus) 116 | - Bug fixes on options priority (if you had differential set to true for the whole task/subtask but to false for a file object, true would take priority) 117 | 118 | ### v0.7.2 119 | 120 | - Follow Grunt 0.4.2 guidelines and include external `lodash` and `async` 121 | - Add more options (httpOptions, maxRetries, sslEnabled) 122 | - Fix a bug when downloading where extra keys were sent to getObject 123 | - Fix download of a single item. Key paths are now relative to the given dest if dest is a directory or the file itself 124 | 125 | ### v0.7.1 126 | 127 | - If a marker is not provided when listing objects but the list is flagged as truncated, use the last element as marker (by @derekr) 128 | 129 | ### v0.7.0 130 | 131 | - Grouping of uploads together (treated like the `delete` and `download` actions in the code) 132 | - Improved log 133 | - Debug option to do dry runs 134 | - Differential upload based on MD5 checks (using S3's ETags) 135 | - Differential download based on whether it exists locally or not, MD5 checks and date 136 | - Differential delete based on whether it still exists locally or not 137 | - Tests using `mock-aws-s3` to replace the AWS package during testing 138 | - Code restructure/formatting 139 | - Update docs 140 | 141 | ### v0.6.0 142 | 143 | - Add 'download' option. 144 | - Fix `options.params` not being applied 145 | - Add a `params` option field to the file hash which overrides `options.params` 146 | - The `mime` hash has priority over the `params` option field of the file 147 | - Multiple code style/lint fixes 148 | - Remove uploading of empty directories 149 | - Nicer log 150 | - Add changelog! 151 | - Better documentation 152 | 153 | ### v0.5.0 154 | 155 | - Add option to override automatic MIME type detection 156 | 157 | ### v0.4.1 158 | 159 | - Fix delete task executing separately from upload 160 | 161 | ### v0.4.0 162 | 163 | - Add 'delete' option.
164 | - _Breaks the use of `options.params`_ 165 | 166 | ### v0.3.1 167 | 168 | - Region is now optional, defaults to US Standard 169 | 170 | ### v0.3.0 171 | 172 | - Option for upload concurrency. 173 | 174 | ### v0.2.0 175 | 176 | - Can set additional params and a bug fix 177 | 178 | ### v0.1.1 179 | 180 | - Fix bug when using env variable. 181 | 182 | ### v0.1.0 183 | 184 | - First release. Simple upload to S3. 185 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # grunt-aws-s3 2 | 3 | > Interact with AWS S3 using the AWS SDK 4 | 5 | ## Warning 6 | 7 | Versions 0.4.0 to 0.5.0 have a bug where `options.params` is ignored. 8 | Version 0.8.0 doesn't actually support Node 0.8.x and 0.9.x. 9 | 10 | It's not recommended to use concurrencies over 100 as you may run into EMFILE/ENOTFOUND errors. 11 | 12 | ## Getting Started 13 | This plugin requires Grunt `>= 0.4.0`. 14 | 15 | If you haven't used [Grunt](http://gruntjs.com/) before, be sure to check out the [Getting Started](http://gruntjs.com/getting-started) guide, as it explains how to create a [Gruntfile](http://gruntjs.com/sample-gruntfile) as well as install and use Grunt plugins. Once you're familiar with that process, you may install this plugin with this command: 16 | 17 | ```shell 18 | npm install grunt-aws-s3 --save-dev 19 | ``` 20 | 21 | Once the plugin has been installed, it may be enabled inside your Gruntfile with this line of JavaScript: 22 | 23 | ```js 24 | grunt.loadNpmTasks('grunt-aws-s3'); 25 | ``` 26 | 27 | Make sure that your AWS IAM policy allows ```s3:GetObject```, ```s3:GetObjectAcl```, ```s3:ListBucket```, ```s3:PutObject```, and ```s3:PutObjectAcl``` on everything under the buckets you plan to deploy to. This task sets ACL properties, so you can easily find yourself in a situation where tools like s3cmd have no problem deploying files to your bucket, while this task fails with "AccessDenied". 28 | 29 | ## The "aws_s3" task 30 | 31 | ### Options 32 | 33 | #### options.accessKeyId (required) 34 | Type: `String` 35 | 36 | The AWS accessKeyId. You can load it via JSON as shown in the example or use the `AWS_ACCESS_KEY_ID` environment variable. 37 | 38 | #### options.secretAccessKey (required) 39 | Type: `String` 40 | 41 | The AWS secretAccessKey. You can load it via JSON as shown in the example or use the `AWS_SECRET_ACCESS_KEY` environment variable. 42 | 43 | #### options.awsProfile 44 | Type: `String` 45 | 46 | Great if you have a [credentials profile](http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/node-configuring.html#Creating_the_Shared_Credentials_File) stored in `~/.aws/credentials`. 47 | 48 | #### options.sessionToken 49 | Type: `String` 50 | 51 | The AWS sessionToken. You can load it via JSON as shown in the example or use the `AWS_SESSION_TOKEN` environment variable. 52 | 53 | #### options.bucket (required) 54 | Type: `String` 55 | 56 | The AWS bucket name you want to upload to. 57 | 58 | #### options.endpoint 59 | Type: `String` 60 | 61 | The AWS endpoint you'd like to use. By default it is set based on the region. 62 | 63 | #### options.s3ForcePathStyle 64 | Type: `Boolean` 65 | Default: `false` 66 | 67 | Force the use of path-style URLs (http://endpoint/bucket/path) instead of the default host-style (http://bucket.endpoint/path). 68 | 69 | #### options.region 70 | Type: `String` 71 | Default: `US Standard` 72 | 73 | The AWS [region](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region).
74 | 75 | If not specified, it uploads to the default 'US Standard'. 76 | 77 | #### options.maxRetries 78 | Type: `Integer` 79 | 80 | The maximum number of retries to attempt for a request. 81 | 82 | #### options.sslEnabled 83 | Type: `Boolean` 84 | 85 | Whether to enable SSL for requests or not. 86 | 87 | #### options.httpOptions 88 | Type: `Object` 89 | 90 | A set of options to pass to the low-level HTTP request. The list of options can be found in the [documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#constructor-property). 91 | 92 | #### options.signatureVersion 93 | Type: `String` 94 | 95 | Change the signature version to sign requests with. Possible values are: 'v2', 'v3', 'v4'. 96 | 97 | #### options.access 98 | Type: `String` 99 | Default: `public-read` 100 | 101 | The ACL you want to apply to ALL the files that will be uploaded. The ACL values can be found in the [documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property). 102 | 103 | #### options.uploadConcurrency 104 | Type: `Integer` 105 | Default: `1` 106 | 107 | Number of uploads in parallel. By default, there's no concurrency. Must be > 0. 108 | Note: this option used to be called `concurrency`; it has been deprecated but remains backwards compatible until 1.0.0. 109 | 110 | #### options.downloadConcurrency 111 | Type: `Integer` 112 | Default: `1` 113 | 114 | Number of downloads in parallel. By default, there's no concurrency. Must be > 0. 115 | 116 | #### options.copyConcurrency 117 | Type: `Integer` 118 | Default: `1` 119 | 120 | Number of copies in parallel. By default, there's no concurrency. Must be > 0. 121 | 122 | #### options.params 123 | Type: `Object` 124 | 125 | A hash of the params you want to apply to the files. Useful to set the `ContentEncoding` to `gzip` for instance, or set the `CacheControl` value. The list of parameters can be found in the [documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property). `params` will apply to *all* the files in the target. However, the `params` option in the file list has priority over it. 126 | 127 | #### options.mime 128 | Type: `Object` 129 | 130 | The MIME type of every file is determined by a MIME lookup using [node-mime](https://github.com/broofa/node-mime). If you want to override it, you can use this option object. 131 | The keys are the local file paths and the values are the MIME types. 132 | 133 | ```js 134 | { 135 | 'path/to/file': 'application/json', 136 | 'path/to/other/file': 'application/gzip' 137 | } 138 | ``` 139 | 140 | You need to specify the full path of the file, including the `cwd` part. 141 | The `mime` hash has absolute priority over what has been set in `options.params` and the `params` option of the file list. 142 | 143 | #### options.stream 144 | Type: `Boolean` 145 | Default: `false` 146 | 147 | Allows you to use streams instead of buffers to upload and download. 148 | The option can either be turned on for the whole subtask or for a specified file object like so: 149 | 150 | ```js 151 | {'action': 'upload', expand: true, cwd: 'dist/js', src: ['**'], stream: true} 152 | ``` 153 | 154 | #### options.debug 155 | Type: `Boolean` 156 | Default: `false` 157 | 158 | This will do a "dry run". It will not upload anything to S3 but you will get the full report just as you would in normal mode. Useful to check what will be changed on the server before actually doing it.
Unless one of your actions depends on another (like a download following a delete), the report should be accurate. 159 | `listObjects` requests will still be made to list the content of the bucket. 160 | 161 | #### options.differential 162 | Type: `Boolean` 163 | Default: `false` 164 | 165 | `listObjects` requests will be made to list the content of the bucket, then they will be checked against their local file equivalent (if it exists) using MD5 (and sometimes date) comparisons. 166 | This means different things for different actions: 167 | - `upload`: will only upload the files which either don't exist on the bucket or have a different MD5 hash 168 | - `download`: will only download the files which either don't exist locally or have a different MD5 hash and are newer. 169 | - `delete`: will only delete the files which don't exist locally 170 | 171 | The option can either be specified for the whole subtask or for a specified file object like so: 172 | 173 | ```js 174 | {'action': 'upload', expand: true, cwd: 'dist/js', src: ['**'], differential: true} 175 | ``` 176 | 177 | In order to be able to compare to the local file names, it is necessary for `dest` to be a finished path (e.g. `directory/` instead of just `dir`) as the comparison is done between the file names found in `cwd` and the files found on the server under `dest`. If you want to compare the files in the directory `scripts/` in your bucket and the files in the corresponding local directory `dist/scripts/`, you need to have something like: 178 | 179 | ```js 180 | {cwd: 'dist/scripts/', dest: 'scripts/', 'action': 'download', differential: true} 181 | ``` 182 | 183 | #### options.overwrite 184 | Type: `Boolean` 185 | Default: `true` 186 | 187 | By setting this option to `false`, you can prevent overwriting files on the server. The task will scan the whole bucket first and will stop if it encounters a path that is about to be overwritten. 188 | 189 | #### options.displayChangesOnly 190 | Type: `Boolean` 191 | Default: `false` 192 | 193 | If enabled, only lists files that have changed when performing a differential upload. 194 | 195 | #### options.progress 196 | Type: `String` 197 | Default: `dots` 198 | 199 | Specify the output format for task progress. Valid options are: 200 | - `dots`: will display one dot for each file, green for success, yellow for failure 201 | - `progressBar`: will display a progress bar with current/total count and completion eta 202 | - `none`: will suppress all display of progress 203 | 204 | #### options.changedFiles 205 | Type: `String` 206 | Default: `aws_s3_changed` 207 | 208 | This task exports the list of uploaded files to a variable on the grunt config so it can be used by another task (`grunt-invalidate-cloudfront` for instance). By default it's accessible via `grunt.config.get('aws_s3_changed')` and this option allows you to change the variable name. 209 | 210 | 211 | #### options.gzipRename (DEPRECATED - see options.compressionRename) 212 | Type: `String` 213 | Default: `` 214 | 215 | When using the `gzip` abilities of the task (see below), you can use this option to change the extensions of the files uploaded to S3. Values can be: 216 | - `gz`: will replace the compound extension with `.gz` (e.g. `build.css.gz` -> `build.gz`) 217 | - `ext`: will keep the original extension and remove `.gz` (e.g. `build.css.gz` -> `build.css`) 218 | - `swap`: will swap the two extensions (e.g.
`build.css.gz` -> `build.gz.css`) 219 | 220 | This only works with the `gzip` abilities of the task, which are based on compound extensions like these: `.css.gz`. 221 | 222 | #### options.compressionRename 223 | Type: `String` 224 | Default: `` 225 | 226 | When using the `compression` abilities of the task (see below), you can use this option to change the extensions of the files uploaded to S3. Values can be: 227 | - `compress`: will replace the compound extension with the compression specific extension (e.g. `build.css.gz` -> `build.gz`) 228 | - `ext`: will keep the original extension and remove the compression specific extension (e.g. `build.css.gz` -> `build.css`) 229 | - `swap`: will swap the two extensions (e.g. `build.css.br` -> `build.br.css`) 230 | 231 | This only works with the `compression` abilities of the task, which are based on compound extensions like these: `.css.gz`. 232 | 233 | #### options.compressionTypes 234 | Type: `Object` 235 | Default: `{'.br': 'br', '.gz': 'gzip'}` 236 | 237 | When using the `compression` abilities of the task (see below), you can use this option to change whether a specific extension is recognized as a compression extension and to change the encoding type this compression algorithm maps to. This option should contain an object that maps extensions to content encodings. 238 | 239 | 240 | ### compression 241 | 242 | This task doesn't compress anything for you. The `grunt-contrib-compress` task is here for that and is much more suitable. 243 | However, uploading compressed files is annoying because you need to set `ContentType` and `ContentEncoding` correctly for each of the compressed files. As of version `0.12.0`, this plugin will try to guess if a file needs to have its `ContentType` and `ContentEncoding` changed, relying on a convention rather than configuration (inspired by [hapi](https://github.com/hapijs/hapi/blob/master/API.md#built-in-handlers)). 244 | 245 | The convention is that a compressed file must have a compression specific extension, e.g. `.gz`, in its extension as well as its original extension (e.g. `.css`, `.js`) like so: `build.js.gz`. 246 | In this case the plugin will apply the `ContentType` from `build.js` to `build.js.gz` and set the `ContentEncoding` to `gzip`. 247 | 248 | If for some reason you're not following this convention (e.g. you're naming your files `build.gz`), you can force the ContentType through the `mime` option of the plugin, which still has priority. Provided the extension is still `.gz`, the `ContentEncoding` will be set for you. Alternatively, you can use the `compressionRename` option which will be able to rename the files on the fly as they're uploaded to S3. 249 | 250 | ### Actions 251 | 252 | This Grunt task supports four modes of interaction with S3: `upload`, `download`, `delete` and `copy`. Every action that you specify is executed serially, one after the other. If multiple `upload` actions are one after the other, they will be grouped together. 253 | 254 | You choose the action by specifying the key `action` in the file hash like so: 255 | 256 | ```js 257 | {'action': 'upload', expand: true, cwd: 'dist/js', src: ['**'], dest: 'app/js/'} 258 | ``` 259 | 260 | By default, the action is `upload`. 261 | 262 | #### `upload` 263 | 264 | The `upload` action uses the [newest Grunt file format](http://gruntjs.com/configuring-tasks#files), allowing you to take advantage of the `expand` and `filter` options. 265 | It is the default action, so you can omit `action: 'upload'` if you want a cleaner look.
Don't forget to set a `dest` (use `dest: '/'` for the root). 266 | 267 | Lastly, don't forget to set `expand: true` when you use the `cwd` property, or Grunt will just ignore it. This is explained in [Grunt Building the files object dynamically](http://gruntjs.com/configuring-tasks#building-the-files-object-dynamically). 268 | 269 | ```js 270 | files: [ 271 | {expand: true, cwd: 'dist/staging/scripts', src: ['**'], dest: 'app/scripts/'}, 272 | {expand: true, cwd: 'dist/staging/styles', src: ['**'], dest: 'app/styles/', action: 'upload'} 273 | ] 274 | ``` 275 | 276 | You can also include a `params` hash which will override the options.params one. For example: 277 | 278 | ```js 279 | 280 | params: { 281 | ContentType: 'application/json', 282 | CacheControl: '3000' 283 | } 284 | 285 | // ... 286 | 287 | files: [ 288 | {expand: true, cwd: 'dist/staging/scripts', src: ['**'], dest: 'app/scripts/', params: {CacheControl: '2000'}}, 289 | {expand: true, cwd: 'dist/staging/styles', src: ['**'], dest: 'app/styles/'} 290 | ] 291 | ``` 292 | 293 | This will yield the following params, which will eventually be applied: 294 | 295 | ```js 296 | { 297 | ContentType: 'application/json', 298 | CacheControl: '2000' 299 | } 300 | 301 | // AND 302 | 303 | { 304 | ContentType: 'application/json', 305 | CacheControl: '3000' 306 | } 307 | ``` 308 | 309 | The `options.mime` hash, however, has priority over the ContentType. So if the hash looked like this: 310 | 311 | ```js 312 | { 313 | 'dist/staging/styles/LICENCE': 'text/plain' 314 | } 315 | ``` 316 | 317 | The `ContentType` eventually applied to `dist/staging/styles/LICENCE` would be `text/plain` even though we had a `ContentType` specified in `options.params` or in `params` of the file. 318 | 319 | When the `differential` option is enabled, it will only upload the files which either don't exist on the bucket or have a different MD5 hash. 320 | 321 | #### `download` 322 | 323 | The `download` action requires a `cwd`, a `dest` and *no* `src` like so: 324 | 325 | ```js 326 | {cwd: 'download/', dest: 'app/', action: 'download'} 327 | ``` 328 | 329 | The `dest` is used as the Prefix in the [listObjects command](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#listObjects-property) to find the files _on the server_ (which means it can be a path or a partial path). 330 | The `cwd` is used as the root folder to write the downloaded files. The inner folder structure will be reproduced inside that folder. 331 | 332 | If you specify '/' for `dest`, the whole bucket will be downloaded. It automatically handles buckets with more than 1000 objects. 333 | If you specify 'app', all paths starting with 'app' will be targeted (e.g. 'app.js', 'app/myapp.js', 'app/index.html', 'app backup/donotdelete.js') but it will leave alone the others (e.g. 'my app/app.js', 'backup app/donotdelete.js'). 334 | 335 | When the `differential` option is enabled, it will only download the files which either don't exist locally or have a different MD5 hash and are newer. 336 | 337 | Note: if `dest` is a file, it will be downloaded to `cwd` + `file name`. If `dest` is a directory ending with `/`, its content will be downloaded to `cwd` + `file names or directories found in dest`. If `dest` is neither a file nor a directory, the files found using it as a prefix will be downloaded to `cwd` + `paths found using dest as the prefix`.
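For reference, the `differential` comparison described above boils down to hashing the local file and comparing the digest to the S3 ETag (for downloads, the plugin additionally compares modification dates). Below is a minimal sketch mirroring the plugin's internal `hashFile` logic; the function name and callback shape are illustrative, and it assumes a single-part upload where the ETag is the quoted MD5 digest:

```js
// Sketch only: compare a local file's MD5 against an S3 ETag.
var crypto = require('crypto');
var fs = require('fs');

function isFileDifferent(filePath, serverETag, callback) {

	var hash = crypto.createHash('md5');
	var stream = fs.createReadStream(filePath);

	stream.on('data', function (data) { hash.update(data); });
	stream.on('error', callback);
	stream.on('end', function () {
		// S3's ETag has quotes around it
		var localETag = '"' + hash.digest('hex') + '"';
		callback(null, localETag !== serverETag);
	});
}
```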
338 | 339 | The `download` action can also take an `exclude` option like so: 340 | 341 | ```js 342 | {cwd: 'download/', dest: 'app/', action: 'download', exclude: "**/.*"} 343 | ``` 344 | 345 | The value is a globbing pattern that can be consumed by `grunt.file.isMatch`. You can find more information on [globbing patterns on Grunt's doc](http://gruntjs.com/api/grunt.file#globbing-patterns). In this example, it will exclude all files starting with a `.` (they won't be downloaded). 346 | If you want to reverse the `exclude` (that is, only what matches the pattern will be downloaded), you can use the `flipExclude` option like so: 347 | 348 | ```js 349 | {cwd: 'download/', dest: 'app/', action: 'download', exclude: "**/.*", flipExclude: true} 350 | ``` 351 | 352 | In this example, only the files starting with a `.` will be downloaded. 353 | 354 | Example: 355 | 356 | ```js 357 | {cwd: 'download/', dest: 'app/', action: 'download'} // app/myapp.js downloaded to download/myapp.js 358 | {cwd: 'download/', dest: 'app/myapp.js', action: 'download'} // app/myapp.js downloaded to download/myapp.js 359 | {cwd: 'download/', dest: 'app', action: 'download'} // app/myapp.js downloaded to download/app/myapp.js 360 | ``` 361 | 362 | #### `delete` 363 | 364 | The `delete` action just requires a `dest`; there's no need for a `src`: 365 | 366 | ```js 367 | {dest: 'app/', 'action': 'delete'} 368 | ``` 369 | 370 | The `dest` is used as the Prefix in the [listObjects command](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#listObjects-property) to find the files _on the server_ (which means it can be a path or a partial path). 371 | 372 | If you specify '/', the whole bucket will be wiped. It automatically handles buckets with more than 1000 objects. 373 | If you specify 'app', all paths starting with 'app' will be targeted (e.g. 'app.js', 'app/myapp.js', 'app/index.html', 'app backup/donotdelete.js') but it will leave alone the others (e.g. 'my app/app.js', 'backup app/donotdelete.js'). 374 | 375 | When the `differential` option is enabled, it will only delete the files which don't exist locally. It also requires a `cwd` key with the path to the local folder to check against. 376 | 377 | Please, be careful with the `delete` action. It doesn't forgive. 378 | 379 | The `delete` action can also take an `exclude` option like so: 380 | 381 | ```js 382 | {dest: 'app/', 'action': 'delete', exclude: "**/.*"} 383 | ``` 384 | 385 | The value is a globbing pattern that can be consumed by `grunt.file.isMatch`. You can find more information on [globbing patterns on Grunt's doc](http://gruntjs.com/api/grunt.file#globbing-patterns). In this example, it will exclude all files starting with a `.` (they won't be deleted). 386 | If you want to reverse the `exclude` (that is, only what matches the pattern will be deleted), you can use the `flipExclude` option like so: 387 | 388 | ```js 389 | {dest: 'app/', 'action': 'delete', exclude: "**/.*", flipExclude: true} 390 | ``` 391 | 392 | In this example, only the files starting with a `.` will be deleted. 393 | 394 | `dest` is the folder on the bucket that you want to target. At the moment, a globbing pattern shouldn't be in `src` (which would reference local files) but in `exclude`. Exclude takes one globbing pattern, and can be "flipped" so that it becomes "delete all that match this pattern" rather than "don't delete all that match this pattern".
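For reference, the `exclude`/`flipExclude` decision the task applies to each listed key is essentially the check below. This is a sketch based on the task's own source; the helper name is illustrative, and it assumes it runs inside a Grunt task so `grunt.file.isMatch` is available:

```js
// Sketch only: decide whether a key is skipped, given an exclude pattern and the flipExclude flag.
function isExcluded(key, exclude, flipExclude) {

	var excluded = exclude && grunt.file.isMatch(exclude, key);

	if (exclude && flipExclude) {
		excluded = !excluded; // flipped: only keys matching the pattern are acted upon
	}

	return excluded;
}

// isExcluded('app/.htaccess', "**/.*", false) -> true  (skipped, i.e. not deleted)
// isExcluded('app/.htaccess', "**/.*", true)  -> false (acted upon, i.e. deleted)
```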
395 | 396 | If you use `differential`, you need to give a `cwd`, which will indicate which folder `dest` is referencing locally. In that case, `differential` will only delete the files on AWS which don't exist locally (useful for cleaning up if you have changed some asset names, for instance). 397 | 398 | #### `copy` 399 | 400 | The `copy` action just requires a `src` and a `dest`, like so: 401 | 402 | ```js 403 | {src: 'app/', dest: 'copy/', 'action': 'copy'} 404 | ``` 405 | The `src` is used as the Prefix in the [listObjects command](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#listObjects-property) to find the files _on the server_ (which means it can be a path or a partial path). It will then copy objects to `dest`. 406 | 407 | The `copy` action can also take an `exclude` option like so: 408 | 409 | ```js 410 | {src: 'app/', dest: 'copy/', 'action': 'copy', exclude: "**/.*"} 411 | ``` 412 | 413 | The value is a globbing pattern that can be consumed by `grunt.file.isMatch`. You can find more information on [globbing patterns on Grunt's doc](http://gruntjs.com/api/grunt.file#globbing-patterns). In this example, it will exclude all files starting with a `.` (they won't be copied). `flipExclude` also works. 414 | 415 | ### Usage Examples 416 | 417 | The example loads the AWS credentials from a JSON file (DO NOT forget to exclude it from your commits). 418 | 419 | ```JSON 420 | { 421 | "AWSAccessKeyId": "AKxxxxxxxxxx", 422 | "AWSSecretKey": "super-secret-key" 423 | } 424 | ``` 425 | 426 | ```js 427 | aws: grunt.file.readJSON('aws-keys.json'), // Read the file 428 | 429 | aws_s3: { 430 | options: { 431 | accessKeyId: '<%= aws.AWSAccessKeyId %>', // Use the variables 432 | secretAccessKey: '<%= aws.AWSSecretKey %>', // You can also use env variables 433 | region: 'eu-west-1', 434 | uploadConcurrency: 5, // 5 simultaneous uploads 435 | downloadConcurrency: 5 // 5 simultaneous downloads 436 | }, 437 | staging: { 438 | options: { 439 | bucket: 'my-wonderful-staging-bucket', 440 | differential: true, // Only uploads the files that have changed 441 | gzipRename: 'ext' // when uploading a gz file, keep the original extension 442 | }, 443 | files: [ 444 | {dest: 'app/', cwd: 'backup/staging/', action: 'download'}, 445 | {src: 'app/', dest: 'copy/', action: 'copy'}, 446 | {expand: true, cwd: 'dist/staging/scripts/', src: ['**'], dest: 'app/scripts/'}, 447 | {expand: true, cwd: 'dist/staging/styles/', src: ['**'], dest: 'app/styles/'}, 448 | {dest: 'src/app', action: 'delete'}, 449 | ] 450 | }, 451 | production: { 452 | options: { 453 | bucket: 'my-wonderful-production-bucket', 454 | params: { 455 | ContentEncoding: 'gzip' // applies to all the files!
456 | }, 457 | mime: { 458 | 'dist/assets/production/LICENCE': 'text/plain' 459 | } 460 | }, 461 | files: [ 462 | {expand: true, cwd: 'dist/production/', src: ['**'], dest: 'app/'}, 463 | {expand: true, cwd: 'assets/prod/large', src: ['**'], dest: 'assets/large/', stream: true}, // enable stream to allow large files 464 | {expand: true, cwd: 'assets/prod/', src: ['**'], dest: 'assets/', params: {CacheControl: '2000'}}, 465 | // CacheControl only applied to the assets folder 466 | // LICENCE inside that folder will have ContentType equal to 'text/plain' 467 | ] 468 | }, 469 | clean_production: { 470 | options: { 471 | bucket: 'my-wonderful-production-bucket', 472 | debug: true // Doesn't actually delete but shows log 473 | }, 474 | files: [ 475 | {dest: 'app/', action: 'delete'}, 476 | {dest: 'assets/', exclude: "**/*.tgz", action: 'delete'}, // will not delete the tgz 477 | {dest: 'assets/large/', exclude: "**/*copy*", flipExclude: true, action: 'delete'}, // will delete everything that has copy in the name 478 | ] 479 | }, 480 | download_production: { 481 | options: { 482 | bucket: 'my-wonderful-production-bucket' 483 | }, 484 | files: [ 485 | {dest: 'app/', cwd: 'backup/', action: 'download'}, // Downloads the content of app/ to backup/ 486 | {dest: 'assets/', cwd: 'backup-assets/', exclude: "**/*copy*", action: 'download'}, // Downloads everything which doesn't have copy in the name 487 | ] 488 | }, 489 | secret: { 490 | options: { 491 | bucket: 'my-wonderful-private-bucket', 492 | access: 'private' 493 | }, 494 | files: [ 495 | {expand: true, cwd: 'secret_garden/', src: ['*.key'], dest: 'secret/'}, 496 | ] 497 | } 498 | }, 499 | ``` 500 | 501 | ## Todos 502 | 503 | - Better testing (params, sync, etc.) 504 | 505 | ## Release History 506 | Full [changelog](https://github.com/MathieuLoutre/grunt-aws-s3/blob/master/CHANGELOG.md). 507 | -------------------------------------------------------------------------------- /tasks/aws_s3.js: -------------------------------------------------------------------------------- 1 | /* 2 | * grunt-aws-s3 3 | * https://github.com/MathieuLoutre/grunt-aws-s3 4 | * 5 | * Copyright (c) 2015 Mathieu Triay 6 | * Licensed under the MIT license. 
7 | */ 8 | 9 | 'use strict'; 10 | 11 | var path = require('path'); 12 | var fs = require('fs'); 13 | var crypto = require('crypto'); 14 | var AWS = require('aws-sdk'); 15 | var mime = require('mime-types'); 16 | var _ = require('lodash'); 17 | var async = require('async'); 18 | var Progress = require('progress'); 19 | 20 | module.exports = function (grunt) { 21 | 22 | grunt.registerMultiTask('aws_s3', 'Interact with AWS S3 using the AWS SDK', function () { 23 | 24 | var done = this.async(); 25 | 26 | var options = this.options({ 27 | access: 'public-read', 28 | accessKeyId: process.env.AWS_ACCESS_KEY_ID, 29 | secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY, 30 | sessionToken: process.env.AWS_SESSION_TOKEN, 31 | uploadConcurrency: 1, 32 | downloadConcurrency: 1, 33 | copyConcurrency: 1, 34 | mime: {}, 35 | params: {}, 36 | debug: false, 37 | mock: false, 38 | differential: false, 39 | stream: false, 40 | displayChangesOnly: false, 41 | progress: 'dots', 42 | overwrite: true, 43 | changedFiles: 'aws_s3_changed', 44 | compressionTypes: {'.br': 'br', '.gz': 'gzip'} 45 | }); 46 | 47 | // To deprecate 48 | if (options.concurrency !== undefined) { 49 | grunt.log.writeln("The concurrency option is deprecated, use uploadConcurrency instead\n".yellow); 50 | options.uploadConcurrency = options.concurrency; 51 | } 52 | 53 | var filePairOptions = { 54 | differential: options.differential, 55 | stream: options.stream, 56 | flipExclude: false, 57 | exclude: false 58 | }; 59 | 60 | // Replace the AWS SDK by the mock package if we're testing 61 | if (options.mock) { 62 | AWS = require('mock-aws-s3'); 63 | } 64 | 65 | if (options.awsProfile) { 66 | var credentials = new AWS.SharedIniFileCredentials({profile: options.awsProfile}); 67 | AWS.config.credentials = credentials; 68 | } 69 | 70 | if (['dots','progressBar','none'].indexOf(options.progress) < 0) { 71 | grunt.log.writeln('Invalid progress option; defaulting to dots\n'.yellow); 72 | options.progress = 'dots'; 73 | } 74 | 75 | // List of acceptable params for an upload 76 | var put_params = ['CacheControl', 'ContentDisposition', 'ContentEncoding', 77 | 'ContentLanguage', 'ContentLength', 'ContentMD5', 'Expires', 'GrantFullControl', 78 | 'GrantRead', 'GrantReadACP', 'GrantWriteACP', 'Metadata', 'ServerSideEncryption', 79 | 'StorageClass', 'WebsiteRedirectLocation', 'ContentType']; 80 | 81 | // Checks that all params are in put_params 82 | var isValidParams = function (params) { 83 | 84 | return _.every(_.keys(params), function (key) { 85 | return put_params.includes(key); 86 | }); 87 | }; 88 | 89 | var getObjectURL = function (file) { 90 | 91 | file = file || ''; 92 | var prefix = '' 93 | 94 | if (!options.mock) { 95 | prefix = s3.endpoint.href 96 | } 97 | 98 | return prefix + options.bucket + '/' + file; 99 | }; 100 | 101 | // Get the key URL relative to a path string 102 | var getRelativeKeyPath = function (key, dest) { 103 | 104 | var path; 105 | 106 | if (_.last(dest) === '/') { 107 | // if the path string is a directory, remove it from the key 108 | path = key.replace(dest, ''); 109 | } 110 | else if (key.replace(dest, '') === '') { 111 | path = _.last(key.split('/')); 112 | } 113 | else { 114 | path = key; 115 | } 116 | 117 | return path; 118 | }; 119 | 120 | var hashFile = function (options, callback) { 121 | 122 | if (options.stream) { 123 | var local_stream = fs.ReadStream(options.file_path); 124 | var hash = crypto.createHash('md5'); 125 | 126 | local_stream.on('end', function () { 127 | // S3's ETag has quotes around it... 
128 | callback(null, '"' + hash.digest('hex') + '"'); 129 | }); 130 | 131 | local_stream.on('error', function (err) { 132 | callback(err); 133 | }); 134 | 135 | local_stream.on('data', function (data) { 136 | hash.update(data); 137 | }); 138 | } 139 | else { 140 | var local_buffer = grunt.file.read(options.file_path, { encoding: null }); 141 | callback(null, '"' + crypto.createHash('md5').update(local_buffer).digest('hex') + '"'); 142 | } 143 | }; 144 | 145 | // Checks that local file is 'date_compare' than server file 146 | var checkFileDate = function (options, callback) { 147 | 148 | fs.stat(options.file_path, function (err, stats) { 149 | 150 | if (err) { 151 | callback(err); 152 | } 153 | else { 154 | var local_date = new Date(stats.mtime).getTime(); 155 | var server_date = new Date(options.server_date).getTime(); 156 | 157 | if (options.compare_date === 'newer') { 158 | callback(null, local_date > server_date); 159 | } 160 | else { 161 | callback(null, local_date < server_date); 162 | } 163 | } 164 | }); 165 | }; 166 | 167 | var isFileDifferent = function (options, callback) { 168 | 169 | hashFile(options, function (err, md5_hash) { 170 | 171 | if (err) { 172 | callback(err); 173 | } 174 | else { 175 | if (md5_hash === options.server_hash) { 176 | callback(null, false); 177 | } 178 | else { 179 | if (options.server_date) { 180 | options.compare_date = options.compare_date || 'older'; 181 | checkFileDate(options, callback); 182 | } 183 | else { 184 | callback(null, true); 185 | } 186 | } 187 | } 188 | }); 189 | }; 190 | 191 | if (!options.bucket) { 192 | grunt.warn("Missing bucket in options"); 193 | } 194 | 195 | var s3_options = { 196 | bucket: options.bucket, 197 | accessKeyId: options.accessKeyId, 198 | secretAccessKey: options.secretAccessKey, 199 | sessionToken: options.sessionToken 200 | }; 201 | 202 | if (!options.region) { 203 | grunt.log.writeln("No region defined. S3 will default to US Standard\n".yellow); 204 | } 205 | else { 206 | s3_options.region = options.region; 207 | } 208 | 209 | if (options.endpoint) { 210 | s3_options.endpoint = options.endpoint; 211 | } 212 | 213 | if (options.params) { 214 | if (!isValidParams(options.params)) { 215 | grunt.warn('"params" can only be ' + put_params.join(', ')); 216 | } 217 | } 218 | 219 | // Allow additional (not required) options 220 | _.extend(s3_options, _.pick(options, ['maxRetries', 'sslEnabled', 'httpOptions', 'signatureVersion', 's3ForcePathStyle'])); 221 | 222 | var s3 = new AWS.S3(s3_options); 223 | 224 | var dest; 225 | var is_expanded; 226 | var objects = []; 227 | var uploads = []; 228 | 229 | // Because Grunt expands the files array automatically, 230 | // we need to group the uploads together to make the difference between actions. 231 | var pushUploads = function() { 232 | 233 | if (uploads.length > 0) { 234 | objects.push({ action: 'upload', files: uploads }); 235 | uploads = []; 236 | } 237 | }; 238 | 239 | var missingExpand = _.find(this.files, function(filePair) { 240 | return ! filePair.orig.expand // expand not specified 241 | && (! 
filePair.action || filePair.action == 'upload') // upload request or default 242 | && filePair.cwd; 243 | }); 244 | 245 | if (missingExpand) { 246 | grunt.warn("File upload action has 'cwd' but is missing 'expand: true', src list will not expand!"); 247 | } 248 | 249 | this.files.forEach(function (filePair) { 250 | 251 | is_expanded = filePair.orig.expand || false; 252 | 253 | if (filePair.action === 'delete') { 254 | 255 | _.defaults(filePair, filePairOptions); 256 | 257 | if (!filePair.dest) { 258 | grunt.fatal('No "dest" specified for deletion. No need to specify a "src"'); 259 | } 260 | else if (filePair.differential && !filePair.cwd) { 261 | grunt.fatal('Differential delete needs a "cwd"'); 262 | } 263 | 264 | pushUploads(); 265 | 266 | filePair.dest = (filePair.dest === '/') ? '' : filePair.dest; 267 | 268 | objects.push(filePair); 269 | } 270 | else if (filePair.action === 'download') { 271 | 272 | if (is_expanded) { 273 | grunt.fatal('You cannot expand the "src" for downloads'); 274 | } 275 | else if (!filePair.dest) { 276 | grunt.fatal('No "dest" specified for downloads'); 277 | } 278 | else if (!filePair.cwd || filePair.src) { 279 | grunt.fatal('Specify a "cwd" but not a "src" for downloads'); 280 | } 281 | 282 | pushUploads(); 283 | 284 | filePair.dest = (filePair.dest === '/') ? '' : filePair.dest; 285 | 286 | objects.push(_.defaults(filePair, filePairOptions)); 287 | } 288 | else if (filePair.action === 'copy') { 289 | 290 | if (is_expanded) { 291 | grunt.fatal('You cannot expand the "src" for copies'); 292 | } 293 | else if (!filePair.dest) { 294 | grunt.fatal('No "dest" specified for copies'); 295 | } 296 | else if (filePair.cwd || !filePair.src) { 297 | grunt.fatal('Specify a "src" but not a "cwd" for copies'); 298 | } 299 | 300 | pushUploads(); 301 | 302 | filePair.dest = (filePair.dest === '/') ? '' : filePair.dest; 303 | 304 | objects.push(_.defaults(filePair, filePairOptions)); 305 | } 306 | else { 307 | 308 | if (!filePair.dest) { 309 | grunt.fatal("Specify a dest for uploads (e.g. '/' for the root)"); 310 | } 311 | else if (filePair.params && !isValidParams(filePair.params)) { 312 | grunt.warn('"params" can only be ' + put_params.join(', ')); 313 | } 314 | else { 315 | filePair.params = _.defaults(filePair.params || {}, options.params); 316 | _.defaults(filePair, filePairOptions); 317 | 318 | filePair.src.forEach(function (src) { 319 | 320 | // Prevents creating empty folders 321 | if (!grunt.file.isDir(src)) { 322 | 323 | if (_.last(filePair.dest) === '/') { 324 | dest = (is_expanded) ? filePair.dest : unixifyPath(path.join(filePair.dest, src)); 325 | } 326 | else { 327 | dest = filePair.dest; 328 | } 329 | 330 | if (_.first(dest) === '/') { 331 | dest = dest.slice(1); 332 | } 333 | 334 | // '.' means that no dest path has been given (root). Nothing to create there. 335 | if (dest !== '.') { 336 | 337 | uploads.push(_.defaults({ 338 | need_upload: true, 339 | src: src, 340 | dest: dest 341 | }, filePair)); 342 | } 343 | } 344 | }); 345 | } 346 | } 347 | }); 348 | 349 | pushUploads(); 350 | 351 | // Will list *all* the content of the bucket given in options 352 | // Recursively requests the bucket with a marker if there's more than 353 | // 1000 objects. Ensures uniqueness of keys in the returned list. 
354 | var listObjects = function (prefix, callback, marker, contents) { 355 | 356 | var search = { 357 | Prefix: prefix, 358 | Bucket: options.bucket 359 | }; 360 | 361 | if (marker) { 362 | search.Marker = marker; 363 | } 364 | 365 | s3.listObjects(search, function (err, list) { 366 | 367 | if (!err) { 368 | 369 | var objects = (contents) ? contents.concat(list.Contents) : list.Contents; 370 | 371 | if (list.IsTruncated) { 372 | var new_marker = _.last(list.Contents).Key; 373 | listObjects(prefix, callback, new_marker, objects); 374 | } 375 | else { 376 | callback(_.uniq(objects, function (o) { return o.Key; })); 377 | } 378 | } 379 | else { 380 | grunt.fatal('Failed to list content of bucket ' + options.bucket + '\n' + err); 381 | } 382 | }); 383 | }; 384 | 385 | var deleteObjects = function (task, callback) { 386 | 387 | grunt.log.writeln('Deleting the content of ' + getObjectURL(task.dest).cyan); 388 | 389 | // List all the objects using dest as the prefix 390 | listObjects(task.dest, function (to_delete) { 391 | 392 | // List local content if it's a differential task 393 | var local_files = (task.differential) ? grunt.file.expand({ cwd: task.cwd }, ["**"]) : []; 394 | 395 | _.each(to_delete, function (o) { 396 | 397 | o.need_delete = true; 398 | o.excluded = task.exclude && grunt.file.isMatch(task.exclude, o.Key); 399 | 400 | if (task.exclude && task.flipExclude) { 401 | o.excluded = !o.excluded; 402 | } 403 | 404 | if (task.differential && !o.excluded) { 405 | // Exists locally or not (remove dest in the key to get the local path) 406 | o.need_delete = local_files.indexOf(getRelativeKeyPath(o.Key, task.dest)) === -1; 407 | } 408 | }); 409 | 410 | // Just list what needs to be deleted so it can be sliced if necessary 411 | var delete_list = _.filter(to_delete, function (o) { return o.need_delete && !o.excluded; }); 412 | 413 | if (options.debug) { 414 | callback(null, to_delete); 415 | } 416 | else if (delete_list.length > 0) { 417 | 418 | // deleteObjects requests can only take up to 1000 keys 419 | // If we are deleting more than a 1000 objects, we need slices 420 | var slices = Math.ceil(delete_list.length/1000); 421 | var errors = []; 422 | var failed = []; 423 | var deleted = []; 424 | var calls = 0; 425 | 426 | if(options.progress === 'progressBar'){ 427 | var progress = new Progress('[:bar] :current/:total :etas', {total : delete_list.length}); 428 | } 429 | 430 | var end = function (err, data) { 431 | 432 | if (err) { 433 | errors.push(err); 434 | data = data || {}; 435 | failed = failed.concat(data.Errors || []); 436 | } 437 | else { 438 | deleted = deleted.concat(data.Deleted); 439 | switch(options.progress){ 440 | case 'progressBar': 441 | progress.tick(); 442 | break; 443 | case 'none': 444 | break; 445 | case 'dots': 446 | default: 447 | grunt.log.write('.'.green); 448 | break; 449 | } 450 | } 451 | 452 | if (++calls === slices) { 453 | if (errors.length > 0) { 454 | callback(JSON.stringify(errors), failed); 455 | } 456 | else { 457 | callback(null, to_delete); 458 | } 459 | } 460 | }; 461 | 462 | var deleteSlice = function (i) { 463 | 464 | var start = 1000 * i; 465 | var slice = { 466 | Objects: _.map(delete_list.slice(start, start + 1000), function (o) { return { Key: o.Key }; }) 467 | }; 468 | 469 | s3.deleteObjects({ Delete: slice, Bucket: options.bucket }, function (err, data) { end(err, data); }); 470 | }; 471 | 472 | for (var i = 0; i < slices; i++) { 473 | deleteSlice(i); 474 | } 475 | } 476 | else { 477 | callback(null, (to_delete.length > 0) ? 
to_delete : null); 478 | } 479 | }); 480 | }; 481 | 482 | var doCopy = function (object, callback) { 483 | 484 | if (options.debug || !object.need_copy || object.excluded) { 485 | callback(null, false); 486 | } 487 | else { 488 | s3.copyObject({ Key: object.dest, CopySource: encodeURIComponent(options.bucket + '/' + object.Key), Bucket: options.bucket, ACL: options.access }, function (err, data) { 489 | if (err) { 490 | callback(err); 491 | } 492 | else { 493 | callback(null, true); 494 | } 495 | }); 496 | } 497 | }; 498 | 499 | var copyObjects = function (task, callback) { 500 | 501 | grunt.log.writeln('Copying the content of ' + getObjectURL(task.orig.src[0]).cyan + ' to ' + getObjectURL(task.dest).cyan); 502 | 503 | // List all the objects using src as the prefix 504 | listObjects(task.orig.src[0], function (to_copy) { 505 | 506 | if (to_copy.length === 0) { 507 | callback(null, null); 508 | } 509 | else { 510 | 511 | var copy_queue = async.queue(function (object, copyCallback) { 512 | 513 | var key = getRelativeKeyPath(object.Key, task.orig.src[0]); // Remove the src in the key 514 | object.dest = task.dest + key; 515 | object.need_copy = _.last(object.dest) !== '/'; // no need to write directories 516 | object.excluded = task.exclude && grunt.file.isMatch(task.exclude, object.Key); 517 | 518 | if (task.exclude && task.flipExclude) { 519 | object.excluded = !object.excluded; 520 | } 521 | 522 | setImmediate(doCopy, object, copyCallback); 523 | 524 | }, options.copyConcurrency); 525 | 526 | copy_queue.drain = function () { 527 | 528 | callback(null, to_copy); 529 | }; 530 | 531 | if (options.progress === 'progressBar') { 532 | var progress = new Progress('[:bar] :current/:total :etas', { total : to_copy.length }); 533 | } 534 | 535 | copy_queue.push(to_copy, function (err, copied) { 536 | 537 | if (err) { 538 | grunt.fatal('Failed to copy ' + getObjectURL(this.data.Key) + '\n' + err); 539 | } 540 | else { 541 | switch (options.progress) { 542 | case 'progressBar': 543 | progress.tick(); 544 | break; 545 | case 'none': 546 | break; 547 | case 'dots': 548 | default: 549 | var dot = (copied) ? 
'.'.green : '.'.yellow; 550 | grunt.log.write(dot); 551 | break; 552 | } 553 | } 554 | }); 555 | } 556 | }); 557 | }; 558 | 559 | var doDownload = function (object, callback) { 560 | 561 | if (options.debug || !object.need_download || object.excluded) { 562 | callback(null, false); 563 | } 564 | else if (object.stream) { 565 | grunt.file.mkdir(path.dirname(object.dest)); 566 | 567 | var stream = fs.createWriteStream(object.dest); 568 | var s3_object = s3.getObject({ Key: object.Key, Bucket: options.bucket }).createReadStream(); 569 | 570 | stream.on('finish', function () { 571 | callback(null, true); 572 | }); 573 | 574 | s3_object.on('error', function (err) { 575 | callback(err); 576 | }); 577 | 578 | stream.on('error', function (err) { 579 | callback(err); 580 | }); 581 | 582 | s3_object.pipe(stream); 583 | } 584 | else { 585 | s3.getObject({ Key: object.Key, Bucket: options.bucket }, function (err, data) { 586 | if (err) { 587 | callback(err); 588 | } 589 | else { 590 | grunt.file.write(object.dest, data.Body); 591 | callback(null, true); 592 | } 593 | }); 594 | } 595 | }; 596 | 597 | var downloadObjects = function (task, callback) { 598 | 599 | grunt.log.writeln('Downloading the content of ' + getObjectURL(task.dest).cyan + ' to ' + task.cwd.cyan); 600 | 601 | // List all the objects using dest as the prefix 602 | listObjects(task.dest, function (to_download) { 603 | 604 | // List local content if it's a differential task 605 | var local_files = (task.differential) ? grunt.file.expand({ cwd: task.cwd }, ["**"]) : []; 606 | 607 | if (to_download.length === 0) { 608 | callback(null, null); 609 | } 610 | else { 611 | 612 | var download_queue = async.queue(function (object, downloadCallback) { 613 | 614 | var key = getRelativeKeyPath(object.Key, task.dest); // Remove the dest in the key to not duplicate the path with cwd 615 | object.dest = task.cwd + key; 616 | object.stream = task.stream; 617 | object.need_download = _.last(object.dest) !== '/'; // no need to write directories 618 | object.excluded = task.exclude && grunt.file.isMatch(task.exclude, object.Key); 619 | 620 | if (task.exclude && task.flipExclude) { 621 | object.excluded = !object.excluded; 622 | } 623 | 624 | if (task.differential && object.need_download && !object.excluded) { 625 | var local_index = local_files.indexOf(key); 626 | 627 | // If file exists locally we need to check if it's different 628 | if (local_index !== -1) { 629 | 630 | // Check md5 and if file is older than server file 631 | var check_options = { 632 | file_path: object.dest, 633 | server_hash: object.ETag, 634 | server_date: object.LastModified, 635 | date_compare: 'older' 636 | }; 637 | 638 | isFileDifferent(check_options, function (err, different) { 639 | if (err) { 640 | downloadCallback(err); 641 | } 642 | else { 643 | object.need_download = different; 644 | setImmediate(doDownload, object, downloadCallback); 645 | } 646 | }); 647 | } 648 | else { 649 | setImmediate(doDownload, object, downloadCallback); 650 | } 651 | } 652 | else { 653 | setImmediate(doDownload, object, downloadCallback); 654 | } 655 | 656 | }, options.downloadConcurrency); 657 | 658 | download_queue.drain = function () { 659 | 660 | callback(null, to_download); 661 | }; 662 | 663 | if(options.progress === 'progressBar'){ 664 | var progress = new Progress('[:bar] :current/:total :etas', {total : to_download.length}); 665 | } 666 | 667 | download_queue.push(to_download, function (err, downloaded) { 668 | 669 | if (err) { 670 | grunt.fatal('Failed to download ' + 
getObjectURL(this.data.Key) + '\n' + err); 671 | } 672 | else { 673 | switch(options.progress){ 674 | case 'progressBar': 675 | progress.tick(); 676 | break; 677 | case 'none': 678 | break; 679 | case 'dots': 680 | default: 681 | var dot = (downloaded) ? '.'.green : '.'.yellow; 682 | grunt.log.write(dot); 683 | break; 684 | } 685 | } 686 | }); 687 | } 688 | }); 689 | }; 690 | 691 | var doCompressionRename = function (object, options) { 692 | var lastDot = object.src.lastIndexOf('.'); 693 | 694 | if (lastDot !== -1) { 695 | var rename = options.gzipRename || options.compressionRename; 696 | var extension = object.src.substr(lastDot); 697 | var type = options.compressionTypes && options.compressionTypes[extension]; 698 | 699 | if (type) { 700 | var originalPath = object.src.substr(0, lastDot); 701 | 702 | object.params = _.defaults( 703 | { 704 | ContentType: mime.contentType(mime.lookup(originalPath) || "application/octet-stream"), 705 | ContentEncoding: type 706 | }, object.params || {}); 707 | 708 | if (rename && object.src.match(new RegExp('\\.[^.]+\\' + extension +'$'))) { 709 | if (rename === 'ext') { 710 | object.dest = object.dest.replace( 711 | new RegExp('\\' + extension + '$'), ''); 712 | } 713 | else if (rename === 'compress' || rename === 'gz') { 714 | object.dest = object.dest.replace( 715 | new RegExp('\\.[^.]+(\\' + extension + ')$'), '$1'); 716 | } 717 | else if (rename === 'swap') { 718 | object.dest = object.dest.replace( 719 | new RegExp('(\\.[^.]+)(\\' + extension + ')$'), '$2$1'); 720 | } 721 | } 722 | } 723 | } 724 | }; 725 | 726 | var doUpload = function (object, callback) { 727 | 728 | if (object.need_upload && !options.debug) { 729 | 730 | var type = options.mime[object.src] || object.params.ContentType || mime.contentType(mime.lookup(object.src) || "application/octet-stream"); 731 | var upload = _.defaults({ 732 | ContentType: type, 733 | Key: object.dest, 734 | Bucket: options.bucket, 735 | ACL: options.access 736 | }, object.params); 737 | 738 | if (object.stream) { 739 | upload.Body = fs.createReadStream(object.src); 740 | } 741 | else { 742 | upload.Body = grunt.file.read(object.src, { encoding: null }); 743 | } 744 | 745 | s3.putObject(upload, function (err, data) { 746 | callback(err, true); 747 | }); 748 | } 749 | else { 750 | callback(null, false); 751 | } 752 | }; 753 | 754 | var uploadObjects = function (task, callback) { 755 | 756 | grunt.log.writeln('Uploading to ' + getObjectURL(task.dest).cyan); 757 | 758 | var startUploads = function (server_files) { 759 | 760 | var upload_queue = async.queue(function (object, uploadCallback) { 761 | 762 | doCompressionRename(object, options); 763 | 764 | var server_file = _.filter(server_files, { Key: object.dest })[0]; 765 | 766 | if (server_file && !options.overwrite) { 767 | uploadCallback(object.dest + " already exists!") 768 | } 769 | else if (server_file && object.differential) { 770 | 771 | isFileDifferent({ file_path: object.src, server_hash: server_file.ETag }, function (err, different) { 772 | object.need_upload = different; 773 | setImmediate(doUpload, object, uploadCallback); 774 | }); 775 | } 776 | else { 777 | setImmediate(doUpload, object, uploadCallback); 778 | } 779 | 780 | }, options.uploadConcurrency); 781 | 782 | upload_queue.drain = function () { 783 | 784 | callback(null, task.files); 785 | }; 786 | 787 | if (options.progress === 'progressBar') { 788 | var progress = new Progress('[:bar] :current/:total :etas', { total : task.files.length }); 789 | } 790 | 791 | upload_queue.push(task.files, 
function (err, uploaded) { 792 | 793 | 			if (err) { 794 | 				grunt.fatal('Failed to upload ' + this.data.src + ' with bucket ' + options.bucket + '\n' + err); 795 | 			} 796 | 			else { 797 | 				switch(options.progress){ 798 | 					case 'progressBar': 799 | 						progress.tick(); 800 | 						break; 801 | 					case 'none': 802 | 						break; 803 | 					case 'dots': 804 | 					default: 805 | 						var dot = (uploaded) ? '.'.green : '.'.yellow; 806 | 						grunt.log.write(dot); 807 | 						break; 808 | 				} 809 | 			} 810 | 		}); 811 | 	}; 812 | 813 | 	var unique_dests = _(task.files) 814 | 		.filter('differential') 815 | 		.map('dest') 816 | 		.compact() 817 | 		.map(path.dirname) 818 | 		.sort() 819 | 		.sortedUniq() 820 | 		.reduce(function (res, dest) { 821 | 822 | 			var last_path = res[res.length - 1]; 823 | 824 | 			if (!last_path || dest.indexOf(last_path) !== 0) { 825 | 				res.push(dest); 826 | 			} 827 | 828 | 			return res; 829 | 		}, []); 830 | 831 | 	// If there's a '.', we need to scan the whole bucket 832 | 	if (unique_dests.indexOf('.') > -1 || !options.overwrite) { 833 | 		unique_dests = ['']; 834 | 	} 835 | 836 | 	if (unique_dests.length) { 837 | 		async.mapLimit(unique_dests, options.uploadConcurrency, function (dest, callback) { 838 | 			listObjects(dest, function (objects) { 839 | 				callback(null, objects); 840 | 			}); 841 | 		}, function (err, objects) { 842 | 			if (err) { 843 | 				callback(err); 844 | 			} 845 | 			else { 846 | 				var server_files = Array.prototype.concat.apply([], objects); 847 | 				startUploads(server_files); 848 | 			} 849 | 		}); 850 | 	} else { 851 | 		startUploads([]); 852 | 	} 853 | }; 854 | 855 | var queue = async.queue(function (task, callback) { 856 | 857 | 	if (task.action === 'delete') { 858 | 		deleteObjects(task, callback); 859 | 	} 860 | 	else if (task.action === 'download') { 861 | 		downloadObjects(task, callback); 862 | 	} 863 | 	else if (task.action === 'copy') { 864 | 		copyObjects(task, callback); 865 | 	} 866 | 	else { 867 | 		uploadObjects(task, callback); 868 | 	} 869 | }, 1); 870 | 871 | queue.drain = function () { 872 | 873 | 	_.each(objects, function (o) { 874 | 875 | 		if (o.action === "delete") { 876 | 			grunt.log.writeln(o.deleted.toString().green + '/' + o.nb_objects.toString().green + ' objects deleted from ' + (options.bucket + '/' + o.dest).green); 877 | 		} 878 | 		else if (o.action === "download") { 879 | 			grunt.log.writeln(o.downloaded.toString().green + '/' + o.nb_objects.toString().green + ' objects downloaded from ' + (options.bucket + '/' + o.dest).green + ' to ' + o.cwd.green); 880 | 		} 881 | 		else if (o.action === "copy") { 882 | 			grunt.log.writeln(o.copied.toString().green + '/' + o.nb_objects.toString().green + ' objects copied from ' + (options.bucket + '/' + o.orig.src[0]).green + ' to ' + (options.bucket + '/' + o.dest).green); 883 | 		} 884 | 		else { 885 | 			grunt.log.writeln(o.uploaded.toString().green + '/' + o.nb_objects.toString().green + ' objects uploaded to bucket ' + (options.bucket + '/').green); 886 | 		} 887 | 	}); 888 | 889 | 	if (options.debug) { 890 | 		grunt.log.writeln("\nThe debug option was enabled, no changes have actually been made".yellow); 891 | 	} 892 | 893 | 	var uploadedFiles = []; 894 | 895 | 	_.each(objects, function (o) { 896 | 		if (!o.action || o.action === 'upload') { 897 | 			_.each(o.files, function (file) { 898 | 				if (file.need_upload) { 899 | 					uploadedFiles.push(file.dest); 900 | 				} 901 | 			}); 902 | 		} 903 | 	}); 904 | 905 | 	grunt.config.set(options.changedFiles, uploadedFiles); 906 | 907 | 	done(); 908 | }; 909 | 910 | if (objects.length === 0) { 911 | 	queue.drain(); 912 | } 913 | else { 914 | 	queue.push(objects, function (err, res) { 915 | 		var object_url =
getObjectURL(this.data.dest); 916 | 917 | 		if (this.data.action === 'delete') { 918 | 			if (err) { 919 | 				if (res && res.length > 0) { 920 | 					grunt.log.writeln('Errors (' + res.length.toString().red + ' objects): ' + _.map(res, 'Key').join(', ').red); 921 | 				} 922 | 923 | 				grunt.fatal('Deletion failed\n' + err.toString()); 924 | 			} 925 | 			else { 926 | 				if (res && res.length > 0) { 927 | 					grunt.log.writeln('\nList: (' + res.length.toString().cyan + ' objects):'); 928 | 929 | 					var deleted = 0; 930 | 931 | 					_.each(res, function (file) { 932 | 933 | 						if (file.need_delete && !file.excluded) { 934 | 							deleted++; 935 | 							grunt.log.writeln('- ' + file.Key.cyan); 936 | 						} 937 | 						else { 938 | 							var sign = (file.excluded) ? '! ' : '- '; 939 | 							grunt.log.writeln(sign + file.Key.yellow); 940 | 						} 941 | 					}); 942 | 943 | 					this.data.nb_objects = res.length; 944 | 					this.data.deleted = deleted; 945 | 				} 946 | 				else { 947 | 					grunt.log.writeln('Nothing to delete'); 948 | 					this.data.nb_objects = 0; 949 | 					this.data.deleted = 0; 950 | 				} 951 | 			} 952 | 		} 953 | 		else if (this.data.action === 'download') { 954 | 			if (err) { 955 | 				grunt.fatal('Download failed\n' + err.toString()); 956 | 			} 957 | 			else { 958 | 				if (res && res.length > 0) { 959 | 					grunt.log.writeln('\nList: (' + res.length.toString().cyan + ' objects):'); 960 | 961 | 					var task = this.data; 962 | 					var downloaded = 0; 963 | 964 | 					_.each(res, function (file) { 965 | 966 | 						if (file.need_download && !file.excluded) { 967 | 							downloaded++; 968 | 							grunt.log.writeln('- ' + getObjectURL(file.Key).cyan + ' -> ' + (task.cwd + getRelativeKeyPath(file.Key, task.dest)).cyan); 969 | 						} 970 | 						else { 971 | 							var sign = (file.excluded) ? ' =/= ' : ' === '; 972 | 							grunt.log.writeln('- ' + getObjectURL(file.Key).yellow + sign + (task.cwd + getRelativeKeyPath(file.Key, task.dest)).yellow); 973 | 						} 974 | 					}); 975 | 976 | 					this.data.nb_objects = res.length; 977 | 					this.data.downloaded = downloaded || 0; 978 | 				} 979 | 				else { 980 | 					grunt.log.writeln('Nothing to download'); 981 | 					this.data.nb_objects = 0; 982 | 					this.data.downloaded = 0; 983 | 				} 984 | 			} 985 | 		} 986 | 		else if (this.data.action === 'copy') { 987 | 			if (err) { 988 | 				grunt.fatal('Copy failed\n' + err.toString()); 989 | 			} 990 | 			else { 991 | 				if (res && res.length > 0) { 992 | 					grunt.log.writeln('\nList: (' + res.length.toString().cyan + ' objects):'); 993 | 994 | 					var task = this.data; 995 | 					var copied = 0; 996 | 997 | 					_.each(res, function (file) { 998 | 999 | 						if (file.need_copy && !file.excluded) { 1000 | 							copied++; 1001 | 							grunt.log.writeln('- ' + (options.bucket + '/' + file.Key).cyan + ' -> ' + (options.bucket + '/' + task.dest + getRelativeKeyPath(file.Key, task.dest)).cyan); 1002 | 						} 1003 | 						else { 1004 | 							var sign = (file.excluded) ?
' =/= ' : ' === '; 1005 | grunt.log.writeln('- ' + (options.bucket + '/' + file.Key).yellow + sign + (options.bucket + '/' + task.dest + getRelativeKeyPath(file.Key, task.dest)).yellow); 1006 | } 1007 | }); 1008 | 1009 | this.data.nb_objects = res.length; 1010 | this.data.copied = copied || 0; 1011 | } 1012 | else { 1013 | grunt.log.writeln('Nothing to copy'); 1014 | this.data.nb_objects = 0; 1015 | this.data.copied = 0; 1016 | } 1017 | } 1018 | } 1019 | else { 1020 | if (err) { 1021 | grunt.fatal('Upload failed\n' + err.toString()); 1022 | } 1023 | else { 1024 | grunt.log.writeln('\nList: (' + res.length.toString().cyan + ' objects):'); 1025 | 1026 | var uploaded = 0; 1027 | 1028 | _.each(res, function (file) { 1029 | 1030 | if (file.need_upload) { 1031 | uploaded++; 1032 | grunt.log.writeln('- ' + file.src.cyan + ' -> ' + (object_url + file.dest).cyan); 1033 | } 1034 | else if (!options.displayChangesOnly) { 1035 | grunt.log.writeln('- ' + file.src.yellow + ' === ' + (object_url + file.dest).yellow); 1036 | } 1037 | }); 1038 | 1039 | this.data.nb_objects = res.length; 1040 | this.data.uploaded = uploaded; 1041 | } 1042 | } 1043 | 1044 | grunt.log.writeln(); 1045 | }); 1046 | } 1047 | }); 1048 | 1049 | var unixifyPath = function (filepath) { 1050 | 1051 | if (process.platform === 'win32') { 1052 | return filepath.replace(/\\/g, '/'); 1053 | } 1054 | else { 1055 | return filepath; 1056 | } 1057 | }; 1058 | }; 1059 | --------------------------------------------------------------------------------
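
The handlers above are driven by plain Grunt file objects: entries without an action go through uploadObjects, while 'download', 'copy' and 'delete' are routed to the other handlers by the task queue. The following Gruntfile sketch exercises the upload path; the aws_s3 task name and option names come from this plugin, but the bucket, paths, target names and the helper 'report' task are illustrative assumptions, and the credential/region options (resolved earlier in the task, outside this excerpt) are left out.

```js
// Hypothetical Gruntfile.js sketch; bucket, paths and target names are placeholders,
// and the credential/region options handled earlier in the task are omitted.
module.exports = function (grunt) {

	grunt.initConfig({
		aws_s3: {
			options: {
				bucket: 'my-example-bucket',
				access: 'public-read',           // ACL applied by putObject/copyObject
				uploadConcurrency: 5,            // parallelism of the upload queue
				progress: 'progressBar',         // 'progressBar', 'dots' or 'none' (anything else falls back to dots)
				changedFiles: 'aws_s3_changed'   // grunt config key set to the list of uploaded dests
			},
			production: {
				files: [
					// differential: only files whose local MD5 differs from the server ETag are re-uploaded
					{ expand: true, cwd: 'dist/', src: ['**'], dest: 'app/', differential: true }
				]
			}
		}
	});

	grunt.loadNpmTasks('grunt-aws-s3');

	// queue.drain stores the uploaded keys under options.changedFiles,
	// so a follow-up task can read the list back from the grunt config.
	grunt.registerTask('report', function () {
		grunt.log.writeln((grunt.config.get('aws_s3_changed') || []).join('\n'));
	});

	grunt.registerTask('deploy', ['aws_s3:production', 'report']);
};
```

Reading changedFiles back after the run, as the 'report' task does here, is one way to consume the uploaded-keys list that queue.drain publishes.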
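
Deletion operates on a whole prefix: listObjects pages through the bucket with Marker, and the keys that survive the exclude and differential filters are sent to deleteObjects in slices of at most 1000 objects. A differential delete only removes keys that no longer have a counterpart under cwd, and flipExclude inverts the exclude match. Hypothetical targets, with placeholder prefixes, that would slot into the aws_s3 config sketched above:

```js
// Slots into the aws_s3 config above; prefixes and patterns are placeholders.
clean_assets: {
	files: [
		// Removes every key under 'app/assets/' except those matching exclude,
		// batching the keys into deleteObjects calls of at most 1000 objects.
		{ dest: 'app/assets/', exclude: '**/*.tgz', action: 'delete' },

		// Differential delete: only removes keys with no matching file under cwd.
		{ dest: 'app/scripts/', cwd: 'dist/scripts/', action: 'delete', differential: true }
	]
}
```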
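
Downloads mirror a prefix into cwd. With stream set, each object is piped to disk through the SDK's createReadStream rather than buffered in memory; with differential, a file is fetched only when its local MD5 differs from the server ETag and the local copy is older than the object's LastModified date. Another assumed target for the same config:

```js
// Slots into the aws_s3 config above; prefixes are placeholders.
backup: {
	files: [
		// stream: true pipes each object to disk instead of buffering the body in memory.
		{ dest: 'app/', cwd: 'backup/', action: 'download', stream: true },

		// differential: skip files whose local MD5 matches the ETag and which are
		// not older than the object's LastModified date.
		{ dest: 'app/doc/', cwd: 'backup/doc/', action: 'download', differential: true }
	]
}
```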
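
Copies stay within the bucket: every key under the raw source prefix (task.orig.src[0]) is copied under dest with copyObject, and the ACL from options.access is re-applied to each copy. The target below is an assumption about how such an entry would be written, with placeholder prefixes:

```js
// Slots into the aws_s3 config above; prefixes are placeholders.
promote: {
	files: [
		// Copies every key under 'staging/' to 'production/' in the same bucket,
		// re-applying the ACL from options.access to each copy.
		{ src: 'staging/', dest: 'production/', action: 'copy' }
	]
}
```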
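
doCompressionRename only applies to extensions listed in compressionTypes: it sets ContentEncoding to the mapped value, derives ContentType from the inner extension, and optionally renames the destination ('ext' drops the compression suffix, 'compress' or 'gz' drops the inner extension, 'swap' swaps the two, so app.js.gz becomes app.js, app.gz or app.gz.js respectively). The mapping and target below are assumed examples, relying on Grunt's usual target-level options merge rather than on any documented default:

```js
// Slots into the aws_s3 config above; the compressionTypes mapping is an assumed example.
assets_compressed: {
	options: {
		compressionTypes: { '.gz': 'gzip', '.br': 'br' },
		compressionRename: 'ext' // app.js.gz -> app.js ('gz'/'compress' -> app.gz, 'swap' -> app.gz.js)
	},
	files: [
		// Pre-compressed files are uploaded with ContentEncoding taken from compressionTypes
		// and ContentType derived from the inner (original) extension.
		{ expand: true, cwd: 'dist/gz/', src: ['**/*.gz'], dest: 'app/' }
	]
}
```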
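
Finally, with debug set, deleteObjects, doUpload, doCopy and doDownload all short-circuit before touching AWS, so a run only reports what would change and queue.drain prints the yellow reminder that nothing was modified; displayChangesOnly additionally trims the upload report to the files that actually needed uploading. A hypothetical dry-run target:

```js
// Hypothetical dry-run target; nothing is written to S3 while debug is on.
dry_run: {
	options: { debug: true, displayChangesOnly: true },
	files: [
		{ expand: true, cwd: 'dist/', src: ['**'], dest: 'app/', differential: true }
	]
}
```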