├── test
│   ├── one
│   │   └── two
│   │       └── three
│   │           └── test.txt
│   ├── test.txt
│   ├── test.jpg
│   ├── test.tar.gz
│   ├── local.js
│   ├── gcs.js
│   ├── azure.js
│   └── s3.js
├── test.txt
├── test2.txt
├── test.jpg
├── test.webp
├── badges
│   ├── npm-audit-badge.png
│   └── npm-audit-badge.svg
├── logos
│   ├── logo-box-builtby.png
│   └── logo-box-madefor.png
├── s3TestOptions-sample.js
├── test.svg
├── defaultGzipBlacklist.js
├── .gitignore
├── azureTestOptions-sample.js
├── lib
│   ├── storage
│   │   ├── noGzipContentTypes.js
│   │   ├── gcs.js
│   │   ├── local.js
│   │   ├── s3.js
│   │   ├── contentTypes.js
│   │   └── azure.js
│   ├── utils.js
│   ├── copyFile.js
│   └── image
│       ├── sharp.js
│       └── imagemagick.js
├── gcsTestOptions-sample.js
├── azureReplicateTestOptions-sample.js
├── .eslintrc
├── LICENSE
├── package.json
├── sample.js
├── webp-test.js
├── test-sharp.js
├── test-imagemagick.js
├── CHANGELOG.md
└── uploadfs.js
/test/one/two/three/test.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/test.txt:
--------------------------------------------------------------------------------
1 | This is a test text file.
2 |
--------------------------------------------------------------------------------
/test/test.txt:
--------------------------------------------------------------------------------
1 | This is a test text file.
2 |
--------------------------------------------------------------------------------
/test2.txt:
--------------------------------------------------------------------------------
1 | This is a test text file, slightly different.
2 |
--------------------------------------------------------------------------------
/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/test.jpg
--------------------------------------------------------------------------------
/test.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/test.webp
--------------------------------------------------------------------------------
/test/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/test/test.jpg
--------------------------------------------------------------------------------
/test/test.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/test/test.tar.gz
--------------------------------------------------------------------------------
/badges/npm-audit-badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/badges/npm-audit-badge.png
--------------------------------------------------------------------------------
/logos/logo-box-builtby.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/logos/logo-box-builtby.png
--------------------------------------------------------------------------------
/logos/logo-box-madefor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/main/logos/logo-box-madefor.png
--------------------------------------------------------------------------------
/s3TestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | backend: 's3',
3 | secret: 'xxx',
4 | key: 'xxx',
5 | bucket: 'yourownbucketnamefromamazons3',
6 | region: 'us-west-2'
7 | };
8 |
--------------------------------------------------------------------------------
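Note: the *TestOptions-sample.js files are templates. Per .gitignore below, you
copy each to its non-sample name (e.g. s3TestOptions.js) with real credentials,
and the tests require that private copy (see test/gcs.js and test/azure.js
later in this dump). A minimal sketch of consuming such an options file; the
temp path here is an assumption:

    const uploadfs = require('./uploadfs.js')();
    const s3Options = require('./s3TestOptions.js'); // your private copy

    s3Options.tempPath = __dirname + '/temp';
    uploadfs.init(s3Options, function (e) {
      if (e) {
        throw e;
      }
      uploadfs.copyIn('test.txt', '/one/two/three/test.txt', function (e) {
        console.log(e ? 'copyIn failed: ' + e : 'copyIn OK');
      });
    });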
/test.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/defaultGzipBlacklist.js:
--------------------------------------------------------------------------------
1 | // By default, the following file types will not be gzipped.
2 | // Each is either (a) precompressed, or (b) poorly handled by a
3 | // browser with significant market share if compressed
4 | module.exports = [ 'jpg', 'png', 'zip', 'gzip', 'xls', 'docx', 'gif', 'mp4', 'webm' ];
5 |
--------------------------------------------------------------------------------
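Note: this default blacklist is consumed by the Azure backend's
getGzipBlacklist(), whose merge semantics are pinned down by test/azure.js
below: `{ ext: true }` removes a default entry (the user wants that type
gzipped after all), `{ ext: false }` adds a new one. A sketch of the observable
behavior, assuming an initialized uploadfs instance:

    // Semantics exercised by test/azure.js (illustration, not backend code)
    const types = uploadfs._storage.getGzipBlacklist({
      zip: true,  // user wants zip gzipped -> 'zip' leaves the blacklist
      foo: false  // user forbids gzipping foo -> 'foo' joins the blacklist
    });
    console.log(types.includes('zip')); // false
    console.log(types.includes('foo')); // true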
/.gitignore:
--------------------------------------------------------------------------------
1 | *.sw*
2 | gcs-credentials-uploadfstest.json
3 | copy-out-test.txt
4 | npm-debug.log
5 | .DS_Store
6 | node_modules
7 | s3TestOptions.js
8 | gcsTestOptions.js
9 | azureTestOptions.js
10 | public/uploads
11 | temp
12 | package-lock.json
13 | .jshintrc
14 | # an extra local test in my checkout
15 | test-jimp.js
16 |
--------------------------------------------------------------------------------
/azureTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | storage: 'azure',
3 | disabledFileKey: 'Any string is ok, probably longer is better',
4 | account: 'foo',
5 | container: 'bar',
6 | key: 'b@z'
7 | // If sas is true, the key should be a shared access signature (SAS) token;
8 | // otherwise it should be an account key (shared key credential).
9 | // sas: true
10 | };
11 |
--------------------------------------------------------------------------------
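Note: disabledFileKey opts the backend into renaming blobs, rather than
changing ACLs, when files are disabled; the rename scheme is the HMAC helper
in lib/utils.js below. A sketch of the effect, assuming an initialized
uploadfs instance with these options:

    uploadfs.disable('/one/two/three/test.txt', function (e) {
      // The blob now lives at
      // '/one/two/three/test.txt-disabled-<sha256 hmac hex>'
      // and is no longer fetchable at its original public URL;
      // uploadfs.enable() renames it back.
    });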
/lib/storage/noGzipContentTypes.js:
--------------------------------------------------------------------------------
1 | // Content types NOT suitable for gzip because
2 | // they are already compressed and it's not worth
3 | // the impact on phones etc. and/or it confuses
4 | // browsers & does not get the expected transfer
5 | // encoding header
6 |
7 | module.exports = [
8 | 'image/gif', 'image/jpeg', 'image/png', 'audio/mpeg', 'video/mpeg', 'video/mp4', 'video/webm', 'video/quicktime', 'application/zip', 'application/gzip', 'application/x-gtar'
9 | ];
10 |
--------------------------------------------------------------------------------
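Note: unlike defaultGzipBlacklist.js (file extensions), this list is keyed by
MIME type, for backends that decide about gzip at the content-type level. A
trivial membership check:

    const noGzip = require('./lib/storage/noGzipContentTypes.js');

    const shouldGzip = (contentType) => !noGzip.includes(contentType);
    console.log(shouldGzip('image/jpeg')); // false: already compressed
    console.log(shouldGzip('text/css'));   // true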
/gcsTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | // See https://cloud.google.com/docs/authentication/getting-started
3 | // basically you want a service account file on the filesystem with
4 | // the ENV variable GOOGLE_APPLICATION_CREDENTIALS pointing to it
5 | // If you are getting `Error: Invalid Grant`, this is likely your problem
6 | backend: 'gcs',
7 | bucket: 'yourownbucketnamefromgcs',
8 | region: 'us-west-2',
9 | validation: false // Can be one of false, "md5" or "crc32c", YMMV
10 | };
11 |
--------------------------------------------------------------------------------
/azureReplicateTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | storage: 'azure',
3 | disabledFileKey: 'Any string is ok, probably longer is better',
4 | replicateClusters: [
5 | {
6 | account: 'yourAccount',
7 | container: 'container1',
8 | key: 'top_secret_XYZ123'
9 | },
10 | {
11 | account: 'yourAccount',
12 | container: 'container2',
13 | key: 'top_secret_XYZ123'
14 | },
15 | {
16 | account: 'yourAccount2',
17 | container: 'account2_container1',
18 | sas: true,
19 | key: 'your_sas_token'
20 | }
21 | ]
22 | };
23 |
--------------------------------------------------------------------------------
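Note: with replicateClusters, the Azure backend maintains the same blob in
every listed container. A sketch of initialization; the replication details
live in lib/storage/azure.js, which is not shown in this section:

    const uploadfs = require('./uploadfs.js')();
    const options = require('./azureReplicateTestOptions-sample.js');

    options.tempPath = __dirname + '/temp';
    uploadfs.init(options, function (e) {
      if (e) {
        throw e;
      }
      // Assumption: writes and removals are applied to all three containers
      uploadfs.copyIn('test.txt', '/one/two/three/test.txt', console.log);
    });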
/badges/npm-audit-badge.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "apostrophe",
3 | "rules": {
4 | "no-console": 0,
5 | "node/no-callback-literal": 0
6 | },
7 | "overrides": [
8 | {
9 | "files": [ "**/public/**/*.js" ],
10 | "excludedFiles": [ "**/public/vendor/**/*.js" ],
11 | "globals": {
12 | "window": true,
13 | "document": true,
14 | "location": true,
15 | "apos": true,
16 | "_": true,
17 | "async": true,
18 | "confirm": true,
19 | "$": true,
20 | "CKEDITOR_BASEPATH": true,
21 | "CKEDITOR": true,
22 | "alert": true,
23 | "jQuery": true,
24 | "sluggo": true,
25 | "moog": true,
26 | "Pikaday": true,
27 | "moment": true
28 | }
29 | },
30 | {
31 | "files": [ "test/**/*.js" ],
32 | "globals": {
33 | "describe": true,
34 | "it": true,
35 | "after": true,
36 | "before": true
37 | }
38 | }
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013 P'unk Avenue LLC
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "uploadfs",
3 | "version": "1.26.0",
4 | "description": "Store files in a web-accessible location via a simplified API. Can automatically scale and rotate images. Includes S3, Azure and local filesystem-based backends with the most convenient features of each.",
5 | "main": "uploadfs.js",
6 | "scripts": {
7 | "test": "npm run testAzure && GOOGLE_APPLICATION_CREDENTIALS=gcs-credentials-uploadfstest.json mocha test/ && node test-imagemagick.js && eslint .",
8 | "testAzure": "env AZURE_TEST_FILE='test.jpg' mocha test/azure.js",
9 | "webp": "./webp-test.js",
10 | "lint-be": "eslint --fix 'lib/**/*.js'",
11 | "test-sharp": "npm run testAzure && GOOGLE_APPLICATION_CREDENTIALS=gcs-credentials-uploadfstest.json mocha test/ && node test-sharp.js && eslint ."
12 | },
13 | "repository": {
14 | "type": "git",
15 | "url": "git@github.com:apostrophecms/uploadfs.git"
16 | },
17 | "keywords": [
18 | "upload",
19 | "files",
20 | "s3",
21 | "storage"
22 | ],
23 | "author": "Apostrophe Technologies, Inc.",
24 | "license": "MIT",
25 | "dependencies": {
26 | "async": "^1.0.0",
27 | "bluebird": "^3.7.2",
28 | "es6-promise": "^4.1.0",
29 | "fs-extra": "^5.0.0",
30 | "gzipme": "^0.1.1",
31 | "lodash": "^4.17.21",
32 | "rimraf": "^5.0.7"
33 | },
34 | "optionalDependencies": {
35 | "@aws-sdk/client-s3": "^3.908.0",
36 | "@aws-sdk/lib-storage": "^3.908.0",
37 | "@azure/storage-blob": "^12.14.0",
38 | "@google-cloud/storage": "^7.17.0",
39 | "@smithy/node-http-handler": "^4.4.0",
40 | "sharp": "^0.32.6"
41 | },
42 | "devDependencies": {
43 | "eslint": "^8.0.0",
44 | "eslint-config-apostrophe": "^4.0.0",
45 | "mocha": "^10.2.0",
46 | "node-fetch": "^2.6.9",
47 | "stat-mode": "^0.2.2"
48 | }
49 | }
--------------------------------------------------------------------------------
/lib/utils.js:
--------------------------------------------------------------------------------
1 | const crypto = require('crypto');
2 | /**
3 | * Helper functions
4 | **/
5 | module.exports = {
6 | // Use an unguessable filename suffix to disable files.
7 | // This is secure at the web level if the webserver is not
8 | // configured to serve indexes of files, and it does not impede the
9 | // use of rsync etc. Used when options.disabledFileKey is set.
10 | // Use of an HMAC to do this for each filename ensures that even if
11 | // one such filename is exposed, the others remain secure
12 |
13 | getDisabledPath: function(path, disabledFileKey) {
14 | const hmac = crypto.createHmac('sha256', disabledFileKey);
15 | hmac.update(path);
16 | const disabledPath = path + '-disabled-' + hmac.digest('hex');
17 | return disabledPath;
18 | },
19 |
20 | getPathFromDisabledPath: function(path) {
21 | return path.replace(/-disabled-.*/g, '');
22 | },
23 |
24 | // Append a path to a bucket's base URL, with a joining slash if not provided.
25 | // This is shared by several backends, while others have their own path
26 | // handling needs. We want to ensure that both `path/to/file` (which others
27 | // sometimes use) and `/path/to/file` (always used by Apostrophe) behave
28 | // reasonably.
29 | //
30 | // If `path` is nullish `url` is returned as-is.
31 | //
32 | // If `options.strictPaths` is `true`, we do not attempt to provide a slash
33 | // when needed
34 |
35 | addPathToUrl(options, url, path) {
36 | if (options.strictPaths) {
37 | if (path != null) {
38 | return url + path;
39 | } else {
40 | return url;
41 | }
42 | } else {
43 | if (path != null) {
44 | return url + ((path.charAt(0) !== '/') ? '/' : '') + path;
45 | } else {
46 | return url;
47 | }
48 | }
49 | },
50 |
51 | // Leading slashes were the norm with knox, but
52 | // produce unwanted extra slashes in the URL with
53 | // the AWS SDK for S3 and in GCS, so return the
54 | // string without them.
55 | //
56 | // If `options.strictPaths` is true, we do not
57 | // make this modification.
58 |
59 | removeLeadingSlash(options, key) {
60 | if (options.strictPaths) {
61 | return key;
62 | } else {
63 | return key.replace(/^\//, '');
64 | }
65 | }
66 |
67 | };
68 |
--------------------------------------------------------------------------------
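Note: the helpers above are small enough to pin down by example; all of the
following can be read off the implementation:

    const utils = require('./lib/utils');

    // HMAC-suffixed "disabled" name: unguessable without the key
    const hidden = utils.getDisabledPath('/images/me.jpg', 'some-long-key');
    // -> '/images/me.jpg-disabled-<64 hex chars>'
    utils.getPathFromDisabledPath(hidden); // -> '/images/me.jpg'

    // A joining slash is supplied only when missing...
    utils.addPathToUrl({}, 'https://bucket.example.com', 'a/b.jpg');
    // -> 'https://bucket.example.com/a/b.jpg'
    // ...unless strictPaths puts the caller in charge:
    utils.addPathToUrl({ strictPaths: true }, 'https://bucket.example.com', 'a/b.jpg');
    // -> 'https://bucket.example.coma/b.jpg'

    utils.removeLeadingSlash({}, '/a/b.jpg'); // -> 'a/b.jpg'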
/sample.js:
--------------------------------------------------------------------------------
1 | // An extremely simple app that accepts uploaded files
2 | // and stores them in either a local folder or s3,
3 | // depending on which backend you choose.
4 |
5 | const express = require('express');
6 | const uploadfs = require('uploadfs')();
7 | const multipart = require('connect-multiparty');
8 | const multipartMiddleware = multipart();
9 | const path = require('path');
10 |
11 | // For the local backend
12 | const uploadsPath = path.join(__dirname, '/public/uploads');
13 | const uploadsLocalUrl = '/uploads';
14 | const options = {
15 | backend: 'local',
16 | uploadsPath,
17 | uploadsUrl: 'http://localhost:3000' + uploadsLocalUrl,
18 | // Required if you use imageSizes and copyImageIn
19 | tempPath: path.join(__dirname, '/temp'),
20 | imageSizes: [
21 | {
22 | name: 'small',
23 | width: 320,
24 | height: 320
25 | },
26 | {
27 | name: 'medium',
28 | width: 640,
29 | height: 640
30 | },
31 | {
32 | name: 'large',
33 | width: 1140,
34 | height: 1140
35 | }
36 | ]
37 | };
38 |
39 | uploadfs.init(options, createApp);
40 |
41 | function createApp(err) {
42 | if (err) {
43 | console.log(err);
44 | process.exit(1);
45 | }
46 | const app = express();
47 |
48 | // For the local backend: serve the uploaded files at /uploads.
49 | // With the s3 backend you don't need this of course, s3 serves
50 | // the files for you.
51 |
52 | app.use(uploadsLocalUrl, express.static(uploadsPath));
53 |
54 | app.get('/', function(req, res) {
55 | res.send('<form method="POST" enctype="multipart/form-data">' +
56 | '<input type="file" name="photo" /> <input type="submit" value="Upload" />' +
57 | '</form>');
58 | });
59 |
60 | app.post('/', multipartMiddleware, function(req, res) {
61 | uploadfs.copyImageIn(req.files.photo.path, '/profiles/me', function(e, info) {
62 | if (e) {
63 | res.send('An error occurred: ' + e);
64 | } else {
65 | res.send('All is well. Here is the image in three sizes plus the original.<br />' +
66 | '<img src="' + uploadfs.getUrl() + info.basePath + '.small.jpg" /> ' +
67 | '<img src="' + uploadfs.getUrl() + info.basePath + '.medium.jpg" /> ' +
68 | '<img src="' + uploadfs.getUrl() + info.basePath + '.large.jpg" /> ' +
69 | '<img src="' + uploadfs.getUrl() + info.basePath + '.jpg" />');
70 | }
71 | });
72 | });
73 | app.listen(3000);
74 | console.log('Listening at http://localhost:3000');
75 | }
76 |
--------------------------------------------------------------------------------
/lib/copyFile.js:
--------------------------------------------------------------------------------
1 | // Copy a file reliably, with error handling.
2 | // path1 is the original file, path2 is the new file.
3 | // "options" is used in internal recursive calls and
4 | // may be omitted.
5 | //
6 | // Creates any necessary parent folders of path2 automatically.
7 |
8 | const fs = require('fs');
9 | const path = require('path');
10 |
11 | const copy = module.exports = function(path1, path2, options, callback) {
12 | let failed = false;
13 | let retryingWithMkdirp = false;
14 | if (!callback) {
15 | callback = options;
16 | options = {};
17 | }
18 | // Other people's implementations of fs.copy() lack
19 | // error handling, let's be thorough and also implement
20 | // a retry that does mkdirp() for consistency with S3
21 | const sin = fs.createReadStream(path1);
22 | const sout = fs.createWriteStream(path2);
23 |
24 | sin.on('error', function(e) {
25 | if (failed) {
26 | return;
27 | }
28 | failed = true;
29 | errorCleanup();
30 | return callback(e);
31 | });
32 |
33 | sout.on('error', function(e) {
34 | if (failed) {
35 | return;
36 | }
37 | // If the destination folder doesn't exist yet,
38 | // retry the whole thing after recursively creating
39 | // the folder and its parents as needed, avoiding the
40 | // overhead of checking for folders in the majority
41 | // of cases where they already exist.
42 | //
43 | // Try this up to 100 times to guard against race conditions
44 | // with the empty directory cleanup mechanism: as long as
45 | // there are fewer than 100 node processes running this backend
46 | // at once, it should not be possible for a sudden burst
47 | // of rmdir()s to defeat the mkdir() mechanism.
48 | //
49 | // Note that there will only be one node process unless you're using
50 | // cluster, multiple Heroku dynos, or something similar.
51 | //
52 | // If you have more than 100 CPU cores bashing on this folder,
53 | // I respectfully suggest it may be time for the
54 | // S3 backend anyway.
55 |
56 | if ((e.code === 'ENOENT') && ((!options.afterMkdirp) || (options.afterMkdirp <= 100))) {
57 | retryingWithMkdirp = true;
58 | return mkdirp(path.dirname(path2), function (e) {
59 | if (e) {
60 | if (failed) {
61 | return;
62 | }
63 | return callback(e);
64 | }
65 | options.afterMkdirp = options.afterMkdirp ? (options.afterMkdirp + 1) : 1;
66 | return copy(path1, path2, options, callback);
67 | });
68 | }
69 | errorCleanup();
70 | failed = true;
71 | return callback(e);
72 | });
73 |
74 | sout.on('close', function() {
75 | if (retryingWithMkdirp) {
76 | // This is the original stream closing after error (in node 16+
77 | // we always get a close event even on an error), don't consider
78 | // this success, but don't worry either as we're going to try
79 | // again after mkdirp
80 | return;
81 | }
82 | if (failed) {
83 | // We already reported an error
84 | return;
85 | }
86 | // Report success
87 | return callback(null);
88 | });
89 |
90 | // Carry out the actual copying
91 | sin.pipe(sout);
92 |
93 | function errorCleanup() {
94 | // This will fail if we weren't able to write to
95 | // path2 in the first place; don't get excited
96 | fs.unlink(path2, function(e) { });
97 | }
98 | };
99 |
100 | // Legacy-compatible, tested implementation of mkdirp without
101 | // any npm audit vulnerabilities
102 |
103 | function mkdirp(dir, callback) {
104 | dir = path.resolve(dir);
105 | return fs.mkdir(dir, function(err) {
106 | if (!err) {
107 | return callback(null);
108 | }
109 | if (err.code === 'EEXIST') {
110 | return callback(null);
111 | }
112 | if (err.code === 'ENOENT') {
113 | const newDir = path.dirname(dir);
114 | if (newDir === dir) {
115 | return callback(err);
116 | }
117 | return mkdirp(newDir, function(err) {
118 | if (err) {
119 | return callback(err);
120 | }
121 | return mkdirp(dir, callback);
122 | });
123 | }
124 | return callback(err);
125 | });
126 | }
127 |
--------------------------------------------------------------------------------
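Note: usage of the exported copy() is plain callback style; options may be
omitted, and missing parent folders of the destination are created on demand
via the ENOENT retry above:

    const copyFile = require('./lib/copyFile');

    copyFile('/tmp/in.txt', '/tmp/deeply/nested/out.txt', function (e) {
      if (e) {
        return console.error('copy failed:', e);
      }
      console.log('copy succeeded, parent folders created as needed');
    });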
/lib/storage/gcs.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Google Cloud Storage backend for uploadfs. See also
4 | // local.js.
5 |
6 | const storage = require('@google-cloud/storage');
7 | const extname = require('path').extname;
8 | const _ = require('lodash');
9 | const utils = require('../utils');
10 | const path = require('path');
11 |
12 | module.exports = function() {
13 | let contentTypes;
14 | let client;
15 | let cachingTime;
16 | let https;
17 | let bucketName;
18 | let endpoint = 'storage.googleapis.com';
19 | let defaultTypes;
20 | let noProtoEndpoint;
21 | let validation = false;
22 |
23 | const self = {
24 | init: function (options, callback) {
25 | if (!(process.env.GOOGLE_APPLICATION_CREDENTIALS)) {
26 | return callback('GOOGLE_APPLICATION_CREDENTIALS not set in env, cannot proceed');
27 | }
28 | if (options.validation) {
29 | validation = options.validation;
30 | }
31 | // Ultimately the result will look like https://storage.googleapis.com/[BUCKET_NAME]/[OBJECT_NAME]
32 | // The rest mostly follows the s3/knox endpoint conventions.
33 | if (options.endpoint) {
34 | endpoint = options.endpoint;
35 | if (!endpoint.match(/^https?:/)) {
36 | const defaultSecure = ((!options.port) || (options.port === 443));
37 | const secure = options.secure || defaultSecure;
38 | let port = options.port || 443;
39 | const protocol = secure ? 'https://' : 'http://';
40 | if (secure && (port === 443)) {
41 | port = '';
42 | } else if ((!secure) && (port === 80)) {
43 | port = '';
44 | } else {
45 | port = ':' + port;
46 | }
47 | endpoint = protocol + endpoint + port;
48 | }
49 | }
50 | // The storage client auth relies on the presence of the service account
51 | // file path expressed in the environment variable
52 | // GOOGLE_APPLICATION_CREDENTIALS and, of course, the presence of such file.
53 | //
54 | //
55 | // See https://cloud.google.com/docs/authentication/getting-started
56 | client = new storage.Storage();
57 | bucketName = options.bucket;
58 | defaultTypes = require(path.join(__dirname, '/contentTypes.js'));
59 | if (options.contentTypes) {
60 | contentTypes = _.extend({}, defaultTypes, options.contentTypes);
61 | } else {
62 | contentTypes = defaultTypes;
63 | }
64 | https = options.https;
65 | cachingTime = options.cachingTime;
66 | self.options = options;
67 | return callback(null);
68 | },
69 |
70 | copyIn: function(localPath, path, options, callback) {
71 | path = utils.removeLeadingSlash(self.options, path);
72 | let ext = extname(path);
73 | if (ext.length) {
74 | ext = ext.substr(1);
75 | }
76 | let contentType = contentTypes[ext];
77 | if (!contentType) {
78 | contentType = 'application/octet-stream';
79 | }
80 |
81 | let cacheControl = 'no-cache';
82 | if (cachingTime) {
83 | cacheControl = 'public, max-age=' + cachingTime;
84 | }
85 | const uploadOptions = {
86 | destination: path,
87 | gzip: true,
88 | public: true,
89 | validation,
90 | metadata: {
91 | cacheControl,
92 | contentType
93 | }
94 | };
95 | const bucket = client.bucket(bucketName);
96 | return bucket.upload(localPath, uploadOptions, callback);
97 | },
98 |
99 | copyOut: function(path, localPath, options, callback) {
100 | path = utils.removeLeadingSlash(self.options, path);
101 | const mergedOptions = _.assign({
102 | destination: localPath,
103 | validation
104 | }, options);
105 | client.bucket(bucketName).file(path).download(mergedOptions, callback);
106 | },
107 |
108 | remove: function(path, callback) {
109 | path = utils.removeLeadingSlash(self.options, path);
110 | client.bucket(bucketName).file(path).delete({}, callback);
111 | },
112 |
113 | enable: function(path, callback) {
114 | path = utils.removeLeadingSlash(self.options, path);
115 | client.bucket(bucketName).file(path).makePublic(callback);
116 | },
117 |
118 | disable: function(path, callback) {
119 | path = utils.removeLeadingSlash(self.options, path);
120 | client.bucket(bucketName).file(path).makePrivate({}, callback);
121 | },
122 |
123 | getUrl: function (path) {
124 | noProtoEndpoint = endpoint.replace(/^https?:\/\//i, '');
125 | const url = (https ? 'https://' : 'http://') + bucketName + '.' + noProtoEndpoint;
126 | return utils.addPathToUrl(self.options, url, path);
127 | },
128 |
129 | destroy: function(callback) {
130 | // No file descriptors or timeouts held
131 | return callback(null);
132 | }
133 | };
134 | return self;
135 | };
136 |
--------------------------------------------------------------------------------
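Note: this backend is normally reached through uploadfs.init() with
backend: 'gcs' (see gcsTestOptions-sample.js above). A minimal sketch,
assuming GOOGLE_APPLICATION_CREDENTIALS is exported and the bucket exists:

    const uploadfs = require('./uploadfs.js')();

    uploadfs.init({
      backend: 'gcs',
      bucket: 'yourownbucketnamefromgcs',
      tempPath: __dirname + '/temp'
    }, function (e) {
      if (e) {
        throw e;
      }
      uploadfs.copyIn('test.txt', '/one/two/three/test.txt', function (e) {
        if (!e) {
          console.log(uploadfs.getUrl() + '/one/two/three/test.txt');
        }
      });
    });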
/test/local.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const Mode = require('stat-mode');
3 | const assert = require('assert');
4 | const path = require('path');
5 |
6 | describe('UploadFS Local', function () {
7 | this.timeout(4500);
8 | const uploadfs = require('../uploadfs.js')();
9 | const fs = require('fs');
10 | const async = require('async');
11 | const tempPath = path.join(__dirname, '/temp');
12 | const localOptions = {
13 | storage: 'local',
14 | uploadsPath: path.join(__dirname, '/files/'),
15 | uploadsUrl: 'http://localhost:3000/test/'
16 | };
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | localOptions.imageSizes = imageSizes;
36 | localOptions.tempPath = tempPath;
37 |
38 | it('Should instantiate uploadfs module without errors', done => {
39 | return uploadfs.init(localOptions, e => {
40 | assert(!e);
41 | done();
42 | });
43 | });
44 |
45 | it('copyIn should work for local filesystem', done => {
46 | return uploadfs.copyIn('./test.txt', '/test_copy.txt', e => {
47 | assert(!e);
48 | const og = fs.readFileSync('./test.txt', 'utf8');
49 | const next = fs.readFileSync('./test/files/test_copy.txt', 'utf8');
50 | assert(og.length, 'lengthy');
51 | assert(next.length, 'lengthy');
52 | assert(og === next, 'Copies are equal');
53 | done();
54 | });
55 | });
56 |
57 | it('copyOut should work for local filesystem', done => {
58 | return uploadfs.copyOut('/test_copy.txt', 'copy-out-test.txt', e => {
59 | assert(!e);
60 | const og = fs.readFileSync('./test.txt', 'utf8');
61 | const next = fs.readFileSync('./copy-out-test.txt', 'utf8');
62 | assert(og.length, 'lengthy');
63 | assert(next.length, 'lengthy');
64 | assert(og === next, 'Copied files are equal');
65 | done();
66 | });
67 | });
68 |
69 | it('streamOut should work for local filesystem', async function() {
70 | const input = uploadfs.streamOut('/test_copy.txt');
71 | const chunks = [];
72 | for await (let chunk of input) {
73 | chunks.push(chunk);
74 | }
75 | const data = Buffer.concat(chunks);
76 | const og = fs.readFileSync('test.txt');
77 | assert(data.equals(og));
78 | });
79 |
80 | it('overwrite with copyIn should work for local filesystem', done => {
81 | return uploadfs.copyIn('./test2.txt', '/test_copy.txt', e => {
82 | assert(!e);
83 | const og = fs.readFileSync('./test2.txt', 'utf8');
84 | const next = fs.readFileSync('./test/files/test_copy.txt', 'utf8');
85 | assert(og.length, 'lengthy');
86 | assert(next.length, 'lengthy');
87 | assert(og === next, 'Copies are equal');
88 | done();
89 | });
90 | });
91 |
92 | it('copyOut should see update for local filesystem', done => {
93 | return uploadfs.copyOut('/test_copy.txt', 'copy-out-test.txt', e => {
94 | assert(!e);
95 | const og = fs.readFileSync('./test2.txt', 'utf8');
96 | const next = fs.readFileSync('./copy-out-test.txt', 'utf8');
97 | assert(og.length, 'lengthy');
98 | assert(next.length, 'lengthy');
99 | assert(og === next, 'Copied files are equal');
100 | done();
101 | });
102 | });
103 |
104 | it('Test disable / enable functionality', done => {
105 | const srcFile = '/test_copy.txt';
106 | const infile = './test/files/test_copy.txt';
107 |
108 | return async.series({
109 | disable: cb => {
110 | assert(fs.existsSync(infile), 'copyIn file exists');
111 |
112 | uploadfs.disable(srcFile, e => {
113 | const stats = fs.statSync(infile);
114 | const mode = new Mode(stats);
115 | assert(!e, 'uploadfs disable success!');
116 | assert(mode.toString() === '----------', 'File permissions locked down');
117 | return cb(null);
118 | });
119 | },
120 | enable: cb => {
121 | uploadfs.enable(srcFile, e => {
122 | const stats = fs.statSync(infile);
123 | const mode = new Mode(stats);
124 | assert(!e, 'uploadfs enable success!');
125 | assert(mode.toString() === '-rw-r--r--', 'Enabled file has expected permissions');
126 | assert(fs.existsSync(infile), 'copyIn visible to fs');
127 | return cb(null);
128 | });
129 | },
130 | testCopyOut: cb => {
131 | const outsucceeds = 'copy-out-test.txt';
132 | uploadfs.copyOut(srcFile, outsucceeds, e => {
133 | assert(!e, 'copyOut succeeds again after enable');
134 | return cb(null);
135 | });
136 | },
137 | testDelete: cb => {
138 | uploadfs.remove(srcFile, e => {
139 | assert(!e, 'Delete file succeeds');
140 | assert(!fs.existsSync(infile), 'uploadfs delete file is gone from local fs');
141 | return cb(null);
142 | });
143 | }
144 | }, function (e) {
145 | fs.unlinkSync('copy-out-test.txt');
146 | assert(!e);
147 | done();
148 | });
149 | });
150 |
151 | it('Should destroy uploadfs module without errors', done => {
152 | return uploadfs.destroy(e => {
153 | assert(!e);
154 | done();
155 | });
156 | });
157 |
158 | });
159 |
--------------------------------------------------------------------------------
/lib/image/sharp.js:
--------------------------------------------------------------------------------
1 | const Sharp = require('sharp');
2 |
3 | module.exports = function () {
4 | return {
5 | /**
6 | * Initialize the module.
7 | */
8 | init: function (options, callback) {
9 | return callback(null);
10 | },
11 |
12 | destroy: function (callback) {
13 | return callback(null);
14 | },
15 |
16 | /**
17 | * Identify a local image file.
18 | *
19 | * If the file is not an image or is too defective to be identified an error is
20 | * passed to the callback.
21 | *
22 | * @param {String} path Local filesystem path to image file
23 | * @param {Function} callback Receives the usual err argument, followed by an
24 | * object with extension, width, height, orientation, originalWidth and
25 | * originalHeight properties.
26 | *
27 | * @see Uploadfs#copyImageIn
28 | */
29 |
30 | identify: function (path, callback) {
31 | // Identify the file type, size, etc. Stuff them into context.info and
32 | // context.extension. Also sets context.info.animated to true if
33 | // an animated GIF is found.
34 |
35 | const info = {};
36 | return Sharp(path).metadata(function (err, metadata) {
37 | if (err) {
38 | return callback(err);
39 | }
40 |
41 | info.originalWidth = metadata.width;
42 | info.originalHeight = metadata.height;
43 | // if exif header data isn't present, default to current orientation being correct
44 | info.orientation = metadata.orientation || 1;
45 | info.width = info.orientation < 5 ? metadata.width : metadata.height;
46 | info.height = info.orientation < 5 ? metadata.height : metadata.width;
47 | info.extension = metadata.format === 'jpeg' ? 'jpg' : metadata.format;
48 | info.animation = metadata.pages > 1;
49 | return callback(null, info);
50 | });
51 | },
52 |
53 | /**
54 | * Generate one or more scaled versions of an image file.
55 | *
56 | * INPUT
57 | *
58 | * The options that may be passed in the context object are:
59 | *
60 | * workingPath: path to the original file (required)
61 | *
62 | * extension: true file extension of original file as
63 | * determined by a previous call to identify (required).
64 | *
65 | * info.width, info.height: the width and height of the rotated image
66 | *
67 | * sizes (required): array of objects with width and height
68 | * properties which are treated as maximums for each axis; the resulting image
69 | * will never exceed the original size, and will otherwise be scaled to
70 | * fill as much of the requested size as possible without changing the aspect
71 | * ratio. Files are generated in the temp folder with a filename made up of the
72 | * name property of the size, a '.', and the extension property of the
73 | * context object.
74 | *
75 | * tempFolder: folder where the scaled versions should be written
76 | * (required)
77 | *
78 | * crop: optional object with top, left, width and height properties
79 | *
80 | * scaledJpegQuality: quality setting for JPEGs (optional; otherwise
81 | * you get whatever default was compiled into sharp)
82 | *
83 | * copyOriginal: if true, copy the "original" image to the tempFolder too,
84 | * but do auto-orient it so that iPhone photos etc. work on the web
85 | *
86 | * All images, including the "original" if copyOriginal is set, are
87 | * auto-rotated to the orientation expected by web browsers.
88 | *
89 | * OUTPUT
90 | *
91 | * After the operation is complete, the following property of the
92 | * context object will be set if the copyOriginal option was set:
93 | *
94 | * adjustedOriginal: will contain the local filesystem path where the
95 | * original was copied (and rotated, if needed).
96 | *
97 | * @param {[type]} context [description]
98 | * @param {Function} callback [description]
99 | * @return {[type]} [description]
100 | */
101 |
102 | convert: function (context, callback) {
103 | // This is for a non-animated image
104 | const _info = context.info;
105 | const isAnimated = _info.animation;
106 | const noCrop = {
107 | left: 0,
108 | top: 0,
109 | width: _info.width,
110 | height: _info.height
111 | };
112 | const crop = context.crop ? context.crop : noCrop;
113 |
114 | const pipeline = Sharp(context.workingPath, { animated: isAnimated })
115 | .rotate()
116 | .extract({
117 | left: crop.left,
118 | top: crop.top,
119 | width: crop.width,
120 | height: crop.height
121 | });
122 |
123 | const promises = [];
124 |
125 | if (context.copyOriginal) {
126 | const copyPath = `${context.tempFolder}/original.${context.extension}`;
127 | context.adjustedOriginal = copyPath;
128 | promises.push(pipeline.clone().withMetadata().toFile(copyPath));
129 | }
130 |
131 | promises.push(sizeOperation());
132 |
133 | Promise.all(promises)
134 | .then((res) => {
135 | return callback(null);
136 | })
137 | .catch((err) => {
138 | console.error(err);
139 | return callback(err);
140 | });
141 |
142 | async function sizeOperation() {
143 | await Promise.all(
144 | context.sizes.map(async (size) => {
145 | const sizePath = `${context.tempFolder}/${size.name}.${context.extension}`;
146 | const width = Math.min(size.width, context.info.width);
147 | const height = Math.min(size.height, context.info.height);
148 | const sizePipeline = pipeline.clone();
149 | sizePipeline.resize({
150 | width,
151 | height,
152 | fit: 'inside'
153 | });
154 | if (context.extension === 'jpg') {
155 | const quality = context.scaledJpegQuality
156 | ? context.scaledJpegQuality
157 | : 80;
158 | sizePipeline.jpeg({ quality });
159 | }
160 | await sizePipeline.toFile(sizePath);
161 | })
162 | );
163 | }
164 | }
165 | };
166 | };
167 |
--------------------------------------------------------------------------------
/test/gcs.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const assert = require('assert');
3 | const fetch = require('node-fetch');
4 |
5 | describe('UploadFS GCS', function () {
6 | this.timeout(20000);
7 | const uploadfs = require('../uploadfs.js')();
8 | const fs = require('fs');
9 | const async = require('async');
10 | const tempPath = '../temp';
11 | const dstPath = '/one/two/three/test.txt';
12 | const imageSizes = [
13 | {
14 | name: 'small',
15 | width: 320,
16 | height: 320
17 | },
18 | {
19 | name: 'medium',
20 | width: 640,
21 | height: 640
22 | },
23 | {
24 | name: 'large',
25 | width: 1140,
26 | height: 1140
27 | }
28 | ];
29 |
30 | const gcsOptions = require('../gcsTestOptions.js');
31 |
32 | gcsOptions.imageSizes = imageSizes;
33 | gcsOptions.tempPath = tempPath;
34 | gcsOptions.params = {
35 | Bucket: gcsOptions.bucket
36 | };
37 |
38 | it('uploadfs should init gcs connection without error', function(done) {
39 | return uploadfs.init(gcsOptions, function(e) {
40 | if (e) {
41 | console.log('=======E', e);
42 | }
43 | assert(!e, 'gcs init without error');
44 | uploadfs.copyIn('test.txt', dstPath, function(e) {
45 | if (e) {
46 | console.log('=======EE', e);
47 | }
48 | assert(!e, 'gcs copyIn without error');
49 | done();
50 | });
51 | });
52 | });
53 |
54 | it('CopyIn should work', function (done) {
55 | return uploadfs.copyIn('test.txt', dstPath, function(e) {
56 | assert(!e, 'gcs copyIn without error');
57 | done();
58 | });
59 | });
60 |
61 | it('CopyIn file should be available via gcs', function () {
62 | const url = uploadfs.getUrl() + '/one/two/three/test.txt';
63 | const og = fs.readFileSync('test.txt', 'utf8');
64 |
65 | return fetch(url, {
66 | method: 'GET',
67 | headers: {
68 | 'Accept-Encoding': 'gzip',
69 | 'Content-type': 'text/plain; charset=utf-8'
70 | }
71 | })
72 | .then(function (response) {
73 | assert(response.status === 200, `Request status 200 != ${response.status}`);
74 | return response.text();
75 |
76 | })
77 | .then(function (content) {
78 | assert.strictEqual(content, og, 'Res body equals uploaded file');
79 | });
80 | });
81 |
82 | it('CopyOut should work', done => {
83 | const cpOutPath = 'copy-out-test.txt';
84 | return uploadfs.copyOut(dstPath, cpOutPath, e => {
85 | assert(!e, 'gcs copyOut without error');
86 | const dl = fs.readFileSync(cpOutPath, 'utf8');
87 | const og = fs.readFileSync('test.txt', 'utf8');
88 | assert(dl === og, 'Downloaded file is equal to previous upload');
89 | done();
90 | });
91 | });
92 |
93 | it('disable / enable should work as expected', done => {
94 | return async.series({
95 | disable: cb => {
96 | uploadfs.disable(dstPath, e => {
97 | assert(!e, 'uploadfs disable no err');
98 | cb(null);
99 | });
100 | },
101 | webShouldFail: cb => {
102 | const url = uploadfs.getUrl() + dstPath;
103 | return fetch(url, {
104 | method: 'GET'
105 | })
106 | .then(function (response) {
107 | assert(response.status >= 400, 'Request on disabled resource should fail: expected 40x, got ' + response.status);
108 | cb(null);
109 | })
110 | .catch(cb);
111 | },
112 | enable: cb => {
113 | uploadfs.enable(dstPath, e => {
114 | assert(!e, 'uploadfs enable should not fail');
115 | cb(null);
116 | });
117 | },
118 | webShouldSucceed: cb => {
119 | const url = uploadfs.getUrl() + dstPath;
120 | const og = fs.readFileSync('test.txt', 'utf8');
121 |
122 | return fetch(url, {
123 | method: 'GET'
124 | })
125 | .then(function (res) {
126 | assert(res.status < 400, 'Request for enabled resource should not fail');
127 | // Don't get fussed about presence or absence of UTF-8 in this header
128 | assert(res.headers.get('content-type').match(/text\/plain/),
129 | `Check content-type header expected "text/plain" but got "${res.headers.get('content-type')}"`);
130 | return res.text();
131 | })
132 | .then(function (content) {
133 | assert.strictEqual(og, content, 'Downloaded content should be equal to previous upload');
134 | cb(null);
135 | })
136 | .catch(cb);
137 | }
138 | }, e => {
139 | assert(!e, 'Series should succeed');
140 | done();
141 | });
142 | });
143 |
144 | it('remove should work', done => {
145 | return uploadfs.remove(dstPath, e => {
146 | assert(!e, 'Remove should succeed');
147 |
148 | setTimeout(() => {
149 | const url = uploadfs.getUrl() + dstPath;
150 | fetch(url, {
151 | method: 'GET'
152 | })
153 | .then(function (res) {
154 | assert(res.status >= 400, 'Removed file is gone from gcs');
155 | done();
156 | })
157 | .catch(done);
158 | }, 5000);
159 | });
160 | });
161 |
162 | it('copyImageIn should work', done => {
163 | const imgDstPath = '/images/profiles/me';
164 |
165 | uploadfs.copyImageIn('test.jpg', imgDstPath, (e, info) => {
166 | assert(!e, 'gcs copyImageIn works');
167 |
168 | const url = uploadfs.getUrl();
169 | const paths = [ info.basePath + '.jpg' ];
170 |
171 | paths.push(info.basePath + '.small.jpg');
172 | paths.push(info.basePath + '.medium.jpg');
173 | paths.push(info.basePath + '.large.jpg');
174 |
175 | async.map(paths, (path, cb) => {
176 | const imgPath = url + path;
177 |
178 | fetch(imgPath, {
179 | method: 'GET'
180 | })
181 | .then(function(res) {
182 | assert(res.status === 200, `Request status 200 != ${res.status}`);
183 | return res.text();
184 | }).then(function(res) {
185 | /* @@TODO we should test the correctness of uploaded images */
186 |
187 | // clean up
188 | uploadfs.remove(path, e => {
189 | assert(!e, 'Remove uploaded file after testing');
190 | return cb();
191 | });
192 | })
193 | .catch(cb);
194 | }, e => {
195 | assert(!e, 'Can request all copyImageInned images');
196 | done();
197 | });
198 | });
199 | });
200 | });
201 |
--------------------------------------------------------------------------------
/test/azure.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const assert = require('assert');
3 | const fs = require('fs');
4 | const fetch = require('node-fetch');
5 | const uploadfs = require('../uploadfs.js')();
6 | // A JPEG is not a good default because it is exempt from GZIP so
7 | // we get less coverage. -Tom
8 | const srcFile = process.env.AZURE_TEST_FILE || 'test.txt';
9 | const infilePath = 'one/two/three/';
10 | const infile = infilePath + srcFile;
11 | const _ = require('lodash');
12 |
13 | /* helper to automate scraping files from blob svc */
14 | const _getOutfile = function(infile, done) {
15 | const tmpFileName = new Date().getTime() + srcFile;
16 | const ogFile = fs.readFileSync(srcFile, { encoding: 'utf8' });
17 |
18 | return uploadfs.copyOut(infile, tmpFileName, {}, function (e, res) {
19 | try {
20 | assert(!e, 'Azure copy out nominal success');
21 | const content = fs.readFileSync(tmpFileName, { encoding: 'utf8' });
22 | assert(content.length, 'copyOut file has length');
23 | assert(ogFile.length, 'original file body has length');
24 | // console.log(ogFile, content);
25 | assert(ogFile === content, 'Azure copy out equal to original text file');
26 | fs.unlinkSync(tmpFileName);
27 | done();
28 | } catch (ae) {
29 | done(ae);
30 | }
31 | });
32 | };
33 |
34 | describe('UploadFS Azure', function() {
35 | this.timeout(40000);
36 |
37 | const tempPath = '../temp';
38 |
39 | const azureOptions = require('../azureTestOptions.js');
40 | azureOptions.tempPath = tempPath;
41 |
42 | it('Should connect to Azure cloud successfully', function(done) {
43 | uploadfs.init(azureOptions, function(e) {
44 | if (e) {
45 | console.log('error', e);
46 | }
47 | try {
48 | assert(!e, 'Successfully initialize azure service');
49 | done();
50 | } catch (ae) {
51 | done(ae);
52 | }
53 | });
54 | });
55 |
56 | it('getGzipBlackList should return expected defaults if no options provided', function() {
57 | const types = uploadfs._storage.getGzipBlacklist();
58 | assert(Array.isArray(types), 'gzip blacklist array is an array');
59 | assert(types && types.indexOf('zip') >= 0);
60 | });
61 |
62 | it('getGzipBlacklist should be able to remove a type from the blacklist based on user settings', function() {
63 | const types = uploadfs._storage.getGzipBlacklist({ zip: true });
64 | assert(Array.isArray(types), 'gzip blacklist array is an array');
65 | assert(types && types.indexOf('zip') < 0);
66 | });
67 |
68 | it('getGzipBlacklist should be able to add a type to the blacklist based on user settings', function() {
69 | const types = uploadfs._storage.getGzipBlacklist({ foo: false });
70 | assert(Array.isArray(types), 'gzip blacklist array is an array');
71 | assert(types && types.indexOf('foo') >= 0);
72 | });
73 |
74 | it('getGzipBlacklist should quietly ignore `{ ext: true }` in user config if ext is not on default blacklist', function() {
75 | const types = uploadfs._storage.getGzipBlacklist({ foo: true });
76 | assert(Array.isArray(types), 'gzip blacklist array is an array');
77 | assert(types && types.indexOf('foo') < 0, 'Filetype foo is not added to the blacklist if user wants to gzip it');
78 | });
79 |
80 | it('getGzipBlacklist should ignore duplicates', function() {
81 | const types = uploadfs._storage.getGzipBlacklist({
82 | jpg: false,
83 | zip: false
84 | });
85 | const counts = _.countBy(types);
86 | assert(counts.jpg === 1, 'No duplicate jpg type is present, despite it all');
87 | });
88 |
89 | it('Azure test copyIn should work', function(done) {
90 |
91 | uploadfs.copyIn(srcFile, infile, function(e) {
92 | if (e) {
93 | console.log('test copyIn ERR', e);
94 | }
95 | try {
96 | assert(!e, 'Azure copy in - nominal success');
97 | done();
98 | } catch (ae) {
99 | done(ae);
100 | }
101 | });
102 | });
103 |
104 | it('Azure test copyOut should work', function(done) {
105 | _getOutfile(infile, done);
106 | });
107 |
108 | it('Azure disable should work', function(done) {
109 | uploadfs.disable(infile, function(e, val) {
110 | if (e) {
111 | console.log('error', e);
112 | }
113 | try {
114 | assert(!e, 'Azure disable, nominal success');
115 | done();
116 | } catch (ae) {
117 | done(ae);
118 | }
119 | });
120 | });
121 |
122 | it('Azure test copyOut after disable should fail', function(done) {
123 | setTimeout(function() {
124 | uploadfs.copyOut(infile, 'foo.bar', {}, function(e, res) {
125 | try {
126 | assert(e);
127 | assert(e.name === 'RestError');
128 | assert(e.code === 'BlobNotFound');
129 | assert(e.statusCode === 404);
130 | done();
131 | } catch (ae) {
132 | done(ae);
133 | }
134 | });
135 | }, 5000);
136 | });
137 |
138 | it('Azure enable should work', function(done) {
139 | uploadfs.enable(infile, function(e, val) {
140 | if (e) {
141 | console.log('error', e);
142 | }
143 | try {
144 | assert(!e, 'Azure enable, nominal success');
145 | done();
146 | } catch (ae) {
147 | done(ae);
148 | }
149 | });
150 | });
151 |
152 | it('Azure test copyOut after enable should succeed', function(done) {
153 | _getOutfile(infile, done);
154 | });
155 |
156 | it('Uploadfs should return valid web-servable url pointing to uploaded file', function() {
157 | const url = uploadfs.getUrl(infile);
158 | const ogFile = fs.readFileSync(srcFile);
159 | assert(ogFile.length);
160 | assert(url);
161 |
162 | return fetch(url, {
163 | method: 'GET',
164 | headers: {
165 | 'Accept-Encoding': 'gzip'
166 | }
167 | })
168 | .then(function (response) {
169 | assert(response.status < 400, 'Bad response status');
170 | return response.buffer();
171 | })
172 | .then(function (buffer) {
173 | assert.deepStrictEqual(Buffer.compare(buffer, ogFile), 0, 'Web servable file contents equal original text file contents');
174 | });
175 | });
176 |
177 | it('Azure test remove should work', function(done) {
178 | uploadfs.remove(infile, function(e) {
179 | if (e) {
180 | console.log('error', e);
181 | }
182 | try {
183 | assert(!e, 'Azure remove, nominal success');
184 | done();
185 | } catch (ae) {
186 | done(ae);
187 | }
188 | });
189 | });
190 |
191 | it('Azure test copyOut should fail', function(done) {
192 | const tmpFileName = new Date().getTime() + '_text.txt';
193 |
194 | uploadfs.copyOut(infile, tmpFileName, {}, function (e, res) {
195 | try {
196 | assert(e);
197 | assert(e.name === 'RestError');
198 | assert(e.code === 'BlobNotFound');
199 | assert(e.statusCode === 404);
200 | done();
201 | } catch (ae) {
202 | done(ae);
203 | }
204 | });
205 | });
206 | });
207 |
--------------------------------------------------------------------------------
/webp-test.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | const fs = require('fs');
4 | const { join } = require('path');
5 | const async = require('async');
6 | const Promise = require('bluebird');
7 | const { each, find } = require('lodash');
8 | const uploadfs = require('./uploadfs.js')();
9 |
10 | // colored output
11 | const color = (input, num = 255) => `\x1b[38;5;${num}m${input}\x1b[0m`;
12 | const red = input => color(input, 1);
13 | const grn = input => color(input, 2);
14 | const blu = input => color(input, 6);
15 |
16 | // status msg
17 | const check = (num, msg) => console.log(`(${num})${' '.repeat(13)}${msg}`);
18 | const pass = (num, msg = '') =>
19 | console.log(`${grn(`(${num})`)}${' '.repeat(13)}${grn('OK')} ${msg}\n`);
20 | const fail = (num, msg = '') => {
21 | console.log(`${red(`(${num})`)}${' '.repeat(13)}${red('ERROR')} ${msg}\n`);
22 | process.exit(1);
23 | };
24 |
25 | // time
26 | const elapsed = () => (performance.nodeTiming.duration / 1000).toFixed(2);
27 |
28 | // settings
29 | const img = 'test.webp';
30 | const ext = 'webp';
31 | const basePath = '/images/profiles/me';
32 | const imageSizes = [
33 | {
34 | name: 'small',
35 | width: 320,
36 | height: 320
37 | },
38 | {
39 | name: 'medium',
40 | width: 640,
41 | height: 640
42 | },
43 | {
44 | name: 'large',
45 | width: 1140,
46 | height: 1140
47 | }
48 | ];
49 | const config = {
50 | backend: 'local',
51 | image: 'sharp',
52 | uploadsPath: join(__dirname, '/test'),
53 | uploadsUrl: 'http://localhost:3000/test',
54 | tempPath: join(__dirname, '/temp'),
55 | imageSizes
56 | };
57 |
58 | // TEST: crop
59 | const testCopyImageInCrop = cb => {
60 | check(6, `uploadfs.copyImageIn('${blu(img)}') with cropping`);
61 | uploadfs.copyImageIn(
62 | img,
63 | '/images/profiles/me-cropped',
64 | {
65 | crop: {
66 | top: 830,
67 | left: 890,
68 | width: 500,
69 | height: 500
70 | }
71 | },
72 | (e, info) => {
73 | if (e) {
74 | fail(6, e);
75 | }
76 | if (info.basePath !== '/images/profiles/me-cropped') {
77 | fail(6, 'info.basePath is incorrect');
78 | }
79 | pass(6);
80 |
81 | check(7, 'returned image dimensions are reoriented');
82 | if (info.width !== 500 || info.height !== 500) {
83 | fail(7, 'reported size does not match crop');
84 | }
85 |
86 | if (!fs.statSync(`test/images/profiles/me-cropped.${ext}`).size) {
87 | fail(7, 'cannot stat copied image');
88 | }
89 | pass(7);
90 |
91 | check(8, 'removing files');
92 | uploadfs.remove(`${basePath}-cropped.${ext}`, e => {
93 | async.each(
94 | imageSizes,
95 | (size, cb) => {
96 | const name = `${info.basePath}.${size.name}.${ext}`;
97 | if (!fs.statSync(`test${name}`).size) {
98 | fail(8, 'cannot stat scaled/copied image');
99 | }
100 | uploadfs.remove(name, e => cb(e));
101 | },
102 | e => {
103 | if (e) {
104 | fail(8, e);
105 | }
106 | pass(8);
107 |
108 | // done, return
109 | cb();
110 | }
111 | );
112 | });
113 | }
114 | );
115 | };
116 |
117 | // TEST: copy
118 | const testCopyImageIn = cb => {
119 | check(2, `uploadfs.copyImageIn('${blu(img)}')`);
120 | uploadfs.copyImageIn(img, basePath, (e, info) => {
121 | if (e) {
122 | fail(2, e);
123 | }
124 | if (info.basePath !== '/images/profiles/me') {
125 | fail(2, 'info.basePath is incorrect');
126 | }
127 | pass(2);
128 |
129 | // check(3, 'returned image dimensions are reoriented');
130 | // if (info.width !== 1936 || info.height !== 2592) {
131 | // fail(3, 'Width and height missing or not reoriented for web use');
132 | // }
133 | // if (info.originalWidth !== 2592 || info.originalHeight !== 1936) {
134 | // fail(3, 'Original width and height missing or incorrect');
135 | // }
136 | // pass(3);
137 |
138 | check(4, 'locate copied image');
139 | if (!fs.statSync(`test/images/profiles/me.${ext}`).size) {
140 | fail(4, 'cannot stat copied image');
141 | }
142 | pass(4);
143 |
144 | check(5, 'removing files');
145 | uploadfs.remove(`/images/profiles/me.${ext}`, e =>
146 | async.each(
147 | imageSizes,
148 | (size, cb) => {
149 | const name = `${info.basePath}.${size.name}.${ext}`;
150 | if (!fs.statSync(`test${name}`).size) {
151 | fail(5, 'cannot stat scaled/copied image');
152 | }
153 | uploadfs.remove(name, () => cb());
154 | },
155 | e => {
156 | if (e) {
157 | fail(5, e);
158 | }
159 | pass(5);
160 |
161 | // done, test crop next
162 | testCopyImageInCrop(cb);
163 | }
164 | )
165 | );
166 | });
167 | };
168 |
169 | const run = (cb, msg = 'Running tests', opts = config) => {
170 | console.log(`${msg}\n`);
171 | check(1, 'init');
172 | uploadfs.init(opts, e => {
173 | if (e) {
174 | fail(1, e);
175 | }
176 | pass(1);
177 |
178 | // done, test copy next
179 | testCopyImageIn(cb);
180 | });
181 | };
182 |
183 | // initial msg
184 | console.log(`
185 | + ${blu('Config')}
186 |
187 | {
188 | ${blu('processor')}: '${grn(config.image)}',
189 | ${blu('storage')}: '${grn(config.backend)}'
190 | }
191 | `);
192 |
193 | // first run
194 | run(() => {
195 | let filesSeen = false;
196 |
197 | config.postprocessors = [
198 | {
199 | postprocessor: (files, folder, options = { test: false }) => {
200 | console.log(`${' '.repeat(16)}(${blu('using postprocessor')})\n`);
201 |
202 | if (!options.test) {
203 | fail('Postprocessor', 'postprocessor did not receive options');
204 | }
205 | if (!files) {
206 | fail('Postprocessor', 'did not receive files array');
207 | }
208 | if (!files.length) {
209 | return Promise.resolve(true);
210 | }
211 | if (!files[0].match(/\.(gif|jpg|png|webp)$/)) {
212 | fail('Postprocessor', `invalid file extension: ${files[0]}`);
213 | }
214 | if (!fs.existsSync(files[0])) {
215 | fail('Postprocessor', `cannot locate file: ${files[0]}`);
216 | }
217 | if (require('path').dirname(files[0]) !== folder) {
218 | fail('Postprocessor', 'received incorrect folder path');
219 | }
220 | each(config.imageSizes, size => {
221 | if (!find(files, f => f.match(size.name))) {
222 | fail('Postprocessor', `cannot stat resized file (${size.name})`);
223 | }
224 | });
225 | filesSeen = true;
226 | return Promise.resolve(true);
227 | },
228 | extensions: [ 'gif', 'jpg', 'png', 'webp' ],
229 | options: { test: true }
230 | }
231 | ];
232 |
233 | // second run (postprocessing)
234 | run(() => {
235 | if (!filesSeen) {
236 | fail(0, 'postprocessor saw no files');
237 | }
238 |
239 | // All tests passed!
240 | console.log(`+ ${blu('Completed')} in ${grn(elapsed())} seconds\n`);
241 | process.exit(0);
242 | },
243 |
244 | `+ ${blu('Postprocessors')}`
245 | );
246 | }, `+ ${blu('Methods')}`);
247 |
--------------------------------------------------------------------------------
/test-sharp.js:
--------------------------------------------------------------------------------
1 | const uploadfs = require('./uploadfs.js')();
2 | const fs = require('fs');
3 | const async = require('async');
4 | const Promise = require('bluebird');
5 | const _ = require('lodash');
6 | const path = require('path');
7 |
8 | // Test the sharp image backend
9 |
10 | const localOptions = {
11 | storage: 'local',
12 | image: 'sharp',
13 | uploadsPath: path.join(__dirname, '/test'),
14 | uploadsUrl: 'http://localhost:3000/test'
15 | };
16 |
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | const tempPath = path.join(__dirname, '/temp');
36 | const basePath = '/images/profiles/me';
37 |
38 | localOptions.imageSizes = imageSizes;
39 | localOptions.tempPath = tempPath;
40 | localOptions.backend = 'local';
41 |
42 | localTestStart(function () {
43 | let filesSeen = false;
44 | console.log('RERUN TESTS WITH TEST OF POSTPROCESSORS');
45 | localOptions.postprocessors = [
46 | {
47 | postprocessor: function(files, folder, options) {
48 | console.log('in a postprocessor');
49 | if (!(options && options.test)) {
50 | console.error('postprocessor did not receive options');
51 | process.exit(1);
52 | }
53 | if (!files) {
54 | console.error('No files array passed to postprocessor');
55 | process.exit(1);
56 | }
57 | if (!files.length) {
58 | return Promise.resolve(true);
59 | }
60 | if (!files[0].match(/\.(gif|jpg|png)$/)) {
61 | console.error('postprocessor invoked for inappropriate file extension');
62 | process.exit(1);
63 | }
64 | if (!fs.existsSync(files[0])) {
65 | console.error('postprocessor invoked for nonexistent file');
66 | process.exit(1);
67 | }
68 | if (require('path').dirname(files[0]) !== folder) {
69 | console.error('folder parameter to postprocessor is incorrect');
70 | }
71 | _.each(localOptions.imageSizes, function(size) {
72 | if (!_.find(files, function(file) {
73 | return file.match(size.name);
74 | })) {
75 | console.error('postprocessor saw no file for the size ' + size.name);
76 | process.exit(1);
77 | }
78 | });
79 | filesSeen = true;
80 | return Promise.resolve(true);
81 | },
82 | extensions: [ 'gif', 'jpg', 'png' ],
83 | options: {
84 | test: true
85 | }
86 | }
87 | ];
88 | localTestStart(function () {
89 | if (!filesSeen) {
90 | console.error('postprocessor saw no files');
91 | process.exit(1);
92 | }
93 | console.log('Tests done');
94 | process.exit(0);
95 | });
96 | });
97 |
98 | function localTestStart(cb) {
99 | const options = localOptions;
100 | console.log('Initializing uploadfs for the ' + options.backend + ' storage backend with the ' + options.image + ' image backend');
101 | uploadfs.init(options, function(e) {
102 | if (e) {
103 | console.log('uploadfs.init failed:');
104 | console.log(e);
105 | process.exit(1);
106 | }
107 | console.log('uploadfs.init options:', options);
108 | testCopyImageIn();
109 | });
110 |
111 | function testCopyImageIn() {
112 | console.log('testing copyImageIn');
113 |
114 | // Note copyImageIn adds an extension for us
115 | uploadfs.copyImageIn('test.jpg', basePath, function(e, info) {
116 | if (e) {
117 | console.log('testCopyImageIn failed:');
118 | console.log(e);
119 | process.exit(1);
120 | }
121 |
122 | if (info.basePath !== '/images/profiles/me') {
123 | console.log('info.basePath is incorrect');
124 | process.exit(1);
125 | }
126 |
127 | console.log('Testing that returned image dimensions are reoriented');
128 |
129 | if ((info.width !== 1936) || (info.height !== 2592)) {
130 | console.log('Width and height missing or not reoriented for web use');
131 | console.log(info);
132 | process.exit(1);
133 | }
134 |
135 | if ((info.originalWidth !== 2592) || (info.originalHeight !== 1936)) {
136 | console.log('Original width and height missing or incorrect');
137 | console.log(info);
138 | process.exit(1);
139 | }
140 |
141 | const stats = fs.statSync('test/images/profiles/me.jpg');
142 |
143 | if (!stats.size) {
144 | console.log('Copied image is empty or missing');
145 | process.exit(1);
146 | }
147 |
148 | // We already tested remove, just do it to mop up
149 | console.log('Removing files...');
150 | uploadfs.remove('/images/profiles/me.jpg', function(e) {
151 | async.each(imageSizes, function(size, callback) {
152 | const name = info.basePath + '.' + size.name + '.jpg';
153 | const stats = fs.statSync('test' + name);
154 | if (!stats.size) {
155 | console.log('Scaled and copied image is empty or missing (2)');
156 | process.exit(1);
157 | }
158 |
159 | // We already tested remove, just do it to mop up
160 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
161 | callback();
162 | });
163 | }, function(err) {
164 | if (err) {
165 | console.log('Test failed', err);
166 | process.exit(1);
167 | }
168 | testCopyImageInCrop(cb);
169 | });
170 | }); // remove me.jpg
171 | });
172 | }
173 |
174 | function testCopyImageInCrop(cb) {
175 | console.log('testing copyImageIn with cropping');
176 |
177 | // Note copyImageIn adds an extension for us
178 | // Should grab the flowers
179 | uploadfs.copyImageIn('test.jpg', '/images/profiles/me-cropped', {
180 | crop: {
181 | top: 830,
182 | left: 890,
183 | width: 500,
184 | height: 500
185 | }
186 | }, function(e, info) {
187 | if (e) {
188 | console.log('testCopyImageIn failed:');
189 | console.log(e);
190 | process.exit(1);
191 | }
192 |
193 | if (info.basePath !== '/images/profiles/me-cropped') {
194 | console.log('info.basePath is incorrect');
195 | process.exit(1);
196 | }
197 |
198 | console.log('Testing that returned image dimensions are reoriented');
199 |
200 | if ((info.width !== 500) || (info.height !== 500)) {
201 | console.log('Reported size does not match crop');
202 | console.log(info);
203 | process.exit(1);
204 | }
205 |
206 | const stats = fs.statSync('test/images/profiles/me-cropped.jpg');
207 |
208 | if (!stats.size) {
209 | console.log('Copied image is empty or missing');
210 | process.exit(1);
211 | }
212 |
213 | // We already tested remove, just do it to mop up
214 | console.log('Removing files...');
215 | uploadfs.remove(`${basePath}-cropped.jpg`, function(e) {
216 | async.each(imageSizes, function(size, callback) {
217 | const name = info.basePath + '.' + size.name + '.jpg';
218 | const stats = fs.statSync('test' + name);
219 | if (!stats.size) {
220 | console.log('Scaled and copied image is empty or missing (2)');
221 | process.exit(1);
222 | }
223 | // We already tested remove, just do it to mop up
224 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
225 | callback(e);
226 | });
227 | }, function (err) {
228 | if (err) {
229 | console.log('Remove file fails', err);
230 | process.exit(1);
231 | }
232 | console.log('Files removed');
233 | cb();
234 | });
235 | });
236 | });
237 | }
238 | }
239 |
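240 | /*
241 |  Running this file directly exercises the sharp image backend against the
242 |  local storage backend (a sketch of the invocation; it assumes sharp is
243 |  installed and uses the test.jpg checked into this repo):
244 |
245 |    node test-sharp.js
246 |
247 |  Scaled copies are written under ./test and temp files under ./temp, and
248 |  both are removed again as the test proceeds.
249 | */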
--------------------------------------------------------------------------------
/test-imagemagick.js:
--------------------------------------------------------------------------------
1 | const uploadfs = require('./uploadfs.js')();
2 | const fs = require('fs');
3 | const async = require('async');
4 | const Promise = require('bluebird');
5 | const _ = require('lodash');
6 | const path = require('path');
7 |
8 | // Test the imagemagick image backend
9 |
10 | const localOptions = {
11 | storage: 'local',
12 | image: 'imagemagick',
13 | uploadsPath: path.join(__dirname, '/test'),
14 | uploadsUrl: 'http://localhost:3000/test'
15 | };
16 |
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | const tempPath = path.join(__dirname, '/temp');
36 | const basePath = '/images/profiles/me';
37 |
38 | localOptions.imageSizes = imageSizes;
39 | localOptions.tempPath = tempPath;
40 | localOptions.backend = 'local';
41 |
42 | localTestStart(function () {
43 | let filesSeen = false;
44 | console.log('RERUN TESTS WITH TEST OF POSTPROCESSORS');
45 | localOptions.postprocessors = [
46 | {
47 | postprocessor: function(files, folder, options) {
48 | console.log('in a postprocessor');
49 | if (!(options && options.test)) {
50 | console.error('postprocessor did not receive options');
51 | process.exit(1);
52 | }
53 | if (!files) {
54 | console.error('No files array passed to postprocessor');
55 | process.exit(1);
56 | }
57 | if (!files.length) {
58 | return Promise.resolve(true);
59 | }
60 | if (!files[0].match(/\.(gif|jpg|png)$/)) {
61 | console.error('postprocessor invoked for inappropriate file extension');
62 | process.exit(1);
63 | }
64 | if (!fs.existsSync(files[0])) {
65 | console.error('postprocessor invoked for nonexistent file');
66 | process.exit(1);
67 | }
68 | if (require('path').dirname(files[0]) !== folder) {
69 | console.error('folder parameter to postprocessor is incorrect'); process.exit(1);
70 | }
71 | _.each(localOptions.imageSizes, function(size) {
72 | if (!_.find(files, function(file) {
73 | return file.match(size.name);
74 | })) {
75 | console.error('postprocessor saw no file for the size ' + size.name);
76 | process.exit(1);
77 | }
78 | });
79 | filesSeen = true;
80 | return Promise.resolve(true);
81 | },
82 | extensions: [ 'gif', 'jpg', 'png' ],
83 | options: {
84 | test: true
85 | }
86 | }
87 | ];
88 | localTestStart(function () {
89 | if (!filesSeen) {
90 | console.error('postprocessor saw no files');
91 | process.exit(1);
92 | }
93 | console.log('Tests done');
94 | process.exit(0);
95 | });
96 | });
97 |
98 | function localTestStart(cb) {
99 | const options = localOptions;
100 | console.log('Initializing uploadfs for the ' + options.backend + ' storage backend with the ' + options.image + ' image backend');
101 | uploadfs.init(options, function(e) {
102 | if (e) {
103 | console.log('uploadfs.init failed:');
104 | console.log(e);
105 | process.exit(1);
106 | }
107 | console.log('uploadfs.init options:', options);
108 | testCopyImageIn();
109 | });
110 |
111 | function testCopyImageIn() {
112 | console.log('testing copyImageIn');
113 |
114 | // Note copyImageIn adds an extension for us
115 | uploadfs.copyImageIn('test.jpg', basePath, function(e, info) {
116 | if (e) {
117 | console.log('testCopyImageIn failed:');
118 | console.log(e);
119 | process.exit(1);
120 | }
121 |
122 | if (info.basePath !== '/images/profiles/me') {
123 | console.log('info.basePath is incorrect');
124 | process.exit(1);
125 | }
126 |
127 | console.log('Testing that returned image dimensions are reoriented');
128 |
129 | if ((info.width !== 1936) || (info.height !== 2592)) {
130 | console.log('Width and height missing or not reoriented for web use');
131 | console.log(info);
132 | process.exit(1);
133 | }
134 |
135 | if ((info.originalWidth !== 2592) || (info.originalHeight !== 1936)) {
136 | console.log('Original width and height missing or incorrect');
137 | console.log(info);
138 | process.exit(1);
139 | }
140 |
141 | const stats = fs.statSync('test/images/profiles/me.jpg');
142 |
143 | if (!stats.size) {
144 | console.log('Copied image is empty or missing');
145 | process.exit(1);
146 | }
147 |
148 | // We already tested remove, just do it to mop up
149 | console.log('Removing files...');
150 | uploadfs.remove('/images/profiles/me.jpg', function(e) {
151 | async.each(imageSizes, function(size, callback) {
152 | const name = info.basePath + '.' + size.name + '.jpg';
153 | const stats = fs.statSync('test' + name);
154 | if (!stats.size) {
155 | console.log('Scaled and copied image is empty or missing (2)');
156 | process.exit(1);
157 | }
158 |
159 | // We already tested remove, just do it to mop up
160 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
161 | callback();
162 | });
163 | }, function(err) {
164 | if (err) {
165 | console.log('Test failed', err);
166 | process.exit(1);
167 | }
168 | testCopyImageInCrop(cb);
169 | });
170 | }); // remove me.jpg
171 | });
172 | }
173 |
174 | function testCopyImageInCrop(cb) {
175 | console.log('testing copyImageIn with cropping');
176 |
177 | // Note copyImageIn adds an extension for us
178 | // Should grab the flowers
179 | uploadfs.copyImageIn('test.jpg', '/images/profiles/me-cropped', {
180 | crop: {
181 | top: 830,
182 | left: 890,
183 | width: 500,
184 | height: 500
185 | }
186 | }, function(e, info) {
187 | if (e) {
188 | console.log('testCopyImageIn failed:');
189 | console.log(e);
190 | process.exit(1);
191 | }
192 |
193 | if (info.basePath !== '/images/profiles/me-cropped') {
194 | console.log('info.basePath is incorrect');
195 | process.exit(1);
196 | }
197 |
198 | console.log('Testing that returned image dimensions are reoriented');
199 |
200 | if ((info.width !== 500) || (info.height !== 500)) {
201 | console.log('Reported size does not match crop');
202 | console.log(info);
203 | process.exit(1);
204 | }
205 |
206 | const stats = fs.statSync('test/images/profiles/me-cropped.jpg');
207 |
208 | if (!stats.size) {
209 | console.log('Copied image is empty or missing');
210 | process.exit(1);
211 | }
212 |
213 | // We already tested remove, just do it to mop up
214 | console.log('Removing files...');
215 | uploadfs.remove(`${basePath}-cropped.jpg`, function(e) {
216 | async.each(imageSizes, function(size, callback) {
217 | const name = info.basePath + '.' + size.name + '.jpg';
218 | const stats = fs.statSync('test' + name);
219 | if (!stats.size) {
220 | console.log('Scaled and copied image is empty or missing (2)');
221 | process.exit(1);
222 | }
223 | // We already tested remove, just do it to mop up
224 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
225 | callback(e);
226 | });
227 | }, function (err) {
228 | if (err) {
229 | console.log('Remove file fails', err);
230 | process.exit(1);
231 | }
232 | console.log('Files removed');
233 | cb();
234 | });
235 | });
236 | });
237 | }
238 | }
239 |
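240 | /*
241 |  Related option, for reference: the imagemagick image backend can hand
242 |  animated GIF scaling off to gifsicle when its `gifsicle` option is true
243 |  (see lib/image/imagemagick.js). A sketch, assuming the top-level init
244 |  options are passed through to the image backend:
245 |
246 |    localOptions.gifsicle = true;
247 | */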
--------------------------------------------------------------------------------
/lib/storage/local.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Local filesystem-based backend for uploadfs. See also
4 | // s3.js. The main difference between this backend and just using
5 | // the local filesystem directly is that it creates parent
6 | // folders automatically when they are discovered to be missing,
7 | // and it encourages you to write code that will still work
8 | // when you switch to the s3 backend
9 |
10 | const dirname = require('path').dirname;
11 | const fs = require('fs');
12 | const copyFile = require('../copyFile.js');
13 | const async = require('async');
14 | const utils = require('../utils.js');
15 |
16 | module.exports = function() {
17 | let uploadsPath;
18 | let uploadsUrl;
19 | let removeCandidates = [];
20 | let timeout;
21 |
22 | const self = {
23 | init: function(options, callback) {
24 | self.options = options;
25 | uploadsPath = options.uploadsPath;
26 | if (!uploadsPath) {
27 | return callback('uploadsPath not set');
28 | }
29 | uploadsUrl = options.uploadsUrl;
30 | if (!uploadsUrl) {
31 | return callback('uploadsUrl not set');
32 | }
33 | // We use a timeout that we reinstall each time rather than
34 | // an interval to avoid pileups
35 | timeout = setTimeout(cleanup, 1000);
36 | return callback(null);
37 |
38 | function cleanup() {
39 | timeout = null;
40 | const list = removeCandidates;
41 | // Longest paths first, so we don't try to remove parents before children
42 | // and wind up never getting rid of the parent
43 | list.sort(function(a, b) {
44 | if (a.length > b.length) {
45 | return -1;
46 | } else if (a.length < b.length) {
47 | return 1;
48 | } else {
49 | return 0;
50 | }
51 | });
52 | // Building new list for next pass
53 | removeCandidates = [];
54 | // Parallelism here would just remove things too soon, preventing a parent from
55 | // being removed after a child
56 | return async.eachSeries(list, function(path, callback) {
57 | const uploadPath = uploadsPath + path;
58 | fs.rmdir(uploadPath, function(e) {
59 | // We're not fussy about the outcome, if it still has files in it we're
60 | // actually depending on this to fail
61 | if (!e) {
62 | // It worked, so try to remove the parent (which will fail if not empty, etc.)
63 | add(dirname(path));
64 | }
65 | return callback(null);
66 | });
67 | }, function() {
68 | // Try again in 1 second, typically removing another layer of parents if empty, etc.
69 | if (!self.destroyed) {
70 | timeout = setTimeout(cleanup, 1000);
71 | }
72 | });
73 |
74 | function add(path) {
75 | // Don't remove uploadfs itself
76 | if (path.length > 1) {
77 | removeCandidates.push(path);
78 | }
79 | }
80 | }
81 | },
82 |
83 | destroy: function(callback) {
84 | // node cannot exit if we still hold a timeout
85 | if (timeout) {
86 | clearTimeout(timeout);
87 | }
88 | self.destroyed = true;
89 | return callback(null);
90 | },
91 |
92 | copyIn: function(localPath, path, options, callback) {
93 | const uploadPath = uploadsPath + path;
94 | return copyFile(localPath, uploadPath, callback);
95 | },
96 |
97 | copyOut: function(path, localPath, options, callback) {
98 | const downloadPath = uploadsPath + path;
99 | return copyFile(downloadPath, localPath, callback);
100 | },
101 |
102 | streamOut: function(path, options) {
103 | return fs.createReadStream(uploadsPath + path);
104 | },
105 |
106 | remove: function(path, callback) {
107 | const uploadPath = uploadsPath + path;
108 | fs.unlink(uploadPath, callback);
109 | if (dirname(path).length > 1) {
110 | removeCandidates.push(dirname(path));
111 | }
112 | },
113 |
114 | enable: function(path, callback) {
115 | if (self.options.disabledFileKey) {
116 | return fs.rename(uploadsPath + utils.getDisabledPath(path, self.options.disabledFileKey), uploadsPath + path, callback);
117 | } else {
118 | // World readable, owner writable. Reasonable since
119 | // web accessible files are world readable in that
120 | // sense regardless
121 | return fs.chmod(uploadsPath + path, self.getEnablePermissions(), callback);
122 | }
123 | },
124 |
125 | getEnablePermissions: function() {
126 | return self.options.enablePermissions || parseInt('644', 8);
127 | },
128 |
129 | disable: function(path, callback) {
130 | if (self.options.disabledFileKey) {
131 | return fs.rename(uploadsPath + path, uploadsPath + utils.getDisabledPath(path, self.options.disabledFileKey), callback);
132 | } else {
133 | // No access. Note this means you must explicitly
134 | // enable to get read access back, even with copyFileOut
135 | return fs.chmod(uploadsPath + path, self.getDisablePermissions(), callback);
136 | }
137 | },
138 |
139 | getDisablePermissions: function() {
140 | return self.options.disablePermissions || parseInt('0000', 8);
141 | },
142 |
143 | getUrl: function(path) {
144 | return utils.addPathToUrl(self.options, uploadsUrl, path);
145 | },
146 |
147 | migrateToDisabledFileKey: function(callback) {
148 | if (!self.options.disabledFileKey) {
149 | return callback(new Error('migrateToDisabledFileKey invoked with no disabledFileKey option set.'));
150 | }
151 | const candidates = [];
152 | try {
153 | spelunk('');
154 | } catch (e) {
155 | return callback(e);
156 | }
157 | return async.eachLimit(candidates, 5, function(file, callback) {
158 | fs.chmodSync(uploadsPath + file, self.options.enablePermissions || parseInt('644', 8));
159 | self.disable(file, callback);
160 | }, callback);
161 | function spelunk(folder) {
162 | const files = fs.readdirSync(uploadsPath + folder);
163 | files.forEach(function(file) {
164 | const stats = fs.statSync(uploadsPath + folder + '/' + file);
165 | const mode = stats.mode & parseInt('0777', 8);
166 | if (stats.isDirectory()) {
167 | return spelunk(folder + '/' + file);
168 | }
169 | if (mode === self.getDisablePermissions()) {
170 | candidates.push(folder + '/' + file);
171 | }
172 | });
173 | }
174 | },
175 |
176 | migrateFromDisabledFileKey: function(callback) {
177 | if (self.options.disabledFileKey) {
178 | return callback('migrateFromDisabledFileKey invoked with disabledFileKey option still set.');
179 | }
180 | const candidates = [];
181 | try {
182 | spelunk('');
183 | } catch (e) {
184 | return callback(e);
185 | }
186 | return async.eachLimit(candidates, 5, function(file, callback) {
187 | return async.series([
188 | function(callback) {
189 | return fs.rename(uploadsPath + file, removeDisabledSuffix(uploadsPath + file), callback);
190 | },
191 | function(callback) {
192 | return self.disable(removeDisabledSuffix(file), callback);
193 | }
194 | ], callback);
195 | function removeDisabledSuffix(path) {
196 | return path.replace(/-disabled-[0-9a-f]+$/, '');
197 | }
198 | }, callback);
199 | function spelunk(folder) {
200 | const files = fs.readdirSync(uploadsPath + folder);
201 | files.forEach(function(file) {
202 | const stats = fs.statSync(uploadsPath + folder + '/' + file);
203 | if (stats.isDirectory()) {
204 | return spelunk(folder + '/' + file);
205 | }
206 | if (file.match(/-disabled-[0-9a-f]+$/)) {
207 | candidates.push(folder + '/' + file);
208 | }
209 | });
210 | }
211 | },
212 |
213 | // Exported for unit testing only
214 | _testCopyFile: function(path1, path2, options, callback) {
215 | return copyFile(path1, path2, options, callback);
216 | }
217 | };
218 |
219 | return self;
220 | };
221 |
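222 | /*
223 |  Usage sketch (illustrative; paths are placeholders): the local backend
224 |  needs only uploadsPath and uploadsUrl. copyIn creates missing parent
225 |  folders, and the cleanup timeout above sweeps folders left empty by
226 |  remove().
227 |
228 |    const uploadfs = require('../../uploadfs.js')();
229 |    uploadfs.init({
230 |      storage: 'local',
231 |      uploadsPath: '/var/www/uploads',
232 |      uploadsUrl: 'https://example.com/uploads',
233 |      tempPath: '/tmp/uploadfs'
234 |    }, function(e) {
235 |      if (e) { throw e; }
236 |      uploadfs.copyIn('./avatar.jpg', '/people/avatar.jpg', function(e) {
237 |        // now present at uploadsPath + '/people/avatar.jpg'
238 |      });
239 |    });
240 | */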
--------------------------------------------------------------------------------
/lib/storage/s3.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Amazon s3-based backend for uploadfs. See also
4 | // local.js.
5 |
6 | const fs = require('fs');
7 | const {
8 | S3Client,
9 | GetObjectCommand,
10 | DeleteObjectCommand,
11 | PutObjectAclCommand
12 | } = require('@aws-sdk/client-s3');
13 | const { Upload } = require('@aws-sdk/lib-storage');
14 | const { NodeHttpHandler } = require('@smithy/node-http-handler');
15 | const { extname } = require('path');
16 | const { PassThrough } = require('stream');
17 | const utils = require('../utils');
18 |
19 | module.exports = function() {
20 | let contentTypes;
21 | let client;
22 | let cachingTime;
23 | let https;
24 | let bucket;
25 | let bucketObjectsACL;
26 | let disabledBucketObjectsACL;
27 | let endpoint;
28 | let defaultTypes;
29 | let noProtoEndpoint;
30 | let pathStyle = false;
31 | let noGzipContentTypes;
32 | let addNoGzipContentTypes;
33 | const self = {
34 | init: function (options, callback) {
35 | // knox bc
36 | endpoint = 's3.amazonaws.com';
37 |
38 | const clientConfig = {
39 | region: options.region
40 | };
41 |
42 | if (options.secret) {
43 | clientConfig.credentials = {
44 | accessKeyId: options.key,
45 | secretAccessKey: options.secret,
46 | ...(options.token && { sessionToken: options.token })
47 | };
48 | }
49 |
50 | bucket = options.bucket;
51 | bucketObjectsACL = options.bucketObjectsACL || 'public-read';
52 | disabledBucketObjectsACL = options.disabledBucketObjectsACL || 'private';
53 | noGzipContentTypes = options.noGzipContentTypes || require('./noGzipContentTypes');
54 | addNoGzipContentTypes = options.addNoGzipContentTypes || [];
55 | // bc for the `endpoint`, `secure` and `port` options
56 | if (options.endpoint) {
57 | endpoint = options.endpoint;
58 | if (!endpoint.match(/^https?:/)) {
59 | // Infer it like knox would
60 | const defaultSecure = ((!options.port) || (options.port === 443));
61 | const secure = options.secure || defaultSecure;
62 | let port = options.port || 443;
63 | const protocol = secure ? 'https://' : 'http://';
64 | if (secure && (port === 443)) {
65 | port = '';
66 | } else if ((!secure) && (port === 80)) {
67 | port = '';
68 | } else {
69 | port = ':' + port;
70 | }
71 | endpoint = protocol + endpoint + port;
72 | }
73 | clientConfig.endpoint = endpoint;
74 | }
75 |
76 | // this is to support the knox style attribute OR AWS forcePathStyle attribute
77 | if (options.style && (options.style === 'path')) {
78 | pathStyle = true;
79 | clientConfig.forcePathStyle = true;
80 | }
81 |
82 | if (options.agent) {
83 | clientConfig.requestHandler = new NodeHttpHandler({
84 | httpAgent: options.agent,
85 | httpsAgent: options.agent
86 | });
87 | }
88 | client = new S3Client(clientConfig);
89 | defaultTypes = require('./contentTypes.js');
90 | if (options.contentTypes) {
91 | contentTypes = {
92 | ...defaultTypes,
93 | ...options.contentTypes
94 | };
95 | } else {
96 | contentTypes = defaultTypes;
97 | }
98 |
99 | https = (options.https === undefined) ? true : options.https;
100 | cachingTime = options.cachingTime;
101 | self.options = options;
102 | return callback(null);
103 | },
104 |
105 | copyIn: function(localPath, path, options, callback) {
106 | let ext = extname(path);
107 | if (ext.length) {
108 | ext = ext.substr(1);
109 | }
110 | let contentType = contentTypes[ext];
111 | if (!contentType) {
112 | contentType = 'application/octet-stream';
113 | }
114 |
115 | const inputStream = fs.createReadStream(localPath);
116 |
117 | const params = {
118 | Bucket: bucket,
119 | ACL: bucketObjectsACL,
120 | Key: utils.removeLeadingSlash(self.options, path),
121 | Body: inputStream,
122 | ContentType: contentType
123 | };
124 |
125 | if (gzipAppropriate(contentType)) {
126 | params.ContentEncoding = 'gzip';
127 | const gzip = require('zlib').createGzip();
128 | inputStream.pipe(gzip);
129 | params.Body = gzip;
130 | }
131 |
132 | if (cachingTime) {
133 | params.CacheControl = 'public, max-age=' + cachingTime;
134 | }
135 |
136 | // Use @aws-sdk/lib-storage for multipart uploads
137 | const upload = new Upload({
138 | client,
139 | params
140 | });
141 |
142 | upload.done()
143 | .then(result => callback(null, result))
144 | .catch(err => callback(err));
145 |
146 | function gzipAppropriate(contentType) {
147 | return ![ ...noGzipContentTypes, ...addNoGzipContentTypes ].includes(contentType);
148 | }
149 | },
150 |
151 | streamOut: function(path, options) {
152 | const result = new PassThrough();
153 | const params = {
154 | Bucket: bucket,
155 | Key: utils.removeLeadingSlash(self.options, path)
156 | };
157 |
158 | const command = new GetObjectCommand(params);
159 |
160 | client.send(command)
161 | .then(response => {
162 | let inputStream = response.Body;
163 |
164 | // Errors do not automatically propagate with pipe()
165 | inputStream.on('error', e => {
166 | result.emit('error', e);
167 | });
168 |
169 | if (response.ContentEncoding === 'gzip') {
170 | const gunzip = require('zlib').createGunzip();
171 | gunzip.on('error', e => {
172 | result.emit('error', e);
173 | });
174 | inputStream.pipe(gunzip);
175 | inputStream = gunzip;
176 | }
177 |
178 | inputStream.pipe(result);
179 | })
180 | .catch(err => {
181 | result.emit('error', {
182 | ...err,
183 | ...err.$response
184 | });
185 | });
186 |
187 | return result;
188 | },
189 |
190 | copyOut: function(path, localPath, options, callback) {
191 | let finished = false;
192 | const outputStream = fs.createWriteStream(localPath);
193 | const inputStream = self.streamOut(path, options);
194 | inputStream.pipe(outputStream);
195 | inputStream.on('error', function(err) {
196 | // Watch out for any oddities in stream implementation
197 | if (finished) {
198 | return;
199 | }
200 | finished = true;
201 | return callback(err);
202 | });
203 | outputStream.on('error', function(err) {
204 | // Watch out for any oddities in stream implementation
205 | if (finished) {
206 | return;
207 | }
208 | finished = true;
209 | return callback(err);
210 | });
211 | outputStream.on('finish', function() {
212 | // Watch out for any oddities in stream implementation
213 | if (finished) {
214 | return;
215 | }
216 | finished = true;
217 | return callback(null);
218 | });
219 | },
220 |
221 | remove: function(path, callback) {
222 | const command = new DeleteObjectCommand({
223 | Bucket: bucket,
224 | Key: utils.removeLeadingSlash(self.options, path)
225 | });
226 |
227 | client.send(command)
228 | .then(result => callback(null, result))
229 | .catch(err => callback(err));
230 | },
231 |
232 | enable: function(path, callback) {
233 | const command = new PutObjectAclCommand({
234 | Bucket: bucket,
235 | ACL: bucketObjectsACL,
236 | Key: utils.removeLeadingSlash(self.options, path)
237 | });
238 |
239 | client.send(command)
240 | .then(result => callback(null, result))
241 | .catch(err => callback(err));
242 | },
243 |
244 | disable: function(path, callback) {
245 | const command = new PutObjectAclCommand({
246 | Bucket: bucket,
247 | ACL: disabledBucketObjectsACL,
248 | Key: utils.removeLeadingSlash(self.options, path)
249 | });
250 |
251 | client.send(command)
252 | .then(result => callback(null, result))
253 | .catch(err => callback(err));
254 | },
255 |
256 | getUrl: function (path) {
257 | let url;
258 | noProtoEndpoint = endpoint.replace(/^https?:\/\//i, '');
259 | if (pathStyle) {
260 | url = (https ? 'https://' : 'http://') + noProtoEndpoint + '/' + bucket;
261 | } else {
262 | url = (https ? 'https://' : 'http://') + bucket + '.' + noProtoEndpoint;
263 | }
264 | return utils.addPathToUrl(self.options, url, path);
265 | },
266 |
267 | destroy: function(callback) {
268 | // No file descriptors or timeouts held
269 | return callback(null);
270 | }
271 | };
272 | return self;
273 | };
274 |
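275 | /*
276 |  Configuration sketch (illustrative; bucket and endpoint are placeholders):
277 |  the same backend works against any S3-compatible service. Omitting
278 |  key/secret falls through to the AWS SDK's default credential chain, and
279 |  cachingTime sets the Cache-Control header applied by copyIn.
280 |
281 |    uploadfs.init({
282 |      storage: 's3',
283 |      bucket: 'my-bucket',
284 |      region: 'us-east-1',
285 |      key: process.env.MY_S3_KEY,
286 |      secret: process.env.MY_S3_SECRET,
287 |      endpoint: 'https://minio.example.com',
288 |      style: 'path',
289 |      cachingTime: 86400,
290 |      tempPath: '/tmp/uploadfs'
291 |    }, callback);
292 | */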
--------------------------------------------------------------------------------
/test/s3.js:
--------------------------------------------------------------------------------
1 | /* global describe, it, before */
2 | const assert = require('assert');
3 | const fetch = require('node-fetch');
4 | const exec = require('child_process').execSync;
5 | const util = require('util');
6 | const fs = require('fs');
7 |
8 | describe('UploadFS S3', function () {
9 | this.timeout(50000);
10 | const uploadfs = require('../uploadfs.js')();
11 | const init = util.promisify(uploadfs.init);
12 | const remove = util.promisify(uploadfs.remove);
13 | const copyIn = util.promisify(uploadfs.copyIn);
14 | const copyImageIn = util.promisify(uploadfs.copyImageIn);
15 | const copyOut = util.promisify(uploadfs.copyOut);
16 | const enable = util.promisify(uploadfs.enable);
17 | const disable = util.promisify(uploadfs.disable);
18 |
19 | const fs = require('fs');
20 | const tempPath = '../temp';
21 | const dstPath = '/one/two/three/test.txt';
22 | const imageSizes = [
23 | {
24 | name: 'small',
25 | width: 320,
26 | height: 320
27 | },
28 | {
29 | name: 'medium',
30 | width: 640,
31 | height: 640
32 | },
33 | {
34 | name: 'large',
35 | width: 1140,
36 | height: 1140
37 | }
38 | ];
39 |
40 | const s3Options = {
41 | storage: 's3',
42 | // Usually not set so we get sharp, with imagemagick fallback (the default behavior)
43 | image: process.env.UPLOADFS_TEST_IMAGE,
44 | bucket: process.env.UPLOADFS_TEST_S3_BUCKET,
45 | key: process.env.UPLOADFS_TEST_S3_KEY,
46 | secret: process.env.UPLOADFS_TEST_S3_SECRET,
47 | region: process.env.UPLOADFS_TEST_S3_REGION
48 | };
49 |
50 | s3Options.imageSizes = imageSizes;
51 | s3Options.tempPath = tempPath;
52 |
53 | before(async function() {
54 | await init(s3Options);
55 | });
56 |
57 | it('S3 should store and retrieve a .tar.gz file without double-gzipping it', async function() {
58 | await copyIn(`${__dirname}/test.tar.gz`, '/test.tar.gz');
59 | // Is it returned in identical form using copyOut?
60 | await copyOut('/test.tar.gz', `${__dirname}/test2.tar.gz`);
61 | identical(`${__dirname}/test.tar.gz`, `${__dirname}/test2.tar.gz`);
62 | fs.unlinkSync(`${__dirname}/test2.tar.gz`);
63 | // Is it returned in identical form using fetch and the public URL of the file?
64 | const url = uploadfs.getUrl() + '/test.tar.gz';
65 | // curl and the browser exhibit the same confused behavior
66 | // unless .gz has a content type in contentTypes.js and
67 | // is also declared in noGzipContentTypes.js. For whatever
68 | // reason node-fetch doesn't get confused so we test with curl
69 | exec(`curl ${url} --output ${__dirname}/test3.tar.gz`);
70 | identical(`${__dirname}/test.tar.gz`, `${__dirname}/test3.tar.gz`);
71 | fs.unlinkSync(`${__dirname}/test3.tar.gz`);
72 | await remove('/test.tar.gz');
73 | });
74 |
75 | it('CopyIn should work', async function() {
76 | await copyIn('test.txt', dstPath);
77 | });
78 |
79 | it('CopyIn file should be available via s3', async function () {
80 | const url = uploadfs.getUrl() + '/one/two/three/test.txt';
81 | const og = fs.readFileSync('test.txt', 'utf8');
82 |
83 | const response = await fetch(url, {
84 | method: 'GET',
85 | headers: {
86 | 'Accept-Encoding': 'gzip',
87 | 'Content-type': 'text/plain; charset=utf-8'
88 | }
89 | });
90 | assert(response.status === 200, `Request status 200 != ${response.status}`);
91 | const body = await response.text();
92 | assert(body === og, 'Res body equals uploaded file');
93 | });
94 |
95 | it('S3 streamOut should work', async function() {
96 | const input = uploadfs.streamOut(dstPath);
97 | const chunks = [];
98 | for await (const chunk of input) {
99 | chunks.push(chunk);
100 | }
101 | const data = Buffer.concat(chunks);
102 | const og = fs.readFileSync('test.txt');
103 | assert(data.equals(og), 'Streamed file is equal to previous upload');
104 | });
105 |
106 | it('S3 streamOut should handle an error status code from S3 sensibly', async function() {
107 | const input = uploadfs.streamOut('made/up/path');
108 | try {
109 | // This should fail
110 | const chunks = [];
111 | for await (const chunk of input) {
112 | chunks.push(chunk);
113 | }
114 | assert(false, 'Should not get here');
115 | } catch (e) {
116 | assert.equal(e.name, 'NoSuchKey');
117 | assert(e.statusCode >= 400, 'Should be a 4xx or 5xx status code');
118 | }
119 | });
120 |
121 | it('S3 CopyOut should work', async function() {
122 | const cpOutPath = 'copy-out-test.txt';
123 | await copyOut(dstPath, cpOutPath);
124 | const dl = fs.readFileSync(cpOutPath, 'utf8');
125 | const og = fs.readFileSync('test.txt', 'utf8');
126 | assert(dl === og, 'Downloaded file is equal to previous upload');
127 | });
128 |
129 | it('S3 Disable / Enable should work as expected', async function() {
130 | await disable(dstPath);
131 | await assert.rejects(testWeb());
132 | await enable(dstPath);
133 | await testWeb();
134 |
135 | async function testWeb() {
136 | const og = fs.readFileSync('test.txt', 'utf8');
137 | const url = uploadfs.getUrl() + dstPath;
138 | const res = await fetch(url, {
139 | method: 'GET',
140 | headers: {
141 | 'Accept-Encoding': 'gzip'
142 | }
143 | });
144 | if (res.status >= 400) {
145 | throw res;
146 | }
147 | const body = await res.text();
148 | assert(res.headers.get('content-type') === 'text/plain', 'Check content-type header');
149 | assert(og === body, 'Downloaded content should be equal to previous upload');
150 | }
151 | });
152 |
153 | it('S3 uploadfs Remove should work', async function() {
154 | await remove(dstPath);
155 | const url = uploadfs.getUrl() + dstPath;
156 | const res = await fetch(url, {
157 | method: 'GET',
158 | headers: {
159 | 'Accept-Encoding': 'gzip'
160 | }
161 | });
162 | assert(res.status >= 400, 'Removed file is gone from s3');
163 | });
164 |
165 | it('S3 uploadfs copyImageIn should work', async function() {
166 | const imgDstPath = '/images/profiles/me';
167 |
168 | const info = await copyImageIn('test.jpg', imgDstPath);
169 | const url = uploadfs.getUrl();
170 | const paths = [ info.basePath + '.jpg' ];
171 |
172 | paths.push(info.basePath + '.small.jpg');
173 | paths.push(info.basePath + '.medium.jpg');
174 | paths.push(info.basePath + '.large.jpg');
175 |
176 | for (const path of paths) {
177 | const imgPath = url + path;
178 | const res = await fetch(imgPath, {
179 | method: 'GET',
180 | headers: {
181 | 'Accept-Encoding': 'gzip'
182 | }
183 | });
184 | assert(res.status === 200);
185 | // Not suitable for images, make sure we didn't force it
186 | assert(res.headers.get('content-encoding') !== 'gzip');
187 | const buffer = await res.buffer();
188 | // JPEG magic number check
189 | assert(buffer[0] === 0xFF);
190 | assert(buffer[1] === 0xD8);
191 | await remove(path);
192 | }
193 | });
194 |
195 | it('S3 uploadfs copyImageIn should work with custom sizes', async function() {
196 | const imgDstPath = '/images/profiles/me';
197 |
198 | const customSizes = [
199 | {
200 | name: 'tiny',
201 | width: 80,
202 | height: 80
203 | },
204 | {
205 | name: 'nice',
206 | width: 120,
207 | height: 120
208 | }
209 | ];
210 |
211 | const info = await copyImageIn('test.jpg', imgDstPath, { sizes: customSizes });
212 |
213 | const url = uploadfs.getUrl();
214 | // Default should be https
215 | assert(url.startsWith('https://'));
216 | const paths = [ info.basePath + '.jpg' ];
217 |
218 | paths.push(info.basePath + '.tiny.jpg');
219 | paths.push(info.basePath + '.nice.jpg');
220 |
221 | for (const path of paths) {
222 | const imgPath = url + path;
223 | const res = await fetch(imgPath, {
224 | method: 'GET',
225 | headers: {
226 | 'Accept-Encoding': 'gzip'
227 | }
228 | });
229 | assert(res.status === 200);
230 | // Not suitable for images, make sure we didn't force it
231 | assert(res.headers.get('content-encoding') !== 'gzip');
232 | const buffer = await res.buffer();
233 | // JPEG magic number check
234 | assert(buffer[0] === 0xFF);
235 | assert(buffer[1] === 0xD8);
236 | await remove(path);
237 | }
238 | });
239 | });
240 |
241 | describe('UploadFS S3 with private ACL', async function () {
242 | this.timeout(50000);
243 | const uploadfs = require('../uploadfs.js')();
244 | const init = util.promisify(uploadfs.init);
245 | const remove = util.promisify(uploadfs.remove);
246 | const copyIn = util.promisify(uploadfs.copyIn);
247 | const copyOut = util.promisify(uploadfs.copyOut);
248 | const enable = util.promisify(uploadfs.enable);
249 | const disable = util.promisify(uploadfs.disable);
250 |
251 | const fs = require('fs');
252 | const tempPath = '../temp';
253 | const dstPath = '/one/two/three/test2.txt';
254 |
255 | const s3Options = {
256 | storage: 's3',
257 | // Usually not set so we get sharp, with imagemagick fallback (the default behavior)
258 | image: process.env.UPLOADFS_TEST_IMAGE,
259 | bucket: process.env.UPLOADFS_TEST_S3_BUCKET,
260 | key: process.env.UPLOADFS_TEST_S3_KEY,
261 | secret: process.env.UPLOADFS_TEST_S3_SECRET,
262 | region: process.env.UPLOADFS_TEST_S3_REGION,
263 | bucketObjectsACL: 'private',
264 | disabledBucketObjectsACL: 'private',
265 | tempPath
266 | };
267 |
268 | before(async function() {
269 | await init(s3Options);
270 | });
271 |
272 | it('test with alternate ACLs', async function() {
273 | await copyIn('test.txt', dstPath);
274 | await testCopyOut();
275 | await assert.rejects(testWeb);
276 | await disable(dstPath);
277 | await assert.rejects(testWeb);
278 | await testCopyOut();
279 | await enable(dstPath);
280 | await assert.rejects(testWeb);
281 | await testCopyOut();
282 | await remove(dstPath);
283 | });
284 |
285 | async function testCopyOut() {
286 | await copyOut(dstPath, `${tempPath}/test2.txt`);
287 | identical('test.txt', `${tempPath}/test2.txt`);
288 | fs.unlinkSync(`${tempPath}/test2.txt`);
289 | }
290 |
291 | async function testWeb() {
292 | const url = uploadfs.getUrl() + dstPath;
293 | const response = await fetch(url);
294 | if (response.status >= 400) {
295 | throw response;
296 | }
297 | }
298 | });
299 |
300 | function identical(f1, f2) {
301 | const data1 = fs.readFileSync(f1);
302 | const data2 = fs.readFileSync(f2);
303 | if (data1.compare(data2) !== 0) {
304 | throw new Error(`${f1} and ${f2} are not identical.`);
305 | }
306 | }
307 |
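308 | /*
309 |  These tests hit a real bucket; the credentials come from the environment.
310 |  A sketch of an invocation, assuming the suite is run with mocha (as the
311 |  describe/it globals suggest) and using placeholder values:
312 |
313 |    UPLOADFS_TEST_S3_BUCKET=my-test-bucket \
314 |    UPLOADFS_TEST_S3_KEY=... \
315 |    UPLOADFS_TEST_S3_SECRET=... \
316 |    UPLOADFS_TEST_S3_REGION=us-east-1 \
317 |    npx mocha test/s3.js
318 | */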
--------------------------------------------------------------------------------
/lib/storage/contentTypes.js:
--------------------------------------------------------------------------------
1 | // An export of known content types from a recent Apache setup.
2 | // This is used by default by s3.js and could be useful in other backends
3 |
4 | module.exports =
5 | {
6 | ez: 'application/andrew-inset',
7 | anx: 'application/annodex',
8 | lin: 'application/bbolin',
9 | cap: 'application/cap',
10 | pcap: 'application/cap',
11 | cu: 'application/cu-seeme',
12 | tsp: 'application/dsptype',
13 | es: 'application/ecmascript',
14 | spl: 'application/x-futuresplash',
15 | hta: 'application/hta',
16 | jar: 'application/java-archive',
17 | ser: 'application/java-serialized-object',
18 | class: 'application/java-vm',
19 | js: 'application/javascript',
20 | m3g: 'application/m3g',
21 | hqx: 'application/mac-binhex40',
22 | cpt: 'image/x-corelphotopaint',
23 | nb: 'application/mathematica',
24 | nbp: 'application/mathematica',
25 | mdb: 'application/msaccess',
26 | doc: 'application/msword',
27 | dot: 'application/msword',
28 | mxf: 'application/mxf',
29 | bin: 'application/octet-stream',
30 | oda: 'application/oda',
31 | ogx: 'application/ogg',
32 | pdf: 'application/pdf',
33 | key: 'application/pgp-keys',
34 | pgp: 'application/pgp-signature',
35 | prf: 'application/pics-rules',
36 | ps: 'application/postscript',
37 | ai: 'application/postscript',
38 | eps: 'application/postscript',
39 | epsi: 'application/postscript',
40 | epsf: 'application/postscript',
41 | eps2: 'application/postscript',
42 | eps3: 'application/postscript',
43 | rar: 'application/rar',
44 | rtf: 'application/rtf',
45 | smi: 'chemical/x-daylight-smiles',
46 | smil: 'application/smil',
47 | xml: 'application/xml',
48 | xsl: 'application/xml',
49 | xsd: 'application/xml',
50 | zip: 'application/zip',
51 | wk: 'application/x-123',
52 | '7z': 'application/x-7z-compressed',
53 | abw: 'application/x-abiword',
54 | dmg: 'application/x-apple-diskimage',
55 | bcpio: 'application/x-bcpio',
56 | torrent: 'application/x-bittorrent',
57 | cab: 'application/x-cab',
58 | cbr: 'application/x-cbr',
59 | cbz: 'application/x-cbz',
60 | cdf: 'application/x-cdf',
61 | cda: 'application/x-cdf',
62 | vcd: 'application/x-cdlink',
63 | pgn: 'application/x-chess-pgn',
64 | cpio: 'application/x-cpio',
65 | csh: 'text/x-csh',
66 | deb: 'application/x-debian-package',
67 | udeb: 'application/x-debian-package',
68 | dcr: 'application/x-director',
69 | dir: 'application/x-director',
70 | dxr: 'application/x-director',
71 | dms: 'application/x-dms',
72 | wad: 'application/x-doom',
73 | dvi: 'application/x-dvi',
74 | rhtml: 'application/x-httpd-eruby',
75 | mm: 'application/x-freemind',
76 | gnumeric: 'application/x-gnumeric',
77 | sgf: 'application/x-go-sgf',
78 | gcf: 'application/x-graphing-calculator',
79 | gtar: 'application/x-gtar',
80 | tgz: 'application/x-gtar',
81 | taz: 'application/x-gtar',
82 | hdf: 'application/x-hdf',
83 | phtml: 'application/x-httpd-php',
84 | pht: 'application/x-httpd-php',
85 | php: 'application/x-httpd-php',
86 | phps: 'application/x-httpd-php-source',
87 | php3: 'application/x-httpd-php3',
88 | php3p: 'application/x-httpd-php3-preprocessed',
89 | php4: 'application/x-httpd-php4',
90 | php5: 'application/x-httpd-php5',
91 | ica: 'application/x-ica',
92 | info: 'application/x-info',
93 | ins: 'application/x-internet-signup',
94 | isp: 'application/x-internet-signup',
95 | iii: 'application/x-iphone',
96 | iso: 'application/x-iso9660-image',
97 | jam: 'application/x-jam',
98 | jnlp: 'application/x-java-jnlp-file',
99 | jmz: 'application/x-jmol',
100 | chrt: 'application/x-kchart',
101 | kil: 'application/x-killustrator',
102 | skp: 'application/x-koan',
103 | skd: 'application/x-koan',
104 | skt: 'application/x-koan',
105 | skm: 'application/x-koan',
106 | kpr: 'application/x-kpresenter',
107 | kpt: 'application/x-kpresenter',
108 | ksp: 'application/x-kspread',
109 | kwd: 'application/x-kword',
110 | kwt: 'application/x-kword',
111 | latex: 'application/x-latex',
112 | lha: 'application/x-lha',
113 | lyx: 'application/x-lyx',
114 | lzh: 'application/x-lzh',
115 | lzx: 'application/x-lzx',
116 | frm: 'application/x-maker',
117 | maker: 'application/x-maker',
118 | frame: 'application/x-maker',
119 | fm: 'application/x-maker',
120 | fb: 'application/x-maker',
121 | book: 'application/x-maker',
122 | fbdoc: 'application/x-maker',
123 | mif: 'chemical/x-mif',
124 | wmd: 'application/x-ms-wmd',
125 | wmz: 'application/x-ms-wmz',
126 | com: 'application/x-msdos-program',
127 | exe: 'application/x-msdos-program',
128 | bat: 'application/x-msdos-program',
129 | dll: 'application/x-msdos-program',
130 | msi: 'application/x-msi',
131 | nc: 'application/x-netcdf',
132 | pac: 'application/x-ns-proxy-autoconfig',
133 | dat: 'application/x-ns-proxy-autoconfig',
134 | nwc: 'application/x-nwc',
135 | o: 'application/x-object',
136 | oza: 'application/x-oz-application',
137 | p7r: 'application/x-pkcs7-certreqresp',
138 | crl: 'application/x-pkcs7-crl',
139 | pyc: 'application/x-python-code',
140 | pyo: 'application/x-python-code',
141 | qgs: 'application/x-qgis',
142 | shp: 'application/x-qgis',
143 | shx: 'application/x-qgis',
144 | qtl: 'application/x-quicktimeplayer',
145 | rpm: 'application/x-redhat-package-manager',
146 | rb: 'application/x-ruby',
147 | sh: 'text/x-sh',
148 | shar: 'application/x-shar',
149 | swf: 'application/x-shockwave-flash',
150 | swfl: 'application/x-shockwave-flash',
151 | scr: 'application/x-silverlight',
152 | sit: 'application/x-stuffit',
153 | sitx: 'application/x-stuffit',
154 | sv4cpio: 'application/x-sv4cpio',
155 | sv4crc: 'application/x-sv4crc',
156 | tar: 'application/x-tar',
157 | tcl: 'text/x-tcl',
158 | gf: 'application/x-tex-gf',
159 | pk: 'application/x-tex-pk',
160 | texinfo: 'application/x-texinfo',
161 | texi: 'application/x-texinfo',
162 | t: 'application/x-troff',
163 | tr: 'application/x-troff',
164 | roff: 'application/x-troff',
165 | man: 'application/x-troff-man',
166 | me: 'application/x-troff-me',
167 | ms: 'application/x-troff-ms',
168 | ustar: 'application/x-ustar',
169 | src: 'application/x-wais-source',
170 | wz: 'application/x-wingz',
171 | crt: 'application/x-x509-ca-cert',
172 | xcf: 'application/x-xcf',
173 | fig: 'application/x-xfig',
174 | xpi: 'application/x-xpinstall',
175 | amr: 'audio/amr',
176 | awb: 'audio/amr-wb',
177 | axa: 'audio/annodex',
178 | au: 'audio/basic',
179 | snd: 'audio/basic',
180 | flac: 'audio/flac',
181 | mid: 'audio/midi',
182 | midi: 'audio/midi',
183 | kar: 'audio/midi',
184 | mpga: 'audio/mpeg',
185 | mpega: 'audio/mpeg',
186 | mp2: 'audio/mpeg',
187 | mp3: 'audio/mpeg',
188 | m4a: 'audio/mpeg',
189 | m3u: 'audio/x-mpegurl',
190 | oga: 'audio/ogg',
191 | ogg: 'audio/ogg',
192 | spx: 'audio/ogg',
193 | aif: 'audio/x-aiff',
194 | aiff: 'audio/x-aiff',
195 | aifc: 'audio/x-aiff',
196 | gsm: 'audio/x-gsm',
197 | wma: 'audio/x-ms-wma',
198 | wax: 'audio/x-ms-wax',
199 | ra: 'audio/x-realaudio',
200 | rm: 'audio/x-pn-realaudio',
201 | ram: 'audio/x-pn-realaudio',
202 | pls: 'audio/x-scpls',
203 | sd2: 'audio/x-sd2',
204 | wav: 'audio/x-wav',
205 | alc: 'chemical/x-alchemy',
206 | cac: 'chemical/x-cache',
207 | cache: 'chemical/x-cache',
208 | csf: 'chemical/x-cache-csf',
209 | cbin: 'chemical/x-cactvs-binary',
210 | cascii: 'chemical/x-cactvs-binary',
211 | ctab: 'chemical/x-cactvs-binary',
212 | cdx: 'chemical/x-cdx',
213 | cer: 'chemical/x-cerius',
214 | c3d: 'chemical/x-chem3d',
215 | chm: 'chemical/x-chemdraw',
216 | cif: 'chemical/x-cif',
217 | cmdf: 'chemical/x-cmdf',
218 | cml: 'chemical/x-cml',
219 | cpa: 'chemical/x-compass',
220 | bsd: 'chemical/x-crossfire',
221 | csml: 'chemical/x-csml',
222 | csm: 'chemical/x-csml',
223 | ctx: 'chemical/x-ctx',
224 | cxf: 'chemical/x-cxf',
225 | cef: 'chemical/x-cxf',
226 | emb: 'chemical/x-embl-dl-nucleotide',
227 | embl: 'chemical/x-embl-dl-nucleotide',
228 | spc: 'chemical/x-galactic-spc',
229 | inp: 'chemical/x-gamess-input',
230 | gam: 'chemical/x-gamess-input',
231 | gamin: 'chemical/x-gamess-input',
232 | fch: 'chemical/x-gaussian-checkpoint',
233 | fchk: 'chemical/x-gaussian-checkpoint',
234 | cub: 'chemical/x-gaussian-cube',
235 | gau: 'chemical/x-gaussian-input',
236 | gjc: 'chemical/x-gaussian-input',
237 | gjf: 'chemical/x-gaussian-input',
238 | gal: 'chemical/x-gaussian-log',
239 | gcg: 'chemical/x-gcg8-sequence',
240 | gen: 'chemical/x-genbank',
241 | hin: 'chemical/x-hin',
242 | istr: 'chemical/x-isostar',
243 | ist: 'chemical/x-isostar',
244 | jdx: 'chemical/x-jcamp-dx',
245 | dx: 'chemical/x-jcamp-dx',
246 | kin: 'chemical/x-kinemage',
247 | mcm: 'chemical/x-macmolecule',
248 | mmd: 'chemical/x-macromodel-input',
249 | mmod: 'chemical/x-macromodel-input',
250 | mol: 'chemical/x-mdl-molfile',
251 | rd: 'chemical/x-mdl-rdfile',
252 | rxn: 'chemical/x-mdl-rxnfile',
253 | sd: 'chemical/x-mdl-sdfile',
254 | sdf: 'chemical/x-mdl-sdfile',
255 | tgf: 'chemical/x-mdl-tgf',
256 | mcif: 'chemical/x-mmcif',
257 | mol2: 'chemical/x-mol2',
258 | b: 'chemical/x-molconn-Z',
259 | gpt: 'chemical/x-mopac-graph',
260 | mop: 'chemical/x-mopac-input',
261 | mopcrt: 'chemical/x-mopac-input',
262 | mpc: 'chemical/x-mopac-input',
263 | zmt: 'chemical/x-mopac-input',
264 | moo: 'chemical/x-mopac-out',
265 | mvb: 'chemical/x-mopac-vib',
266 | asn: 'chemical/x-ncbi-asn1-spec',
267 | prt: 'chemical/x-ncbi-asn1-ascii',
268 | ent: 'chemical/x-pdb',
269 | val: 'chemical/x-ncbi-asn1-binary',
270 | aso: 'chemical/x-ncbi-asn1-binary',
271 | pdb: 'chemical/x-pdb',
272 | ros: 'chemical/x-rosdal',
273 | sw: 'chemical/x-swissprot',
274 | vms: 'chemical/x-vamas-iso14976',
275 | vmd: 'chemical/x-vmd',
276 | xtel: 'chemical/x-xtel',
277 | xyz: 'chemical/x-xyz',
278 | gif: 'image/gif',
279 | ief: 'image/ief',
280 | jpeg: 'image/jpeg',
281 | jpg: 'image/jpeg',
282 | jpe: 'image/jpeg',
283 | pcx: 'image/pcx',
284 | png: 'image/png',
285 | svg: 'image/svg+xml',
286 | tiff: 'image/tiff',
287 | tif: 'image/tiff',
288 | cr2: 'image/x-canon-cr2',
289 | crw: 'image/x-canon-crw',
290 | ras: 'image/x-cmu-raster',
291 | cdr: 'image/x-coreldraw',
292 | pat: 'image/x-coreldrawpattern',
293 | cdt: 'image/x-coreldrawtemplate',
294 | erf: 'image/x-epson-erf',
295 | ico: 'image/x-icon',
296 | art: 'image/x-jg',
297 | jng: 'image/x-jng',
298 | bmp: 'image/x-ms-bmp',
299 | nef: 'image/x-nikon-nef',
300 | orf: 'image/x-olympus-orf',
301 | psd: 'image/x-photoshop',
302 | pnm: 'image/x-portable-anymap',
303 | pbm: 'image/x-portable-bitmap',
304 | pgm: 'image/x-portable-graymap',
305 | ppm: 'image/x-portable-pixmap',
306 | rgb: 'image/x-rgb',
307 | xbm: 'image/x-xbitmap',
308 | xpm: 'image/x-xpixmap',
309 | xwd: 'image/x-xwindowdump',
310 | eml: 'message/rfc822',
311 | igs: 'model/iges',
312 | iges: 'model/iges',
313 | msh: 'model/mesh',
314 | mesh: 'model/mesh',
315 | silo: 'model/mesh',
316 | wrl: 'x-world/x-vrml',
317 | vrml: 'x-world/x-vrml',
318 | manifest: 'text/cache-manifest',
319 | ics: 'text/calendar',
320 | icz: 'text/calendar',
321 | css: 'text/css',
322 | csv: 'text/csv',
323 | 323: 'text/h323',
324 | html: 'text/html',
325 | htm: 'text/html',
326 | shtml: 'text/html',
327 | uls: 'text/iuls',
328 | mml: 'text/mathml',
329 | asc: 'text/plain',
330 | txt: 'text/plain',
331 | text: 'text/plain',
332 | pot: 'text/plain',
333 | brf: 'text/plain',
334 | rtx: 'text/richtext',
335 | sct: 'text/scriptlet',
336 | wsc: 'text/scriptlet',
337 | tm: 'text/texmacs',
338 | ts: 'text/texmacs',
339 | tsv: 'text/tab-separated-values',
340 | bib: 'text/x-bibtex',
341 | boo: 'text/x-boo',
342 | h: 'text/x-chdr',
343 | htc: 'text/x-component',
344 | c: 'text/x-csrc',
345 | d: 'text/x-dsrc',
346 | diff: 'text/x-diff',
347 | patch: 'text/x-diff',
348 | hs: 'text/x-haskell',
349 | java: 'text/x-java',
350 | lhs: 'text/x-literate-haskell',
351 | moc: 'text/x-moc',
352 | p: 'text/x-pascal',
353 | pas: 'text/x-pascal',
354 | gcd: 'text/x-pcs-gcd',
355 | pl: 'text/x-perl',
356 | pm: 'text/x-perl',
357 | py: 'text/x-python',
358 | scala: 'text/x-scala',
359 | etx: 'text/x-setext',
360 | tk: 'text/x-tcl',
361 | tex: 'text/x-tex',
362 | ltx: 'text/x-tex',
363 | sty: 'text/x-tex',
364 | cls: 'text/x-tex',
365 | vcs: 'text/x-vcalendar',
366 | vcf: 'text/x-vcard',
367 | '3gp': 'video/3gpp',
368 | axv: 'video/annodex',
369 | dl: 'video/dl',
370 | dif: 'video/dv',
371 | dv: 'video/dv',
372 | fli: 'video/fli',
373 | gl: 'video/gl',
374 | mpeg: 'video/mpeg',
375 | mpg: 'video/mpeg',
376 | mpe: 'video/mpeg',
377 | mp4: 'video/mp4',
378 | qt: 'video/quicktime',
379 | mov: 'video/quicktime',
380 | ogv: 'video/ogg',
381 | flv: 'video/x-flv',
382 | lsf: 'video/x-la-asf',
383 | lsx: 'video/x-la-asf',
384 | mng: 'video/x-mng',
385 | asf: 'video/x-ms-asf',
386 | asx: 'video/x-ms-asf',
387 | wm: 'video/x-ms-wm',
388 | wmv: 'video/x-ms-wmv',
389 | wmx: 'video/x-ms-wmx',
390 | wvx: 'video/x-ms-wvx',
391 | avi: 'video/x-msvideo',
392 | movie: 'video/x-sgi-movie',
393 | mpv: 'video/x-matroska',
394 | mkv: 'video/x-matroska',
395 | ice: 'x-conference/x-cooltalk',
396 | sisx: 'x-epoc/x-sisx-app',
397 | vrm: 'x-world/x-vrml',
398 | webm: 'video/webm',
399 | gz: 'application/gzip'
400 | };
401 |
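402 | // Deployments can extend or override these defaults via the `contentTypes`
403 | // init option, which s3.js spreads over this map. A sketch:
404 | //
405 | //   uploadfs.init({ storage: 's3', /* ... */ contentTypes: { md: 'text/markdown' } }, cb);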
--------------------------------------------------------------------------------
/lib/image/imagemagick.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 | // Invoke the ImageMagick command-line tools directly, parsing their output
3 | // ourselves, since node-imagemagick has been abandoned and third-party
4 | // wrappers are unmaintained. We use our own custom command lines for
5 | // drastically better performance and memory usage. Animated GIFs can
6 | // optionally be handled by gifsicle instead (see the gifsicle option).
7 | const childProcess = require('child_process');
8 | const _ = require('lodash');
9 | const async = require('async');
10 | const util = require('util');
11 | const execFile = util.promisify(childProcess.execFile);
12 |
13 | module.exports = function() {
14 | let options;
15 | const self = {
16 | /**
17 | * Initialize the module. If _options.gifsicle is true, use gifsicle to manipulate
18 | * animated GIFs
19 | */
20 | init: function(_options, callback) {
21 | options = _options;
22 | return callback(null);
23 | },
24 |
25 | destroy: function(callback) {
26 | // No file descriptors or timeouts held
27 | return callback(null);
28 | },
29 |
30 | /**
31 | * Identify a local image file.
32 | *
33 | * If the file is not an image or is too defective to be identified an error is
34 | * passed to the callback.
35 | *
36 | * Otherwise the second argument to the callback is guaranteed to have extension,
37 | * width, height, orientation, originalWidth and originalHeight properties.
38 | * extension will be gif, jpg or png and is detected from the file's true contents,
39 | * not the original file extension. With the imagemagick backend, width and height
40 | * are automatically rotated to TopLeft orientation while originalWidth and
41 | * originalHeight are not.
42 | *
43 | * If the orientation property is not explicitly set in the file it will be set to
44 | * 'Undefined'.
45 | *
46 | * Any other properties returned are dependent on the version of ImageMagick used
47 | * and are not guaranteed.
48 | *
49 | * @param {String} path Local filesystem path to image file
50 | * @param {Function} callback Receives the usual err argument, followed by an
51 | * object with extension, width, height, orientation, originalWidth,
52 | * originalHeight and animated properties. Any other properties depend on the backend
53 | * in use and are not guaranteed
54 | *
55 | * @see Uploadfs#copyImageIn
56 | */
57 |
58 | async identify(path, callback) {
59 | try {
60 | const info = await getProperties(path);
61 | if (info.extension === 'gif') {
62 | info.animated = await getAnimated(path);
63 | } else {
64 | info.animated = false;
65 | }
66 | return callback(null, info);
67 | } catch (e) {
68 | return callback(e);
69 | }
70 |
71 | async function getProperties(path) {
72 | // Parse identify output ourselves to avoid using unmaintained third party wrappers. -Tom
73 | const { stdout } = await execFile('identify', [ '-verbose', path ], { encoding: 'utf8' });
74 | const parsed = Object.fromEntries(stdout.split('\n').filter(line => line.trim().includes(': ')).map(line => {
75 | const cat = line.indexOf(':');
76 | return [ line.substring(0, cat).trim(), line.substring(cat + 1).trim() ];
77 | }));
78 | const format = parsed.Format.toLowerCase().split(' ')[0];
79 | const geometry = parsed.Geometry.match(/^(\d+)x(\d+)/);
80 | const info = {
81 | originalWidth: parseInt(geometry[1]),
82 | originalHeight: parseInt(geometry[2]),
83 | orientation: parsed.Orientation
84 | };
85 | if (format === 'jpeg') {
86 | info.extension = 'jpg';
87 | } else {
88 | info.extension = format;
89 | }
90 | const o = info.orientation;
91 | if ((o === 'LeftTop') || (o === 'RightTop') || (o === 'RightBottom') || (o === 'LeftBottom')) {
92 | info.width = info.originalHeight;
93 | info.height = info.originalWidth;
94 | } else {
95 | info.width = info.originalWidth;
96 | info.height = info.originalHeight;
97 | }
98 | return info;
99 | }
100 |
101 | async function getAnimated(path) {
102 | const { stdout } = await execFile('identify', [ '-format', '%n', path ], { encoding: 'utf8' });
103 | const frames = parseInt(stdout, 10);
104 | return frames > 1;
105 | }
106 | },
107 |
108 | /**
109 | * Generate one or more scaled versions of an image file.
110 | *
111 | * INPUT
112 | *
113 | * The options that may be passed in the context object are:
114 | *
115 | * workingPath: path to the original file (required)
116 | *
117 | * extension: true file extension of original file as
118 | * determined by a previous call to identify (required).
119 | *
120 | * info.width, info.height: should be provided as other backends may require
121 | * them, however the imagemagick backend does not need to consult them.
122 | *
123 | * sizes (required): array of objects with width and height
124 | * properties which are treated as maximums for each axis; the resulting image
125 | * will never exceed the original size, and will otherwise be scaled to
126 | * fill as much of the requested size as possible without changing the aspect
127 | * ratio. Files are generated in the temp folder with a filename made up of the
128 | * name property of the size, a '.', and the extension property of the
129 | * context object.
130 | *
131 | * tempFolder: folder where the scaled versions should be written
132 | * (required)
133 | *
134 | * crop: optional object with top, left, width and height properties
135 | *
136 | * scaledJpegQuality: quality setting for JPEGs (optional; otherwise
137 | * you get whatever default was compiled into imagemagick)
138 | *
139 | * copyOriginal: if true, copy the "original" image to the tempFolder too,
140 | * but do auto-orient it so that iPhone photos etc. work on the web
141 | *
142 | * All images, including the "original" if copyOriginal is set, are
143 | * auto-rotated to the orientation expected by web browsers.
144 | *
145 | * OUTPUT
146 | *
147 | * After the operation is complete, the following property of the
148 | * context object will be set if the copyOriginal option was set:
149 | *
150 | * adjustedOriginal: will contain the local filesystem path where the
151 | * original was copied (and rotated, if needed).
152 | *
153 | * @param {Object} context as described above (workingPath, extension, sizes, etc.)
154 | * @param {Function} callback receives the usual err argument
155 | * @return {void} completion is signaled via the callback
156 | */
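  // Illustrative context object (size names and paths are hypothetical,
  // not from the original source):
  //
  // convert({
  //   workingPath: '/tmp/upload123',
  //   extension: 'jpg',
  //   info: { width: 4000, height: 3000, animated: false },
  //   sizes: [
  //     { name: 'full', width: 1140, height: 1140 },
  //     { name: 'thumb', width: 120, height: 120 }
  //   ],
  //   tempFolder: '/tmp/uploadfs-work',
  //   copyOriginal: true,
  //   scaledJpegQuality: 80
  // }, function(err) { /* scaled versions now exist in tempFolder */ });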
157 |
158 | convert: function(context, callback) {
159 | if (context.info.animated) {
160 | if (options.gifsicle) {
161 | return convertAnimatedGifsicle(context, callback);
162 | } else {
163 | return convertAnimated(context, callback);
164 | }
165 | } else {
166 | return convertStandard(context, callback);
167 | }
168 |
169 | // Animated GIF strategy based on gifsicle. gifsicle doesn't hit RAM limits
170 | // when confronted with huge animated GIFs, but it does tend to make files
171 | // bigger and doesn't resize quite as well. Tradeoffs are part of life
172 |
173 | function convertAnimatedGifsicle(context, callback) {
174 | const crop = context.crop;
175 | const imageSizes = context.sizes;
176 | const baseArgs = [];
177 | if (crop) {
178 | baseArgs.push('--crop');
179 | baseArgs.push(crop.left + ',' + crop.top + '+' + crop.width + 'x' + crop.height);
180 | }
181 | baseArgs.push(context.workingPath);
182 | return async.series([ convertOriginal, convertSizes ], callback);
183 | function convertOriginal(callback) {
184 | if (!context.copyOriginal) {
185 | return setImmediate(callback);
186 | }
187 | const path = context.tempFolder + '/original.' + context.extension;
188 | context.adjustedOriginal = path;
189 | const args = baseArgs.slice();
190 | args.push('--optimize');
191 | args.push('-o');
192 | args.push(path);
193 | return spawnThen('gifsicle', args, callback);
194 | }
195 | function convertSizes(callback) {
196 | return async.eachSeries(imageSizes, convertSize, callback);
197 | }
198 | function convertSize(size, callback) {
199 | const args = baseArgs.slice();
200 | args.push('--resize');
201 | // "Largest that fits in the box" is not a built-in feature of gifsicle, so we do the math
202 | const originalWidth = (crop && crop.width) || context.info.width;
203 | const originalHeight = (crop && crop.height) || context.info.height;
204 |         let width = Math.min(size.width, originalWidth);
205 |         let height = Math.round(width * originalHeight / originalWidth);
206 |         if (height > size.height) {
207 |           height = Math.min(size.height, originalHeight);
208 |           width = Math.round(height * originalWidth / originalHeight);
209 |         }
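        // Worked example (illustrative): a 2000x1000 original in a 500x500 box
        // becomes 500x250, a 1000x2000 original becomes 250x500, and a 300x200
        // original stays 300x200 (no upscaling).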
210 | args.push(width + 'x' + height);
211 | args.push('--optimize');
212 | args.push('-o');
213 | const suffix = size.name + '.' + context.extension;
214 | const tempFile = context.tempFolder + '/' + suffix;
215 | args.push(tempFile);
216 | return spawnThen('gifsicle', args, callback);
217 | }
218 | }
219 |
220 | // Separate animated GIF strategy is back because of tests in which (1) we
221 | // suffered image damage (which could possibly be addressed with -coalesce)
222 | // and (2) imagemagick inexplicably took 4x longer in some cases with the
223 | // single pipeline (which couldn't be addressed without a new approach).
224 | // This is why we don't just rely on -clone 0--1 and a single pipeline. -Tom
225 |
226 | function convertAnimated(context, callback) {
227 | const crop = context.crop;
228 | const imageSizes = context.sizes;
229 | const baseArgs = [];
230 | baseArgs.push(context.workingPath);
231 | // Convert to filmstrip so cropping and resizing
232 | // don't behave strangely
233 | baseArgs.push('-coalesce');
234 | baseArgs.push('-auto-orient');
235 | if (crop) {
236 | baseArgs.push('-crop');
237 | baseArgs.push(crop.width + 'x' + crop.height + '+' + crop.left + '+' + crop.top);
238 | baseArgs.push('+repage');
239 | }
240 | return async.series([ convertOriginal, convertSizes ], callback);
241 | function convertOriginal(callback) {
242 | if (!context.copyOriginal) {
243 | return setImmediate(callback);
244 | }
245 | const path = context.tempFolder + '/original.' + context.extension;
246 | context.adjustedOriginal = path;
247 | const args = baseArgs.slice();
248 | args.push('-layers');
249 | args.push('Optimize');
250 | args.push(path);
251 | return spawnThen('convert', args, callback);
252 | }
253 | function convertSizes(callback) {
254 | return async.eachSeries(imageSizes, convertSize, callback);
255 | }
256 | function convertSize(size, callback) {
257 | const args = baseArgs.slice();
258 | args.push('-resize');
259 | args.push(size.width + 'x' + size.height + '>');
260 | args.push('-layers');
261 | args.push('Optimize');
262 | const suffix = size.name + '.' + context.extension;
263 | const tempFile = context.tempFolder + '/' + suffix;
264 | args.push(tempFile);
265 | return spawnThen('convert', args, callback);
266 | }
267 | }
268 |
269 | function convertStandard(context, callback) {
270 | // For performance we build our own imagemagick command which tackles all the
271 | // sizes in one run, avoiding redundant loads. We also scale to the largest
272 | // size we really want first and use that as a basis for all others, without
273 | // any lossy intermediate files, which is an even bigger win.
274 | //
275 | const args = [];
276 | const crop = context.crop;
277 | const imageSizes = context.sizes;
278 | args.push(context.workingPath);
279 | args.push('-auto-orient');
280 | if (crop) {
281 | args.push('-crop');
282 | args.push(crop.width + 'x' + crop.height + '+' + crop.left + '+' + crop.top);
283 | args.push('+repage');
284 | }
285 | if (context.extension === 'jpg') {
286 | // Always convert to a colorspace all browsers understand.
287 | // CMYK will flat out fail in IE8 for instance
288 | args.push('-colorspace');
289 | args.push('sRGB');
290 | }
291 |
292 | if (context.copyOriginal) {
293 | context.adjustedOriginal = context.tempFolder + '/original.' + context.extension;
294 | args.push('(');
295 | args.push('-clone');
296 | args.push('0--1');
297 | args.push('-write');
298 | args.push(context.adjustedOriginal);
299 | args.push('+delete');
300 | args.push(')');
301 | }
302 |
303 | // Make sure we strip metadata before we get to scaled versions as
304 | // some files have ridiculously huge metadata
305 | args.push('-strip');
306 |
307 | // After testing this with several sets of developer eyeballs, we've
308 | // decided it is kosher to resample to the largest size we're
309 | // interested in keeping, then sample down from there. Since we
310 | // do it all inside imagemagick without creating any intermediate
311 | // lossy files, there is no quality loss, and the speed benefit is
312 | // yet another 2x win! Hooray!
313 | let maxWidth = 0;
314 | let maxHeight = 0;
315 | _.each(imageSizes, function(size) {
316 | if (size.width > maxWidth) {
317 | maxWidth = size.width;
318 | }
319 | if (size.height > maxHeight) {
320 | maxHeight = size.height;
321 | }
322 | });
323 | if (maxWidth && maxHeight) {
324 | args.push('-resize');
325 | args.push(maxWidth + 'x' + maxHeight + '>');
326 | }
327 |
328 | const resizedPaths = [];
329 |
330 | _.each(imageSizes, function(size) {
331 | args.push('(');
332 | args.push('-clone');
333 | args.push('0--1');
334 | args.push('-resize');
335 | args.push(size.width + 'x' + size.height + '>');
336 | if (context.scaledJpegQuality && (context.extension === 'jpg')) {
337 | args.push('-quality');
338 | args.push(context.scaledJpegQuality);
339 | }
340 | args.push('-write');
341 | const suffix = size.name + '.' + context.extension;
342 | const tempFile = context.tempFolder + '/' + suffix;
343 | resizedPaths.push(tempFile);
344 | args.push(tempFile);
345 | args.push('+delete');
346 | args.push(')');
347 | });
348 |
349 | // We don't care about the official output, which would be the
350 | // intermediate scaled version of the image. Use imagemagick's
351 | // official null format
352 |
353 | args.push('null:');
354 |
355 | return spawnThen('convert', args, callback);
356 | }
357 |
358 | function spawnThen(cmd, args, callback) {
359 | // console.log(cmd + ' ' + args.join(' ').replace(/[^\w\-\ ]/g, function(c) {
360 | // return '\\' + c;
361 | // }));
362 | return childProcess.execFile(cmd, args, function(err) {
363 | if (err) {
364 | return callback(err);
365 | }
366 | return callback(null);
367 | });
368 | }
369 | }
370 | };
371 | return self;
372 | };
373 |
--------------------------------------------------------------------------------
/lib/storage/azure.js:
--------------------------------------------------------------------------------
1 | const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
2 | const contentTypes = require('./contentTypes');
3 | const extname = require('path').extname;
4 | const fs = require('fs');
5 | const zlib = require('zlib');
6 | const async = require('async');
7 | const utils = require('../utils.js');
8 | const defaultGzipBlacklist = require('../../defaultGzipBlacklist');
9 | const verbose = false;
10 | const _ = require('lodash');
11 |
12 | const DEFAULT_MAX_AGE_IN_SECONDS = 500;
13 | const DEFAULT_MAX_CACHE = 2628000;
14 |
15 | /**
16 | * @typedef {{ svc: BlobServiceClient, container: string }} BlobSvc
17 | *
18 | * @param {BlobSvc} blob
19 | * @param {string} src
20 | * @param {string} dst
21 | * @param {Function} callback
22 | */
23 | function copyBlob(blob, src, dst, callback) {
24 | const srcClient = blob.svc.getContainerClient(blob.container).getBlobClient(src);
25 | const dstClient = blob.svc.getContainerClient(blob.container).getBlobClient(dst);
26 | dstClient.beginCopyFromURL(srcClient.url)
27 | .then((response) => {
28 | if (response.errorCode) {
29 | return callback(response.errorCode);
30 | }
31 | return callback(null, response);
32 | })
33 | .catch(callback);
34 | }
35 |
36 | function __log() {
37 | if (verbose) {
38 | console.error(arguments);
39 | }
40 | }
41 |
42 | /**
43 | * Set the main properties of the selected container.
44 | * @param {BlobSvc['svc']} blobSvc Azure service object
45 | * @param {Object} options Options passed to UploadFS library
46 | * @param {Object} result Service Properties
47 | * @param {Function} callback Callback to be called when operation is terminated
48 | * @return {any} Return the service which has been initialized
49 | */
50 | function setContainerProperties(blobSvc, options, result, callback) {
51 | // Backward compatibility
52 | function propToString(prop) {
53 | if (Array.isArray(prop)) {
54 | return prop.join(',');
55 | }
56 | return prop;
57 | }
58 | blobSvc.getProperties()
59 | .then((response) => {
60 | if (response.errorCode) {
61 | return callback(response.errorCode);
62 | }
63 | const serviceProperties = response;
64 | const allowedOrigins = propToString(options.allowedOrigins) || '*';
65 | const allowedMethods = propToString(options.allowedMethods) || 'GET,PUT,POST';
66 | const allowedHeaders = propToString(options.allowedHeaders) || '*';
67 | const exposedHeaders = propToString(options.exposedHeaders) || '*';
68 | const maxAgeInSeconds = options.maxAgeInSeconds || DEFAULT_MAX_AGE_IN_SECONDS;
69 |
70 | serviceProperties.cors = [
71 | {
72 | allowedOrigins,
73 | allowedMethods,
74 | allowedHeaders,
75 | exposedHeaders,
76 | maxAgeInSeconds
77 | }
78 | ];
79 |
80 | blobSvc.setProperties(serviceProperties)
81 | .then((response) => {
82 | if (response.errorCode) {
83 | return callback(response.errorCode);
84 | }
85 | return callback(null, blobSvc);
86 | })
87 | .catch(callback);
88 | })
89 | .catch(callback);
90 | }
91 |
92 | /**
93 | * Initialize the container ACLs
94 | * @param {BlobSvc['svc']} blobSvc Azure Service object
95 | * @param {String} container Container name
96 | * @param {Object} options Options passed to UploadFS library
97 | * @param {Function} callback Callback to be called when operation is terminated
98 | * @return {any} Returns the result of `setContainerProperties`
99 | */
100 | function initializeContainer(blobSvc, container, options, callback) {
101 | blobSvc.getContainerClient(container)
102 | .setAccessPolicy('blob')
103 | .then((response) => {
104 | if (response.errorCode) {
105 | return callback(response.errorCode);
106 | }
107 | return setContainerProperties(blobSvc, options, response, callback);
108 | })
109 | .catch(callback);
110 | }
111 |
112 | /**
113 | * Create an Azure Container
114 | * @param {Object} cluster Azure Cluster Info
115 | * @param {Object} options Options passed to UploadFS library
116 | * @param {Function} callback Callback to be called when operation is terminated
117 | * @return {any} Returns the initialized service
118 | */
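// Illustrative cluster configurations (placeholder values):
//   shared key: { account: 'myaccount', key: '<storage key>', container: 'media' }
//   SAS token:  { account: 'myaccount', sas: true, key: '<sas token>', container: 'media' }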
119 | function createContainer(cluster, options, callback) {
120 | let blobSvc;
121 | if (cluster.sas) {
122 | // https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/storage/storage-blob#with-sas-token
123 | blobSvc = new BlobServiceClient(
124 | `https://${cluster.account}.blob.core.windows.net?${cluster.key}`
125 | );
126 | } else {
127 | // https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/storage/storage-blob#with-storagesharedkeycredential
128 | const sharedKeyCredential = new StorageSharedKeyCredential(
129 | cluster.account,
130 | cluster.key
131 | );
132 | blobSvc = new BlobServiceClient(
133 | `https://${cluster.account}.blob.core.windows.net`,
134 | sharedKeyCredential
135 | );
136 | }
137 | const container = cluster.container || options.container;
138 | blobSvc.uploadfsInfo = {
139 | account: cluster.account,
140 |     container: cluster.container || options.container
141 | };
142 | blobSvc.getContainerClient(container)
143 | .createIfNotExists()
144 | .then((response) => {
145 | if (response.errorCode && response.errorCode !== 'ContainerAlreadyExists') {
146 | return callback(response.errorCode);
147 | }
148 | return initializeContainer(blobSvc, container, options, callback);
149 | })
150 | .catch(callback);
151 | }
152 |
153 | /**
154 | * Deletes a local file from its path
155 | * @param {String} path File path
156 | * @param {Function} callback Callback to be called when operation is terminated
157 | * @return Always null
158 | */
159 | function removeLocalBlob(path, callback) {
160 | fs.unlink(path, function(error) {
161 | return callback(error);
162 | });
163 | }
164 |
165 | /**
166 | * Send a binary file to a specified container and a specified service
167 | * @param {BlobSvc} blob Azure Service info and container
168 | * @param {String} path Remote path
169 |  * @param {String} localPath Local file path
 * @param {Boolean} _gzip Whether the local file is gzip-compressed
170 | * @param {Function} callback Callback to be called when operation is terminated
171 | * @return {any} Result of the callback
172 | */
173 | function createContainerBlob(blob, path, localPath, _gzip, callback) {
174 | // Draw the extension from uploadfs, where we know they will be using
175 | // reasonable extensions, not from what could be a temporary file
176 | // that came from the gzip code. -Tom
177 | const extension = extname(path).substring(1);
178 | const contentSettings = {
179 | cacheControl: `max-age=${DEFAULT_MAX_CACHE}, public`,
180 | // contentEncoding: _gzip ? 'gzip' : 'deflate',
181 | contentType: contentTypes[extension] || 'application/octet-stream'
182 | };
183 | if (_gzip) {
184 | contentSettings.contentEncoding = 'gzip';
185 | }
186 | blob.svc.getContainerClient(blob.container)
187 | .getBlobClient(path)
188 | .getBlockBlobClient()
189 | .uploadFile(localPath, {
190 | blobHTTPHeaders: {
191 | blobCacheControl: contentSettings.cacheControl,
192 | blobContentType: contentSettings.contentType,
193 | blobContentEncoding: contentSettings.contentEncoding
194 | }
195 | })
196 | .then((response) => {
197 | if (response.errorCode) {
198 | return callback(response.errorCode);
199 | }
200 | return callback(null);
201 | })
202 | .catch(callback);
203 | }
204 |
205 | /**
206 | * Remove remote container binary file
207 | * @param {BlobSvc} blob Azure Service info and container
208 | * @param {String} path Remote file path
209 | * @param {Function} callback Callback to be called when operation is terminated
210 | * @return {any} Result of the callback
211 | */
212 | function removeContainerBlob(blob, path, callback) {
213 | blob.svc.getContainerClient(blob.container)
214 | .getBlobClient(path)
215 | .deleteIfExists()
216 | .then((response) => {
217 | if (response.errorCode && response.errorCode !== 'BlobNotFound') {
218 |           __log('Cannot delete ' + path + ' on container ' + blob.container + ': ' + response.errorCode);
219 | return callback(response.errorCode);
220 | }
221 | return callback(null);
222 | })
223 | .catch(callback);
224 | }
225 |
226 | // If err is truthy, annotate it with the account and container name
227 | // for the cluster or blobSvc passed, so that error messages can be
228 | // used to effectively debug the right cluster in a replication scenario.
229 | // 'all' can also be passed to indicate all replicas were tried.
230 |
231 | function clusterError(cluster, err) {
232 | // Accept a blobSvc (which acts for a cluster) or a cluster config object,
233 | // for convenience
234 | cluster = (cluster.svc && cluster.svc.uploadfsInfo) || cluster;
235 | if (!err) {
236 | // Pass through if there is no error, makes this easier to use succinctly
237 | return err;
238 | }
239 | // Allow clusters to be distinguished in error messages. Also report
240 | // the case where everything was tried (copyOut)
241 | if (cluster === 'all') {
242 | err.account = 'ALL';
243 | err.container = 'ALL';
244 | } else {
245 | err.account = cluster.account;
246 | err.container = cluster.container;
247 | }
248 | return err;
249 | }
250 |
251 | module.exports = function() {
252 |
253 | const self = {
254 | blobSvcs: [],
255 | init: function(options, callback) {
256 | if (!options.disabledFileKey) {
257 | return callback(new Error('You must set the disabledFileKey option to a random string when using the azure storage backend.'));
258 | }
259 | this.options = options;
260 | self.gzipBlacklist = self.getGzipBlacklist(options.gzipEncoding || {});
261 |
262 | if (!options.replicateClusters ||
263 | (!Array.isArray(options.replicateClusters)) || (!options.replicateClusters[0])
264 | ) {
265 | options.replicateClusters = [];
266 | options.replicateClusters.push({
267 | account: options.account,
268 | key: options.key,
269 | container: options.container
270 | });
271 | }
272 | async.each(options.replicateClusters, function(cluster, callback) {
273 | createContainer(cluster, options, function(err, svc) {
274 | if (err) {
275 | return callback(clusterError(cluster, err));
276 | }
277 |
278 | self.blobSvcs.push({
279 | svc,
280 | container: cluster.container || options.container
281 | });
282 |
283 | return callback();
284 | });
285 | }, callback);
286 | },
287 |
288 | // Implementation detail. Used when stream-based copies fail.
289 | //
290 | // Cleans up the streams and temporary files (which can be null),
291 | // then delivers err to the callback unless something goes wrong in the cleanup itself
292 | // in which case that error is delivered.
293 |
294 | cleanupStreams: function (
295 | inputStream, outputStream, tempPath, tempPath2, err, callback
296 | ) {
297 | async.parallel({
298 | unlink: function(callback) {
299 | if (!tempPath) {
300 | return callback(null);
301 | }
302 | removeLocalBlob(tempPath, callback);
303 | },
304 |
305 | unlink2: function(callback) {
306 | if (!tempPath2) {
307 | return callback(null);
308 | }
309 | removeLocalBlob(tempPath2, callback);
310 | },
311 |
312 | closeReadStream: function(callback) {
313 | inputStream.destroy();
314 | callback();
315 | },
316 |
317 | closeWriteStream: function(callback) {
318 | outputStream.destroy();
319 | callback();
320 | }
321 | }, cleanupError => {
322 | if (err) {
323 | return callback(err);
324 | }
325 | return callback(cleanupError);
326 | });
327 | },
328 |
329 | copyIn: function(localPath, _path, options, callback) {
330 | if (!self.blobSvcs.length) {
331 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
332 | }
333 | const fileExt = localPath.split('.').pop();
334 | const path = _path[0] === '/' ? _path.slice(1) : _path;
335 | const tmpFileName = Math.random().toString(36).substring(7);
336 | let tempPath = this.options.tempPath + '/' + tmpFileName;
337 | // options optional
338 | if (!callback) {
339 | callback = options;
340 | }
341 |
342 | if (self.shouldGzip(fileExt)) {
343 | return self.doGzip(localPath, path, tempPath, callback);
344 | } else {
345 | tempPath = localPath; // we don't have a temp path for non-gzipped files
346 | return self.createContainerBlobs(localPath, path, tempPath, false, callback);
347 | }
348 | },
349 |
350 | createContainerBlobs: function(localPath, path, tempPath, _gzip, callback) {
351 | async.each(self.blobSvcs, function(blobSvc, callback) {
352 | createContainerBlob(blobSvc, path, tempPath, _gzip, function(createBlobErr) {
353 | return callback(clusterError(blobSvc, createBlobErr));
354 | });
355 | }, function(err) {
356 | return callback(err);
357 | });
358 | },
359 |
360 | doGzip: function(localPath, path, tempPath, callback) {
361 | const inp = fs.createReadStream(localPath);
362 | const out = fs.createWriteStream(tempPath);
363 | let hasError = false;
364 |
365 | inp.on('error', function(inpErr) {
366 | __log('Error in read stream', inpErr);
367 | if (!hasError) {
368 | hasError = true;
369 | return self.cleanupStreams(inp, out, tempPath, null, inpErr, callback);
370 | }
371 | });
372 |
373 | out.on('error', function(outErr) {
374 | if (!hasError) {
375 | hasError = true;
376 | return self.cleanupStreams(inp, out, tempPath, null, outErr, callback);
377 | }
378 | });
379 |
380 | out.on('finish', function() {
381 | self.createContainerBlobs(localPath, path, tempPath, true, callback);
382 | });
383 | const gzip = zlib.createGzip();
384 | inp.pipe(gzip).pipe(out);
385 | },
386 |
387 | shouldGzip: function(ext) {
388 | return !self.gzipBlacklist.includes(ext);
389 | },
390 |
391 | // Tries all replicas before giving up
392 | copyOut: function(path, localPath, options, callback) {
393 | if (!self.blobSvcs.length) {
394 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
395 | }
396 | let index = 0;
397 | return attempt();
398 |
399 | function attempt(lastErr) {
400 | if (index >= self.blobSvcs.length) {
401 | return callback(clusterError('all', lastErr));
402 | }
403 | /** @type {BlobSvc} */
404 | const blob = self.blobSvcs[index++];
405 | path = path[0] === '/' ? path.slice(1) : path;
406 | // Temporary name until we know if it is gzipped.
407 | const initialPath = localPath + '.initial';
408 |
409 | return blob.svc.getContainerClient(blob.container)
410 | .getBlobClient(path)
411 | .downloadToFile(initialPath)
412 | .then((response) => {
413 | if (response.errorCode) {
414 | return attempt(response.errorCode);
415 | }
416 |           // bc: provide both property names for older callers
417 | const returnVal = {
418 | result: response,
419 | response
420 | };
421 | if (response.contentEncoding === 'gzip') {
422 | // Now we know we need to unzip it.
423 | return gunzipBlob();
424 | } else {
425 | // Simple rename, because it was not gzipped after all.
426 | fs.renameSync(initialPath, localPath);
427 | return callback(null, response);
428 | }
429 |
430 | function gunzipBlob() {
431 | const out = fs.createWriteStream(localPath);
432 | const inp = fs.createReadStream(initialPath);
433 | const gunzip = zlib.createGunzip();
434 | let errorSeen = false;
435 | inp.pipe(gunzip);
436 | gunzip.pipe(out);
437 | inp.on('error', function(e) {
438 | fail(e);
439 | });
440 | gunzip.on('error', function(e) {
441 | fail(e);
442 | });
443 | out.on('error', function(e) {
444 | fail(e);
445 | });
446 | out.on('finish', function() {
447 | fs.unlinkSync(initialPath);
448 | return callback(null, returnVal);
449 | });
450 | function fail(e) {
451 | if (errorSeen) {
452 | return;
453 | }
454 | errorSeen = true;
455 | return self.cleanupStreams(inp, out, initialPath, localPath, e, callback);
456 | }
457 | }
458 | })
459 | .catch(attempt);
460 | }
461 | },
462 |
463 | remove: function(path, callback) {
464 | if (!self.blobSvcs.length) {
465 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
466 | }
467 | path = path[0] === '/' ? path.slice(1) : path;
468 |
469 | async.each(self.blobSvcs, function(blobSvc, callback) {
470 | removeContainerBlob(blobSvc, path, callback);
471 | }, callback);
472 | },
473 |
474 | disable: function(path, callback) {
475 | if (!self.blobSvcs.length) {
476 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
477 | }
478 | const dPath = utils.getDisabledPath(path, self.options.disabledFileKey);
479 | async.each(self.blobSvcs, function(blob, callback) {
480 | copyBlob(blob, path, dPath, function(e) {
481 | // if copy fails, abort
482 | if (e) {
483 | return callback(clusterError(blob, e));
484 | } else {
485 | // otherwise, remove original file (azure does not currently
486 | // support rename operations, so we dance)
487 | self.remove(path, callback);
488 | }
489 | });
490 | }, function(err) {
491 | callback(err);
492 | });
493 | },
494 |
495 | enable: function(path, callback) {
496 | if (!self.blobSvcs.length) {
497 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
498 | }
499 | const dPath = utils.getDisabledPath(path, self.options.disabledFileKey);
500 | async.each(self.blobSvcs, function(blob, callback) {
501 | copyBlob(blob, dPath, path, function(e) {
502 | if (e) {
503 | return callback(clusterError(blob, e));
504 | } else {
505 | self.remove(dPath, callback);
506 | }
507 | });
508 | }, function(err) {
509 | callback(err);
510 | });
511 | },
512 |
513 | getUrl: function (path) {
514 | /** @type {BlobSvc} */
515 | const blob = self.blobSvcs[0];
516 | const baseUrl = blob.svc.getContainerClient(blob.container)
517 | .getBlobClient('')
518 | .url
519 | .replace(/\/$/, '');
520 | return utils.addPathToUrl(self.options, baseUrl, path);
521 | },
522 |
523 | destroy: function(callback) {
524 | // No file descriptors or timeouts held
525 | return callback(null);
526 | },
527 |
528 | /**
529 | * Use sane defaults and user config to get array of file extensions to avoid gzipping
530 |      * @param {Object} gzipEncoding ex: {jpg: true, rando: false}
531 |      * @return {Array} An array of file extensions to ignore
532 | */
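    // Illustrative (not in the original source): getGzipBlacklist({ svg: true, zip: false })
    // yields the default blacklist minus 'svg' (whitelisted) plus 'zip'.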
533 | getGzipBlacklist: function(gzipEncoding) {
534 | const gzipSettings = gzipEncoding || {};
535 | const { whitelist, blacklist } = Object.keys(gzipSettings).reduce((prev, key) => {
536 | if (gzipSettings[key]) {
537 | prev.whitelist.push(key);
538 | } else {
539 | prev.blacklist.push(key);
540 | }
541 | return prev;
542 | }, {
543 | whitelist: [],
544 | blacklist: []
545 | });
546 |
547 | // @NOTE - we REMOVE whitelisted types from the blacklist array
548 | const gzipBlacklist = defaultGzipBlacklist
549 | .concat(blacklist)
550 |         .filter(el => !whitelist.includes(el));
551 |
552 | return _.uniq(gzipBlacklist);
553 | }
554 | };
555 |
556 | return self;
557 | };
558 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 1.26.0 (2025-10-30)
4 |
5 | * Replace aws-sdk with @aws-sdk/client-s3.
6 |
7 | ## 1.25.2 (2025-10-01)
8 |
9 | * Fix typo in documentation (`disableFileKey` > `disabledFileKey`).
10 |
11 | ## 1.25.1 (2025-08-28)
12 |
13 | * Bump `@google-cloud/storage` to 7.x to address a deprecation warning.
14 | * `npx mocha test/gcs.js` exits without hanging (there was no bug in the actual functionality, just the test).
15 |
16 | ## 1.25.0 (2025-08-06)
17 |
18 | * Adds SAS token support for the Azure storage backend.
19 |
20 | ## 1.24.3 (2025-03-25)
21 |
22 | * Fix missing variable which led to confusing error messages if the configured image backend is unavailable and prevented automatic fallback from `sharp` to `imagemagick`.
23 |
24 | ## 1.24.2 (2024-12-09)
25 |
26 | * Corrected npm audit warning by eliminating a dependency on `gm` which is not actively maintained.
27 |
28 | ## 1.24.1 (2024-10-15)
29 |
30 | * Bug fix: error handling for `streamOut`. If an HTTP error status code is encountered, the stream will emit an error, and the error object will have a `statusCode` property, allowing downstream code to handle this situation appropriately.
31 |
32 | ## 1.24.0 (2024-10-15)
33 |
34 | * Bug fix: `bucketObjectsACL` is respected by the `enable` method, so that method no longer makes files `public` again. Previously it was only respected at `copyIn` / `copyImageIn` time.
35 | * New feature: `disabledBucketObjectsACL` is now also supported. It is used by the `disable` method rather than assuming `private` (still the default).
37 |
38 | ## 1.23.0 (2024-10-14)
39 |
40 | * Introduced `streamOut` API for `local` and `s3` storage backends.
41 |
42 | ## 1.22.7 (2024-09-24)
43 |
44 | * `.mp3` files do not benefit from gzip encoding, and the transfer encoding header failed to be sent for them, so gzip is no longer used for this extension.
45 |
46 | ## 1.22.6 (2024-09-03)
47 |
48 | * `.gz` files now receive the correct content type in S3.
49 | * `.gz` files are now exempt from gzip transfer encoding because they are already gzipped.
50 | * `s3.js` tests now use environment variables rather than
51 | a git-excluded local file.
52 |
53 | ## 1.22.5 (2024-07-10)
54 |
55 | * Document options for avoiding a public S3 bucket.
56 |
57 | ## 1.22.4 2024-06-11
58 |
59 | * Use latest `rimraf` package, silencing a deprecation warning.
60 |
61 | ## 1.22.3 2023-10-16
62 |
63 | * Security: update `sharp` to fix a [potential security risk](https://security.snyk.io/vuln/SNYK-JS-SHARP-5922108). You should update your project's
64 | dependencies manually or with `npm update` to ensure you get this fix.
65 |
66 | ## 1.22.2 2023-08-03
67 |
68 | * Bump to the next major version of the Google Cloud Storage API to please `npm audit`. There was no actual security vulnerability, given the way the module in question was used.
69 | * Update our eslint configuration.
70 | * Modernize the source from `var` to `const` and `let` in all cases to satisfy eslint and help prevent future bugs. This does not change the behavior of the code.
71 |
72 | ## 1.22.1 2023-05-03
73 |
74 | * Corrected behavior of `getUrl` method for Azure storage, for Apostrophe compatibility. This regression was introduced an hour ago in 1.22.0.
75 |
76 | ## 1.22.0 2023-05-03
77 |
78 | * Remove `azure-storage` in favor of the actively supported `@azure/storage-blob`, refactor. No public API changes.
79 | * Remove `request` package and all related dependencies in favor of the actively supported `node-fetch@2`, refactor tests.
80 | * Update outdated dev dependencies.
81 |
82 | ## 1.21.0 2023-02-11
83 |
84 | * Adds tests for `webp` files, updates the package scripts to include "webp" to run the tests, and a webp test image (Note: one test commented out because `sharp` currently fails to reorient webp files). Thanks to [Isaac Preston](https://github.com/ixc7) for this contribution.
85 | * `https` is now the default protocol for S3. As it is always supported, and there are no uploadfs+S3 use cases where `http` is preferred, this is not considered a bc break.
86 |
87 | ## 1.20.1 2022-12-13
88 |
89 | * Add `webm` to the list of file formats with a known content type, and add it to the list of types that should not be gzip encoded, as it is precompressed and Chrome appears to behave poorly when it is gzip encoded.
90 |
91 | ## 1.20.0 2022-08-18
92 |
93 | * Default image processing library changed to [sharp.js](https://www.npmjs.com/package/sharp) for excellent performance
94 | * Support for jimp and imagecrunch removed (added fallback to sharp for bc)
95 | * imagemagick is now the fallback if sharp installation fails on a particular platform
96 | * tests for sharp have been added and the package scripts updated to add "test-sharp"
97 |
98 | ## 1.19.0 2022-01-21
99 |
100 | * New options `noGzipContentTypes` and `addNoGzipContentTypes` to configure content types which should not be gzipped when using the `s3` storage backend (see the sketch below). Thanks to Christian Litzlbauer.
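
A configuration sketch (values are illustrative; as the option names suggest, `noGzipContentTypes` replaces the default list while `addNoGzipContentTypes` extends it):

    uploadfs.init({
      storage: 's3',
      // ... credentials, bucket, etc. ...
      addNoGzipContentTypes: [ 'application/pdf' ]
    }, callback);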
101 |
102 | ## 1.18.5 2021-12-07
103 |
104 | ### Fixed
105 |
106 | * Local storage is fully compatible with Node 16 and later, as well as earlier releases previously supported.
107 | * Removed a stray folder.
108 |
109 | ## 1.18.4 - 2021-10-08
110 |
111 | ### Fixed
112 |
113 | * Updates jimp to resolve npm audit warning.
114 |
115 | ## 1.18.3 - 2021-08-13
116 |
117 | ### Fixed
118 |
119 | * Set Azure containers public access level to `blob` instead of `container` to ensure anonymous users cannot list the content.
120 |
121 | ## 1.18.2
122 |
123 | * Addressed `npm audit` complaints about `mkdirp` by using a simple `mkdirp` implementation that has no legacy compatibility issues.
124 | * Addressed `npm audit` complaints about `mocha` and friends by upgrading `mocha`.
125 | * There are currently `npm audit` warnings about `azure-storage`, however a fix for this is forthcoming according to the upstream maintainers, and the existing semver ranges in this package will pick it up on `npm audit` when released.
126 |
127 | ## 1.18.1
128 |
129 | * Bug fix: the `sizes` option to `copyImageIn` now works even if `imageSizes` was not passed at all when calling `init`.
130 |
131 | ## 1.18.0
132 |
133 | * Support for a `sizes` option when calling `copyImageIn`, removing the requirement that all uploads are scaled to the same set of sizes. If the option is not provided the globally configured sizes are used.
134 |
135 | ## 1.17.2
136 |
137 | * Documented the `endpoint` option. Thanks to Joe Innes for this contribution.
138 |
139 | ## 1.17.1
140 |
141 | * Updates ESLint configuration and fixes errors.
142 |
143 | ## 1.17.0
144 |
145 | * Updated the `@google-cloud/storage` module to the 5.x series to address a possible security vulnerability reported by `npm audit`. Version 5.x does not support node 8, which is itself not supported, so you should not be running it anymore.
146 | * However, we also made the libraries for all three cloud storage backends (GCS, S3, and Azure) `optionalDependencies`. If they fail to install for any reason, uploadfs will still work, as long as you do not try to use that specific backend.
147 | * A longstanding bug in GCS storage that broke its use with ApostropheCMS has been fixed. Leading slashes in paths are no longer stored in a way that produces double slashes in URLs and breaks Apostrophe's URL-building. As far as we're concerned, this was a bug, since it broke the unit tests.
148 | * However, for the benefit of anyone who preferred this behavior for non-Apostrophe applications, the new `strictPaths: true` option may be passed when configuring uploadfs to get the old behavior in which leading slashes are not finessed and the URL will actually contain a double slash.
149 |
150 | ## 1.16.0
151 |
152 | * Added bucketObjectsACL option to s3.js to allow override of default 'public-read' permission when using a restricted S3 bucket to store assets. Thanks to Shaun Hurley for the contribution.
153 |
154 | ## 1.15.1
155 |
156 | * Using the latest version of jimp, which resolves an `npm audit` issue. JPEG EXIF rotation autocorrection is now standard in jimp so we don't explicitly invoke it anymore but should get the same good results with smartphone photos etc.
157 |
158 | ## 1.15.0
159 |
160 | * gzip content encoding for S3. When using `copyIn` to copy a file of a suitable type into S3, it will be gzipped and the appropriate content encoding will be set so that browsers automatically do the right thing when they download it. Similarly, the `copyOut` implementation for S3 now transparently supports downloading the original, uncompressed content from S3. The standard web image formats and zipfiles are not double-compressed because the benefit is minimal, so the CPU impact on phones is not justified in this case.
161 |
162 | ## 1.14.1
163 |
164 | * Depend on GCS 4.x to address npm audit warning. There appear to be no relevant breaking API changes in GCS.
165 |
166 | ## 1.14.0
167 |
168 | * Failover: azure copyOut now attempts to copy from every available replica, for durability
169 | * azure errors now report the account and container concerned so you can identify the faulty replica; if all were tried (copyOut), ALL is reported. This is done via `account` and `container` properties on the error object
170 | * eslint fixes, including undefined variable fixes
171 |
172 | ## 1.13.0
173 |
174 | * Now compatible with S3-like backends that build the bucket URL as a path rather than a subdomain. To enable this behavior, set the `s3ForcePathStyle` option to `true`. Thanks to Funkhaus Creative for this contribution.
175 |
176 | ## 1.12.0
177 |
178 | * Google Cloud Storage (GCS) support. Thanks to Nick Bauman for this contribution.
179 |
180 | ## 1.11.1
181 |
182 | * Azure storage backend: `mp4` has been added to the list of formats that are excluded from gzip transfer encoding by default. This is because it does not stream properly in Chrome when gzipped and saves very little space.
183 |
184 | ## 1.11.0
185 |
186 | * The new `prefix` option, if present, is prepended to all `uploadfs` paths before they reach the storage layer. This makes it easy for several sites to share, for instance, the same S3 bucket without confusion. The `getUrl()` method also reflects the prefix, unless the `cdn` option is in play, as cdn URLs might not include a prefix. Always set the `url` subproperty of `cdn` with the prefix you need, if any.
187 |
188 | ## 1.10.2
189 |
190 | We fixed some significant issues impacting users of the `azure` storage backend. If you use that backend you should upgrade:
191 |
192 | * Get extensions from uploadfs path so gzipped files are not all application/octet stream
193 | * Pass the content-encoding header properly. Please note that files already uploaded to `azure` with uploadfs are gzipped but do *not* have the correct header and so your webserver may not recognize them correctly, especially if used for CSS files and other text formats. You can resolve this by uploading them again.
194 | * `copyOut` now correctly reverses `copyIn` completely, including gunzipping the file if necessary. Without this change cropping, etc. did not work.
195 | * Default test path covers these issues correctly.
196 |
197 | ## 1.10.1
198 |
199 | * If `replicateClusters` exists but is an empty array, the credential options are used instead. This was not a bug fix, exactly, but it is a nice "do what I mean" feature.
200 | * A single `gzip` object was being reused, leading to failures on subsequent writes to Azure. Fixed.
201 | * The Azure backend contained a global array, thus limiting you to a single instance of `uploadfs` in your project. Fixed.
202 |
203 | ## 1.10.0
204 |
205 | `imagemin` is no longer a dependency. Instead the new `postprocessors` option allows you to optionally pass it in. `imagemin` and its plugins have complicated dependencies that don't build smoothly on all systems, and it makes sense to leave the specifics of this step up to the users who want it.
206 |
207 | Since setting the `imagemin: true` option doesn't hurt anything in 1.10.0 (you still get your images, just not squeezed quite as small), this is not a bc break.
208 |
209 | Deemphasized `imagecrunch`. People don't serve public sites on Macs anyway and homebrew can install `imagemagick` easily.
210 |
211 | ## 1.9.2
212 |
213 | `mocha` and `lodash` upgraded to satisfy `npm audit`.
214 |
215 | ## 1.9.1
216 |
217 | * All `imagemin-` plugin modules are now `optionalDependencies` and uploadfs can print a warning at startup and continue without any one of them. In addition, if `imagemin` fails, this situation is tolerated with a warning printed and the images are still transformed as they would be without `imagemin`. This is necessary because [`imagemin-pngquant` fails on CentOS 7 without sysadmin intervention to install additional system packages outside of npm](https://github.com/imagemin/pngquant-bin/issues/77), and `cjpeg` fails to run without extra libraries even though it does `npm install`, etc.
218 |
219 | ## 1.9.0
220 |
221 | * Azure support.
222 | * Added `migrateToDisabledFileKey` and `migrateFromDisabledFileKey` methods for use when switching to the option of renaming files in a cryptographically secure way rather than changing their permissions. These methods change the approach for all existing disabled files.
223 |
224 | ## 1.8.0
225 |
226 | * Added the optional `destroy` method, which allows for graceful release of resources such as file descriptors or timeouts that may belong to backends.
227 |
228 | ## 1.7.2
229 |
230 | * Added mime type for `svg` as standard equipment.
231 | * User-configured mime types now merge with the standard set, making it easy to add a few without starting from scratch.
232 |
233 | Thanks to tortilaman.
234 |
235 | ## 1.7.1
236 |
237 | The `s3` storage backend now respects the `endpoint` option properly when asked to provide URLs. Thanks to tortilaman.
238 |
239 | ## 1.7.0
240 |
241 | Introduced the `disabledFileKey` option, a feature of the local storage backend which substitutes filename obfuscation for file permissions when using `enable` and `disable`. This is useful when you wish to use `rsync` and other tools outside of uploadfs without the aggravation of permissions issues, but preserve the ability to effectively disable web access, as long as the webserver does not offer index listings for folders.
242 |
243 | Documented the need to set `https: true` when working with S3 if your site uses `https`.
244 |
245 | ## 1.6.2
246 |
247 | Node 8.x added an official `stream.destroy` method with different semantics from the old unofficial one. This led to a callback being invoked twice in the event of an error when calling the internal `copyFile` mechanism. A unit test was added, the issue was fixed, and the fix was verified in all supported LTS versions of Node.js.
248 |
249 | ## 1.6.1
250 |
251 | 1.6.0 introduced a bug that broke `enable` and `disable` in some cases. This became apparent when Apostrophe began to invoke these methods. Fixed.
252 |
253 | ## 1.6.0
254 |
255 | `enablePermissions` and `disablePermissions` options, for the `local` storage backend. By default `disable` sets permissions to `0000`. If you prefer to block group access but retain user access, you might set this to `0400`. Note that octal literals are forbidden in modern JavaScript, so it is better to write `parseInt('0400', 8)`.
256 |
257 | ## 1.5.1
258 |
259 | * The s3 storage backend now honors the `cachingTime` option properly again. Thanks to Matt Crider.
260 |
261 | ## 1.5.0
262 |
263 | * The s3 storage backend now uses the official AWS SDK for JavaScript. The knox module is no longer maintained and is missing basic request signature support that is mandatory for newer AWS regions. It is no longer a serious option.
264 |
265 | Every effort has been made to deliver 100% backwards compatibility with the documented options of knox, and the full test suite is passing with the new AWS SDK.
266 |
267 | ## 1.4.0
268 |
269 | * The new pure-JavaScript `jimp` image backend works "out of the box" even when ImageMagick is not installed. For faster operation and GIF support, you should still install ImageMagick. Thanks to Dave Ramirez for contributing this feature.
270 |
271 | ## 1.3.6
272 |
273 | * Octal constants are forbidden in ES6 strict, use `parseInt(x, 8)`. No other changes.
274 |
275 | ## 1.3.5
276 |
277 | * All tests passing.
278 | * Rewrote automatic directory cleanup mechanism of local storage to cope correctly with more complex directory structures.
279 |
280 | ## 1.3.4
281 |
282 | * Bumped dependencies to newer, better maintained versions. All tests passing.
283 | * Removed accidental dependency on `global-tunnel-ng` and commented out a one-time test in `test.js`.
284 |
285 | ## 1.3.3
286 |
287 | * Dependency on `request` is no longer locked down to a minor version, which was unnecessary and caused peer dependency failures in some projects (an npm design flaw IMHO, but never mind)
288 |
289 | ## 1.3.2
290 |
291 | * Updated dependency on `rimraf` module to eliminate deprecation warning for `graceful-fs`
292 |
293 | ## 1.3.1
294 |
295 | * Whoops, refer to original width and height properly for gifsicle
296 |
297 | ## 1.3.0
298 |
299 | * The `imagemagick` image conversion backend now optionally uses `gifsicle` to convert animated GIFs. Turn on this behavior with the `gifsicle: true` option. There are tradeoffs: `gifsicle` is much faster and uses much less RAM, but seems to produce slightly lower quality results. On a very large animation though, you're almost certain to run out of RAM with `imagemagick`. Of course you must install `gifsicle` to take advantage of this.
300 |
301 | ## 1.2.2
302 |
303 | * The very short-lived version 1.2.1 did not retain the originals of GIFs (when desired). This has been fixed.
304 |
305 | ## 1.2.1
306 |
307 | * Animated GIF conversion strategy has been customized once again. We found cases in which the combined pipeline was 4x slower (!) and also needed to add in `-coalesce` to prevent bad frames in some cases.
308 |
309 | ## 1.2.0
310 |
311 | * Added the `cachingTime` and `cdn` options. Thanks to Vispercept.
312 |
313 | * Fixed a bug where the local storage backend could invoke its callbacks twice, with both failure and success, when an error occurs reading from a local file in newer versions of node (this bug did not appear in 0.10.x). The fix is backwards compatible.
314 |
315 | ## 1.1.10
316 |
317 | Error message when imagemagick is not installed is a little more informative about what you must do.
318 |
319 | ## 1.1.9
320 |
321 | Use latest knox. No functionality changes.
322 |
323 | ## 1.1.7-1.1.8
324 |
325 | Supports multiple instances when using the default storage and image backends. Previously those backends only supported one instance. This was corrected without changing the public API for custom backends, which have always supported multiple instances.
326 |
327 | ## 1.1.5-1.1.6
328 |
329 | GIF animations have been merged back into the main pipeline thanks to `-clone 0--1` which preserves all frames of the animation. It's a little faster, and it's also less code to maintain.
330 |
331 | ## 1.1.4
332 |
333 | GIF animations are preserved in the imagemagick backend, with full support for resizing and cropping. A separate, slower pipeline is used due to limitations of the `+clone` mechanism in imagemagick. The API has not changed.
334 |
335 | ## 1.1.3
336 |
337 | The imagecrunch backend now sets `adjustedOriginal` correctly when it does a simple copy of the original of a PNG or JPEG.
338 |
339 | ## 1.1.0
340 |
341 | The new `disable` and `enable` methods turn web access to the specified path off and on again, respectively. The new `getImageSizes` method simply gives you access to the image sizes that are currently configured.
342 |
343 | There are no changes elsewhere in the code.
344 |
345 | ## 1.0.0
346 |
347 | None! Since the additions in version 0.3.14 we've had no real problems. We now support both alternate storage backends and alternate image rendering backends. Test coverage is thorough and everything's passing. What more could you want? It's time to declare it stable.
348 |
349 | ## 0.3.15
350 |
351 | Decided that imagecrunch should output JSON, so that's now what the backend expects.
352 |
353 | ## 0.3.14
354 |
355 | In addition to storage backends, you may also supply alternate image processing backends. The `backend` option has been renamed to `storage`, however `backend` is accepted for backwards compatibility. The `image` option has been introduced for specifying an image processing backend. In addition to the existing `imagemagick` backend, there is now an `imagecrunch` backend based on the Mac-specific [imagecrunch](http://github.com/punkave/imagecrunch) utility.
356 |
357 | If you do not specify an `image` backend, uploadfs will look for imagecrunch and imagemagick in your PATH, stopping as soon as it finds either the `imagecrunch` command or the `identify` command.
358 |
359 | ## 0.3.13
360 |
361 | `copyImageIn` has been rewritten to run more than 4x faster! We now generate our own imagemagick `convert` pipeline which takes advantage of two big optimizations:
362 |
363 | * Load, orient and crop the original image only once, then output it at several sizes in the same pipeline. This yields a 2x speedup.
364 | * First scale the image to the largest size desired, then scale to smaller sizes based on that as part of the same pipeline, without creating any lossy intermediate files. This yields another 2x speedup and a helvetica of designers were unable to see any difference in quality. ("Helvetica" is the collective noun for a group of designers.)
365 |
366 | The new `parallel` option allows you to specify the maximum number of image sizes to render simultaneously. This defaults to 1, to avoid using a lot of memory and CPU, but if you are under the gun to render a lot of images in a hurry, you can set this as high as the number of image sizes you have. Currently there is no throttling mechanism for multiple unrelated calls to `uploadfs.copyImageIn`, this option relates to the rendering of the various sizes for a single call.
367 |
368 | ## 0.3.11
369 |
370 | The new `parallel` option allows you to specify the maximum number of image sizes to render simultaneously. This defaults to 1, to avoid using a lot of memory and CPU, but if you are under the gun to render a lot of images in a hurry, you can set this as high as the number of image sizes you have. Currently there is no throttling mechanism for multiple unrelated calls to `uploadfs.copyImageIn`, this option relates to the rendering of the various sizes for a single call.
371 |
372 | ## 0.3.7-0.3.10
373 |
374 | Just packaging and documentation. Now a P'unk Avenue project.
375 |
376 | ## 0.3.6
377 |
378 | The `uploadfs` functionality for identifying a local image file via ImageMagick has been refactored and made available as the `identifyLocalImage` method. This method is primarily used internally but is occasionally helpful in migration situations (e.g. "I forgot to save the metadata for any of my images before").
379 |
380 | ## 0.3.5
381 |
382 | Starting in version 0.3.5, you can set the quality level for scaled JPEGs via the scaledJpegQuality option, which defaults to 80. You can pass this option either when initializing `uploadfs` or on individual calls to `copyImageIn`. This option applies only to scaled versions of the image. If uploadfs modifies the "original" image to scale or orient it, Imagemagick's default behavior stays in effect, which is to attempt to maintain the same quality level as the original file. That makes sense for images that will be the basis for further cropping and scaling but results in impractically large files for web deployment of scaled images. Thus the new option and the new default behavior.
383 |
384 | ## 0.3.4
385 |
386 | Starting in version 0.3.4, the getTempPath() method is available. This returns the same `tempPath` that was supplied to uploadfs at initialization time. Note that at this point the folder is guaranteed to exist. This is useful when you need a good place to `copyOut` something to, for instance in preparation to `copyImageIn` once more to carry out a cropping operation.
387 |
388 | ## 0.3.3
389 |
390 | Starting in version 0.3.3, cropping is available. Pass an options object as the third parameter to `copyImageIn`. Set the `crop` property to an object with `top`, `left`, `width` and `height` properties, all specified in pixels. These coordinates are relative to the original image. **When you specify the `crop` property, both the "full size" image copied into uploadfs and any scaled images are cropped.** The uncropped original is NOT copied into uploadfs. If you want the uncropped original, be sure to copy it in separately. The `width` and `height` properties of the `info` object passed to your callback will be the cropped dimensions.
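
A minimal sketch of such a cropping call (paths are hypothetical):

    uploadfs.copyImageIn('/tmp/photo.jpg', '/photos/photo.jpg', {
      crop: { top: 10, left: 10, width: 300, height: 200 }
    }, function(err, info) {
      // info.width is 300 and info.height is 200 here
    });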
391 |
392 | Also starting in version 0.3.3, `uploadfs` uses the `gm` module rather than the `node-imagemagick` module for image manipulation, but configures `gm` to use imagemagick. This change was made because `node-imagemagick` has been abandoned and `gm` is being actively maintained. This change has not affected the `uploadfs` API in any way. Isn't separation of concerns wonderful?
393 |
394 | ## 0.3.2
395 |
396 | Starting in version 0.3.2, you can copy files back out of uploadfs with `copyOut`. You should not rely heavily on this method, but it is occasionally unavoidable, for instance if you need to crop an image differently. When possible, cache files locally if you may need them locally soon.
397 |
398 | ## 0.3.0
399 |
400 | Starting in version 0.3.0, you must explicitly create an instance of uploadfs. This allows you to have more than one, separately configured instance, and it also avoids serious issues with modules not seeing the same instance automatically as they might expect. For more information see [Singletons in #node.js modules cannot be trusted, or why you can't just do var foo = require('baz').init()](http://justjs.com/posts/singletons-in-node-js-modules-cannot-be-trusted-or-why-you-can-t-just-do-var-foo-require-baz-init).
401 |
402 | Existing code that isn't concerned with sharing uploadfs between multiple modules will only need a two line change to be fully compatible:
403 |
404 | // CHANGE THIS
405 | const uploadfs = require('uploadfs');
406 |
407 | // TO THIS (note the extra parens)
408 | const uploadfs = require('uploadfs')();
409 |
410 | If you use uploadfs in multiple source code files, you'll need to pass your `uploadfs` object explicitly, much as you pass your Express `app` object when you want to add routes to it via another file.
411 |
--------------------------------------------------------------------------------
/uploadfs.js:
--------------------------------------------------------------------------------
1 | const _ = require('lodash');
2 | const async = require('async');
3 | const crypto = require('crypto');
4 | const fs = require('fs');
5 | const { rimraf } = require('rimraf');
6 | const delimiter = require('path').delimiter;
7 |
8 | function generateId() {
9 | return crypto.randomBytes(16).toString('hex');
10 | }
11 |
12 | /**
13 | * Instantiates Uploadfs.
14 | * @class Represents an instance of Uploadfs. Usually you only want one.
15 | */
16 |
17 | function Uploadfs() {
18 | let tempPath, imageSizes;
19 | let scaledJpegQuality;
20 | let ensuredTempDir = false;
21 | const self = this;
22 | /**
23 | * Initialize uploadfs. The init method passes options to the backend and
24 | * invokes a callback when the backend is ready.
25 | * @param {Object} options: backend, imageSizes, orientOriginals, tempPath,
26 | * copyOriginal, scaledJpegQuality, contentType, cdn.
27 | * backend is the only mandatory option. See the
28 | * README and individual methods for details.
29 |    * @param {Object} options.cdn - An object that defines cdn settings
30 |    * @param {Boolean} options.cdn.enabled=true - Whether the cdn should be enabled
31 |    * @param {String} options.cdn.url - The cdn URL
32 | * @param {Function} callback - Will receive the usual err argument
33 | */
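  // Minimal illustrative call (option values are placeholders; the local
  // storage backend is assumed):
  //
  // uploadfs.init({
  //   storage: 'local',
  //   uploadsPath: __dirname + '/public/uploads',
  //   uploadsUrl: 'http://localhost:3000/uploads',
  //   tempPath: __dirname + '/temp',
  //   imageSizes: [ { name: 'small', width: 320, height: 320 } ]
  // }, function(err) { /* ready to copy files in */ });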
34 | self.init = function (options, callback) {
35 | self.options = options;
36 | self.prefix = self.options.prefix || '';
37 | // bc: support options.backend
38 | self._storage = options.storage || options.backend;
39 | if (!self._storage) {
40 | return callback('Storage backend must be specified');
41 | }
42 | // Load standard storage backends, by name. You can also pass an object
43 | // with your own implementation
44 | if (typeof self._storage === 'string') {
45 | let library;
46 | try {
47 | library = require('./lib/storage/' + self._storage + '.js');
48 | } catch (e) {
49 | console.error(
50 | 'Unable to require the ' +
51 | self._storage +
52 | ' storage backend, your node version may be too old for it'
53 | );
54 | return callback(e);
55 | }
56 | self._storage = library();
57 | }
58 |
59 | // If you want to deliver your images
60 | // over a CDN then this could be set in options
61 | if (options.cdn !== undefined) {
62 | if (
63 | !_.isObject(options.cdn) ||
64 | !_.isString(options.cdn.url) ||
65 | (options.cdn.enabled !== undefined && !_.isBoolean(options.cdn.enabled))
66 | ) {
67 | return callback(
68 | 'CDN must be a valid object: {enabled: boolean, url: string}'
69 | );
70 | }
71 | if (options.cdn.enabled === undefined) {
72 | options.cdn.enabled = true;
73 | }
74 | self.cdn = options.cdn;
75 | }
76 |
77 | // Load image backend
78 | self._image = options.image;
79 | // Throw warnings about deprecated processors or load default
80 | if (self._image === 'jimp' || self._image === 'imagecrunch') {
81 | console.error(
82 | 'The specified processor is no longer supported, defaulting to the sharp.js library.'
83 | );
84 | self._image = 'sharp';
85 | }
86 |
87 | let fallback = false;
88 |     // If the processor is passed as a string (such as 'imagemagick' or 'sharp'),
89 |     // try to load it, or fail with a warning. If it is undefined, try to load
90 |     // sharp and fall back to imagemagick if that fails.
91 | if (typeof self._image === 'string' || self._image === undefined) {
92 | self._image = self._image === undefined ? 'sharp' : self._image;
93 | try {
94 | const requiring = `./lib/image/${self._image}.js`;
95 | self._image = require(requiring)();
96 | } catch (e) {
97 | console.error(e);
98 | if (self._image === 'sharp') {
99 | console.error(
100 | 'Sharp not available on this operating system. Trying to fall back to imagemagick.'
101 | );
102 | fallback = true;
103 | } else {
104 | return callback('The specified processor was not found.');
105 | }
106 | }
107 | }
108 |
109 | if (fallback) {
110 |       // Check for the presence of imagemagick - a failed sharp load doesn't
111 |       // mean imagemagick is actually installed
112 | const paths = (process.env.PATH || '').split(delimiter);
113 | if (
114 | _.find(paths, function (p) {
115 | // Allow for Windows and Unix filenames for identify. Silly oversight
116 | // after getting delimiter right (:
117 | if (
118 | fs.existsSync(p + '/identify') ||
119 | fs.existsSync(p + '/identify.exe')
120 | ) {
121 | return true;
122 | }
123 | })
124 | ) {
125 | self._image = require('./lib/image/imagemagick.js')();
126 | } else {
127 | return callback('No supported image processor found.');
128 | }
129 | }
130 |
131 |     // Reasonable default JPEG quality setting for scaled copies. Imagemagick's
132 |     // default quality is the quality of the original being converted, which is
133 |     // usually a terrible idea when it's a super hi-res original. When that isn't
134 |     // applicable it defaults to 92, which is still sky high and produces very
135 |     // large files, so we default to 80 instead
136 |
137 | scaledJpegQuality = options.scaledJpegQuality || 80;
138 |
139 | imageSizes = options.imageSizes || [];
140 |
141 | tempPath = options.tempPath;
142 |
143 | async.series(
144 | [
145 | // create temp folder if needed
146 | function (callback) {
147 | if (!imageSizes.length) {
148 | return callback();
149 | }
150 |
151 | ensureTempDir();
152 | return callback(null);
153 | },
154 |
155 | // invoke storage backend init with options
156 | function (callback) {
157 | return self._storage.init(options, callback);
158 | },
159 |
160 | // invoke image backend init with options
161 | function (callback) {
162 | return self._image.init(options, callback);
163 | }
164 | ],
165 | callback
166 | );
167 | };
168 |
169 | /**
170 | * The copyIn method takes a local filename and copies it to a path in uploadfs.
171 | * Any intermediate folders that do not exist are automatically created if the
172 | * storage requires such things. Just copy things where you want them to go.
173 |    * @param {String} localPath The local filename
174 |    * @param {String} path The path in uploadfs, begins with /
175 |    * @param {Object} [options] Options (passed to storage). May be skipped
176 | * @param {Function} callback Will receive the usual err argument
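   * @example
   * // A sketch; the paths are hypothetical
   * uploadfs.copyIn('/tmp/photo.jpg', '/images/photo.jpg', function (err) {
   *   if (err) { return console.error(err); }
   *   // The file is now available in uploadfs at /images/photo.jpg
   * });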
177 | */
178 | self.copyIn = function (localPath, path, options, callback) {
179 | if (typeof options === 'function') {
180 | callback = options;
181 | options = {};
182 | }
183 | path = prefixPath(path);
184 | return self._storage.copyIn(localPath, path, options, callback);
185 | };
186 |
187 | /**
188 | * Obtain the temporary folder used for intermediate files created by copyImageIn.
189 | * Can also be useful when doing your own manipulations with copyOut.
190 | * @see Uploadfs#copyOut
191 | */
192 | self.getTempPath = function () {
193 | return tempPath;
194 | };
195 |
196 | /**
197 | * The copyOut method takes a path in uploadfs and a local filename and copies
198 | * the file back from uploadfs to the local filesystem. This should be used only
199 | * rarely. Heavy reliance on this method sets you up for poor performance in S3.
200 | * However it may be necessary at times, for instance when you want to crop an
201 | * image differently later. Use it only for occasional operations like cropping.
202 | * @param {String} path Path in uploadfs (begins with /)
203 | * @param {String} localPath Path in the local filesystem to copy to
204 | * @param {Object} options Options (passed to backend). May be skipped
205 | * @param {Function} callback Receives the usual err argument
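   * @example
   * // A sketch; the paths are hypothetical
   * uploadfs.copyOut('/images/photo.jpg', '/tmp/photo-copy.jpg', function (err) {
   *   if (err) { return console.error(err); }
   *   // /tmp/photo-copy.jpg now exists in the local filesystem
   * });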
206 | */
207 | self.copyOut = function (path, localPath, options, callback) {
208 | path = prefixPath(path);
209 | if (typeof options === 'function') {
210 | callback = options;
211 | options = {};
212 | }
213 | return self._storage.copyOut(path, localPath, options, callback);
214 | };
215 |
216 |   /**
217 |    * The streamOut method takes a path in uploadfs and returns a readable
218 |    * stream for that file. This should be used only rarely. Heavy reliance
219 |    * on this method sets you up for poor performance in S3. However it may
220 |    * be necessary at times, for instance when access to files must be
221 |    * secured on a request-by-request basis.
222 |    * @param {String} path Path in uploadfs (begins with /)
223 |    * @param {Object} [options] Options (passed to backend). May be skipped
224 |    * @returns {stream.Readable} A readable stream for the file
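   * @example
   * // A sketch: piping a protected file to an Express response
   * // (the route and path are hypothetical)
   * app.get('/download', function (req, res) {
   *   uploadfs.streamOut('/private/report.pdf').pipe(res);
   * });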
225 | */
226 | self.streamOut = function (path, options) {
227 | path = prefixPath(path);
228 | return self._storage.streamOut(path, options);
229 | };
230 |
231 | /**
232 | * Copy an image into uploadfs. Scaled versions as defined by the imageSizes option
233 | * passed at init() time, or as overridden by `options.sizes` on this call,
234 | * are copied into uploadfs as follows:
235 | *
236 | * If 'path' is '/me' and sizes with names 'small', 'medium' and 'large'
237 | * were defined at init() time, the scaled versions will be:
238 | *
239 | * '/me.small.jpg', '/me.medium.jpg', '/me.large.jpg'
240 | *
241 | * And the original file will be copied to:
242 | *
243 | * '/me.jpg'
244 | *
245 | * Note that a file extension is added automatically. If you provide a
246 | * file extension in 'path' it will be honored when copying the original only.
247 | * The scaled versions will get appropriate extensions for their format
248 |    * as detected by the image backend.
249 | *
250 | * If there is no error the second argument passed to the callback will
251 | * be an object with a 'basePath' property containing your original path
252 | * with the file extension removed and an 'extension' property containing
253 | * the automatically added file extension, as a convenience for locating the
254 | * original and scaled versions just by adding .jpg, .small.jpg, .medium.jpg,
255 | * etc.
256 | *
257 | * Scaled versions have the same file format as the original and are no wider
258 | * or taller than specified by the width and height properties of the
259 | * corresponding size, with the aspect ratio always being preserved.
260 | * If options.copyOriginal is explicitly false, the original image is
261 | * not copied into uploadfs at all.
262 | *
263 | * If options.crop is present, the image is cropped according to the
264 | * top, left, width and height properties of options.crop. All properties
265 | * must be integers. If cropping is done, it is performed first before scaling.
266 | *
267 | * IMPORTANT: if options.crop is present, the uncropped original is
268 | * NOT copied into uploadfs. The cropped version is what is copied
269 | * to "path." If you want the uncropped original too, make a separate call
270 | * to copyIn. A common pattern is to copy the original when an image
271 | * is first uploaded, and to perform crops and save them under other names
272 | * later, when a user decides they want cropped versions.
273 | *
274 |    * Image scaling is performed by the configured image backend (sharp by
275 |    * default, falling back to imagemagick). In no case is an image ever scaled
276 |    * to be larger than the original. Scaled versions of images with an orientation
277 |    * hint, such as iPhone photographs, are automatically rotated
278 |    * so that they will display properly in web browsers.
279 | *
280 | * @param {String} localPath Local filesystem path of existing image file
281 | * @param {String} path Path in uploadfs to copy original to. Leave off the
282 | * extension to autodetect the true type. Path begins with /
283 |    * @param {Object} options Options: sizes, scaledJpegQuality, copyOriginal, crop (see above)
284 | * @param {Function} callback Receives the usual err argument
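   * @example
   * // A sketch; the paths and crop box are hypothetical
   * uploadfs.copyImageIn('/tmp/photo.jpg', '/portraits/jane', {
   *   crop: { top: 0, left: 0, width: 200, height: 200 }
   * }, function (err, info) {
   *   if (err) { return console.error(err); }
   *   // Scaled versions live at info.basePath + '.small.' + info.extension, etc.
   * });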
285 | */
286 |
287 | self.copyImageIn = function (localPath, path, options, callback) {
288 |     // We do not call prefixPath here because we rely on copyIn, which does it for us
289 |
290 | if (typeof options === 'function') {
291 | callback = options;
292 | options = {};
293 | }
294 |
295 | const sizes = options.sizes || imageSizes;
296 |
297 | ensureTempDir();
298 |
299 | // We'll pass this context to the image processing backend with
300 | // additional properties
301 | const context = {
302 | crop: options.crop,
303 | sizes
304 | };
305 |
306 | context.scaledJpegQuality = options.scaledJpegQuality || scaledJpegQuality;
307 |
308 | // Identify the file type, size, etc. Stuff them into context.info and
309 | // context.extension
310 |
311 | function identify(path, callback) {
312 | return self.identifyLocalImage(path, function (err, info) {
313 | if (err) {
314 | return callback(err);
315 | }
316 | context.info = info;
317 | context.extension = info.extension;
318 | return callback(null);
319 | });
320 | }
321 |
322 | let originalDone = false;
323 | const copyOriginal = options.copyOriginal !== false;
324 | let originalPath;
325 |
326 | async.series(
327 | {
328 | // Identify the file
329 | identify: function (callback) {
330 | return identify(localPath, function (err) {
331 | if (err) {
332 | return callback(err);
333 | }
334 | return callback(null);
335 | });
336 | },
337 | // make a temporary folder for our work
338 | temporary: function (callback) {
339 | // Name the destination folder
340 | context.tempName = generateId();
341 | // Create destination folder
342 | if (sizes.length) {
343 | context.tempFolder = tempPath + '/' + context.tempName;
344 | return fs.mkdir(context.tempFolder, callback);
345 | } else {
346 | return callback(null);
347 | }
348 | },
349 | // Determine base path in uploadfs, working path for temporary files,
350 | // and final uploadfs path of the original
351 | paths: function (callback) {
352 | context.basePath = path.replace(/\.\w+$/, '');
353 | context.workingPath = localPath;
354 |
355 | // Indulge their wild claims about the extension the original
356 | // should have if any, otherwise provide the truth from identify
357 | if (path.match(/\.\w+$/)) {
358 | originalPath = path;
359 | } else {
360 | originalPath = path + '.' + context.extension;
361 | }
362 | return callback(null);
363 | },
364 | copyOriginal: function (callback) {
365 | // If there are no transformations of the original, copy it
366 | // in directly
367 | if (
368 | !copyOriginal ||
369 | options.orientOriginals !== false ||
370 | options.crop
371 | ) {
372 | return callback(null);
373 | }
374 | originalDone = true;
375 | return self.copyIn(localPath, originalPath, options, callback);
376 | },
377 |
378 | convert: function (callback) {
379 | context.copyOriginal = copyOriginal && !originalDone;
380 | return async.series([ convert, postprocess ], callback);
381 | function convert(callback) {
382 | return self._image.convert(context, callback);
383 | }
384 | function postprocess(callback) {
385 | if (!context.tempFolder) {
386 | // Nowhere to do the work
387 | return callback(null);
388 | }
389 | const filenames = _.map(sizes, function (size) {
390 | return (
391 | context.tempFolder + '/' + size.name + '.' + context.extension
392 | );
393 | });
394 | return self.postprocess(filenames, callback);
395 | }
396 | },
397 |
398 | reidentify: function (callback) {
399 | if (!context.adjustedOriginal) {
400 | return callback(null);
401 | }
402 |           // Save and restore the original size properties, since we determined
403 |           // those on the first identify and don't want to return the values
404 |           // for the cropped and/or reoriented version
405 | const originalWidth = context.info.originalWidth;
406 | const originalHeight = context.info.originalHeight;
407 | return identify(context.adjustedOriginal, function (err) {
408 | if (err) {
409 | return callback(err);
410 | }
411 | context.info.originalWidth = originalWidth;
412 | context.info.originalHeight = originalHeight;
413 | return callback(null);
414 | });
415 | },
416 |
417 | copySizes: function (callback) {
418 | return async.each(
419 | sizes,
420 | function (size, callback) {
421 | const suffix = size.name + '.' + context.extension;
422 | const tempFile = context.tempFolder + '/' + suffix;
423 | const permFile = context.basePath + '.' + suffix;
424 | return self.copyIn(tempFile, permFile, options, callback);
425 | },
426 | callback
427 | );
428 | },
429 |
430 | copyAdjustedOriginal: function (callback) {
431 | if (!context.adjustedOriginal) {
432 | return callback(null);
433 | }
434 | return self.copyIn(
435 | context.adjustedOriginal,
436 | originalPath,
437 | options,
438 | callback
439 | );
440 | }
441 | },
442 | function (err) {
443 |       // Try to clean up the temp folder. This can fail if its creation
444 |       // failed, in which case there is nothing we can or should do,
445 |       // thus the ignored rejection
446 |       if (context.tempFolder) {
447 |         rimraf(context.tempFolder).catch(() => {
448 |           // Ignore, it probably was not created in the first place
449 |         });
450 | }
451 | callback(
452 | err,
453 | err
454 | ? null
455 | : {
456 | basePath: context.basePath,
457 | extension: context.extension,
458 | width: context.info.width,
459 | height: context.info.height,
460 | originalWidth: context.info.originalWidth,
461 | originalHeight: context.info.originalHeight
462 | }
463 | );
464 | }
465 | );
466 | };
467 |
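  /**
   * Obtain the base URL for files in uploadfs. If a CDN was configured
   * and is enabled, its URL is returned as-is; otherwise the storage
   * backend's URL is returned with any configured prefix appended.
   */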
468 | self.getUrl = function (options, callback) {
469 | if (self.cdn && self.cdn.enabled) {
470 | return self.cdn.url;
471 | }
472 | return self._storage.getUrl(options, callback) + self.prefix;
473 | };
474 |
475 | self.remove = function (path, callback) {
476 | path = prefixPath(path);
477 | return self._storage.remove(path, callback);
478 | };
479 |
480 | /**
481 | * Re-enable access to the file. By default newly uploaded
482 | * files ARE web accessible, so you need not call this method
483 | * unless uploadfs.disable has been previously called.
484 | *
485 | * Be aware that you MUST call this method to guarantee access
486 | * to the file via copyOut, as well as via the web, even though
487 | * some backends may only disable access via the web. Do not
488 | * rely on this behavior. (Differences in behavior between
489 | * local filesystems and S3 require we tolerate this difference.)
490 | *
491 | * @param {string} path Path as stored in uploadfs (with extension)
492 | * @param {Function} callback Receives error if any, otherwise null
493 | */
494 |
495 | self.enable = function (path, callback) {
496 | path = prefixPath(path);
497 | return self._storage.enable(path, callback);
498 | };
499 |
500 | /**
501 | * Disable web access to the file. By default new uploads ARE
502 | * accessible; however this method is useful when implementing a
503 | * "recycle bin" or other undo-able delete feature.
504 | *
505 | * The implementation MUST block web access to the file. The
506 | * implementation MAY also block read access via copyOut, so be
507 | * aware that you MUST call uploadfs.enable to reenable access to
508 | * the file to guarantee you have access to it again across all
509 | * storage backends, even if you are using copyOut to access it.
510 | *
511 | * @param {string} path Path as stored in uploadfs (with extension)
512 | * @param {Function} callback Receives error if any, otherwise null
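   * @example
   * // A sketch of a "recycle bin" flow; the path is hypothetical
   * uploadfs.disable('/images/photo.jpg', function (err) {
   *   if (err) { return console.error(err); }
   *   // Later, to restore access:
   *   // uploadfs.enable('/images/photo.jpg', callback);
   * });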
513 | */
514 |
515 | self.disable = function (path, callback) {
516 | path = prefixPath(path);
517 | return self._storage.disable(path, callback);
518 | };
519 |
520 | /**
521 | * Identify a local image file. Normally you don't need to call
522 | * this yourself, it is mostly used by copyImageIn. But you may find it
523 | * useful in certain migration situations, so we have exported it.
524 | *
525 | * If the file is not an image or is too defective to be identified an error is
526 | * passed to the callback.
527 | *
528 | * Otherwise the second argument to the callback is guaranteed to have extension, width,
529 | * height, orientation, originalWidth and originalHeight properties. extension will be
530 | * gif, jpg or png and is detected from the file's true contents, not the original file
531 | * extension. width and height are automatically rotated to TopLeft orientation while
532 | * originalWidth and originalHeight are not.
533 | *
534 | * If the orientation property is not explicitly set in the file it will be set to
535 | * 'Undefined'.
536 | *
537 | * Alternative backends such as "sip" that do not support orientation detection
538 | * will not set this property at all.
539 | *
540 | * Any other properties returned are dependent on the version of ImageMagick (or
541 | * other backend) used and are not guaranteed.
542 | *
543 | * @param {String} path Local filesystem path to image file
544 | * @param {Function} callback Receives the usual err argument, followed by an
545 | * object with extension, width, height, orientation, originalWidth and originalHeight
546 | * properties. Any other properties depend on the backend in use and are not guaranteed.
547 | *
548 | * @see Uploadfs#copyImageIn
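   * @example
   * // A sketch; the path is hypothetical
   * uploadfs.identifyLocalImage('/tmp/photo.jpg', function (err, info) {
   *   if (err) { return console.error(err); }
   *   console.log(info.extension, info.width, info.height);
   * });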
549 | */
550 |
551 | self.identifyLocalImage = function (path, callback) {
552 | return self._image.identify(path, callback);
553 | };
554 |
555 | /**
556 | * Returns the image sizes array with which uploadfs was configured.
557 | * This may be of use if you must iterate over the various generated
558 | * images later.
559 | *
560 | * However note that a best practice is to retain information about the sizes
561 | * that were expected when each image was actually uploaded, because you might
562 | * change your mind and add or remove sizes later.
563 | * @return {array} [Image size objects]
564 | */
565 | self.getImageSizes = function () {
566 | return imageSizes;
567 | };
568 |
569 | /**
570 | * Destroys the uploadfs instance, allowing the backends to release any
571 | * resources they may be holding, such as file descriptors or interval timers.
572 | * Backends that hold such resources should implement their own `destroy` method,
573 | * also accepting a callback. The callback will receive an error if anything
574 | * goes awry during the cleanup process. This method does NOT remove any
575 | * content, it just releases system resources.
576 | * @param {function} callback
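   * @example
   * // A sketch: release backend resources at shutdown
   * uploadfs.destroy(function (err) {
   *   if (err) { return console.error(err); }
   * });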
577 | */
578 | self.destroy = function (callback) {
579 | const callbacks = [
580 | self._storage.destroy || noOperation,
581 | self._image.destroy || noOperation
582 | ];
583 | return async.parallel(callbacks, callback);
584 | function noOperation(callback) {
585 | return callback(null);
586 | }
587 | };
588 |
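  // Migrate previously disabled files to the naming scheme based on the
  // disabledFileKey option (migrateFromDisabledFileKey reverses this).
  // Only relevant for storage backends that implement these methods;
  // for the others they are no-ops.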
589 | self.migrateToDisabledFileKey = function (callback) {
590 | const method = self._storage.migrateToDisabledFileKey;
591 | if (!method) {
592 | // Not relevant for this backend
593 | return callback(null);
594 | }
595 | return self._storage.migrateToDisabledFileKey(callback);
596 | };
597 |
598 | self.migrateFromDisabledFileKey = function (callback) {
599 | const method = self._storage.migrateFromDisabledFileKey;
600 | if (!method) {
601 | // Not relevant for this backend
602 | return callback(null);
603 | }
604 | return self._storage.migrateFromDisabledFileKey(callback);
605 | };
606 |
607 | // Called by `convert` to postprocess resized/cropped images
608 | // for optimal file size, etc.
609 |
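  // For reference, an entry in options.postprocessors looks roughly like
  // this (a sketch; the function body is hypothetical):
  //
  // {
  //   extensions: [ 'gif', 'jpg', 'png' ],
  //   options: {},
  //   // With four arguments the postprocessor is callback-driven;
  //   // with three it must return a promise
  //   postprocessor: function (files, folder, options, callback) {
  //     // Optimize the files in place here, then:
  //     return callback(null);
  //   }
  // }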
610 | self.postprocess = function (files, callback) {
611 | const sample = files[0];
612 | if (!sample) {
613 | return callback(null);
614 | }
615 | const relevant = _.filter(
616 | self.options.postprocessors || [],
617 | function (postprocessor) {
618 | const matches = sample.match(/\.(\w+)$/);
619 | if (!matches) {
620 | return false;
621 | }
622 | const extension = matches[1];
623 | return _.includes(postprocessor.extensions, extension);
624 | }
625 | );
626 | const folder = require('path').dirname(sample);
627 | return async.eachSeries(
628 | relevant,
629 | function (postprocessor, callback) {
630 |       if (postprocessor.postprocessor.length === 4) { // 4-arg functions are callback-driven
631 | return postprocessor.postprocessor(
632 | files,
633 | folder,
634 | postprocessor.options,
635 | callback
636 | );
637 | } else {
638 | return postprocessor
639 | .postprocessor(files, folder, postprocessor.options)
640 | .then(function () {
641 | return callback(null);
642 | })
643 | .catch(function (err) {
644 | return callback(err);
645 | });
646 | }
647 | },
648 | callback
649 | );
650 | };
651 |
652 | function prefixPath(path) {
653 | // Resolve any double // that results from the prefix
654 | return (self.prefix + path).replace(/\/\//g, '/');
655 | }
656 |
657 | function ensureTempDir() {
658 | if (!ensuredTempDir) {
659 | if (!fs.existsSync(tempPath)) {
660 | fs.mkdirSync(tempPath);
661 | }
662 | ensuredTempDir = true;
663 | }
664 | }
665 | }
666 |
667 | module.exports = function () {
668 | return new Uploadfs();
669 | };
670 |
--------------------------------------------------------------------------------