├── .eslintrc
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── README.md
├── azureReplicateTestOptions-sample.js
├── azureTestOptions-sample.js
├── badges
│   ├── npm-audit-badge.png
│   └── npm-audit-badge.svg
├── defaultGzipBlacklist.js
├── gcsTestOptions-sample.js
├── lib
│   ├── copyFile.js
│   ├── image
│   │   ├── imagemagick.js
│   │   └── sharp.js
│   ├── storage
│   │   ├── azure.js
│   │   ├── contentTypes.js
│   │   ├── gcs.js
│   │   ├── local.js
│   │   ├── noGzipContentTypes.js
│   │   └── s3.js
│   └── utils.js
├── logos
│   ├── logo-box-builtby.png
│   └── logo-box-madefor.png
├── package.json
├── s3TestOptions-sample.js
├── sample.js
├── test-imagemagick.js
├── test-sharp.js
├── test.jpg
├── test.svg
├── test.txt
├── test.webp
├── test
│   ├── azure.js
│   ├── gcs.js
│   ├── local.js
│   ├── one
│   │   └── two
│   │       └── three
│   │           └── test.txt
│   ├── s3.js
│   ├── test.jpg
│   ├── test.tar.gz
│   └── test.txt
├── test2.txt
├── uploadfs.js
└── webp-test.js
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 |   "extends": "apostrophe",
3 |   "rules": {
4 |     "no-console": 0,
5 |     "node/no-callback-literal": 0
6 |   },
7 |   "overrides": [
8 |     {
9 |       "files": [ "**/public/**/*.js" ],
10 |       "excludedFiles": [ "**/public/vendor/**/*.js" ],
11 |       "globals": {
12 |         "window": true,
13 |         "document": true,
14 |         "location": true,
15 |         "apos": true,
16 |         "_": true,
17 |         "async": true,
18 |         "confirm": true,
19 |         "$": true,
20 |         "CKEDITOR_BASEPATH": true,
21 |         "CKEDITOR": true,
22 |         "alert": true,
23 |         "jQuery": true,
24 |         "sluggo": true,
25 |         "moog": true,
26 |         "Pikaday": true,
27 |         "moment": true
28 |       }
29 |     },
30 |     {
31 |       "files": [ "test/**/*.js" ],
32 |       "globals": {
33 |         "describe": true,
34 |         "it": true,
35 |         "after": true,
36 |         "before": true
37 |       }
38 |     }
39 |   ]
40 | }
41 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.sw*
2 | gcs-credentials-uploadfstest.json
3 | copy-out-test.txt
4 | npm-debug.log
5 | .DS_Store
6 | node_modules
7 | s3TestOptions.js
8 | gcsTestOptions.js
9 | azureTestOptions.js
10 | public/uploads
11 | temp
12 | package-lock.json
13 | .jshintrc
14 | # an extra local test in my checkout
15 | test-jimp.js
16 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 1.24.3 (2025-03-25)
4 |
5 | * Fix missing variable which led to confusing error messages if the configured image backend is unavailable and prevented automatic fallback from `sharp` to `imagemagick`.
6 |
7 | ## 1.24.2 (2024-12-09)
8 |
9 | * Corrected npm audit warning by eliminating a dependency on `gm` which is not actively maintained.
10 |
11 | ## 1.24.1 (2024-10-15)
12 |
13 | * Bug fix: error handling for `streamOut`. If an HTTP error status code is encountered, the stream will emit an error, and the error object will have a `statusCode` property, allowing downstream code to handle this situation appropriately.
14 |
15 | ## 1.24.0 (2024-10-15)
16 |
17 | * Bug fix: `bucketObjectsACL` is now respected by the `enable` method, so that method no longer makes files `public` again. Previously it was only respected at `copyIn` / `copyImageIn` time.
18 | * New feature: `disabledBucketObjectsACL` is now also supported. It is used by the `disable` method rather than
19 | assuming `private` (which is still the default).
20 |
21 | ## 1.23.0 (2024-10-14)
22 |
23 | * Introduced `streamOut` API for `local` and `s3` storage backends.
24 |
25 | ## 1.22.7 (2024-09-24)
26 |
27 | * `.mp3` files are no longer gzip encoded: they do not benefit from gzip encoding, and the transfer encoding header failed to be sent for them.
28 |
29 | ## 1.22.6 (2024-09-03)
30 |
31 | * `.gz` files now receive the correct content type in S3.
32 | * `.gz` files are now exempt from gzip transfer encoding because they are already gzipped.
33 | * `s3.js` tests now use environment variables rather than
34 | a git-excluded local file.
35 |
36 | ## 1.22.5 (2024-07-10)
37 |
38 | * Document options for avoiding a public S3 bucket.
39 |
40 | ## 1.22.4 2024-06-11
41 |
42 | * Use latest `rimraf` package, silencing a deprecation warning.
43 |
44 | ## 1.22.3 2023-10-16
45 |
46 | * Security: update `sharp` to fix a [potential security risk](https://security.snyk.io/vuln/SNYK-JS-SHARP-5922108). You should update your project's
47 | dependencies manually or with `npm update` to ensure you get this fix.
48 |
49 | ## 1.22.2 2023-08-03
50 |
51 | * Bump to next major version of google cloud storage API to please `npm audit`. There was no actual security vulnerability due to the way the module in question was actually used.
52 | * Update our eslint configuration.
53 | * Modernize the source from `var` to `const` and `let` in all cases to satisfy eslint and help prevent future bugs. This does not change the behavior of the code.
54 |
55 | ## 1.22.1 2023-05-03
56 |
57 | * Corrected behavior of `getUrl` method for Azure storage, for Apostrophe compatibility. This regression was introduced an hour ago in 1.22.0.
58 |
59 | ## 1.22.0 2023-05-03
60 |
61 | * Remove `azure-storage` in favor of the actively supported `@azure/storage-blob`, refactor. No public API changes.
62 | * Remove `request` package and all related dependencies in favor of the actively supported `node-fetch@2`, refactor tests.
63 | * Update outdated dev dependencies.
64 |
65 | ## 1.21.0 2023-02-11
66 |
67 | * Adds tests for `webp` files, updates the package scripts to include a "webp" script to run the tests, and adds a webp test image (Note: one test is commented out because `sharp` currently fails to reorient webp files). Thanks to [Isaac Preston](https://github.com/ixc7) for this contribution.
68 | * `https` is now the default protocol for S3. As it is always supported and there are no uploadfs+S3 use cases where `http` is preferred, this is not considered a bc break.
69 |
70 | ## 1.20.1 2022-12-13
71 |
72 | * Add `webm` to the list of file formats with a known content type, and add it to the list of types that should not be gzip encoded, as it is precompressed and Chrome appears to behave poorly if it is gzip encoded.
73 |
74 | ## 1.20.0 2022-08-18
75 |
76 | * Default image processing library changed to [sharp.js](https://www.npmjs.com/package/sharp) for excellent performance
77 | * Support for jimp and imagecrunch removed (added fallback to sharp for bc)
78 | * imagemagick is now the fallback if sharp installation fails on a particular platform
79 | * tests for sharp have been added and the package scripts updated to add "test-sharp"
80 |
81 | ## 1.19.0 2022-01-21
82 |
83 | * New options `noGzipContentTypes` and `addNoGzipContentTypes` to configure content types which should not be gzipped when using the `s3` storage backend. Thanks to Christian Litzlbauer.
84 |
85 | ## 1.18.5 2021-12-07
86 |
87 | ### Fixed
88 |
89 | * Local storage is fully compatible with Node 16 and later, as well as earlier releases previously supported.
90 | * Removed a stray folder.
91 |
92 | ## 1.18.4 - 2021-10-08
93 |
94 | ### Fixed
95 |
96 | * Updates jimp to resolve npm audit warning.
97 |
98 | ## 1.18.3 - 2021-08-13
99 |
100 | ### Fixed
101 |
102 | * Set Azure containers public access level to `blob` instead of `container` to ensure anonymous users cannot list the content.
103 |
104 | ## 1.18.2
105 |
106 | * Addressed `npm audit` complaints about `mkdirp` by using a simple `mkdirp` implementation that has no legacy compatibility issues.
107 | * Addressed `npm audit` complaints about `mocha` and friends by upgrading `mocha`.
108 | * There are currently `npm audit` warnings about `azure-storage`, however a fix for this is forthcoming according to the upstream maintainers, and the existing semver ranges in this package will pick it up on `npm audit` when released.
109 |
110 | ## 1.18.1
111 |
112 | * Bug fix: the `sizes` option to `copyImageIn` now works even if `imageSizes` was not passed at all when calling `init`.
113 |
114 | ## 1.18.0
115 |
116 | * Support for a `sizes` option when calling `copyImageIn`, removing the requirement that all uploads are scaled to the same set of sizes. If the option is not provided the globally configured sizes are used.
117 |
118 | ## 1.17.2
119 |
120 | * Documented the `endpoint` option. Thanks to Joe Innes for this contribution.
121 |
122 | ## 1.17.1
123 |
124 | * Updates ESLint configuration and fixes errors.
125 |
126 | ## 1.17.0
127 |
128 | * Updated the `@google-cloud/storage` module to the 5.x series to address a possible security vulnerability reported by `npm audit`. Version 5.x does not support node 8, which is itself not supported, so you should not be running it anymore.
129 | * However, we also made the libraries for all three cloud storage backends (GCS, S3, and Azure) `optionalDependencies`. If they fail to install for any reason, uploadfs will still work, as long as you do not try to use that specific backend.
130 | * A longstanding bug in GCS storage that broke its use with ApostropheCMS has been fixed. Leading slashes in paths are no longer stored in a way that produces double slashes in URLs and breaks Apostrophe's URL-building. As far as we're concerned, this was a bug, since it broke the unit tests.
131 | * However, for the benefit of anyone who preferred this behavior for non-Apostrophe applications, the new `strictPaths: true` option may be passed when configuring uploadfs to get the old behavior in which leading slashes are not finessed and the URL will actually contain a double slash.
132 |
133 | ## 1.16.0
134 |
135 | * Added bucketObjectsACL option to s3.js to allow override of default 'public-read' permission when using a restricted S3 bucket to store assets. Thanks to Shaun Hurley for the contribution.
136 |
137 | ## 1.15.1
138 |
139 | * Using the latest version of jimp, which resolves an `npm audit` issue. JPEG EXIF rotation autocorrection is now standard in jimp so we don't explicitly invoke it anymore but should get the same good results with smartphone photos etc.
140 |
141 | ## 1.15.0
142 |
143 | * gzip content encoding for S3. When using `copyIn` to copy a file of a suitable type into S3, it will be gzipped and the appropriate content encoding will be set so that browsers automatically do the right thing when they download it. Similarly, the `copyOut` implementation for S3 now transparently supports downloading the original, uncompressed content from S3. The standard web image formats and zipfiles are not double-compressed because the benefit is minimal, so the CPU impact on phones is not justified in this case.
144 |
145 | ## 1.14.1
146 |
147 | * Depend on GCS 4.x to address npm audit warning. There appear to be no relevant breaking API changes in GCS.
148 |
149 | ## 1.14.0
150 |
151 | * Failover: azure copyOut now attempts to copy from every available replica, for durability
152 | * azure errors now report the account and container concerned so you can identify the faulty replica; if all were tried (copyOut), ALL is reported. This is done via `account` and `container` properties on the error object
153 | * eslint fixes, including undefined variable fixes
154 |
155 | ## 1.13.0
156 |
157 | * Now compatible with S3-like backends that build the bucket URL as a path rather than a subdomain. To enable this behavior, set the `s3ForcePathStyle` option to `true`. Thanks to Funkhaus Creative for this contribution.
158 |
159 | ## 1.12.0
160 |
161 | * Google Cloud Storage (GCS) support. Thanks to Nick Bauman for this contribution.
162 |
163 | ## 1.11.1
164 |
165 | * Azure storage backend: `mp4` has been added to the list of formats that are excluded from gzip transfer encoding by default. This is because it does not stream properly in Chrome and saves very little space
166 |
167 | ## 1.11.0
168 |
169 | * The new `prefix` option, if present, is prepended to all `uploadfs` paths before they reach the storage layer. This makes it easy for several sites to share, for instance, the same S3 bucket without confusion. The `getUrl()` method also reflects the prefix, unless the `cdn` option is in play, as cdn URLs might not include a prefix. Always set the `url` subproperty of `cdn` with the prefix you need, if any.
170 |
171 | ## 1.10.2
172 |
173 | We fixed some significant issues impacting users of the `azure` storage backend. If you use that backend you should upgrade:
174 |
175 | * Get extensions from the uploadfs path so gzipped files are not all `application/octet-stream`.
176 | * Pass the content-encoding header properly. Please note that files already uploaded to `azure` with uploadfs are gzipped but do *not* have the correct header and so your webserver may not recognize them correctly, especially if used for CSS files and other text formats. You can resolve this by uploading them again.
177 | * `copyOut` now correctly reverses `copyIn` completely, including gunzipping the file if necessary. Without this change cropping, etc. did not work.
178 | * Default test path covers these issues correctly.
179 |
180 | ## 1.10.1
181 |
182 | * If `replicateClusters` exists but is an empty array, the credential options are used instead. This was not a bug fix, exactly, but it is a nice "do what I mean" feature.
183 | * A single `gzip` object was being reused, leading to failures on subsequent writes to Azure. Fixed.
184 | * The Azure backend contained a global array, thus limiting you to a single instance of `uploadfs` in your project. Fixed.
185 |
186 | ## 1.10.0
187 |
188 | `imagemin` is no longer a dependency. Instead the new `postprocessors` option allows you to optionally pass it in. `imagemin` and its plugins have complicated dependencies that don't build smoothly on all systems, and it makes sense to leave the specifics of this step up to the users who want it.
189 |
190 | Since setting the `imagemin: true` option doesn't hurt anything in 1.10.0 (you still get your images, just not squeezed quite as small), this is not a bc break.
191 |
192 | Deemphasized `imagecrunch`. People don't serve public sites on Macs anyway and homebrew can install `imagemagick` easily.
193 |
194 | ## 1.9.2
195 |
196 | `mocha` and `lodash` upgraded to satisfy `npm audit`.
197 |
198 | ## 1.9.1
199 |
200 | * All `imagemin-` plugin modules are now `optionalDependencies` and uploadfs can print a warning at startup and continue without any one of them. In addition, if `imagemin` fails, this situation is tolerated with a warning printed and the images are still transformed as they would be without `imagemin`. This is necessary because [`imagemin-pngquant` fails on CentOS 7 without sysadmin intervention to install additional system packages outside of npm](https://github.com/imagemin/pngquant-bin/issues/77), and `cjpeg` fails to run without extra libraries even though it does `npm install`, etc.
201 |
202 | ## 1.9.0
203 |
204 | * Azure support.
205 | * Added `migrateToDisabledFileKey` and `migrateFromDisabledFileKey` methods for use when switching to the option of renaming files in a cryptographically secure way rather than changing their permissions. These methods change the approach for all existing disabled files.
206 |
207 | ## 1.8.0
208 |
209 | * Added the optional `destroy` method, which allows for graceful release of resources such as file descriptors or timeouts that may belong to backends.
210 |
211 | ## 1.7.2
212 |
213 | * Added mime type for `svg` as standard equipment.
214 | * User-configured mime types now merge with the standard set, making it easy to add a few without starting from scratch.
215 |
216 | Thanks to tortilaman.
217 |
218 | ## 1.7.1
219 |
220 | The `s3` storage backend now respects the `endpoint` option properly when asked to provide URLs. Thanks to tortilaman.
221 |
222 | ## 1.7.0
223 |
224 | Introduced the `disabledFileKey` option, a feature of the local storage backend which substitutes filename obfuscation for file permissions when using `enable` and `disable`. This is useful when you wish to use `rsync` and other tools outside of uploadfs without the aggravation of permissions issues, but preserve the ability to effectively disable web access, as long as the webserver does not offer index listings for folders.
225 |
226 | Documented the need to set `https: true` when working with S3 if your site uses `https`.
227 |
228 | ## 1.6.2
229 |
230 | Node 8.x added an official `stream.destroy` method with different semantics from the old unofficial one. This led to a callback being invoked twice in the event of an error when calling the internal `copyFile` mechanism. A unit test was added, the issue was fixed, and the fix was verified in all supported LTS versions of Node.js.
231 |
232 | ## 1.6.1
233 |
234 | 1.6.0 introduced a bug that broke `enable` and `disable` in some cases. This became apparent when Apostrophe began to invoke these methods. Fixed.
235 |
236 | ## 1.6.0
237 |
238 | `enablePermissions` and `disablePermissions` options, for the `local` storage backend. By default `disable` sets permissions to `0000`. If you prefer to block group access but retain user access, you might set this to `0400`. Note that octal constants are forbidden in modern JavaScript, so it is better to write `parseInt('0400', 8)`.
239 |
240 | ## 1.5.1
241 |
242 | * The s3 storage backend now honors the `cachingTime` option properly again. Thanks to Matt Crider.
243 |
244 | ## 1.5.0
245 |
246 | * The s3 storage backend now uses the official AWS SDK for JavaScript. The knox module is no longer maintained and is missing basic request signature support that is mandatory for newer AWS regions. It is no longer a serious option.
247 |
248 | Every effort has been made to deliver 100% backwards compatibility with the documented options of knox, and the full test suite is passing with the new AWS SDK.
249 |
250 | ## 1.4.0
251 |
252 | * The new pure-JavaScript `jimp` image backend works "out of the box" even when ImageMagick is not installed. For faster operation and GIF support, you should still install ImageMagick. Thanks to Dave Ramirez for contributing this feature.
253 |
254 | ## 1.3.6
255 |
256 | * Octal constants are forbidden in ES6 strict mode; use `parseInt(x, 8)`. No other changes.
257 |
258 | ## 1.3.5
259 |
260 | * All tests passing.
261 | * Rewrote automatic directory cleanup mechanism of local storage to cope correctly with more complex directory structures.
262 |
263 | ## 1.3.4
264 |
265 | * Bumped dependencies to newer, better maintained versions. All tests passing.
266 | * Removed accidental dependency on `global-tunnel-ng` and commented out a one-time test in `test.js`.
267 |
268 | ## 1.3.3
269 |
270 | * Dependency on `request` is no longer locked down to a minor version, which was unnecessary and caused peer dependency failures in some projects (an npm design flaw IMHO, but never mind)
271 |
272 | ## 1.3.2
273 |
274 | * Updated dependency on `rimraf` module to eliminate deprecation warning for `graceful-fs`
275 |
276 | ## 1.3.1
277 |
278 | * Whoops, refer to original width and height properly for gifsicle
279 |
280 | ## 1.3.0
281 |
282 | * The `imagemagick` image conversion backend now optionally uses `gifsicle` to convert animated GIFs. Turn on this behavior with the `gifsicle: true` option. There are tradeoffs: `gifsicle` is much faster and uses much less RAM, but seems to produce slightly lower quality results. On a very large animation though, you're almost certain to run out of RAM with `imagemagick`. Of course you must install `gifsicle` to take advantage of this.
283 |
284 | ## 1.2.2
285 |
286 | * The very short-lived version 1.2.1 did not retain the originals of GIFs (when desired). This has been fixed.
287 |
288 | ## 1.2.1
289 |
290 | * Animated GIF conversion strategy has been customized once again. We found cases in which the combined pipeline was 4x slower (!) and also needed to add in `-coalesce` to prevent bad frames in some cases.
291 |
292 | ## 1.2.0
293 |
294 | * Added the `cachingTime` and `cdn` options. Thanks to Vispercept.
295 |
296 | * Fixed a bug where the local storage backend could invoke its callbacks twice, with both failure and success, when an error occurs reading from a local file in newer versions of node (this bug did not appear in 0.10.x). The fix is backwards compatible.
297 |
298 | ## 1.1.10
299 |
300 | The error message shown when imagemagick is not installed is a little more informative about what you must do.
301 |
302 | ## 1.1.9
303 |
304 | Use latest knox. No functionality changes.
305 |
306 | ## 1.1.7-1.1.8
307 |
308 | Supports multiple instances when using the default storage and image backends. Previously those backends only supported one instance. This was corrected without changing the public API for custom backends, which have always supported multiple instances.
309 |
310 | ## 1.1.5-1.1.6
311 |
312 | GIF animations have been merged back into the main pipeline thanks to `-clone 0--1` which preserves all frames of the animation. It's a little faster, and it's also less code to maintain.
313 |
314 | ## 1.1.4
315 |
316 | GIF animations are preserved in the imagemagick backend, with full support for resizing and cropping. A separate, slower pipeline is used due to limitations of the `+clone` mechanism in imagemagick. The API has not changed.
317 |
318 | ## 1.1.3
319 |
320 | The imagecrunch backend now sets `adjustedOriginal` correctly when it does a simple copy of the original of a PNG or JPEG.
321 |
322 | ## 1.1.0
323 |
324 | The new `disable` and `enable` methods turn web access to the specified path off and on again, respectively. The new `getImageSizes` method simply gives you access to the image sizes that are currently configured.
325 |
326 | There are no changes elsewhere in the code.
327 |
328 | ## 1.0.0
329 |
330 | None! Since the additions in version 0.3.14 we've had no real problems. We now support both alternate storage backends and alternate image rendering backends. Test coverage is thorough and everything's passing. What more could you want? It's time to declare it stable.
331 |
332 | ## 0.3.15
333 |
334 | Decided that imagecrunch should output JSON, so that's now what the backend expects.
335 |
336 | ## 0.3.14
337 |
338 | In addition to storage backends, you may also supply alternate image processing backends. The `backend` option has been renamed to `storage`, however `backend` is accepted for backwards compatibility. The `image` option has been introduced for specifying an image processing backend. In addition to the existing `imagemagick` backend, there is now an `imagecrunch` backend based on the Mac-specific [imagecrunch](http://github.com/punkave/imagecrunch) utility.
339 |
340 | If you do not specify an `image` backend, uploadfs will look for imagecrunch and imagemagick in your PATH, stopping as soon as it finds either the `imagecrunch` command or the `identify` command.
341 |
342 | ## 0.3.13
343 |
344 | `copyImageIn` has been rewritten to run more than 4x faster! We now generate our own imagemagick `convert` pipeline which takes advantage of two big optimizations:
345 |
346 | * Load, orient and crop the original image only once, then output it at several sizes in the same pipeline. This yields a 2x speedup.
347 | * First scale the image to the largest size desired, then scale to smaller sizes based on that as part of the same pipeline, without creating any lossy intermediate files. This yields another 2x speedup and a helvetica of designers were unable to see any difference in quality. ("Helvetica" is the collective noun for a group of designers.)
348 |
349 | The new `parallel` option allows you to specify the maximum number of image sizes to render simultaneously. This defaults to 1, to avoid using a lot of memory and CPU, but if you are under the gun to render a lot of images in a hurry, you can set this as high as the number of image sizes you have. Currently there is no throttling mechanism for multiple unrelated calls to `uploadfs.copyImageIn`, this option relates to the rendering of the various sizes for a single call.
350 |
351 | ## 0.3.11
352 |
353 | The new `parallel` option allows you to specify the maximum number of image sizes to render simultaneously. This defaults to 1, to avoid using a lot of memory and CPU, but if you are under the gun to render a lot of images in a hurry, you can set this as high as the number of image sizes you have. Currently there is no throttling mechanism for multiple unrelated calls to `uploadfs.copyImageIn`, this option relates to the rendering of the various sizes for a single call.
354 |
355 | ## 0.3.7-0.3.10
356 |
357 | Just packaging and documentation. Now a P'unk Avenue project.
358 |
359 | ## 0.3.6
360 |
361 | The `uploadfs` functionality for identifying a local image file via ImageMagick has been refactored and made available as the `identifyLocalImage` method. This method is primarily used internally but is occasionally helpful in migration situations (e.g. "I forgot to save the metadata for any of my images before").
362 |
363 | ## 0.3.5
364 |
365 | Starting in version 0.3.5, you can set the quality level for scaled JPEGs via the scaledJpegQuality option, which defaults to 80. You can pass this option either when initializing `uploadfs` or on individual calls to `copyImageIn`. This option applies only to scaled versions of the image. If uploadfs modifies the "original" image to scale or orient it, Imagemagick's default behavior stays in effect, which is to attempt to maintain the same quality level as the original file. That makes sense for images that will be the basis for further cropping and scaling but results in impractically large files for web deployment of scaled images. Thus the new option and the new default behavior.
366 |
367 | ## 0.3.4
368 |
369 | Starting in version 0.3.4, the getTempPath() method is available. This returns the same `tempPath` that was supplied to uploadfs at initialization time. Note that at this point the folder is guaranteed to exist. This is useful when you need a good place to `copyOut` something to, for instance in preparation to `copyImageIn` once more to carry out a cropping operation.
370 |
371 | ## 0.3.3
372 |
373 | Starting in version 0.3.3, cropping is available. Pass an options object as the third parameter to `copyImageIn`. Set the `crop` property to an object with `top`, `left`, `width` and `height` properties, all specified in pixels. These coordinates are relative to the original image. **When you specify the `crop` property, both the "full size" image copied into uploadfs and any scaled images are cropped.** The uncropped original is NOT copied into uploadfs. If you want the uncropped original, be sure to copy it in separately. The `width` and `height` properties of the `info` object passed to your callback will be the cropped dimensions.
374 |
375 | Also starting in version 0.3.3, `uploadfs` uses the `gm` module rather than the `node-imagemagick` module for image manipulation, but configures `gm` to use imagemagick. This change was made because `node-imagemagick` has been abandoned and `gm` is being actively maintained. This change has not affected the `uploadfs` API in any way. Isn't separation of concerns wonderful?
376 |
377 | ## 0.3.2
378 |
379 | Starting in version 0.3.2, you can copy files back out of uploadfs with `copyOut`. You should not rely heavily on this method, but it is occasionally unavoidable, for instance if you need to crop an image differently. When possible, cache files locally if you may need them locally soon.
380 |
381 | ## 0.3.0
382 |
383 | Starting in version 0.3.0, you must explicitly create an instance of uploadfs. This allows you to have more than one, separately configured instance, and it also avoids serious issues with modules not seeing the same instance automatically as they might expect. For more information see [Singletons in #node.js modules cannot be trusted, or why you can't just do var foo = require('baz').init()](http://justjs.com/posts/singletons-in-node-js-modules-cannot-be-trusted-or-why-you-can-t-just-do-var-foo-require-baz-init).
384 |
385 | Existing code that isn't concerned with sharing uploadfs between multiple modules will only need a two line change to be fully compatible:
386 |
387 |     // CHANGE THIS
388 |     const uploadfs = require('uploadfs');
389 |
390 |     // TO THIS (note the extra parens)
391 |     const uploadfs = require('uploadfs')();
392 |
393 | If you use uploadfs in multiple source code files, you'll need to pass your `uploadfs` object explicitly, much as you pass your Express `app` object when you want to add routes to it via another file.
394 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013 P'unk Avenue LLC
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # uploadfs
2 |
3 |
4 |
5 | uploadfs copies files to a web-accessible location and provides a consistent way to get the URLs that correspond to those files. uploadfs can also resize, crop and autorotate uploaded images. uploadfs includes S3-based, Azure-based, GCS-based and local filesystem-based backends and you may supply others. The API offers the same conveniences with every backend, avoiding the most frustrating features of each:
6 |
7 | * Parent directories are created automatically as needed (like S3 and Azure)
8 | * Content types are inferred from file extensions (like the filesystem)
9 | * Files are by default marked as readable via the web (like a filesystem + web server)
10 | * Images can be automatically scaled to multiple sizes
11 | * Images can be cropped
12 | * Images are automatically rotated if necessary for proper display on the web (i.e. iPhone photos with rotation hints are right side up)
13 | * Image width, image height and correct file extension are made available to the developer
14 | * Non-image files are also supported
15 | * Web access to files can be disabled and reenabled
16 | * GIF is supported, including animation, with full support for scaling and cropping
17 | * On fire about minimizing file sizes for your resized images? You can plug in `imagemin` and compatible tools using the `postprocessors` option.
18 |
19 | You can also remove a file if needed.
20 |
21 | It is possible to copy a file back from uploadfs, but there is no API to retrieve information about files in uploadfs. This is intentional. *Constantly manipulating directory information is much slower in the cloud than on a local filesystem and you should not become reliant on it.* Your code should maintain its own database of file information if needed, for instance in a MongoDB collection. Copying the actual contents of the file back may occasionally be needed however and this is supported.
22 |
23 | ## Requirements
24 |
25 | You need:
26 |
27 | * A "normal" filesystem in which files stay put forever, *OR* Amazon S3, *OR* Microsoft Azure, *OR* Google Cloud Platform OR a willingness to write a backend for something else (look at `s3.js`, `azure.js` and `local.js` for examples; just supply an object with the same methods, you don't have to supply a factory function).
28 |
29 | * Most modern macOS, Windows and Linux systems running Node.js >= 12.13.0 do not require any additional install or runtime dependencies. They will automatically use `sharp`, which is extremely fast. Systems not meeting these qualifications can still use this module if `imagemagick` is installed on the system. You can also write a backend for something else (look at `sharp.js` or `imagemagick.js` for examples; just supply an object with the same methods, and you don't have to supply a factory function).
30 |
31 | * If you need to use `imagemagick` and want faster GIF support: you'll need [gifsicle](https://www.lcdf.org/gifsicle/). It is an optional tool that processes large animated GIFs much faster. This package is not necessary with Sharp. Turn it on with the `gifsicle: true` option when calling `init`. Of course you must install `gifsicle` to use it. (Hint: your operating system probably has a package for it. Don't compile things.)
32 |
33 | * A local filesystem in which files stay put at least during the current request, to hold temporary files for Sharp's conversions. This is no problem with Heroku and most other cloud servers. It's just long-term storage that needs to be in S3 or Azure for some of them.
34 |
35 | > Note that Heroku includes Imagemagick. You can also install it with `apt-get install imagemagick` on Ubuntu servers. Homebrew can install `imagemagick` on Macs.
36 |
37 | ## API Overview
38 |
39 | * The `init` method passes options to the backend and invokes a callback when the backend is ready.
40 |
41 | * The optional `destroy(callback)` method releases any resources such as file descriptors and timeouts held by `uploadfs`.
42 |
43 | * The `copyIn` method takes a local filename and copies it to a path in uploadfs. (Note that Express conveniently sets us up for this by dropping file uploads in a temporary local file for the duration of the request.)
44 |
45 | * The `copyImageIn` method works like `copyIn`. In addition, it also copies in scaled versions of the image, corresponding to the sizes you specify when calling `init()`. Information about the image is returned in the second argument to the callback.
46 |
47 | * If you wish to crop the image, pass an options object as the third parameter to `copyImageIn`. Set the `crop` property to an object with `top`, `left`, `width` and `height` properties, all specified in pixels. These coordinates are relative to the original image. **When you specify the `crop` property, both the "full size" image copied into uploadfs and any scaled images are cropped.** The uncropped original is NOT copied into uploadfs. If you want the uncropped original, be sure to copy it in separately. The `width` and `height` properties of the `info` object passed to your callback will be the cropped dimensions.
48 |
49 | * The default JPEG quality setting for scaled-down versions of your image is `80`. This avoids unacceptably large file sizes for web deployment. You can adjust this via the `scaledJpegQuality` option, either when initializing uploadfs or when calling `copyImageIn`.
50 |
51 | * If you do not wish to always use the same set of image sizes, you may pass a `sizes` property as part of the options object when calling `copyImageIn`.
52 |
53 | * The `copyOut` method takes a path in uploadfs and a local filename and copies the file back from uploadfs to the local filesystem. This should be used only rarely. Heavy reliance on this method sets you up for poor performance in S3 and Azure. However it may be necessary at times, for instance when you want to crop an image differently later. *Heavy reliance on copyOut is a recipe for bad S3 and/or Azure performance. Use it only for occasional operations like cropping.*
54 |
55 | * The `streamOut` method takes a path in uploadfs and returns a readable stream. This should be used only rarely. Heavy reliance on this method sets you up for poor performance in S3 and Azure. However it may be necessary at times, for instance when permissions must be checked on a request-by-request basis in a proxy route. **This method, which is not required for normal use in ApostropheCMS, is currently implemented only in the `local` and `s3` storage backends.** Contributions for Azure and GCS are welcome.
56 |
57 | * The `remove` method removes a file from uploadfs.
58 |
59 | * The `getUrl` method returns the URL to which you should append uploadfs paths to fetch them with a web browser.
60 |
61 | * The `disable` method shuts off web access to a file. Depending on the storage backend it may also block the `copyOut` method, so you should be sure to call `enable` before attempting any further access to the file.
62 |
63 | * The `enable` method restores web access to a file.
64 |
65 | * The `getImageSizes` method returns the currently configured image sizes.
66 |
67 | * The `identifyLocalImage` method provides direct access to the `uploadfs` functionality for determining the extension, width, height and orientation of images. Normally `copyIn` does everything you need in one step, but this method is occasionally useful for migration purposes.
68 |
69 | The `destroy` method releases any resources such as file descriptors or timeouts that may be held by the backends, and then invokes its callback. Its use is optional, but command line Node apps might never exit without it.
70 |
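Here is a minimal sketch tying several of these methods together. The file names, paths and URLs are hypothetical, and `streamOut` is currently available only with the `local` and `s3` storage backends:

```javascript
const uploadfs = require('uploadfs')();

uploadfs.init({
  storage: 'local',
  uploadsPath: __dirname + '/public/uploads',
  uploadsUrl: 'http://localhost:3000/uploads',
  tempPath: __dirname + '/temp'
}, function(e) {
  if (e) {
    throw e;
  }
  // Copy a local file into uploadfs
  uploadfs.copyIn('./report.pdf', '/docs/report.pdf', function(e) {
    if (e) {
      return console.error(e);
    }
    // Build the web-accessible URL
    console.log(uploadfs.getUrl() + '/docs/report.pdf');
    // Rarely needed: copy the file back out to the local filesystem
    uploadfs.copyOut('/docs/report.pdf', '/tmp/report.pdf', function(e) {
      if (e) {
        return console.error(e);
      }
      // Also rarely needed: stream the file instead. On an HTTP error
      // the stream may emit an error carrying a statusCode property
      const stream = uploadfs.streamOut('/docs/report.pdf');
      stream.on('error', function(e) {
        console.error('streamOut failed:', e.statusCode || e);
      });
      stream.pipe(require('fs').createWriteStream('/tmp/report-copy.pdf'));
    });
  });
});
```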
71 | ## Working Example
72 |
73 | For a complete, very simple and short working example in which a user uploads a profile photo, see `sample.js`.
74 |
75 | Here's the interesting bit. Note that we do not supply an extension for the final image file, because we want to have Sharp figure that out for us.
76 |
77 | ```javascript
78 | app.post('/', multipartMiddleware, function(req, res) {
79 |   uploadfs.copyImageIn(req.files.photo.path, '/profiles/me', function(e, info) {
80 |     if (e) {
81 |       res.send('An error occurred: ' + e);
82 |     } else {
83 |       res.send('<h1>All is well. Here is the image in three sizes plus the original.</h1>' +
84 |         '<div><img src="' + uploadfs.getUrl() + info.basePath + '.small.' + info.extension + '" /></div>' +
85 |         '<div><img src="' + uploadfs.getUrl() + info.basePath + '.medium.' + info.extension + '" /></div>' +
86 |         '<div><img src="' + uploadfs.getUrl() + info.basePath + '.large.' + info.extension + '" /></div>' +
87 |         '<div><img src="' + uploadfs.getUrl() + info.basePath + '.' + info.extension + '" /></div>');
88 |     }
89 |   });
90 | });
91 | ```
92 |
93 | Note the use of `uploadfs.getUrl()` to determine the URL of the uploaded image. **Use this method consistently and your code will find the file in the right place regardless of the backend chosen.**
94 |
95 | ## Retrieving Information About Images
96 |
97 | When you successfully copy an image into uploadfs with copyImageIn, the second argument to your callback has the following useful properties:
98 |
99 | `width` (already rotated for the web if necessary, as with iPhone photos)
100 |
101 | `height` (already rotated for the web if necessary, as with iPhone photos)
102 |
103 | `originalWidth` (not rotated)
104 |
105 | `originalHeight` (not rotated)
106 |
107 | `extension` (`gif`, `jpg`, `webp` or `png`)
108 |
109 | You should record these properties in your own database if you need access to them later.
110 |
111 | **When cropping, the uncropped size of the original image is not returned by uploadfs. It is assumed that if you are cropping you already know what the original dimensions were.**
112 |
113 | The same information is available via `identifyLocalImage` if you want to examine a local file before handing it off to `copyImageIn`.
114 |
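To make this concrete, here is a sketch of a cropped `copyImageIn` call that also overrides the configured sizes via the per-call `sizes` option and records the returned properties. The paths and crop coordinates are hypothetical:

```javascript
uploadfs.copyImageIn('/tmp/upload.jpg', '/photos/cropped', {
  // Pixel coordinates relative to the original image
  crop: {
    top: 100,
    left: 50,
    width: 800,
    height: 600
  },
  // Optional per-call override of the imageSizes passed to init()
  sizes: [
    {
      name: 'thumbnail',
      width: 160,
      height: 160
    }
  ]
}, function(e, info) {
  if (e) {
    return console.error(e);
  }
  // Record these in your own database; width and height reflect the crop
  console.log(info.width, info.height, info.extension);
});
```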
115 | ## Removing Files
116 |
117 | Here's how to remove a file:
118 |
119 | ```javascript
120 | uploadfs.remove('/profiles/me.jpg', function(e) { ... });
121 | ```
122 |
123 | ## Disabling Access To Files
124 |
125 | This call shuts off **web access** to a file:
126 |
127 | ```javascript
128 | uploadfs.disable('/profiles/me.jpg', function(e) { ... });
129 | ```
130 |
131 | And this call restores it:
132 |
133 | ```javascript
134 | uploadfs.enable('/profiles/me.jpg', function(e) { ... });
135 | ```
136 |
137 | *Depending on the backend, `disable` may also block the copyOut method*, so be sure to call `enable` before attempting any further access to the file.
138 |
139 | *With the local storage backend, `disable` uses permissions `000` by default.* This is a big hassle if you want to be able to easily use rsync to move the files outside of `uploadfs`. **As an alternative, you can set the `disabledFileKey` option to a random string.** If you do this, uploadfs will *rename* disabled files based on an HMAC digest of the filename and the `disabledFileKey`. This is secure from the webserver's point of view, **as long as your webserver is not configured to display automatic directory listings of files**. But from your local file system's point of view, the file is still completely accessible. And that makes it a lot easier to use `rsync`.
140 |
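For example, a local-backend configuration using this approach might look like the following sketch; the key shown is a placeholder, use your own long random string:

```javascript
uploadfs.init({
  storage: 'local',
  uploadsPath: __dirname + '/public/uploads',
  uploadsUrl: 'http://localhost:3000/uploads',
  tempPath: __dirname + '/temp',
  // Disabled files are renamed using an HMAC digest of the filename
  // and this key, rather than having their permissions changed
  disabledFileKey: 'replace-this-with-a-long-random-string'
}, function(e) {
  if (e) {
    throw e;
  }
  // Ready for copyIn, disable, enable, etc.
});
```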
141 | *With the `azure` storage backend, you MUST set `disabledFileKey`.* This is because Azure provides no way to alter the permissions of a single blob (file). Our only option is to copy the blob to a new, cryptographically unguessable name and remove the old one while it is "disabled," then reverse the operation when it is enabled again.
142 |
143 | For your convenience in the event you should lose your database, the filenames generated still begin with the original filename. The presence of a cryptographically un-guessable part is enough to make them secure.
144 |
145 | Those using `local` storage can change their minds about using `disabledFileKey`. Use `uploadfs.migrateToDisabledFileKey(callback)` to migrate your existing disabled files to this approach, and `uploadfs.migrateFromDisabledFileKey(callback)` to migrate back. Before calling the former, add the option to your configuration. Before calling the latter, remove it.
146 |
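The migration calls themselves take only a callback:

```javascript
// After adding disabledFileKey to your configuration:
uploadfs.migrateToDisabledFileKey(function(e) {
  if (e) {
    return console.error(e);
  }
  console.log('Existing disabled files now use obfuscated names');
});
```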
147 | ## Configuration Options
148 |
149 | Here are the options we pass to `init()` in `sample.js`. Note that we define the image sizes we want the `copyImageIn` function to produce. No image will be wider or taller than the limits specified. The aspect ratio is always maintained, so one axis will often be smaller than the limits specified. Here's a hint: specify the width you really want, and the maximum height you can put up with. That way only obnoxiously tall images will get a smaller width, as a safeguard.
150 | ```javascript
151 | {
152 |   storage: 'local',
153 |   // Optional. If not specified, Sharp will be used with automatic
154 |   // fallback to Imagemagick.
155 |   image: 'sharp',
156 |   // Options are 'sharp' and 'imagemagick', or a custom image
157 |   // processing backend
158 |   uploadsPath: __dirname + '/public/uploads',
159 |   uploadsUrl: 'http://localhost:3000' + uploadsLocalUrl,
160 |   // Required if you use copyImageIn
161 |   // Temporary files are made here and later automatically removed
162 |   tempPath: __dirname + '/temp',
163 |   imageSizes: [
164 |     {
165 |       name: 'small',
166 |       width: 320,
167 |       height: 320
168 |     },
169 |     {
170 |       name: 'medium',
171 |       width: 640,
172 |       height: 640
173 |     },
174 |     {
175 |       name: 'large',
176 |       width: 1140,
177 |       height: 1140
178 |     }
179 |   ],
180 |   // Render up to 4 image sizes at once. Note this means 4 at once per call
181 |   // to copyImageIn. There is currently no built-in throttling of multiple calls to
182 |   // copyImageIn
183 |   parallel: 4,
184 |   // Optional. See "disabling access to files," above
185 |   // disabledFileKey: 'this should be a unique, random string'
186 | }
187 | ```
188 |
189 | Here is an equivalent configuration for S3:
190 |
191 | ```javascript
192 | {
193 | storage: 's3',
194 | // Add an arbitrary S3 compatible endpoint
195 | endpoint: 's3-compatible-endpoint.com',
196 | // Get your credentials at aws.amazon.com
197 | secret: 'xxx',
198 | key: 'xxx',
199 | // You need to create your bucket first before using it here
200 | // Go to aws.amazon.com
201 | bucket: 'getyourownbucketplease',
202 | // For read-after-write consistency in the US East region.
203 | // You could also use any other region name except us-standard
204 | region: 'external-1',
205 | // Required if you use copyImageIn, or use Azure at all
206 | tempPath: __dirname + '/temp',
207 | imageSizes: [
208 | {
209 | name: 'small',
210 | width: 320,
211 | height: 320
212 | },
213 | {
214 | name: 'medium',
215 | width: 640,
216 | height: 640
217 | },
218 | {
219 | name: 'large',
220 | width: 1140,
221 | height: 1140
222 | }
223 | ],
224 | // Render up to 4 image sizes at once. Note this means 4 at once per call
225 | // to copyImageIn. There is currently no built-in throttling of multiple calls to
226 | // copyImageIn
227 | parallel: 4
228 | }
229 | ```
230 |
231 | And, an equivalent configuration for Azure:
232 |
233 | ```javascript
234 | {
235 |   storage: 'azure',
236 |   account: 'storageAccountName',
237 |   container: 'storageContainerName',
238 |   key: 'accessKey',
239 |   disabledFileKey: 'a random string of your choosing',
240 |   // Always required for Azure
241 |   tempPath: __dirname + '/temp',
242 |   // By default we gzip encode EVERYTHING except for a short list of exceptions, found in defaultGzipBlacklist.js.
243 |   // If for some reason you want to enable gzip encoding for one of these types, you can;
244 |   // you can also add types to ignore when gzipping
245 |   gzipEncoding: {
246 |     'jpg': true,
247 |     'rando': false
248 |   },
249 |   imageSizes: [
250 |     {
251 |       name: 'small',
252 |       width: 320,
253 |       height: 320
254 |     },
255 |     {
256 |       name: 'medium',
257 |       width: 640,
258 |       height: 640
259 |     },
260 |     {
261 |       name: 'large',
262 |       width: 1140,
263 |       height: 1140
264 |     }
265 |   ],
266 |   // Render up to 4 image sizes at once. Note this means 4 at once per call
267 |   // to copyImageIn. There is currently no built-in throttling of multiple calls to
268 |   // copyImageIn
269 |   parallel: 4
270 | }
271 | ```
272 |
273 | With Azure you may optionally replicate the content across a cluster:
274 |
275 | ```javascript
276 | {
277 |   storage: 'azure',
278 |   replicateClusters: [
279 |     {
280 |       account: 'storageAccountName1',
281 |       container: 'storageContainerName1',
282 |       key: 'accessKey1',
283 |     },
284 |     {
285 |       account: 'storageAccountName2',
286 |       container: 'storageContainerName2',
287 |       key: 'accessKey2',
288 |     },
289 |   ],
290 |   ...
291 | }
292 | ```
293 |
294 | And, an equivalent configuration for Google Cloud Storage:
295 |
296 | ```javascript
297 | {
298 |   storage: 'gcs',
299 |   // Go to the Google Cloud Console, select your project and select the Storage item on the left side of the screen to find / create your bucket. Put your bucket name here.
300 |   bucket: 'getyourownbucketplease',
301 |   // Select your region
302 |   region: 'us-west-2',
303 |   // Required if you use copyImageIn, or use Azure at all
304 |   tempPath: __dirname + '/temp',
305 |   imageSizes: [
306 |     {
307 |       name: 'small',
308 |       width: 320,
309 |       height: 320
310 |     },
311 |     {
312 |       name: 'medium',
313 |       width: 640,
314 |       height: 640
315 |     },
316 |     {
317 |       name: 'large',
318 |       width: 1140,
319 |       height: 1140
320 |     }
321 |   ],
322 |   // Render up to 4 image sizes at once. Note this means 4 at once per call
323 |   // to copyImageIn. There is currently no built-in throttling of multiple calls to
324 |   // copyImageIn
325 |   parallel: 4
326 | }
327 | ```
328 | Note that GCS assumes the presence of a service account file and a `GOOGLE_APPLICATION_CREDENTIALS` environment variable pointing to this file. For example:
329 | ```sh
330 | export GOOGLE_APPLICATION_CREDENTIALS=./projectname-f7f5e919aa79.json
331 | ```
332 |
333 | In the above example, the file named `projectname-f7f5e919aa79.json` sits in the root of the module.
334 |
335 | For more information, see [Creating and Managing Service Accounts](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) at cloud.google.com.
336 |
337 | > When using Google Cloud Storage, you **must enable object ACLs for the
338 | bucket**. Otherwise you will get this error: "cannot use ACL API to set object policy when object policies are disabled." You have 90 days to do this after first creating a bucket, otherwise you will need to use a new bucket for uploadfs.
339 |
340 | ## Less Frequently Used Options
341 |
342 | * If you are using the `local` backend (files on your server's drive), you might not like that when `disable` is called, the permissions of a file are set to `000` (no one has access). We suggest using the `disabledFileKey` option to completely avoid this issue. However, if you wish, you can pass the `disablePermissions` option. As usual with Unix permissions, this is an OCTAL NUMBER, not a decimal one. Octal constants have been deprecated, so in modern JavaScript it is best to write it like this:
343 |
344 | ```javascript
345 | // Only the owner can read. This is handy if
346 | // your proxy server serves static files for you and
347 | // shares a group but does not run as the same user
348 | disablePermissions: parseInt("0400", 8)
349 | ```
350 |
351 | You can also change the permissions set when `enable` is invoked via `enablePermissions`. Keep in mind that `enable()` is not invoked for a brand new file (it receives the default permissions). You might choose to write:
352 |
353 | ```javascript
354 | // Only the owner and group can read.
355 | enablePermissions: parseInt("0440", 8)
356 | ```
357 |
358 | * In backends like sharp or imagemagick that support it, even the "original" is rotated for you if it is not oriented "top left," as with some iPhone photos. This is necessary for the original to be of any use on the web. But it does modify the original. So if you really don't want this, you can set the `orientOriginals` option to `false`.
359 |
360 | * It is possible to pass your own custom storage module instead of `local` or `s3`. Follow `local.js` or `s3.js` as a model, and specify your backend like this:
361 |
362 | ```javascript
363 | storage: require('mystorage.js')
364 | ```
365 |
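Your module should export an object implementing the methods described in the API overview above. The signatures below are an approximate sketch, not the authoritative interface; check `lib/storage/local.js` for the exact argument lists before relying on them:

```javascript
// A rough sketch of a custom storage backend's shape (approximate signatures)
module.exports = {
  init: function(options, callback) { return callback(null); },
  copyIn: function(localPath, path, options, callback) { return callback(null); },
  copyOut: function(path, localPath, options, callback) { return callback(null); },
  remove: function(path, callback) { return callback(null); },
  enable: function(path, callback) { return callback(null); },
  disable: function(path, callback) { return callback(null); },
  getUrl: function() { return 'https://example.com/uploads'; },
  // Optional extras: streamOut(path) returning a readable stream,
  // and destroy(callback) to release resources
  destroy: function(callback) { return callback(null); }
};
```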
366 | * You may specify an alternate image processing backend via the `image` option. Two backends, `sharp` and `imagemagick`, are built in. You may also supply an object instead of a string to use your own image processor. Just follow the existing `sharp.js` file as a model.
367 |
368 | * In backends like Google Cloud Storage and S3, uploadfs finesses the path so that paths with a leading slash like `/foo/bar.txt` behave reasonably and a double slash never appears in the URL. For Apostrophe this is a requirement. However, if you have your heart set on the double slashes, you can set the `strictPaths` option to `true`.
369 |
370 | ## Extra features for S3: caching, HTTPS, CDNs, permissions, and No Gzip Content Types
371 |
372 | By default, when users fetch files from S3 via the web, the browser is instructed to cache them for 24 hours. This is reasonable, but you can change that cache lifetime by specifying the `cachingTime` option, in seconds:
373 |
374 | ```javascript
375 | // 60*60*24*7 = 1 Week
376 | // Images are delivered with cache-control-header
377 | cachingTime: 604800
378 | ```
379 |
380 | S3 file delivery can be set to use the HTTPS protocol with the `https` option. This is essentially necessary if the site itself is served over the secure protocol.
381 |
382 | ```javascript
383 | https: true
384 | ```
385 |
386 | Also, if you are using a CDN such as cloudfront that automatically mirrors the contents of your S3 bucket, you can specify that CDN so that the `getUrl` method of `uploadfs` returns the CDN's URL rather than a direct URL to Amazon S3 or Azure:
387 |
388 | ```javascript
389 | cdn: {
390 | enabled: true,
391 | url: 'http://myAwesomeCDN'
392 | }
393 | ```
394 |
395 | Note that specifying a CDN in this way does not in any way activate that CDN for you. It just tells `uploadfs` to return a different result from `getUrl`. The rest is up to you. More CDN-related options may be added in the future.
396 |
397 | If you want to make your S3 bucket private and serve content through the Amazon CloudFront service, you need to set the objects' access control levels (ACL) in the bucket to `private`. By default, the `bucketObjectsACL` option sets the object ACL to `public-read`. You need to change this option to `private` to block public access. Additionally, follow the [documentation](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) to ensure your bucket is set up with an Origin Access Control correctly, otherwise CloudFront will not be able to access it.
398 |
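Here is a sketch of such a configuration; the bucket name and CloudFront URL are placeholders:

```javascript
{
  storage: 's3',
  secret: 'xxx',
  key: 'xxx',
  bucket: 'my-private-bucket',
  // Never grant public-read to uploaded objects
  bucketObjectsACL: 'private',
  // Hand out CloudFront URLs from getUrl() instead of direct S3 URLs
  cdn: {
    enabled: true,
    url: 'https://dxxxxxxxxxxxx.cloudfront.net'
  }
}
```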
399 | There is also a default list of content types that are not gzipped for delivery from S3. Note that gzip content delivery is completely transparent to the end user and supported by all browsers, so the only types that should be excluded are those that are already compressed (i.e. a waste of CPU to unzip), unless there is an issue with the gzip feature in a particular S3-compatible backend.
400 |
401 | You can override this default list by setting the `noGzipContentTypes` option:
402 |
403 | ```javascript
404 | // Don't gzip jpeg and zip files, but gzip everything else (override default list)
405 | noGzipContentTypes: ['image/jpeg', 'application/zip']
406 | ```
407 |
408 | Alternatively you can just extend the standard list of types not to be gzipped by setting `addNoGzipContentTypes`:
409 |
410 | ```javascript
411 | // Additionally don't gzip pdf files (append to default list)
412 | addNoGzipContentTypes: ['application/pdf']
413 | ```
414 |
415 | ## Important Concerns With S3
416 |
417 | Since 2015, files uploaded to S3 are immediately available in all AWS regions ("read after write consistency"). However, also be aware that no matter what region you choose, updates of an existing file or deletions of a file still won't always be instantly seen everywhere, even if you don't use the `us-standard` region. To avoid this problem, it is best to change filenames when uploading updated versions.
418 |
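One hypothetical way to follow that advice is to never overwrite an uploadfs path, versioning the name instead:

```javascript
// Instead of overwriting /docs/report.pdf, upload under a new name
const uploadfsPath = '/docs/report-' + Date.now() + '.pdf';
uploadfs.copyIn('./report.pdf', uploadfsPath, function(e) {
  if (e) {
    return console.error(e);
  }
  // Store uploadfsPath in your database so getUrl() + uploadfsPath
  // always points at the latest version
});
```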
419 | In `sample.js` we configure Express to actually serve the uploaded files when using the local backend. When using the s3 backend, you don't need to do this, because your files are served from S3. S3 URLs look like this:
420 |
421 | ```html
422 | https://yourbucketname.s3.amazonaws.com/your/path/to/something.jpg
423 | ```
424 |
425 | But your code doesn't need to worry about that. If you use `uploadfs.getUrl()` consistently, code written with one backend will migrate easily to the other.
426 |
427 | It's up to you to create an Amazon S3 bucket and obtain your secret and key. See sample.js for details.
428 |
429 | S3 support is based on the official AWS SDK.
430 |
431 | ## Applying a prefix to paths regardless of storage layer
432 |
433 | If you are running several Apostrophe sites that must share an S3 bucket, you'll notice
434 | that their uploads are jumbled together in a single `/attachments` "folder." With
435 | the local storage method, you can address this by specifying an `uploadsPath` that
436 | includes a different prefix for each site, but for S3 or Azure there was previously no good
437 | solution.
438 |
439 | Starting with version 1.11.0, you can specify a `prefix` option no matter what the
440 | storage backend is. When you do, `uploadfs` will automatically prepend it to
441 | all uploadfs paths that you pass to it. In addition, the `getUrl` method will
442 | include it as well. So you can use this technique to separate files from several
443 | sites even if they share a bucket in S3 or Azure.
444 |
445 | **An important exception:** if you have configured the `cdn` option, `uploadfs` assumes that your cdn's `url` subproperty points to the right place for this individual site. This is necessary because CDNs may have prefix features of their own which remap the URL.
446 |
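For instance, two sites sharing one bucket might each be configured like this (all names here are hypothetical):

```javascript
// Configuration for "site one"; "site two" would use prefix: '/site-two'
uploadfs.init({
  storage: 's3',
  secret: 'xxx',
  key: 'xxx',
  bucket: 'shared-bucket',
  prefix: '/site-one',
  tempPath: __dirname + '/temp'
}, function(e) {
  if (e) {
    throw e;
  }
  // '/attachments/photo.jpg' is now stored as
  // '/site-one/attachments/photo.jpg', and getUrl() reflects the prefix too
});
```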
447 | ## Postprocessing images: extra compression, watermarking, etc.
448 |
449 | It is possible to configure `uploadfs` to run a postprocessor such as `imagemin` on every custom-sized image that it generates. This is intended for file size optimization tools like `imagemin`.
450 |
451 | Here is an example based on the `imagemin` documentation:
452 |
453 | ```javascript
454 | const imagemin = require('imagemin');
455 | const imageminJpegtran = require('imagemin-jpegtran');
456 | const imageminPngquant = require('imagemin-pngquant');
457 |
458 | uploadfs.init({
459 | storage: 'local',
460 | image: 'sharp',
461 | tempPath: __dirname + '/temp',
462 | imageSizes: [
463 | {
464 | name: 'small',
465 | width: 320,
466 | height: 320
467 | },
468 | {
469 | name: 'medium',
470 | width: 640,
471 | height: 640
472 | }
473 | ],
474 | postprocessors: [
475 | {
476 | postprocessor: imagemin,
477 | extensions: [ 'gif', 'jpg', 'png' ],
478 | options: {
479 | plugins: [
480 | imageminJpegtran(),
481 | imageminPngquant({quality: '65-80'})
482 | ]
483 | }
484 | }
485 | ]
486 | });
487 | ```
488 |
489 | A file will not be passed to a postprocessor unless it is configured for the file's true extension as determined by the image backend (`gif`, `jpg`, `png` etc., never `GIF` or `JPEG`).
490 |
491 | The above code will invoke `imagemin` like this:
492 |
493 | ```javascript
494 | imagemin([ '/temp/folder/file1-small.jpg', '/temp/folder/file2-medium.jpg', ... ], '/temp/folder', {
495 | plugins: [
496 | imageminJpegtran(),
497 |     imageminPngquant({ quality: '65-80' })
498 | ]
499 | }).then(function() {
500 | // All finished
501 | }).catch(function() {
502 | // An error occurred
503 | });
504 | ```
505 |
506 | You may write and use other postprocessors, as long as they expect to be called the same way.
507 |
508 | > Note that the second argument is always the folder that contains all of the files in the first argument's array. `uploadfs` expects your postprocessor to be able to update the files "in place." All of the files in the first argument will have the same extension.
509 |
510 | If your postprocessor function accepts four arguments, `uploadfs` will pass it a callback rather than expecting a promise to be returned.
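
For instance, a hypothetical callback-style postprocessor (the name and logging below are placeholders) would look like this, and would be registered in `postprocessors` exactly as in the `imagemin` example above:

```javascript
// Declaring four parameters tells uploadfs to pass a node-style
// callback instead of waiting on a returned promise
function myPostprocessor(files, folder, options, callback) {
  // "files" all live in "folder" and share one true extension; update
  // them in place here, then report success or failure via the callback
  console.log('postprocessing', files.length, 'file(s) in', folder);
  return callback(null);
}
```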
511 |
512 | ## Participating in development
513 |
514 | ### Running the unit tests
515 |
516 | If you wish to run the unit tests of this module, you will need to copy the various `-sample.js` files to `.js` and edit them to match your own credentials and buckets for the various services. In addition, you will need to download your credentials `.json` file for Google Cloud Services and place it in `gcs-credentials-uploadfstest.json`. *None of these steps are needed unless you are running our module's unit tests, which only makes sense if you are contributing to further development.*
517 |
518 | ## About P'unk Avenue and Apostrophe
519 |
520 | `uploadfs` was created at [P'unk Avenue](https://punkave.com) for use in many projects built with Apostrophe, an open-source content management system built on node.js. `uploadfs` isn't mandatory for Apostrophe and vice versa, but they play very well together. If you like `uploadfs` you should definitely [check out apostrophecms.com](https://apostrophecms.com). Also be sure to visit us on [github](http://github.com/apostrophecms).
521 |
522 | ## Support
523 |
524 | Feel free to open issues on [github](http://github.com/apostrophecms/uploadfs).
525 |
526 |
527 |
--------------------------------------------------------------------------------
/azureReplicateTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | storage: 'azure',
3 | disabledFileKey: 'Any string is ok, probably longer is better',
4 | replicateClusters: [
5 | {
6 | account: 'yourAccount',
7 | container: 'container1',
8 | key: 'top_secret_XYZ123'
9 | },
10 | {
11 | account: 'yourAccount',
12 | container: 'container2',
13 | key: 'top_secret_XYZ123'
14 | },
15 | {
16 | account: 'yourAccount2',
17 | container: 'account2_container1',
18 | key: 'more_top_secret_999'
19 | }
20 | ]
21 | };
22 |
--------------------------------------------------------------------------------
/azureTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | storage: 'azure',
3 | disabledFileKey: 'Any string is ok, probably longer is better',
4 | account: 'foo',
5 | container: 'bar',
6 | key: 'b@z'
7 | };
8 |
--------------------------------------------------------------------------------
/badges/npm-audit-badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/badges/npm-audit-badge.png
--------------------------------------------------------------------------------
/badges/npm-audit-badge.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/defaultGzipBlacklist.js:
--------------------------------------------------------------------------------
1 | // By default, the following file types will not be gzipped.
2 | // Each is either (a) precompressed, or (b) poorly handled by a
3 | // browser with significant market share if compressed
4 | module.exports = [ 'jpg', 'png', 'zip', 'gzip', 'xls', 'docx', 'gif', 'mp4', 'webm' ];
5 |
--------------------------------------------------------------------------------
/gcsTestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | // See https://cloud.google.com/docs/authentication/getting-started
3 | // basically you want a service account file on the filesystem with
4 | // the ENV variable GOOGLE_APPLICATION_CREDENTIALS pointing to it
5 | // If you are getting `Error: Invalid Grant`, this is likely your problem
6 | backend: 'gcs',
7 | bucket: 'yourownbucketnamefromgcs',
8 | region: 'us-west-2',
9 |   validation: false // Can be one of false, "md5" or "crc32c", YMMV
10 | };
11 |
--------------------------------------------------------------------------------
/lib/copyFile.js:
--------------------------------------------------------------------------------
1 | // Copy a file reliably, with error handling.
2 | // path1 is the original file, path2 is the new file.
3 | // "options" is used in internal recursive calls and
4 | // may be omitted.
5 | //
6 | // Creates any necessary parent folders of path2 automatically.
7 |
8 | const fs = require('fs');
9 | const path = require('path');
10 |
11 | const copy = module.exports = function(path1, path2, options, callback) {
12 | let failed = false;
13 | let retryingWithMkdirp = false;
14 | if (!callback) {
15 | callback = options;
16 | options = {};
17 | }
18 | // Other people's implementations of fs.copy() lack
19 | // error handling, let's be thorough and also implement
20 | // a retry that does mkdirp() for consistency with S3
21 | const sin = fs.createReadStream(path1);
22 | const sout = fs.createWriteStream(path2);
23 |
24 | sin.on('error', function(e) {
25 | if (failed) {
26 | return;
27 | }
28 | failed = true;
29 | errorCleanup();
30 | return callback(e);
31 | });
32 |
33 | sout.on('error', function(e) {
34 | if (failed) {
35 | return;
36 | }
37 | // If the destination folder doesn't exist yet,
38 | // retry the whole thing after recursively creating
39 | // the folder and its parents as needed, avoiding the
40 | // overhead of checking for folders in the majority
41 | // of cases where they already exist.
42 | //
43 | // Try this up to 100 times to guard against race conditions
44 | // with the empty directory cleanup mechanism: as long as
45 | // there are fewer than 100 node processes running this backend
46 | // at once, it should not be possible for a sudden burst
47 | // of rmdir()s to defeat the mkdir() mechanism.
48 | //
49 | // Note that there will only be one node process unless you're using
50 | // cluster, multiple Heroku dynos, or something similar.
51 | //
52 | // If you have more than 100 CPU cores bashing on this folder,
53 | // I respectfully suggest it may be time for the
54 | // S3 backend anyway.
55 |
56 | if ((e.code === 'ENOENT') && ((!options.afterMkdirp) || (options.afterMkdirp <= 100))) {
57 | retryingWithMkdirp = true;
58 | return mkdirp(path.dirname(path2), function (e) {
59 | if (e) {
60 | if (failed) {
61 | return;
62 | }
63 | return callback(e);
64 | }
65 | options.afterMkdirp = options.afterMkdirp ? (options.afterMkdirp + 1) : 1;
66 | return copy(path1, path2, options, callback);
67 | });
68 | }
69 | errorCleanup();
70 | failed = true;
71 | return callback(e);
72 | });
73 |
74 | sout.on('close', function() {
75 | if (retryingWithMkdirp) {
76 | // This is the original stream closing after error (in node 16+
77 | // we always get a close event even on an error), don't consider
78 | // this success, but don't worry either as we're going to try
79 | // again after mkdirp
80 | return;
81 | }
82 | if (failed) {
83 | // We already reported an error
84 | return;
85 | }
86 | // Report success
87 | return callback(null);
88 | });
89 |
90 | // Carry out the actual copying
91 | sin.pipe(sout);
92 |
93 | function errorCleanup() {
94 | // This will fail if we weren't able to write to
95 | // path2 in the first place; don't get excited
96 | fs.unlink(path2, function(e) { });
97 | }
98 | };
99 |
100 | // Legacy-compatible, tested implementation of mkdirp without
101 | // any npm audit vulnerabilities
102 |
103 | function mkdirp(dir, callback) {
104 | dir = path.resolve(dir);
105 | return fs.mkdir(dir, function(err) {
106 | if (!err) {
107 | return callback(null);
108 | }
109 | if (err.code === 'EEXIST') {
110 | return callback(null);
111 | }
112 | if (err.code === 'ENOENT') {
113 | const newDir = path.dirname(dir);
114 | if (newDir === dir) {
115 | return callback(err);
116 | }
117 | return mkdirp(newDir, function(err) {
118 | if (err) {
119 | return callback(err);
120 | }
121 | return mkdirp(dir, callback);
122 | });
123 | }
124 | return callback(err);
125 | });
126 | }
127 |
--------------------------------------------------------------------------------
/lib/image/imagemagick.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 | // Use Aaron Heckmann's graphicsmagick interface in its imagemagick-compatible
3 | // configuration so our system requirements don't change and everything still
4 | // works in Heroku. It's a good thing we can do this, since node-imagemagick
5 | // has been abandoned. We also use our own custom command lines for
6 | // drastically better performance and memory usage.
7 | const childProcess = require('child_process');
8 | const _ = require('lodash');
9 | const async = require('async');
10 | const util = require('util');
11 | const execFile = util.promisify(childProcess.execFile);
12 |
13 | module.exports = function() {
14 | let options;
15 | const self = {
16 | /**
17 | * Initialize the module. If _options.gifsicle is true, use gifsicle to manipulate
18 | * animated GIFs
19 | */
20 | init: function(_options, callback) {
21 | options = _options;
22 | return callback(null);
23 | },
24 |
25 | destroy: function(callback) {
26 | // No file descriptors or timeouts held
27 | return callback(null);
28 | },
29 |
30 | /**
31 | * Identify a local image file.
32 | *
33 | * If the file is not an image or is too defective to be identified an error is
34 | * passed to the callback.
35 | *
36 | * Otherwise the second argument to the callback is guaranteed to have extension,
37 | * width, height, orientation, originalWidth and originalHeight properties.
38 | * extension will be gif, jpg or png and is detected from the file's true contents,
39 | * not the original file extension. With the imagemagick backend, width and height
40 | * are automatically rotated to TopLeft orientation while originalWidth and
41 | * originalHeight are not.
42 | *
43 | * If the orientation property is not explicitly set in the file it will be set to
44 | * 'Undefined'.
45 | *
46 | * Any other properties returned are dependent on the version of ImageMagick used
47 | * and are not guaranteed.
48 | *
49 | * @param {String} path Local filesystem path to image file
50 | * @param {Function} callback Receives the usual err argument, followed by an
51 | * object with extension, width, height, orientation, originalWidth,
52 | * originalHeight and animated properties. Any other properties depend on the backend
53 | * in use and are not guaranteed
54 | *
55 | * @see Uploadfs#copyImageIn
56 | */
57 |
58 | async identify(path, callback) {
59 | try {
60 | const info = await getProperties(path);
61 | if (info.extension === 'gif') {
62 | info.animated = await getAnimated(path);
63 | } else {
64 | info.animated = false;
65 | }
66 | return callback(null, info);
67 | } catch (e) {
68 | return callback(e);
69 | }
70 |
71 |       async function getProperties(path) {
72 | // Parse identify output ourselves to avoid using unmaintained third party wrappers. -Tom
73 | const { stdout } = await execFile('identify', [ '-verbose', path ], { encoding: 'utf8' });
74 | const parsed = Object.fromEntries(stdout.split('\n').filter(line => line.trim().includes(': ')).map(line => {
75 | const cat = line.indexOf(':');
76 | return [ line.substring(0, cat).trim(), line.substring(cat + 1).trim() ];
77 | }));
78 | const format = parsed.Format.toLowerCase().split(' ')[0];
79 | const geometry = parsed.Geometry.match(/^(\d+)x(\d+)/);
80 | const info = {
81 | originalWidth: parseInt(geometry[1]),
82 | originalHeight: parseInt(geometry[2]),
83 | orientation: parsed.Orientation
84 | };
85 | if (format === 'jpeg') {
86 | info.extension = 'jpg';
87 | } else {
88 | info.extension = format;
89 | }
90 | const o = info.orientation;
91 | if ((o === 'LeftTop') || (o === 'RightTop') || (o === 'RightBottom') || (o === 'LeftBottom')) {
92 | info.width = info.originalHeight;
93 | info.height = info.originalWidth;
94 | } else {
95 | info.width = info.originalWidth;
96 | info.height = info.originalHeight;
97 | }
98 | return info;
99 | }
100 |
101 | async function getAnimated(path) {
102 | const { stdout } = await execFile('identify', [ '-format', '%n', path ], { encoding: 'utf8' });
103 | const frames = parseInt(stdout, 10);
104 | return frames > 1;
105 | }
106 | },
107 |
108 | /**
109 | * Generate one or more scaled versions of an image file.
110 | *
111 | * INPUT
112 | *
113 | * The options that may be passed in the context object are:
114 | *
115 | * workingPath: path to the original file (required)
116 | *
117 | * extension: true file extension of original file as
118 | * determined by a previous call to identify (required).
119 | *
120 | * info.width, info.height: should be provided as other backends may require
121 | * them, however the imagemagick backend does not need to consult them.
122 | *
123 | * sizes (required): array of objects with width and height
124 | * properties which are treated as maximums for each axis; the resulting image
125 | * will never exceed the original size, and will otherwise be scaled to
126 | * fill as much of the requested size as possible without changing the aspect
127 | * ratio. Files are generated in the temp folder with a filename made up of the
128 | * name property of the size, a '.', and the extension property of the
129 | * context object.
130 | *
131 | * tempFolder: folder where the scaled versions should be written
132 | * (required)
133 | *
134 | * crop: optional object with top, left, width and height properties
135 | *
136 | * scaledJpegQuality: quality setting for JPEGs (optional; otherwise
137 | * you get whatever default was compiled into imagemagick)
138 | *
139 | * copyOriginal: if true, copy the "original" image to the tempFolder too,
140 | * but do auto-orient it so that iPhone photos etc. work on the web
141 | *
142 | * All images, including the "original" if copyOriginal is set, are
143 | * auto-rotated to the orientation expected by web browsers.
144 | *
145 | * OUTPUT
146 | *
147 | * After the operation is complete, the following property of the
148 | * context object will be set if the copyOriginal option was set:
149 | *
150 | * adjustedOriginal: will contain the local filesystem path where the
151 | * original was copied (and rotated, if needed).
152 | *
153 | * @param {[type]} context [description]
154 | * @param {Function} callback [description]
155 | * @return {[type]} [description]
156 | */
157 |
158 | convert: function(context, callback) {
159 | if (context.info.animated) {
160 | if (options.gifsicle) {
161 | return convertAnimatedGifsicle(context, callback);
162 | } else {
163 | return convertAnimated(context, callback);
164 | }
165 | } else {
166 | return convertStandard(context, callback);
167 | }
168 |
169 | // Animated GIF strategy based on gifsicle. gifsicle doesn't hit RAM limits
170 | // when confronted with huge animated GIFs, but it does tend to make files
171 | // bigger and doesn't resize quite as well. Tradeoffs are part of life
172 |
173 | function convertAnimatedGifsicle(context, callback) {
174 | const crop = context.crop;
175 | const imageSizes = context.sizes;
176 | const baseArgs = [];
177 | if (crop) {
178 | baseArgs.push('--crop');
179 | baseArgs.push(crop.left + ',' + crop.top + '+' + crop.width + 'x' + crop.height);
180 | }
181 | baseArgs.push(context.workingPath);
182 | return async.series([ convertOriginal, convertSizes ], callback);
183 | function convertOriginal(callback) {
184 | if (!context.copyOriginal) {
185 | return setImmediate(callback);
186 | }
187 | const path = context.tempFolder + '/original.' + context.extension;
188 | context.adjustedOriginal = path;
189 | const args = baseArgs.slice();
190 | args.push('--optimize');
191 | args.push('-o');
192 | args.push(path);
193 | return spawnThen('gifsicle', args, callback);
194 | }
195 | function convertSizes(callback) {
196 | return async.eachSeries(imageSizes, convertSize, callback);
197 | }
198 | function convertSize(size, callback) {
199 | const args = baseArgs.slice();
200 | args.push('--resize');
201 | // "Largest that fits in the box" is not a built-in feature of gifsicle, so we do the math
202 | const originalWidth = (crop && crop.width) || context.info.width;
203 | const originalHeight = (crop && crop.height) || context.info.height;
204 |             let width = size.width;
205 | let height = Math.round(size.width * originalHeight / originalWidth);
206 |             if (height > size.height) {
207 | height = size.height;
208 | width = Math.round(size.height * originalWidth / originalHeight);
209 | }
210 | args.push(width + 'x' + height);
211 | args.push('--optimize');
212 | args.push('-o');
213 | const suffix = size.name + '.' + context.extension;
214 | const tempFile = context.tempFolder + '/' + suffix;
215 | args.push(tempFile);
216 | return spawnThen('gifsicle', args, callback);
217 | }
218 | }
219 |
220 | // Separate animated GIF strategy is back because of tests in which (1) we
221 | // suffered image damage (which could possibly be addressed with -coalesce)
222 | // and (2) imagemagick inexplicably took 4x longer in some cases with the
223 | // single pipeline (which couldn't be addressed without a new approach).
224 | // This is why we don't just rely on -clone 0--1 and a single pipeline. -Tom
225 |
226 | function convertAnimated(context, callback) {
227 | const crop = context.crop;
228 | const imageSizes = context.sizes;
229 | const baseArgs = [];
230 | baseArgs.push(context.workingPath);
231 | // Convert to filmstrip so cropping and resizing
232 | // don't behave strangely
233 | baseArgs.push('-coalesce');
234 | baseArgs.push('-auto-orient');
235 | if (crop) {
236 | baseArgs.push('-crop');
237 | baseArgs.push(crop.width + 'x' + crop.height + '+' + crop.left + '+' + crop.top);
238 | baseArgs.push('+repage');
239 | }
240 | return async.series([ convertOriginal, convertSizes ], callback);
241 | function convertOriginal(callback) {
242 | if (!context.copyOriginal) {
243 | return setImmediate(callback);
244 | }
245 | const path = context.tempFolder + '/original.' + context.extension;
246 | context.adjustedOriginal = path;
247 | const args = baseArgs.slice();
248 | args.push('-layers');
249 | args.push('Optimize');
250 | args.push(path);
251 | return spawnThen('convert', args, callback);
252 | }
253 | function convertSizes(callback) {
254 | return async.eachSeries(imageSizes, convertSize, callback);
255 | }
256 | function convertSize(size, callback) {
257 | const args = baseArgs.slice();
258 | args.push('-resize');
259 | args.push(size.width + 'x' + size.height + '>');
260 | args.push('-layers');
261 | args.push('Optimize');
262 | const suffix = size.name + '.' + context.extension;
263 | const tempFile = context.tempFolder + '/' + suffix;
264 | args.push(tempFile);
265 | return spawnThen('convert', args, callback);
266 | }
267 | }
268 |
269 | function convertStandard(context, callback) {
270 | // For performance we build our own imagemagick command which tackles all the
271 | // sizes in one run, avoiding redundant loads. We also scale to the largest
272 | // size we really want first and use that as a basis for all others, without
273 | // any lossy intermediate files, which is an even bigger win.
274 | //
275 | const args = [];
276 | const crop = context.crop;
277 | const imageSizes = context.sizes;
278 | args.push(context.workingPath);
279 | args.push('-auto-orient');
280 | if (crop) {
281 | args.push('-crop');
282 | args.push(crop.width + 'x' + crop.height + '+' + crop.left + '+' + crop.top);
283 | args.push('+repage');
284 | }
285 | if (context.extension === 'jpg') {
286 | // Always convert to a colorspace all browsers understand.
287 | // CMYK will flat out fail in IE8 for instance
288 | args.push('-colorspace');
289 | args.push('sRGB');
290 | }
291 |
292 | if (context.copyOriginal) {
293 | context.adjustedOriginal = context.tempFolder + '/original.' + context.extension;
294 | args.push('(');
295 | args.push('-clone');
296 | args.push('0--1');
297 | args.push('-write');
298 | args.push(context.adjustedOriginal);
299 | args.push('+delete');
300 | args.push(')');
301 | }
302 |
303 | // Make sure we strip metadata before we get to scaled versions as
304 | // some files have ridiculously huge metadata
305 | args.push('-strip');
306 |
307 | // After testing this with several sets of developer eyeballs, we've
308 | // decided it is kosher to resample to the largest size we're
309 | // interested in keeping, then sample down from there. Since we
310 | // do it all inside imagemagick without creating any intermediate
311 | // lossy files, there is no quality loss, and the speed benefit is
312 | // yet another 2x win! Hooray!
313 | let maxWidth = 0;
314 | let maxHeight = 0;
315 | _.each(imageSizes, function(size) {
316 | if (size.width > maxWidth) {
317 | maxWidth = size.width;
318 | }
319 | if (size.height > maxHeight) {
320 | maxHeight = size.height;
321 | }
322 | });
323 | if (maxWidth && maxHeight) {
324 | args.push('-resize');
325 | args.push(maxWidth + 'x' + maxHeight + '>');
326 | }
327 |
328 | const resizedPaths = [];
329 |
330 | _.each(imageSizes, function(size) {
331 | args.push('(');
332 | args.push('-clone');
333 | args.push('0--1');
334 | args.push('-resize');
335 | args.push(size.width + 'x' + size.height + '>');
336 | if (context.scaledJpegQuality && (context.extension === 'jpg')) {
337 | args.push('-quality');
338 | args.push(context.scaledJpegQuality);
339 | }
340 | args.push('-write');
341 | const suffix = size.name + '.' + context.extension;
342 | const tempFile = context.tempFolder + '/' + suffix;
343 | resizedPaths.push(tempFile);
344 | args.push(tempFile);
345 | args.push('+delete');
346 | args.push(')');
347 | });
348 |
349 | // We don't care about the official output, which would be the
350 | // intermediate scaled version of the image. Use imagemagick's
351 | // official null format
352 |
353 | args.push('null:');
354 |
355 | return spawnThen('convert', args, callback);
356 | }
357 |
358 | function spawnThen(cmd, args, callback) {
359 | // console.log(cmd + ' ' + args.join(' ').replace(/[^\w\-\ ]/g, function(c) {
360 | // return '\\' + c;
361 | // }));
362 | return childProcess.execFile(cmd, args, function(err) {
363 | if (err) {
364 | return callback(err);
365 | }
366 | return callback(null);
367 | });
368 | }
369 | }
370 | };
371 | return self;
372 | };
373 |
--------------------------------------------------------------------------------
/lib/image/sharp.js:
--------------------------------------------------------------------------------
1 | const Sharp = require('sharp');
2 |
3 | module.exports = function () {
4 | return {
5 | /**
6 | * Initialize the module.
7 | */
8 | init: function (options, callback) {
9 | return callback(null);
10 | },
11 |
12 | destroy: function (callback) {
13 | return callback(null);
14 | },
15 |
16 | /**
17 | * Identify a local image file.
18 | *
19 | * If the file is not an image or is too defective to be identified an error is
20 | * passed to the callback.
21 | *
22 | * @param {String} path Local filesystem path to image file
23 | * @param {Function} callback Receives the usual err argument, followed by an
24 | * object with extension, width, height, orientation, originalWidth and
25 | * originalHeight properties.
26 | *
27 | * @see Uploadfs#copyImageIn
28 | */
29 |
30 | identify: function (path, callback) {
31 | // Identify the file type, size, etc. Stuff them into context.info and
32 | // context.extension. Also sets context.info.animated to true if
33 | // an animated GIF is found.
34 |
35 | const info = {};
36 | return Sharp(path).metadata(function (err, metadata) {
37 | if (err) {
38 | return callback(err);
39 | }
40 |
41 | info.originalWidth = metadata.width;
42 | info.originalHeight = metadata.height;
43 | // if exif header data isn't present, default to current orientation being correct
44 | info.orientation = metadata.orientation || 1;
45 | info.width = info.orientation < 5 ? metadata.width : metadata.height;
46 | info.height = info.orientation < 5 ? metadata.height : metadata.width;
47 | info.extension = metadata.format === 'jpeg' ? 'jpg' : metadata.format;
48 | info.animation = metadata.pages > 1;
49 | return callback(null, info);
50 | });
51 | },
52 |
53 | /**
54 | * Generate one or more scaled versions of an image file.
55 | *
56 | * INPUT
57 | *
58 | * The options that may be passed in the context object are:
59 | *
60 | * workingPath: path to the original file (required)
61 | *
62 | * extension: true file extension of original file as
63 | * determined by a previous call to identify (required).
64 | *
65 | * info.width, info.height: the width and height of the rotated image
66 | *
67 | * sizes (required): array of objects with width and height
68 | * properties which are treated as maximums for each axis; the resulting image
69 | * will never exceed the original size, and will otherwise be scaled to
70 | * fill as much of the requested size as possible without changing the aspect
71 | * ratio. Files are generated in the temp folder with a filename made up of the
72 | * name property of the size, a '.', and the extension property of the
73 | * context object.
74 | *
75 | * tempFolder: folder where the scaled versions should be written
76 | * (required)
77 | *
78 | * crop: optional object with top, left, width and height properties
79 | *
80 | * scaledJpegQuality: quality setting for JPEGs (optional; otherwise
81 | * you get whatever default was compiled into sharp)
82 | *
83 | * copyOriginal: if true, copy the "original" image to the tempFolder too,
84 | * but do auto-orient it so that iPhone photos etc. work on the web
85 | *
86 | * All images, including the "original" if copyOriginal is set, are
87 | * auto-rotated to the orientation expected by web browsers.
88 | *
89 | * OUTPUT
90 | *
91 | * After the operation is complete, the following property of the
92 | * context object will be set if the copyOriginal option was set:
93 | *
94 | * adjustedOriginal: will contain the local filesystem path where the
95 | * original was copied (and rotated, if needed).
96 | *
97 | * @param {[type]} context [description]
98 | * @param {Function} callback [description]
99 | * @return {[type]} [description]
100 | */
101 |
102 | convert: function (context, callback) {
103 | // This is for a non-animated image
104 | const _info = context.info;
105 | const isAnimated = _info.animation;
106 | const noCrop = {
107 | left: 0,
108 | top: 0,
109 | width: _info.width,
110 | height: _info.height
111 | };
112 | const crop = context.crop ? context.crop : noCrop;
113 |
114 | const pipeline = Sharp(context.workingPath, { animated: isAnimated })
115 | .rotate()
116 | .extract({
117 | left: crop.left,
118 | top: crop.top,
119 | width: crop.width,
120 | height: crop.height
121 | });
122 |
123 | const promises = [];
124 |
125 | if (context.copyOriginal) {
126 | const copyPath = `${context.tempFolder}/original.${context.extension}`;
127 | context.adjustedOriginal = copyPath;
128 | promises.push(pipeline.clone().withMetadata().toFile(copyPath));
129 | }
130 |
131 | promises.push(sizeOperation());
132 |
133 | Promise.all(promises)
134 | .then((res) => {
135 | return callback(null);
136 | })
137 | .catch((err) => {
138 | console.error(err);
139 | return callback(err);
140 | });
141 |
142 | async function sizeOperation() {
143 | await Promise.all(
144 | context.sizes.map(async (size) => {
145 | const sizePath = `${context.tempFolder}/${size.name}.${context.extension}`;
146 | const width = Math.min(size.width, context.info.width);
147 | const height = Math.min(size.height, context.info.height);
148 | const sizePipeline = pipeline.clone();
149 | sizePipeline.resize({
150 | width,
151 | height,
152 | fit: 'inside'
153 | });
154 | if (context.extension === 'jpg') {
155 | const quality = context.scaledJpegQuality
156 | ? context.scaledJpegQuality
157 | : 80;
158 | sizePipeline.jpeg({ quality });
159 | }
160 | await sizePipeline.toFile(sizePath);
161 | })
162 | );
163 | }
164 | }
165 | };
166 | };
167 |
--------------------------------------------------------------------------------
/lib/storage/azure.js:
--------------------------------------------------------------------------------
1 | const { BlobServiceClient, StorageSharedKeyCredential } = require('@azure/storage-blob');
2 | const contentTypes = require('./contentTypes');
3 | const extname = require('path').extname;
4 | const fs = require('fs');
5 | const zlib = require('zlib');
6 | const async = require('async');
7 | const utils = require('../utils.js');
8 | const defaultGzipBlacklist = require('../../defaultGzipBlacklist');
9 | const verbose = false;
10 | const _ = require('lodash');
11 |
12 | const DEFAULT_MAX_AGE_IN_SECONDS = 500;
13 | const DEFAULT_MAX_CACHE = 2628000;
14 |
15 | /**
16 | * @typedef {{ svc: BlobServiceClient, container: string }} BlobSvc
17 | *
18 | * @param {BlobSvc} blob
19 | * @param {string} src
20 | * @param {string} dst
21 | * @param {Function} callback
22 | */
23 | function copyBlob(blob, src, dst, callback) {
24 | const srcClient = blob.svc.getContainerClient(blob.container).getBlobClient(src);
25 | const dstClient = blob.svc.getContainerClient(blob.container).getBlobClient(dst);
26 | dstClient.beginCopyFromURL(srcClient.url)
27 | .then((response) => {
28 | if (response.errorCode) {
29 | return callback(response.errorCode);
30 | }
31 | return callback(null, response);
32 | })
33 | .catch(callback);
34 | }
35 |
36 | function __log() {
37 | if (verbose) {
38 | console.error(arguments);
39 | }
40 | }
41 |
42 | /**
43 | * Set the main properties of the selected container.
44 | * @param {BlobSvc['svc']} blobSvc Azure service object
45 | * @param {Object} options Options passed to UploadFS library
46 | * @param {Object} result Service Properties
47 | * @param {Function} callback Callback to be called when operation is terminated
48 | * @return {any} Return the service which has been initialized
49 | */
50 | function setContainerProperties(blobSvc, options, result, callback) {
51 | // Backward compatibility
52 | function propToString(prop) {
53 | if (Array.isArray(prop)) {
54 | return prop.join(',');
55 | }
56 | return prop;
57 | }
58 | blobSvc.getProperties()
59 | .then((response) => {
60 | if (response.errorCode) {
61 | return callback(response.errorCode);
62 | }
63 | const serviceProperties = response;
64 | const allowedOrigins = propToString(options.allowedOrigins) || '*';
65 | const allowedMethods = propToString(options.allowedMethods) || 'GET,PUT,POST';
66 | const allowedHeaders = propToString(options.allowedHeaders) || '*';
67 | const exposedHeaders = propToString(options.exposedHeaders) || '*';
68 | const maxAgeInSeconds = options.maxAgeInSeconds || DEFAULT_MAX_AGE_IN_SECONDS;
69 |
70 | serviceProperties.cors = [
71 | {
72 | allowedOrigins,
73 | allowedMethods,
74 | allowedHeaders,
75 | exposedHeaders,
76 | maxAgeInSeconds
77 | }
78 | ];
79 |
80 | blobSvc.setProperties(serviceProperties)
81 | .then((response) => {
82 | if (response.errorCode) {
83 | return callback(response.errorCode);
84 | }
85 | return callback(null, blobSvc);
86 | })
87 | .catch(callback);
88 | })
89 | .catch(callback);
90 | }
91 |
92 | /**
93 | * Initialize the container ACLs
94 | * @param {BlobSvc['svc']} blobSvc Azure Service object
95 | * @param {String} container Container name
96 | * @param {Object} options Options passed to UploadFS library
97 | * @param {Function} callback Callback to be called when operation is terminated
98 | * @return {any} Returns the result of `setContainerProperties`
99 | */
100 | function initializeContainer(blobSvc, container, options, callback) {
101 | blobSvc.getContainerClient(container)
102 | .setAccessPolicy('blob')
103 | .then((response) => {
104 | if (response.errorCode) {
105 | return callback(response.errorCode);
106 | }
107 | return setContainerProperties(blobSvc, options, response, callback);
108 | })
109 | .catch(callback);
110 | }
111 |
112 | /**
113 | * Create an Azure Container
114 | * @param {Object} cluster Azure Cluster Info
115 | * @param {Object} options Options passed to UploadFS library
116 | * @param {Function} callback Callback to be called when operation is terminated
117 | * @return {any} Returns the initialized service
118 | */
119 | function createContainer(cluster, options, callback) {
120 | const sharedKeyCredential = new StorageSharedKeyCredential(cluster.account, cluster.key);
121 | const blobSvc = new BlobServiceClient(
122 | `https://${cluster.account}.blob.core.windows.net`,
123 | sharedKeyCredential
124 | );
125 | const container = cluster.container || options.container;
126 | blobSvc.uploadfsInfo = {
127 | account: cluster.account,
128 |     container: cluster.container || options.container
129 | };
130 | blobSvc.getContainerClient(container)
131 | .createIfNotExists()
132 | .then((response) => {
133 | if (response.errorCode && response.errorCode !== 'ContainerAlreadyExists') {
134 | return callback(response.errorCode);
135 | }
136 | return initializeContainer(blobSvc, container, options, callback);
137 | })
138 | .catch(callback);
139 | }
140 |
141 | /**
142 | * Deletes a local file from its path
143 | * @param {String} path File path
144 | * @param {Function} callback Callback to be called when operation is terminated
145 | * @return Always null
146 | */
147 | function removeLocalBlob(path, callback) {
148 | fs.unlink(path, function(error) {
149 | return callback(error);
150 | });
151 | }
152 |
153 | /**
154 | * Send a binary file to a specified container and a specified service
155 | * @param {BlobSvc} blob Azure Service info and container
156 | * @param {String} path Remote path
157 | * @param {String} localPath Local file path
158 | * @param {Function} callback Callback to be called when operation is terminated
159 | * @return {any} Result of the callback
160 | */
161 | function createContainerBlob(blob, path, localPath, _gzip, callback) {
162 | // Draw the extension from uploadfs, where we know they will be using
163 | // reasonable extensions, not from what could be a temporary file
164 | // that came from the gzip code. -Tom
165 | const extension = extname(path).substring(1);
166 | const contentSettings = {
167 | cacheControl: `max-age=${DEFAULT_MAX_CACHE}, public`,
168 | // contentEncoding: _gzip ? 'gzip' : 'deflate',
169 | contentType: contentTypes[extension] || 'application/octet-stream'
170 | };
171 | if (_gzip) {
172 | contentSettings.contentEncoding = 'gzip';
173 | }
174 | blob.svc.getContainerClient(blob.container)
175 | .getBlobClient(path)
176 | .getBlockBlobClient()
177 | .uploadFile(localPath, {
178 | blobHTTPHeaders: {
179 | blobCacheControl: contentSettings.cacheControl,
180 | blobContentType: contentSettings.contentType,
181 | blobContentEncoding: contentSettings.contentEncoding
182 | }
183 | })
184 | .then((response) => {
185 | if (response.errorCode) {
186 | return callback(response.errorCode);
187 | }
188 | return callback(null);
189 | })
190 | .catch(callback);
191 | }
192 |
193 | /**
194 | * Remove remote container binary file
195 | * @param {BlobSvc} blob Azure Service info and container
196 | * @param {String} path Remote file path
197 | * @param {Function} callback Callback to be called when operation is terminated
198 | * @return {any} Result of the callback
199 | */
200 | function removeContainerBlob(blob, path, callback) {
201 | blob.svc.getContainerClient(blob.container)
202 | .getBlobClient(path)
203 | .deleteIfExists()
204 | .then((response) => {
205 | if (response.errorCode && response.errorCode !== 'BlobNotFound') {
206 |         __log('Cannot delete ' + path + ' on container ' + blob.container + ': ' + response.errorCode);
207 | return callback(response.errorCode);
208 | }
209 | return callback(null);
210 | })
211 | .catch(callback);
212 | }
213 |
214 | // If err is truthy, annotate it with the account and container name
215 | // for the cluster or blobSvc passed, so that error messages can be
216 | // used to effectively debug the right cluster in a replication scenario.
217 | // 'all' can also be passed to indicate all replicas were tried.
218 |
219 | function clusterError(cluster, err) {
220 | // Accept a blobSvc (which acts for a cluster) or a cluster config object,
221 | // for convenience
222 | cluster = (cluster.svc && cluster.svc.uploadfsInfo) || cluster;
223 | if (!err) {
224 | // Pass through if there is no error, makes this easier to use succinctly
225 | return err;
226 | }
227 | // Allow clusters to be distinguished in error messages. Also report
228 | // the case where everything was tried (copyOut)
229 | if (cluster === 'all') {
230 | err.account = 'ALL';
231 | err.container = 'ALL';
232 | } else {
233 | err.account = cluster.account;
234 | err.container = cluster.container;
235 | }
236 | return err;
237 | }
238 |
239 | module.exports = function() {
240 |
241 | const self = {
242 | blobSvcs: [],
243 | init: function(options, callback) {
244 | if (!options.disabledFileKey) {
245 | return callback(new Error('You must set the disabledFileKey option to a random string when using the azure storage backend.'));
246 | }
247 | this.options = options;
248 | self.gzipBlacklist = self.getGzipBlacklist(options.gzipEncoding || {});
249 |
250 | if (!options.replicateClusters || (!Array.isArray(options.replicateClusters)) || (!options.replicateClusters[0])) {
251 | options.replicateClusters = [];
252 | options.replicateClusters.push({
253 | account: options.account,
254 | key: options.key,
255 | container: options.container
256 | });
257 | }
258 | async.each(options.replicateClusters, function(cluster, callback) {
259 | createContainer(cluster, options, function(err, svc) {
260 | if (err) {
261 | return callback(clusterError(cluster, err));
262 | }
263 |
264 | self.blobSvcs.push({
265 | svc,
266 | container: cluster.container || options.container
267 | });
268 |
269 | return callback();
270 | });
271 | }, callback);
272 | },
273 |
274 | // Implementation detail. Used when stream-based copies fail.
275 | //
276 | // Cleans up the streams and temporary files (which can be null),
277 | // then delivers err to the callback unless something goes wrong in the cleanup itself
278 | // in which case that error is delivered.
279 |
280 | cleanupStreams: function (inputStream, outputStream, tempPath, tempPath2, err, callback) {
281 | async.parallel({
282 | unlink: function(callback) {
283 | if (!tempPath) {
284 | return callback(null);
285 | }
286 | removeLocalBlob(tempPath, callback);
287 | },
288 |
289 | unlink2: function(callback) {
290 | if (!tempPath2) {
291 | return callback(null);
292 | }
293 | removeLocalBlob(tempPath2, callback);
294 | },
295 |
296 | closeReadStream: function(callback) {
297 | inputStream.destroy();
298 | callback();
299 | },
300 |
301 | closeWriteStream: function(callback) {
302 | outputStream.destroy();
303 | callback();
304 | }
305 | }, cleanupError => {
306 | if (err) {
307 | return callback(err);
308 | }
309 | return callback(cleanupError);
310 | });
311 | },
312 |
313 | copyIn: function(localPath, _path, options, callback) {
314 | if (!self.blobSvcs.length) {
315 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
316 | }
317 | const fileExt = localPath.split('.').pop();
318 | const path = _path[0] === '/' ? _path.slice(1) : _path;
319 | const tmpFileName = Math.random().toString(36).substring(7);
320 | let tempPath = this.options.tempPath + '/' + tmpFileName;
321 | // options optional
322 | if (!callback) {
323 | callback = options;
324 | }
325 |
326 | if (self.shouldGzip(fileExt)) {
327 | return self.doGzip(localPath, path, tempPath, callback);
328 | } else {
329 | tempPath = localPath; // we don't have a temp path for non-gzipped files
330 | return self.createContainerBlobs(localPath, path, tempPath, false, callback);
331 | }
332 | },
333 |
334 | createContainerBlobs: function(localPath, path, tempPath, _gzip, callback) {
335 | async.each(self.blobSvcs, function(blobSvc, callback) {
336 | createContainerBlob(blobSvc, path, tempPath, _gzip, function(createBlobErr) {
337 | return callback(clusterError(blobSvc, createBlobErr));
338 | });
339 | }, function(err) {
340 | return callback(err);
341 | });
342 | },
343 |
344 | doGzip: function(localPath, path, tempPath, callback) {
345 | const inp = fs.createReadStream(localPath);
346 | const out = fs.createWriteStream(tempPath);
347 | let hasError = false;
348 |
349 | inp.on('error', function(inpErr) {
350 | __log('Error in read stream', inpErr);
351 | if (!hasError) {
352 | hasError = true;
353 | return self.cleanupStreams(inp, out, tempPath, null, inpErr, callback);
354 | }
355 | });
356 |
357 | out.on('error', function(outErr) {
358 | if (!hasError) {
359 | hasError = true;
360 | return self.cleanupStreams(inp, out, tempPath, null, outErr, callback);
361 | }
362 | });
363 |
364 | out.on('finish', function() {
365 | self.createContainerBlobs(localPath, path, tempPath, true, callback);
366 | });
367 | const gzip = zlib.createGzip();
368 | inp.pipe(gzip).pipe(out);
369 | },
370 |
371 | shouldGzip: function(ext) {
372 | return !self.gzipBlacklist.includes(ext);
373 | },
374 |
375 | // Tries all replicas before giving up
376 | copyOut: function(path, localPath, options, callback) {
377 | if (!self.blobSvcs.length) {
378 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
379 | }
380 | let index = 0;
381 | return attempt();
382 |
383 | function attempt(lastErr) {
384 | if (index >= self.blobSvcs.length) {
385 | return callback(clusterError('all', lastErr));
386 | }
387 | /** @type {BlobSvc} */
388 | const blob = self.blobSvcs[index++];
389 | path = path[0] === '/' ? path.slice(1) : path;
390 | // Temporary name until we know if it is gzipped.
391 | const initialPath = localPath + '.initial';
392 |
393 | return blob.svc.getContainerClient(blob.container)
394 | .getBlobClient(path)
395 | .downloadToFile(initialPath)
396 | .then((response) => {
397 | if (response.errorCode) {
398 | return attempt(response.errorCode);
399 | }
400 | // BC
401 | const returnVal = {
402 | result: response,
403 | response
404 | };
405 | if (response.contentEncoding === 'gzip') {
406 | // Now we know we need to unzip it.
407 | return gunzipBlob();
408 | } else {
409 | // Simple rename, because it was not gzipped after all.
410 | fs.renameSync(initialPath, localPath);
411 | return callback(null, response);
412 | }
413 |
414 | function gunzipBlob() {
415 | const out = fs.createWriteStream(localPath);
416 | const inp = fs.createReadStream(initialPath);
417 | const gunzip = zlib.createGunzip();
418 | let errorSeen = false;
419 | inp.pipe(gunzip);
420 | gunzip.pipe(out);
421 | inp.on('error', function(e) {
422 | fail(e);
423 | });
424 | gunzip.on('error', function(e) {
425 | fail(e);
426 | });
427 | out.on('error', function(e) {
428 | fail(e);
429 | });
430 | out.on('finish', function() {
431 | fs.unlinkSync(initialPath);
432 | return callback(null, returnVal);
433 | });
434 | function fail(e) {
435 | if (errorSeen) {
436 | return;
437 | }
438 | errorSeen = true;
439 | return self.cleanupStreams(inp, out, initialPath, localPath, e, callback);
440 | }
441 | }
442 | })
443 | .catch(attempt);
444 | }
445 | },
446 |
447 | remove: function(path, callback) {
448 | if (!self.blobSvcs.length) {
449 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
450 | }
451 | path = path[0] === '/' ? path.slice(1) : path;
452 |
453 | async.each(self.blobSvcs, function(blobSvc, callback) {
454 | removeContainerBlob(blobSvc, path, callback);
455 | }, callback);
456 | },
457 |
458 | disable: function(path, callback) {
459 | if (!self.blobSvcs.length) {
460 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
461 | }
462 | const dPath = utils.getDisabledPath(path, self.options.disabledFileKey);
463 | async.each(self.blobSvcs, function(blob, callback) {
464 | copyBlob(blob, path, dPath, function(e) {
465 | // if copy fails, abort
466 | if (e) {
467 | return callback(clusterError(blob, e));
468 | } else { // otherwise, remove original file (azure does not currently support rename operations, so we dance)
469 | self.remove(path, callback);
470 | }
471 | });
472 | }, function(err) {
473 | callback(err);
474 | });
475 | },
476 |
477 | enable: function(path, callback) {
478 | if (!self.blobSvcs.length) {
479 | return callback(new Error('At least one valid container must be included in the replicateCluster configuration.'));
480 | }
481 | const dPath = utils.getDisabledPath(path, self.options.disabledFileKey);
482 | async.each(self.blobSvcs, function(blob, callback) {
483 | copyBlob(blob, dPath, path, function(e) {
484 | if (e) {
485 | return callback(clusterError(blob, e));
486 | } else {
487 | self.remove(dPath, callback);
488 | }
489 | });
490 | }, function(err) {
491 | callback(err);
492 | });
493 | },
494 |
495 | getUrl: function (path) {
496 | /** @type {BlobSvc} */
497 | const blob = self.blobSvcs[0];
498 | const baseUrl = blob.svc.getContainerClient(blob.container)
499 | .getBlobClient('')
500 | .url
501 | .replace(/\/$/, '');
502 | return utils.addPathToUrl(self.options, baseUrl, path);
503 | },
504 |
505 | destroy: function(callback) {
506 | // No file descriptors or timeouts held
507 | return callback(null);
508 | },
509 |
510 | /**
511 | * Use sane defaults and user config to get array of file extensions to avoid gzipping
512 | * @param gzipEncoding {Object} ex: {jpg: true, rando: false}
513 |      * @return {Array} An array of file extensions to ignore
514 | */
515 | getGzipBlacklist: function(gzipEncoding) {
516 | const gzipSettings = gzipEncoding || {};
517 | const { whitelist, blacklist } = Object.keys(gzipSettings).reduce((prev, key) => {
518 | if (gzipSettings[key]) {
519 | prev.whitelist.push(key);
520 | } else {
521 | prev.blacklist.push(key);
522 | }
523 | return prev;
524 | }, {
525 | whitelist: [],
526 | blacklist: []
527 | });
528 |
529 | // @NOTE - we REMOVE whitelisted types from the blacklist array
530 |       const gzipBlacklist = defaultGzipBlacklist.concat(blacklist).filter(el => !whitelist.includes(el));
531 |
532 | return _.uniq(gzipBlacklist);
533 | }
534 | };
535 |
536 | return self;
537 | };
538 |
--------------------------------------------------------------------------------
/lib/storage/contentTypes.js:
--------------------------------------------------------------------------------
1 | // An export of known content types from a recent Apache setup.
2 | // This is used by default by s3.js and could be useful in other backends
3 |
4 | module.exports =
5 | {
6 | ez: 'application/andrew-inset',
7 | anx: 'application/annodex',
8 | lin: 'application/bbolin',
9 | cap: 'application/cap',
10 | pcap: 'application/cap',
11 | cu: 'application/cu-seeme',
12 | tsp: 'application/dsptype',
13 | es: 'application/ecmascript',
14 | spl: 'application/x-futuresplash',
15 | hta: 'application/hta',
16 | jar: 'application/java-archive',
17 | ser: 'application/java-serialized-object',
18 | class: 'application/java-vm',
19 | js: 'application/javascript',
20 | m3g: 'application/m3g',
21 | hqx: 'application/mac-binhex40',
22 | cpt: 'image/x-corelphotopaint',
23 | nb: 'application/mathematica',
24 | nbp: 'application/mathematica',
25 | mdb: 'application/msaccess',
26 | doc: 'application/msword',
27 | dot: 'application/msword',
28 | mxf: 'application/mxf',
29 | bin: 'application/octet-stream',
30 | oda: 'application/oda',
31 | ogx: 'application/ogg',
32 | pdf: 'application/pdf',
33 | key: 'application/pgp-keys',
34 | pgp: 'application/pgp-signature',
35 | prf: 'application/pics-rules',
36 | ps: 'application/postscript',
37 | ai: 'application/postscript',
38 | eps: 'application/postscript',
39 | epsi: 'application/postscript',
40 | epsf: 'application/postscript',
41 | eps2: 'application/postscript',
42 | eps3: 'application/postscript',
43 | rar: 'application/rar',
44 | rtf: 'application/rtf',
45 | smi: 'chemical/x-daylight-smiles',
46 | smil: 'application/smil',
47 | xml: 'application/xml',
48 | xsl: 'application/xml',
49 | xsd: 'application/xml',
50 | zip: 'application/zip',
51 | wk: 'application/x-123',
52 | '7z': 'application/x-7z-compressed',
53 | abw: 'application/x-abiword',
54 | dmg: 'application/x-apple-diskimage',
55 | bcpio: 'application/x-bcpio',
56 | torrent: 'application/x-bittorrent',
57 | cab: 'application/x-cab',
58 | cbr: 'application/x-cbr',
59 | cbz: 'application/x-cbz',
60 | cdf: 'application/x-cdf',
61 | cda: 'application/x-cdf',
62 | vcd: 'application/x-cdlink',
63 | pgn: 'application/x-chess-pgn',
64 | cpio: 'application/x-cpio',
65 | csh: 'text/x-csh',
66 | deb: 'application/x-debian-package',
67 | udeb: 'application/x-debian-package',
68 | dcr: 'application/x-director',
69 | dir: 'application/x-director',
70 | dxr: 'application/x-director',
71 | dms: 'application/x-dms',
72 | wad: 'application/x-doom',
73 | dvi: 'application/x-dvi',
74 | rhtml: 'application/x-httpd-eruby',
75 | mm: 'application/x-freemind',
76 | gnumeric: 'application/x-gnumeric',
77 | sgf: 'application/x-go-sgf',
78 | gcf: 'application/x-graphing-calculator',
79 | gtar: 'application/x-gtar',
80 | tgz: 'application/x-gtar',
81 | taz: 'application/x-gtar',
82 | hdf: 'application/x-hdf',
83 | phtml: 'application/x-httpd-php',
84 | pht: 'application/x-httpd-php',
85 | php: 'application/x-httpd-php',
86 | phps: 'application/x-httpd-php-source',
87 | php3: 'application/x-httpd-php3',
88 | php3p: 'application/x-httpd-php3-preprocessed',
89 | php4: 'application/x-httpd-php4',
90 | php5: 'application/x-httpd-php5',
91 | ica: 'application/x-ica',
92 | info: 'application/x-info',
93 | ins: 'application/x-internet-signup',
94 | isp: 'application/x-internet-signup',
95 | iii: 'application/x-iphone',
96 | iso: 'application/x-iso9660-image',
97 | jam: 'application/x-jam',
98 | jnlp: 'application/x-java-jnlp-file',
99 | jmz: 'application/x-jmol',
100 | chrt: 'application/x-kchart',
101 | kil: 'application/x-killustrator',
102 | skp: 'application/x-koan',
103 | skd: 'application/x-koan',
104 | skt: 'application/x-koan',
105 | skm: 'application/x-koan',
106 | kpr: 'application/x-kpresenter',
107 | kpt: 'application/x-kpresenter',
108 | ksp: 'application/x-kspread',
109 | kwd: 'application/x-kword',
110 | kwt: 'application/x-kword',
111 | latex: 'application/x-latex',
112 | lha: 'application/x-lha',
113 | lyx: 'application/x-lyx',
114 | lzh: 'application/x-lzh',
115 | lzx: 'application/x-lzx',
116 | frm: 'application/x-maker',
117 | maker: 'application/x-maker',
118 | frame: 'application/x-maker',
119 | fm: 'application/x-maker',
120 | fb: 'application/x-maker',
121 | book: 'application/x-maker',
122 | fbdoc: 'application/x-maker',
123 | mif: 'chemical/x-mif',
124 | wmd: 'application/x-ms-wmd',
125 | wmz: 'application/x-ms-wmz',
126 | com: 'application/x-msdos-program',
127 | exe: 'application/x-msdos-program',
128 | bat: 'application/x-msdos-program',
129 | dll: 'application/x-msdos-program',
130 | msi: 'application/x-msi',
131 | nc: 'application/x-netcdf',
132 | pac: 'application/x-ns-proxy-autoconfig',
133 | dat: 'application/x-ns-proxy-autoconfig',
134 | nwc: 'application/x-nwc',
135 | o: 'application/x-object',
136 | oza: 'application/x-oz-application',
137 | p7r: 'application/x-pkcs7-certreqresp',
138 | crl: 'application/x-pkcs7-crl',
139 | pyc: 'application/x-python-code',
140 | pyo: 'application/x-python-code',
141 | qgs: 'application/x-qgis',
142 | shp: 'application/x-qgis',
143 | shx: 'application/x-qgis',
144 | qtl: 'application/x-quicktimeplayer',
145 | rpm: 'application/x-redhat-package-manager',
146 | rb: 'application/x-ruby',
147 | sh: 'text/x-sh',
148 | shar: 'application/x-shar',
149 | swf: 'application/x-shockwave-flash',
150 | swfl: 'application/x-shockwave-flash',
151 | scr: 'application/x-silverlight',
152 | sit: 'application/x-stuffit',
153 | sitx: 'application/x-stuffit',
154 | sv4cpio: 'application/x-sv4cpio',
155 | sv4crc: 'application/x-sv4crc',
156 | tar: 'application/x-tar',
157 | tcl: 'text/x-tcl',
158 | gf: 'application/x-tex-gf',
159 | pk: 'application/x-tex-pk',
160 | texinfo: 'application/x-texinfo',
161 | texi: 'application/x-texinfo',
162 | t: 'application/x-troff',
163 | tr: 'application/x-troff',
164 | roff: 'application/x-troff',
165 | man: 'application/x-troff-man',
166 | me: 'application/x-troff-me',
167 | ms: 'application/x-troff-ms',
168 | ustar: 'application/x-ustar',
169 | src: 'application/x-wais-source',
170 | wz: 'application/x-wingz',
171 | crt: 'application/x-x509-ca-cert',
172 | xcf: 'application/x-xcf',
173 | fig: 'application/x-xfig',
174 | xpi: 'application/x-xpinstall',
175 | amr: 'audio/amr',
176 | awb: 'audio/amr-wb',
177 | axa: 'audio/annodex',
178 | au: 'audio/basic',
179 | snd: 'audio/basic',
180 | flac: 'audio/flac',
181 | mid: 'audio/midi',
182 | midi: 'audio/midi',
183 | kar: 'audio/midi',
184 | mpga: 'audio/mpeg',
185 | mpega: 'audio/mpeg',
186 | mp2: 'audio/mpeg',
187 | mp3: 'audio/mpeg',
188 | m4a: 'audio/mpeg',
189 | m3u: 'audio/x-mpegurl',
190 | oga: 'audio/ogg',
191 | ogg: 'audio/ogg',
192 | spx: 'audio/ogg',
193 | aif: 'audio/x-aiff',
194 | aiff: 'audio/x-aiff',
195 | aifc: 'audio/x-aiff',
196 | gsm: 'audio/x-gsm',
197 | wma: 'audio/x-ms-wma',
198 | wax: 'audio/x-ms-wax',
199 | ra: 'audio/x-realaudio',
200 | rm: 'audio/x-pn-realaudio',
201 | ram: 'audio/x-pn-realaudio',
202 | pls: 'audio/x-scpls',
203 | sd2: 'audio/x-sd2',
204 | wav: 'audio/x-wav',
205 | alc: 'chemical/x-alchemy',
206 | cac: 'chemical/x-cache',
207 | cache: 'chemical/x-cache',
208 | csf: 'chemical/x-cache-csf',
209 | cbin: 'chemical/x-cactvs-binary',
210 | cascii: 'chemical/x-cactvs-binary',
211 | ctab: 'chemical/x-cactvs-binary',
212 | cdx: 'chemical/x-cdx',
213 | cer: 'chemical/x-cerius',
214 | c3d: 'chemical/x-chem3d',
215 | chm: 'chemical/x-chemdraw',
216 | cif: 'chemical/x-cif',
217 | cmdf: 'chemical/x-cmdf',
218 | cml: 'chemical/x-cml',
219 | cpa: 'chemical/x-compass',
220 | bsd: 'chemical/x-crossfire',
221 | csml: 'chemical/x-csml',
222 | csm: 'chemical/x-csml',
223 | ctx: 'chemical/x-ctx',
224 | cxf: 'chemical/x-cxf',
225 | cef: 'chemical/x-cxf',
226 | emb: 'chemical/x-embl-dl-nucleotide',
227 | embl: 'chemical/x-embl-dl-nucleotide',
228 | spc: 'chemical/x-galactic-spc',
229 | inp: 'chemical/x-gamess-input',
230 | gam: 'chemical/x-gamess-input',
231 | gamin: 'chemical/x-gamess-input',
232 | fch: 'chemical/x-gaussian-checkpoint',
233 | fchk: 'chemical/x-gaussian-checkpoint',
234 | cub: 'chemical/x-gaussian-cube',
235 | gau: 'chemical/x-gaussian-input',
236 | gjc: 'chemical/x-gaussian-input',
237 | gjf: 'chemical/x-gaussian-input',
238 | gal: 'chemical/x-gaussian-log',
239 | gcg: 'chemical/x-gcg8-sequence',
240 | gen: 'chemical/x-genbank',
241 | hin: 'chemical/x-hin',
242 | istr: 'chemical/x-isostar',
243 | ist: 'chemical/x-isostar',
244 | jdx: 'chemical/x-jcamp-dx',
245 | dx: 'chemical/x-jcamp-dx',
246 | kin: 'chemical/x-kinemage',
247 | mcm: 'chemical/x-macmolecule',
248 | mmd: 'chemical/x-macromodel-input',
249 | mmod: 'chemical/x-macromodel-input',
250 | mol: 'chemical/x-mdl-molfile',
251 | rd: 'chemical/x-mdl-rdfile',
252 | rxn: 'chemical/x-mdl-rxnfile',
253 | sd: 'chemical/x-mdl-sdfile',
254 | sdf: 'chemical/x-mdl-sdfile',
255 | tgf: 'chemical/x-mdl-tgf',
256 | mcif: 'chemical/x-mmcif',
257 | mol2: 'chemical/x-mol2',
258 | b: 'chemical/x-molconn-Z',
259 | gpt: 'chemical/x-mopac-graph',
260 | mop: 'chemical/x-mopac-input',
261 | mopcrt: 'chemical/x-mopac-input',
262 | mpc: 'chemical/x-mopac-input',
263 | zmt: 'chemical/x-mopac-input',
264 | moo: 'chemical/x-mopac-out',
265 | mvb: 'chemical/x-mopac-vib',
266 | asn: 'chemical/x-ncbi-asn1-spec',
267 | prt: 'chemical/x-ncbi-asn1-ascii',
268 | ent: 'chemical/x-pdb',
269 | val: 'chemical/x-ncbi-asn1-binary',
270 | aso: 'chemical/x-ncbi-asn1-binary',
271 | pdb: 'chemical/x-pdb',
272 | ros: 'chemical/x-rosdal',
273 | sw: 'chemical/x-swissprot',
274 | vms: 'chemical/x-vamas-iso14976',
275 | vmd: 'chemical/x-vmd',
276 | xtel: 'chemical/x-xtel',
277 | xyz: 'chemical/x-xyz',
278 | gif: 'image/gif',
279 | ief: 'image/ief',
280 | jpeg: 'image/jpeg',
281 | jpg: 'image/jpeg',
282 | jpe: 'image/jpeg',
283 | pcx: 'image/pcx',
284 | png: 'image/png',
285 | svg: 'image/svg+xml',
286 | tiff: 'image/tiff',
287 | tif: 'image/tiff',
288 | cr2: 'image/x-canon-cr2',
289 | crw: 'image/x-canon-crw',
290 | ras: 'image/x-cmu-raster',
291 | cdr: 'image/x-coreldraw',
292 | pat: 'image/x-coreldrawpattern',
293 | cdt: 'image/x-coreldrawtemplate',
294 | erf: 'image/x-epson-erf',
295 | ico: 'image/x-icon',
296 | art: 'image/x-jg',
297 | jng: 'image/x-jng',
298 | bmp: 'image/x-ms-bmp',
299 | nef: 'image/x-nikon-nef',
300 | orf: 'image/x-olympus-orf',
301 | psd: 'image/x-photoshop',
302 | pnm: 'image/x-portable-anymap',
303 | pbm: 'image/x-portable-bitmap',
304 | pgm: 'image/x-portable-graymap',
305 | ppm: 'image/x-portable-pixmap',
306 | rgb: 'image/x-rgb',
307 | xbm: 'image/x-xbitmap',
308 | xpm: 'image/x-xpixmap',
309 | xwd: 'image/x-xwindowdump',
310 | eml: 'message/rfc822',
311 | igs: 'model/iges',
312 | iges: 'model/iges',
313 | msh: 'model/mesh',
314 | mesh: 'model/mesh',
315 | silo: 'model/mesh',
316 | wrl: 'x-world/x-vrml',
317 | vrml: 'x-world/x-vrml',
318 | manifest: 'text/cache-manifest',
319 | ics: 'text/calendar',
320 | icz: 'text/calendar',
321 | css: 'text/css',
322 | csv: 'text/csv',
323 | 323: 'text/h323',
324 | html: 'text/html',
325 | htm: 'text/html',
326 | shtml: 'text/html',
327 | uls: 'text/iuls',
328 | mml: 'text/mathml',
329 | asc: 'text/plain',
330 | txt: 'text/plain',
331 | text: 'text/plain',
332 | pot: 'text/plain',
333 | brf: 'text/plain',
334 | rtx: 'text/richtext',
335 | sct: 'text/scriptlet',
336 | wsc: 'text/scriptlet',
337 | tm: 'text/texmacs',
338 | ts: 'text/texmacs',
339 | tsv: 'text/tab-separated-values',
340 | bib: 'text/x-bibtex',
341 | boo: 'text/x-boo',
342 | h: 'text/x-chdr',
343 | htc: 'text/x-component',
344 | c: 'text/x-csrc',
345 | d: 'text/x-dsrc',
346 | diff: 'text/x-diff',
347 | patch: 'text/x-diff',
348 | hs: 'text/x-haskell',
349 | java: 'text/x-java',
350 | lhs: 'text/x-literate-haskell',
351 | moc: 'text/x-moc',
352 | p: 'text/x-pascal',
353 | pas: 'text/x-pascal',
354 | gcd: 'text/x-pcs-gcd',
355 | pl: 'text/x-perl',
356 | pm: 'text/x-perl',
357 | py: 'text/x-python',
358 | scala: 'text/x-scala',
359 | etx: 'text/x-setext',
360 | tk: 'text/x-tcl',
361 | tex: 'text/x-tex',
362 | ltx: 'text/x-tex',
363 | sty: 'text/x-tex',
364 | cls: 'text/x-tex',
365 | vcs: 'text/x-vcalendar',
366 | vcf: 'text/x-vcard',
367 | '3gp': 'video/3gpp',
368 | axv: 'video/annodex',
369 | dl: 'video/dl',
370 | dif: 'video/dv',
371 | dv: 'video/dv',
372 | fli: 'video/fli',
373 | gl: 'video/gl',
374 | mpeg: 'video/mpeg',
375 | mpg: 'video/mpeg',
376 | mpe: 'video/mpeg',
377 | mp4: 'video/mp4',
378 | qt: 'video/quicktime',
379 | mov: 'video/quicktime',
380 | ogv: 'video/ogg',
381 | flv: 'video/x-flv',
382 | lsf: 'video/x-la-asf',
383 | lsx: 'video/x-la-asf',
384 | mng: 'video/x-mng',
385 | asf: 'video/x-ms-asf',
386 | asx: 'video/x-ms-asf',
387 | wm: 'video/x-ms-wm',
388 | wmv: 'video/x-ms-wmv',
389 | wmx: 'video/x-ms-wmx',
390 | wvx: 'video/x-ms-wvx',
391 | avi: 'video/x-msvideo',
392 | movie: 'video/x-sgi-movie',
393 | mpv: 'video/x-matroska',
394 | mkv: 'video/x-matroska',
395 | ice: 'x-conference/x-cooltalk',
396 | sisx: 'x-epoc/x-sisx-app',
397 | vrm: 'x-world/x-vrml',
398 | webm: 'video/webm',
399 | gz: 'application/gzip'
400 | };
401 |
--------------------------------------------------------------------------------
/lib/storage/gcs.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Google Cloud Storage backend for uploadfs. See also
4 | // local.js.
5 |
6 | const storage = require('@google-cloud/storage');
7 | const extname = require('path').extname;
8 | const _ = require('lodash');
9 | const utils = require('../utils');
10 | const path = require('path');
11 |
12 | module.exports = function() {
13 | let contentTypes;
14 | let client;
15 | let cachingTime;
16 | let https;
17 | let bucketName;
18 | let endpoint = 'storage.googleapis.com';
19 | let defaultTypes;
20 | let noProtoEndpoint;
21 | let validation = false;
22 |
23 | const self = {
24 | init: function (options, callback) {
25 | if (!(process.env.GOOGLE_APPLICATION_CREDENTIALS)) {
26 | return callback('GOOGLE_APPLICATION_CREDENTIALS not set in env, cannot proceed');
27 | }
28 | if (options.validation) {
29 | validation = options.validation;
30 | }
31 | // Ultimately the result will look like https://storage.googleapis.com/[BUCKET_NAME]/[OBJECT_NAME]
32 |       // The rest is inferred, following the knox-style s3 conventions.
33 | if (options.endpoint) {
34 | endpoint = options.endpoint;
35 | if (!endpoint.match(/^https?:/)) {
36 | const defaultSecure = ((!options.port) || (options.port === 443));
37 | const secure = options.secure || defaultSecure;
38 | let port = options.port || 443;
39 | const protocol = secure ? 'https://' : 'http://';
40 | if (secure && (port === 443)) {
41 | port = '';
42 | } else if ((!secure) && (port === 80)) {
43 | port = '';
44 | } else {
45 | port = ':' + port;
46 | }
47 | endpoint = protocol + endpoint + port;
48 | }
49 | }
50 |       // The storage client auth relies on the service account key file
51 |       // whose path is given in the environment variable
52 |       // GOOGLE_APPLICATION_CREDENTIALS and, of course, on that file existing.
53 | //
54 | //
55 | // See https://cloud.google.com/docs/authentication/getting-started
56 | client = new storage.Storage();
57 | bucketName = options.bucket;
58 | defaultTypes = require(path.join(__dirname, '/contentTypes.js'));
59 | if (options.contentTypes) {
60 |         contentTypes = _.extend({}, defaultTypes, options.contentTypes);
61 | } else {
62 | contentTypes = defaultTypes;
63 | }
64 | https = options.https;
65 | cachingTime = options.cachingTime;
66 | self.options = options;
67 | return callback(null);
68 | },
69 |
70 | copyIn: function(localPath, path, options, callback) {
71 | path = utils.removeLeadingSlash(self.options, path);
72 | let ext = extname(path);
73 | if (ext.length) {
74 | ext = ext.substr(1);
75 | }
76 | let contentType = contentTypes[ext];
77 | if (!contentType) {
78 | contentType = 'application/octet-stream';
79 | }
80 |
81 | let cacheControl = 'no-cache';
82 | if (cachingTime) {
83 | cacheControl = 'public, max-age=' + cachingTime;
84 | }
85 | const uploadOptions = {
86 | destination: path,
87 | gzip: true,
88 | public: true,
89 | validation,
90 | metadata: {
91 | cacheControl,
92 |         contentType
93 | }
94 | };
95 | const bucket = client.bucket(bucketName);
96 | return bucket.upload(localPath, uploadOptions, callback);
97 | },
98 |
99 | copyOut: function(path, localPath, options, callback) {
100 | path = utils.removeLeadingSlash(self.options, path);
101 | const mergedOptions = _.assign({
102 | destination: localPath,
103 | validation
104 | }, options);
105 | client.bucket(bucketName).file(path).download(mergedOptions, callback);
106 | },
107 |
108 | remove: function(path, callback) {
109 | path = utils.removeLeadingSlash(self.options, path);
110 | client.bucket(bucketName).file(path).delete({}, callback);
111 | },
112 |
113 | enable: function(path, callback) {
114 | path = utils.removeLeadingSlash(self.options, path);
115 | client.bucket(bucketName).file(path).makePublic(callback);
116 | },
117 |
118 | disable: function(path, callback) {
119 | path = utils.removeLeadingSlash(self.options, path);
120 | client.bucket(bucketName).file(path).makePrivate({}, callback);
121 | },
122 |
123 | getUrl: function (path) {
124 | noProtoEndpoint = endpoint.replace(/^https?:\/\//i, '');
125 | const url = (https ? 'https://' : 'http://') + bucketName + '.' + noProtoEndpoint;
126 | return utils.addPathToUrl(self.options, url, path);
127 | },
128 |
129 | destroy: function(callback) {
130 | // No file descriptors or timeouts held
131 | return callback(null);
132 | }
133 | };
134 | return self;
135 | };
136 |
--------------------------------------------------------------------------------
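For orientation, a minimal usage sketch of the gcs backend above. The bucket name is a placeholder, and the `backend: 'gcs'` selector is an assumption by analogy with `backend: 's3'` in s3TestOptions-sample.js; per `init` above, GOOGLE_APPLICATION_CREDENTIALS must point at a service account key file or the callback receives an error.

// Sketch only: copy a file into GCS and print its web URL.
const uploadfs = require('uploadfs')();

uploadfs.init({
  backend: 'gcs', // assumed selector, by analogy with s3TestOptions-sample.js
  bucket: 'my-bucket' // placeholder bucket name
}, function(e) {
  if (e) {
    // Reported here if GOOGLE_APPLICATION_CREDENTIALS is not set
    throw e;
  }
  uploadfs.copyIn('test.txt', '/one/two/three/test.txt', function(e) {
    if (e) {
      throw e;
    }
    console.log(uploadfs.getUrl('/one/two/three/test.txt'));
  });
});

--------------------------------------------------------------------------------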
/lib/storage/local.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Local filesystem-based backend for uploadfs. See also
4 | // s3.js. The main difference between this backend and just using
5 | // the local filesystem directly is that it creates parent
6 | // folders automatically when they are discovered to be missing,
7 | // and it encourages you to write code that will still work
8 | // when you switch to the s3 backend
9 |
10 | const dirname = require('path').dirname;
11 | const fs = require('fs');
12 | const copyFile = require('../copyFile.js');
13 | const async = require('async');
14 | const utils = require('../utils.js');
15 |
16 | module.exports = function() {
17 | let uploadsPath;
18 | let uploadsUrl;
19 | let removeCandidates = [];
20 | let timeout;
21 |
22 | const self = {
23 | init: function(options, callback) {
24 | self.options = options;
25 | uploadsPath = options.uploadsPath;
26 | if (!uploadsPath) {
27 | return callback('uploadsPath not set');
28 | }
29 | uploadsUrl = options.uploadsUrl;
30 | if (!uploadsUrl) {
31 | return callback('uploadsUrl not set');
32 | }
33 | // We use a timeout that we reinstall each time rather than
34 | // an interval to avoid pileups
35 | timeout = setTimeout(cleanup, 1000);
36 | return callback(null);
37 |
38 | function cleanup() {
39 | timeout = null;
40 | const list = removeCandidates;
41 | // Longest paths first, so we don't try to remove parents before children
42 | // and wind up never getting rid of the parent
43 | list.sort(function(a, b) {
44 | if (a.length > b.length) {
45 | return -1;
46 | } else if (a.length < b.length) {
47 | return 1;
48 | } else {
49 | return 0;
50 | }
51 | });
52 | // Building new list for next pass
53 | removeCandidates = [];
54 |       // Parallelism here would just remove things too soon, preventing a parent
55 |       // from being removed after a child
56 | return async.eachSeries(list, function(path, callback) {
57 | const uploadPath = uploadsPath + path;
58 | fs.rmdir(uploadPath, function(e) {
59 |           // We're not fussy about the outcome; if the directory still has files
60 |           // in it we're actually depending on this call to fail
61 | if (!e) {
62 | // It worked, so try to remove the parent (which will fail if not empty, etc.)
63 | add(dirname(path));
64 | }
65 | return callback(null);
66 | });
67 | }, function() {
68 | // Try again in 1 second, typically removing another layer of parents if empty, etc.
69 | if (!self.destroyed) {
70 | timeout = setTimeout(cleanup, 1000);
71 | }
72 | });
73 |
74 | function add(path) {
75 | // Don't remove uploadfs itself
76 | if (path.length > 1) {
77 | removeCandidates.push(path);
78 | }
79 | }
80 | }
81 | },
82 |
83 | destroy: function(callback) {
84 | // node cannot exit if we still hold a timeout
85 | if (timeout) {
86 | clearTimeout(timeout);
87 | }
88 | self.destroyed = true;
89 | return callback(null);
90 | },
91 |
92 | copyIn: function(localPath, path, options, callback) {
93 | const uploadPath = uploadsPath + path;
94 | return copyFile(localPath, uploadPath, callback);
95 | },
96 |
97 | copyOut: function(path, localPath, options, callback) {
98 | const downloadPath = uploadsPath + path;
99 | return copyFile(downloadPath, localPath, callback);
100 | },
101 |
102 | streamOut: function(path, options) {
103 | return fs.createReadStream(uploadsPath + path);
104 | },
105 |
106 | remove: function(path, callback) {
107 | const uploadPath = uploadsPath + path;
108 | fs.unlink(uploadPath, callback);
109 | if (dirname(path).length > 1) {
110 | removeCandidates.push(dirname(path));
111 | }
112 | },
113 |
114 | enable: function(path, callback) {
115 | if (self.options.disabledFileKey) {
116 | return fs.rename(uploadsPath + utils.getDisabledPath(path, self.options.disabledFileKey), uploadsPath + path, callback);
117 | } else {
118 | // World readable, owner writable. Reasonable since
119 | // web accessible files are world readable in that
120 | // sense regardless
121 | return fs.chmod(uploadsPath + path, self.getEnablePermissions(), callback);
122 | }
123 | },
124 |
125 | getEnablePermissions: function() {
126 | return self.options.enablePermissions || parseInt('644', 8);
127 | },
128 |
129 | disable: function(path, callback) {
130 | if (self.options.disabledFileKey) {
131 | return fs.rename(uploadsPath + path, uploadsPath + utils.getDisabledPath(path, self.options.disabledFileKey), callback);
132 | } else {
133 | // No access. Note this means you must explicitly
134 | // enable to get read access back, even with copyFileOut
135 | return fs.chmod(uploadsPath + path, self.getDisablePermissions(), callback);
136 | }
137 | },
138 |
139 | getDisablePermissions: function() {
140 | return self.options.disablePermissions || parseInt('0000', 8);
141 | },
142 |
143 | getUrl: function(path) {
144 | return utils.addPathToUrl(self.options, uploadsUrl, path);
145 | },
146 |
147 | migrateToDisabledFileKey: function(callback) {
148 | if (!self.options.disabledFileKey) {
149 | return callback(new Error('migrateToDisabledFileKey invoked with no disabledFileKey option set.'));
150 | }
151 | const candidates = [];
152 | try {
153 | spelunk('');
154 | } catch (e) {
155 | return callback(e);
156 | }
157 | return async.eachLimit(candidates, 5, function(file, callback) {
158 | fs.chmodSync(uploadsPath + file, self.options.enablePermissions || parseInt('644', 8));
159 | self.disable(file, callback);
160 | }, callback);
161 | function spelunk(folder) {
162 | const files = fs.readdirSync(uploadsPath + folder);
163 | files.forEach(function(file) {
164 | const stats = fs.statSync(uploadsPath + folder + '/' + file);
165 | const mode = stats.mode & parseInt('0777', 8);
166 | if (stats.isDirectory()) {
167 | return spelunk(folder + '/' + file);
168 | }
169 | if (mode === self.getDisablePermissions()) {
170 | candidates.push(folder + '/' + file);
171 | }
172 | });
173 | }
174 | },
175 |
176 | migrateFromDisabledFileKey: function(callback) {
177 | if (self.options.disabledFileKey) {
178 | return callback('migrateFromDisabledFileKey invoked with disabledFileKey option still set.');
179 | }
180 | const candidates = [];
181 | try {
182 | spelunk('');
183 | } catch (e) {
184 | return callback(e);
185 | }
186 | return async.eachLimit(candidates, 5, function(file, callback) {
187 | return async.series([
188 | function(callback) {
189 | return fs.rename(uploadsPath + file, removeDisabledSuffix(uploadsPath + file), callback);
190 | },
191 | function(callback) {
192 | return self.disable(removeDisabledSuffix(file), callback);
193 | }
194 | ], callback);
195 | function removeDisabledSuffix(path) {
196 | return path.replace(/-disabled-[0-9a-f]+$/, '');
197 | }
198 | }, callback);
199 | function spelunk(folder) {
200 | const files = fs.readdirSync(uploadsPath + folder);
201 | files.forEach(function(file) {
202 | const stats = fs.statSync(uploadsPath + folder + '/' + file);
203 | if (stats.isDirectory()) {
204 | return spelunk(folder + '/' + file);
205 | }
206 | if (file.match(/-disabled-[0-9a-f]+$/)) {
207 | candidates.push(folder + '/' + file);
208 | }
209 | });
210 | }
211 | },
212 |
213 | // Exported for unit testing only
214 | _testCopyFile: function(path1, path2, options, callback) {
215 | return copyFile(path1, path2, options, callback);
216 | }
217 | };
218 |
219 | return self;
220 | };
221 |
--------------------------------------------------------------------------------
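To make the two disable strategies above concrete, a short sketch; the paths and the key are placeholders. With `disabledFileKey` set, `disable` renames the file to the HMAC-suffixed path produced by `utils.getDisabledPath`; without it, the file is chmod-ed to `0000` (or `disablePermissions`).

// Sketch only: local backend with an unguessable disable suffix.
const uploadfs = require('uploadfs')();
const path = require('path');

uploadfs.init({
  backend: 'local',
  uploadsPath: path.join(__dirname, '/public/uploads'),
  uploadsUrl: 'http://localhost:3000/uploads',
  disabledFileKey: 'a long random secret' // placeholder
}, function(e) {
  if (e) {
    throw e;
  }
  uploadfs.disable('/images/secret.jpg', function(e) {
    // On disk the file is now at
    // <uploadsPath>/images/secret.jpg-disabled-<hmac-sha256 hex digest>,
    // so its URL cannot be guessed
    uploadfs.enable('/images/secret.jpg', function(e) {
      // Renamed back; the original URL works again
    });
  });
});

--------------------------------------------------------------------------------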
/lib/storage/noGzipContentTypes.js:
--------------------------------------------------------------------------------
1 | // Content types NOT suitable for gzip: these files
2 | // are already compressed, so recompressing wastes
3 | // CPU (a real cost on phones etc.), and gzip can
4 | // confuse browsers that do not receive the expected
5 | // transfer encoding header
6 |
7 | module.exports = [
8 | 'image/gif', 'image/jpeg', 'image/png', 'audio/mpeg', 'video/mpeg', 'video/mp4', 'video/webm', 'video/quicktime', 'application/zip', 'application/gzip', 'application/x-gtar'
9 | ];
10 |
--------------------------------------------------------------------------------
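The s3 backend below consults this list through its `noGzipContentTypes` (replace the list) and `addNoGzipContentTypes` (extend it) options. A sketch of extending it; the PDF type is just an example and the credentials are placeholders.

// Sketch only: keep the default exemptions, but also skip gzip for PDFs.
const uploadfs = require('uploadfs')();

uploadfs.init({
  backend: 's3',
  key: 'xxx', // placeholder
  secret: 'xxx', // placeholder
  bucket: 'my-bucket', // placeholder
  addNoGzipContentTypes: [ 'application/pdf' ]
}, function(e) {
  // copyIn now uploads PDFs without Content-Encoding: gzip
});

--------------------------------------------------------------------------------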
/lib/storage/s3.js:
--------------------------------------------------------------------------------
1 | /* jshint node:true */
2 |
3 | // Amazon s3-based backend for uploadfs. See also
4 | // local.js.
5 |
6 | const fs = require('fs');
7 | const AWS = require('aws-sdk');
8 | const extname = require('path').extname;
9 | const _ = require('lodash');
10 | const utils = require('../utils');
11 | const { PassThrough } = require('stream');
12 |
13 | module.exports = function() {
14 | let contentTypes;
15 | let client;
16 | let cachingTime;
17 | let https;
18 | let bucket;
19 | let bucketObjectsACL;
20 | let disabledBucketObjectsACL;
21 | let endpoint;
22 | let defaultTypes;
23 | let noProtoEndpoint;
24 | let pathStyle;
25 | let noGzipContentTypes;
26 | let addNoGzipContentTypes;
27 | const self = {
28 | init: function (options, callback) {
29 |       // knox backwards compatibility
30 | endpoint = 's3.amazonaws.com';
31 | if (options.secret) {
32 | options.credentials = new AWS.Credentials(options.key, options.secret, options.token || null);
33 | }
34 | bucket = options.bucket;
35 | bucketObjectsACL = options.bucketObjectsACL || 'public-read';
36 | disabledBucketObjectsACL = options.disabledBucketObjectsACL || 'private';
37 | options.params = options.params || {};
38 | options.params.Bucket = options.params.Bucket || options.bucket;
39 | noGzipContentTypes = options.noGzipContentTypes || require('./noGzipContentTypes');
40 | addNoGzipContentTypes = options.addNoGzipContentTypes || [];
41 |       // Backwards compatibility for the `endpoint`, `secure` and `port` options
42 | if (options.endpoint) {
43 | endpoint = options.endpoint;
44 | if (!endpoint.match(/^https?:/)) {
45 | // Infer it like knox would
46 | const defaultSecure = ((!options.port) || (options.port === 443));
47 | const secure = options.secure || defaultSecure;
48 | let port = options.port || 443;
49 | const protocol = secure ? 'https://' : 'http://';
50 | if (secure && (port === 443)) {
51 | port = '';
52 | } else if ((!secure) && (port === 80)) {
53 | port = '';
54 | } else {
55 | port = ':' + port;
56 | }
57 | endpoint = protocol + endpoint + port;
58 | }
59 | options.params = options.params || {};
60 | options.params.endpoint = options.params.endpoint || new AWS.Endpoint(endpoint);
61 | }
62 |
63 |       // Support either the knox-style `style` attribute or the AWS `s3ForcePathStyle` attribute
64 | if (options.style && (options.style === 'path')) {
65 | options.s3ForcePathStyle = true;
66 | }
67 | pathStyle = !!options.s3ForcePathStyle;
68 |
69 | if (options.agent) {
70 | options.params = options.params || {};
71 | options.params.httpOptions = options.params.httpOptions || {};
72 | options.params.httpOptions.agent = options.params.httpOptions.agent || options.agent;
73 | }
74 | client = new AWS.S3(options);
75 | defaultTypes = require('./contentTypes.js');
76 | if (options.contentTypes) {
77 |         contentTypes = _.extend({}, defaultTypes, options.contentTypes);
78 | } else {
79 | contentTypes = defaultTypes;
80 | }
81 | https = (options.https === undefined) ? true : options.https;
82 | cachingTime = options.cachingTime;
83 | self.options = options;
84 | return callback(null);
85 | },
86 |
87 | copyIn: function(localPath, path, options, callback) {
88 | let ext = extname(path);
89 | if (ext.length) {
90 | ext = ext.substr(1);
91 | }
92 | let contentType = contentTypes[ext];
93 | if (!contentType) {
94 | contentType = 'application/octet-stream';
95 | }
96 |
97 | const inputStream = fs.createReadStream(localPath);
98 |
99 | const params = {
100 | ACL: bucketObjectsACL,
101 | Key: utils.removeLeadingSlash(self.options, path),
102 | Body: inputStream,
103 | ContentType: contentType
104 | };
105 |
106 | if (gzipAppropriate(contentType)) {
107 | params.ContentEncoding = 'gzip';
108 | const gzip = require('zlib').createGzip();
109 | inputStream.pipe(gzip);
110 | params.Body = gzip;
111 | }
112 |
113 | if (cachingTime) {
114 | params.CacheControl = 'public, max-age=' + cachingTime;
115 | }
116 |
117 | return client.upload(params, callback);
118 |
119 | function gzipAppropriate(contentType) {
120 | return !_.includes([ ...noGzipContentTypes, ...addNoGzipContentTypes ], contentType);
121 | }
122 | },
123 |
124 | streamOut: function(path, options) {
125 | const result = new PassThrough();
126 | const params = {
127 | Key: utils.removeLeadingSlash(self.options, path)
128 | };
129 | const request = client.getObject(params);
130 | let inputStream = request.createReadStream();
131 | // Errors do not automatically propagate with pipe()
132 | inputStream.on('error', e => {
133 | result.emit('error', e);
134 | });
135 | request.on('httpHeaders', function(status, headers) {
136 | if (headers['content-encoding'] === 'gzip') {
137 | const gunzip = require('zlib').createGunzip();
138 | gunzip.on('error', e => {
139 | result.emit('error', e);
140 | });
141 | inputStream.pipe(gunzip);
142 | inputStream = gunzip;
143 | }
144 | inputStream.pipe(result);
145 | });
146 | return result;
147 | },
148 |
149 | copyOut: function(path, localPath, options, callback) {
150 | let finished = false;
151 | const outputStream = fs.createWriteStream(localPath);
152 | const inputStream = self.streamOut(path, options);
153 | inputStream.pipe(outputStream);
154 | inputStream.on('error', function(err) {
155 | // Watch out for any oddities in stream implementation
156 | if (finished) {
157 | return;
158 | }
159 | finished = true;
160 | return callback(err);
161 | });
162 | outputStream.on('error', function(err) {
163 | // Watch out for any oddities in stream implementation
164 | if (finished) {
165 | return;
166 | }
167 | finished = true;
168 | return callback(err);
169 | });
170 | outputStream.on('finish', function() {
171 | // Watch out for any oddities in stream implementation
172 | if (finished) {
173 | return;
174 | }
175 | finished = true;
176 | return callback(null);
177 | });
178 | },
179 |
180 | remove: function(path, callback) {
181 | return client.deleteObject({ Key: utils.removeLeadingSlash(self.options, path) }, callback);
182 | },
183 |
184 | enable: function(path, callback) {
185 | return client.putObjectAcl({
186 | ACL: bucketObjectsACL,
187 | Key: utils.removeLeadingSlash(self.options, path)
188 | }, callback);
189 | },
190 |
191 | disable: function(path, callback) {
192 | return client.putObjectAcl({
193 | ACL: disabledBucketObjectsACL,
194 | Key: utils.removeLeadingSlash(self.options, path)
195 | }, callback);
196 | },
197 |
198 | getUrl: function (path) {
199 | let url;
200 | noProtoEndpoint = endpoint.replace(/^https?:\/\//i, '');
201 | if (pathStyle) {
202 | url = (https ? 'https://' : 'http://') + noProtoEndpoint + '/' + bucket;
203 | } else {
204 | url = (https ? 'https://' : 'http://') + bucket + '.' + noProtoEndpoint;
205 | }
206 | return utils.addPathToUrl(self.options, url, path);
207 | },
208 |
209 | destroy: function(callback) {
210 | // No file descriptors or timeouts held
211 | return callback(null);
212 | }
213 | };
214 | return self;
215 | };
216 |
--------------------------------------------------------------------------------
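A sketch of consuming `streamOut` from the backend above, assuming an already-initialized uploadfs instance; the paths are placeholders. Input and gunzip errors are re-emitted on the returned PassThrough, so an 'error' handler belongs on it.

// Sketch only: stream a stored object to a local file; objects stored
// with Content-Encoding: gzip are transparently gunzipped above.
const fs = require('fs');

const input = uploadfs.streamOut('/one/two/three/test.txt');
input.on('error', function(e) {
  // HTTP failures also surface here; the error may carry a statusCode
  console.error('streamOut failed:', e);
});
input.pipe(fs.createWriteStream('local-copy.txt'));

--------------------------------------------------------------------------------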
/lib/utils.js:
--------------------------------------------------------------------------------
1 | const crypto = require('crypto');
2 | /**
3 | * Helper functions
4 | **/
5 | module.exports = {
6 | // Use an unguessable filename suffix to disable files.
7 | // This is secure at the web level if the webserver is not
8 | // configured to serve indexes of files, and it does not impede the
9 | // use of rsync etc. Used when options.disabledFileKey is set.
10 | // Use of an HMAC to do this for each filename ensures that even if
11 | // one such filename is exposed, the others remain secure
12 |
13 | getDisabledPath: function(path, disabledFileKey) {
14 | const hmac = crypto.createHmac('sha256', disabledFileKey);
15 | hmac.update(path);
16 | const disabledPath = path + '-disabled-' + hmac.digest('hex');
17 | return disabledPath;
18 | },
19 |
20 | getPathFromDisabledPath: function(path) {
21 | return path.replace(/-disabled-.*/g, '');
22 | },
23 |
24 | // Append a path to a bucket's base URL, with a joining slash if not provided.
25 | // This is shared by several backends, while others have their own path
26 | // handling needs. We want to ensure that both `path/to/file` (which others
27 | // sometimes use) and `/path/to/file` (always used by Apostrophe) behave
28 | // reasonably.
29 | //
30 | // If `path` is nullish `url` is returned as-is.
31 | //
32 | // If `options.strictPaths` is `true`, we do not attempt to provide a slash
33 | // when needed
34 |
35 | addPathToUrl(options, url, path) {
36 | if (options.strictPaths) {
37 | if (path != null) {
38 | return url + path;
39 | } else {
40 | return url;
41 | }
42 | } else {
43 | if (path != null) {
44 | return url + ((path.charAt(0) !== '/') ? '/' : '') + path;
45 | } else {
46 | return url;
47 | }
48 | }
49 | },
50 |
51 | // Leading slashes were the norm with knox, but
52 | // produce unwanted extra slashes in the URL with
53 | // the AWS SDK for S3 and in GCS, so return the
54 | // string without them.
55 | //
56 | // If `options.strictPaths` is true, we do not
57 | // make this modification.
58 |
59 | removeLeadingSlash(options, key) {
60 | if (options.strictPaths) {
61 | return key;
62 | } else {
63 | return key.replace(/^\//, '');
64 | }
65 | }
66 |
67 | };
68 |
--------------------------------------------------------------------------------
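Concretely, the default (non-strict) behavior of the helpers above; the CDN URL is a placeholder.

// Sketch only: path handling with and without strictPaths.
const utils = require('./lib/utils.js');

// A joining slash is inserted only when the path does not supply one:
utils.addPathToUrl({}, 'https://cdn.example.com', 'a/b.jpg');
// -> 'https://cdn.example.com/a/b.jpg'
utils.addPathToUrl({}, 'https://cdn.example.com', '/a/b.jpg');
// -> 'https://cdn.example.com/a/b.jpg'

// Leading slashes are stripped from object keys for S3 and GCS:
utils.removeLeadingSlash({}, '/a/b.jpg');
// -> 'a/b.jpg'

// With strictPaths, both strings are used verbatim:
utils.addPathToUrl({ strictPaths: true }, 'https://cdn.example.com', 'a/b.jpg');
// -> 'https://cdn.example.coma/b.jpg' (no slash is supplied for you)

--------------------------------------------------------------------------------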
/logos/logo-box-builtby.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/logos/logo-box-builtby.png
--------------------------------------------------------------------------------
/logos/logo-box-madefor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/logos/logo-box-madefor.png
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "uploadfs",
3 | "version": "1.24.3",
4 | "description": "Store files in a web-accessible location via a simplified API. Can automatically scale and rotate images. Includes S3, Azure and local filesystem-based backends with the most convenient features of each.",
5 | "main": "uploadfs.js",
6 | "scripts": {
7 | "test": "npm run testAzure && GOOGLE_APPLICATION_CREDENTIALS=gcs-credentials-uploadfstest.json mocha test/ && node test-imagemagick.js && eslint .",
8 | "testAzure": "env AZURE_TEST_FILE='test.jpg' mocha test/azure.js",
9 | "webp": "./webp-test.js",
10 | "lint-be": "eslint --fix 'lib/**/*.js'",
11 | "test-sharp": "npm run testAzure && GOOGLE_APPLICATION_CREDENTIALS=gcs-credentials-uploadfstest.json mocha test/ && node test-sharp.js && eslint ."
12 | },
13 | "repository": {
14 | "type": "git",
15 | "url": "git@github.com:apostrophecms/uploadfs.git"
16 | },
17 | "keywords": [
18 | "upload",
19 | "files",
20 | "s3",
21 | "storage"
22 | ],
23 | "author": "Apostrophe Technologies, Inc.",
24 | "license": "MIT",
25 | "dependencies": {
26 | "async": "^1.0.0",
27 | "bluebird": "^3.7.2",
28 | "es6-promise": "^4.1.0",
29 | "fs-extra": "^5.0.0",
30 | "gzipme": "^0.1.1",
31 | "lodash": "^4.17.21",
32 | "rimraf": "^5.0.7"
33 | },
34 | "optionalDependencies": {
35 | "@azure/storage-blob": "^12.14.0",
36 | "@google-cloud/storage": "^6.11.0",
37 | "aws-sdk": "^2.645.0",
38 | "sharp": "^0.32.6"
39 | },
40 | "devDependencies": {
41 | "eslint": "^8.0.0",
42 | "eslint-config-apostrophe": "^4.0.0",
43 | "mocha": "^10.2.0",
44 | "node-fetch": "^2.6.9",
45 | "stat-mode": "^0.2.2"
46 | }
47 | }
--------------------------------------------------------------------------------
/s3TestOptions-sample.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | backend: 's3',
3 | secret: 'xxx',
4 | key: 'xxx',
5 | bucket: 'yourownbucketnamefromamazons3',
6 | region: 'us-west-2'
7 | };
8 |
--------------------------------------------------------------------------------
/sample.js:
--------------------------------------------------------------------------------
1 | // An extremely simple app that accepts uploaded files
2 | // and stores them in either a local folder or s3,
3 | // depending on which backend you choose.
4 |
5 | const express = require('express');
6 | const uploadfs = require('uploadfs')();
7 | const multipart = require('connect-multiparty');
8 | const multipartMiddleware = multipart();
9 | const path = require('path');
10 |
11 | // For the local backend
12 | const uploadsPath = path.join(__dirname, '/public/uploads');
13 | const uploadsLocalUrl = '/uploads';
14 | const options = {
15 | backend: 'local',
16 | uploadsPath,
17 | uploadsUrl: 'http://localhost:3000' + uploadsLocalUrl,
18 | // Required if you use imageSizes and copyImageIn
19 | tempPath: path.join(__dirname, '/temp'),
20 | imageSizes: [
21 | {
22 | name: 'small',
23 | width: 320,
24 | height: 320
25 | },
26 | {
27 | name: 'medium',
28 | width: 640,
29 | height: 640
30 | },
31 | {
32 | name: 'large',
33 | width: 1140,
34 | height: 1140
35 | }
36 | ]
37 | };
38 |
39 | uploadfs.init(options, createApp);
40 |
41 | function createApp(err) {
42 | if (err) {
43 | console.log(err);
44 | process.exit(1);
45 | }
46 | const app = express();
47 |
48 | // For the local backend: serve the uploaded files at /uploads.
49 | // With the s3 backend you don't need this of course, s3 serves
50 | // the files for you.
51 |
52 | app.use(uploadsLocalUrl, express.static(uploadsPath));
53 |
54 | app.get('/', function(req, res) {
55 |     res.send('<form method="POST" enctype="multipart/form-data">' +
56 |       '<input type="file" name="photo" /> <input type="submit" value="Upload" />' +
57 |       '</form>');
58 | });
59 |
60 | app.post('/', multipartMiddleware, function(req, res) {
61 | uploadfs.copyImageIn(req.files.photo.path, '/profiles/me', function(e, info) {
62 | if (e) {
63 | res.send('An error occurred: ' + e);
64 | } else {
65 |         res.send('<h1>All is well. Here is the image in three sizes plus the original.</h1>' +
66 |           '<img src="' + uploadfs.getUrl() + info.basePath + '.small.jpg" /> ' +
67 |           '<img src="' + uploadfs.getUrl() + info.basePath + '.medium.jpg" /> ' +
68 |           '<img src="' + uploadfs.getUrl() + info.basePath + '.large.jpg" /> ' +
69 |           '<img src="' + uploadfs.getUrl() + info.basePath + '.jpg" />');
70 | }
71 | });
72 | });
73 | app.listen(3000);
74 | console.log('Listening at http://localhost:3000');
75 | }
76 |
--------------------------------------------------------------------------------
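To point sample.js at s3 instead of the local backend, only the options object changes; a sketch shaped like s3TestOptions-sample.js above, with placeholder credentials. The express.static middleware for /uploads then becomes unnecessary, since s3 serves the files itself.

// Sketch only: s3 options for sample.js; credentials are placeholders.
const options = {
  backend: 's3',
  secret: 'xxx',
  key: 'xxx',
  bucket: 'yourownbucketnamefromamazons3',
  region: 'us-west-2',
  // Still required when using imageSizes and copyImageIn
  tempPath: path.join(__dirname, '/temp'),
  imageSizes: [
    { name: 'small', width: 320, height: 320 },
    { name: 'medium', width: 640, height: 640 },
    { name: 'large', width: 1140, height: 1140 }
  ]
};

--------------------------------------------------------------------------------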
/test-imagemagick.js:
--------------------------------------------------------------------------------
1 | const uploadfs = require('./uploadfs.js')();
2 | const fs = require('fs');
3 | const async = require('async');
4 | const Promise = require('bluebird');
5 | const _ = require('lodash');
6 | const path = require('path');
7 |
8 | // Test the imagemagick image backend
9 |
10 | const localOptions = {
11 | storage: 'local',
12 | image: 'imagemagick',
13 | uploadsPath: path.join(__dirname, '/test'),
14 | uploadsUrl: 'http://localhost:3000/test'
15 | };
16 |
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | const tempPath = path.join(__dirname, '/temp');
36 | const basePath = '/images/profiles/me';
37 |
38 | localOptions.imageSizes = imageSizes;
39 | localOptions.tempPath = tempPath;
40 | localOptions.backend = 'local';
41 |
42 | localTestStart(function () {
43 | let filesSeen = false;
44 | console.log('RERUN TESTS WITH TEST OF POSTPROCESSORS');
45 | localOptions.postprocessors = [
46 | {
47 | postprocessor: function(files, folder, options) {
48 | console.log('in a postprocessor');
49 | if (!(options && options.test)) {
50 | console.error('postprocessor did not receive options');
51 | process.exit(1);
52 | }
53 | if (!files) {
54 | console.error('No files array passed to postprocessor');
55 | process.exit(1);
56 | }
57 | if (!files.length) {
58 | return Promise.resolve(true);
59 | }
60 | if (!files[0].match(/\.(gif|jpg|png)$/)) {
61 | console.error('postprocessor invoked for inappropriate file extension');
62 | process.exit(1);
63 | }
64 | if (!fs.existsSync(files[0])) {
65 | console.error('postprocessor invoked for nonexistent file');
66 | process.exit(1);
67 | }
68 | if (require('path').dirname(files[0]) !== folder) {
69 | console.error('folder parameter to postprocessor is incorrect');
70 | }
71 | _.each(localOptions.imageSizes, function(size) {
72 | if (!_.find(files, function(file) {
73 | return file.match(size.name);
74 | })) {
75 | console.error('postprocessor saw no file for the size ' + size.name);
76 | process.exit(1);
77 | }
78 | });
79 | filesSeen = true;
80 | return Promise.resolve(true);
81 | },
82 | extensions: [ 'gif', 'jpg', 'png' ],
83 | options: {
84 | test: true
85 | }
86 | }
87 | ];
88 | localTestStart(function () {
89 | if (!filesSeen) {
90 | console.error('postprocessor saw no files');
91 | process.exit(1);
92 | }
93 |     console.log('Tests done');
94 | process.exit(0);
95 | });
96 | });
97 |
98 | function localTestStart(cb) {
99 | const options = localOptions;
100 |   console.log('Initializing uploadfs for the ' + options.backend + ' storage backend with the imagemagick image backend');
101 | uploadfs.init(options, function(e) {
102 | if (e) {
103 | console.log('uploadfs.init failed:');
104 | console.log(e);
105 | process.exit(1);
106 | }
107 |     console.log('uploadfs.init', options);
108 | testCopyImageIn();
109 | });
110 |
111 | function testCopyImageIn() {
112 | console.log('testing copyImageIn');
113 |
114 | // Note copyImageIn adds an extension for us
115 | uploadfs.copyImageIn('test.jpg', basePath, function(e, info) {
116 | if (e) {
117 | console.log('testCopyImageIn failed:');
118 | console.log(e);
119 | process.exit(1);
120 | }
121 |
122 | if (info.basePath !== '/images/profiles/me') {
123 | console.log('info.basePath is incorrect');
124 | process.exit(1);
125 | }
126 |
127 | console.log('Testing that returned image dimensions are reoriented');
128 |
129 | if ((info.width !== 1936) || (info.height !== 2592)) {
130 | console.log('Width and height missing or not reoriented for web use');
131 | console.log(info);
132 | process.exit(1);
133 | }
134 |
135 | if ((info.originalWidth !== 2592) || (info.originalHeight !== 1936)) {
136 | console.log('Original width and height missing or incorrect');
137 | console.log(info);
138 | process.exit(1);
139 | }
140 |
141 | const stats = fs.statSync('test/images/profiles/me.jpg');
142 |
143 | if (!stats.size) {
144 | console.log('Copied image is empty or missing');
145 | process.exit(1);
146 | }
147 |
148 | // We already tested remove, just do it to mop up
149 | console.log('Removing files...');
150 | uploadfs.remove('/images/profiles/me.jpg', function(e) {
151 | async.each(imageSizes, function(size, callback) {
152 | const name = info.basePath + '.' + size.name + '.jpg';
153 | const stats = fs.statSync('test' + name);
154 | if (!stats.size) {
155 | console.log('Scaled and copied image is empty or missing (2)');
156 | process.exit(1);
157 | }
158 |
159 | // We already tested remove, just do it to mop up
160 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
161 | callback();
162 | });
163 | }, function(err) {
164 | if (err) {
165 | console.log('Test failed', err);
166 | process.exit(1);
167 | }
168 | testCopyImageInCrop(cb);
169 | });
170 | }); // remove me.jpg
171 | });
172 | }
173 |
174 | function testCopyImageInCrop(cb) {
175 | console.log('testing copyImageIn with cropping');
176 |
177 | // Note copyImageIn adds an extension for us
178 | // Should grab the flowers
179 | uploadfs.copyImageIn('test.jpg', '/images/profiles/me-cropped', {
180 | crop: {
181 | top: 830,
182 | left: 890,
183 | width: 500,
184 | height: 500
185 | }
186 | }, function(e, info) {
187 | if (e) {
188 | console.log('testCopyImageIn failed:');
189 | console.log(e);
190 | process.exit(1);
191 | }
192 |
193 | if (info.basePath !== '/images/profiles/me-cropped') {
194 | console.log('info.basePath is incorrect');
195 | process.exit(1);
196 | }
197 |
198 | console.log('Testing that returned image dimensions are reoriented');
199 |
200 | if ((info.width !== 500) || (info.height !== 500)) {
201 | console.log('Reported size does not match crop');
202 | console.log(info);
203 | process.exit(1);
204 | }
205 |
206 | const stats = fs.statSync('test/images/profiles/me-cropped.jpg');
207 |
208 | if (!stats.size) {
209 | console.log('Copied image is empty or missing');
210 | process.exit(1);
211 | }
212 |
213 | // We already tested remove, just do it to mop up
214 | console.log('Removing files...');
215 | uploadfs.remove(`${basePath}-cropped.jpg`, function(e) {
216 | async.each(imageSizes, function(size, callback) {
217 | const name = info.basePath + '.' + size.name + '.jpg';
218 | const stats = fs.statSync('test' + name);
219 | if (!stats.size) {
220 | console.log('Scaled and copied image is empty or missing (2)');
221 | process.exit(1);
222 | }
223 | // We already tested remove, just do it to mop up
224 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
225 | callback(e);
226 | });
227 | }, function (err) {
228 | if (err) {
229 | console.log('Remove file fails', err);
230 | process.exit(1);
231 | }
232 | console.log('Files removed');
233 | cb();
234 | });
235 | });
236 | });
237 | }
238 | }
239 |
--------------------------------------------------------------------------------
/test-sharp.js:
--------------------------------------------------------------------------------
1 | const uploadfs = require('./uploadfs.js')();
2 | const fs = require('fs');
3 | const async = require('async');
4 | const Promise = require('bluebird');
5 | const _ = require('lodash');
6 | const path = require('path');
7 |
8 | // Test the sharp image backend
9 |
10 | const localOptions = {
11 | storage: 'local',
12 | image: 'sharp',
13 | uploadsPath: path.join(__dirname, '/test'),
14 | uploadsUrl: 'http://localhost:3000/test'
15 | };
16 |
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | const tempPath = path.join(__dirname, '/temp');
36 | const basePath = '/images/profiles/me';
37 |
38 | localOptions.imageSizes = imageSizes;
39 | localOptions.tempPath = tempPath;
40 | localOptions.backend = 'local';
41 |
42 | localTestStart(function () {
43 | let filesSeen = false;
44 | console.log('RERUN TESTS WITH TEST OF POSTPROCESSORS');
45 | localOptions.postprocessors = [
46 | {
47 | postprocessor: function(files, folder, options) {
48 | console.log('in a postprocessor');
49 | if (!(options && options.test)) {
50 | console.error('postprocessor did not receive options');
51 | process.exit(1);
52 | }
53 | if (!files) {
54 | console.error('No files array passed to postprocessor');
55 | process.exit(1);
56 | }
57 | if (!files.length) {
58 | return Promise.resolve(true);
59 | }
60 | if (!files[0].match(/\.(gif|jpg|png)$/)) {
61 | console.error('postprocessor invoked for inappropriate file extension');
62 | process.exit(1);
63 | }
64 | if (!fs.existsSync(files[0])) {
65 | console.error('postprocessor invoked for nonexistent file');
66 | process.exit(1);
67 | }
68 | if (require('path').dirname(files[0]) !== folder) {
69 | console.error('folder parameter to postprocessor is incorrect');
70 | }
71 | _.each(localOptions.imageSizes, function(size) {
72 | if (!_.find(files, function(file) {
73 | return file.match(size.name);
74 | })) {
75 | console.error('postprocessor saw no file for the size ' + size.name);
76 | process.exit(1);
77 | }
78 | });
79 | filesSeen = true;
80 | return Promise.resolve(true);
81 | },
82 | extensions: [ 'gif', 'jpg', 'png' ],
83 | options: {
84 | test: true
85 | }
86 | }
87 | ];
88 | localTestStart(function () {
89 | if (!filesSeen) {
90 | console.error('postprocessor saw no files');
91 | process.exit(1);
92 | }
93 |     console.log('Tests done');
94 | process.exit(0);
95 | });
96 | });
97 |
98 | function localTestStart(cb) {
99 | const options = localOptions;
100 |   console.log('Initializing uploadfs for the ' + options.backend + ' storage backend with the sharp image backend');
101 | uploadfs.init(options, function(e) {
102 | if (e) {
103 | console.log('uploadfs.init failed:');
104 | console.log(e);
105 | process.exit(1);
106 | }
107 |     console.log('uploadfs.init', options);
108 | testCopyImageIn();
109 | });
110 |
111 | function testCopyImageIn() {
112 | console.log('testing copyImageIn');
113 |
114 | // Note copyImageIn adds an extension for us
115 | uploadfs.copyImageIn('test.jpg', basePath, function(e, info) {
116 | if (e) {
117 | console.log('testCopyImageIn failed:');
118 | console.log(e);
119 | process.exit(1);
120 | }
121 |
122 | if (info.basePath !== '/images/profiles/me') {
123 | console.log('info.basePath is incorrect');
124 | process.exit(1);
125 | }
126 |
127 | console.log('Testing that returned image dimensions are reoriented');
128 |
129 | if ((info.width !== 1936) || (info.height !== 2592)) {
130 | console.log('Width and height missing or not reoriented for web use');
131 | console.log(info);
132 | process.exit(1);
133 | }
134 |
135 | if ((info.originalWidth !== 2592) || (info.originalHeight !== 1936)) {
136 | console.log('Original width and height missing or incorrect');
137 | console.log(info);
138 | process.exit(1);
139 | }
140 |
141 | const stats = fs.statSync('test/images/profiles/me.jpg');
142 |
143 | if (!stats.size) {
144 | console.log('Copied image is empty or missing');
145 | process.exit(1);
146 | }
147 |
148 | // We already tested remove, just do it to mop up
149 | console.log('Removing files...');
150 | uploadfs.remove('/images/profiles/me.jpg', function(e) {
151 | async.each(imageSizes, function(size, callback) {
152 | const name = info.basePath + '.' + size.name + '.jpg';
153 | const stats = fs.statSync('test' + name);
154 | if (!stats.size) {
155 | console.log('Scaled and copied image is empty or missing (2)');
156 | process.exit(1);
157 | }
158 |
159 | // We already tested remove, just do it to mop up
160 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
161 | callback();
162 | });
163 | }, function(err) {
164 | if (err) {
165 | console.log('Test failed', err);
166 | process.exit(1);
167 | }
168 | testCopyImageInCrop(cb);
169 | });
170 | }); // remove me.jpg
171 | });
172 | }
173 |
174 | function testCopyImageInCrop(cb) {
175 | console.log('testing copyImageIn with cropping');
176 |
177 | // Note copyImageIn adds an extension for us
178 | // Should grab the flowers
179 | uploadfs.copyImageIn('test.jpg', '/images/profiles/me-cropped', {
180 | crop: {
181 | top: 830,
182 | left: 890,
183 | width: 500,
184 | height: 500
185 | }
186 | }, function(e, info) {
187 | if (e) {
188 | console.log('testCopyImageIn failed:');
189 | console.log(e);
190 | process.exit(1);
191 | }
192 |
193 | if (info.basePath !== '/images/profiles/me-cropped') {
194 | console.log('info.basePath is incorrect');
195 | process.exit(1);
196 | }
197 |
198 | console.log('Testing that returned image dimensions are reoriented');
199 |
200 | if ((info.width !== 500) || (info.height !== 500)) {
201 | console.log('Reported size does not match crop');
202 | console.log(info);
203 | process.exit(1);
204 | }
205 |
206 | const stats = fs.statSync('test/images/profiles/me-cropped.jpg');
207 |
208 | if (!stats.size) {
209 | console.log('Copied image is empty or missing');
210 | process.exit(1);
211 | }
212 |
213 | // We already tested remove, just do it to mop up
214 | console.log('Removing files...');
215 | uploadfs.remove(`${basePath}-cropped.jpg`, function(e) {
216 | async.each(imageSizes, function(size, callback) {
217 | const name = info.basePath + '.' + size.name + '.jpg';
218 | const stats = fs.statSync('test' + name);
219 | if (!stats.size) {
220 | console.log('Scaled and copied image is empty or missing (2)');
221 | process.exit(1);
222 | }
223 | // We already tested remove, just do it to mop up
224 | uploadfs.remove(info.basePath + '.' + size.name + '.jpg', function(e) {
225 | callback(e);
226 | });
227 | }, function (err) {
228 | if (err) {
229 | console.log('Remove file fails', err);
230 | process.exit(1);
231 | }
232 | console.log('Files removed');
233 | cb();
234 | });
235 | });
236 | });
237 | }
238 | }
239 |
--------------------------------------------------------------------------------
/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/test.jpg
--------------------------------------------------------------------------------
/test.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/test.txt:
--------------------------------------------------------------------------------
1 | This is a test text file.
2 |
--------------------------------------------------------------------------------
/test.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/test.webp
--------------------------------------------------------------------------------
/test/azure.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const assert = require('assert');
3 | const fs = require('fs');
4 | const fetch = require('node-fetch');
5 | const uploadfs = require('../uploadfs.js')();
6 | // A JPEG is not a good default because it is exempt from GZIP so
7 | // we get less coverage. -Tom
8 | const srcFile = process.env.AZURE_TEST_FILE || 'test.txt';
9 | const infilePath = 'one/two/three/';
10 | const infile = infilePath + srcFile;
11 | const _ = require('lodash');
12 |
13 | /* helper to automate scraping files from blob svc */
14 | const _getOutfile = function(infile, done) {
15 | const tmpFileName = new Date().getTime() + srcFile;
16 | const ogFile = fs.readFileSync(srcFile, { encoding: 'utf8' });
17 |
18 | return uploadfs.copyOut(infile, tmpFileName, {}, function (e, res) {
19 | try {
20 | assert(!e, 'Azure copy out nominal success');
21 | const content = fs.readFileSync(tmpFileName, { encoding: 'utf8' });
22 | assert(content.length, 'copyOut file has length');
23 | assert(ogFile.length, 'original file body has length');
24 | // console.log(ogFile, content);
25 | assert(ogFile === content, 'Azure copy out equal to original text file');
26 | fs.unlinkSync(tmpFileName);
27 | done();
28 | } catch (ae) {
29 | done(ae);
30 | }
31 | });
32 | };
33 |
34 | describe('UploadFS Azure', function() {
35 | this.timeout(40000);
36 |
37 | const tempPath = '../temp';
38 |
39 | const azureOptions = require('../azureTestOptions.js');
40 | azureOptions.tempPath = tempPath;
41 |
42 | it('Should connect to Azure cloud successfully', function(done) {
43 | uploadfs.init(azureOptions, function(e) {
44 | if (e) {
45 | console.log('error', e);
46 | }
47 | try {
48 | assert(!e, 'Successfully initialize azure service');
49 | done();
50 | } catch (ae) {
51 | done(ae);
52 | }
53 | });
54 | });
55 |
56 |   it('getGzipBlacklist should return expected defaults if no options provided', function() {
57 | const types = uploadfs._storage.getGzipBlacklist();
58 | assert(Array.isArray(types), 'gzip blacklist array is an array');
59 |     assert(types && types.indexOf('zip') >= 0);
60 | });
61 |
62 | it('getGzipBlacklist should be able to remove a type from the blacklist based on user settings', function() {
63 | const types = uploadfs._storage.getGzipBlacklist({ zip: true });
64 | assert(Array.isArray(types), 'gzip blacklist array is an array');
65 |     assert(types && types.indexOf('zip') < 0);
66 | });
67 |
68 | it('getGzipBlacklist should be able to add a type to the blacklist based on user settings', function() {
69 | const types = uploadfs._storage.getGzipBlacklist({ foo: false });
70 | assert(Array.isArray(types), 'gzip blacklist array is an array');
71 |     assert(types && types.indexOf('foo') >= 0);
72 | });
73 |
74 |   it('getGzipBlacklist should quietly ignore `{ ext: true }` in user config if ext is not on default blacklist', function() {
75 | const types = uploadfs._storage.getGzipBlacklist({ foo: true });
76 | assert(Array.isArray(types), 'gzip blacklist array is an array');
77 |     assert(types && types.indexOf('foo') < 0, 'Filetype foo is not added to the blacklist if user wants to gzip it');
78 | });
79 |
80 | it('getGzipBlacklist should ignore duplicates', function() {
81 | const types = uploadfs._storage.getGzipBlacklist({
82 | jpg: false,
83 | zip: false
84 | });
85 | const counts = _.countBy(types);
86 | assert(counts.jpg === 1, 'No duplicate jpg type is present, despite it all');
87 | });
88 |
89 | it('Azure test copyIn should work', function(done) {
90 |
91 | uploadfs.copyIn(srcFile, infile, function(e) {
92 | if (e) {
93 | console.log('test copyIn ERR', e);
94 | }
95 | try {
96 | assert(!e, 'Azure copy in - nominal success');
97 | done();
98 | } catch (ae) {
99 | done(ae);
100 | }
101 | });
102 | });
103 |
104 | it('Azure test copyOut should work', function(done) {
105 | _getOutfile(infile, done);
106 | });
107 |
108 | it('Azure disable should work', function(done) {
109 | uploadfs.disable(infile, function(e, val) {
110 | if (e) {
111 | console.log('error', e);
112 | }
113 | try {
114 | assert(!e, 'Azure disable, nominal success');
115 | done();
116 | } catch (ae) {
117 | done(ae);
118 | }
119 | });
120 | });
121 |
122 | it('Azure test copyOut after disable should fail', function(done) {
123 | setTimeout(function() {
124 | uploadfs.copyOut(infile, 'foo.bar', {}, function(e, res) {
125 | try {
126 | assert(e);
127 | assert(e.name === 'RestError');
128 | assert(e.code === 'BlobNotFound');
129 | assert(e.statusCode === 404);
130 | done();
131 | } catch (ae) {
132 | done(ae);
133 | }
134 | });
135 | }, 5000);
136 | });
137 |
138 | it('Azure enable should work', function(done) {
139 | uploadfs.enable(infile, function(e, val) {
140 | if (e) {
141 | console.log('error', e);
142 | }
143 | try {
144 | assert(!e, 'Azure enable , nominal success');
145 | done();
146 | } catch (ae) {
147 | done(ae);
148 | }
149 | });
150 | });
151 |
152 | it('Azure test copyOut after enable should succeed', function(done) {
153 | _getOutfile(infile, done);
154 | });
155 |
156 | it('Uploadfs should return valid web-servable url pointing to uploaded file', function() {
157 | const url = uploadfs.getUrl(infile);
158 | const ogFile = fs.readFileSync(srcFile);
159 | assert(ogFile.length);
160 | assert(url);
161 |
162 | return fetch(url, {
163 | method: 'GET',
164 | headers: {
165 | 'Accept-Encoding': 'gzip'
166 | }
167 | })
168 | .then(function (response) {
169 | assert(response.status < 400, 'Bad response status');
170 | return response.buffer();
171 | })
172 | .then(function (buffer) {
173 | assert.deepStrictEqual(Buffer.compare(buffer, ogFile), 0, 'Web servable file contents equal original text file contents');
174 | });
175 | });
176 |
177 | it('Azure test remove should work', function(done) {
178 | uploadfs.remove(infile, function(e) {
179 | if (e) {
180 | console.log('error', e);
181 | }
182 | try {
183 | assert(!e, 'Azure remove, nominal success');
184 | done();
185 | } catch (ae) {
186 | done(ae);
187 | }
188 | });
189 | });
190 |
191 | it('Azure test copyOut should fail', function(done) {
192 | const tmpFileName = new Date().getTime() + '_text.txt';
193 |
194 | uploadfs.copyOut(infile, tmpFileName, {}, function (e, res) {
195 | try {
196 | assert(e);
197 | assert(e.name === 'RestError');
198 | assert(e.code === 'BlobNotFound');
199 | assert(e.statusCode === 404);
200 | done();
201 | } catch (ae) {
202 | done(ae);
203 | }
204 | });
205 | });
206 | });
207 |
--------------------------------------------------------------------------------
/test/gcs.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const assert = require('assert');
3 | const fetch = require('node-fetch');
4 |
5 | describe('UploadFS GCS', function () {
6 | this.timeout(20000);
7 | const uploadfs = require('../uploadfs.js')();
8 | const fs = require('fs');
9 | const async = require('async');
10 | const tempPath = '../temp';
11 | const dstPath = '/one/two/three/test.txt';
12 | const imageSizes = [
13 | {
14 | name: 'small',
15 | width: 320,
16 | height: 320
17 | },
18 | {
19 | name: 'medium',
20 | width: 640,
21 | height: 640
22 | },
23 | {
24 | name: 'large',
25 | width: 1140,
26 | height: 1140
27 | }
28 | ];
29 |
30 | const gcsOptions = require('../gcsTestOptions.js');
31 |
32 | gcsOptions.imageSizes = imageSizes;
33 | gcsOptions.tempPath = tempPath;
34 | gcsOptions.params = {
35 | Bucket: gcsOptions.bucket
36 | };
37 |
38 | it('uploadfs should init gcs connection without error', function(done) {
39 | return uploadfs.init(gcsOptions, function(e) {
40 | if (e) {
41 | console.log('=======E', e);
42 | }
43 | assert(!e, 'gcs init without error');
44 | uploadfs.copyIn('test.txt', dstPath, function(e) {
45 | if (e) {
46 | console.log('=======EE', e);
47 | }
48 | assert(!e, 'gcs copyIn without error');
49 | done();
50 | });
51 | });
52 | });
53 |
54 | it('CopyIn should work', function (done) {
55 | return uploadfs.copyIn('test.txt', dstPath, function(e) {
56 | assert(!e, 'gcs copyIn without error');
57 | done();
58 | });
59 | });
60 |
61 | it('CopyIn file should be available via gcs', function () {
62 | const url = uploadfs.getUrl() + '/one/two/three/test.txt';
63 | const og = fs.readFileSync('test.txt', 'utf8');
64 |
65 | return fetch(url, {
66 | method: 'GET',
67 | headers: {
68 | 'Accept-Encoding': 'gzip',
69 | 'Content-type': 'text/plain; charset=utf-8'
70 | }
71 | })
72 | .then(function (response) {
73 | assert(response.status === 200, `Request status 200 != ${response.status}`);
74 | return response.text();
75 |
76 | })
77 | .then(function (content) {
78 | assert.strictEqual(content, og, 'Res body equals uploaded file');
79 | });
80 | });
81 |
82 | it('CopyOut should work', done => {
83 | const cpOutPath = 'copy-out-test.txt';
84 | return uploadfs.copyOut(dstPath, cpOutPath, e => {
85 | assert(!e, 'gcs copyOut without error');
86 | const dl = fs.readFileSync(cpOutPath, 'utf8');
87 | const og = fs.readFileSync('test.txt', 'utf8');
88 | assert(dl === og, 'Downloaded file is equal to previous upload');
89 | done();
90 | });
91 | });
92 |
93 | it('disable / enable should work as expected', done => {
94 | return async.series({
95 | disable: cb => {
96 | uploadfs.disable(dstPath, e => {
97 | assert(!e, 'uploadfs disable no err');
98 | cb(null);
99 | });
100 | },
101 | webShouldFail: cb => {
102 | const url = uploadfs.getUrl() + dstPath;
103 | return fetch(url, {
104 | method: 'GET'
105 | })
106 | .then(function (response) {
107 | assert(response.status >= 400, 'Request on disabled resource should fail: expected 40x, got ' + response.status);
108 | cb(null);
109 | })
110 | .catch(cb);
111 | },
112 | enable: cb => {
113 | uploadfs.enable(dstPath, e => {
114 | assert(!e, 'uploadfs enable should not fail');
115 | cb(null);
116 | });
117 | },
118 | webShouldSucceed: cb => {
119 | const url = uploadfs.getUrl() + dstPath;
120 | const og = fs.readFileSync('test.txt', 'utf8');
121 |
122 | return fetch(url, {
123 | method: 'GET'
124 | })
125 | .then(function (res) {
126 | assert(res.status < 400, 'Request for enabled resource should not fail');
127 | // Don't get fussed about presence or absence of UTF-8 in this header
128 | assert(res.headers.get('content-type').match(/text\/plain/),
129 | `Check content-type header expected "text/plain" but got "${res.headers.get('content-type')}"`);
130 | return res.text();
131 | })
132 | .then(function (content) {
133 | assert.strictEqual(og, content, 'Downloaded content should be equal to previous upload');
134 | cb(null);
135 | })
136 | .catch(cb);
137 | }
138 | }, e => {
139 | assert(!e, 'Series should succeed');
140 | done();
141 | });
142 | });
143 |
144 | it('remove should work', done => {
145 | return uploadfs.remove(dstPath, e => {
146 | assert(!e, 'Remove should succeed');
147 |
148 | setTimeout(() => {
149 | const url = uploadfs.getUrl() + dstPath;
150 | fetch(url, {
151 | method: 'GET'
152 | })
153 | .then(function (res) {
154 | assert(res.status >= 400, 'Removed file is gone from gcs');
155 | done();
156 | })
157 | .catch(done);
158 | }, 5000);
159 | });
160 | });
161 |
162 | it('copyImageIn should work', done => {
163 | const imgDstPath = '/images/profiles/me';
164 |
165 | uploadfs.copyImageIn('test.jpg', imgDstPath, (e, info) => {
166 | assert(!e, 'gcs copyImageIn works');
167 |
168 | setTimeout(() => {
169 | const url = uploadfs.getUrl();
170 | const paths = [ info.basePath + '.jpg' ];
171 |
172 | paths.push(info.basePath + '.small.jpg');
173 | paths.push(info.basePath + '.medium.jpg');
174 | paths.push(info.basePath + '.large.jpg');
175 |
176 | async.map(paths, (path, cb) => {
177 | const imgPath = url + path;
178 |
179 | fetch(imgPath, {
180 | method: 'GET'
181 | })
182 | .then(function (res) {
183 | assert(res.status === 200, `Request status 200 != ${res.status}`);
184 | /* @@TODO we should test the correctness of uploaded images */
185 |
186 | // clean up
187 | uploadfs.remove(path, e => {
188 | assert(!e, 'Remove uploaded file after testing');
189 | return cb();
190 | });
191 | })
192 | .catch(cb);
193 | }, e => {
194 | assert(!e, 'Can request all copyImageInned images');
195 | done();
196 | });
197 | // end async.map
198 | }, 5000);
199 | });
200 | });
201 | });
202 |
--------------------------------------------------------------------------------
/test/local.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const Mode = require('stat-mode');
3 | const assert = require('assert');
4 | const path = require('path');
5 |
6 | describe('UploadFS Local', function () {
7 | this.timeout(4500);
8 | const uploadfs = require('../uploadfs.js')();
9 | const fs = require('fs');
10 | const async = require('async');
11 | const tempPath = path.join(__dirname, '/temp');
12 | const localOptions = {
13 | storage: 'local',
14 | uploadsPath: path.join(__dirname, '/files/'),
15 | uploadsUrl: 'http://localhost:3000/test/'
16 | };
17 | const imageSizes = [
18 | {
19 | name: 'small',
20 | width: 320,
21 | height: 320
22 | },
23 | {
24 | name: 'medium',
25 | width: 640,
26 | height: 640
27 | },
28 | {
29 | name: 'large',
30 | width: 1140,
31 | height: 1140
32 | }
33 | ];
34 |
35 | localOptions.imageSizes = imageSizes;
36 | localOptions.tempPath = tempPath;
37 |
38 | it('Should instantiate uploadfs module without errors', done => {
39 | return uploadfs.init(localOptions, e => {
40 | assert(!e);
41 | done();
42 | });
43 | });
44 |
45 | it('copyIn should work for local filesystem', done => {
46 | return uploadfs.copyIn('./test.txt', '/test_copy.txt', e => {
47 | assert(!e);
48 | const og = fs.readFileSync('./test.txt', 'utf8');
49 | const next = fs.readFileSync('./test/files/test_copy.txt', 'utf8');
50 | assert(og.length, 'lengthy');
51 | assert(next.length, 'lengthy');
52 | assert(og === next, 'Copies are equal');
53 | done();
54 | });
55 | });
56 |
57 | it('copyOut should work for local filesystem', done => {
58 | return uploadfs.copyOut('/test_copy.txt', 'copy-out-test.txt', e => {
59 | assert(!e);
60 | const og = fs.readFileSync('./test.txt', 'utf8');
61 | const next = fs.readFileSync('./copy-out-test.txt', 'utf8');
62 | assert(og.length, 'lengthy');
63 | assert(next.length, 'lengthy');
64 | assert(og === next, 'Copied files are equal');
65 | done();
66 | });
67 | });
68 |
69 | it('streamOut should work for local filesystem', async function() {
70 | const input = uploadfs.streamOut('/test_copy.txt');
71 | const chunks = [];
72 | for await (let chunk of input) {
73 | chunks.push(chunk);
74 | }
75 | const data = Buffer.concat(chunks);
76 | const og = fs.readFileSync('test.txt');
77 | assert(data.equals(og));
78 | });
79 |
80 | it('overwrite with copyIn should work for local filesystem', done => {
81 | return uploadfs.copyIn('./test2.txt', '/test_copy.txt', e => {
82 | assert(!e);
83 | const og = fs.readFileSync('./test2.txt', 'utf8');
84 | const next = fs.readFileSync('./test/files/test_copy.txt', 'utf8');
85 | assert(og.length, 'lengthy');
86 | assert(next.length, 'lengthy');
87 | assert(og === next, 'Copies are equal');
88 | done();
89 | });
90 | });
91 |
92 | it('copyOut should see update for local filesystem', done => {
93 | return uploadfs.copyOut('/test_copy.txt', 'copy-out-test.txt', e => {
94 | assert(!e);
95 | const og = fs.readFileSync('./test2.txt', 'utf8');
96 | const next = fs.readFileSync('./copy-out-test.txt', 'utf8');
97 | assert(og.length, 'lengthy');
98 | assert(next.length, 'lengthy');
99 | assert(og === next, 'Copied files are equal');
100 | done();
101 | });
102 | });
103 |
104 | it('Test disable / enable functionality', done => {
105 | const srcFile = '/test_copy.txt';
106 | const infile = './test/files/test_copy.txt';
107 |
108 | return async.series({
109 | disable: cb => {
110 | assert(fs.existsSync(infile), 'copyIn file exists');
111 |
112 | uploadfs.disable(srcFile, e => {
113 | const stats = fs.statSync(infile);
114 | const mode = new Mode(stats);
115 | assert(!e, 'uploadfs disable success!');
116 | assert(mode.toString() === '----------', 'File permissions locked down');
117 | return cb(null);
118 | });
119 | },
120 | enable: cb => {
121 | uploadfs.enable(srcFile, e => {
122 | const stats = fs.statSync(infile);
123 | const mode = new Mode(stats);
124 | assert(!e, 'uploadfs enable success!');
125 | assert(mode.toString() === '-rw-r--r--', 'Enabled file has expected permissions');
126 | assert(fs.existsSync(infile), 'copyIn visible to fs');
127 | return cb(null);
128 | });
129 | },
130 | testCopyOut: cb => {
131 | const outsucceeds = 'copy-out-test.txt';
132 | uploadfs.copyOut(srcFile, outsucceeds, e => {
133 | assert(!e, 'copyOut should succeed after enable');
134 | return cb(null);
135 | });
136 | },
137 | testDelete: cb => {
138 | uploadfs.remove(srcFile, e => {
139 | assert(!e, 'Delete file succeeds');
140 | assert(!fs.existsSync(infile), 'uploadfs delete file is gone from local fs');
141 | return cb(null);
142 | });
143 | }
144 | }, function (e) {
145 | fs.unlinkSync('copy-out-test.txt');
146 | assert(!e);
147 | done();
148 | });
149 | });
150 |
151 | it('Should destroy uploadfs module without errors', done => {
152 | return uploadfs.destroy(e => {
153 | assert(!e);
154 | done();
155 | });
156 | });
157 |
158 | });
159 |
--------------------------------------------------------------------------------
/test/one/two/three/test.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/test/one/two/three/test.txt
--------------------------------------------------------------------------------
/test/s3.js:
--------------------------------------------------------------------------------
1 | /* global describe, it */
2 | const assert = require('assert');
3 | const fetch = require('node-fetch');
4 | const exec = require('child_process').execSync;
5 | const util = require('util');
6 | const fs = require('fs');
7 |
8 | describe('UploadFS S3', function () {
9 | this.timeout(50000);
10 | const uploadfs = require('../uploadfs.js')();
11 | const init = util.promisify(uploadfs.init);
12 | const remove = util.promisify(uploadfs.remove);
13 | const copyIn = util.promisify(uploadfs.copyIn);
14 | const copyImageIn = util.promisify(uploadfs.copyImageIn);
15 | const copyOut = util.promisify(uploadfs.copyOut);
16 | const enable = util.promisify(uploadfs.enable);
17 | const disable = util.promisify(uploadfs.disable);
18 |
19 | const fs = require('fs');
20 | const tempPath = '../temp';
21 | const dstPath = '/one/two/three/test.txt';
22 | const imageSizes = [
23 | {
24 | name: 'small',
25 | width: 320,
26 | height: 320
27 | },
28 | {
29 | name: 'medium',
30 | width: 640,
31 | height: 640
32 | },
33 | {
34 | name: 'large',
35 | width: 1140,
36 | height: 1140
37 | }
38 | ];
39 |
40 | const s3Options = {
41 | storage: 's3',
42 | // Usually not set so we get sharp, with imagemagick fallback (the default behavior)
43 | image: process.env.UPLOADFS_TEST_IMAGE,
44 | bucket: process.env.UPLOADFS_TEST_S3_BUCKET,
45 | key: process.env.UPLOADFS_TEST_S3_KEY,
46 | secret: process.env.UPLOADFS_TEST_S3_SECRET,
47 | region: process.env.UPLOADFS_TEST_S3_REGION
48 | };
49 |
50 | s3Options.imageSizes = imageSizes;
51 | s3Options.tempPath = tempPath;
52 |
53 | it('S3 Should init s3 connection without error', async function() {
54 | await init(s3Options);
55 | await copyIn('test.txt', dstPath);
56 | });
57 |
58 | it('S3 should store and retrieve a .tar.gz file without double-gzipping it', async function() {
59 | await copyIn(`${__dirname}/test.tar.gz`, '/test.tar.gz');
60 | // Is it returned in identical form using copyOut?
61 | await copyOut('/test.tar.gz', `${__dirname}/test2.tar.gz`);
62 | identical(`${__dirname}/test.tar.gz`, `${__dirname}/test2.tar.gz`);
63 | fs.unlinkSync(`${__dirname}/test2.tar.gz`);
64 | // Is it returned in identical form using fetch and the public URL of the file?
65 | const url = uploadfs.getUrl() + '/test.tar.gz';
66 | // curl and the browser exhibit the same confused behavior
67 | // unless .gz has a content type in contentTypes.js and
68 | // is also declared in noGzipContentTypes.js. For whatever
69 | // reason node-fetch doesn't get confused so we test with curl
70 | exec(`curl ${url} --output ${__dirname}/test3.tar.gz`);
71 | identical(`${__dirname}/test.tar.gz`, `${__dirname}/test3.tar.gz`);
72 | fs.unlinkSync(`${__dirname}/test3.tar.gz`);
73 | await remove('/test.tar.gz');
74 | });
75 |
76 | it('CopyIn should work', async function() {
77 | await copyIn('test.txt', dstPath);
78 | });
79 |
80 | it('CopyIn file should be available via s3', async function () {
81 | const url = uploadfs.getUrl() + '/one/two/three/test.txt';
82 | const og = fs.readFileSync('test.txt', 'utf8');
83 |
84 | const response = await fetch(url, {
85 | method: 'GET',
86 | headers: {
87 | 'Accept-Encoding': 'gzip',
88 | 'Content-type': 'text/plain; charset=utf-8'
89 | }
90 | });
91 | assert(response.status === 200, `Request status 200 != ${response.status}`);
92 | const body = await response.text();
93 | assert(body === og, 'Res body equals uploaded file');
94 | });
95 |
96 | it('S3 streamOut should work', async function() {
97 | const input = uploadfs.streamOut(dstPath);
98 | const chunks = [];
99 | for await (let chunk of input) {
100 | chunks.push(chunk);
101 | }
102 | const data = Buffer.concat(chunks);
103 | const og = fs.readFileSync('test.txt');
104 | assert(data.equals(og), 'Streamed file is equal to previous upload');
105 | });
106 |
107 | it('S3 streamOut should handle an error status code from S3 sensibly', async function() {
108 | try {
109 | const input = uploadfs.streamOut('made/up/path');
110 | let status;
111 | try {
112 | // This should fail
113 | const chunks = [];
114 | for await (let chunk of input) {
115 | chunks.push(chunk);
116 | }
117 | } catch (e) {
118 | status = e.statusCode;
119 | }
120 | assert(status >= 400);
121 | } catch (e) {
122 | throw e; // rethrow so an assertion failure is not silently swallowed
123 | }
124 | });
125 |
126 | it('S3 CopyOut should work', async function() {
127 | const cpOutPath = 'copy-out-test.txt';
128 | await copyOut(dstPath, cpOutPath);
129 | const dl = fs.readFileSync(cpOutPath, 'utf8');
130 | const og = fs.readFileSync('test.txt', 'utf8');
131 | assert(dl === og, 'Downloaded file is equal to previous upload');
132 | });
133 |
134 | it('S3 Disable / Enable should work as expected', async function() {
135 | await disable(dstPath);
136 | await assert.rejects(testWeb());
137 | await enable(dstPath);
138 | await testWeb();
139 |
140 | async function testWeb() {
141 | const og = fs.readFileSync('test.txt', 'utf8');
142 | const url = uploadfs.getUrl() + dstPath;
143 | const res = await fetch(url, {
144 | method: 'GET',
145 | headers: {
146 | 'Accept-Encoding': 'gzip'
147 | }
148 | });
149 | if (res.status >= 400) {
150 | throw res;
151 | }
152 | const body = await res.text();
153 | assert(res.headers.get('content-type') === 'text/plain', 'Check content-type header');
154 | assert(og === body, 'Downloaded content should be equal to previous upload');
155 | }
156 | });
157 |
158 | it('S3 uploadfs Remove should work', async function() {
159 | await remove(dstPath);
160 | const url = uploadfs.getUrl() + dstPath;
161 | const res = await fetch(url, {
162 | method: 'GET',
163 | headers: {
164 | 'Accept-Encoding': 'gzip'
165 | }
166 | });
167 | assert(res.status >= 400, 'Removed file is gone from s3');
168 | });
169 |
170 | it('S3 uploadfs copyImageIn should work', async function() {
171 | const imgDstPath = '/images/profiles/me';
172 |
173 | const info = await copyImageIn('test.jpg', imgDstPath);
174 | const url = uploadfs.getUrl();
175 | const paths = [ info.basePath + '.jpg' ];
176 |
177 | paths.push(info.basePath + '.small.jpg');
178 | paths.push(info.basePath + '.medium.jpg');
179 | paths.push(info.basePath + '.large.jpg');
180 |
181 | for (const path of paths) {
182 | const imgPath = url + path;
183 | const res = await fetch(imgPath, {
184 | method: 'GET',
185 | headers: {
186 | 'Accept-Encoding': 'gzip'
187 | }
188 | });
189 | assert(res.status === 200);
190 | // Not suitable for images, make sure we didn't force it
191 | assert(res.headers.get('content-encoding') !== 'gzip');
192 | const buffer = await res.buffer();
193 | // JPEG magic number check
194 | assert(buffer[0] === 0xFF);
195 | assert(buffer[1] === 0xD8);
196 | await remove(path);
197 | }
198 | });
199 |
200 | it('S3 uploadfs copyImageIn should work with custom sizes', async function() {
201 | const imgDstPath = '/images/profiles/me';
202 |
203 | const customSizes = [
204 | {
205 | name: 'tiny',
206 | width: 80,
207 | height: 80
208 | },
209 | {
210 | name: 'nice',
211 | width: 120,
212 | height: 120
213 | }
214 | ];
215 |
216 | const info = await copyImageIn('test.jpg', imgDstPath, { sizes: customSizes });
217 |
218 | const url = uploadfs.getUrl();
219 | // Default should be https
220 | assert(url.startsWith('https://'));
221 | const paths = [ info.basePath + '.jpg' ];
222 |
223 | paths.push(info.basePath + '.tiny.jpg');
224 | paths.push(info.basePath + '.nice.jpg');
225 |
226 | for (const path of paths) {
227 | const imgPath = url + path;
228 | const res = await fetch(imgPath, {
229 | method: 'GET',
230 | headers: {
231 | 'Accept-Encoding': 'gzip'
232 | }
233 | });
234 | assert(res.status === 200);
235 | // Not suitable for images, make sure we didn't force it
236 | assert(res.headers.get('content-encoding') !== 'gzip');
237 | const buffer = await res.buffer();
238 | // JPEG magic number check
239 | assert(buffer[0] === 0xFF);
240 | assert(buffer[1] === 0xD8);
241 | await remove(path);
242 | }
243 | });
244 | });
245 |
246 | describe('UploadFS S3 with private ACL', function () {
247 | this.timeout(50000);
248 | const uploadfs = require('../uploadfs.js')();
249 | const init = util.promisify(uploadfs.init);
250 | const remove = util.promisify(uploadfs.remove);
251 | const copyIn = util.promisify(uploadfs.copyIn);
252 | const copyOut = util.promisify(uploadfs.copyOut);
253 | const enable = util.promisify(uploadfs.enable);
254 | const disable = util.promisify(uploadfs.disable);
255 |
256 | const fs = require('fs');
257 | const tempPath = '../temp';
258 | const dstPath = '/one/two/three/test2.txt';
259 |
260 | const s3Options = {
261 | storage: 's3',
262 | // Usually not set so we get sharp, with imagemagick fallback (the default behavior)
263 | image: process.env.UPLOADFS_TEST_IMAGE,
264 | bucket: process.env.UPLOADFS_TEST_S3_BUCKET,
265 | key: process.env.UPLOADFS_TEST_S3_KEY,
266 | secret: process.env.UPLOADFS_TEST_S3_SECRET,
267 | region: process.env.UPLOADFS_TEST_S3_REGION,
268 | bucketObjectsACL: 'private',
269 | disabledBucketObjectsACL: 'private',
270 | tempPath
271 | };
272 |
273 | it('initialize uploadfs', async function() {
274 | await init(s3Options);
275 | });
276 |
277 | it('test with alternate ACLs', async function() {
278 | await copyIn('test.txt', dstPath);
279 | await testCopyOut();
280 | await assert.rejects(testWeb());
281 | await disable(dstPath);
282 | await assert.rejects(testWeb());
283 | await testCopyOut();
284 | await enable(dstPath);
285 | await assert.rejects(testWeb());
286 | await testCopyOut();
287 | await remove(dstPath);
288 | });
289 |
290 | async function testCopyOut() {
291 | await copyOut(dstPath, `${tempPath}/test2.txt`);
292 | identical('test.txt', `${tempPath}/test2.txt`);
293 | fs.unlinkSync(`${tempPath}/test2.txt`);
294 | }
295 |
296 | async function testWeb() {
297 | const url = uploadfs.getUrl() + dstPath;
298 | const response = await fetch(url);
299 | if (response.status >= 400) {
300 | console.log(response.status);
301 | throw response;
302 | }
303 | }
304 | });
305 |
306 | function identical(f1, f2) {
307 | const data1 = fs.readFileSync(f1);
308 | const data2 = fs.readFileSync(f2);
309 | if (data1.compare(data2) !== 0) {
310 | throw new Error(`${f1} and ${f2} are not identical.`);
311 | }
312 | }
313 |
--------------------------------------------------------------------------------
/test/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/test/test.jpg
--------------------------------------------------------------------------------
/test/test.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apostrophecms/uploadfs/d3a6d707d4ca4fd4012dccb01962e4af441fca8e/test/test.tar.gz
--------------------------------------------------------------------------------
/test/test.txt:
--------------------------------------------------------------------------------
1 | This is a test text file.
2 |
--------------------------------------------------------------------------------
/test2.txt:
--------------------------------------------------------------------------------
1 | This is a test text file, slightly different.
2 |
--------------------------------------------------------------------------------
/uploadfs.js:
--------------------------------------------------------------------------------
1 | const _ = require('lodash');
2 | const async = require('async');
3 | const crypto = require('crypto');
4 | const fs = require('fs');
5 | const { rimraf } = require('rimraf');
6 | const delimiter = require('path').delimiter;
7 |
8 | function generateId() {
9 | return crypto.randomBytes(16).toString('hex');
10 | }
11 |
12 | /**
13 | * Instantiates Uploadfs.
14 | * @class Represents an instance of Uploadfs. Usually you only want one.
15 | */
16 |
17 | function Uploadfs() {
18 | let tempPath, imageSizes;
19 | let scaledJpegQuality;
20 | let ensuredTempDir = false;
21 | const self = this;
22 | /**
23 | * Initialize uploadfs. The init method passes options to the backend and invokes a callback when the backend is ready.
24 | * @param {Object} options: backend, imageSizes, orientOriginals, tempPath, copyOriginal, scaledJpegQuality, contentType, cdn. backend is the only mandatory option. See the README and individual methods for details.
25 |  * @param {Object} options.cdn - An object that defines CDN settings
26 |  * @param {Boolean} [options.cdn.enabled=true] - Whether the CDN should be enabled
27 |  * @param {String} options.cdn.url - The CDN URL
28 | * @param {Function} callback - Will receive the usual err argument
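 * @example
 * // A minimal sketch; paths, URLs and sizes are illustrative:
 * uploadfs.init({
 *   storage: 'local',
 *   uploadsPath: __dirname + '/public/uploads',
 *   uploadsUrl: 'http://localhost:3000/uploads',
 *   tempPath: __dirname + '/temp',
 *   imageSizes: [ { name: 'small', width: 320, height: 320 } ]
 * }, function (err) {
 *   if (err) { throw err; }
 *   // uploadfs is ready for copyIn, copyImageIn, etc.
 * });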
29 | */
30 | self.init = function (options, callback) {
31 | self.options = options;
32 | self.prefix = self.options.prefix || '';
33 | // bc: support options.backend
34 | self._storage = options.storage || options.backend;
35 | if (!self._storage) {
36 | return callback('Storage backend must be specified');
37 | }
38 | // Load standard storage backends, by name. You can also pass an object
39 | // with your own implementation
40 | if (typeof self._storage === 'string') {
41 | let library;
42 | try {
43 | library = require('./lib/storage/' + self._storage + '.js');
44 | } catch (e) {
45 | console.error(
46 | 'Unable to require the ' +
47 | self._storage +
48 | ' storage backend, your node version may be too old for it'
49 | );
50 | return callback(e);
51 | }
52 | self._storage = library();
53 | }
54 |
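// A custom storage object passed directly must implement the methods this
// module calls on it: init, copyIn, copyOut, remove, enable, disable,
// getUrl, and streamOut if you use it (destroy and the
// migrate*DisabledFileKey methods are optional). A sketch, where
// myCustomStorage is illustrative:
//
//   uploadfs.init({ storage: myCustomStorage, tempPath: '/tmp' }, callback);
//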
55 | // If you want to deliver your images
56 | // over a CDN then this could be set in options
57 | if (options.cdn !== undefined) {
58 | if (
59 | !_.isObject(options.cdn) ||
60 | !_.isString(options.cdn.url) ||
61 | (options.cdn.enabled !== undefined && !_.isBoolean(options.cdn.enabled))
62 | ) {
63 | return callback(
64 | 'CDN must be a valid object: {enabled: boolean, url: string}'
65 | );
66 | }
67 | if (options.cdn.enabled === undefined) {
68 | options.cdn.enabled = true;
69 | }
70 | self.cdn = options.cdn;
71 | }
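// For reference, a valid cdn option has this shape (the url is illustrative):
//   { cdn: { enabled: true, url: 'https://cdn.example.com/uploads' } }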
72 |
73 | // Load image backend
74 | self._image = options.image;
75 | // Throw warnings about deprecated processors or load default
76 | if (self._image === 'jimp' || self._image === 'imagecrunch') {
77 | console.error(
78 | 'The specified processor is no longer supported, defaulting to the sharp.js library.'
79 | );
80 | self._image = 'sharp';
81 | }
82 |
83 | let fallback = false;
84 | // if the processor is passed as a string (including 'imagemagick' or 'sharp'), try to load it or fail with a warning
85 | // if undefined, try to load sharp and fall back to imagemagick on failure
86 | if (typeof self._image === 'string' || self._image === undefined) {
87 | self._image = self._image === undefined ? 'sharp' : self._image;
88 | try {
89 | const requiring = `./lib/image/${self._image}.js`;
90 | self._image = require(requiring)();
91 | } catch (e) {
92 | console.error(e);
93 | if (self._image === 'sharp') {
94 | console.error(
95 | 'Sharp not available on this operating system. Trying to fall back to imagemagick.'
96 | );
97 | fallback = true;
98 | } else {
99 | return callback('The specified processor was not found.');
100 | }
101 | }
102 | }
103 |
104 | if (fallback) {
105 | // Check for presence of imagemagick - if we fail sharp load it doesn't mean imagemagick is there
106 | const paths = (process.env.PATH || '').split(delimiter);
107 | if (
108 | _.find(paths, function (p) {
109 | // Allow for Windows and Unix filenames for identify. Silly oversight
110 | // after getting delimiter right (:
111 | if (
112 | fs.existsSync(p + '/identify') ||
113 | fs.existsSync(p + '/identify.exe')
114 | ) {
115 | return true;
116 | }
117 | })
118 | ) {
119 | self._image = require('./lib/image/imagemagick.js')();
120 | } else {
121 | return callback('No supported image processor found.');
122 | }
123 | }
124 |
125 | // Reasonable default JPEG quality setting for scaled copies. Imagemagick's
126 | // default quality is the quality of the original being converted, which is
127 | // usually a terrible idea when the original is a super hi-res image; its
128 | // fallback default of 92 is still sky high and produces very large files,
129 | // so we default to 80 here.
130 |
131 | scaledJpegQuality = options.scaledJpegQuality || 80;
132 |
133 | imageSizes = options.imageSizes || [];
134 |
135 | tempPath = options.tempPath;
136 |
137 | async.series(
138 | [
139 | // create temp folder if needed
140 | function (callback) {
141 | if (!imageSizes.length) {
142 | return callback();
143 | }
144 |
145 | ensureTempDir();
146 | return callback(null);
147 | },
148 |
149 | // invoke storage backend init with options
150 | function (callback) {
151 | return self._storage.init(options, callback);
152 | },
153 |
154 | // invoke image backend init with options
155 | function (callback) {
156 | return self._image.init(options, callback);
157 | }
158 | ],
159 | callback
160 | );
161 | };
162 |
163 | /**
164 | * The copyIn method takes a local filename and copies it to a path in uploadfs. Any intermediate folders that do not exist are automatically created if the storage requires such things. Just copy things where you want them to go.
165 |  * @param {String} localPath The local filename
166 |  * @param {String} path The path in uploadfs, begins with /
167 |  * @param {Object} [options] Options (passed to storage). May be skipped
168 | * @param {Function} callback Will receive the usual err argument
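 * @example
 * // A minimal sketch; the paths are illustrative:
 * uploadfs.copyIn('/tmp/report.txt', '/attachments/report.txt', function (err) {
 *   if (err) { return console.error(err); }
 *   // the file is now stored at /attachments/report.txt in uploadfs
 * });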
169 | */
170 | self.copyIn = function (localPath, path, options, callback) {
171 | if (typeof options === 'function') {
172 | callback = options;
173 | options = {};
174 | }
175 | path = prefixPath(path);
176 | return self._storage.copyIn(localPath, path, options, callback);
177 | };
178 |
179 | /**
180 | * Obtain the temporary folder used for intermediate files created by copyImageIn. Can also be useful when doing your own manipulations with copyOut.
181 | * @see Uploadfs#copyOut
182 | */
183 | self.getTempPath = function () {
184 | return tempPath;
185 | };
186 |
187 | /**
188 | * The copyOut method takes a path in uploadfs and a local filename and copies the file back from uploadfs to the local filesystem. This should be used only rarely. Heavy reliance on this method sets you up for poor performance in S3. However it may be necessary at times, for instance when you want to crop an image differently later. Use it only for occasional operations like cropping.
189 | * @param {String} path Path in uploadfs (begins with /)
190 | * @param {String} localPath Path in the local filesystem to copy to
191 | * @param {Object} options Options (passed to backend). May be skipped
192 | * @param {Function} callback Receives the usual err argument
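 * @example
 * // A minimal sketch; the paths are illustrative:
 * uploadfs.copyOut('/attachments/report.txt', '/tmp/report-local.txt', function (err) {
 *   if (err) { return console.error(err); }
 *   // /tmp/report-local.txt now holds a local copy
 * });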
193 | */
194 | self.copyOut = function (path, localPath, options, callback) {
195 | path = prefixPath(path);
196 | if (typeof options === 'function') {
197 | callback = options;
198 | options = {};
199 | }
200 | return self._storage.copyOut(path, localPath, options, callback);
201 | };
202 |
203 | /**
204 |  * The streamOut method takes a path in uploadfs and returns a readable stream of the file's contents. This should be used only rarely. Heavy reliance on this method sets you up for poor performance in S3. However it may be necessary at times, for instance when access to files must be secured on a request-by-request basis.
205 | * @param {String} path Path in uploadfs (begins with /)
206 | * @param {Object} options Options (passed to backend). May be skipped
207 |  * @returns {stream.Readable} A readable stream; on failure an error is emitted on the stream, and for HTTP-based backends the error has a statusCode property
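 * @example
 * // A minimal sketch; the path is illustrative and `res` stands in for an
 * // HTTP response (or any writable stream):
 * uploadfs.streamOut('/attachments/report.txt')
 *   .on('error', function (err) {
 *     // for HTTP-based backends err.statusCode is available here
 *   })
 *   .pipe(res);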
208 | */
209 | self.streamOut = function (path, options) {
210 | path = prefixPath(path);
211 | return self._storage.streamOut(path, options);
212 | };
213 |
214 | /**
215 | * Copy an image into uploadfs. Scaled versions as defined by the imageSizes option
216 | * passed at init() time, or as overridden by `options.sizes` on this call,
217 | * are copied into uploadfs as follows:
218 | *
219 | * If 'path' is '/me' and sizes with names 'small', 'medium' and 'large'
220 | * were defined at init() time, the scaled versions will be:
221 | *
222 | * '/me.small.jpg', '/me.medium.jpg', '/me.large.jpg'
223 | *
224 | * And the original file will be copied to:
225 | *
226 | * '/me.jpg'
227 | *
228 | * Note that a file extension is added automatically. If you provide a
229 | * file extension in 'path' it will be honored when copying the original only.
230 | * The scaled versions will get appropriate extensions for their format
231 |  * as detected by the image backend.
232 | *
233 | * If there is no error the second argument passed to the callback will
234 | * be an object with a 'basePath' property containing your original path
235 | * with the file extension removed and an 'extension' property containing
236 | * the automatically added file extension, as a convenience for locating the
237 | * original and scaled versions just by adding .jpg, .small.jpg, .medium.jpg,
238 | * etc.
239 | *
240 | * Scaled versions have the same file format as the original and are no wider
241 | * or taller than specified by the width and height properties of the
242 | * corresponding size, with the aspect ratio always being preserved.
243 | * If options.copyOriginal is explicitly false, the original image is
244 | * not copied into uploadfs at all.
245 | *
246 | * If options.crop is present, the image is cropped according to the
247 | * top, left, width and height properties of options.crop. All properties must be integers.
248 | * If cropping is done, it is performed first before scaling.
249 | *
250 | * IMPORTANT: if options.crop is present, the uncropped original is
251 | * NOT copied into uploadfs. The cropped version is what is copied
252 | * to "path." If you want the uncropped original too, make a separate call
253 | * to copyIn. A common pattern is to copy the original when an image
254 | * is first uploaded, and to perform crops and save them under other names
255 | * later, when a user decides they want cropped versions.
256 | *
257 |  * Image scaling is performed by the configured image backend (sharp by
258 |  * default, falling back to imagemagick if sharp is unavailable). In no case
259 |  * is an image ever scaled to be larger than the original. Scaled versions
260 |  * of images with an orientation hint, such as iPhone photographs, are
261 |  * automatically rotated so that they will display properly in web browsers.
262 | *
263 | * @param {String} localPath Local filesystem path of existing image file
264 | * @param {String} path Path in uploadfs to copy original to. Leave off the extension to autodetect the true type. Path begins with /
265 | * @param {Object} options Options: scaledJpegQuality, copyOriginal, crop (see above)
266 | * @param {Function} callback Receives the usual err argument
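 * @example
 * // A minimal sketch; paths are illustrative, and sizes named 'small',
 * // 'medium' and 'large' are assumed to have been configured at init() time:
 * uploadfs.copyImageIn('/tmp/photo.jpg', '/profiles/me', function (err, info) {
 *   if (err) { return console.error(err); }
 *   // original: info.basePath + '.' + info.extension        -> /profiles/me.jpg
 *   // scaled:   info.basePath + '.small.' + info.extension  -> /profiles/me.small.jpg
 * });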
267 | */
268 |
269 | self.copyImageIn = function (localPath, path, options, callback) {
270 | // We do not call prefixPath here because we rely on copyIn, which does
271 |
272 | if (typeof options === 'function') {
273 | callback = options;
274 | options = {};
275 | }
276 |
277 | const sizes = options.sizes || imageSizes;
278 |
279 | ensureTempDir();
280 |
281 | // We'll pass this context to the image processing backend with
282 | // additional properties
283 | const context = {
284 | crop: options.crop,
285 | sizes
286 | };
287 |
288 | context.scaledJpegQuality = options.scaledJpegQuality || scaledJpegQuality;
289 |
290 | // Identify the file type, size, etc. Stuff them into context.info and
291 | // context.extension
292 |
293 | function identify(path, callback) {
294 | return self.identifyLocalImage(path, function (err, info) {
295 | if (err) {
296 | return callback(err);
297 | }
298 | context.info = info;
299 | context.extension = info.extension;
300 | return callback(null);
301 | });
302 | }
303 |
304 | let originalDone = false;
305 | const copyOriginal = options.copyOriginal !== false;
306 | let originalPath;
307 |
308 | async.series(
309 | {
310 | // Identify the file
311 | identify: function (callback) {
312 | return identify(localPath, function (err) {
313 | if (err) {
314 | return callback(err);
315 | }
316 | return callback(null);
317 | });
318 | },
319 | // make a temporary folder for our work
320 | temporary: function (callback) {
321 | // Name the destination folder
322 | context.tempName = generateId();
323 | // Create destination folder
324 | if (sizes.length) {
325 | context.tempFolder = tempPath + '/' + context.tempName;
326 | return fs.mkdir(context.tempFolder, callback);
327 | } else {
328 | return callback(null);
329 | }
330 | },
331 | // Determine base path in uploadfs, working path for temporary files,
332 | // and final uploadfs path of the original
333 | paths: function (callback) {
334 | context.basePath = path.replace(/\.\w+$/, '');
335 | context.workingPath = localPath;
336 |
337 | // Indulge their wild claims about the extension the original
338 | // should have if any, otherwise provide the truth from identify
339 | if (path.match(/\.\w+$/)) {
340 | originalPath = path;
341 | } else {
342 | originalPath = path + '.' + context.extension;
343 | }
344 | return callback(null);
345 | },
346 | copyOriginal: function (callback) {
347 | // If there are no transformations of the original, copy it
348 | // in directly
349 | if (
350 | !copyOriginal ||
351 | options.orientOriginals !== false ||
352 | options.crop
353 | ) {
354 | return callback(null);
355 | }
356 | originalDone = true;
357 | return self.copyIn(localPath, originalPath, options, callback);
358 | },
359 |
360 | convert: function (callback) {
361 | context.copyOriginal = copyOriginal && !originalDone;
362 | return async.series([ convert, postprocess ], callback);
363 | function convert(callback) {
364 | return self._image.convert(context, callback);
365 | }
366 | function postprocess(callback) {
367 | if (!context.tempFolder) {
368 | // Nowhere to do the work
369 | return callback(null);
370 | }
371 | const filenames = _.map(sizes, function (size) {
372 | return (
373 | context.tempFolder + '/' + size.name + '.' + context.extension
374 | );
375 | });
376 | return self.postprocess(filenames, callback);
377 | }
378 | },
379 |
380 | reidentify: function (callback) {
381 | if (!context.adjustedOriginal) {
382 | return callback(null);
383 | }
384 | // Push and pop the original size properties as we determined
385 | // those on the first identify and don't want to return the values
386 | // for the cropped and/or reoriented version
387 | const originalWidth = context.info.originalWidth;
388 | const originalHeight = context.info.originalHeight;
389 | return identify(context.adjustedOriginal, function (err) {
390 | if (err) {
391 | return callback(err);
392 | }
393 | context.info.originalWidth = originalWidth;
394 | context.info.originalHeight = originalHeight;
395 | return callback(null);
396 | });
397 | },
398 |
399 | copySizes: function (callback) {
400 | return async.each(
401 | sizes,
402 | function (size, callback) {
403 | const suffix = size.name + '.' + context.extension;
404 | const tempFile = context.tempFolder + '/' + suffix;
405 | const permFile = context.basePath + '.' + suffix;
406 | return self.copyIn(tempFile, permFile, options, callback);
407 | },
408 | callback
409 | );
410 | },
411 |
412 | copyAdjustedOriginal: function (callback) {
413 | if (!context.adjustedOriginal) {
414 | return callback(null);
415 | }
416 | return self.copyIn(
417 | context.adjustedOriginal,
418 | originalPath,
419 | options,
420 | callback
421 | );
422 | }
423 | },
424 | function (err) {
425 | // Try to clean up the temp folder. This can fail if its creation
426 | // failed, in which case there is nothing we can or should do,
427 | // thus the empty callback
428 | if (context.tempFolder) {
429 | rimraf(context.tempFolder).then(() => {}).catch(e => {
430 | // Ignore, it probably was not created in the first place
431 | });
432 | }
433 | callback(
434 | err,
435 | err
436 | ? null
437 | : {
438 | basePath: context.basePath,
439 | extension: context.extension,
440 | width: context.info.width,
441 | height: context.info.height,
442 | originalWidth: context.info.originalWidth,
443 | originalHeight: context.info.originalHeight
444 | }
445 | );
446 | }
447 | );
448 | };
449 |
450 | self.getUrl = function (options, callback) {
451 | if (self.cdn && self.cdn.enabled) {
452 | return self.cdn.url;
453 | }
454 | return self._storage.getUrl(options, callback) + self.prefix;
455 | };
456 |
457 | self.remove = function (path, callback) {
458 | path = prefixPath(path);
459 | return self._storage.remove(path, callback);
460 | };
461 |
462 | /**
463 | * Re-enable access to the file. By default newly uploaded
464 | * files ARE web accessible, so you need not call this method
465 | * unless uploadfs.disable has been previously called.
466 | *
467 | * Be aware that you MUST call this method to guarantee access
468 | * to the file via copyOut, as well as via the web, even though
469 | * some backends may only disable access via the web. Do not
470 | * rely on this behavior. (Differences in behavior between
471 | * local filesystems and S3 require we tolerate this difference.)
472 | *
473 | * @param {string} path Path as stored in uploadfs (with extension)
474 | * @param {Function} callback Receives error if any, otherwise null
475 | */
476 |
477 | self.enable = function (path, callback) {
478 | path = prefixPath(path);
479 | return self._storage.enable(path, callback);
480 | };
481 |
482 | /**
483 | * Disable web access to the file. By default new uploads ARE
484 | * accessible; however this method is useful when implementing a
485 | * "recycle bin" or other undo-able delete feature.
486 | *
487 | * The implementation MUST block web access to the file. The
488 | * implementation MAY also block read access via copyOut, so be
489 | * aware that you MUST call uploadfs.enable to reenable access to
490 | * the file to guarantee you have access to it again across all
491 | * storage backends, even if you are using copyOut to access it.
492 | *
493 | * @param {string} path Path as stored in uploadfs (with extension)
494 | * @param {Function} callback Receives error if any, otherwise null
495 | */
496 |
497 | self.disable = function (path, callback) {
498 | path = prefixPath(path);
499 | return self._storage.disable(path, callback);
500 | };
501 |
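// A sketch of the disable/enable round trip described above (path illustrative):
//
//   uploadfs.disable('/attachments/report.txt', function (err) {
//     // web access is now blocked, and copyOut access MAY be blocked too
//     uploadfs.enable('/attachments/report.txt', function (err) {
//       // access is guaranteed again, both via the web and via copyOut
//     });
//   });
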
502 | /**
503 | * Identify a local image file. Normally you don't need to call
504 | * this yourself, it is mostly used by copyImageIn. But you may find it
505 | * useful in certain migration situations, so we have exported it.
506 | *
507 | * If the file is not an image or is too defective to be identified an error is
508 | * passed to the callback.
509 | *
510 | * Otherwise the second argument to the callback is guaranteed to have extension, width,
511 | * height, orientation, originalWidth and originalHeight properties. extension will be
512 | * gif, jpg or png and is detected from the file's true contents, not the original file
513 | * extension. width and height are automatically rotated to TopLeft orientation while
514 | * originalWidth and originalHeight are not.
515 | *
516 | * If the orientation property is not explicitly set in the file it will be set to
517 | * 'Undefined'.
518 | *
519 | * Alternative backends such as "sip" that do not support orientation detection
520 | * will not set this property at all.
521 | *
522 | * Any other properties returned are dependent on the version of ImageMagick (or
523 | * other backend) used and are not guaranteed.
524 | *
525 | * @param {String} path Local filesystem path to image file
526 | * @param {Function} callback Receives the usual err argument, followed by an object with extension, width, height, orientation, originalWidth and originalHeight properties. Any other properties depend on the backend in use and are not guaranteed
527 | *
528 | * @see Uploadfs#copyImageIn
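 * @example
 * // A minimal sketch; the path is illustrative:
 * uploadfs.identifyLocalImage('/tmp/photo.jpg', function (err, info) {
 *   if (err) { return console.error(err); }
 *   // info.extension is 'gif', 'jpg' or 'png'; width/height are corrected
 *   // for orientation, originalWidth/originalHeight are not
 * });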
529 | */
530 |
531 | self.identifyLocalImage = function (path, callback) {
532 | return self._image.identify(path, callback);
533 | };
534 |
535 | /**
536 | * Returns the image sizes array with which uploadfs was configured.
537 | * This may be of use if you must iterate over the various generated
538 | * images later.
539 | *
540 | * However note that a best practice is to retain information about the sizes
541 | * that were expected when each image was actually uploaded, because you might
542 | * change your mind and add or remove sizes later.
543 |  * @return {Array} Image size objects
544 | */
545 | self.getImageSizes = function () {
546 | return imageSizes;
547 | };
548 |
549 | /**
550 | * Destroys the uploadfs instance, allowing the backends to release any
551 | * resources they may be holding, such as file descriptors or interval timers.
552 | * Backends that hold such resources should implement their own `destroy` method,
553 | * also accepting a callback. The callback will receive an error if anything
554 | * goes awry during the cleanup process. This method does NOT remove any
555 | * content, it just releases system resources.
556 | * @param {function} callback
557 | */
558 | self.destroy = function (callback) {
559 | const callbacks = [
560 | self._storage.destroy || noOperation,
561 | self._image.destroy || noOperation
562 | ];
563 | return async.parallel(callbacks, callback);
564 | function noOperation(callback) {
565 | return callback(null);
566 | }
567 | };
568 |
569 | self.migrateToDisabledFileKey = function (callback) {
570 | const method = self._storage.migrateToDisabledFileKey;
571 | if (!method) {
572 | // Not relevant for this backend
573 | return callback(null);
574 | }
575 | return self._storage.migrateToDisabledFileKey(callback);
576 | };
577 |
578 | self.migrateFromDisabledFileKey = function (callback) {
579 | const method = self._storage.migrateFromDisabledFileKey;
580 | if (!method) {
581 | // Not relevant for this backend
582 | return callback(null);
583 | }
584 | return self._storage.migrateFromDisabledFileKey(callback);
585 | };
586 |
587 | // Called by `convert` to postprocess resized/cropped images
588 | // for optimal file size, etc.
589 |
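// A sketch of how a postprocessor is registered at init() time (the function
// body is illustrative). A postprocessor declaring four parameters
// (files, folder, options, callback) is invoked callback-style; otherwise it
// must return a promise, as in webp-test.js:
//
//   uploadfs.init({
//     storage: 'local',
//     tempPath: '/tmp',
//     uploadsPath: '/tmp/uploads',
//     uploadsUrl: 'http://localhost:3000/uploads',
//     postprocessors: [ {
//       postprocessor: function (files, folder, options) {
//         // optimize each file in `files` (they all share one folder), then:
//         return Promise.resolve(true);
//       },
//       extensions: [ 'gif', 'jpg', 'png', 'webp' ],
//       options: {}
//     } ]
//   });
//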
590 | self.postprocess = function (files, callback) {
591 | const sample = files[0];
592 | if (!sample) {
593 | return callback(null);
594 | }
595 | const relevant = _.filter(
596 | self.options.postprocessors || [],
597 | function (postprocessor) {
598 | const matches = sample.match(/\.(\w+)$/);
599 | if (!matches) {
600 | return false;
601 | }
602 | const extension = matches[1];
603 | return _.includes(postprocessor.extensions, extension);
604 | }
605 | );
606 | const folder = require('path').dirname(sample);
607 | return async.eachSeries(
608 | relevant,
609 | function (postprocessor, callback) {
610 | if (postprocessor.postprocessor.length === 4) {
611 | return postprocessor.postprocessor(
612 | files,
613 | folder,
614 | postprocessor.options,
615 | callback
616 | );
617 | } else {
618 | return postprocessor
619 | .postprocessor(files, folder, postprocessor.options)
620 | .then(function () {
621 | return callback(null);
622 | })
623 | .catch(function (err) {
624 | return callback(err);
625 | });
626 | }
627 | },
628 | callback
629 | );
630 | };
631 |
632 | function prefixPath(path) {
633 | // Resolve any double // that results from the prefix
634 | return (self.prefix + path).replace(/\/\//g, '/');
635 | }
636 |
637 | function ensureTempDir() {
638 | if (!ensuredTempDir) {
639 | if (!fs.existsSync(tempPath)) {
640 | fs.mkdirSync(tempPath);
641 | }
642 | ensuredTempDir = true;
643 | }
644 | }
645 | }
646 |
647 | module.exports = function () {
648 | return new Uploadfs();
649 | };
650 |
--------------------------------------------------------------------------------
/webp-test.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | const fs = require('fs');
4 | const { join } = require('path');
5 | const async = require('async');
6 | const Promise = require('bluebird');
7 | const { each, find } = require('lodash');
8 | const uploadfs = require('./uploadfs.js')();
9 |
10 | // colored output
11 | const color = (input, num = 255) => `\x1b[38;5;${num}m${input}\x1b[0m`;
12 | const red = input => color(input, 1);
13 | const grn = input => color(input, 2);
14 | const blu = input => color(input, 6);
15 |
16 | // status msg
17 | const check = (num, msg) => console.log(`(${num})${' '.repeat(13)}${msg}`);
18 | const pass = (num, msg = '') =>
19 | console.log(`${grn(`(${num})`)}${' '.repeat(13)}${grn('OK')} ${msg}\n`);
20 | const fail = (num, msg = '') => {
21 | console.log(`${red(`(${num})`)}${' '.repeat(13)}${red('ERROR')} ${msg}\n`);
22 | process.exit(1);
23 | };
24 |
25 | // time
26 | const elapsed = () => (performance.nodeTiming.duration / 1000).toFixed(2);
27 |
28 | // settings
29 | const img = 'test.webp';
30 | const ext = 'webp';
31 | const basePath = '/images/profiles/me';
32 | const imageSizes = [
33 | {
34 | name: 'small',
35 | width: 320,
36 | height: 320
37 | },
38 | {
39 | name: 'medium',
40 | width: 640,
41 | height: 640
42 | },
43 | {
44 | name: 'large',
45 | width: 1140,
46 | height: 1140
47 | }
48 | ];
49 | const config = {
50 | backend: 'local',
51 | image: 'sharp',
52 | uploadsPath: join(__dirname, '/test'),
53 | uploadsUrl: 'http://localhost:3000/test',
54 | tempPath: join(__dirname, '/temp'),
55 | imageSizes
56 | };
57 |
58 | // TEST: crop
59 | const testCopyImageInCrop = cb => {
60 | check(6, `uploadfs.copyImageIn('${blu(img)}') with cropping`);
61 | uploadfs.copyImageIn(
62 | img,
63 | '/images/profiles/me-cropped',
64 | {
65 | crop: {
66 | top: 830,
67 | left: 890,
68 | width: 500,
69 | height: 500
70 | }
71 | },
72 | (e, info) => {
73 | if (e) {
74 | fail(6, e);
75 | }
76 | if (info.basePath !== '/images/profiles/me-cropped') {
77 | fail(6, 'info.basePath is incorrect');
78 | }
79 | pass(6);
80 |
81 | check(7, 'returned image dimensions are reoriented');
82 | if (info.width !== 500 || info.height !== 500) {
83 | fail(7, 'reported size does not match crop');
84 | }
85 |
86 | if (!fs.statSync(`test/images/profiles/me-cropped.${ext}`).size) {
87 | fail(7, 'cannot stat copied image');
88 | }
89 | pass(7);
90 |
91 | check(8, 'removing files');
92 | uploadfs.remove(`${basePath}-cropped.${ext}`, e => {
93 | async.each(
94 | imageSizes,
95 | (size, cb) => {
96 | const name = `${info.basePath}.${size.name}.${ext}`;
97 | if (!fs.statSync(`test${name}`).size) {
98 | fail(8, 'cannot stat scaled/copied image');
99 | }
100 | uploadfs.remove(name, e => cb(e));
101 | },
102 | e => {
103 | if (e) {
104 | fail(8, e);
105 | }
106 | pass(8);
107 |
108 | // done, return
109 | cb();
110 | }
111 | );
112 | });
113 | }
114 | );
115 | };
116 |
117 | // TEST: copy
118 | const testCopyImageIn = cb => {
119 | check(2, `uploadfs.copyImageIn('${blu(img)}')`);
120 | uploadfs.copyImageIn(img, basePath, (e, info) => {
121 | if (e) {
122 | fail(2, e);
123 | }
124 | if (info.basePath !== '/images/profiles/me') {
125 | fail(2, 'info.basePath is incorrect');
126 | }
127 | pass(2);
128 |
129 | // check(3, 'returned image dimensions are reoriented');
130 | // if (info.width !== 1936 || info.height !== 2592) {
131 | // fail(3, 'Width and height missing or not reoriented for web use');
132 | // }
133 | // if (info.originalWidth !== 2592 || info.originalHeight !== 1936) {
134 | // fail(3, 'Original width and height missing or incorrect');
135 | // }
136 | // pass(3);
137 |
138 | check(4, 'locate copied image');
139 | if (!fs.statSync(`test/images/profiles/me.${ext}`).size) {
140 | fail(4, 'cannot stat copied image');
141 | }
142 | pass(4);
143 |
144 | check(5, 'removing files');
145 | uploadfs.remove(`/images/profiles/me.${ext}`, e =>
146 | async.each(
147 | imageSizes,
148 | (size, cb) => {
149 | const name = `${info.basePath}.${size.name}.${ext}`;
150 | if (!fs.statSync(`test${name}`).size) {
151 | fail(5, 'cannot stat scaled/copied image');
152 | }
153 | uploadfs.remove(name, e => cb(e));
154 | },
155 | e => {
156 | if (e) {
157 | fail(5, e);
158 | }
159 | pass(5);
160 |
161 | // done, test crop next
162 | testCopyImageInCrop(cb);
163 | }
164 | )
165 | );
166 | });
167 | };
168 |
169 | const run = (cb, msg = 'Running tests', opts = config) => {
170 | console.log(`${msg}\n`);
171 | check(1, 'init');
172 | uploadfs.init(opts, e => {
173 | if (e) {
174 | fail(1, e);
175 | }
176 | pass(1);
177 |
178 | // done, test copy next
179 | testCopyImageIn(cb);
180 | });
181 | };
182 |
183 | // initial msg
184 | console.log(`
185 | + ${blu('Config')}
186 |
187 | {
188 | ${blu('processor')}: '${grn(config.image)}',
189 | ${blu('storage')}: '${grn(config.backend)}'
190 | }
191 | `);
192 |
193 | // first run
194 | run(() => {
195 | let filesSeen = false;
196 |
197 | config.postprocessors = [
198 | {
199 | postprocessor: (files, folder, options = { test: false }) => {
200 | console.log(`${' '.repeat(16)}(${blu('using postprocessor')})\n`);
201 |
202 | if (!options.test) {
203 | fail('Postprocessor', 'postprocessor did not receive options');
204 | }
205 | if (!files) {
206 | fail('Postprocessor', 'did not receive files array');
207 | }
208 | if (!files.length) {
209 | return Promise.resolve(true);
210 | }
211 | if (!files[0].match(/\.(gif|jpg|png|webp)$/)) {
212 | fail('Postprocessor', `invalid file extension: ${files[0]}`);
213 | }
214 | if (!fs.existsSync(files[0])) {
215 | fail('Postprocessor', `cannot locate file: ${files[0]}`);
216 | }
217 | if (require('path').dirname(files[0]) !== folder) {
218 | fail('Postprocessor', 'received incorrect folder path');
219 | }
220 | each(config.imageSizes, size => {
221 | if (!find(files, f => f.match(size.name))) {
222 | fail('Postprocessor', `cannot stat resized file (${size.name})`);
223 | }
224 | });
225 | filesSeen = true;
226 | return Promise.resolve(true);
227 | },
228 | extensions: [ 'gif', 'jpg', 'png', 'webp' ],
229 | options: { test: true }
230 | }
231 | ];
232 |
233 | // second run (postprocessing)
234 | run(() => {
235 | if (!filesSeen) {
236 | fail(0, 'postprocessor saw no files');
237 | }
238 |
239 | // All tests passed!
240 | console.log(`+ ${blu('Completed')} in ${grn(elapsed())} seconds\n`);
241 | process.exit(0);
242 | },
243 |
244 | `+ ${blu('Postprocessors')}`
245 | );
246 | }, `+ ${blu('Methods')}`);
247 |
--------------------------------------------------------------------------------