├── .eslintignore
├── .husky
└── pre-commit
├── test
├── fixtures
│ ├── image0.jpg
│ ├── image1.jpg
│ ├── website-test0.xml
│ ├── cors-invalid2.xml
│ ├── website-test1.xml
│ ├── cors-invalid1.xml
│ ├── cors-invalid0.xml
│ ├── cors-test0.xml
│ ├── website-test2.xml
│ └── website-test3.xml
├── setup.js
├── controllers
│ └── service.spec.js
├── helpers.js
├── middleware
│ ├── vhost.spec.js
│ ├── website.spec.js
│ ├── cors.spec.js
│ └── authentication.spec.js
├── models
│ ├── routing-rule.spec.js
│ └── config.spec.js
└── s3rver.spec.js
├── .mocharc.json
├── .prettierrc
├── .vscode
└── settings.json
├── bin
└── s3rver.js
├── lib
├── models
│ ├── bucket.js
│ ├── account.js
│ ├── routing-rule.js
│ ├── object.js
│ ├── event.js
│ ├── error.js
│ └── config.js
├── middleware
│ ├── logger.js
│ ├── vhost.js
│ ├── response-header-override.js
│ ├── cors.js
│ ├── website.js
│ └── authentication.js
├── controllers
│ ├── service.js
│ └── bucket.js
├── signature
│ ├── v2.js
│ └── v4.js
├── cli.js
├── utils.js
├── routes.js
└── s3rver.js
├── .prettierignore
├── .gitignore
├── example
├── website.xml
└── cors.xml
├── renovate.json
├── .eslintrc
├── LICENSE
├── .github
└── workflows
│ └── ci.yml
├── package.json
└── README.md
/.eslintignore:
--------------------------------------------------------------------------------
1 | .prettierignore
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | . "$(dirname "$0")/_/husky.sh"
3 |
4 | npx lint-staged
5 |
--------------------------------------------------------------------------------
/test/fixtures/image0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jamhall/s3rver/HEAD/test/fixtures/image0.jpg
--------------------------------------------------------------------------------
/test/fixtures/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jamhall/s3rver/HEAD/test/fixtures/image1.jpg
--------------------------------------------------------------------------------
/.mocharc.json:
--------------------------------------------------------------------------------
1 | {
2 | "file": ["./test/setup.js"],
3 | "recursive": true,
4 | "timeout": 10000
5 | }
6 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "trailingComma": "all",
3 | "tabWidth": 2,
4 | "semi": true,
5 | "singleQuote": true
6 | }
7 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "editor.defaultFormatter": "esbenp.prettier-vscode",
3 | "javascript.format.enable": false,
4 | "json.format.enable": false
5 | }
6 |
--------------------------------------------------------------------------------
/bin/s3rver.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node
'use strict';
const cli = require('../lib/cli');

// Run the CLI; any unhandled failure is reported and mapped to a
// non-zero exit code.
cli.parseAsync(process.argv).catch((error) => {
  console.error(error);
  process.exit(1);
});
9 |
--------------------------------------------------------------------------------
/test/fixtures/website-test0.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | index.html
4 |
5 |
--------------------------------------------------------------------------------
/lib/models/bucket.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
/**
 * Lightweight value object describing a single bucket.
 */
class S3Bucket {
  /**
   * @param {string} name - Bucket name as supplied by the client.
   * @param {Date} creationDate - Timestamp recorded when the bucket was created.
   */
  constructor(name, creationDate) {
    Object.assign(this, { name, creationDate });
  }
}
9 | module.exports = S3Bucket;
10 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | # Coverage directory used by tools like istanbul
2 | .nyc_output/
3 | coverage/
4 |
5 | # Dependencies
6 | node_modules/
7 | package-lock.json
8 |
9 | # JSON configuration files
10 | .eslintrc
11 |
12 | # IDEs
13 | .idea/
14 | .vscode/
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 |
5 | # Runtime data
6 | pids
7 | *.pid
8 | *.seed
9 | .eslintcache
10 |
11 | # Coverage directory used by tools like istanbul
12 | .nyc_output/
13 | coverage/
14 |
15 | # Dependencies
16 | node_modules/
17 |
18 | # IDEs
19 | .idea
20 |
--------------------------------------------------------------------------------
/example/website.xml:
--------------------------------------------------------------------------------
1 | <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
2 |   <IndexDocument>
3 |     <Suffix>index.html</Suffix>
4 |   </IndexDocument>
5 |   <ErrorDocument>
6 |     <Key>error.html</Key>
7 |   </ErrorDocument>
8 | </WebsiteConfiguration>
--------------------------------------------------------------------------------
/test/fixtures/cors-invalid2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 3000
5 | *
6 |
7 |
8 |
--------------------------------------------------------------------------------
/test/fixtures/website-test1.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | index.html
4 |
5 |
6 | error.html
7 |
8 |
--------------------------------------------------------------------------------
/example/cors.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
3 |   <CORSRule>
4 |     <AllowedOrigin>*</AllowedOrigin>
5 |     <AllowedMethod>GET</AllowedMethod>
6 |     <MaxAgeSeconds>3000</MaxAgeSeconds>
7 |     <AllowedHeader>Authorization</AllowedHeader>
8 |   </CORSRule>
9 | </CORSConfiguration>
10 |
--------------------------------------------------------------------------------
/test/fixtures/cors-invalid1.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | *
4 |
5 | *
6 | 3000
7 | *
8 |
9 |
10 |
--------------------------------------------------------------------------------
/test/fixtures/cors-invalid0.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | https://*.*
5 | HEAD
6 | GET
7 | 3000
8 | *
9 |
10 |
11 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["config:base"],
3 | "depTypes": [
4 | {
5 | "depType": "dependencies",
6 | "pinVersions": false
7 | }
8 | ],
9 | "packageRules": [
10 | {
11 | "packageNames": ["aws-sdk"],
12 | "schedule": ["before 5am on monday"]
13 | }
14 | ],
15 | "rebaseStalePrs": true,
16 | "automerge": "minor",
17 | "labels": ["tooling", "dependencies"],
18 | "assignees": ["@leontastic"],
19 | "reviewers": ["@leontastic"]
20 | }
21 |
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "node": true,
4 | "es6": true
5 | },
6 | "parserOptions": {
7 | "ecmaVersion": 2018
8 | },
9 | "plugins": ["prettier"],
10 | "extends": [
11 | "standard",
12 | "prettier"
13 | ],
14 | "rules": {
15 | "new-cap": ["error", {
16 | "newIsCapExceptions": ["j2xParser"]
17 | }],
18 |
19 | "prettier/prettier": "error"
20 | },
21 | "overrides": [
22 | {
23 | "files": "test/**/*.spec.*",
24 | "env": {
25 | "mocha": true
26 | },
27 | "rules": {
28 | "no-unused-expressions": "off"
29 | }
30 | }
31 | ]
32 | }
33 |
--------------------------------------------------------------------------------
/lib/models/account.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
/**
 * In-memory model of an AWS account and its access-key credentials.
 *
 * A static registry maps every known access key id back to the account
 * that owns it, so authentication code can resolve credentials globally.
 */
class AWSAccount {
  /**
   * @param {number|string} accountId - Canonical account id.
   * @param {string} displayName - Human-readable owner name.
   */
  constructor(accountId, displayName) {
    this.id = accountId;
    this.displayName = displayName;
    // accessKeyId -> secretAccessKey, scoped to this account only
    this.accessKeys = new Map();
  }

  /**
   * Registers a key pair on this account and indexes it globally.
   * @param {string} accessKeyId
   * @param {string} secretAccessKey
   */
  createKeyPair(accessKeyId, secretAccessKey) {
    this.accessKeys.set(accessKeyId, secretAccessKey);
    AWSAccount.registry.set(accessKeyId, this);
  }

  /**
   * Removes a key pair from this account and from the global index.
   * @param {string} accessKeyId
   */
  revokeAccessKey(accessKeyId) {
    this.accessKeys.delete(accessKeyId);
    AWSAccount.registry.delete(accessKeyId);
  }
}
// Global index of access key id -> owning account.
AWSAccount.registry = new Map();
21 |
22 | exports = module.exports = AWSAccount;
23 |
24 | // Hardcoded dummy user used for authenticated requests
25 | exports.DUMMY_ACCOUNT = new AWSAccount(123456789000, 'S3rver');
26 | exports.DUMMY_ACCOUNT.createKeyPair('S3RVER', 'S3RVER');
27 |
--------------------------------------------------------------------------------
/test/fixtures/cors-test0.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | http://a-test.example.com
5 | GET
6 | HEAD
7 | 3000
8 | Accept-Ranges
9 | Content-Range
10 | Authorization
11 |
12 |
13 | http://*.bar.com
14 | GET
15 | HEAD
16 | 3000
17 | Range
18 | Authorization
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/test/setup.js:
--------------------------------------------------------------------------------
/* eslint-env mocha */
'use strict';

const chai = require('chai');
const chaiAsPromised = require('chai-as-promised');
const os = require('os');
const path = require('path');

const S3rver = require('..');

const { resetTmpDir, instances } = require('./helpers');

chai.use(chaiAsPromised);

// Change the default options to be more test-friendly:
// random port, quiet logging, isolated temp data directory
const tmpDir = path.join(os.tmpdir(), 's3rver_test');
S3rver.defaultOptions.port = 0;
S3rver.defaultOptions.silent = true;
S3rver.defaultOptions.directory = tmpDir;

// Start every test from an empty data directory
beforeEach(resetTmpDir);

// Shut down any servers a test left running; a failed close is only
// warned about so it cannot mask the test's own result
afterEach(async function () {
  const shutdowns = [...instances].map(async (instance) => {
    try {
      await instance.close();
    } catch (err) {
      console.warn(err);
    }
  });
  await Promise.all(shutdowns);
  instances.clear();
});
35 |
--------------------------------------------------------------------------------
/lib/middleware/logger.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const koaLogger = require('koa-logger');
4 | const { createLogger, format, transports } = require('winston');
5 |
6 | /**
7 | * Creates and assigns a Winston logger instance to an app and returns
8 | */
9 | module.exports = function (app, silent) {
10 | const logger = createLogger({
11 | transports: [
12 | new transports.Console({
13 | level: 'debug',
14 | json: false,
15 | format: format.combine(
16 | format.colorize(),
17 | format.splat(),
18 | format.simple(),
19 | ),
20 | silent,
21 | }),
22 | ],
23 | exitOnError: false,
24 | });
25 | logger.emitErrs = true;
26 | app.logger = app.context.logger = logger;
27 |
28 | return koaLogger((message, args) => {
29 | if (args.length === 6) {
30 | // only log responses
31 | logger.info(message.slice(16));
32 | }
33 | });
34 | };
35 |
--------------------------------------------------------------------------------
/test/fixtures/website-test2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | index.html
4 |
5 |
6 |
7 |
8 | test/
9 |
10 |
11 | replacement/
12 |
13 |
14 |
15 |
16 | 404
17 |
18 |
19 |
20 | recursive/
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/test/controllers/service.spec.js:
--------------------------------------------------------------------------------
'use strict';

const { expect } = require('chai');
const { zip } = require('lodash');
const moment = require('moment');

const { createServerAndClient } = require('../helpers');

describe('Operations on the Service', () => {
  describe('GET Service', () => {
    // Fixture bucket configurations, created before the test runs
    const buckets = [
      { name: 'bucket1' },
      { name: 'bucket2' },
      { name: 'bucket3' },
      { name: 'bucket4' },
      { name: 'bucket5' },
      { name: 'bucket6' },
    ];

    it('returns a list of buckets', async function () {
      const { s3Client } = await createServerAndClient({
        configureBuckets: buckets,
      });
      const data = await s3Client.listBuckets().promise();
      expect(data.Buckets).to.have.lengthOf(buckets.length);
      // Each returned bucket lines up with its fixture by name and
      // carries a parseable creation timestamp
      for (const [bucket, config] of zip(data.Buckets, buckets)) {
        expect(bucket.Name).to.equal(config.name);
        expect(moment(bucket.CreationDate).isValid()).to.be.true;
      }
    });
  });
});
33 |
--------------------------------------------------------------------------------
/test/fixtures/website-test3.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | index.html
4 |
5 |
6 |
7 |
8 | simple/
9 |
10 |
11 | replacement/
12 |
13 |
14 |
15 |
16 | 404
17 | complex/
18 |
19 |
20 | custom
21 | 307
22 | https
23 | replacement
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2015 Jamie Hall
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 |
--------------------------------------------------------------------------------
/lib/controllers/service.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { DUMMY_ACCOUNT } = require('../models/account');
4 |
5 | /*
6 | * Operations on the Service
7 | * The following methods correspond to operations you can perform on the Amazon S3 service.
8 | * https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceOps.html
9 | */
10 |
11 | /**
12 | * GET Service
13 | * This implementation of the GET operation returns a list of all buckets owned by the authenticated
14 | * sender of the request.
15 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html}
16 | */
17 | exports.getService = async function getService(ctx) {
18 | const buckets = await ctx.store.listBuckets();
19 | ctx.logger.info('Fetched %d buckets', buckets.length);
20 | ctx.body = {
21 | ListAllMyBucketsResult: {
22 | '@': { xmlns: 'http://doc.s3.amazonaws.com/2006-03-01/' },
23 | Owner: {
24 | ID: DUMMY_ACCOUNT.id,
25 | DisplayName: DUMMY_ACCOUNT.displayName,
26 | },
27 | Buckets: {
28 | Bucket: buckets.map((bucket) => ({
29 | Name: bucket.name,
30 | CreationDate: bucket.creationDate.toISOString(),
31 | })),
32 | },
33 | },
34 | };
35 | };
36 |
--------------------------------------------------------------------------------
/lib/middleware/vhost.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { escapeRegExp } = require('lodash');
4 | const { isIP } = require('net');
5 | const { hostname } = require('os');
6 |
/**
 * Middleware that rewrites URLs for buckets specified via subdomain or host header
 */
module.exports = ({ serviceEndpoint, vhostBuckets }) =>
  function vhost(ctx, next) {
    // Matches <bucket>.s3[-website][-<region>].<serviceEndpoint>
    // prettier-ignore
    const pattern = RegExp(`^(?:(.+)\\.)?s3(-website)?([-.].+)?\\.${escapeRegExp(serviceEndpoint)}$`);
    const [match, bucket, website] = pattern.exec(ctx.hostname) || [];
    ctx.state.vhost = Boolean(bucket);

    if (match) {
      ctx.state.service = website ? 's3-website' : 's3';
      if (bucket) {
        // Rewrite path for requests to .s3[-website][-].amazonaws.com
        ctx.path = `/${bucket}${ctx.path}`;
      }
      return next();
    }

    // if the request contains any x-amz-* headers or query string parameters,
    // consider this an SDK/CLI request
    const keys = [...Object.keys(ctx.headers), ...Object.keys(ctx.query)];
    if (keys.some((key) => key.toLowerCase().startsWith('x-amz-'))) {
      ctx.state.service = 's3';
    }

    // otherwise attempt to distinguish virtual host-style requests:
    // any non-local, non-IP hostname is treated as a bucket name
    if (
      vhostBuckets &&
      !isIP(ctx.hostname) &&
      !['localhost', hostname()].includes(ctx.hostname)
    ) {
      ctx.state.vhost = true;
      ctx.path = `/${ctx.hostname}${ctx.path}`;
    }
    return next();
  };
46 |
--------------------------------------------------------------------------------
/lib/models/routing-rule.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
/**
 * One website-configuration routing rule: an optional match condition
 * plus redirect instructions.
 */
class RoutingRule {
  /**
   * @param {object} config - Parsed RoutingRule element.
   * @param {object} [config.Condition] - Optional KeyPrefixEquals /
   *   HttpErrorCodeReturnedEquals match criteria.
   * @param {object} config.Redirect - Redirect target description.
   */
  constructor(config) {
    this.condition = config.Condition;
    this.redirect = config.Redirect;
    // 301 is the default status when no HttpRedirectCode is configured
    this.statusCode = (this.redirect && this.redirect.HttpRedirectCode) || 301;
  }

  /**
   * Builds the absolute redirect URL for a key that matched this rule.
   * @param {string} key - Object key from the request.
   * @param {object} defaults - { protocol, hostname } fallbacks from the request.
   * @returns {string} Fully-qualified redirect location.
   */
  getRedirectLocation(key, defaults) {
    const { ReplaceKeyPrefixWith, ReplaceKeyWith, Protocol, HostName } =
      this.redirect;

    let redirectKey = key;
    if (ReplaceKeyPrefixWith) {
      // Swap out the matched prefix (or prepend when no prefix is configured)
      const prefix = (this.condition && this.condition.KeyPrefixEquals) || /^/;
      redirectKey = key.replace(prefix, ReplaceKeyPrefixWith);
    } else if (ReplaceKeyWith) {
      redirectKey = ReplaceKeyWith;
    }

    const protocol = Protocol || defaults.protocol;
    const hostName = HostName || defaults.hostname;
    return `${protocol}://${hostName}/${redirectKey}`;
  }

  /**
   * Decides whether this rule applies to the given key/status combination.
   * @param {string} key - Object key from the request.
   * @param {number} statusCode - Status the response would otherwise have.
   * @returns {boolean}
   */
  shouldRedirect(key, statusCode) {
    // A rule with no condition matches every request
    if (!this.condition) return true;

    const { KeyPrefixEquals, HttpErrorCodeReturnedEquals } = this.condition;
    if (KeyPrefixEquals && HttpErrorCodeReturnedEquals) {
      return (
        key.startsWith(KeyPrefixEquals) &&
        HttpErrorCodeReturnedEquals === statusCode
      );
    }
    if (KeyPrefixEquals) {
      return key.startsWith(KeyPrefixEquals);
    }
    if (HttpErrorCodeReturnedEquals) {
      return HttpErrorCodeReturnedEquals === statusCode;
    }
    return false;
  }
}
54 |
55 | module.exports = RoutingRule;
56 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - next
8 | pull_request:
9 |
10 | jobs:
11 | lint:
12 | name: Quality checks
13 |
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - uses: actions/checkout@v2
18 | with:
19 | fetch-depth: 0
20 |
21 | - uses: actions/setup-node@v1
22 | with:
23 | node-version: 16.x
24 |
25 | - name: Install dependencies
26 | run: npm ci
27 |
28 | - name: Security audit
29 | run: npm audit
30 |
31 | - name: Code coverage
32 | run: npx nyc --reporter=lcovonly npm test
33 |
34 | - uses: codecov/codecov-action@v1
35 | with:
36 | files: ./coverage/lcov.info
37 | fail_ci_if_error: true
38 |
39 | - name: Lint
40 | run: npx eslint .
41 |
42 | - name: Check formatting
43 | run: npx prettier . --check
44 |
45 | test:
46 | name: Test - ${{ matrix.os }} - Node v${{ matrix.node-version }}
47 |
48 | strategy:
49 | matrix:
50 | os: [ubuntu-latest, macos-latest, windows-latest]
51 | node-version: [12.x, 14.x, 16.x]
52 | fail-fast: false
53 |
54 | runs-on: ${{ matrix.os }}
55 |
56 | steps:
57 | - name: Set up Git
58 | if: matrix.os == 'windows-latest'
59 | run: git config --global core.autocrlf input
60 |
61 | - uses: actions/checkout@v2
62 |
63 | - name: Use Node.js ${{ matrix.node-version }}
64 | uses: actions/setup-node@v1
65 | with:
66 | node-version: ${{ matrix.node-version }}
67 |
68 | - name: Install dependencies
69 | run: npm ci
70 |
71 | - name: Run tests
72 | run: npm test
73 |
--------------------------------------------------------------------------------
/lib/models/object.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { pick, pickBy } = require('lodash');
4 |
5 | const S3Error = require('./error');
6 |
/**
 * In-memory representation of a stored object: location, content, and a
 * whitelisted set of metadata headers.
 */
class S3Object {
  /**
   * @param {string} bucket - Owning bucket name.
   * @param {string} key - Object key.
   * @param {*} content - Object payload.
   * @param {object} metadata - Raw header map; filtered to allowed keys.
   * @throws {S3Error} InvalidStorageClass when x-amz-storage-class is unknown.
   */
  constructor(bucket, key, content, metadata) {
    this.bucket = bucket;
    this.key = key;
    this.content = content;

    if (
      'x-amz-storage-class' in metadata &&
      !S3Object.STORAGE_CLASSES.includes(metadata['x-amz-storage-class'])
    ) {
      throw new S3Error(
        'InvalidStorageClass',
        'The storage class you specified is not valid',
      );
    }

    // Keep only whitelisted headers, plus intrinsic metadata determined
    // when retrieving objects
    this.metadata = pick(metadata, [
      ...S3Object.ALLOWED_METADATA,
      'last-modified',
      'etag',
      'content-length',
    ]);
    if (!this.metadata['content-type']) {
      this.metadata['content-type'] = 'binary/octet-stream';
    }
    // User-defined x-amz-meta-* headers pass through untouched
    Object.assign(
      this.metadata,
      pickBy(metadata, (value, header) => header.startsWith('x-amz-meta-')),
    );
  }

  /** @returns {number} Object size in bytes, from content-length. */
  get size() {
    return Number(this.metadata['content-length']);
  }

  /** @returns {Date} Parsed last-modified timestamp. */
  get lastModifiedDate() {
    return new Date(this.metadata['last-modified']);
  }
}
// Headers clients may set directly on uploads
S3Object.ALLOWED_METADATA = [
  'cache-control',
  'content-disposition',
  'content-encoding',
  'content-language',
  'content-type',
  'expires',
  'x-amz-storage-class',
  'x-amz-website-redirect-location',
];
// Accepted values for the x-amz-storage-class header
S3Object.STORAGE_CLASSES = [
  'STANDARD',
  'REDUCED_REDUNDANCY',
  'STANDARD_IA',
  'ONEZONE_IA',
  'INTELLIGENT_TIERING',
  'GLACIER',
  'DEEP_ARCHIVE',
  'OUTPOSTS',
];
65 | module.exports = S3Object;
66 |
--------------------------------------------------------------------------------
/lib/middleware/response-header-override.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { chain, isEmpty } = require('lodash');
4 |
5 | const S3Error = require('../models/error');
6 | const { capitalizeHeader } = require('../utils');
7 |
/**
 * Derived from
 * https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html#RESTObjectGET-requests-request-parameters
 */
const RESPONSE_HEADERS = {
  'response-content-type': 1,
  'response-content-language': 1,
  'response-expires': 1,
  'response-cache-control': 1,
  'response-content-disposition': 1,
  'response-content-encoding': 1,
};

/**
 * Middleware that handles response headers overrides on signed GET requests.
 */
exports = module.exports = () =>
  async function responseHeaderOverride(ctx, next) {
    if (ctx.state.website) {
      // skip for static website requests
      return next();
    }

    // Collect response-* query params, rejecting any non-overridable ones
    const overrides = chain(ctx.query)
      .pickBy((value, key) => {
        if (!key.startsWith('response-')) return false;
        if (RESPONSE_HEADERS[key]) return true;
        throw new S3Error(
          'InvalidArgument',
          `${key} is not in the set of overridable response headers. ` +
            'Please refer to the S3 API documentation for a complete list ' +
            'of overridable response headers.',
          {
            ArgumentName: key,
            ArgumentValue: value,
          },
        );
      })
      .mapKeys((value, key) => capitalizeHeader(key.slice('response-'.length)))
      .value();

    if (ctx.method !== 'HEAD' && ctx.method !== 'GET') {
      return next();
    }

    // Overrides are only honored on authenticated (signed) requests
    if (!isEmpty(overrides) && !ctx.state.account) {
      throw new S3Error(
        'InvalidRequest',
        'Request specific response headers cannot be used for anonymous ' +
          'GET requests.',
      );
    }
    await next();
    ctx.set(overrides);
  };

exports.RESPONSE_HEADERS = RESPONSE_HEADERS;
70 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "s3rver",
3 | "version": "4.0.0",
4 | "description": "Fake S3 server for node",
5 | "keywords": [
6 | "fake",
7 | "s3",
8 | "server",
9 | "mock",
10 | "false",
11 | "aws",
12 | "amazon"
13 | ],
14 | "author": {
15 | "name": "Jamie Hall",
16 | "email": "hello@jamiehall.eu",
17 | "url": "http://github.com/jamhall/s3rver"
18 | },
19 | "license": "MIT",
20 | "repository": {
21 | "type": "git",
22 | "url": "git@github.com:jamhall/s3rver.git"
23 | },
24 | "bugs": {
25 | "url": "https://github.com/jamhall/s3rver/issues"
26 | },
27 | "homepage": "https://github.com/jamhall/s3rver",
28 | "scripts": {
29 | "coverage": "nyc npm test",
30 | "test": "mocha",
31 | "fmt": "eslint . --fix && prettier . --write",
32 | "prepare": "husky install"
33 | },
34 | "main": "lib/s3rver.js",
35 | "files": [
36 | "lib/",
37 | "example/*.xml"
38 | ],
39 | "bin": "bin/s3rver.js",
40 | "directories": {
41 | "lib": "./lib",
42 | "example": "./example",
43 | "test": "./test"
44 | },
45 | "engines": {
46 | "node": ">=12.13.0"
47 | },
48 | "dependencies": {
49 | "@koa/router": "^10.0.0",
50 | "busboy": "^0.3.1",
51 | "commander": "^8.0.0",
52 | "fast-xml-parser": "^3.19.0",
53 | "he": "^1.2.0",
54 | "koa": "^2.12.1",
55 | "koa-logger": "^3.2.0",
56 | "lodash": "^4.17.20",
57 | "statuses": "^2.0.0",
58 | "winston": "^3.0.0"
59 | },
60 | "devDependencies": {
61 | "aws-sdk": "2.999.0",
62 | "chai": "4.3.4",
63 | "chai-as-promised": "7.1.1",
64 | "eslint": "7.32.0",
65 | "eslint-config-prettier": "8.3.0",
66 | "eslint-config-standard": "16.0.3",
67 | "eslint-plugin-import": "2.24.2",
68 | "eslint-plugin-node": "11.1.0",
69 | "eslint-plugin-prettier": "4.0.0",
70 | "eslint-plugin-promise": "5.1.0",
71 | "eslint-plugin-standard": "4.1.0",
72 | "express": "4.17.1",
73 | "form-data": "4.0.0",
74 | "husky": "7.0.2",
75 | "lint-staged": "11.2.0",
76 | "mocha": "9.1.2",
77 | "moment": "2.29.1",
78 | "nyc": "15.1.0",
79 | "p-map": "4.0.0",
80 | "prettier": "2.4.1",
81 | "request": "2.88.2",
82 | "request-promise-native": "1.0.9"
83 | },
84 | "lint-staged": {
85 | "*.js": "eslint --cache --fix",
86 | "*": "prettier --write --ignore-unknown"
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/lib/models/event.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const crypto = require('crypto');
4 |
5 | const { randomHexString } = require('../utils');
6 |
/**
 * Builds a fake S3 notification event payload mirroring the structure AWS
 * delivers for bucket notifications.
 * https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html
 */
class S3Event {
  /**
   * @param {object} eventData - { bucket, eventType, S3Item } describing the change.
   * @param {object} reqParams - { reqHeaders, sourceIp } from the triggering request.
   * @returns {object} A `{ Records: [...] }` notification payload — note the
   *   constructor deliberately returns a plain object, not an S3Event instance.
   */
  constructor(eventData, reqParams) {
    const { reqHeaders, sourceIp } = reqParams;
    let eventName = '';
    const s3Object = {
      key: eventData.S3Item.key,
      // AWS uses an opaque hex sequencer; a hex timestamp is adequate here
      sequencer: Date.now().toString(16).toUpperCase(),
    };
    switch (eventData.eventType) {
      case 'Copy':
        eventName = 'ObjectCreated:Copy';
        s3Object.size = eventData.S3Item.size;
        break;

      // Put and Post produce identical payload shapes — handle together
      case 'Put':
      case 'Post':
        eventName = `ObjectCreated:${eventData.eventType}`;
        s3Object.size = eventData.S3Item.size;
        // stored etags are JSON-quoted strings; unwrap the quotes
        s3Object.eTag = JSON.parse(eventData.S3Item.metadata.etag);
        break;

      case 'Delete':
        eventName = 'ObjectRemoved:Delete';
        break;
    }

    return {
      Records: [
        {
          eventVersion: '2.0',
          eventSource: 'aws:s3',
          awsRegion: 'us-east-1',
          eventTime: new Date().toISOString(),
          eventName: eventName,
          userIdentity: {
            principalId: 'AWS:' + randomHexString(21).toUpperCase(),
          },
          requestParameters: {
            sourceIPAddress: sourceIp,
          },
          responseElements: {
            'x-amz-request-id': randomHexString(16).toUpperCase(),
            'x-amz-id-2': crypto
              .createHash('sha256')
              .update(reqHeaders.host)
              .digest('base64'),
          },
          s3: {
            s3SchemaVersion: '1.0',
            configurationId: 'testConfigId',
            bucket: {
              name: eventData.bucket,
              ownerIdentity: {
                principalId: randomHexString(14).toUpperCase(),
              },
              // S3 bucket ARNs have empty region/account components:
              // arn:aws:s3:::<bucket> (original emitted 'arn:aws:s3: : :')
              arn: 'arn:aws:s3:::' + eventData.bucket,
            },
            object: s3Object,
          },
        },
      ],
    };
  }
}
76 |
77 | module.exports = S3Event;
78 |
--------------------------------------------------------------------------------
/lib/signature/v2.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const S3Error = require('../models/error');
4 |
5 | exports.parseHeader = function (headers) {
6 | const [, ...components] = headers.authorization.split(' ');
7 |
8 | if (components.length !== 1) {
9 | throw new S3Error(
10 | 'InvalidArgument',
11 | "Authorization header is invalid -- one and only one ' ' (space) required",
12 | {
13 | ArgumentName: 'Authorization',
14 | ArgumentValue: headers.authorization,
15 | },
16 | );
17 | }
18 |
19 | const match = /([^:]*):([^:]+)/.exec(components[0]);
20 | if (!match) {
21 | throw new S3Error(
22 | 'InvalidArgument',
23 | 'AWS authorization header is invalid. Expected AwsAccessKeyId:signature',
24 | {
25 | ArgumentName: 'Authorization',
26 | ArgumentValue: headers.authorization,
27 | },
28 | );
29 | }
30 |
31 | return { accessKeyId: match[1], signatureProvided: match[2] };
32 | };
33 |
34 | exports.parseQuery = function (query) {
35 | // authentication param names are case-sensitive
36 | if (!('Expires' in query) || !('AWSAccessKeyId' in query)) {
37 | throw new S3Error(
38 | 'AccessDenied',
39 | 'Query-string authentication requires the Signature, Expires and ' +
40 | 'AWSAccessKeyId parameters',
41 | );
42 | }
43 |
44 | const request = {
45 | signature: {
46 | version: 2,
47 | algorithm: 'sha1',
48 | encoding: 'base64',
49 | },
50 | accessKeyId: query.AWSAccessKeyId,
51 | expires: Number(query.Expires),
52 | signatureProvided: query.Signature,
53 | };
54 |
55 | const serverTime = new Date();
56 | const expiresTime = new Date(request.expires * 1000);
57 | if (isNaN(expiresTime)) {
58 | throw new S3Error(
59 | 'AccessDenied',
60 | `Invalid date (should be seconds since epoch): ${query.Expires}`,
61 | );
62 | }
63 |
64 | if (serverTime > expiresTime) {
65 | throw new S3Error('AccessDenied', 'Request has expired', {
66 | Expires: expiresTime.toISOString().replace(/\.\d+/, ''),
67 | ServerTime: serverTime.toISOString().replace(/\.\d+/, ''),
68 | });
69 | }
70 |
71 | return request;
72 | };
73 |
74 | /**
75 | * Generates a string to be signed for signature version 2.
76 | *
77 | * @param {*} canonicalRequest
78 | */
79 | exports.getStringToSign = function (canonicalRequest) {
80 | return [
81 | canonicalRequest.method,
82 | canonicalRequest.contentMD5,
83 | canonicalRequest.contentType,
84 | canonicalRequest.timestamp,
85 | ...canonicalRequest.amzHeaders,
86 | canonicalRequest.querystring
87 | ? `${canonicalRequest.uri}?${canonicalRequest.querystring}`
88 | : canonicalRequest.uri,
89 | ].join('\n');
90 | };
91 |
--------------------------------------------------------------------------------
/lib/cli.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const fs = require('fs');
4 | const { Command } = require('commander');
5 | const pkg = require('../package.json');
6 | const S3rver = require('./s3rver');
7 |
/**
 * Option parser for `--directory`: creates the data directory (including any
 * missing parents) before the server starts, then echoes the path back so
 * commander stores it as the option value.
 */
function ensureDirectory(directory) {
  const options = { recursive: true };
  fs.mkdirSync(directory, options);
  return directory;
}
12 |
// manually parse [config...] arguments for --configure-bucket
// Commander cannot repeat variadic options, so this walks program.rawArgs
// itself: it locates the `--configure-bucket` occurrence that precedes this
// bucket name, then consumes every following argument up to the next flag as
// a configuration file path.
//
// @param {string} bucketName - bucket whose config files should be collected
// @param {Array} memo - commander's accumulator for repeated options
// @returns {Array} memo, extended with { name, configs: Buffer[] }
function parseConfigureBucket(bucketName, memo = []) {
  // skip past earlier --configure-bucket flags until we find the one that
  // introduces this bucket name
  let idx = 0;
  do {
    idx = program.rawArgs.indexOf('--configure-bucket', idx) + 1;
  } while (program.rawArgs[idx] !== bucketName);
  idx++;

  // everything up to the next `-`/`--` flag is a configuration file path
  const bucketConfigs = [];
  while (
    idx < program.rawArgs.length &&
    !program.rawArgs[idx].startsWith('-')
  ) {
    bucketConfigs.push(program.rawArgs[idx++]);
  }
  memo.push({
    name: bucketName,
    // read the files eagerly; S3rver receives raw Buffers
    configs: bucketConfigs.map((config) => fs.readFileSync(config)),
  });
  return memo;
}
34 |
// CLI definition. Defaults come from S3rver.defaultOptions so the CLI and the
// programmatic API stay in sync.
// NOTE(review): the option value placeholders (e.g. `--directory <directory>`)
// appear stripped in this view of the file — the trailing space inside the
// flag strings suggests mangling; confirm against the repository before
// editing these literals.
const program = new Command();
program
  .usage('-d [options]')
  .requiredOption('-d, --directory ', 'Data directory', ensureDirectory)
  .option(
    '-a, --address ',
    'Hostname or IP to bind to',
    S3rver.defaultOptions.address,
  )
  .option(
    '-p, --port ',
    'Port of the http server',
    S3rver.defaultOptions.port,
  )
  .option('-s, --silent', 'Suppress log messages', S3rver.defaultOptions.silent)
  .option(
    '--key ',
    'Path to private key file for running with TLS',
    fs.readFileSync,
  )
  .option(
    '--cert ',
    'Path to certificate file for running with TLS',
    fs.readFileSync,
  )
  .option(
    '--service-endpoint ',
    'Overrides the AWS service root for subdomain-style access',
    S3rver.defaultOptions.serviceEndpoint,
  )
  .option(
    '--allow-mismatched-signatures',
    'Prevent SignatureDoesNotMatch errors for all well-formed signatures',
  )
  .option('--no-vhost-buckets', 'Disables vhost-style access for all buckets')
  .option(
    '--configure-bucket [configs...]',
    'Bucket name and configuration files for creating and configuring a bucket at startup',
    parseConfigureBucket,
  )
  .version(pkg.version, '-v, --version');

// NOTE: commander doesn't support repeated variadic options,
// we must manually parse this option
program.options.find((option) =>
  option.is('--configure-bucket'),
).variadic = false;

// extra usage examples appended after commander's generated help text
program.on('--help', () => {
  console.log('');
  console.log('Examples:');
  console.log(' $ s3rver -d /tmp/s3rver -a 0.0.0.0 -p 0');
  console.log(
    ' $ s3rver -d /tmp/s3rver --configure-bucket test-bucket ./cors.xml ./website.xml',
  );
});

// default action: rename the manually-parsed option to the name the S3rver
// constructor expects, then start the server and report the bound address
program.action(async ({ configureBucket, ...opts }) => {
  opts.configureBuckets = configureBucket;
  const { address, port } = await new S3rver(opts).run();
  console.log();
  console.log('S3rver listening on %s:%d', address, port);
});

module.exports = program;
100 |
--------------------------------------------------------------------------------
/test/helpers.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const AWS = require('aws-sdk');
4 | const { RequestSigner } = require('aws4');
5 | const crypto = require('crypto');
6 | const xmlParser = require('fast-xml-parser');
7 | const fs = require('fs');
8 | const he = require('he');
9 | const { times } = require('lodash');
10 | const os = require('os');
11 | const path = require('path');
12 | const pMap = require('p-map');
13 |
14 | const S3rver = require('..');
15 |
// shared scratch directory used by the test suite
const tmpDir = path.join(os.tmpdir(), 's3rver_test');

// every server started via createServerAndClient, so tests can tear them down
const instances = new Set();

// Removes and recreates the shared temp directory.
// NOTE(review): fs.rmdirSync with { recursive } is deprecated in newer Node
// releases; fs.rmSync is the modern replacement — confirm the project's
// minimum supported Node version before switching.
exports.resetTmpDir = function resetTmpDir() {
  try {
    fs.rmdirSync(tmpDir, { recursive: true });
  } catch (err) {
    /* directory didn't exist */
  }
  try {
    fs.mkdirSync(tmpDir, { recursive: true });
  } catch (err) {
    if (err.code !== 'EEXIST') {
      throw err;
    }
  }
};
34 |
35 | exports.generateTestObjects = function generateTestObjects(
36 | s3Client,
37 | bucket,
38 | amount,
39 | ) {
40 | const padding = amount.toString().length;
41 | const objects = times(amount, (i) => ({
42 | Bucket: bucket,
43 | Key: 'key' + i.toString().padStart(padding, '0'),
44 | Body: 'Hello!',
45 | }));
46 |
47 | return pMap(objects, (object) => s3Client.putObject(object).promise(), {
48 | concurrency: 100,
49 | });
50 | };
51 |
52 | exports.md5 = (data) => crypto.createHash('md5').update(data).digest('hex');
53 |
54 | exports.parseXml = (data) =>
55 | xmlParser.parse(data, {
56 | tagValueProcessor: (a) => he.decode(a),
57 | });
58 |
/**
 * Boots an S3rver instance with the given options and returns it together
 * with an aws-sdk S3 client pointed at it (path-style, sigv4, plain HTTP).
 * The instance is added to `instances` so tests can close it afterwards.
 *
 * @param {object} options - forwarded to the S3rver constructor
 * @returns {Promise<{ s3rver, s3Client }>}
 */
exports.createServerAndClient = async function createServerAndClient(options) {
  const s3rver = new S3rver(options);
  const { port } = await s3rver.run();
  instances.add(s3rver);

  const s3Client = new AWS.S3({
    accessKeyId: 'S3RVER',
    secretAccessKey: 'S3RVER',
    endpoint: `localhost:${port}`,
    sslEnabled: false,
    s3ForcePathStyle: true,
    signatureVersion: 'v4',
  });

  return { s3rver, s3Client };
};
75 |
exports.instances = instances;

/**
 * aws4 RequestSigner variant that produces AWS's chunked-upload
 * ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD") signatures for tests.
 */
exports.StreamingRequestSigner = class extends RequestSigner {
  // Mark the request as a streaming payload before the normal preparation.
  prepareRequest() {
    this.request.headers['X-Amz-Content-Sha256'] =
      'STREAMING-AWS4-HMAC-SHA256-PAYLOAD';
    return super.prepareRequest();
  }

  // Remember each signature: every chunk's signature chains off the previous
  // one (the seed is the request header signature).
  signature() {
    this.previousSignature = super.signature();
    this.chunkData = undefined;
    return this.previousSignature;
  }

  // Returns the chunk framing header: "<hex length>;chunk-signature=<sig>".
  signChunk(chunkData) {
    this.chunkData = chunkData;
    const chunkLengthHex = chunkData.length.toString(16);
    return `${chunkLengthHex};chunk-signature=${this.signature()}`;
  }

  // For chunk signatures, build the AWS4-HMAC-SHA256-PAYLOAD string-to-sign;
  // otherwise (header signature) defer to the standard aws4 implementation.
  stringToSign() {
    const hash = (string, encoding) =>
      crypto.createHash('sha256').update(string, 'utf8').digest(encoding);

    return this.chunkData === undefined
      ? super.stringToSign()
      : [
          'AWS4-HMAC-SHA256-PAYLOAD',
          this.getDateTime(),
          this.credentialString(),
          this.previousSignature,
          hash('', 'hex'),
          hash(this.chunkData, 'hex'),
        ].join('\n');
  }
};
113 |
--------------------------------------------------------------------------------
/lib/middleware/cors.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const S3Error = require('../models/error');
4 |
/**
 * Derived from https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
 *
 * Koa middleware factory emulating S3's per-bucket CORS behavior: preflight
 * (OPTIONS) requests are answered directly, while ordinary requests that
 * match a configured rule have CORS response headers attached.
 */
module.exports = () =>
  async function cors(ctx, next) {
    // bucket-level CORS configuration, if one has been PUT
    const config = await ctx.store.getSubresource(
      ctx.params.bucket,
      undefined,
      'cors',
    );
    // Prefer the Access-Control-Request-Method header if supplied
    const origin = ctx.get('origin');
    const method = ctx.get('access-control-request-method') || ctx.method;
    const matchedRule = config ? config.matchRule(origin, method) : undefined;

    if (ctx.method === 'OPTIONS') {
      // ---- preflight request ----
      if (!origin) {
        throw new S3Error(
          'BadRequest',
          'Insufficient information. Origin request header needed.',
        );
      }

      if (!ctx.get('access-control-request-method')) {
        throw new S3Error(
          'BadRequest',
          'Invalid Access-Control-Request-Method: null',
        );
      }

      // S3 only checks if CORS is enabled *after* checking the existence of access control headers
      if (!config) {
        throw new S3Error(
          'CORSResponse',
          'CORS is not enabled for this bucket.',
        );
      }

      const requestHeaders = ctx.get('access-control-request-headers')
        ? ctx.get('access-control-request-headers').split(',')
        : [];

      // requested headers the matched rule permits, normalized to lowercase
      const allowedHeaders = matchedRule
        ? requestHeaders
            .map((header) => header.trim().toLowerCase())
            .filter((header) =>
              matchedRule.allowedHeaders.some((pattern) =>
                pattern.test(header),
              ),
            )
        : [];

      // reject when no rule matched or any requested header was filtered out
      // NOTE(review): the 'evalution' misspelling below is part of the
      // emitted message; verify it matches real S3 output before "fixing" it.
      if (!matchedRule || allowedHeaders.length < requestHeaders.length) {
        throw new S3Error(
          'CORSResponse',
          'This CORS request is not allowed. This is usually because the ' +
            'evalution of Origin, request method / ' +
            'Access-Control-Request-Method or Access-Control-Request-Headers ' +
            "are not whitelisted by the resource's CORS spec.",
        );
      }

      ctx.set('Access-Control-Allow-Origin', '*');
      ctx.set(
        'Access-Control-Allow-Methods',
        matchedRule.allowedMethods.join(', '),
      );
      if (ctx.get('access-control-request-headers')) {
        ctx.set('Access-Control-Allow-Headers', allowedHeaders.join(', '));
      }

      ctx.set(
        'Vary',
        'Origin, Access-Control-Request-Headers, Access-Control-Request-Method',
      );

      // preflight responses carry no body
      ctx.body = '';
    } else if (config && matchedRule) {
      // ---- non-preflight request matching a configured rule ----
      ctx.set(
        'Access-Control-Allow-Origin',
        matchedRule.hasWildcardOrigin ? '*' : origin,
      );
      if (matchedRule.exposeHeaders.length) {
        ctx.set(
          'Access-Control-Expose-Headers',
          matchedRule.exposeHeaders.join(', '),
        );
      }
      if (matchedRule.maxAgeSeconds != null) {
        ctx.set('Access-Control-Max-Age', matchedRule.maxAgeSeconds);
      }
      ctx.set('Access-Control-Allow-Credentials', true);
      ctx.set(
        'Vary',
        'Origin, Access-Control-Request-Headers, Access-Control-Request-Method',
      );
    }
    return next();
  };
104 |
--------------------------------------------------------------------------------
/test/middleware/vhost.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { expect } = require('chai');
4 | const { zip } = require('lodash');
5 | const moment = require('moment');
6 | const os = require('os');
7 | const request = require('request-promise-native').defaults({
8 | resolveWithFullResponse: true,
9 | });
10 |
11 | const { createServerAndClient, parseXml } = require('../helpers');
12 |
describe('Virtual Host resolution', () => {
  const buckets = [{ name: 'bucket-a' }, { name: 'bucket-b' }];

  // NOTE(review): the bucket-listing assertion body below is repeated in
  // three tests; a shared assertion helper would reduce the duplication.

  it('lists objects with subdomain-domain style bucket access', async function () {
    const { s3Client } = await createServerAndClient({
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: 'bucket-a.s3.amazonaws.com' },
    });
    expect(res.body).to.include(`bucket-a`);
  });

  it('lists objects with a vhost-style bucket access', async function () {
    const { s3Client } = await createServerAndClient({
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: 'bucket-a' },
    });
    expect(res.body).to.include(`bucket-a`);
  });

  it('lists buckets when vhost-style bucket access is disabled', async function () {
    const { s3Client } = await createServerAndClient({
      vhostBuckets: false,
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: 'bucket-a' },
    });
    const parsedBody = parseXml(res.body);
    expect(parsedBody).to.haveOwnProperty('ListAllMyBucketsResult');
    const parsedBuckets = parsedBody.ListAllMyBucketsResult.Buckets.Bucket;
    expect(parsedBuckets).to.be.instanceOf(Array);
    expect(parsedBuckets).to.have.lengthOf(buckets.length);
    for (const [bucket, config] of zip(parsedBuckets, buckets)) {
      expect(bucket.Name).to.equal(config.name);
      expect(moment(bucket.CreationDate).isValid()).to.be.true;
    }
  });

  it('lists buckets at a custom service endpoint', async function () {
    const { s3Client } = await createServerAndClient({
      serviceEndpoint: 'example.com',
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: 's3.example.com' },
    });
    const parsedBody = parseXml(res.body);
    expect(parsedBody).to.haveOwnProperty('ListAllMyBucketsResult');
    const parsedBuckets = parsedBody.ListAllMyBucketsResult.Buckets.Bucket;
    expect(parsedBuckets).to.be.instanceOf(Array);
    expect(parsedBuckets).to.have.lengthOf(buckets.length);
    for (const [bucket, config] of zip(parsedBuckets, buckets)) {
      expect(bucket.Name).to.equal(config.name);
      expect(moment(bucket.CreationDate).isValid()).to.be.true;
    }
  });

  // a Host header matching the machine's own hostname must not be treated
  // as a bucket name
  it('lists buckets at the OS hostname', async function () {
    const { s3Client } = await createServerAndClient({
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: os.hostname() },
    });
    const parsedBody = parseXml(res.body);
    expect(parsedBody).to.haveOwnProperty('ListAllMyBucketsResult');
    const parsedBuckets = parsedBody.ListAllMyBucketsResult.Buckets.Bucket;
    expect(parsedBuckets).to.be.instanceOf(Array);
    expect(parsedBuckets).to.have.lengthOf(buckets.length);
    for (const [bucket, config] of zip(parsedBuckets, buckets)) {
      expect(bucket.Name).to.equal(config.name);
      expect(moment(bucket.CreationDate).isValid()).to.be.true;
    }
  });

  it('lists objects in a bucket at a custom service endpoint', async function () {
    const { s3Client } = await createServerAndClient({
      serviceEndpoint: 'example.com',
      configureBuckets: buckets,
    });
    const res = await request(s3Client.endpoint.href, {
      headers: { host: 'bucket-a.s3.example.com' },
    });
    const parsedBody = parseXml(res.body);
    expect(parsedBody.ListBucketResult.Name).to.equal('bucket-a');
  });
});
104 |
--------------------------------------------------------------------------------
/test/models/routing-rule.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { expect } = require('chai');
4 |
5 | const RoutingRule = require('../../lib/models/routing-rule');
6 |
describe('RoutingRule', () => {
  // Condition matching: which (key, status) pairs trigger a redirect
  describe('Condition', () => {
    const matchingKey = 'prefix/key';
    const nonMatchKey = 'without-prefix/key';
    const matchingStatusCode = 404;
    const nonMatchStatusCode = 200;

    it('redirects with no condition', () => {
      const rule = new RoutingRule({});

      expect(rule.shouldRedirect('key', 200)).to.exist;
    });

    it('redirects using only KeyPrefixEquals', () => {
      const rule = new RoutingRule({
        Condition: {
          KeyPrefixEquals: 'prefix',
        },
      });

      expect(rule.shouldRedirect(matchingKey, 200)).to.be.true;
      expect(rule.shouldRedirect(nonMatchKey, 200)).to.be.false;
    });

    it('redirects using only HttpErrorCodeReturnedEquals', () => {
      const rule = new RoutingRule({
        Condition: {
          HttpErrorCodeReturnedEquals: 404,
        },
      });

      expect(rule.shouldRedirect('key', matchingStatusCode)).to.be.true;
      expect(rule.shouldRedirect('key', nonMatchStatusCode)).to.be.false;
    });

    // both conditions present: redirect only when both match
    it('redirects using both KeyPrefixEquals and HttpErrorCodeReturnedEquals', () => {
      const rule = new RoutingRule({
        Condition: {
          KeyPrefixEquals: 'prefix',
          HttpErrorCodeReturnedEquals: 404,
        },
      });

      expect(rule.shouldRedirect(matchingKey, matchingStatusCode)).to.be.true;
      expect(rule.shouldRedirect(nonMatchKey, matchingStatusCode)).to.be.false;
      expect(rule.shouldRedirect(matchingKey, nonMatchStatusCode)).to.be.false;
      expect(rule.shouldRedirect(nonMatchKey, nonMatchStatusCode)).to.be.false;
    });
  });

  // Redirect construction: status code and Location URL
  describe('Redirect', () => {
    const defaults = {
      protocol: 'https',
      hostname: 'example.com',
    };

    it('redirects using only HostName', () => {
      const rule = new RoutingRule({
        Redirect: {
          HostName: 'localhost',
        },
      });

      expect(rule.statusCode).to.equal(301);
      expect(rule.getRedirectLocation('key', defaults)).to.equal(
        'https://localhost/key',
      );
    });

    it('redirects using only HttpRedirectCode', () => {
      const rule = new RoutingRule({
        Redirect: {
          HttpRedirectCode: 307,
        },
      });

      expect(rule.statusCode).to.equal(307);
      expect(rule.getRedirectLocation('key', defaults)).to.equal(
        'https://example.com/key',
      );
    });

    it('redirects using only Protocol', () => {
      const rule = new RoutingRule({
        Redirect: {
          Protocol: 'http',
        },
      });

      expect(rule.statusCode).to.equal(301);
      expect(rule.getRedirectLocation('key', defaults)).to.equal(
        'http://example.com/key',
      );
    });

    it('redirects using only ReplaceKeyPrefixWith', () => {
      const rule = new RoutingRule({
        Condition: {
          KeyPrefixEquals: 'prefix',
        },
        Redirect: {
          ReplaceKeyPrefixWith: 'replacement',
        },
      });

      expect(rule.statusCode).to.equal(301);
      expect(rule.getRedirectLocation('prefix/key', defaults)).to.equal(
        'https://example.com/replacement/key',
      );
    });

    // with no Condition the matched prefix is empty, so the replacement is
    // prepended to the full key
    it('replaces blank prefix with ReplaceKeyPrefixWith', () => {
      const rule = new RoutingRule({
        Redirect: {
          ReplaceKeyPrefixWith: 'replacement/',
        },
      });

      expect(rule.statusCode).to.equal(301);
      expect(rule.getRedirectLocation('prefix/key', defaults)).to.equal(
        'https://example.com/replacement/prefix/key',
      );
    });

    it('redirects using only ReplaceKeyWith', () => {
      const rule = new RoutingRule({
        Redirect: {
          ReplaceKeyWith: 'replacement',
        },
      });

      expect(rule.statusCode).to.equal(301);
      expect(rule.getRedirectLocation('key', defaults)).to.equal(
        'https://example.com/replacement',
      );
    });

    it('redirects using a combination of options', () => {
      const rule = new RoutingRule({
        Condition: {
          KeyPrefixEquals: 'prefix',
        },
        Redirect: {
          Protocol: 'http',
          HttpRedirectCode: 307,
          HostName: 'localhost',
          ReplaceKeyPrefixWith: 'replacement',
        },
      });

      expect(rule.statusCode).to.equal(307);
      expect(rule.getRedirectLocation('prefix/key', defaults)).to.equal(
        'http://localhost/replacement/key',
      );
    });
  });
});
164 |
--------------------------------------------------------------------------------
/lib/utils.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const crypto = require('crypto');
4 | const xmlParser = require('fast-xml-parser');
5 | const fs = require('fs');
6 | const he = require('he');
7 | const path = require('path');
8 | const { PassThrough } = require('stream');
9 |
10 | const S3Error = require('./models/error');
11 |
12 | exports.walk = function* walk(dir, recurseFilter) {
13 | for (const filename of fs.readdirSync(dir)) {
14 | const filePath = path.posix.join(dir, filename);
15 | const stats = fs.statSync(filePath);
16 | if (!stats.isDirectory()) {
17 | yield filePath;
18 | } else if (!recurseFilter || recurseFilter(filePath)) {
19 | yield* walk(filePath, recurseFilter);
20 | }
21 | }
22 | };
23 |
24 | exports.capitalizeHeader = function (header) {
25 | const exceptions = {
26 | 'content-md5': 'Content-MD5',
27 | dnt: 'DNT',
28 | etag: 'ETag',
29 | 'last-event-id': 'Last-Event-ID',
30 | tcn: 'TCN',
31 | te: 'TE',
32 | 'www-authenticate': 'WWW-Authenticate',
33 | 'x-dnsprefetch-control': 'X-DNSPrefetch-Control',
34 | };
35 |
36 | header = header.toLowerCase();
37 |
38 | if (header in exceptions) return exceptions[header];
39 | if (header.startsWith('x-amz-')) return header;
40 |
41 | // Capitalize the first letter of each word
42 | return header
43 | .split('-')
44 | .map((word) => word[0].toUpperCase() + word.slice(1))
45 | .join('-');
46 | };
47 |
48 | exports.concatStreams = function (streams) {
49 | const passThrough = new PassThrough();
50 | streams = [...streams];
51 | const pipeNext = (stream) => {
52 | if (!stream) return passThrough.end();
53 |
54 | stream.once('end', () => pipeNext(streams.shift()));
55 | stream.pipe(passThrough, { end: false });
56 | };
57 | pipeNext(streams.shift());
58 | return passThrough;
59 | };
60 |
61 | /**
62 | * URI-encodes a string according to RFC 3986. This is what AWS uses for
63 | * S3 resource URIs.
64 | *
65 | * @param {string} string
66 | */
67 | exports.encodeURIComponentRFC3986 = function (string) {
68 | return encodeURIComponent(string).replace(
69 | /[!'()*]/g,
70 | (ch) => '%' + ch.charCodeAt(0).toString(16).toUpperCase(),
71 | );
72 | };
73 |
// Returns the tag name of an XML document's root element.
// getTraversalObj exposes children keyed by tag name; the first child of the
// virtual document root is the root element.
// NOTE(review): the destructuring throws if the document has no elements at
// all — presumably callers validate the XML first; confirm.
exports.getXmlRootTag = function (xml) {
  const traversal = xmlParser.getTraversalObj(xml.toString());
  const [[root]] = Object.values(traversal.child);
  return root && root.tagname;
};
79 |
80 | exports.randomBase64String = function (length) {
81 | return crypto
82 | .randomBytes(Math.ceil((length * 3) / 4))
83 | .toString('base64')
84 | .slice(0, length);
85 | };
86 |
87 | exports.randomHexString = function (length) {
88 | return crypto
89 | .randomBytes(Math.ceil(length / 2))
90 | .toString('hex')
91 | .slice(0, length);
92 | };
93 |
94 | /**
95 | * Inserts separators into AWS ISO8601 formatted-dates to make it parsable by JS.
96 | *
97 | * @param dateString
98 | */
99 | exports.parseISO8601String = function (dateString) {
100 | if (typeof dateString !== 'string') {
101 | return new Date(NaN);
102 | }
103 | // attempt to parse as ISO8601 with inserted separators
104 | // yyyyMMddTHHmmssZ
105 | // ^ ^ ^ ^
106 | const chars = [...dateString];
107 | chars.splice(13, 0, ':');
108 | chars.splice(11, 0, ':');
109 | chars.splice(6, 0, '-');
110 | chars.splice(4, 0, '-');
111 | return new Date(chars.join(''));
112 | };
113 |
114 | /**
115 | * Attempts to parse a dateString as a regular JS Date before falling back to
116 | * AWS's "ISO8601 Long Format" date.
117 | *
118 | * @param dateString
119 | */
120 | exports.parseDate = function (dateString) {
121 | let date = new Date(dateString);
122 | if (isNaN(date)) {
123 | date = exports.parseISO8601String(dateString);
124 | }
125 | return date;
126 | };
127 |
128 | /**
129 | * Like Date.prototype.toISOString(), but without separators and milliseconds.
130 | *
131 | * @param date
132 | */
133 | exports.toISO8601String = function (date) {
134 | return new Date(date).toISOString().replace(/[-:]|\.\d+/g, '');
135 | };
136 |
137 | /**
138 | * Reads a request body to as parsed XML.
139 | *
140 | * @param {Koa.Context} ctx
141 | */
142 | exports.xmlBodyParser = async function xmlBodyParser(ctx) {
143 | const { req } = ctx;
144 | const xmlString = await new Promise((resolve, reject) => {
145 | let payload = '';
146 | req.on('data', (data) => (payload += data.toString('utf8')));
147 | req.on('end', () => resolve(payload));
148 | req.on('error', reject);
149 | });
150 | if (xmlParser.validate(xmlString) !== true) {
151 | throw new S3Error(
152 | 'MalformedXML',
153 | 'The XML you provided was not well-formed or did not validate against ' +
154 | 'our published schema.',
155 | );
156 | }
157 | ctx.request.body = xmlParser.parse(xmlString, {
158 | tagValueProcessor: (a) => he.decode(a),
159 | });
160 | };
161 |
162 | /**
163 | * Reads a request body stream to a string.
164 | *
165 | * @param {Koa.Context} ctx
166 | */
167 | exports.utf8BodyParser = async function (ctx) {
168 | const { req } = ctx;
169 | ctx.request.body = await new Promise((resolve, reject) => {
170 | let payload = '';
171 | req.on('data', (data) => (payload += data.toString('utf8')));
172 | req.on('end', () => resolve(payload));
173 | req.on('error', reject);
174 | });
175 | };
176 |
177 | exports.ensureDir = async function (dirPath) {
178 | const options = { recursive: true, mode: 0o0755 };
179 | if (process.platform === 'win32') {
180 | delete options.mode;
181 | }
182 | await fs.promises.mkdir(dirPath, options);
183 | };
184 |
--------------------------------------------------------------------------------
/lib/models/error.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { mapKeys, omit } = require('lodash');
4 | const { j2xParser } = require('fast-xml-parser');
5 | const he = require('he');
6 |
/**
 * S3-flavored error type. Carries an S3 error code, an HTTP status derived
 * from that code, per-error detail fields, and matching x-amz-error-*
 * headers; renders itself as an XML or HTML response body.
 */
class S3Error extends Error {
  /**
   * Wraps an arbitrary error as an InternalError, copying its extra fields
   * but hiding its message unless the original was marked exposable.
   */
  static fromError(err) {
    return Object.assign(
      new S3Error(
        'InternalError',
        err.expose
          ? err.message
          : 'We encountered an internal error. Please try again.',
      ),
      omit(err, 'code', 'message', 'expose'),
    );
  }

  /**
   * @param {string} code - S3 error code (a key of s3Statuses)
   * @param {string} message - human-readable message
   * @param {object} detail - extra fields echoed in responses and headers
   */
  constructor(code, message, detail = {}) {
    super(message);
    this.code = code;
    this.detail = detail;
    this.errors = [];
    this.headers = {
      'x-amz-error-code': code,
      'x-amz-error-message': message,
      ...mapKeys(detail, (value, key) => `x-amz-error-detail-${key}`),
    };
    // unknown codes default to 500; only sub-500 messages are exposable
    this.status = s3Statuses[code] || 500;
    this.expose = this.status < 500;
  }

  // Renders the standard S3 XML error body.
  toXML() {
    const parser = new j2xParser({
      tagValueProcessor: (a) =>
        he.encode(a.toString(), { useNamedReferences: true }),
    });
    return [
      '',
      parser.parse({
        Error: {
          Code: this.code,
          Message: this.message,
          ...this.detail,
        },
      }),
    ].join('\n');
  }

  // Renders an HTML error page.
  // NOTE(review): the markup inside the template literals below appears to
  // have been stripped/mangled in this view of the file — verify against the
  // repository before editing anything in this method.
  toHTML() {
    const encode = (a) => he.encode(a.toString(), { useNamedReferences: true });
    return [
      // Real S3 doesn't respond with DOCTYPE
      '',
      // prettier-ignore
      `${encode(this.description)}`,
      '',
      `${encode(this.description)}
`,
      '',
      `- Code: ${encode(this.code)}
`,
      // NOTE(review): this.message is not passed through encode() here,
      // unlike the nested error messages below — confirm this is intentional.
      `- Message: ${this.message}
`,
      Object.entries(this.detail)
        .map(([key, value]) => `- ${key}: ${encode(value)}
`)
        .join('\n'),
      '
',
      (this.errors || [])
        .map((error) =>
          [
            `${encode(error.description)}
`,
            '',
            `- Code: ${encode(error.code)}
`,
            `- Message: ${encode(error.message)}
`,
            Object.entries(error.detail)
              .map(([key, value]) => `- ${key}: ${encode(value)}
`)
              .join('\n'),
            '
',
          ].join('\n'),
        )
        .join('\n'),
      '
',
      '',
      '',
      '', // trailing newline
    ].join('\n');
  }
}
module.exports = S3Error;
89 |
// sourced from https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
// Maps each S3 error code to its HTTP status; codes absent from this table
// fall back to 500 in the S3Error constructor.
const s3Statuses = {
  AccessDenied: 403,
  AccountProblem: 403,
  AllAccessDisabled: 403,
  AmbiguousGrantByEmailAddress: 400,
  AuthorizationHeaderMalformed: 400,
  BadDigest: 400,
  BucketAlreadyExists: 409,
  BucketAlreadyOwnedByYou: 409,
  BucketNotEmpty: 409,
  CredentialsNotSupported: 400,
  CrossLocationLoggingProhibited: 403,
  EntityTooSmall: 400,
  EntityTooLarge: 400,
  ExpiredToken: 400,
  IllegalVersioningConfigurationException: 400,
  IncompleteBody: 400,
  IncorrectNumberOfFilesInPostRequest: 400,
  InlineDataTooLarge: 400,
  InternalError: 500,
  InvalidAccessKeyId: 403,
  InvalidArgument: 400,
  InvalidBucketName: 400,
  InvalidBucketState: 409,
  InvalidDigest: 400,
  InvalidEncryptionAlgorithmError: 400,
  InvalidLocationConstraint: 400,
  InvalidObjectState: 403,
  InvalidPart: 400,
  InvalidPartOrder: 400,
  InvalidPayer: 403,
  InvalidPolicyDocument: 400,
  InvalidRange: 416,
  InvalidRequest: 400,
  InvalidSecurity: 403,
  InvalidSOAPRequest: 400,
  InvalidStorageClass: 400,
  InvalidTargetBucketForLogging: 400,
  InvalidToken: 400,
  InvalidURI: 400,
  KeyTooLongError: 400,
  MalformedACLError: 400,
  MalformedPOSTRequest: 400,
  MalformedXML: 400,
  MaxMessageLengthExceeded: 400,
  MaxPostPreDataLengthExceededError: 400,
  MetadataTooLarge: 400,
  MethodNotAllowed: 405,
  MissingContentLength: 411,
  MissingRequestBodyError: 400,
  MissingSecurityElement: 400,
  MissingSecurityHeader: 400,
  NoLoggingStatusForKey: 400,
  NoSuchBucket: 404,
  NoSuchBucketPolicy: 404,
  NoSuchKey: 404,
  NoSuchLifecycleConfiguration: 404,
  NoSuchUpload: 404,
  NoSuchVersion: 404,
  NotImplemented: 501,
  NotSignedUp: 403,
  OperationAborted: 409,
  PermanentRedirect: 301,
  PreconditionFailed: 412,
  Redirect: 307,
  RestoreAlreadyInProgress: 409,
  RequestIsNotMultiPartContent: 400,
  RequestTimeout: 400,
  RequestTimeTooSkewed: 403,
  RequestTorrentOfBucketError: 400,
  SignatureDoesNotMatch: 403,
  ServiceUnavailable: 503,
  SlowDown: 503,
  TemporaryRedirect: 307,
  TokenRefreshRequired: 400,
  TooManyBuckets: 400,
  UnexpectedContent: 400,
  UnresolvableGrantByEmailAddress: 400,
  UserKeyMustBeSpecified: 400,

  // Additional errors not documented by the above
  AuthorizationQueryParametersError: 400,
  BadRequest: 403,
  CORSResponse: 403,
  InvalidChunkSizeError: 403,
  InvalidRedirectLocation: 400,
  NoSuchCORSConfiguration: 404,
  NoSuchWebsiteConfiguration: 404,
  UnsupportedQuery: 404,
};
181 |
--------------------------------------------------------------------------------
/lib/routes.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const Router = require('@koa/router');
4 | const { union } = require('lodash');
5 |
6 | const bucketCtrl = require('./controllers/bucket');
7 | const objectCtrl = require('./controllers/object');
8 | const serviceCtrl = require('./controllers/service');
9 | const authenticationMiddleware = require('./middleware/authentication');
10 | const corsMiddleware = require('./middleware/cors');
11 | const responseHeaderOverrideMiddleware = require('./middleware/response-header-override');
12 | const websiteMiddleware = require('./middleware/website');
13 | const S3Error = require('./models/error');
14 |
const router = new Router();

// NOTE: The below is only an approximation of S3's behavior
// For the most part, S3 will complain if you attempt a bucket method on an object, but
// it won't consistently reject actions on buckets that are supported by objects (and vice-versa).
//
// Middleware factory: resolves which subresource "query method" (?cors,
// ?website, ...) a request targets.
// - more than one recognized subresource param  -> InvalidArgument
// - exactly one                                 -> stored on ctx.params.queryMethod
// After the downstream route handler runs, a handler may have flagged
// ctx.state.methodIsNotAllowed to convert the response into MethodNotAllowed.
const queryMethod = (methods) =>
  async function queryMethod(ctx, next) {
    const matchedMethods = methods.filter((method) => method in ctx.query);
    if (matchedMethods.length > 1) {
      throw new S3Error(
        'InvalidArgument',
        `Conflicting query string parameters: ${matchedMethods.join(', ')}`,
        {
          ArgumentName: 'ResourceType',
          ArgumentValue: matchedMethods[0],
        },
      );
    }
    if (matchedMethods.length === 1) {
      ctx.params.queryMethod = matchedMethods[0];
    }
    await next();
    if (ctx.state.methodIsNotAllowed) {
      throw new S3Error(
        'MethodNotAllowed',
        'The specified method is not allowed against this resource.',
        {
          Method: ctx.method.toUpperCase(),
          ResourceType: ctx.params.queryMethod.toUpperCase(),
        },
      );
    }
  };
48 |
49 | router.all('/:bucket/:key*', corsMiddleware());
50 | router.use('/:bucket/:key*', websiteMiddleware());
51 | router.use('/:bucket?/:key*', authenticationMiddleware());
52 | router.use('/:bucket/:key*', responseHeaderOverrideMiddleware());
53 |
54 | router.get('/', serviceCtrl.getService);
55 |
// Bucket-level routes. queryMethod() resolves which (if any) bucket
// subresource the query string selects; each handler then dispatches on it.
// Unsupported-but-real S3 subresources throw NotImplemented; unknown ones
// flag methodIsNotAllowed so queryMethod() raises MethodNotAllowed on unwind.
router
  .use('/:bucket', queryMethod(bucketCtrl.METHODS))
  // DELETE /:bucket — delete the bucket or one of its subresource configs
  .delete('/:bucket', bucketCtrl.bucketExists, (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return bucketCtrl.deleteBucket(ctx);
      case 'cors':
        return bucketCtrl.deleteBucketCors(ctx);
      case 'website':
        return bucketCtrl.deleteBucketWebsite(ctx);
      case 'analysis':
      case 'encryption':
      case 'lifecycle':
      case 'publicAccessBlock':
      case 'metrics':
      case 'policy':
      case 'replication':
      case 'tagging':
        throw new S3Error(
          'NotImplemented',
          'A parameter you provided implies functionality that is not implemented',
        );
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // GET /:bucket — list objects, or fetch a subresource config
  .get('/:bucket', bucketCtrl.bucketExists, (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return bucketCtrl.getBucket(ctx);
      case 'cors':
        return bucketCtrl.getBucketCors(ctx);
      case 'location':
        return bucketCtrl.getBucketLocation(ctx);
      case 'website':
        return bucketCtrl.getBucketWebsite(ctx);
      case 'acl':
      case 'analytics':
      case 'encryption':
      case 'inventory':
      case 'lifecycle':
      case 'logging':
      case 'metrics':
      case 'notification':
      case 'object-lock':
      case 'policyStatus':
      case 'publicAccessBlock':
      case 'replication':
      case 'requestPayment':
      case 'tagging':
      case 'uploads':
      case 'versioning':
        throw new S3Error(
          'NotImplemented',
          'A parameter you provided implies functionality that is not implemented',
        );
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // POST /:bucket — browser form upload (POST Object) or bulk delete
  .post('/:bucket', bucketCtrl.bucketExists, (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return objectCtrl.postObject(ctx);
      case 'delete':
        return objectCtrl.deleteMultipleObjects(ctx);
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // PUT /:bucket — create the bucket, or store a subresource config.
  // bucketExists is only enforced for subresource PUTs; a plain PUT is
  // itself the bucket-creation request.
  .put('/:bucket', async (ctx) => {
    if (ctx.params.queryMethod) {
      await bucketCtrl.bucketExists(ctx);
    }
    switch (ctx.params.queryMethod) {
      case undefined:
        return bucketCtrl.putBucket(ctx);
      case 'cors':
        return bucketCtrl.putBucketCors(ctx);
      case 'website':
        return bucketCtrl.putBucketWebsite(ctx);
      case 'accelerate':
      case 'acl':
      case 'analytics':
      case 'encryption':
      case 'inventory':
      case 'lifecycle':
      case 'logging':
      case 'metrics':
      case 'notification':
      case 'policy':
      case 'publicAccessBlock':
      case 'replication':
      case 'requestPayment':
      case 'tagging':
      case 'versioning':
        throw new S3Error(
          'NotImplemented',
          'A parameter you provided implies functionality that is not implemented',
        );
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  });
160 |
// Object-level routes. Objects accept the union of bucket and object
// subresources here because S3 doesn't consistently reject bucket
// subresources on object paths (see NOTE above queryMethod).
const objectMethods = union(bucketCtrl.METHODS, objectCtrl.METHODS).sort();
router
  .use('/:bucket/:key+', bucketCtrl.bucketExists, queryMethod(objectMethods))
  // DELETE /:bucket/:key — delete the object
  .delete('/:bucket/:key+', (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return objectCtrl.deleteObject(ctx);
      case 'tagging':
        throw new S3Error('NotImplemented');
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // GET /:bucket/:key — fetch the object, its (dummy) ACL, or its tags
  .get('/:bucket/:key+', (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return objectCtrl.getObject(ctx);
      case 'acl':
        return objectCtrl.getObjectAcl(ctx);
      case 'tagging':
        return objectCtrl.getObjectTagging(ctx);
      case 'legal-hold':
      case 'retention':
      case 'torrent':
        throw new S3Error(
          'NotImplemented',
          'A parameter you provided implies functionality that is not implemented',
        );
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // POST /:bucket/:key — multipart upload lifecycle (initiate/complete)
  .post('/:bucket/:key+', (ctx) => {
    switch (ctx.params.queryMethod) {
      case 'uploadId':
        return objectCtrl.completeMultipartUpload(ctx);
      case 'uploads':
        return objectCtrl.initiateMultipartUpload(ctx);
      case undefined:
      case 'select':
        throw new S3Error('NotImplemented');
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  })
  // PUT /:bucket/:key — write/copy the object, upload a part, or set tags.
  // The x-amz-copy-source header distinguishes copy operations from uploads.
  .put('/:bucket/:key+', (ctx) => {
    switch (ctx.params.queryMethod) {
      case undefined:
        return 'x-amz-copy-source' in ctx.headers
          ? objectCtrl.putObjectCopy(ctx)
          : objectCtrl.putObject(ctx);
      case 'uploadId':
        return 'x-amz-copy-source' in ctx.headers
          ? objectCtrl.uploadPartCopy(ctx)
          : objectCtrl.uploadPart(ctx);
      case 'tagging':
        return objectCtrl.putObjectTagging(ctx);
      case 'acl':
        throw new S3Error(
          'NotImplemented',
          'A parameter you provided implies functionality that is not implemented',
        );
      default:
        ctx.state.methodIsNotAllowed = true;
    }
  });
227 |
// append trailing slash to key when applicable — :key+ strips it during
// matching, but S3 treats "key" and "key/" as distinct object keys
router.param('key', (key, ctx, next) => {
  const hasTrailingSlash = ctx.path.endsWith('/');
  if (key && hasTrailingSlash) {
    ctx.params.key = `${key}/`;
  }
  return next();
});

module.exports = router;
237 |
--------------------------------------------------------------------------------
/lib/middleware/website.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const status = require('statuses');
4 | const { format } = require('util');
5 |
6 | const { getObject } = require('../controllers/object');
7 | const S3Error = require('../models/error');
8 |
/**
 * Middleware simulating S3's static website hosting mode.
 *
 * For website-mode requests it installs an S3-flavored redirect helper and an
 * HTML error handler, then serves index documents and applies RoutingRules
 * when the underlying object lookup misses.
 */
exports = module.exports = () =>
  async function website(ctx, next) {
    // install the S3-style redirect helper (defined later in this module)
    ctx.redirect = ctx.request.redirect = redirect;

    // validate an incoming website redirect location if one is set
    if (ctx.get('x-amz-website-redirect-location')) {
      // the header is only meaningful on object PUTs
      if (!ctx.params.key || ctx.method !== 'PUT') {
        throw new S3Error(
          'InvalidArgument',
          'x-amz-website-redirect-location is not supported for this operation.',
          { ArgumentName: 'x-amz-website-redirect-location' },
        );
      } else if (
        !/^(https?:\/)?\//.test(ctx.get('x-amz-website-redirect-location'))
      ) {
        throw new S3Error(
          'InvalidArgument',
          "The website redirect location must have a prefix of 'http://' or 'https://' or '/'.",
        );
      }
    }

    const config = await ctx.store.getSubresource(
      ctx.params.bucket,
      undefined,
      'website',
    );
    if (ctx.state.service === 's3' || (!ctx.state.service && !config)) {
      // requests to the API endpoint use normal output behavior
      // (vhost-style buckets without website configurations also always use this behavior)
      return next();
    }

    // from here on errors render as HTML (see onerror below in this module)
    ctx.onerror = onerror;
    ctx.state.website = config || {};

    // throw website-specific errors for requests to a .s3-website vhost
    if (ctx.state.service === 's3-website') {
      // disallow x-amz-* query params for website requests
      for (const key of Object.keys(ctx.query)) {
        if (key.toLowerCase().startsWith('x-amz-')) {
          throw new S3Error(
            'UnsupportedQuery',
            'The request contained an unsupported query string parameter.',
            { ParameterName: key },
          );
        }
      }
      if (!config) {
        throw new S3Error(
          'NoSuchWebsiteConfiguration',
          'The specified bucket does not have a website configuration',
          { BucketName: ctx.params.bucket },
        );
      }
    }

    try {
      if (!ctx.params.key) {
        // bucket root: synthesize a NoSuchKey with an empty key so the
        // catch block below serves the root index document
        throw new S3Error('NoSuchKey', '', { Key: '' });
      }
      await next();
    } catch (err) {
      // only missing keys trigger index-document / RoutingRules handling
      if (err.code !== 'NoSuchKey') throw err;

      const key = err.detail.Key;
      // treat the missing key as a "directory" prefix for index lookup
      const indexDocumentPrefix =
        key === '' || key.endsWith('/') ? key : key + '/';
      const indexExists = await ctx.store.existsObject(
        ctx.params.bucket,
        indexDocumentPrefix + config.indexDocumentSuffix,
      );

      if (indexExists) {
        if (key !== indexDocumentPrefix) {
          // Redirect keys that do not have a trailing slash when an index document exists
          if (ctx.state.vhost) {
            ctx.redirect(`/${key}/`);
          } else {
            // This isn't possible on real S3, but for convenience this allows website
            // redirects without setting up virtual hosts
            ctx.redirect(`/${ctx.params.bucket}/${key}/`);
          }
        } else {
          // rewrite the request to the index document and serve it directly
          ctx.params = {
            ...ctx.params,
            key: indexDocumentPrefix + config.indexDocumentSuffix,
          };
          await getObject(ctx);
        }
      } else {
        // Only 404s are supported for RoutingRules right now, this may be a deviation from S3 behaviour but we don't
        // have a reproduction of a scenario where S3 does a redirect on a status code other than 404. If you're
        // reading this comment and you have a use-case, please raise an issue with details of your scenario. Thanks!
        const routingRule = (config.routingRules || []).find((rule) =>
          rule.shouldRedirect(key, 404),
        );
        if (!routingRule) {
          throw new S3Error('NoSuchKey', 'The specified key does not exist.', {
            Key: indexDocumentPrefix + config.indexDocumentSuffix,
          });
        }
        const location = routingRule.getRedirectLocation(key, {
          protocol: ctx.protocol,
          hostname: ctx.state.vhost
            ? ctx.host
            : `${ctx.host}/${ctx.params.bucket}`,
        });

        ctx.status = routingRule.statusCode;
        ctx.redirect(location);
      }
    } finally {
      // if the served object carries a website redirect, discard the body
      // stream and answer with a 301 to the stored location instead
      // (assumes ctx.body is a stream here — TODO confirm for all paths)
      const objectRedirectLocation = ctx.response.get(
        'x-amz-website-redirect-location',
      );
      if (objectRedirectLocation) {
        ctx.body.destroy();
        ctx.status = 301;
        ctx.remove('x-amz-website-redirect-location');
        ctx.redirect(objectRedirectLocation);
      }
    }
  };
133 |
/**
 * Overrides Koa's redirect behavior with one more closely matching S3
 *
 * @param {string} url redirect target written to the Location header
 */
function redirect(url) {
  const { res } = this;

  // drop every response header except CORS, Vary and x-amz-* ones
  for (const name of res.getHeaderNames()) {
    if (!/^access-control-|vary|x-amz-/i.test(name)) {
      res.removeHeader(name);
    }
  }

  this.set('Location', url);

  // fall back to 302 unless a redirect status was already assigned
  if (!status.redirect[this.status]) {
    this.status = 302;
  }

  if (this.status === 302) {
    // S3 answers 302s with an HTML error document
    const found = new S3Error('Found', 'Resource Found');
    found.description = '302 Moved Temporarily';
    this.body = found.toHTML();
    this.type = 'text/html';
  } else {
    this.body = '';
    this.type = '';
  }
}
162 |
/**
 * Koa context.onerror handler modified to write a HTML-formatted response body
 * (error document support per the bucket's website configuration)
 * @param {Error} err
 */
async function onerror(err) {
  // don't do anything if there is no error.
  // this allows you to pass `this.onerror`
  // to node-style callbacks.
  if (err == null) return;

  // normalize non-Error throwables (strings, objects) into a real Error
  if (!(err instanceof Error))
    err = new Error(format('non-error thrown: %j', err));

  let headerSent = false;
  if (this.headerSent || !this.writable) {
    headerSent = err.headerSent = true;
  }

  // delegate
  this.app.emit('error', err, this);

  // nothing we can do here other
  // than delegate to the app-level
  // handler and log.
  if (headerSent) {
    return;
  }

  const { res } = this;

  // wrap unexpected errors so they render like S3 errors
  if (!(err instanceof S3Error)) {
    err = S3Error.fromError(err);
  }

  // first unset all headers (keep CORS, Vary and x-amz-* headers)
  res
    .getHeaderNames()
    .filter((name) => !name.match(/^access-control-|vary|x-amz-/i))
    .forEach((name) => res.removeHeader(name));

  // (the presence of x-amz-error-* headers needs additional research)
  // this.set(err.headers);

  // force text/html
  this.type = 'text/html';

  if (!err.description)
    err.description = `${err.status} ${status.message[err.status]}`;

  // respond
  const { website } = this.state;
  if (
    err.code !== 'NoSuchBucket' &&
    err.code !== 'UnsupportedQuery' &&
    website.errorDocumentKey
  ) {
    // attempt to serve error document
    const object = await this.store.getObject(
      this.params.bucket,
      website.errorDocumentKey,
    );
    if (object) {
      // the error document itself may carry a website redirect
      const objectRedirectLocation =
        object.metadata['x-amz-website-redirect-location'];
      if (objectRedirectLocation) {
        object.content.destroy();
        this.status = 301;
        this.redirect(objectRedirectLocation);
        res.end(this.body);
      } else {
        // stream the error document as the response body
        this.type = object.metadata['content-type'];
        this.length = object.size;
        object.content.pipe(res);
      }
      return;
    }
    // configured error document is missing: fall through to the generic
    // HTML error page, appending a nested error describing the miss
    this.logger.error(
      'Custom Error Document not found: ' + website.errorDocumentKey,
    );
    const errorDocumentErr = new S3Error(
      'NoSuchKey',
      'The specified key does not exist.',
      { Key: website.errorDocumentKey },
    );
    errorDocumentErr.description =
      'An Error Occurred While Attempting to Retrieve a Custom Error Document';
    err.errors.push(errorDocumentErr);
  }

  const msg = err.toHTML();
  this.status = err.status;
  this.length = Buffer.byteLength(msg);
  res.end(msg);
}
257 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # S3rver
2 |
3 | [](https://nodei.co/npm/s3rver/)
4 |
5 | [](https://travis-ci.org/jamhall/s3rver)
6 | [](https://david-dm.org/jamhall/s3rver)
7 | [](https://david-dm.org/jamhall/s3rver?type=dev)
8 |
9 | S3rver is a lightweight server that responds to **some** of the same calls [Amazon S3](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html) responds to. It is extremely useful for testing S3 in a sandbox environment without actually making calls to Amazon.
10 |
11 | The goal of S3rver is to minimise runtime dependencies and be more of a development tool to test S3 calls in your code rather than a production server looking to duplicate S3 functionality.
12 |
13 | ## Supported methods
14 |
15 | ### Buckets
16 |
17 | - Create bucket
18 | - Delete bucket
19 | - List buckets
20 | - List content of buckets (prefix, delimiter, marker and max keys, common prefixes)
21 |
22 | ### Objects
23 |
24 | - Put object (support for metadata, including ContentEncoding (gzipped files))
25 | - Post object (multipart)
26 | - Delete object(s)
27 | - Get object (including using the HEAD method)
28 | - Get dummy ACLs for an object
29 | - Copy object (including updating of metadata)
30 | - Listen to Put, Copy, Post and Delete events.
31 |
32 | ## Quick Start
33 |
34 | Install s3rver:
35 |
36 | ```bash
37 | $ npm install s3rver -g
38 | ```
39 |
40 | You will now have a command on your path called _s3rver_
41 |
42 | Executing this command for the various options:
43 |
44 | ```bash
45 | $ s3rver --help
46 | ```
47 |
48 | ## Supported clients
49 |
50 | Please see [Fake S3's wiki page](https://github.com/jubos/fake-s3/wiki/Supported-Clients) for a list of supported clients.
51 | When listening on HTTPS with a self-signed certificate, the AWS SDK in a Node.js environment will need `httpOptions: { agent: new https.Agent({ rejectUnauthorized: false }) }` in order to allow interaction.
52 |
53 | If your client only supports signed requests, specify the credentials
54 |
55 | ```javascript
56 | {
57 | accessKeyId: "S3RVER",
58 | secretAccessKey: "S3RVER",
59 | }
60 | ```
61 |
62 | in your client's configuration.
63 |
64 | Please test, if you encounter any problems please do not hesitate to open an issue :)
65 |
66 | ## Static Website Hosting
67 |
68 | If you specify a [website configuration file](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html#RESTBucketPUTwebsite-examples),
69 | S3rver supports simulating S3's static website mode for incoming `GET` requests.
70 |
71 | ### Hostname Resolution
72 |
73 | By default a bucket name needs to be given. So for a bucket called `mysite.local`, with an indexDocument of `index.html`. Visiting `http://localhost:4568/mysite.local/` in your browser will display the `index.html` file uploaded to the bucket.
74 |
75 | However you can also set up a local hostname in your /etc/hosts file pointing at 127.0.0.1
76 |
77 | ```
78 | 127.0.0.1 localhost
79 | 127.0.0.1 mysite.local
80 | ```
81 |
82 | Now you can access the served content at `http://mysite.local:4568/`
83 |
84 | ## Tests
85 |
86 | The tests should be run by one of the active LTS versions. The CI Server runs the tests on the latest active releases.
87 |
88 | To run the test suite, first install the dependencies, then run `npm test`:
89 |
90 | ```bash
91 | $ npm install
92 | $ npm test
93 | ```
94 |
95 | ## Programmatically running s3rver
96 |
97 | You can also run s3rver programmatically.
98 |
99 | > This is particularly useful if you want to integrate s3rver into another project's tests that depend on access to an s3 environment
100 |
101 | ## Class: `S3rver`
102 |
103 | ### new S3rver([options])
104 |
105 | Creates a S3rver instance
106 |
107 |
108 | | Option | Type | Default | Description
109 | | ------------------------------ | -------------------- | --------------- | -----------
110 | | address | `string` | `localhost` | Host/IP to bind to
111 | | port | `number` | `4568` | Port of the HTTP server
112 | | key | `string` \| `Buffer` | | Private key for running with TLS
113 | | cert | `string` \| `Buffer` | | Certificate for running with TLS
114 | | silent | `boolean` | `false` | Suppress log messages
115 | | serviceEndpoint | `string` | `amazonaws.com` | Override the AWS service root for subdomain-style access
116 | | directory | `string` | | Data directory
117 | | resetOnClose | `boolean` | `false` | Remove all bucket data on server close
118 | | allowMismatchedSignatures | `boolean` | `false` | Prevent `SignatureDoesNotMatch` errors for all well-formed signatures
119 | | vhostBuckets | `boolean` | `true` | Allow vhost-style access for all buckets
120 | | configureBuckets\[].name | `string` | | The name of a prefabricated bucket to create when the server starts
121 | | configureBuckets\[].configs\[] | `string` \| `Buffer` | | Raw XML string or Buffer of Bucket config
122 |
123 |
124 | For your convenience, we've provided sample bucket configurations you can access using `require.resolve`:
125 |
126 | ```javascript
127 | const corsConfig = require.resolve('s3rver/example/cors.xml');
128 | const websiteConfig = require.resolve('s3rver/example/website.xml');
129 |
130 | const s3rver = new S3rver({
131 | configureBuckets: [
132 | {
133 | name: 'test-bucket',
134 | configs: [fs.readFileSync(corsConfig), fs.readFileSync(websiteConfig)],
135 | },
136 | ],
137 | });
138 | ```
139 |
140 | Additional references for defining these configurations can be found here:
141 |
142 | - CORS: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTcors.html
143 | - Static website: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html
144 |
145 | ### s3rver.run(callback)
146 |
147 | ### s3rver.close(callback)
148 |
149 | Starts/stops the server on the configured port and host. Returns a Promise if no callback is specified.
150 |
151 | Example in mocha:
152 |
153 | ```javascript
154 | const S3rver = require('s3rver');
155 | let instance;
156 |
157 | before(function (done) {
158 | instance = new S3rver({
159 | port: 4569,
160 | address: 'localhost',
161 | silent: false,
162 | directory: '/tmp/s3rver_test_directory',
163 | }).run(done);
164 | });
165 |
166 | after(function (done) {
167 | instance.close(done);
168 | });
169 | ```
170 |
171 | ### s3rver.callback() ⇒ `function (req, res)`
172 |
173 | _Alias:_ **s3rver.getMiddleware()**
174 |
175 | Creates and returns a callback that can be passed into `http.createServer()` or mounted in an Express app.
176 |
177 | ### s3rver.configureBuckets() => `Promise`
178 |
179 | Convenience method for configuring a set of buckets without going through S3's
180 | API. Useful for quickly provisioning buckets before starting up the server.
181 |
182 | ### s3rver.reset() => `void`
183 |
184 | Resets all buckets and configurations supported by the configured store.
185 |
186 | ## Subscribing to S3 Events
187 |
188 | ### Event: `'event'`
189 |
190 | You can subscribe to notifications for PUT, POST, COPY and DELETE object events in the bucket when you run S3rver programmatically.
191 | Please refer to [AWS's documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html) for details of event object.
192 |
193 | ```javascript
194 | const S3rver = require('s3rver');
195 | const { fromEvent } = require('rxjs');
196 | const { filter } = require('rxjs/operators');
197 |
198 | const instance = new S3rver({
199 | port: 4569,
200 | address: '0.0.0.0',
201 | silent: false,
202 | directory: '/tmp/s3rver_test_directory',
203 | }).run((err, { address, port } = {}) => {
204 | if (err) {
205 | console.error(err);
206 | } else {
207 | console.log('now listening at address %s and port %d', address, port);
208 | }
209 | });
210 |
211 | const s3Events = fromEvent(instance, 'event');
212 | s3Events.subscribe((event) => console.log(event));
213 | s3Events
214 | .pipe(filter((event) => event.Records[0].eventName == 'ObjectCreated:Copy'))
215 | .subscribe((event) => console.log(event));
216 | ```
217 |
218 | ## Using [s3fs-fuse](https://github.com/s3fs-fuse/s3fs-fuse) with S3rver
219 |
220 | You can connect to s3rver and mount a bucket to your local file system by using the following command:
221 |
222 | ```bash
223 | $ s3fs bucket1 /tmp/3 -o url="http://localhost:4568" -o use_path_request_style -d -f -o f2 -o curldbg
224 | ```
225 |
--------------------------------------------------------------------------------
/lib/s3rver.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const xmlParser = require('fast-xml-parser');
4 | const Koa = require('koa');
5 | const { defaults, isPlainObject } = require('lodash');
6 | const he = require('he');
7 | const http = require('http');
8 | const https = require('https');
9 | const os = require('os');
10 | const path = require('path');
11 | const { callbackify, format, promisify } = require('util');
12 |
13 | const loggerMiddleware = require('./middleware/logger');
14 | const vhostMiddleware = require('./middleware/vhost');
15 | const { getConfigModel } = require('./models/config');
16 | const S3Error = require('./models/error');
17 | const FilesystemStore = require('./stores/filesystem');
18 | const router = require('./routes');
19 | const { getXmlRootTag } = require('./utils');
20 |
/**
 * A Koa application emulating a subset of the Amazon S3 REST API.
 * See S3rver.defaultOptions for the supported option keys; any remaining
 * keys are forwarded to the underlying http(s) server/listen options.
 */
class S3rver extends Koa {
  constructor(options) {
    super();
    // render errors as S3-style XML documents on every request context
    this.context.onerror = onerror;
    const {
      silent,
      serviceEndpoint,
      directory,
      resetOnClose,
      allowMismatchedSignatures,
      vhostBuckets,
      configureBuckets,
      ...serverOptions
    } = defaults({}, options, S3rver.defaultOptions);
    this.serverOptions = serverOptions;
    this._configureBuckets = configureBuckets;
    this.silent = silent;
    this.resetOnClose = resetOnClose;
    this.allowMismatchedSignatures = allowMismatchedSignatures;
    this.store = this.context.store = new FilesystemStore(directory);

    // Log all requests
    this.use(loggerMiddleware(this, silent));

    try {
      // encode object responses as XML
      const parser = new xmlParser.j2xParser({
        ignoreAttributes: false,
        attrNodeName: '@',
        tagValueProcessor: (a) => he.escape(a.toString()),
      });
      this.use(async (ctx, next) => {
        await next();
        if (isPlainObject(ctx.body)) {
          ctx.type = 'application/xml';
          // S3 responses begin with an XML declaration; restore it here
          // (previously only a bare "\n" was prepended)
          ctx.body =
            '<?xml version="1.0" encoding="UTF-8"?>\n' + parser.parse(ctx.body);
        }
      });

      // Express mount interop
      this.use((ctx, next) => {
        ctx.mountPath = ctx.mountPath || ctx.req.baseUrl;
        return next();
      });

      this.use(vhostMiddleware({ serviceEndpoint, vhostBuckets }));
      this.use(router.routes());
    } catch (err) {
      // release the logger's process-level exception hooks before rethrowing
      this.logger.exceptions.unhandle();
      this.logger.close();
      throw err;
    }
  }

  /**
   * Convenience method for configuring a set of buckets without going through
   * S3's API. Useful for quickly provisioning buckets before starting up the
   * server.
   *
   * @returns {Promise} resolves when every configured bucket has been created
   */
  async configureBuckets() {
    return Promise.all(
      this._configureBuckets.map(async (bucket) => {
        const bucketExists = !!(await this.store.getBucket(bucket.name));
        const replacedConfigs = [];
        await this.store.putBucket(bucket.name);
        for (const configXml of bucket.configs || []) {
          const xml = configXml.toString();
          let Model;
          // pick the config model from the XML document's root element
          switch (getXmlRootTag(xml)) {
            case 'CORSConfiguration':
              Model = getConfigModel('cors');
              break;
            case 'WebsiteConfiguration':
              Model = getConfigModel('website');
              break;
          }
          if (!Model) {
            throw new Error(
              'error reading bucket config: unsupported configuration type',
            );
          }
          const config = Model.validate(xml);
          const existingConfig = await this.store.getSubresource(
            bucket.name,
            undefined,
            config.type,
          );
          await this.store.putSubresource(bucket.name, undefined, config);
          if (existingConfig) {
            replacedConfigs.push(config.type);
          }
        }
        // warn if we're updating a bucket that already exists
        if (replacedConfigs.length) {
          this.logger.warn(
            'replaced %s config for bucket "%s"',
            replacedConfigs.join(),
            bucket.name,
          );
        } else if (bucketExists) {
          this.logger.warn('the bucket "%s" already exists', bucket.name);
        }
      }),
    );
  }

  /**
   * Resets all buckets and configurations supported by the configured store.
   */
  reset() {
    this.store.reset();
  }

  /**
   * Starts the HTTP server.
   *
   * @param {Function} [callback] Function called with (err, addressObj) as arguments.
   * @returns {this|Promise} The S3rver instance. If no callback function is supplied, a Promise
   *   resolving to the bound address is returned.
   */
  run(callback) {
    const runAsync = async () => {
      // provision any prefabricated buckets before accepting requests
      await this.configureBuckets();

      const { address, port, ...listenOptions } = this.serverOptions;
      this.httpServer = await this.listen(port, address, listenOptions);
      return this.httpServer.address();
    };

    if (typeof callback === 'function') {
      callbackify(runAsync)(callback);
      return this;
    } else {
      return runAsync();
    }
  }

  /**
   * Creates and starts the underlying http(s) server.
   * Uses HTTPS when key+cert (or pfx) are present in serverOptions.
   *
   * @returns {http.Server|Promise<http.Server>} the server (Promise when no
   *   callback is passed in args)
   */
  listen(...args) {
    const { key, cert, pfx } = this.serverOptions;
    const server =
      (key && cert) || pfx
        ? https.createServer(this.serverOptions)
        : http.createServer(); // Node < 8.12 does not support http.createServer([options])

    const [callback] = args.slice(-1);
    server.on('request', this.callback()).on('close', () => {
      // tear down logging hooks and optionally wipe data on shutdown
      this.logger.exceptions.unhandle();
      this.logger.close();
      if (this.resetOnClose) {
        this.reset();
      }
    });
    if (typeof callback === 'function') {
      return server.listen(...args);
    } else {
      return new Promise((resolve, reject) =>
        server.listen(...args, (err) => (err ? reject(err) : resolve(server))),
      );
    }
  }

  /**
   * Proxies httpServer.close().
   *
   * @param {Function} [callback]
   * @returns {this|Promise} this (callback style) or a Promise that resolves
   *   once the server has closed; rejects/errors when the server isn't running
   */
  close(callback) {
    if (!this.httpServer) {
      const err = new Error('Not running');
      if (typeof callback === 'function') {
        callback(err);
        return this;
      } else {
        return Promise.reject(err);
      }
    }
    if (typeof callback === 'function') {
      this.httpServer.close(callback);
      return this;
    } else {
      return promisify(this.httpServer.close.bind(this.httpServer))();
    }
  }
}
// Defaults merged into the constructor's options argument
// (see the README's "new S3rver([options])" table for descriptions)
S3rver.defaultOptions = {
  address: 'localhost',
  port: 4568,
  key: undefined,
  cert: undefined,
  silent: false,
  serviceEndpoint: 'amazonaws.com',
  directory: path.join(os.tmpdir(), 's3rver'),
  resetOnClose: false,
  allowMismatchedSignatures: false,
  vhostBuckets: true,
  configureBuckets: [],
};
// Express-friendly alias: app.use(new S3rver().getMiddleware())
S3rver.prototype.getMiddleware = S3rver.prototype.callback;

module.exports = S3rver;
223 |
/**
 * Koa context.onerror handler modified to write a XML-formatted response body
 * @param {Error} err
 */
function onerror(err) {
  // don't do anything if there is no error.
  // this allows you to pass `this.onerror`
  // to node-style callbacks.
  if (err == null) return;

  // normalize non-Error throwables into a real Error
  if (!(err instanceof Error)) {
    err = new Error(format('non-error thrown: %j', err));
  }

  const headerSent = this.headerSent || !this.writable;
  if (headerSent) {
    err.headerSent = true;
  }

  // delegate to the app-level 'error' listeners (logging)
  this.app.emit('error', err, this);

  // once headers are out (or the socket is gone) we can't write a body
  if (headerSent) {
    return;
  }

  const { res } = this;

  // wrap unexpected errors so they render like S3 errors
  if (!(err instanceof S3Error)) {
    err = S3Error.fromError(err);
  }

  // unset all headers except CORS, Vary and x-amz-* ones
  for (const name of res.getHeaderNames()) {
    if (!/^access-control-|vary|x-amz-/i.test(name)) {
      res.removeHeader(name);
    }
  }

  // (the presence of x-amz-error-* headers needs additional research)
  // this.set(err.headers);

  // force application/xml
  this.type = 'application/xml';

  // respond with the S3-style XML error document
  const msg = err.toXML();
  this.status = err.status;
  this.length = Buffer.byteLength(msg);
  res.end(msg);
}
276 |
--------------------------------------------------------------------------------
/lib/middleware/authentication.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { createHmac } = require('crypto');
4 | const { mapKeys, pickBy } = require('lodash');
5 |
6 | const AWSAccount = require('../models/account');
7 | const S3Error = require('../models/error');
8 | const { RESPONSE_HEADERS } = require('./response-header-override');
9 |
10 | const v2 = require('../signature/v2');
11 | const v4 = require('../signature/v4');
12 | const { encodeURIComponentRFC3986, parseDate } = require('../utils');
13 |
// Query-string parameters that count as S3 subresources for the purposes of
// the canonicalized resource used in signature calculation
const SUBRESOURCES = Object.fromEntries(
  [
    'acl',
    'accelerate',
    'analytics',
    'cors',
    'lifecycle',
    'delete',
    'inventory',
    'location',
    'logging',
    'metrics',
    'notification',
    'partNumber',
    'policy',
    'requestPayment',
    'replication',
    'restore',
    'tagging',
    'torrent',
    'uploadId',
    'uploads',
    'versionId',
    'versioning',
    'versions',
    'website',
  ].map((name) => [name, 1]),
);
40 |
/**
 * Middleware that verifies signed HTTP requests
 *
 * This also processes request and response headers specified via query params.
 *
 * Supports both Signature Version 2 and Version 4, provided either via the
 * Authorization header or via query parameters (presigned URLs).
 *
 * {@link https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html}
 */
module.exports = () =>
  async function authentication(ctx, next) {
    if (ctx.state.website) {
      // skip for static website requests
      return next();
    }

    if (ctx.method === 'OPTIONS') {
      // skip for CORS OPTION requests
      return next();
    }

    // x-amz-* values specified in query params take precedence over those in
    // the headers
    const amzQueryHeaders = pickBy(
      mapKeys(ctx.query, (value, key) => key.toLowerCase()),
      (value, key) => key.startsWith('x-amz-'),
    );
    Object.assign(ctx.headers, amzQueryHeaders);

    // A request may be authenticated by at most one of these mechanisms
    const mechanisms = {
      header: 'authorization' in ctx.headers,
      queryV2: 'Signature' in ctx.query,
      queryV4: 'X-Amz-Algorithm' in ctx.query,
    };

    const mechanismCount = Object.values(mechanisms).filter(Boolean).length;
    if (mechanismCount === 0) {
      // anonymous request
      return next();
    }
    if (mechanismCount !== 1) {
      throw new S3Error(
        'InvalidArgument',
        'Only one auth mechanism allowed; only the X-Amz-Algorithm query ' +
          'parameter, Signature query string parameter or the Authorization ' +
          'header should be specified',
        {
          ArgumentName: 'Authorization',
          ArgumentValue: ctx.get('Authorization'),
        },
      );
    }

    let canonicalizedResource = ctx.mountPath || '';
    if (ctx.params.bucket) {
      // the following behavior is derived from the behavior of the JS aws-sdk
      if (ctx.state.vhost) {
        canonicalizedResource = '/' + ctx.params.bucket + canonicalizedResource;
      } else {
        canonicalizedResource += '/' + ctx.params.bucket;
      }
      if (ctx.params.key) {
        canonicalizedResource += '/' + ctx.params.key;
      }
    } else {
      canonicalizedResource += '/';
    }
    canonicalizedResource = canonicalizedResource
      .split('/')
      .map(encodeURIComponentRFC3986)
      .join('/');

    // begin parsing for each part of the signature algorithm and the rest of
    // the canonical request
    const request = mechanisms.header
      ? parseHeader(ctx.headers)
      : mechanisms.queryV2
      ? v2.parseQuery(ctx.query)
      : mechanisms.queryV4
      ? v4.parseQuery(ctx.query)
      : undefined;

    const canonicalizedQueryString = Object.entries(ctx.query)
      .filter(([param]) => {
        // the signature-bearing params never participate in the canonical
        // query string
        if (
          mechanisms.queryV2 &&
          ['Signature', 'AWSAccessKeyId', 'Expires'].includes(param)
        ) {
          return false;
        }
        if (mechanisms.queryV4 && param === 'X-Amz-Signature') {
          return false;
        }
        // V2 signing only considers sub-resources and response header
        // overrides; V4 signing includes every remaining param
        return (
          request.signature.version !== 2 ||
          SUBRESOURCES[param] ||
          RESPONSE_HEADERS[param]
        );
      })
      .map(
        ([param, value]) =>
          request.signature.version === 2
            ? [param, value].slice(0, value ? 2 : 1).join('=') // v2 signing doesn't encode values in the signature calculation
            : [param, value].map(encodeURIComponent).join('='), // v4 signing requires the = be present even when there's no value
      )
      .sort()
      .join('&');

    const canonicalizedAmzHeaders = Object.keys(ctx.headers)
      .filter((headerName) => headerName.startsWith('x-amz-'))
      .sort()
      .map(
        (headerName) =>
          `${headerName}:${ctx.get(headerName).replace(/ +/g, ' ')}`,
      );

    const canonicalRequest = {
      // NOTE: OPTIONS requests return early at the top of this middleware, so
      // ctx.method can never be 'OPTIONS' here. The previous fallback to the
      // Access-Control-Request-Method header was dead code and was removed.
      method: ctx.method,
      contentMD5: ctx.get('Content-MD5'),
      contentType: ctx.get('Content-Type'),
      headers: ctx.headers,
      timestamp: undefined,
      uri: canonicalizedResource,
      querystring: canonicalizedQueryString,
      amzHeaders: canonicalizedAmzHeaders,
    };

    switch (request.signature.version) {
      case 2:
        // S3 signing uses expiration time as timestamp
        canonicalRequest.timestamp = request.expires;
        break;
      case 4:
        canonicalRequest.timestamp = request.time;
        break;
    }

    const account = AWSAccount.registry.get(request.accessKeyId);
    if (!account) {
      throw new S3Error(
        'InvalidAccessKeyId',
        'The AWS Access Key Id you provided does not exist in our records.',
        { AWSAccessKeyId: request.accessKeyId },
      );
    }

    if (request.signature.version === 2) {
      request.stringToSign = v2.getStringToSign(canonicalRequest);
      request.signingKey = account.accessKeys.get(request.accessKeyId);
    } else if (request.signature.version === 4) {
      request.stringToSign = v4.getStringToSign(canonicalRequest, request);
      request.signingKey = v4.getSigningKey(
        account.accessKeys.get(request.accessKeyId),
        request.credential.date,
        request.credential.region,
        request.credential.service,
      );
    }
    const calculatedSignature = createHmac(
      request.signature.algorithm,
      request.signingKey,
    )
      .update(request.stringToSign, 'utf8')
      .digest(request.signature.encoding);

    if (request.signatureProvided === calculatedSignature) {
      ctx.state.account = account;
    }

    if (!ctx.state.account) {
      if (ctx.app.allowMismatchedSignatures) {
        // the app explicitly opted in to accepting bad signatures
        ctx.state.account = account;
      } else {
        throw new S3Error(
          'SignatureDoesNotMatch',
          'The request signature we calculated does not match the signature ' +
            'you provided. Check your key and signing method.',
          {
            AWSAccessKeyId: request.accessKeyId,
            StringToSign: request.stringToSign,
            // hex bytes grouped in pairs, matching the format S3 returns
            StringToSignBytes: Buffer.from(request.stringToSign)
              .toString('hex')
              .match(/../g)
              .join(' '),
          },
        );
      }
    }

    return next();
  };
230 |
/**
 * Parses an Authorization-header-based signed request.
 *
 * Determines the signature version from the Authorization scheme, delegates
 * to the version-specific header parser, and enforces S3's clock-skew limit.
 *
 * @param {object} headers lower-cased request headers
 * @returns {object} request descriptor (signature, accessKeyId, time, ...)
 * @throws {S3Error} on unsupported schemes, missing dates, or skewed clocks
 */
function parseHeader(headers) {
  // x-amz-date takes precedence over the standard Date header
  const request = {
    signature: undefined,
    accessKeyId: undefined,
    time: headers['x-amz-date'] || headers.date,
  };

  const [scheme] = headers.authorization.split(' ');
  switch (scheme.toUpperCase()) {
    case 'AWS':
      // Signature Version 2
      request.signature = {
        version: 2,
        algorithm: 'sha1',
        encoding: 'base64',
      };
      Object.assign(request, v2.parseHeader(headers));
      break;
    case 'AWS4-HMAC-SHA256':
      // Signature Version 4
      request.signature = {
        version: 4,
        algorithm: 'sha256',
        encoding: 'hex',
      };
      Object.assign(request, v4.parseHeader(headers));
      break;
    default:
      throw new S3Error('InvalidArgument', 'Unsupported Authorization Type', {
        ArgumentName: 'Authorization',
        ArgumentValue: headers.authorization,
      });
  }

  const serverTime = new Date();
  const requestTime = parseDate(request.time);
  if (isNaN(requestTime)) {
    throw new S3Error(
      'AccessDenied',
      'AWS authentication requires a valid Date or x-amz-date header',
    );
  }

  // Reject requests whose clock differs from ours by more than 15 minutes
  const maxSkewMs = 900000;
  if (Math.abs(serverTime - requestTime) > maxSkewMs) {
    throw new S3Error(
      'RequestTimeTooSkewed',
      'The difference between the request time and the current time is too large.',
      {
        RequestTime: request.time,
        ServerTime: serverTime.toISOString().replace(/\.\d+/, ''),
      },
    );
  }
  return request;
}
292 |
--------------------------------------------------------------------------------
/test/s3rver.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const AWS = require('aws-sdk');
4 | const { expect } = require('chai');
5 | const { once } = require('events');
6 | const express = require('express');
7 | const FormData = require('form-data');
8 | const fs = require('fs');
9 | const crypto = require('crypto');
10 | const request = require('request-promise-native').defaults({
11 | resolveWithFullResponse: true,
12 | });
13 |
14 | const { createServerAndClient, generateTestObjects } = require('./helpers');
15 |
16 | const S3rver = require('../lib/s3rver');
17 |
18 | describe('S3rver', () => {
describe('#run', () => {
  // Builds an S3 client pointed at a local s3rver instance with the default
  // test credentials. Extracted to avoid repeating the client options in
  // every test.
  const createClient = (port) =>
    new AWS.S3({
      accessKeyId: 'S3RVER',
      secretAccessKey: 'S3RVER',
      endpoint: `http://localhost:${port}`,
      sslEnabled: false,
      s3ForcePathStyle: true,
    });

  it('supports running on port 0', async function () {
    const server = new S3rver({
      port: 0,
    });
    // Port 0 asks the OS for an ephemeral port; the resolved address must
    // carry the real port back to the caller.
    const { port } = await server.run();
    await server.close();
    expect(port).to.be.above(0);
  });

  it('creates preconfigured buckets on startup', async function () {
    const buckets = [{ name: 'bucket1' }, { name: 'bucket2' }];
    const server = new S3rver({
      configureBuckets: buckets,
    });
    const { port } = await server.run();
    const s3Client = createClient(port);
    try {
      const res = await s3Client.listBuckets().promise();
      expect(res.Buckets).to.have.lengthOf(2);
    } finally {
      await server.close();
    }
  });

  it('creates a preconfigured bucket with configs on startup', async function () {
    const bucket = {
      name: 'bucket1',
      configs: [
        fs.readFileSync('./example/cors.xml'),
        fs.readFileSync('./example/website.xml'),
      ],
    };
    const server = new S3rver({
      configureBuckets: [bucket],
    });
    const { port } = await server.run();
    const s3Client = createClient(port);
    try {
      // Both configs must be retrievable through the S3 API
      await s3Client.getBucketCors({ Bucket: bucket.name }).promise();
      await s3Client.getBucketWebsite({ Bucket: bucket.name }).promise();
    } finally {
      await server.close();
    }
  });
});
77 |
describe('#close', () => {
  // Builds an S3 client pointed at a local s3rver instance with the default
  // test credentials.
  const createClient = (port) =>
    new AWS.S3({
      accessKeyId: 'S3RVER',
      secretAccessKey: 'S3RVER',
      endpoint: `http://localhost:${port}`,
      sslEnabled: false,
      s3ForcePathStyle: true,
    });

  // Starts a server configured with one bucket plus the given extra options,
  // writes some test objects into it, closes the server, and returns the
  // (closed) server instance for post-close inspection of its store.
  // Extracted because the three tests below differ only in options/expectation.
  async function runAndClose(options) {
    const bucket = { name: 'foobars' };
    const server = new S3rver({
      ...options,
      configureBuckets: [bucket],
    });
    const { port } = await server.run();
    const s3Client = createClient(port);
    try {
      await generateTestObjects(s3Client, bucket.name, 10);
    } finally {
      await server.close();
    }
    return server;
  }

  it('cleans up after close if the resetOnClose setting is true', async function () {
    const server = await runAndClose({ resetOnClose: true });
    await expect(server.store.listBuckets()).to.eventually.have.lengthOf(0);
  });

  it('does not clean up after close if the resetOnClose setting is false', async function () {
    const server = await runAndClose({ resetOnClose: false });
    await expect(server.store.listBuckets()).to.eventually.have.lengthOf(1);
  });

  it('does not clean up after close if the resetOnClose setting is not set', async function () {
    // default behavior must match resetOnClose: false
    const server = await runAndClose({});
    await expect(server.store.listBuckets()).to.eventually.have.lengthOf(1);
  });
});
147 |
describe("event 'event'", () => {
  let s3rver;
  let s3Client;

  beforeEach(async () => {
    // Fresh server + client per test, with two empty buckets.
    // NOTE(review): there is no matching afterEach closing this server here —
    // presumably the shared helper in test/helpers.js tracks and tears down
    // instances; confirm.
    ({ s3rver, s3Client } = await createServerAndClient({
      configureBuckets: [{ name: 'bucket-a' }, { name: 'bucket-b' }],
    }));
  });

  it('triggers an event with a valid message structure', async function () {
    // Subscribe before issuing the request so the event cannot be missed
    const eventPromise = once(s3rver, 'event');
    const body = 'Hello!';
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'testPutKey', Body: body })
      .promise();
    const [event] = await eventPromise;
    // eventTime must be ISO-8601 with millisecond precision, and parseable
    const iso8601 = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/;
    expect(event.Records[0].eventTime).to.match(iso8601);
    expect(new Date(event.Records[0].eventTime)).to.not.satisfy(isNaN);
  });

  it('triggers a Post event', async function () {
    const eventPromise = once(s3rver, 'event');
    const body = 'Hello!';

    // Upload via a browser-style multipart/form-data POST to the bucket
    const form = new FormData();
    form.append('key', 'testPostKey');
    form.append('file', body);
    await request.post('bucket-a', {
      baseUrl: s3Client.endpoint.href,
      body: form,
      headers: form.getHeaders(),
    });

    const [event] = await eventPromise;
    expect(event.Records[0].eventName).to.equal('ObjectCreated:Post');
    expect(event.Records[0].s3.bucket.name).to.equal('bucket-a');
    // eTag of a simple (non-multipart) upload is the MD5 of the body
    expect(event.Records[0].s3.object).to.contain({
      key: 'testPostKey',
      size: body.length,
      eTag: crypto.createHash('md5').update(body).digest('hex'),
    });
  });

  it('triggers a Put event', async function () {
    const eventPromise = once(s3rver, 'event');
    const body = 'Hello!';
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'testPutKey', Body: body })
      .promise();
    const [event] = await eventPromise;
    expect(event.Records[0].eventName).to.equal('ObjectCreated:Put');
    expect(event.Records[0].s3.bucket.name).to.equal('bucket-a');
    expect(event.Records[0].s3.object).to.contain({
      key: 'testPutKey',
      size: body.length,
      eTag: crypto.createHash('md5').update(body).digest('hex'),
    });
  });

  it('triggers a Copy event', async function () {
    const body = 'Hello!';
    // Seed the source object first ...
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'testPut', Body: body })
      .promise();
    // ... then subscribe, so only the copy's event is captured
    const eventPromise = once(s3rver, 'event');
    await s3Client
      .copyObject({
        Bucket: 'bucket-b',
        Key: 'testCopy',
        CopySource: '/bucket-a/testPut',
      })
      .promise();
    const [event] = await eventPromise;
    expect(event.Records[0].eventName).to.equal('ObjectCreated:Copy');
    expect(event.Records[0].s3.bucket.name).to.equal('bucket-b');
    expect(event.Records[0].s3.object).to.contain({
      key: 'testCopy',
      size: body.length,
    });
  });

  it('triggers a Delete event', async function () {
    const body = 'Hello!';
    // Seed the object first ...
    await s3Client
      .putObject({
        Bucket: 'bucket-a',
        Key: 'testDelete',
        Body: body,
      })
      .promise();
    // ... then subscribe, so only the delete's event is captured
    const eventPromise = once(s3rver, 'event');
    await s3Client
      .deleteObject({ Bucket: 'bucket-a', Key: 'testDelete' })
      .promise();
    const [event] = await eventPromise;
    expect(event.Records[0].eventName).to.equal('ObjectRemoved:Delete');
    expect(event.Records[0].s3.bucket.name).to.equal('bucket-a');
    expect(event.Records[0].s3.object).to.contain({
      key: 'testDelete',
    });
  });
});
252 |
it('can be mounted on a subpath in an Express app', async function () {
  // Mount the s3rver request handler under a base path of a host Express app
  const s3rver = new S3rver({
    configureBuckets: [{ name: 'bucket-a' }, { name: 'bucket-b' }],
  });
  await s3rver.configureBuckets();

  const app = express();
  app.use('/basepath', s3rver.getMiddleware());
  const server = app.listen(0);
  await once(server, 'listening');

  try {
    const { port } = server.address();
    // The client must include the mount path in its endpoint
    const client = new AWS.S3({
      accessKeyId: 'S3RVER',
      secretAccessKey: 'S3RVER',
      endpoint: `http://localhost:${port}/basepath`,
      sslEnabled: false,
      s3ForcePathStyle: true,
    });

    // Both bucket- and object-level operations should work through the subpath
    const listing = await client.listBuckets().promise();
    expect(listing.Buckets).to.have.lengthOf(2);
    await client
      .putObject({ Bucket: 'bucket-a', Key: 'text', Body: 'Hello!' })
      .promise();
  } finally {
    server.close();
    await once(server, 'close');
  }
});
283 | });
284 |
--------------------------------------------------------------------------------
/test/models/config.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { expect } = require('chai');
4 |
5 | const { S3WebsiteConfiguration } = require('../../lib/models/config');
6 |
describe('S3WebsiteConfiguration', () => {
  // NOTE(review): the XML fixture literals below were reconstructed — the
  // archived copy of this file had the XML tags stripped, leaving only text
  // nodes. The structures are derived from the validation logic in
  // lib/models/config.js; verify against the upstream test file.
  const notWellFormedError =
    'The XML you provided was not well-formed or did not validate against our published schema';

  describe('RoutingRules', () => {
    it('rejects when multiple RoutingRules elements exist', () => {
      // duplicate <RoutingRules> elements parse to an array -> MalformedXML
      expect(() =>
        S3WebsiteConfiguration.validate(`
          <WebsiteConfiguration>
            <IndexDocument>
              <Suffix>index.html</Suffix>
            </IndexDocument>
            <RoutingRules>
              <RoutingRule>
                <Redirect>
                  <HostName>example.com</HostName>
                </Redirect>
              </RoutingRule>
            </RoutingRules>
            <RoutingRules>
              <RoutingRule>
                <Redirect>
                  <HostName>example.com</HostName>
                </Redirect>
              </RoutingRule>
            </RoutingRules>
          </WebsiteConfiguration>
        `),
      ).to.throw(notWellFormedError);
    });

    it('rejects when no RoutingRules.RoutingRule elements exist', () => {
      // an empty RoutingRule parses to a falsy value, so the rule set is
      // treated as empty
      expect(() =>
        S3WebsiteConfiguration.validate(`
          <WebsiteConfiguration>
            <IndexDocument>
              <Suffix>index.html</Suffix>
            </IndexDocument>
            <RoutingRules>
              <RoutingRule></RoutingRule>
            </RoutingRules>
          </WebsiteConfiguration>
        `),
      ).to.throw(notWellFormedError);
    });

    it('accepts single RoutingRules.RoutingRule', () => {
      expect(
        S3WebsiteConfiguration.validate(`
          <WebsiteConfiguration>
            <IndexDocument>
              <Suffix>index.html</Suffix>
            </IndexDocument>
            <RoutingRules>
              <RoutingRule>
                <Redirect>
                  <HostName>example.com</HostName>
                </Redirect>
              </RoutingRule>
            </RoutingRules>
          </WebsiteConfiguration>
        `),
      ).to.exist;
    });

    it('accepts multiple RoutingRules.RoutingRule', () => {
      expect(
        S3WebsiteConfiguration.validate(`
          <WebsiteConfiguration>
            <IndexDocument>
              <Suffix>index.html</Suffix>
            </IndexDocument>
            <RoutingRules>
              <RoutingRule>
                <Redirect>
                  <HostName>example.com</HostName>
                </Redirect>
              </RoutingRule>
              <RoutingRule>
                <Redirect>
                  <HostName>example.com</HostName>
                </Redirect>
              </RoutingRule>
            </RoutingRules>
          </WebsiteConfiguration>
        `),
      ).to.exist;
    });

    describe('Condition', () => {
      it('rejects when no KeyPrefixEquals or HttpErrorCodeReturnedEquals elements exist', () => {
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals></KeyPrefixEquals>
                  </Condition>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(notWellFormedError);
      });

      it('rejects when HttpErrorCodeReturnedEquals is not in range', () => {
        // only 4XX and 5XX codes are valid condition matches
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <HttpErrorCodeReturnedEquals>304</HttpErrorCodeReturnedEquals>
                  </Condition>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(
          'The provided HTTP error code (304) is not valid. Valid codes are 4XX or 5XX.',
        );

        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <HttpErrorCodeReturnedEquals>600</HttpErrorCodeReturnedEquals>
                  </Condition>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(
          'The provided HTTP error code (600) is not valid. Valid codes are 4XX or 5XX.',
        );
      });

      it('accepts a Condition with a KeyPrefixEquals element', () => {
        expect(
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals>test</KeyPrefixEquals>
                  </Condition>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.exist;
      });

      it('accepts a Condition with a HttpErrorCodeReturnedEquals element', () => {
        expect(
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
                  </Condition>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.exist;
      });

      it('accepts a config with no Condition', () => {
        expect(
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.exist;
      });
    });

    describe('Redirect', () => {
      it("rejects when Redirect doesn't exist", () => {
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals>test</KeyPrefixEquals>
                  </Condition>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(notWellFormedError);
      });

      it('rejects when no valid Redirect options exist', () => {
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals>test</KeyPrefixEquals>
                  </Condition>
                  <Redirect>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(notWellFormedError);
      });

      it("rejects when Protocol isn't http or https", () => {
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals>test</KeyPrefixEquals>
                  </Condition>
                  <Redirect>
                    <Protocol>ftp</Protocol>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(
          'Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically.',
        );
      });

      it('accepts a valid Redirect config', () => {
        expect(
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Redirect>
                    <HostName>example.com</HostName>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.exist;
      });

      it('parses values with XML encoding', () => {
        // XML entities must be decoded into the parsed config values
        const config = S3WebsiteConfiguration.validate(`
          <WebsiteConfiguration>
            <IndexDocument>
              <Suffix>index.html</Suffix>
            </IndexDocument>
            <RoutingRules>
              <RoutingRule>
                <Redirect>
                  <ReplaceKeyPrefixWith>url?test=1&amp;key=</ReplaceKeyPrefixWith>
                </Redirect>
              </RoutingRule>
            </RoutingRules>
          </WebsiteConfiguration>
        `);

        expect(config.routingRules[0].redirect.ReplaceKeyPrefixWith).to.equal(
          'url?test=1&key=',
        );
      });

      it('rejects a Redirect config with both ReplaceKeyWith and ReplaceKeyPrefixWith elements', () => {
        expect(() =>
          S3WebsiteConfiguration.validate(`
            <WebsiteConfiguration>
              <IndexDocument>
                <Suffix>index.html</Suffix>
              </IndexDocument>
              <RoutingRules>
                <RoutingRule>
                  <Condition>
                    <KeyPrefixEquals>test</KeyPrefixEquals>
                  </Condition>
                  <Redirect>
                    <ReplaceKeyPrefixWith>foo</ReplaceKeyPrefixWith>
                    <ReplaceKeyWith>bar</ReplaceKeyWith>
                  </Redirect>
                </RoutingRule>
              </RoutingRules>
            </WebsiteConfiguration>
          `),
        ).to.throw(
          'You can only define ReplaceKeyPrefix or ReplaceKey but not both.',
        );
      });
    });
  });
});
347 |
--------------------------------------------------------------------------------
/lib/models/config.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const xmlParser = require('fast-xml-parser');
4 | const { escapeRegExp } = require('lodash');
5 | const he = require('he');
6 |
7 | const S3Error = require('./error');
8 | const RoutingRule = require('./routing-rule');
9 |
10 | exports.getConfigModel = function getConfigModel(type) {
11 | switch (type) {
12 | case 'cors':
13 | return S3CorsConfiguration;
14 | case 'website':
15 | return S3WebsiteConfiguration;
16 | case 'tagging':
17 | return TaggingConfiguration;
18 | }
19 | };
20 |
// Abstract base for bucket configuration documents (CORS, website, tagging).
// Holds the parsed XML tree and provides (de)serialization helpers.
class S3ConfigBase {
  /**
   * Validates a given XML config against S3's spec.
   * @param {string} xml
   * @returns S3Config
   */
  static validate() {
    // Subclasses must override this
    throw new Error('Not implemented');
  }

  /**
   * Parses an XML document
   * @param {string} type config type tag, e.g. 'cors', 'website', 'tagging'
   * @param {string} config raw XML document
   */
  constructor(type, config) {
    // Enforce abstractness: only subclasses may be instantiated
    if (this.constructor === S3ConfigBase) {
      throw new Error('Cannot create an instance of an abstract class');
    }
    this.type = type;
    // NOTE(review): relies on fast-xml-parser v3 semantics — parseNodeValue
    // coerces numeric text nodes to numbers and he.decode resolves XML
    // entities in tag values; confirm when upgrading the parser.
    this.rawConfig = xmlParser.parse(config, {
      ignoreAttributes: false,
      parseNodeValue: true,
      tagValueProcessor: he.decode,
    });
  }

  // Serialized form is simply the parsed XML object tree
  toJSON() {
    return this.rawConfig;
  }

  // Re-serializes the config to XML. Pass a number to pretty-print with that
  // indent width; omit it for compact output.
  toXML(space) {
    const parser = new xmlParser.j2xParser({
      ignoreAttributes: false,
      format: typeof space === 'number',
      indentBy: ' '.repeat(space),
    });
    return parser.parse(this.rawConfig);
  }
}
61 |
// Parsed representation of a bucket's CORS configuration.
class S3CorsConfiguration extends S3ConfigBase {
  /**
   * Validates a CORS configuration XML document.
   * @param {string} xml
   * @returns {S3CorsConfiguration}
   * @throws {S3Error} MalformedXML or InvalidRequest on invalid configs
   */
  static validate(xml) {
    if (xmlParser.validate(xml) !== true) {
      throw new S3Error(
        'MalformedXML',
        'The XML you provided was not well-formed or did not validate ' +
          'against our published schema',
      );
    }
    const config = new S3CorsConfiguration(xml);
    const { CORSConfiguration } = config.rawConfig;
    // At least one CORSRule is required
    if (!CORSConfiguration || !CORSConfiguration.CORSRule) {
      throw new S3Error(
        'MalformedXML',
        'The XML you provided was not well-formed or did not validate ' +
          'against our published schema',
      );
    }
    // [].concat normalizes "one rule" (object) vs "many rules" (array)
    for (const rule of [].concat(CORSConfiguration.CORSRule)) {
      // Each rule needs an origin and a method; an array for MaxAgeSeconds
      // means it was specified more than once
      if (
        !rule.AllowedOrigin ||
        !rule.AllowedMethod ||
        Array.isArray(rule.MaxAgeSeconds)
      ) {
        throw new S3Error(
          'MalformedXML',
          'The XML you provided was not well-formed or did not validate ' +
            'against our published schema',
        );
      }

      for (const method of [].concat(rule.AllowedMethod)) {
        if (!S3CorsConfiguration.allowedMethods.includes(method)) {
          throw new S3Error(
            'InvalidRequest',
            'Found unsupported HTTP method in CORS config. Unsupported method is ' +
              method,
          );
        }
      }
    }
    return config;
  }

  /**
   * Compiles a wildcard pattern (at most one '*') into an anchored RegExp.
   * @param {string} str
   * @param {string} [flags] RegExp flags, e.g. 'i'
   * @returns {RegExp}
   * @throws {S3Error} when more than one wildcard is present
   */
  static createWildcardRegExp(str, flags = '') {
    const parts = str.split('*');
    if (parts.length > 2)
      // NOTE(review): this message always says "AllowedOrigin" although the
      // helper is also used for AllowedHeader patterns — confirm intended
      throw new S3Error(
        'InvalidRequest',
        `AllowedOrigin "${str}" can not have more than one wildcard.`,
      );
    // escapeRegExp neutralizes regex metacharacters; the single '*' becomes '.*'
    return new RegExp(`^${parts.map(escapeRegExp).join('.*')}$`, flags);
  }

  constructor(config) {
    super('cors', config);
    const { CORSConfiguration = {} } = this.rawConfig;
    // Precompile each rule into the matchers used by matchRule/getAllowedHeaders
    this.rules = [].concat(CORSConfiguration.CORSRule || []).map((rule) => ({
      hasWildcardOrigin: [].concat(rule.AllowedOrigin || []).includes('*'),
      allowedOrigins: []
        .concat(rule.AllowedOrigin || [])
        .map((o) => S3CorsConfiguration.createWildcardRegExp(o)),
      allowedMethods: [].concat(rule.AllowedMethod || []),
      allowedHeaders: []
        .concat(rule.AllowedHeader || [])
        // header matching is case-insensitive
        .map((h) => S3CorsConfiguration.createWildcardRegExp(h, 'i')),
      exposeHeaders: [].concat(rule.ExposeHeader || []),
      maxAgeSeconds: rule.MaxAgeSeconds,
    }));
  }

  /**
   * Finds the first rule matching the given origin and HTTP method.
   * @returns the matching rule object, or undefined
   */
  matchRule(origin, method) {
    return this.rules.find(
      (rule) =>
        rule.allowedOrigins.some((pattern) => pattern.test(origin)) &&
        rule.allowedMethods.includes(method.toUpperCase()),
    );
  }

  /**
   * Filters the requested headers down to those permitted by the rule.
   * @param rule a rule produced by the constructor
   * @param {string[]|undefined} requestHeaders
   * @returns {string[]} normalized (trimmed, lower-cased) allowed headers
   */
  getAllowedHeaders(rule, requestHeaders) {
    if (!requestHeaders) return [];
    return requestHeaders
      .map((header) => header.trim().toLowerCase())
      .filter((header) =>
        rule.allowedHeaders.some((pattern) => pattern.test(header)),
      );
  }
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html#cors-allowed-methods
S3CorsConfiguration.allowedMethods = ['GET', 'PUT', 'POST', 'DELETE', 'HEAD'];
exports.S3CorsConfiguration = S3CorsConfiguration;
153 |
// Parsed representation of a bucket's static website configuration.
class S3WebsiteConfiguration extends S3ConfigBase {
  /**
   * Validates a website configuration XML document against S3's published
   * schema.
   * @param {string} xml
   * @returns {S3WebsiteConfiguration} the parsed, validated configuration
   * @throws {S3Error} MalformedXML or InvalidArgument when the config is invalid
   */
  static validate(xml) {
    // All schema violations share the same error; build it in one place
    const malformedXML = () =>
      new S3Error(
        'MalformedXML',
        'The XML you provided was not well-formed or did not validate ' +
          'against our published schema',
      );

    if (xmlParser.validate(xml) !== true) {
      throw malformedXML();
    }

    const config = new S3WebsiteConfiguration(xml);
    const { WebsiteConfiguration } = config.rawConfig;
    if (!WebsiteConfiguration) {
      throw malformedXML();
    }
    const {
      IndexDocument,
      ErrorDocument,
      RedirectAllRequestsTo,
      RoutingRules,
    } = WebsiteConfiguration;
    if (RedirectAllRequestsTo) {
      // An array means the element was specified more than once
      if (Array.isArray(RedirectAllRequestsTo)) {
        throw malformedXML();
      }
      if (IndexDocument) {
        throw new S3Error(
          'InvalidArgument',
          'RedirectAllRequestsTo cannot be provided in conjunction with ' +
            'other Routing Rules.',
          {
            ArgumentName: 'RedirectAllRequestsTo',
            ArgumentValue: 'not null',
          },
        );
      }
    } else if (IndexDocument) {
      // IndexDocument must appear once and carry a single Suffix
      if (
        Array.isArray(IndexDocument) ||
        !IndexDocument.Suffix ||
        Array.isArray(IndexDocument.Suffix)
      ) {
        throw malformedXML();
      }
      // ErrorDocument, when present, must appear once and carry a single Key
      if (ErrorDocument) {
        if (
          Array.isArray(ErrorDocument) ||
          !ErrorDocument.Key ||
          Array.isArray(ErrorDocument.Key)
        ) {
          throw malformedXML();
        }
      }
    } else {
      throw new S3Error(
        'InvalidArgument',
        'A value for IndexDocument Suffix must be provided if RedirectAllRequestsTo is empty',
        {
          ArgumentName: 'IndexDocument',
          ArgumentValue: 'null',
        },
      );
    }
    // NOTE(review): when RedirectAllRequestsTo is provided (and IndexDocument
    // is therefore absent), !IndexDocument makes this check reject the config
    // with MalformedXML. Preserved as-is since callers/tests may depend on it
    // — confirm whether RedirectAllRequestsTo-only configs are meant to be
    // supported.
    if (
      !IndexDocument ||
      Array.isArray(ErrorDocument) ||
      (ErrorDocument && !ErrorDocument.Key)
    ) {
      throw malformedXML();
    }
    if (IndexDocument.Suffix.indexOf('/') !== -1) {
      // Bug fix: the error code argument was previously omitted here, so the
      // message was passed where the code belongs (and the extras where the
      // message belongs). S3 reports this condition as InvalidArgument.
      throw new S3Error(
        'InvalidArgument',
        'The IndexDocument Suffix is not well formed',
        {
          ArgumentName: 'IndexDocument',
          ArgumentValue: IndexDocument.Suffix,
        },
      );
    }
    if (RoutingRules) {
      // RoutingRules must appear once and contain at least one RoutingRule
      if (Array.isArray(RoutingRules) || !RoutingRules.RoutingRule) {
        throw malformedXML();
      }

      const routingRules = Array.isArray(RoutingRules.RoutingRule)
        ? RoutingRules.RoutingRule
        : [RoutingRules.RoutingRule];

      for (const { Condition, Redirect } of routingRules) {
        // A Condition, when present, needs at least one criterion, and a
        // Redirect must exist with at least one directive
        if (
          (Condition &&
            !Condition.KeyPrefixEquals &&
            !Condition.HttpErrorCodeReturnedEquals) ||
          !Redirect ||
          (!Redirect.HostName &&
            !Redirect.Protocol &&
            !Redirect.ReplaceKeyPrefixWith &&
            !Redirect.ReplaceKeyWith &&
            !Redirect.HttpRedirectCode)
        ) {
          throw malformedXML();
        }

        // Only 4XX and 5XX error codes may be matched
        if (
          Condition &&
          Condition.HttpErrorCodeReturnedEquals &&
          (Condition.HttpErrorCodeReturnedEquals < 400 ||
            Condition.HttpErrorCodeReturnedEquals >= 600)
        ) {
          throw new S3Error(
            'InvalidArgument',
            `The provided HTTP error code (${Condition.HttpErrorCodeReturnedEquals}) is not valid. Valid codes are 4XX or 5XX.`,
            {
              ArgumentName: 'HttpErrorCodeReturnedEquals',
              ArgumentValue: Condition.HttpErrorCodeReturnedEquals,
            },
          );
        }

        if (
          Redirect.Protocol &&
          Redirect.Protocol !== 'http' &&
          Redirect.Protocol !== 'https'
        ) {
          throw new S3Error(
            'InvalidArgument',
            'Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically.',
            {
              ArgumentName: 'Protocol',
              ArgumentValue: Redirect.Protocol,
            },
          );
        }

        // ReplaceKeyWith and ReplaceKeyPrefixWith are mutually exclusive
        if (Redirect.ReplaceKeyWith && Redirect.ReplaceKeyPrefixWith) {
          throw new S3Error(
            'MalformedXML',
            'You can only define ReplaceKeyPrefix or ReplaceKey but not both.',
          );
        }
      }
    }
    return config;
  }

  constructor(config) {
    super('website', config);
    const { WebsiteConfiguration = {} } = this.rawConfig;
    if (WebsiteConfiguration.IndexDocument) {
      this.indexDocumentSuffix = WebsiteConfiguration.IndexDocument.Suffix;
      if (WebsiteConfiguration.ErrorDocument) {
        this.errorDocumentKey = WebsiteConfiguration.ErrorDocument.Key;
      }
    }
    if (
      WebsiteConfiguration.RoutingRules &&
      WebsiteConfiguration.RoutingRules.RoutingRule
    ) {
      // Normalize single-rule configs to an array before wrapping each rule
      const routingRules = Array.isArray(
        WebsiteConfiguration.RoutingRules.RoutingRule,
      )
        ? WebsiteConfiguration.RoutingRules.RoutingRule
        : [WebsiteConfiguration.RoutingRules.RoutingRule];
      this.routingRules = routingRules.map((config) => new RoutingRule(config));
    }
  }
}
exports.S3WebsiteConfiguration = S3WebsiteConfiguration;
347 |
/**
 * Bucket tagging configuration (PutBucketTagging / GetBucketTagging).
 */
class TaggingConfiguration extends S3ConfigBase {
  /**
   * Parses a tagging configuration document.
   *
   * @param {string|Buffer} xml raw request body
   * @returns {TaggingConfiguration}
   * @throws {S3Error} MalformedXML when the body is not well-formed XML
   */
  static validate(xml) {
    const isWellFormed = xmlParser.validate(xml) === true;
    if (!isWellFormed) {
      throw new S3Error(
        'MalformedXML',
        'The XML you provided was not well-formed or did not validate ' +
          'against our published schema',
      );
    }
    return new TaggingConfiguration(xml);
  }

  constructor(config) {
    super('tagging', config);
  }
}
// FIX: the sentinel for "no tags" was constructed from an empty string,
// which is not a valid tagging document. Use an empty <TagSet />,
// matching the XML shape S3 returns for GetBucketTagging on a bucket
// without tags.
TaggingConfiguration.EMPTY = new TaggingConfiguration(
  `<Tagging><TagSet /></Tagging>`,
);
368 |
369 | exports.TaggingConfiguration = TaggingConfiguration;
370 |
--------------------------------------------------------------------------------
/test/middleware/website.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { expect } = require('chai');
4 | const fs = require('fs');
5 | const request = require('request-promise-native').defaults({
6 | resolveWithFullResponse: true,
7 | });
8 |
9 | const { createServerAndClient } = require('../helpers');
10 |
11 | describe('Static Website Tests', function () {
  let s3Client;
  // Buckets provisioned (with their website XML configs applied) before
  // every test in this suite
  const buckets = [
    // a bucket with no additional config
    {
      name: 'bucket-a',
    },

    // A standard static hosting configuration with no custom error page
    {
      name: 'website0',
      configs: [
        fs.readFileSync(require.resolve('../fixtures/website-test0.xml')),
      ],
    },

    // A static website with a custom error page
    {
      name: 'website1',
      configs: [
        fs.readFileSync(require.resolve('../fixtures/website-test1.xml')),
      ],
    },

    // A static website with a single simple routing rule
    {
      name: 'website2',
      configs: [
        fs.readFileSync(require.resolve('../fixtures/website-test2.xml')),
      ],
    },

    // A static website with multiple routing rules
    {
      name: 'website3',
      configs: [
        fs.readFileSync(require.resolve('../fixtures/website-test3.xml')),
      ],
    },
  ];

  // A fresh server/client pair for every test so object state cannot
  // leak between test cases
  this.beforeEach(async () => {
    ({ s3Client } = await createServerAndClient({
      configureBuckets: buckets,
    }));
  });
57 |
58 | it('fails to read an object at the website endpoint from a bucket with no website configuration', async function () {
59 | await s3Client
60 | .putObject({
61 | Bucket: 'bucket-a',
62 | Key: 'page/index.html',
63 | Body: 'Hello',
64 | })
65 | .promise();
66 | let res;
67 | try {
68 | res = await request('page/', {
69 | baseUrl: s3Client.endpoint.href,
70 | headers: { host: `bucket-a.s3-website-us-east-1.amazonaws.com` },
71 | });
72 | } catch (err) {
73 | res = err.response;
74 | }
75 | expect(res.statusCode).to.equal(404);
76 | expect(res.headers).to.have.property(
77 | 'content-type',
78 | 'text/html; charset=utf-8',
79 | );
80 | expect(res.body).to.contain('Code: NoSuchWebsiteConfiguration');
81 | });
82 |
83 | it('returns an index page at / path', async function () {
84 | const expectedBody = 'Hello';
85 | await s3Client
86 | .putObject({
87 | Bucket: 'website0',
88 | Key: 'index.html',
89 | Body: expectedBody,
90 | })
91 | .promise();
92 | const res = await request('website0/', {
93 | baseUrl: s3Client.endpoint.href,
94 | headers: { accept: 'text/html' },
95 | });
96 | expect(res.body).to.equal(expectedBody);
97 | });
98 |
99 | it('allows redirects for image requests', async function () {
100 | let res;
101 | try {
102 | res = await request('website3/complex/image.png', {
103 | baseUrl: s3Client.endpoint.href,
104 | headers: { accept: 'image/png' },
105 | followRedirect: false,
106 | });
107 | } catch (err) {
108 | res = err.response;
109 | }
110 | expect(res.statusCode).to.equal(307);
111 | expect(res.headers).to.have.property(
112 | 'location',
113 | 'https://custom/replacement',
114 | );
115 | });
116 |
117 | it('returns an index page at /page/ path', async function () {
118 | const expectedBody = 'Hello';
119 | await s3Client
120 | .putObject({
121 | Bucket: 'website0',
122 | Key: 'page/index.html',
123 | Body: expectedBody,
124 | })
125 | .promise();
126 | const res = await request('website0/page/', {
127 | baseUrl: s3Client.endpoint.href,
128 | headers: { accept: 'text/html' },
129 | });
130 | expect(res.body).to.equal(expectedBody);
131 | });
132 |
133 | it('does not return an index page at /page/ path if an object is stored with a trailing /', async function () {
134 | const indexBody = 'Hello';
135 | const expectedBody = 'Goodbye';
136 | await s3Client
137 | .putObject({
138 | Bucket: 'website0',
139 | Key: 'page/index.html',
140 | Body: indexBody,
141 | })
142 | .promise();
143 | await s3Client
144 | .putObject({
145 | Bucket: 'website0',
146 | Key: 'page/',
147 | Body: expectedBody,
148 | })
149 | .promise();
150 |
151 | const res = await request('website0/page/', {
152 | baseUrl: s3Client.endpoint.href,
153 | headers: { accept: 'text/html' },
154 | });
155 | expect(res.body).to.equal(expectedBody);
156 | });
157 |
158 | it('redirects with a 302 status at /page path', async function () {
159 | const body = 'Hello';
160 | await s3Client
161 | .putObject({
162 | Bucket: 'website0',
163 | Key: 'page/index.html',
164 | Body: body,
165 | })
166 | .promise();
167 | let res;
168 | try {
169 | res = await request('website0/page', {
170 | baseUrl: s3Client.endpoint.href,
171 | headers: { accept: 'text/html' },
172 | followRedirect: false,
173 | });
174 | } catch (err) {
175 | res = err.response;
176 | }
177 | expect(res.statusCode).to.equal(302);
178 | expect(res.headers).to.have.property('location', '/website0/page/');
179 | });
180 |
181 | it('redirects with 302 status at /page path for subdomain-style bucket', async function () {
182 | const body = 'Hello';
183 | await s3Client
184 | .putObject({
185 | Bucket: 'website0',
186 | Key: 'page/index.html',
187 | Body: body,
188 | })
189 | .promise();
190 | let res;
191 | try {
192 | res = await request('page', {
193 | baseUrl: s3Client.endpoint.href,
194 | headers: { host: 'website0.s3-website-us-east-1.amazonaws.com' },
195 | followRedirect: false,
196 | });
197 | } catch (err) {
198 | res = err.response;
199 | }
200 | expect(res.statusCode).to.equal(302);
201 | expect(res.headers).to.have.property('location', '/page/');
202 | });
203 |
204 | it('returns a HTML 404 error page', async function () {
205 | let res;
206 | try {
207 | res = await request('website0/page/not-exists', {
208 | baseUrl: s3Client.endpoint.href,
209 | headers: { accept: 'text/html' },
210 | });
211 | } catch (err) {
212 | res = err.response;
213 | }
214 | expect(res.statusCode).to.equal(404);
215 | expect(res.headers).to.have.property(
216 | 'content-type',
217 | 'text/html; charset=utf-8',
218 | );
219 | expect(res.body).to.contain.string('Key: page/not-exists');
220 | });
221 |
222 | it('returns a HTML 404 error page for a missing index key', async function () {
223 | let res;
224 | try {
225 | res = await request('website0/page/not-exists/', {
226 | baseUrl: s3Client.endpoint.href,
227 | headers: { accept: 'text/html' },
228 | });
229 | } catch (err) {
230 | res = err.response;
231 | }
232 | expect(res.statusCode).to.equal(404);
233 | expect(res.headers).to.have.property(
234 | 'content-type',
235 | 'text/html; charset=utf-8',
236 | );
237 | expect(res.body).to.contain.string('Key: page/not-exists/index.html');
238 | });
239 |
240 | it('serves a custom error page if it exists', async function () {
241 | const body = 'Oops!';
242 | await s3Client
243 | .putObject({
244 | Bucket: 'website1',
245 | Key: 'error.html',
246 | Body: body,
247 | ContentType: 'text/html',
248 | })
249 | .promise();
250 | let res;
251 | try {
252 | res = await request('website1/page/not-exists', {
253 | baseUrl: s3Client.endpoint.href,
254 | headers: { accept: 'text/html' },
255 | });
256 | } catch (err) {
257 | res = err.response;
258 | }
259 | expect(res.headers).to.have.property(
260 | 'content-type',
261 | 'text/html; charset=utf-8',
262 | );
263 | expect(res.body).to.equal(body);
264 | });
265 |
266 | it('returns a XML error document for SDK requests', async function () {
267 | let error;
268 | try {
269 | await s3Client
270 | .getObject({
271 | Bucket: 'website0',
272 | Key: 'page/not-exists',
273 | })
274 | .promise();
275 | } catch (err) {
276 | error = err;
277 | }
278 | expect(error).to.exist;
279 | expect(error.statusCode).to.equal(404);
280 | expect(error.code).to.equal('NoSuchKey');
281 | });
282 |
283 | it('stores an object with website-redirect-location metadata', async function () {
284 | const redirectLocation = 'https://github.com/jamhall/s3rver';
285 | await s3Client
286 | .putObject({
287 | Bucket: 'website0',
288 | Key: 'index.html',
289 | Body: 'Hello',
290 | WebsiteRedirectLocation: redirectLocation,
291 | })
292 | .promise();
293 | const res = await s3Client
294 | .getObject({
295 | Bucket: 'website0',
296 | Key: 'index.html',
297 | })
298 | .promise();
299 | expect(res).to.have.property('WebsiteRedirectLocation', redirectLocation);
300 | });
301 |
302 | it('redirects for an object stored with a website-redirect-location', async function () {
303 | const redirectLocation = 'https://github.com/jamhall/s3rver';
304 | await s3Client
305 | .putObject({
306 | Bucket: 'website0',
307 | Key: 'index.html',
308 | Body: 'Hello',
309 | WebsiteRedirectLocation: redirectLocation,
310 | })
311 | .promise();
312 | let res;
313 | try {
314 | res = await request(`website0/`, {
315 | baseUrl: s3Client.endpoint.href,
316 | headers: { accept: 'text/html' },
317 | followRedirect: false,
318 | });
319 | } catch (err) {
320 | res = err.response;
321 | }
322 | expect(res.statusCode).to.equal(301);
323 | expect(res.headers).to.have.property('location', redirectLocation);
324 | });
325 |
326 | it('redirects for a custom error page stored with a website-redirect-location', async function () {
327 | const redirectLocation = 'https://github.com/jamhall/s3rver';
328 | const body = 'Hello';
329 | await s3Client
330 | .putObject({
331 | Bucket: 'website1',
332 | Key: 'error.html',
333 | Body: body,
334 | WebsiteRedirectLocation: redirectLocation,
335 | })
336 | .promise();
337 | let res;
338 | try {
339 | res = await request(`website1/page/`, {
340 | baseUrl: s3Client.endpoint.href,
341 | headers: { accept: 'text/html' },
342 | followRedirect: false,
343 | });
344 | } catch (err) {
345 | res = err.response;
346 | }
347 | expect(res.statusCode).to.equal(301);
348 | expect(res.headers).to.have.property('location', redirectLocation);
349 | });
350 |
351 | describe('Routing rules', () => {
352 | it('evaluates a single simple routing rule', async function () {
353 | let res;
354 | try {
355 | res = await request(`website2/test/key/`, {
356 | baseUrl: s3Client.endpoint.href,
357 | headers: { accept: 'text/html' },
358 | followRedirect: false,
359 | });
360 | } catch (err) {
361 | res = err.response;
362 | }
363 | expect(res.statusCode).to.equal(301);
364 | expect(res.headers).to.have.property(
365 | 'location',
366 | s3Client.endpoint.href + 'website2/replacement/key/',
367 | );
368 | });
369 |
370 | it('does not evaluate routing rules for an index page', async function () {
371 | const expectedBody = 'Hello';
372 | await s3Client
373 | .putObject({
374 | Bucket: 'website2',
375 | Key: 'recursive/foo/index.html',
376 | Body: expectedBody,
377 | })
378 | .promise();
379 | const res = await request('website2/recursive/foo/', {
380 | baseUrl: s3Client.endpoint.href,
381 | headers: { accept: 'text/html' },
382 | });
383 | expect(res.body).to.equal(expectedBody);
384 | });
385 |
386 | it('does not evaluate routing rules for an index page redirect', async function () {
387 | const expectedBody = 'Hello';
388 | await s3Client
389 | .putObject({
390 | Bucket: 'website2',
391 | Key: 'recursive/foo/index.html',
392 | Body: expectedBody,
393 | })
394 | .promise();
395 | let res;
396 | try {
397 | res = await request('website2/recursive/foo', {
398 | baseUrl: s3Client.endpoint.href,
399 | headers: { accept: 'text/html' },
400 | followRedirect: false,
401 | });
402 | } catch (err) {
403 | res = err.response;
404 | }
405 | expect(res.statusCode).to.equal(302);
406 | expect(res.headers).to.have.property(
407 | 'location',
408 | '/website2/recursive/foo/',
409 | );
410 | });
411 |
412 | it('evaluates a multi-rule config', async function () {
413 | let res;
414 | try {
415 | res = await request(`website3/simple/key`, {
416 | baseUrl: s3Client.endpoint.href,
417 | headers: { accept: 'text/html' },
418 | followRedirect: false,
419 | });
420 | } catch (err) {
421 | res = err.response;
422 | }
423 | expect(res.statusCode).to.equal(301);
424 | expect(res.headers).to.have.property(
425 | 'location',
426 | s3Client.endpoint.href + 'website3/replacement/key',
427 | );
428 | });
429 |
430 | it('evaluates a complex rule', async function () {
431 | let res;
432 | try {
433 | res = await request(`website3/complex/key`, {
434 | baseUrl: s3Client.endpoint.href,
435 | headers: { accept: 'text/html' },
436 | followRedirect: false,
437 | });
438 | } catch (err) {
439 | res = err.response;
440 | }
441 | expect(res.statusCode).to.equal(307);
442 | expect(res.headers).to.have.property(
443 | 'location',
444 | 'https://custom/replacement',
445 | );
446 | });
447 | });
448 | });
449 |
--------------------------------------------------------------------------------
/test/middleware/cors.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const AWS = require('aws-sdk');
4 | const { expect } = require('chai');
5 | const fs = require('fs');
6 | const request = require('request-promise-native').defaults({
7 | resolveWithFullResponse: true,
8 | });
9 |
10 | const S3rver = require('../..');
11 |
12 | describe('CORS Policy Tests', function () {
  // Shared fixture for most tests below; each test boots its own server
  const buckets = [
    // provides rules for origins http://a-test.example.com and http://*.bar.com
    {
      name: 'bucket0',
      configs: [fs.readFileSync(require.resolve('../fixtures/cors-test0.xml'))],
    },
  ];
20 |
21 | it('fails to initialize a configuration with multiple wildcard characters', async function () {
22 | let error;
23 | try {
24 | const server = new S3rver({
25 | configureBuckets: [
26 | {
27 | name: 'bucket0',
28 | configs: [
29 | fs.readFileSync(require.resolve('../fixtures/cors-invalid0.xml')),
30 | ],
31 | },
32 | ],
33 | });
34 | await server.run();
35 | await server.close();
36 | } catch (err) {
37 | error = err;
38 | }
39 | expect(error).to.exist;
40 | expect(error.message).to.include(' can not have more than one wildcard.');
41 | });
42 |
43 | it('fails to initialize a configuration with an illegal AllowedMethod', async function () {
44 | const server = new S3rver({
45 | configureBuckets: [
46 | {
47 | name: 'bucket1',
48 | configs: [
49 | fs.readFileSync(require.resolve('../fixtures/cors-invalid1.xml')),
50 | ],
51 | },
52 | ],
53 | });
54 | let error;
55 | try {
56 | await server.run();
57 | await server.close();
58 | } catch (err) {
59 | error = err;
60 | }
61 | expect(error).to.exist;
62 | expect(error.message).to.include(
63 | 'Found unsupported HTTP method in CORS config.',
64 | );
65 | });
66 |
67 | it('fails to initialize a configuration with missing required fields', async function () {
68 | const server = new S3rver({
69 | configureBuckets: [
70 | {
71 | name: 'bucket2',
72 | configs: [
73 | fs.readFileSync(require.resolve('../fixtures/cors-invalid2.xml')),
74 | ],
75 | },
76 | ],
77 | });
78 | let error;
79 | try {
80 | await server.run();
81 | await server.close();
82 | } catch (err) {
83 | error = err;
84 | }
85 | expect(error).to.exist;
86 | expect(error.code).to.equal('MalformedXML');
87 | });
88 |
89 | it('deletes a CORS configuration in an configured bucket', async function () {
90 | const server = new S3rver({
91 | configureBuckets: [buckets[0]],
92 | });
93 | const { port } = await server.run();
94 | const s3Client = new AWS.S3({
95 | accessKeyId: 'S3RVER',
96 | secretAccessKey: 'S3RVER',
97 | endpoint: `http://localhost:${port}`,
98 | sslEnabled: false,
99 | s3ForcePathStyle: true,
100 | });
101 | let error;
102 | try {
103 | await s3Client.deleteBucketCors({ Bucket: buckets[0].name }).promise();
104 | await s3Client.getBucketCors({ Bucket: buckets[0].name }).promise();
105 | } catch (err) {
106 | error = err;
107 | } finally {
108 | await server.close();
109 | }
110 | expect(error).to.exist;
111 | expect(error.code).to.equal('NoSuchCORSConfiguration');
112 | });
113 |
114 | it('adds the Access-Control-Allow-Origin header for a wildcard origin', async function () {
115 | const origin = 'http://a-test.example.com';
116 | const bucket = {
117 | name: 'foobars',
118 | configs: [fs.readFileSync('./example/cors.xml')],
119 | };
120 |
121 | const server = new S3rver({
122 | configureBuckets: [bucket],
123 | });
124 | const { port } = await server.run();
125 | const s3Client = new AWS.S3({
126 | accessKeyId: 'S3RVER',
127 | secretAccessKey: 'S3RVER',
128 | endpoint: `http://localhost:${port}`,
129 | sslEnabled: false,
130 | s3ForcePathStyle: true,
131 | });
132 | try {
133 | await s3Client
134 | .putObject({
135 | Bucket: bucket.name,
136 | Key: 'image',
137 | Body: await fs.promises.readFile(
138 | require.resolve('../fixtures/image0.jpg'),
139 | ),
140 | ContentType: 'image/jpeg',
141 | })
142 | .promise();
143 | const url = s3Client.getSignedUrl('getObject', {
144 | Bucket: bucket.name,
145 | Key: 'image',
146 | });
147 | const res = await request(url, {
148 | headers: { origin },
149 | });
150 | expect(res.statusCode).to.equal(200);
151 | expect(res.headers).to.have.property('access-control-allow-origin', '*');
152 | } finally {
153 | await server.close();
154 | }
155 | });
156 |
157 | it('adds the Access-Control-Allow-Origin header for a matching origin', async function () {
158 | const origin = 'http://a-test.example.com';
159 | const server = new S3rver({
160 | configureBuckets: [buckets[0]],
161 | });
162 | const { port } = await server.run();
163 | const s3Client = new AWS.S3({
164 | accessKeyId: 'S3RVER',
165 | secretAccessKey: 'S3RVER',
166 | endpoint: `http://localhost:${port}`,
167 | sslEnabled: false,
168 | s3ForcePathStyle: true,
169 | });
170 | try {
171 | await s3Client
172 | .putObject({
173 | Bucket: buckets[0].name,
174 | Key: 'image',
175 | Body: await fs.promises.readFile(
176 | require.resolve('../fixtures/image0.jpg'),
177 | ),
178 | ContentType: 'image/jpeg',
179 | })
180 | .promise();
181 | const url = s3Client.getSignedUrl('getObject', {
182 | Bucket: buckets[0].name,
183 | Key: 'image',
184 | });
185 | const res = await request(url, {
186 | headers: { origin },
187 | });
188 | expect(res.statusCode).to.equal(200);
189 | expect(res.headers).to.have.property(
190 | 'access-control-allow-origin',
191 | origin,
192 | );
193 | } finally {
194 | await server.close();
195 | }
196 | });
197 |
198 | it('matches an origin to a CORSRule with a wildcard character', async function () {
199 | const origin = 'http://foo.bar.com';
200 | const server = new S3rver({
201 | configureBuckets: [buckets[0]],
202 | });
203 | const { port } = await server.run();
204 | const s3Client = new AWS.S3({
205 | accessKeyId: 'S3RVER',
206 | secretAccessKey: 'S3RVER',
207 | endpoint: `http://localhost:${port}`,
208 | sslEnabled: false,
209 | s3ForcePathStyle: true,
210 | });
211 | try {
212 | await s3Client
213 | .putObject({
214 | Bucket: buckets[0].name,
215 | Key: 'image',
216 | Body: await fs.promises.readFile(
217 | require.resolve('../fixtures/image0.jpg'),
218 | ),
219 | ContentType: 'image/jpeg',
220 | })
221 | .promise();
222 | const url = s3Client.getSignedUrl('getObject', {
223 | Bucket: buckets[0].name,
224 | Key: 'image',
225 | });
226 | const res = await request(url, {
227 | headers: { origin },
228 | });
229 | expect(res.statusCode).to.equal(200);
230 | expect(res.headers).to.have.property(
231 | 'access-control-allow-origin',
232 | origin,
233 | );
234 | } finally {
235 | await server.close();
236 | }
237 | });
238 |
239 | it('omits the Access-Control-Allow-Origin header for a non-matching origin', async function () {
240 | const origin = 'http://b-test.example.com';
241 | const server = new S3rver({
242 | configureBuckets: [buckets[0]],
243 | });
244 | const { port } = await server.run();
245 | const s3Client = new AWS.S3({
246 | accessKeyId: 'S3RVER',
247 | secretAccessKey: 'S3RVER',
248 | endpoint: `http://localhost:${port}`,
249 | sslEnabled: false,
250 | s3ForcePathStyle: true,
251 | });
252 | try {
253 | await s3Client
254 | .putObject({
255 | Bucket: buckets[0].name,
256 | Key: 'image',
257 | Body: await fs.promises.readFile(
258 | require.resolve('../fixtures/image0.jpg'),
259 | ),
260 | ContentType: 'image/jpeg',
261 | })
262 | .promise();
263 | const url = s3Client.getSignedUrl('getObject', {
264 | Bucket: buckets[0].name,
265 | Key: 'image',
266 | });
267 | const res = await request(url, {
268 | headers: { origin },
269 | });
270 | expect(res.statusCode).to.equal(200);
271 | expect(res.headers).to.not.have.property('access-control-allow-origin');
272 | } finally {
273 | await server.close();
274 | }
275 | });
276 |
277 | it('exposes appropriate headers for a range request', async function () {
278 | const origin = 'http://a-test.example.com';
279 | const server = new S3rver({
280 | configureBuckets: [buckets[0]],
281 | });
282 | const { port } = await server.run();
283 | const s3Client = new AWS.S3({
284 | accessKeyId: 'S3RVER',
285 | secretAccessKey: 'S3RVER',
286 | endpoint: `http://localhost:${port}`,
287 | sslEnabled: false,
288 | s3ForcePathStyle: true,
289 | });
290 | try {
291 | await s3Client
292 | .putObject({
293 | Bucket: buckets[0].name,
294 | Key: 'image',
295 | Body: await fs.promises.readFile(
296 | require.resolve('../fixtures/image0.jpg'),
297 | ),
298 | ContentType: 'image/jpeg',
299 | })
300 | .promise();
301 | const url = s3Client.getSignedUrl('getObject', {
302 | Bucket: buckets[0].name,
303 | Key: 'image',
304 | });
305 | const res = await request(url, {
306 | headers: { origin, range: 'bytes=0-99' },
307 | });
308 | expect(res.statusCode).to.equal(206);
309 | expect(res.headers).to.have.property(
310 | 'access-control-expose-headers',
311 | 'Accept-Ranges, Content-Range',
312 | );
313 | } finally {
314 | await server.close();
315 | }
316 | });
317 |
318 | it('responds to OPTIONS requests with allowed headers', async function () {
319 | const origin = 'http://foo.bar.com';
320 | const server = new S3rver({
321 | configureBuckets: [buckets[0]],
322 | });
323 | const { port } = await server.run();
324 | const s3Client = new AWS.S3({
325 | accessKeyId: 'S3RVER',
326 | secretAccessKey: 'S3RVER',
327 | endpoint: `http://localhost:${port}`,
328 | sslEnabled: false,
329 | s3ForcePathStyle: true,
330 | });
331 | const url = s3Client.getSignedUrl('getObject', {
332 | Bucket: buckets[0].name,
333 | Key: 'image',
334 | });
335 | try {
336 | const res = await request(url, {
337 | method: 'OPTIONS',
338 | headers: {
339 | origin,
340 | 'Access-Control-Request-Method': 'GET',
341 | 'Access-Control-Request-Headers': 'Range, Authorization',
342 | },
343 | });
344 | expect(res.statusCode).to.equal(200);
345 | expect(res.headers).to.have.property('access-control-allow-origin', '*');
346 | expect(res.headers).to.have.property(
347 | 'access-control-allow-headers',
348 | 'range, authorization',
349 | );
350 | } finally {
351 | await server.close();
352 | }
353 | });
354 |
355 | it('responds to OPTIONS requests with a Forbidden response', async function () {
356 | const origin = 'http://a-test.example.com';
357 | const server = new S3rver({
358 | configureBuckets: [buckets[0]],
359 | });
360 | const { port } = await server.run();
361 | const s3Client = new AWS.S3({
362 | accessKeyId: 'S3RVER',
363 | secretAccessKey: 'S3RVER',
364 | endpoint: `http://localhost:${port}`,
365 | sslEnabled: false,
366 | s3ForcePathStyle: true,
367 | });
368 | const url = s3Client.getSignedUrl('getObject', {
369 | Bucket: buckets[0].name,
370 | Key: 'image',
371 | });
372 | let error;
373 | try {
374 | await request(url, {
375 | method: 'OPTIONS',
376 | headers: {
377 | origin,
378 | 'Access-Control-Request-Method': 'GET',
379 | 'Access-Control-Request-Headers': 'Range, Authorization',
380 | },
381 | });
382 | } catch (err) {
383 | error = err;
384 | } finally {
385 | await server.close();
386 | }
387 | expect(error).to.exist;
388 | expect(error.response.statusCode).to.equal(403);
389 | });
390 |
391 | it('responds to OPTIONS requests with a Forbidden response when CORS is disabled', async function () {
392 | const origin = 'http://foo.bar.com';
393 | const bucket = { name: 'foobar' };
394 | const server = new S3rver({
395 | configureBuckets: [bucket],
396 | });
397 | const { port } = await server.run();
398 | const s3Client = new AWS.S3({
399 | accessKeyId: 'S3RVER',
400 | secretAccessKey: 'S3RVER',
401 | endpoint: `http://localhost:${port}`,
402 | sslEnabled: false,
403 | s3ForcePathStyle: true,
404 | });
405 | const url = s3Client.getSignedUrl('getObject', {
406 | Bucket: bucket.name,
407 | Key: 'image',
408 | });
409 | let error;
410 | try {
411 | await request(url, {
412 | method: 'OPTIONS',
413 | headers: {
414 | origin,
415 | 'Access-Control-Request-Method': 'GET',
416 | },
417 | });
418 | } catch (err) {
419 | error = err;
420 | } finally {
421 | await server.close();
422 | }
423 | expect(error).to.exist;
424 | expect(error.response.statusCode).to.equal(403);
425 | });
426 |
427 | it('responds correctly to OPTIONS requests that dont specify access-control-request-headers', async function () {
428 | const origin = 'http://a-test.example.com';
429 | const server = new S3rver({
430 | configureBuckets: [buckets[0]],
431 | });
432 | const { port } = await server.run();
433 | const s3Client = new AWS.S3({
434 | accessKeyId: 'S3RVER',
435 | secretAccessKey: 'S3RVER',
436 | endpoint: `http://localhost:${port}`,
437 | sslEnabled: false,
438 | s3ForcePathStyle: true,
439 | });
440 | const url = s3Client.getSignedUrl('getObject', {
441 | Bucket: buckets[0].name,
442 | Key: 'image',
443 | });
444 | try {
445 | await request(url, {
446 | method: 'OPTIONS',
447 | headers: {
448 | origin,
449 | 'Access-Control-Request-Method': 'GET',
450 | // No Access-Control-Request-Headers specified...
451 | },
452 | });
453 | } finally {
454 | await server.close();
455 | }
456 | });
457 | });
458 |
--------------------------------------------------------------------------------
/lib/signature/v4.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { Transform } = require('stream');
4 | const { createHash, createHmac } = require('crypto');
5 |
6 | const S3Error = require('../models/error');
7 | const { parseISO8601String } = require('../utils');
8 |
9 | const CRLF = Buffer.from('\r\n');
10 |
11 | exports.parseHeader = function (headers) {
12 | if (!('x-amz-content-sha256' in headers)) {
13 | throw new S3Error(
14 | 'InvalidRequest',
15 | 'Missing required header for this request: x-amz-content-sha256',
16 | );
17 | }
18 | if (
19 | !headers['x-amz-content-sha256'].match(
20 | /^(UNSIGNED-PAYLOAD|STREAMING-AWS4-HMAC-SHA256-PAYLOAD|[0-9A-Fa-f]{64})$/,
21 | )
22 | ) {
23 | throw new S3Error(
24 | 'InvalidArgument',
25 | 'x-amz-content-sha256 must be UNSIGNED-PAYLOAD, STREAMING-AWS4-HMAC-SHA256-PAYLOAD, or a valid sha256 value.',
26 | {
27 | ArgumentName: 'x-amz-content-sha256',
28 | ArgumentValue: headers['x-amz-content-sha256'],
29 | },
30 | );
31 | }
32 |
33 | const componentMap = new Map(
34 | headers.authorization
35 | .split(' ')
36 | .slice(1)
37 | .join('')
38 | .split(',')
39 | .map((component) => {
40 | const [key, ...value] = component.split('=');
41 | return [key, value.join('=')];
42 | }),
43 | );
44 |
45 | if (componentMap.size !== 3) {
46 | throw new S3Error(
47 | 'AuthorizationHeaderMalformed',
48 | 'The authorization header is malformed; the authorization header ' +
49 | 'requires three components: Credential, SignedHeaders, and ' +
50 | 'Signature.',
51 | );
52 | }
53 |
54 | for (const componentName of ['Credential', 'SignedHeaders', 'Signature']) {
55 | if (!componentMap.has(componentName)) {
56 | throw new S3Error(
57 | 'AuthorizationHeaderMalformed',
58 | `The authorization header is malformed; missing ${componentName}.`,
59 | );
60 | }
61 | // skip verification of each authorization header component
62 | }
63 |
64 | const [accessKeyId, date, region, service, termination] = componentMap
65 | .get('Credential')
66 | .split('/');
67 |
68 | return {
69 | accessKeyId,
70 | credential: { date, region, service, termination },
71 | signedHeaders: componentMap.get('SignedHeaders').split(';'),
72 | signatureProvided: componentMap.get('Signature'),
73 | };
74 | };
75 |
76 | exports.parseQuery = function (query) {
77 | // query param values are case-sensitive
78 | if (query['X-Amz-Algorithm'] !== 'AWS4-HMAC-SHA256') {
79 | throw new S3Error(
80 | 'AuthorizationQueryParametersError',
81 | 'X-Amz-Algorithm only supports "AWS4-HMAC-SHA256"',
82 | );
83 | }
84 |
85 | if (
86 | !('X-Amz-Credential' in query) ||
87 | !('X-Amz-Signature' in query) ||
88 | !('X-Amz-Date' in query) ||
89 | !('X-Amz-SignedHeaders' in query) ||
90 | !('X-Amz-Expires' in query)
91 | ) {
92 | throw new S3Error(
93 | 'AuthorizationQueryParametersError',
94 | 'Query-string authentication version 4 requires the ' +
95 | 'X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, ' +
96 | 'X-Amz-SignedHeaders, and X-Amz-Expires parameters.',
97 | );
98 | }
99 | const [accessKeyId, date, region, service, termination] =
100 | query['X-Amz-Credential'].split('/');
101 |
102 | const request = {
103 | signature: {
104 | version: 4,
105 | algorithm: 'sha256',
106 | encoding: 'hex',
107 | },
108 | accessKeyId,
109 | credential: { date, region, service, termination },
110 | time: query['X-Amz-Date'],
111 | signedHeaders: query['X-Amz-SignedHeaders'].split(';'),
112 | signatureProvided: query['X-Amz-Signature'],
113 | };
114 |
115 | const requestTime = parseISO8601String(request.time);
116 | if (isNaN(requestTime)) {
117 | throw new S3Error(
118 | 'AuthorizationQueryParametersError',
119 | "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
120 | );
121 | }
122 |
123 | const expires = Number(query['X-Amz-Expires']);
124 | if (isNaN(expires))
125 | if (expires < 0) {
126 | throw new S3Error(
127 | 'AuthorizationQueryParametersError',
128 | 'X-Amz-Expires must be non-negative',
129 | );
130 | }
131 | if (expires > 604800) {
132 | throw new S3Error(
133 | 'AuthorizationQueryParametersError',
134 | 'X-Amz-Expires must be less than a week (in seconds); that is, the ' +
135 | 'given X-Amz-Expires must be less than 604800 seconds',
136 | );
137 | }
138 |
139 | const serverTime = new Date();
140 | // NOTE: S3 doesn't care about time skew for presigned requests
141 | const expiresTime = new Date(Number(requestTime) + expires * 1000);
142 |
143 | if (serverTime > expiresTime) {
144 | throw new S3Error('AccessDenied', 'Request has expired', {
145 | 'X-Amz-Expires': query['X-Amz-Expires'],
146 | Expires: expiresTime.toISOString().replace(/\.\d+/, ''),
147 | ServerTime: serverTime.toISOString().replace(/\.\d+/, ''),
148 | });
149 | }
150 |
151 | return request;
152 | };
153 |
154 | /**
155 | * Generates a string to be signed for signature version 4.
156 | *
157 | * @param {*} canonicalRequest
158 | * @param {*} signature
159 | */
160 | exports.getStringToSign = function (
161 | canonicalRequest,
162 | { credential, signedHeaders },
163 | ) {
164 | const canonicalHeaders = signedHeaders
165 | .map((header) => `${header}:${canonicalRequest.headers[header]}\n`)
166 | .join('');
167 |
168 | const contentHash =
169 | canonicalRequest.headers['x-amz-content-sha256'] || 'UNSIGNED-PAYLOAD';
170 |
171 | const canonicalRequestString = [
172 | canonicalRequest.method,
173 | canonicalRequest.uri,
174 | canonicalRequest.querystring,
175 | canonicalHeaders,
176 | signedHeaders.join(';'),
177 | contentHash,
178 | ].join('\n');
179 |
180 | return [
181 | 'AWS4-HMAC-SHA256',
182 | canonicalRequest.timestamp,
183 | [
184 | credential.date,
185 | credential.region,
186 | credential.service,
187 | credential.termination,
188 | ].join('/'),
189 | createHash('sha256').update(canonicalRequestString).digest('hex'),
190 | ].join('\n');
191 | };
192 |
193 | /**
194 | * Performs the calculation of an authentication code for a string using the specified key and
195 | * various components required by v4.
196 | *
197 | * @param {String} secretKey a secret access key
198 | * @param {String} date received from the credential header
199 | * @param {String} region received From the credential header
200 | * @param {String} service received From the credential header
201 | */
202 | exports.getSigningKey = function (secretKey, date, region, service) {
203 | const dateKey = createHmac('sha256', 'AWS4' + secretKey)
204 | .update(date)
205 | .digest();
206 | const regionKey = createHmac('sha256', dateKey).update(region).digest();
207 | const serviceKey = createHmac('sha256', regionKey).update(service).digest();
208 | const signingKey = createHmac('sha256', serviceKey)
209 | .update('aws4_request')
210 | .digest();
211 |
212 | return signingKey;
213 | };
214 |
/**
 * Transform stream that decodes an aws-chunked request body
 * (x-amz-content-sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD) into the raw
 * payload bytes.
 *
 * Each chunk arrives as "<size-hex>;chunk-signature=<sig>\r\n<payload>\r\n"
 * and the stream is terminated by a zero-length chunk. Payloads are hashed
 * as they pass through, but chunk signatures are not yet verified (see the
 * TODO in _transform).
 */
class AwsChunkedTransform extends Transform {
  /**
   * @param {number} expectedContentLength value parsed from the
   *   X-Amz-Decoded-Content-Length header; NaN (missing or non-numeric
   *   header) is rejected up front
   * @throws {S3Error} MissingContentLength when the length is NaN
   */
  constructor(expectedContentLength) {
    super();

    if (Number.isNaN(expectedContentLength)) {
      throw new S3Error(
        'MissingContentLength',
        'You must provide the Content-Length HTTP header.',
      );
    }

    // Mutable decoder state shared by the consume/finalize helpers:
    //   num                  - 1-based index of the chunk being decoded
    //   size                 - declared size of the current chunk (NaN until
    //                          the first header is parsed)
    //   header               - Map of chunk header params, or null while a
    //                          header is still being read
    //   hash                 - running SHA-256 of the current chunk's payload
    //   bytesRemaining       - payload bytes still expected for this chunk
    //   buf                  - bytes buffered for the element being parsed
    //   decodedContentLength - total payload bytes decoded so far
    this.chunkDecoderState = {
      num: 0,
      size: NaN,
      header: null,
      hash: null,
      bytesRemaining: 0,
      buf: Buffer.alloc(0),
      decodedContentLength: 0,
      expectedContentLength,
    };
  }

  // true when the buffered bytes end with the CR of a CRLF pair
  endsWithCr(buf) {
    return buf[buf.length - 1] === CRLF[0];
  }

  // true when buf[position] is the LF of a CRLF pair
  startsWithLf(buf, position = 0) {
    return buf[position] === CRLF[1];
  }

  /**
   * Parses the buffered "<size-hex>;param=value" chunk header line and resets
   * state for reading the chunk's payload.
   *
   * @throws {S3Error} when chunk sizing or header params violate the protocol
   */
  finalizeChunkHeader() {
    const [sizeHex, ...params] = this.chunkDecoderState.buf
      .slice(0, -CRLF.length)
      .toString()
      .split(';');
    const nextChunkSize = parseInt(sizeHex, 16);

    // The chunk number AWS reports seem to be off by one, likely because the check for the small
    // chunk sizes doesn't happen until next chunk's header is parsed and the counter is incremented
    this.chunkDecoderState.num++;
    if (nextChunkSize > 0) {
      // Note: this inspects the *previous* chunk's size; on the first header
      // size is still NaN, so the comparison is false and no error is raised.
      if (this.chunkDecoderState.size < 8192) {
        throw new S3Error(
          'InvalidChunkSizeError',
          'Only the last chunk is allowed to have a size less than 8192 bytes',
          {
            Chunk: this.chunkDecoderState.num,
            BadChunkSize: this.chunkDecoderState.size,
          },
        );
      }
    } else if (
      !Number.isInteger(nextChunkSize) ||
      this.chunkDecoderState.size === 0
    ) {
      // chunks with noninteger sizes or additional chunks sent after an empty chunk should
      // trigger an IncompleteBody error
      throw new S3Error(
        'IncompleteBody',
        'The request body terminated unexpectedly',
      );
    }
    this.chunkDecoderState.size = this.chunkDecoderState.bytesRemaining =
      nextChunkSize;
    this.chunkDecoderState.hash = createHash('sha256');
    this.chunkDecoderState.header = new Map(
      params.map((entry) => entry.split('=')),
    );

    // AWS's chunk header parsing seems to naively assume that only one param is ever
    // specified and breaks in strange ways depending on if additional params are appended or
    // prepended. The behavior below matches S3 most of the time.
    if (
      !this.chunkDecoderState.header.has('chunk-signature') ||
      this.chunkDecoderState.header.size > 1
    ) {
      throw new S3Error(
        'IncompleteBody',
        'The request body terminated unexpectedly',
      );
    }

    this.chunkDecoderState.buf = Buffer.alloc(0);
  }

  /**
   * Verifies the CRLF that terminates a chunk's payload, accounts for the
   * decoded bytes, and resets state so the next header can be parsed.
   *
   * @throws {S3Error} IncompleteBody when the trailing CRLF is malformed
   */
  finalizeChunk() {
    if (!CRLF.equals(this.chunkDecoderState.buf)) {
      throw new S3Error(
        'IncompleteBody',
        'The request body terminated unexpectedly',
      );
    }
    this.chunkDecoderState.decodedContentLength += this.chunkDecoderState.size;
    this.chunkDecoderState.buf = Buffer.alloc(0);
    this.chunkDecoderState.header = null;
  }

  /**
   * Consumes bytes from a chunk until a CRLF sequence is discovered
   *
   * @param {Buffer} chunk
   * @param {number} position
   * @returns the number of bytes read from the chunk including the CRLF if one was discovered
   */
  consumeUntilCrlf(chunk, position = 0) {
    let crlfIdx;
    if (
      this.endsWithCr(this.chunkDecoderState.buf) &&
      this.startsWithLf(chunk, position)
    ) {
      // The CRLF was split across reads: the CR is already buffered, so the
      // LF sits "before" this chunk — represented here as index -1.
      crlfIdx = -1;
    } else {
      crlfIdx = chunk.indexOf(CRLF, position);
      if (crlfIdx === -1) {
        // no CRLF in this read; buffer everything and wait for more data
        crlfIdx = chunk.length;
      }
    }
    this.chunkDecoderState.buf = Buffer.concat([
      this.chunkDecoderState.buf,
      chunk.slice(position, crlfIdx + CRLF.length),
    ]);
    return Math.min(crlfIdx + CRLF.length, chunk.length) - position;
  }

  /**
   * Consumes bytes from a chunk up to the expected chunk length
   *
   * @param {Buffer} chunk
   * @param {number} position
   * @returns the number of bytes read from the chunk
   */
  consumePayload(chunk, position = 0) {
    const payload = chunk.slice(
      position,
      position + this.chunkDecoderState.bytesRemaining,
    );
    this.chunkDecoderState.buf = Buffer.concat([
      this.chunkDecoderState.buf,
      payload,
    ]);
    this.chunkDecoderState.hash.update(payload);
    this.chunkDecoderState.bytesRemaining -= payload.length;
    return payload.length;
  }

  /**
   * Drives the decoder state machine over one read from the source stream,
   * emitting only the decoded payload bytes downstream.
   */
  _transform(chunk, encoding, callback) {
    if (!this.readableFlowing) {
      // don't transform anything if nothing is reading the data yet
      this.once('resume', () => this._transform(chunk, encoding, callback));
      return;
    }
    try {
      let payload = Buffer.alloc(0);
      let i = 0;
      do {
        if (this.chunkDecoderState.header) {
          // header has been parsed, start reading bytes
          if (this.chunkDecoderState.bytesRemaining) {
            i += this.consumePayload(chunk, i);
            payload = Buffer.concat([payload, this.chunkDecoderState.buf]);
            this.chunkDecoderState.buf = Buffer.alloc(0);
          } else {
            if (this.chunkDecoderState.hash) {
              // TODO: validate signatures before verifying CRLF
              // const hashDigest = this.chunkDecoderState.hash.digest();
              this.chunkDecoderState.hash = null;
            }
            i += this.consumeUntilCrlf(chunk, i);
            if (this.chunkDecoderState.buf.length >= CRLF.length) {
              this.finalizeChunk();
            }
          }
        } else {
          // still reading a chunk header line
          i += this.consumeUntilCrlf(chunk, i);
          if (CRLF.equals(this.chunkDecoderState.buf.slice(-CRLF.length))) {
            this.finalizeChunkHeader();
          }
        }
      } while (i < chunk.length);
      callback(null, payload);
    } catch (err) {
      callback(err);
    }
  }

  /**
   * Validates terminal state once the source ends: the final chunk must be
   * the zero-length terminator, and the decoded byte count must match
   * X-Amz-Decoded-Content-Length.
   */
  _flush(callback) {
    const { size, decodedContentLength, expectedContentLength } =
      this.chunkDecoderState;
    // the final chunk processed should have zero length
    if (size !== 0) {
      callback(
        new S3Error(
          'IncompleteBody',
          'The request body terminated unexpectedly',
        ),
      );
    } else if (decodedContentLength !== expectedContentLength) {
      callback(
        new S3Error(
          'IncompleteBody',
          'You did not provide the number of bytes specified by the Content-Length HTTP header',
        ),
      );
    } else {
      callback(null);
    }
  }
}
424 |
425 | /**
426 | * Transforms a request body stream sent using aws-chunked encoding.
427 | *
428 | * Content hash verification is unimplemented.
429 | *
430 | * @param {Koa.Context} ctx
431 | */
432 | exports.aws4SignatureBodyParser = function (ctx) {
433 | ctx.request.body =
434 | ctx.header['x-amz-content-sha256'] === 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
435 | ? ctx.req.pipe(
436 | new AwsChunkedTransform(
437 | parseInt(ctx.get('X-Amz-Decoded-Content-Length')),
438 | ),
439 | )
440 | : ctx.req;
441 | };
442 |
--------------------------------------------------------------------------------
/test/middleware/authentication.spec.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { expect } = require('chai');
4 | const express = require('express');
5 | const fs = require('fs');
6 | const request = require('request-promise-native').defaults({
7 | resolveWithFullResponse: true,
8 | });
9 | const { URL } = require('url');
10 |
11 | const { toISO8601String } = require('../../lib/utils');
12 |
13 | const { createServerAndClient } = require('../helpers');
14 |
// Integration tests for request authentication: signature v2/v4 validation
// supplied via headers and via presigned query parameters.
describe('REST Authentication', () => {
  let s3rver;
  let s3Client;
  const buckets = [
    {
      name: 'bucket-a',
    },
  ];

  beforeEach(async function () {
    ({ s3rver, s3Client } = await createServerAndClient({
      configureBuckets: buckets,
    }));
  });

  it('can GET a signed URL with subdomain bucket', async function () {
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'text', Body: 'Hello!' })
      .promise();
    const endpointHref = s3Client.endpoint.href;
    s3Client.setEndpoint(`https://s3.amazonaws.com`);
    Object.assign(s3Client.config, {
      s3ForcePathStyle: false,
    });
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'text',
    });
    const { host, pathname, searchParams } = new URL(url);
    const res = await request(new URL(pathname, endpointHref), {
      qs: searchParams,
      headers: { host },
    });
    expect(res.body).to.equal('Hello!');
  });

  it('can GET a signed URL with vhost bucket', async function () {
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'text', Body: 'Hello!' })
      .promise();
    const endpointHref = s3Client.endpoint.href;
    s3Client.setEndpoint(
      `${s3Client.endpoint.protocol}//bucket-a:${s3Client.endpoint.port}${s3Client.endpoint.path}`,
    );
    Object.assign(s3Client.config, {
      s3ForcePathStyle: false,
      s3BucketEndpoint: true,
    });
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'text',
    });
    const { host, pathname, searchParams } = new URL(url);
    const res = await request(new URL(pathname, endpointHref), {
      qs: searchParams,
      headers: { host },
    });
    expect(res.body).to.equal('Hello!');
  });

  it('rejects a request specifying multiple auth mechanisms', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
          Signature: 'dummysig',
        },
        headers: {
          Authorization: 'AWS S3RVER:dummysig',
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(400);
    expect(res.body).to.contain('InvalidArgument');
  });

  it('rejects a request with an invalid authorization header [v2]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        headers: {
          Authorization: 'AWS S3RVER dummysig',
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(400);
    expect(res.body).to.contain('InvalidArgument');
  });

  it('rejects a request with an invalid authorization header [v4]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        headers: {
          // omitting Signature and SignedHeaders components
          Authorization:
            'AWS4-HMAC-SHA256 Credential=S3RVER/20060301/us-east-1/s3/aws4_request',
          'X-Amz-Content-SHA256': 'UNSIGNED-PAYLOAD',
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(400);
    expect(res.body).to.contain('AuthorizationHeaderMalformed');
  });

  it('rejects a request with invalid query params [v2]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          AWSAccessKeyId: 'S3RVER',
          Signature: 'dummysig',
          // expiration is omitted
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('AccessDenied');
  });

  it('rejects a request with invalid query params [v4]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
          'X-Amz-Signature': 'dummysig',
          // omitting most other parameters for sig v4
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(400);
    expect(res.body).to.contain('AuthorizationQueryParametersError');
  });

  it('rejects a request with an incorrect signature in header [v2]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        headers: {
          Authorization: 'AWS S3RVER:badsig',
          'X-Amz-Date': new Date().toUTCString(),
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('SignatureDoesNotMatch');
  });

  it('rejects a request with an incorrect signature in query params [v2]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          AWSAccessKeyId: 'S3RVER',
          Signature: 'badsig',
          // expires 900 seconds from now (numeric addition; the previous
          // string concatenation produced a nonsensical far-future value)
          Expires: (Date.now() / 1000 + 900).toFixed(),
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('SignatureDoesNotMatch');
  });

  it('rejects a request with a large time skew', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        headers: {
          Authorization: 'AWS S3RVER:dummysig',
          // 20 minutes in the future
          'X-Amz-Date': new Date(Date.now() + 20000 * 60).toUTCString(),
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('RequestTimeTooSkewed');
  });

  it('rejects an expired presigned request [v2]', async function () {
    s3Client.config.set('signatureVersion', 's3');
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'mykey',
      Expires: -10, // 10 seconds in the past
    });
    let res;
    try {
      res = await request(url);
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('AccessDenied');
  });

  it('rejects an expired presigned request [v4]', async function () {
    s3Client.config.set('signatureVersion', 'v4');
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'mykey',
      Expires: -10, // 10 seconds in the past
    });
    let res;
    try {
      res = await request(url);
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('AccessDenied');
  });

  it('rejects a presigned request with an invalid expiration [v4]', async function () {
    // aws-sdk unfortunately doesn't expose a way to set the timestamp of the request to presign
    // so we have to construct a mostly-valid request ourselves
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
          'X-Amz-Credential': 'S3RVER/20060301/us-east-1/s3/aws4_request',
          'X-Amz-SignedHeaders': 'host',
          'X-Amz-Signature': 'dummysig',
          // 20 minutes in the past
          'X-Amz-Date': toISO8601String(Date.now() - 20000 * 60),
          'X-Amz-Expires': 20,
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('AccessDenied');
  });

  it('overrides response headers in signed GET requests', async function () {
    await s3Client
      .putObject({
        Bucket: 'bucket-a',
        Key: 'image',
        Body: await fs.promises.readFile(
          require.resolve('../fixtures/image0.jpg'),
        ),
      })
      .promise();
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'image',
      ResponseContentType: 'image/jpeg',
      ResponseContentDisposition: 'attachment',
    });
    const res = await request(url);
    expect(res.headers['content-type']).to.equal('image/jpeg');
    expect(res.headers['content-disposition']).to.equal('attachment');
  });

  it('rejects anonymous requests with response header overrides in GET requests', async function () {
    await s3Client
      .putObject({
        Bucket: 'bucket-a',
        Key: 'image',
        Body: await fs.promises.readFile(
          require.resolve('../fixtures/image0.jpg'),
        ),
      })
      .promise();
    let res;
    try {
      res = await request('bucket-a/image', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          'response-content-type': 'image/jpeg',
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(400);
    expect(res.body).to.contain('InvalidRequest');
  });

  it('adds x-amz-meta-* metadata specified via query parameters', async function () {
    const url = s3Client.getSignedUrl('putObject', {
      Bucket: 'bucket-a',
      Key: 'mykey',
      Metadata: {
        somekey: 'value',
      },
    });
    await request.put(url, { body: 'Hello!' });
    const object = await s3Client
      .headObject({
        Bucket: 'bucket-a',
        Key: 'mykey',
      })
      .promise();
    expect(object.Metadata).to.have.property('somekey', 'value');
  });

  it('can use signed URLs while mounted on a subpath', async function () {
    const app = express();
    app.use('/basepath', s3rver.getMiddleware());

    const { httpServer } = s3rver;
    httpServer.removeAllListeners('request');
    httpServer.on('request', app);
    s3Client.setEndpoint(
      `${s3Client.endpoint.protocol}//localhost:${s3Client.endpoint.port}/basepath`,
    );

    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'text', Body: 'Hello!' })
      .promise();
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'text',
    });
    const res = await request(url);
    expect(res.body).to.equal('Hello!');
  });

  it('can use signed vhost URLs while mounted on a subpath', async function () {
    await s3Client
      .putObject({ Bucket: 'bucket-a', Key: 'text', Body: 'Hello!' })
      .promise();

    const app = express();
    app.use('/basepath', s3rver.getMiddleware());

    const { httpServer } = s3rver;
    httpServer.removeAllListeners('request');
    httpServer.on('request', app);

    const endpointHref = s3Client.endpoint.href;
    s3Client.setEndpoint(
      `${s3Client.endpoint.protocol}//bucket-a:${s3Client.endpoint.port}/basepath`,
    );
    Object.assign(s3Client.config, {
      s3ForcePathStyle: false,
      s3BucketEndpoint: true,
    });
    const url = s3Client.getSignedUrl('getObject', {
      Bucket: 'bucket-a',
      Key: 'text',
    });
    const { host, pathname, searchParams } = new URL(url);
    const res = await request(new URL(pathname, endpointHref), {
      qs: searchParams,
      headers: { host },
    });
    expect(res.body).to.equal('Hello!');
  });

  it('rejects a request with an incorrect signature in header [v4]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        headers: {
          Authorization:
            'AWS4-HMAC-SHA256 Credential=S3RVER/20060301/us-east-1/s3/aws4_request, SignedHeaders=host, Signature=badsig',
          'X-Amz-Content-SHA256': 'UNSIGNED-PAYLOAD',
          'X-Amz-Date': toISO8601String(Date.now()),
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('SignatureDoesNotMatch');
  });

  it('rejects a request with an incorrect signature in query params [v4]', async function () {
    let res;
    try {
      res = await request('bucket-a/mykey', {
        baseUrl: s3Client.endpoint.href,
        qs: {
          'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
          'X-Amz-Credential': 'S3RVER/20200815/eu-west-2/s3/aws4_request',
          'X-Amz-Date': toISO8601String(Date.now()),
          'X-Amz-Expires': 30,
          'X-Amz-SignedHeaders': 'host',
          'X-Amz-Signature': 'badsig',
        },
      });
    } catch (err) {
      res = err.response;
    }
    expect(res.statusCode).to.equal(403);
    expect(res.body).to.contain('SignatureDoesNotMatch');
  });
});
437 |
--------------------------------------------------------------------------------
/lib/controllers/bucket.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const crypto = require('crypto');
4 |
5 | const { DUMMY_ACCOUNT } = require('../models/account');
6 | const S3Error = require('../models/error');
7 | const {
8 | S3CorsConfiguration,
9 | S3WebsiteConfiguration,
10 | } = require('../models/config');
11 | const { utf8BodyParser } = require('../utils');
12 |
/**
 * Encrypts an opaque list-objects continuation token.
 *
 * The token is a random 8-byte IV followed by the DES ciphertext of
 * "region/bucket/keyName", base64-encoded.
 *
 * @param {String} bucket name of the bucket being listed
 * @param {String} keyName key after which listing should resume
 * @param {String} region region baked into the token for later validation
 * @returns {String} base64-encoded token
 */
function generateContinuationToken(bucket, keyName, region) {
  const plaintext = [region, bucket, keyName].join('/');
  const key = Buffer.alloc(8, 'S3RVER', 'utf8');
  const iv = crypto.randomBytes(8);
  // ensure the first byte of IV lies between [212, 216)
  iv[0] = (iv[0] & 0b00000011) | 0b11010100;
  // use DES for its 8-byte block size
  // (real S3 has blocks of lengths [9,8,7] repeating)
  const cipher = crypto.createCipheriv('des', key, iv);
  const ciphertext = Buffer.concat([
    cipher.update(plaintext, 'utf8'),
    cipher.final(),
  ]);
  return Buffer.concat([iv, ciphertext]).toString('base64');
}
27 |
/**
 * Decrypts a continuation token produced by generateContinuationToken.
 *
 * Tokens are untrusted client input: tokens that are too short, have bad
 * padding, or otherwise fail to decrypt yield '' (which callers treat as an
 * invalid token) instead of letting a raw crypto error escape as a 500.
 *
 * @param {String} token base64-encoded token from the request
 * @returns {String} the decrypted "region/bucket/key" string, or '' when
 *   the token is malformed
 */
function decipherContinuationToken(token) {
  const buf = Buffer.from(token, 'base64');
  if (buf.length < 8) return '';
  const key = Buffer.alloc(8, 'S3RVER', 'utf8');
  const iv = buf.slice(0, 8);
  const ciphertext = buf.slice(8);
  try {
    const decipher = crypto.createDecipheriv('des', key, iv);
    return Buffer.concat([
      decipher.update(ciphertext),
      decipher.final(),
    ]).toString('utf8');
  } catch (err) {
    // bad block length or padding — treat as any other invalid token
    return '';
  }
}
40 |
41 | exports.bucketExists = async function bucketExists(ctx, next) {
42 | const bucketName = ctx.params.bucket;
43 | const bucket = await ctx.app.store.getBucket(bucketName);
44 | if (!bucket) {
45 | ctx.logger.error('No bucket found for "%s"', bucketName);
46 | throw new S3Error('NoSuchBucket', 'The specified bucket does not exist', {
47 | BucketName: bucketName,
48 | });
49 | }
50 | ctx.bucket = bucket;
51 | if (next) await next();
52 | };
53 |
/*
 * Operations on Buckets
 * The following methods correspond to operations you can perform on Amazon S3 buckets.
 * https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketOps.html
 */

// Bucket subresource names recognized as query parameters (e.g. `?cors`).
// NOTE(review): presumably consumed by the router to dispatch bucket-level
// subresource requests — confirm against lib/routes.js.
exports.METHODS = [
  'acl',
  'analytics',
  'cors',
  'delete',
  'encryption',
  'inventory',
  'lifecycle',
  'location',
  'metrics',
  'notification',
  'object-lock',
  'policy',
  'policyStatus',
  'publicAccessBlock',
  'replication',
  'requestPayment',
  'tagging',
  'uploads',
  'versions',
  'website',
];
82 |
83 | /**
84 | * DELETE Bucket
85 | * Deletes the bucket named in the URI. All objects (including all object versions and delete
86 | * markers) in the bucket must be deleted before the bucket itself can be deleted.
87 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETE.html}
88 | */
89 | exports.deleteBucket = async function deleteBucket(ctx) {
90 | const { objects } = await ctx.store.listObjects(ctx.params.bucket, {
91 | maxKeys: 1,
92 | });
93 | if (objects.length) {
94 | throw new S3Error(
95 | 'BucketNotEmpty',
96 | 'The bucket your tried to delete is not empty',
97 | { BucketName: ctx.params.bucket },
98 | );
99 | }
100 | await ctx.store.deleteBucket(ctx.params.bucket);
101 | ctx.status = 204;
102 | };
103 |
104 | /**
105 | * DELETE Bucket cors
106 | * Deletes the cors configuration information set for the bucket.
107 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEcors.html}
108 | */
109 | exports.deleteBucketCors = async function deleteBucketCors(ctx) {
110 | await ctx.store.deleteSubresource(ctx.params.bucket, undefined, 'cors');
111 | ctx.body = '';
112 | };
113 |
114 | /**
115 | * DELETE Bucket website
116 | * This operation removes the website configuration for a bucket. Amazon S3
117 | * returns a 200 OK response upon successfully deleting a website configuration
118 | * on the specified bucket. You will get a 200 OK response if the website
119 | * configuration you are trying to delete does not exist on the bucket. Amazon
120 | * S3 returns a 404 response if the bucket specified in the request does not
121 | * exist.
122 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html}
123 | */
124 | exports.deleteBucketWebsite = async function deleteBucketWebsite(ctx) {
125 | await ctx.store.deleteSubresource(ctx.params.bucket, undefined, 'website');
126 | ctx.body = '';
127 | };
128 |
/**
 * GET Bucket (List Objects) Version 1/2
 * This implementation of the GET operation returns some or all (up to 1,000) of the objects in a
 * bucket.
 * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
 */
exports.getBucket = async function getBucket(ctx) {
  // defaults for the store query; filled in below depending on which list
  // API version (v1 vs list-type=2) the request uses
  const options = {
    delimiter: ctx.query.delimiter || undefined,
    encodingType: ctx.query['encoding-type'], // currently unimplemented
    maxKeys: 1000,
    startAfter: undefined,
    prefix: ctx.query.prefix || undefined,
    fetchOwner: undefined,
  };
  if (ctx.query['max-keys']) {
    // max-keys must be a (possibly negative-signed) base-10 integer string
    if (!ctx.query['max-keys'].match(/^-?\d+$/)) {
      throw new S3Error(
        'InvalidArgument',
        'Provided max-keys not an integer or within integer range',
        {
          ArgumentName: 'max-keys',
          ArgumentValue: ctx.query['max-keys'],
        },
      );
    }
    const maxKeys = Number(ctx.query['max-keys']);
    // valid range is [0, 2^31 - 1]
    if (maxKeys < 0 || maxKeys > 2147483647) {
      throw new S3Error(
        'InvalidArgument',
        'Argument maxKeys must be an integer between 0 and 2147483647',
        {
          ArgumentName: 'maxKeys',
          ArgumentValue: maxKeys,
        },
      );
    }
    // like S3, silently cap the page size at 1000
    options.maxKeys = Math.min(1000, maxKeys);
  }
  switch (ctx.query['list-type']) {
    case '2':
      // ListObjectsV2: paginate via continuation-token/start-after
      if ('marker' in ctx.query) {
        throw new S3Error(
          'InvalidArgument',
          'Marker unsupported with REST.GET.BUCKET in list-type=2',
          { ArgumentName: 'marker' },
        );
      }
      if (ctx.query['continuation-token']) {
        // tokens encode "region/bucket/startAfter"; reject tokens minted for
        // a different region or bucket
        const token = decipherContinuationToken(
          ctx.query['continuation-token'],
        );
        const [, region, bucket, startAfter] =
          /([\w-.]+)\/([\w-.]+)\/(.+)/.exec(token) || [];
        if (region !== 'us-east-1' || bucket !== ctx.params.bucket) {
          throw new S3Error(
            'InvalidArgument',
            'The continuation token provided is incorrect',
            { ArgumentName: 'continuation-token' },
          );
        }
        options.startAfter = startAfter;
      } else {
        options.startAfter = ctx.query['start-after'];
      }
      options.fetchOwner = ctx.query['fetch-owner'] === 'true';
      break;
    default:
      // fall back to version 1
      if ('continuation-token' in ctx.query) {
        throw new S3Error(
          'InvalidArgument',
          'continuation-token only supported in REST.GET.BUCKET with list-type=2',
          { ArgumentName: 'continuation-token' },
        );
      }
      if ('start-after' in ctx.query) {
        throw new S3Error(
          'InvalidArgument',
          // yes, for some reason they decided to camelCase the start-after argument in this error message
          'startAfter only supported in REST.GET.BUCKET with list-type=2',
          { ArgumentName: 'start-after' },
        );
      }
      // v1 always includes Owner info and paginates via marker
      options.fetchOwner = true;
      options.startAfter = ctx.query.marker;
      break;
  }
  ctx.logger.info(
    'Fetched bucket "%s" with options %j',
    ctx.params.bucket,
    options,
  );
  try {
    // max-keys=0 short-circuits to an empty, non-truncated result without
    // touching the store
    const result =
      options.maxKeys === 0
        ? {
            objects: [],
            commonPrefixes: [],
            isTruncated: false,
          }
        : await ctx.store.listObjects(ctx.params.bucket, options);
    ctx.logger.info(
      'Found %d objects for bucket "%s"',
      result.objects.length,
      ctx.params.bucket,
    );
    // v2 responses report pagination via (Next)ContinuationToken/KeyCount,
    // v1 via Marker/NextMarker
    ctx.body = {
      ListBucketResult: {
        '@': { xmlns: 'http://doc.s3.amazonaws.com/2006-03-01/' },
        Name: ctx.params.bucket,
        Prefix: options.prefix || '', // never omit
        ...(ctx.query['list-type'] === '2'
          ? {
              StartAfter: ctx.query['continuation-token']
                ? undefined
                : options.startAfter,
              ContinuationToken: ctx.query['continuation-token'] || undefined,
              NextContinuationToken: result.isTruncated
                ? generateContinuationToken(
                    ctx.params.bucket,
                    result.objects[result.objects.length - 1].key,
                    'us-east-1',
                  )
                : undefined,
              KeyCount: result.objects.length,
            }
          : {
              Marker: options.startAfter || '', // never omit
              NextMarker:
                options.delimiter && result.isTruncated
                  ? result.objects[result.objects.length - 1].key
                  : undefined,
            }),
        MaxKeys: ctx.query['max-keys'] || 1000, // S3 has a hard limit at 1000 but will still echo back the original input
        Delimiter: options.delimiter || undefined, // omit when "" or undefined
        IsTruncated: result.isTruncated || false,
        Contents: result.objects.map((object) => ({
          Key: object.key,
          LastModified: object.lastModifiedDate.toISOString(),
          ETag: object.metadata.etag,
          Size: object.size,
          Owner: options.fetchOwner
            ? {
                ID: DUMMY_ACCOUNT.id,
                DisplayName: DUMMY_ACCOUNT.displayName,
              }
            : undefined,
          StorageClass: 'STANDARD',
        })),
        CommonPrefixes: result.commonPrefixes.map((prefix) => ({
          Prefix: prefix,
        })),
      },
    };
  } catch (err) {
    ctx.logger.error(
      'Error listing objects in bucket "%s"',
      ctx.params.bucket,
      err,
    );
    throw err;
  }
};
293 |
294 | /**
295 | * GET Bucket cors
296 | * Returns the cors configuration information set for the bucket.
297 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETcors.html}
298 | */
299 | exports.getBucketCors = async function getBucketCors(ctx) {
300 | const config = await ctx.store.getSubresource(
301 | ctx.params.bucket,
302 | undefined,
303 | 'cors',
304 | );
305 | if (!config) {
306 | throw new S3Error(
307 | 'NoSuchCORSConfiguration',
308 | 'The CORS configuration does not exist',
309 | { BucketName: ctx.params.bucket },
310 | );
311 | }
312 | ctx.type = 'application/xml';
313 | ctx.body = config.toXML();
314 | };
315 |
316 | /**
317 | * GET Bucket location
318 | * This implementation of the GET operation returns the location configuration
319 | * associated with a bucket.
320 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html}
321 | */
322 | exports.getBucketLocation = async function getBucketLocation(ctx) {
323 | // always return default bucket location
324 | ctx.body = {
325 | LocationConstraint: {
326 | '@': { xmlns: 'http://doc.s3.amazonaws.com/2006-03-01/' },
327 | },
328 | };
329 | };
330 |
331 | /**
332 | * GET Bucket website
333 | * This implementation of the GET operation returns the website configuration
334 | * associated with a bucket. To host website on Amazon S3, you can configure a
335 | * bucket as website by adding a website configuration.
336 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETwebsite.html}
337 | */
338 | exports.getBucketWebsite = async function getBucketWebsite(ctx) {
339 | const config = await ctx.store.getSubresource(
340 | ctx.params.bucket,
341 | undefined,
342 | 'website',
343 | );
344 | if (!config) {
345 | throw new S3Error(
346 | 'NoSuchWebsiteConfiguration',
347 | 'The specified bucket does not have a website configuration',
348 | { BucketName: ctx.params.bucket },
349 | );
350 | }
351 | ctx.type = 'application/xml';
352 | ctx.body = config.toXML();
353 | };
354 |
355 | /**
356 | * PUT Bucket
357 | * This implementation of the PUT operation creates a new bucket. To create a bucket, you must
358 | * register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous
359 | * requests are never allowed to create buckets. By creating the bucket, you become the bucket
360 | * owner.
361 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html}
362 | */
363 | exports.putBucket = async function putBucket(ctx) {
364 | const bucketName = ctx.params.bucket;
365 | /**
366 | * Derived from http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
367 | */
368 | if (!/^[a-z0-9]+(.?[-a-z0-9]+)*$/.test(bucketName)) {
369 | ctx.logger.error(
370 | 'Error creating bucket "%s" because the name is invalid',
371 | bucketName,
372 | );
373 | throw new S3Error(
374 | 'InvalidBucketName',
375 | 'Bucket names can contain lowercase letters, numbers, and hyphens. ' +
376 | 'Each label must start and end with a lowercase letter or a number.',
377 | );
378 | }
379 | if (bucketName.length < 3 || bucketName.length > 63) {
380 | ctx.logger.error(
381 | 'Error creating bucket "%s" because the name is invalid',
382 | bucketName,
383 | );
384 | throw new S3Error(
385 | 'InvalidBucketName',
386 | 'The bucket name must be between 3 and 63 characters.',
387 | );
388 | }
389 | const bucket = await ctx.store.getBucket(bucketName);
390 | if (bucket) {
391 | ctx.logger.error(
392 | 'Error creating bucket. Bucket "%s" already exists',
393 | bucketName,
394 | );
395 | throw new S3Error(
396 | 'BucketAlreadyExists',
397 | 'The requested bucket already exists',
398 | );
399 | }
400 | await ctx.store.putBucket(bucketName);
401 | ctx.logger.info('Created new bucket "%s" successfully', bucketName);
402 | ctx.set('Location', '/' + bucketName);
403 | ctx.body = '';
404 | };
405 |
406 | /**
407 | * PUT Bucket cors
408 | * Sets the cors configuration for your bucket. If the configuration exists,
409 | * Amazon S3 replaces it.
410 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTcors.html}
411 | */
412 | exports.putBucketCors = async function putBucketCors(ctx) {
413 | await utf8BodyParser(ctx);
414 | const config = S3CorsConfiguration.validate(ctx.request.body);
415 | await ctx.store.putSubresource(ctx.params.bucket, undefined, config);
416 | ctx.body = '';
417 | };
418 |
419 | /**
420 | * PUT Bucket website
421 | * Sets the configuration of the website that is specified in the website
422 | * subresource. To configure a bucket as a website, you can add this
423 | * subresource on the bucket with website configuration information such as the
424 | * file name of the index document and any redirect rules.
425 | * {@link https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html}
426 | */
427 | exports.putBucketWebsite = async function putBucketWebsite(ctx) {
428 | await utf8BodyParser(ctx);
429 | const config = S3WebsiteConfiguration.validate(ctx.request.body);
430 | await ctx.store.putSubresource(ctx.params.bucket, undefined, config);
431 | ctx.body = '';
432 | };
433 |
--------------------------------------------------------------------------------