├── .gitattributes ├── index.js ├── tsconfig.json ├── .gitignore ├── .travis.yml ├── lib ├── utils.ts ├── dynamodb-wrapper.d.ts ├── error-types.ts ├── utils.spec.ts ├── error-types.spec.ts ├── table-prefixes.ts ├── partition-strategy.ts ├── estimate-item-size.ts ├── partition-strategy.spec.ts ├── table-prefixes.spec.ts ├── estimate-item-size.spec.ts ├── dynamodb-wrapper.ts └── dynamodb-wrapper.spec.ts ├── LICENSE ├── CHANGELOG.md ├── tslint.json ├── package.json ├── gulpfile.js ├── test └── mock-dynamodb.ts ├── README.md └── index.d.ts /.gitattributes: -------------------------------------------------------------------------------- 1 | # Fix CRLF 2 | * text=auto -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | module.exports = require('./bin/dynamodb-wrapper').DynamoDBWrapper; -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es6", 4 | "module": "commonjs" 5 | } 6 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 6 | # MAC OSX 7 | .DS_Store 8 | 9 | # Webstorm 10 | .idea 11 | 12 | # Dependency 13 | node_modules/ 14 | 15 | # Project files 16 | lib/**/*.js 17 | test/**/*.js 18 | bin/ 19 | coverage/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "6" 4 | - "7" 5 | - "8" 6 | - "9" 7 | - "10" 8 | - "11" 9 | - "12" 10 | git: 11 | depth: 3 12 | cache: 13 | timeout: 3600 14 | directories: 15 | - node_modules 16 | before_install: 17 | 
- npm install -g npm 18 | script: npm test 19 | after_success: 20 | - npm run coveralls -------------------------------------------------------------------------------- /lib/utils.ts: -------------------------------------------------------------------------------- 1 | export function getNonNegativeInteger(arr: any[]) { 2 | for (let v of arr) { 3 | if (typeof v === 'number' && !isNaN(v) && v < Number.MAX_SAFE_INTEGER && v >= 0 && Math.round(v) === v) { 4 | return v; 5 | } 6 | } 7 | 8 | return 0; 9 | } 10 | 11 | export function getPositiveInteger(arr: any[]) { 12 | for (let v of arr) { 13 | if (typeof v === 'number' && !isNaN(v) && v < Number.MAX_SAFE_INTEGER && v > 0 && Math.round(v) === v) { 14 | return v; 15 | } 16 | } 17 | 18 | return 1; 19 | } 20 | 21 | export function appendArray(array1: any[], array2: any[]) { 22 | if (Array.isArray(array2)) { 23 | Array.prototype.push.apply(array1, array2); 24 | } 25 | } 26 | 27 | export function wait(ms: number): Promise { 28 | return new Promise(resolve => { 29 | setTimeout(() => { 30 | resolve(); 31 | }, ms); 32 | }); 33 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) 2016 Jeff Bradford 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /lib/dynamodb-wrapper.d.ts: -------------------------------------------------------------------------------- 1 | declare interface IDynamoDBWrapperOptions { 2 | tableNamePrefix?: string; 3 | groupDelayMs?: number; 4 | maxRetries?: number; 5 | retryDelayOptions?: { 6 | base?: number; 7 | customBackoff?: Function; 8 | }; 9 | } 10 | 11 | declare interface IBatchWriteItemOptions { 12 | [tableName: string]: IBatchWriteItemOption | string | number; 13 | } 14 | 15 | declare interface IBatchWriteItemOption { 16 | partitionStrategy?: string; 17 | targetItemCount?: number; 18 | targetGroupWCU?: number; 19 | groupDelayMs?: number; 20 | } 21 | 22 | declare interface IQueryOptions { 23 | groupDelayMs?: number; 24 | } 25 | 26 | declare interface IScanOptions { 27 | groupDelayMs?: number; 28 | } 29 | 30 | declare type TDictionary = { [key: string] : T; }; 31 | 32 | declare type TDynamoDBItem = TDictionary; 33 | 34 | declare type AttributeValue = { 35 | S?: string; 36 | N?: string; 37 | B?: any; 38 | SS?: string[]; 39 | NS?: string[]; 40 | BS?: any[]; 41 | M?: any; 42 | L?: any; 43 | NULL?: boolean; 44 | BOOL?: boolean; 45 | }; -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # dynamodb-wrapper changelog 2 | 3 | ## 1.4.1 4 | 5 | - resolved a minor typings issue in `index.d.ts` (#6) 6 | 7 | ## 1.4.0 8 | 9 | - 
added `batchGroupWritten` event to enable observer to follow the batch progress 10 | - added `batchGetItem` method 11 | 12 | ## 1.3.0 13 | 14 | - added a couple missing fields to `index.d.ts` for TypeScript consumers 15 | - enhancement: `batchWriteItem` now supports writing to multiple tables. 16 | - There is an updated example in the README 17 | - The format of the *options* object passed to `batchWriteItem` has changed, refer to `index.d.ts` for details. (The previous format is still supported for backwards compatibility.) 18 | 19 | ## 1.2.3 20 | 21 | - fixed a bug where a table prefix could be prepended multiple times 22 | - added retries for LimitExceededException 23 | 24 | ## 1.2.1, 1.2.2 25 | 26 | - updated typescript typings for the public API 27 | 28 | ## 1.2.0 29 | 30 | - added support for `createTable`, `updateTable`, `describeTable` and `deleteTable` methods 31 | 32 | ## 1.1.1 33 | 34 | - fixed an off-by-1 bug where the `retry` event would erroneously fire (e.g. retry #1 with maxRetries=0) 35 | - fixed a bug where ConsumedCapacity was not aggregated correctly for BatchWriteItem 36 | 37 | ## 1.1.0 38 | 39 | - new feature: table prefixes 40 | - new feature: event hooks for `retry` and `consumedCapacity` 41 | - enhancement: `batchWriteItem` now supports DeleteRequests 42 | 43 | ## 1.0.x 44 | 45 | - project setup -------------------------------------------------------------------------------- /tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "rules": { 3 | "align": [true, "parameters", "statements"], 4 | "ban": [true, "object", "function"], 5 | "class-name": true, 6 | "comment-format": [true, "check-space"], 7 | "curly": true, 8 | "forin": true, 9 | "indent": [true, "spaces"], 10 | "interface-name": true, 11 | "jsdoc-format": true, 12 | "label-position": true, 13 | "max-line-length": [true, 140], 14 | "member-ordering": [true, "static-before-instance", "variables-before-function"], 15 | "no-arg": true, 16 
| "no-bitwise": true, 17 | "no-conditional-assignment": true, 18 | "no-console": [true, "log", "error", "warn"], 19 | "no-consecutive-blank-lines": true, 20 | "no-construct": true, 21 | "no-debugger": true, 22 | "no-duplicate-variable": true, 23 | "no-shadowed-variable": true, 24 | "no-eval": true, 25 | "no-inferrable-types": true, 26 | "no-internal-module": true, 27 | "no-switch-case-fall-through": true, 28 | "no-trailing-whitespace": true, 29 | "no-unused-expression": [true, "check-parameters"], 30 | "no-var-requires": true, 31 | "one-line": [true, "check-open-brace", "check-whitespace"], 32 | "quotemark": [true, "single", "avoid-escape"], 33 | "semicolon": true, 34 | "switch-default": true, 35 | "triple-equals": true, 36 | "whitespace": [true, 37 | "check-branch", 38 | "check-decl", 39 | "check-module", 40 | "check-separator", 41 | "check-type", 42 | "check-typecast"] 43 | } 44 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dynamodb-wrapper", 3 | "version": "1.4.1", 4 | "description": "A DynamoDB library that extends aws-sdk with bulk read/write, events, streams, and more", 5 | "repository": { 6 | "type": "git", 7 | "url": "https://github.com/Shadowblazen/dynamodb-wrapper.git" 8 | }, 9 | "main": "index.js", 10 | "typings": "index.d.ts", 11 | "scripts": { 12 | "prepublish": "./node_modules/gulp/bin/gulp.js && ./node_modules/gulp/bin/gulp.js prepublish", 13 | "build": "./node_modules/gulp/bin/gulp.js build", 14 | "test": "./node_modules/gulp/bin/gulp.js test", 15 | "coveralls": "cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js && rm -rf ./coverage" 16 | }, 17 | "files": [ 18 | "index.js", 19 | "index.d.ts", 20 | "bin" 21 | ], 22 | "keywords": [ 23 | "dynamo", 24 | "dynamodb", 25 | "wrapper", 26 | "stream", 27 | "aws", 28 | "sdk", 29 | "aws-sdk", 30 | "extract", 31 | "transform", 32 | "load", 33 
| "etl", 34 | "bulk", 35 | "copy", 36 | "bcp", 37 | "import", 38 | "export", 39 | "batch" 40 | ], 41 | "author": { 42 | "name": "Jeff Bradford", 43 | "email": "jbradford3@hotmail.com" 44 | }, 45 | "license": "MIT", 46 | "engines": { 47 | "node": ">=4.0.0" 48 | }, 49 | "devDependencies": { 50 | "@types/aws-sdk": "2.7.0", 51 | "@types/jasmine": "3.3.12", 52 | "@types/node": "12.0.1", 53 | "coveralls": "3.0.3", 54 | "del": "4.1.1", 55 | "gulp": "4.0.2", 56 | "gulp-istanbul": "1.1.3", 57 | "gulp-jasmine": "4.0.0", 58 | "gulp-tslint": "8.1.4", 59 | "gulp-typescript": "5.0.1", 60 | "jasmine-console-reporter": "3.1.0", 61 | "tslint": "5.16.0", 62 | "typescript": "3.4.5" 63 | }, 64 | "peerDependencies": { 65 | "aws-sdk": "2.x" 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /lib/error-types.ts: -------------------------------------------------------------------------------- 1 | export const ErrorCode = { 2 | ProvisionedThroughputExceededException: 'ProvisionedThroughputExceededException', 3 | LimitExceededException: 'LimitExceededException', 4 | ThrottlingException: 'ThrottlingException', 5 | NotYetImplementedError: 'NotYetImplementedError' 6 | }; 7 | 8 | export const ErrorMessage = { 9 | ProvisionedThroughputExceededException: 'The level of configured provisioned throughput for the table was exceeded.' + 10 | ' Consider increasing your provisioning level with the UpdateTable API', 11 | LimitExceededException: 'The number of concurrent table requests (cumulative number of tables in the CREATING,' + 12 | ' DELETING or UPDATING state) exceeds the maximum allowed of 10. Also, for tables with secondary indexes, only' + 13 | ' one of those tables can be in the CREATING state at any point in time.', 14 | BatchWriteMultipleTables: 'Expected exactly 1 table name in RequestItems, but found 0 or 2+.' 
+ 15 | ' Writing to more than 1 table with BatchWriteItem is supported in the AWS DynamoDB API,' + 16 | ' but this capability is not yet implemented by this wrapper library.', 17 | BatchWriteDeleteRequest: 'DeleteRequest in BatchWriteItem is supported in the AWS DynamoDB API,' + 18 | ' but this capability is not yet implemented by this wrapper library.', 19 | ItemCollectionMetrics: 'ReturnItemCollectionMetrics is supported in the AWS DynamoDB API,' + 20 | ' but this capability is not yet implemented by this wrapper library.' 21 | }; 22 | 23 | export class Exception { 24 | public code: string; 25 | public message: string; 26 | public statusCode: number; 27 | public time: string; 28 | public retryable: boolean; 29 | public retryDelay: number; 30 | 31 | constructor(code: string, message: string) { 32 | this.code = code; 33 | this.message = message; 34 | this.statusCode = 400; 35 | this.time = new Date().toISOString(); 36 | this.retryable = false; 37 | this.retryDelay = 0; 38 | } 39 | } -------------------------------------------------------------------------------- /lib/utils.spec.ts: -------------------------------------------------------------------------------- 1 | import { 2 | getNonNegativeInteger, 3 | getPositiveInteger, 4 | appendArray, 5 | wait 6 | } from './utils'; 7 | 8 | describe('lib/utils', () => { 9 | 10 | describe('getNonNegativeInteger()', () => { 11 | 12 | it('returns the first non-negative integer in the input', () => { 13 | expect(getNonNegativeInteger([0])).toBe(0); 14 | expect(getNonNegativeInteger([1, 2])).toBe(1); 15 | expect(getNonNegativeInteger(['foo', NaN, Infinity, -1, 1.2, 3])).toBe(3); 16 | }); 17 | 18 | it('returns 0 if the input does not contain a non-negative integer', () => { 19 | expect(getNonNegativeInteger([])).toBe(0); 20 | expect(getNonNegativeInteger(['foo', NaN, Infinity, -1, 1.2])).toBe(0); 21 | }); 22 | 23 | }); 24 | 25 | describe('getPositiveInteger()', () => { 26 | 27 | it('returns the first positive integer in the input', 
// npm modules
const del = require('del');
const { src, dest, series } = require('gulp');
const tslint = require('gulp-tslint');
const tsc = require('gulp-typescript');
const jasmine = require('gulp-jasmine');
const JasmineConsoleReporter = require('jasmine-console-reporter');
const istanbul = require('gulp-istanbul');

/** Removes build output, coverage reports, and transpiled test files. */
function cleanTask(done) {
    del(['bin', 'coverage', 'test/**/*.js']).then(() => done());
}

/** Lints the TypeScript sources under lib/ with tslint. */
function lintTask() {
    return src('lib/**/*.ts')
        .pipe(tslint({ formatter: 'verbose' }))
        .pipe(tslint.report({ emitError: true }));
}

/** Transpiles lib/ TypeScript into bin/. */
function transpileLibTask() {
    const project = tsc.createProject('tsconfig.json');

    return src('lib/**/*.ts')
        .pipe(project())
        .pipe(dest('bin'));
}

/** Transpiles test/ TypeScript in place (JS lands next to the TS). */
function transpileTestTask() {
    const project = tsc.createProject('tsconfig.json');

    return src('test/**/*.ts')
        .pipe(project())
        .pipe(dest('test'));
}

/** Instruments the non-test JS in bin/ for istanbul coverage. */
function pretestCoverageInstrumentationTask() {
    const sourceGlobs = [
        'bin/**/*.js',

        // ignore test-related files
        '!bin/**/*.spec.js',
        '!bin/jasmine-runner.js'
    ];

    return src(sourceGlobs)
        .pipe(istanbul({ includeUntested: true }))
        .pipe(istanbul.hookRequire());
}

/** Writes the coverage reports once the specs have run. */
function posttestCoverageReportsTask() {
    return src('bin/**/*.spec.js')
        .pipe(istanbul.writeReports());
}

/** Runs the transpiled jasmine specs with a console reporter. */
function testTask() {
    return src('bin/**/*spec.js').pipe(jasmine({
        reporter: new JasmineConsoleReporter({
            colors: process.argv.indexOf('--no-color') === -1,
            verbosity: 3
        })
    }));
}

/** Strips test artifacts out of bin/ before publishing. */
function prepublishTask(done) {
    del(['bin/jasmine-runner.js', 'bin/**/*.spec.js']).then(() => done());
}

exports.prepublish = prepublishTask;
exports.build = series(lintTask, cleanTask, transpileLibTask, transpileTestTask);
exports.test = series(exports.build, pretestCoverageInstrumentationTask, testTask, posttestCoverageReportsTask);
exports.default = exports.test;
LimitExceededException: 'LimitExceededException', 13 | ThrottlingException: 'ThrottlingException', 14 | NotYetImplementedError: 'NotYetImplementedError' 15 | }); 16 | }); 17 | 18 | it('should define error messages', () => { 19 | expect(ErrorMessage).toEqual({ 20 | ProvisionedThroughputExceededException: 'The level of configured provisioned throughput for the table was exceeded.' + 21 | ' Consider increasing your provisioning level with the UpdateTable API', 22 | LimitExceededException: 'The number of concurrent table requests (cumulative number of tables in the CREATING,' + 23 | ' DELETING or UPDATING state) exceeds the maximum allowed of 10. Also, for tables with secondary indexes, only' + 24 | ' one of those tables can be in the CREATING state at any point in time.', 25 | BatchWriteMultipleTables: 'Expected exactly 1 table name in RequestItems, but found 0 or 2+.' + 26 | ' Writing to more than 1 table with BatchWriteItem is supported in the AWS DynamoDB API,' + 27 | ' but this capability is not yet implemented by this wrapper library.', 28 | BatchWriteDeleteRequest: 'DeleteRequest in BatchWriteItem is supported in the AWS DynamoDB API,' + 29 | ' but this capability is not yet implemented by this wrapper library.', 30 | ItemCollectionMetrics: 'ReturnItemCollectionMetrics is supported in the AWS DynamoDB API,' + 31 | ' but this capability is not yet implemented by this wrapper library.' 
32 | }); 33 | }); 34 | 35 | it('should create an exception', () => { 36 | let code = ErrorCode.NotYetImplementedError; 37 | let message = ErrorMessage.ItemCollectionMetrics; 38 | 39 | let e = new Exception(code, message); 40 | 41 | expect(e.code).toBe(code); 42 | expect(e.message).toBe(message); 43 | expect(e.statusCode).toBe(400); 44 | expect(e.time).toBeDefined(); 45 | expect(e.retryable).toBe(false); 46 | expect(e.retryDelay).toBe(0); 47 | }); 48 | 49 | }); -------------------------------------------------------------------------------- /lib/table-prefixes.ts: -------------------------------------------------------------------------------- 1 | export function addTablePrefixToRequest(prefix: string, params: any): void { 2 | if (prefix.length > 0) { 3 | // used in most API methods 4 | if (params.TableName) { 5 | params.TableName = addPrefix(prefix, params.TableName); 6 | } 7 | 8 | // used in BatchGetItem, BatchWriteItem 9 | if (params.RequestItems) { 10 | params.RequestItems = addPrefixes(prefix, params.RequestItems); 11 | } 12 | } 13 | } 14 | 15 | export function removeTablePrefixFromResponse(prefix: string, response: any): void { 16 | if (prefix.length > 0) { 17 | // used in BatchGetItem 18 | if (response.Responses) { 19 | response.Responses = removePrefixes(prefix, response.Responses); 20 | } 21 | 22 | // used in BatchGetItem 23 | if (response.UnprocessedKeys) { 24 | response.UnprocessedKeys = removePrefixes(prefix, response.UnprocessedKeys); 25 | } 26 | 27 | if (response.UnprocessedItems) { 28 | response.UnprocessedItems = removePrefixes(prefix, response.UnprocessedItems); 29 | } 30 | 31 | // used in BatchWriteItem 32 | if (response.ItemCollectionMetrics && !response.ItemCollectionMetrics.ItemCollectionKey && 33 | !response.ItemCollectionMetrics.SizeEstimateRangeGB) { 34 | response.ItemCollectionMetrics = removePrefixes(prefix, response.ItemCollectionMetrics); 35 | } 36 | 37 | if (response.ConsumedCapacity) { 38 | // used in BatchGetItem, BatchWriteItem 39 | 
if (Array.isArray(response.ConsumedCapacity)) { 40 | for (let consumedCapacity of response.ConsumedCapacity) { 41 | consumedCapacity.TableName = removePrefix(prefix, consumedCapacity.TableName); 42 | } 43 | } else { 44 | // used in most API methods 45 | response.ConsumedCapacity.TableName = removePrefix(prefix, response.ConsumedCapacity.TableName); 46 | } 47 | } 48 | } 49 | } 50 | 51 | export function addPrefix(prefix: string, tableName: string): string { 52 | return tableName.indexOf(prefix) === 0 ? tableName : prefix + tableName; 53 | } 54 | 55 | export function addPrefixes(prefix: string, map: any): any { 56 | let outMap = {}; 57 | for (let tableName in map) { 58 | /* tslint:disable:forin */ 59 | // noinspection JSUnfilteredForInLoop 60 | outMap[addPrefix(prefix, tableName)] = map[tableName]; 61 | /* tslint:enable:forin */ 62 | } 63 | return outMap; 64 | } 65 | 66 | export function removePrefix(prefix: string, tableName: string) { 67 | return tableName.substr(prefix.length); 68 | } 69 | 70 | export function removePrefixes(prefix: string, map) { 71 | let outMap = {}; 72 | for (let tableName in map) { 73 | /* tslint:disable:forin */ 74 | // noinspection JSUnfilteredForInLoop 75 | outMap[removePrefix(prefix, tableName)] = map[tableName]; 76 | /* tslint:enable:forin */ 77 | } 78 | return outMap; 79 | } -------------------------------------------------------------------------------- /lib/partition-strategy.ts: -------------------------------------------------------------------------------- 1 | import { DynamoDB } from 'aws-sdk'; 2 | import { estimateWriteCapacityUnits } from './estimate-item-size'; 3 | 4 | const BATCH_WRITE_MAX_ITEM_COUNT = 25; 5 | 6 | /** 7 | * The EqualItemCount partition strategy is the "simple" approach to partitioning, which works well 8 | * when all your items have predicable, equal size in WriteCapacityUnits. 
9 | * 10 | * For example, with partitionStrategy="EqualItemCount" and targetItemCount=10, an array of 32 items 11 | * would be partitioned into 4 groups as [10], [10], [10], [2]. A total of 4 requests would be made 12 | * to AWS - 1 request for each group. 13 | * 14 | * @param writeRequests - an array of WriteRequest objects 15 | * @param startIndex - index to begin the partition at 16 | * @param options - options with partition settings 17 | * @returns {DynamoDB.WriteRequests} a subset of the writeRequests array 18 | */ 19 | 20 | export function getNextGroupByItemCount(writeRequests: DynamoDB.WriteRequests, startIndex: number, 21 | options: IBatchWriteItemOption): DynamoDB.WriteRequests { 22 | 23 | if (startIndex < 0 || startIndex >= writeRequests.length) { 24 | return null; 25 | } else { 26 | const TARGET_ITEM_COUNT = Math.min(options.targetItemCount, BATCH_WRITE_MAX_ITEM_COUNT); 27 | let endIndex = Math.min(startIndex + TARGET_ITEM_COUNT, writeRequests.length); 28 | return writeRequests.slice(startIndex, endIndex); 29 | } 30 | } 31 | 32 | /** 33 | * The EvenlyDistributedGroupWCU partition strategy is used to partition the array into groups of 34 | * equal total WCU, up to a given threshold. 35 | * 36 | * In contrast to the EqualItemCount partition strategy - which makes each group have the same length, 37 | * this strategy allows for variable length groups with evenly distributed WCU. 38 | * 39 | * If a table is configured to have a write throughput of 10 WCU, and we're writing 40 | * 1 group per second, we can make full use of available throughput if each group's items 41 | * have a sum of 10 WCU. This partition strategy accomplishes this by estimating the size 42 | * of your items and placing them into groups with a normalized total WCU. 
43 | * 44 | * Suppose we wish to write an array of items [A, B, C, D, E, F], where the sizes of the items are: 45 | * 46 | * A = 11 WCU 47 | * B = 5 WCU 48 | * C = 2 WCU 49 | * D = 3 WCU 50 | * E = 1 WCU 51 | * F = 8 WCU 52 | * 53 | * With targetGroupWCU=10, the partition would be: 54 | * 55 | * [A] = total of 11 WCU 56 | * [B, C, D] = total of 10 WCU 57 | * [E, F] = total of 9 WCU 58 | * 59 | * With targetGroupWCU=20, the partition would be: 60 | * 61 | * [A, B, C] = total of 18 WCU 62 | * [D, E, F] = total of 12 WCU 63 | * 64 | * With targetGroupWCU=30, all items fit into a single group: 65 | * 66 | * [A, B, C, D, E, F] = total of 30 WCU 67 | * 68 | * @param writeRequests - an array of WriteRequest objects 69 | * @param startIndex - index to begin the partition at 70 | * @param options - options with partition settings 71 | * @returns {DynamoDB.WriteRequests} a subset of the writeRequests array 72 | */ 73 | 74 | export function getNextGroupByTotalWCU(writeRequests: DynamoDB.WriteRequests, startIndex: number, 75 | options: IBatchWriteItemOption): DynamoDB.WriteRequests { 76 | 77 | if (startIndex < 0 || startIndex >= writeRequests.length) { 78 | return null; 79 | } else { 80 | // always include the first item 81 | let estimatedWCU = _estimateWCUForPutOrDeleteRequest(writeRequests[startIndex]); 82 | let totalEstimatedWCU = estimatedWCU; 83 | let i = startIndex + 1; 84 | 85 | while (i < writeRequests.length && i - startIndex < BATCH_WRITE_MAX_ITEM_COUNT) { 86 | estimatedWCU = _estimateWCUForPutOrDeleteRequest(writeRequests[i]); 87 | 88 | if (totalEstimatedWCU + estimatedWCU <= options.targetGroupWCU) { 89 | totalEstimatedWCU += estimatedWCU; 90 | i++; 91 | } else { 92 | // do not include the item in the group, because the total WCU would exceed the threshold 93 | break; 94 | } 95 | } 96 | 97 | return writeRequests.slice(startIndex, i); 98 | } 99 | } 100 | 101 | function _estimateWCUForPutOrDeleteRequest(writeRequest: DynamoDB.WriteRequest): number { 102 | if 
(writeRequest.DeleteRequest) { 103 | // assume the item to be deleted is 1 WCU (it's not possible to know the WCU 104 | // before we actually delete the item, so all we can do is guess) 105 | return 1; 106 | } else { 107 | return estimateWriteCapacityUnits(writeRequest.PutRequest.Item); 108 | } 109 | } -------------------------------------------------------------------------------- /lib/estimate-item-size.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Estimates the number of Write Capacity Units that will be consumed when writing this item to DynamoDB. 3 | * 4 | * @param {TDynamoDBItem} item 5 | * @see http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ItemSizeCalculations 6 | * @returns {number} 7 | */ 8 | 9 | export function estimateWriteCapacityUnits(item: TDynamoDBItem): number { 10 | return Math.ceil(estimateItemSize(item) / 1024); 11 | } 12 | 13 | /** 14 | * Estimates the number of Read Capacity Units that will be consumed when reading this item from DynamoDB. 15 | * 16 | * @param {TDynamoDBItem} item 17 | * @see http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ItemSizeCalculations 18 | * @returns {number} 19 | */ 20 | 21 | export function estimateReadCapacityUnits(item: TDynamoDBItem): number { 22 | return Math.ceil(estimateItemSize(item) / 4096); 23 | } 24 | 25 | /** 26 | * Estimates the size of a DynamoDB item in bytes. 27 | * 28 | * For practical purposes, this is useful for estimating the amount of capacity units that will 29 | * be consumed when reading or writing an item to DynamoDB. 
30 | * 31 | * @param {TDynamoDBItem} item 32 | * @see http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ItemSizeCalculations 33 | * @returns {number} the estimated number of bytes the item will require for storage in DynamoDB 34 | */ 35 | 36 | export function estimateItemSize(item: TDynamoDBItem): number { 37 | let totalBytes = 0; 38 | for (let key in item) { 39 | /* tslint:disable:forin */ 40 | // noinspection JSUnfilteredForInLoop 41 | totalBytes += estimateAttributeValueSize(item[key], key); 42 | /* tslint:enable:forin */ 43 | } 44 | return totalBytes; 45 | } 46 | 47 | /** 48 | * Estimates the size of a DynamoDB AttributeValue in bytes. 49 | * 50 | * @param {AttributeValue} value 51 | * @param {string} name 52 | * @see http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ItemSizeCalculations 53 | * @returns {number} 54 | */ 55 | 56 | function estimateAttributeValueSize(value: AttributeValue, name?: string): number { 57 | let totalBytes = 0; 58 | 59 | // add the size of the attribute name 60 | // assume strings are ~1 byte per character (accurate for alphanumeric English UTF-8 text) 61 | if (name) { 62 | totalBytes += name.length; 63 | } 64 | 65 | let attributeKey = Object.keys(value)[0]; 66 | switch (attributeKey) { 67 | case 'NULL': 68 | case 'BOOL': 69 | // 1 byte to store a null or boolean value 70 | totalBytes += 1; 71 | break; 72 | case 'N': 73 | case 'S': 74 | // assume the number is stored in string format 75 | // assume strings are ~1 byte per character (accurate for alphanumeric English UTF-8 text) 76 | totalBytes += value[attributeKey].length; 77 | break; 78 | case 'NS': 79 | case 'SS': 80 | // sum of sizes of each element in the set 81 | let eSet = value[attributeKey]; 82 | for (let e of eSet) { 83 | // assume the number is stored in string format 84 | // assume strings are ~1 byte per character (accurate for alphanumeric English UTF-8 text) 85 | totalBytes += e.length; 86 | } 87 | 
break; 88 | case 'L': 89 | // overhead required for a DynamoDB List 90 | totalBytes += 3; 91 | // sum of the sizes of all AttributeValue elements in the list 92 | let list = value[attributeKey]; 93 | for (let v of list) { 94 | totalBytes += estimateAttributeValueSize(v); 95 | } 96 | break; 97 | case 'M': 98 | // overhead required for a DynamoDB Map 99 | totalBytes += 3; 100 | // sum of sizes of each element in the map 101 | let map = value[attributeKey]; 102 | for (let key in map) { 103 | /* tslint:disable:forin */ 104 | // noinspection JSUnfilteredForInLoop 105 | totalBytes += estimateAttributeValueSize(map[key], key); 106 | /* tslint:enable:forin */ 107 | } 108 | break; 109 | case 'B': 110 | throw new Error('NotYetImplementedException: DynamoDB Binary data type is not yet supported'); 111 | case 'BS': 112 | throw new Error('NotYetImplementedException: DynamoDB BinarySet data type is not yet supported'); 113 | default: 114 | throw new Error('ValidationException: Invalid attributeKey "' + attributeKey + '"'); 115 | } 116 | 117 | return totalBytes; 118 | } -------------------------------------------------------------------------------- /lib/partition-strategy.spec.ts: -------------------------------------------------------------------------------- 1 | import { DynamoDB } from 'aws-sdk'; 2 | import { getNextGroupByItemCount, getNextGroupByTotalWCU } from './partition-strategy'; 3 | 4 | describe('lib/partition-strategy', () => { 5 | 6 | const ONE_KB = (() => { 7 | let str = ''; 8 | for (let i = 0; i < 1000; i++) { 9 | str += 'a'; 10 | } 11 | return str; 12 | }); 13 | 14 | function _setupWriteRequests(countRequests: number): DynamoDB.WriteRequests { 15 | let writeRequests: any = []; 16 | for (let i = 0; i < countRequests; i++) { 17 | writeRequests.push({ 18 | PutRequest: { 19 | Item: { 20 | MyPartitionKey: { N: i.toString() }, 21 | MyKey: { S: ONE_KB } 22 | } 23 | } 24 | }); 25 | } 26 | return writeRequests; 27 | } 28 | 29 | function _setupDeleteRequests(countRequests: 
number): DynamoDB.WriteRequests { 30 | let writeRequests: any = []; 31 | for (let i = 0; i < countRequests; i++) { 32 | writeRequests.push({ 33 | DeleteRequest: { 34 | Key: { 35 | MyPartitionKey: { N: i.toString() } 36 | } 37 | } 38 | }); 39 | } 40 | return writeRequests; 41 | } 42 | 43 | describe('getNextGroupByItemCount()', () => { 44 | 45 | it('should partition the requests array into groups of equal length', () => { 46 | let writeRequests = _setupWriteRequests(10); 47 | let options = { 48 | targetItemCount: 4 49 | }; 50 | let nextGroup; 51 | 52 | nextGroup = getNextGroupByItemCount(writeRequests, 0, options); 53 | expect(nextGroup).toEqual(writeRequests.slice(0, 4)); 54 | 55 | nextGroup = getNextGroupByItemCount(writeRequests, 4, options); 56 | expect(nextGroup).toEqual(writeRequests.slice(4, 8)); 57 | 58 | nextGroup = getNextGroupByItemCount(writeRequests, 8, options); 59 | expect(nextGroup).toEqual(writeRequests.slice(8, 10)); 60 | }); 61 | 62 | it('should not exceed the BatchWriteItem max item count for an individual group', () => { 63 | let writeRequests = _setupWriteRequests(30); 64 | let options = { 65 | targetItemCount: 9999 66 | }; 67 | let nextGroup; 68 | 69 | nextGroup = getNextGroupByItemCount(writeRequests, 0, options); 70 | expect(nextGroup).toEqual(writeRequests.slice(0, 25)); 71 | 72 | nextGroup = getNextGroupByItemCount(writeRequests, 25, options); 73 | expect(nextGroup).toEqual(writeRequests.slice(25, 30)); 74 | }); 75 | 76 | it('should return null when the startIndex is out of range', () => { 77 | let writeRequests = _setupWriteRequests(10); 78 | let options = { 79 | targetItemCount: 4 80 | }; 81 | let nextGroup; 82 | 83 | nextGroup = getNextGroupByItemCount(writeRequests, 10, options); 84 | expect(nextGroup).toBe(null); 85 | 86 | nextGroup = getNextGroupByItemCount(writeRequests, -1, options); 87 | expect(nextGroup).toBe(null); 88 | }); 89 | 90 | }); 91 | 92 | describe('getNextGroupByTotalWCU()', () => { 93 | 94 | it('should partition the 
requests array into groups of approximately equal total WCU', () => { 95 | let writeRequests = _setupWriteRequests(10); 96 | let options = { 97 | targetGroupWCU: 5 98 | }; 99 | let nextGroup; 100 | 101 | nextGroup = getNextGroupByTotalWCU(writeRequests, 0, options); 102 | expect(nextGroup).toEqual(writeRequests.slice(0, 5)); 103 | 104 | nextGroup = getNextGroupByTotalWCU(writeRequests, 5, options); 105 | expect(nextGroup).toEqual(writeRequests.slice(5, 10)); 106 | }); 107 | 108 | it('should not exceed the BatchWriteItem max item count for an individual group', () => { 109 | let writeRequests = _setupWriteRequests(30); 110 | let options = { 111 | targetGroupWCU: 9999 112 | }; 113 | let nextGroup; 114 | 115 | nextGroup = getNextGroupByTotalWCU(writeRequests, 0, options); 116 | expect(nextGroup).toEqual(writeRequests.slice(0, 25)); 117 | 118 | nextGroup = getNextGroupByTotalWCU(writeRequests, 25, options); 119 | expect(nextGroup).toEqual(writeRequests.slice(25, 30)); 120 | }); 121 | 122 | it('should assume that DeleteRequests always consume 1 WCU', () => { 123 | let writeRequests = _setupDeleteRequests(11); 124 | let options = { 125 | targetGroupWCU: 5 126 | }; 127 | let nextGroup; 128 | 129 | nextGroup = getNextGroupByTotalWCU(writeRequests, 0, options); 130 | expect(nextGroup).toEqual(writeRequests.slice(0, 5)); 131 | 132 | nextGroup = getNextGroupByTotalWCU(writeRequests, 5, options); 133 | expect(nextGroup).toEqual(writeRequests.slice(5, 10)); 134 | 135 | nextGroup = getNextGroupByTotalWCU(writeRequests, 10, options); 136 | expect(nextGroup).toEqual(writeRequests.slice(10, 11)); 137 | }); 138 | 139 | it('should return null when the startIndex is out of range', () => { 140 | let writeRequests = _setupWriteRequests(10); 141 | let options = { 142 | targetGroupWCU: 5 143 | }; 144 | let nextGroup; 145 | 146 | nextGroup = getNextGroupByTotalWCU(writeRequests, 10, options); 147 | expect(nextGroup).toBe(null); 148 | 149 | nextGroup = getNextGroupByTotalWCU(writeRequests, 
-1, options); 150 | expect(nextGroup).toBe(null); 151 | }); 152 | 153 | }); 154 | 155 | }); -------------------------------------------------------------------------------- /lib/table-prefixes.spec.ts: -------------------------------------------------------------------------------- 1 | import { 2 | addTablePrefixToRequest, 3 | removeTablePrefixFromResponse 4 | } from './table-prefixes'; 5 | 6 | describe('lib/table-prefixes', () => { 7 | 8 | describe('addTablePrefixToRequest()', () => { 9 | 10 | it('should insert table prefix for TableName (used in most API methods)', () => { 11 | let params = { 12 | TableName: 'MyTable' 13 | }; 14 | 15 | addTablePrefixToRequest('dev-', params); 16 | 17 | expect(params.TableName).toBe('dev-MyTable'); 18 | }); 19 | 20 | it('should NOT insert table prefix if TableName already begins with the prefix', () => { 21 | let params = { 22 | TableName: 'dev-MyTable' 23 | }; 24 | 25 | addTablePrefixToRequest('dev-', params); 26 | 27 | expect(params.TableName).toBe('dev-MyTable'); 28 | expect(params.TableName).not.toBe('dev-dev-MyTable'); 29 | }); 30 | 31 | it('should insert table prefix in RequestItems (used in BatchGetItem, BatchWriteItem)', () => { 32 | let params: any = { 33 | RequestItems: { 34 | Table1: [], 35 | Table2: [] 36 | } 37 | }; 38 | 39 | addTablePrefixToRequest('dev-', params); 40 | 41 | expect(params).toEqual({ 42 | RequestItems: { 43 | 'dev-Table1': [], 44 | 'dev-Table2': [] 45 | } 46 | }); 47 | }); 48 | 49 | it('should NOT insert table prefix in RequestItems if the table name already begins with the prefix', () => { 50 | let params: any = { 51 | RequestItems: { 52 | 'dev-Table1': [], 53 | 'dev-Table2': [] 54 | } 55 | }; 56 | 57 | addTablePrefixToRequest('dev-', params); 58 | 59 | expect(params).toEqual({ 60 | RequestItems: { 61 | 'dev-Table1': [], 62 | 'dev-Table2': [] 63 | } 64 | }); 65 | 66 | expect(params).not.toEqual({ 67 | RequestItems: { 68 | 'dev-dev-Table1': [], 69 | 'dev-dev-Table2': [] 70 | } 71 | }); 72 | }); 73 | 
74 | it('should be a noop if the table prefix is empty string', () => { 75 | let params = { 76 | TableName: 'MyTable' 77 | }; 78 | 79 | addTablePrefixToRequest('', params); 80 | 81 | expect(params.TableName).toBe('MyTable'); 82 | }); 83 | 84 | }); 85 | 86 | describe('removeTablePrefixFromResponse()', () => { 87 | 88 | it('should remove table prefix from Responses (used in BatchGetItem)', () => { 89 | let response: any = { 90 | Responses: { 91 | 'dev-Table1': [], 92 | 'dev-Table2': [] 93 | } 94 | }; 95 | 96 | removeTablePrefixFromResponse('dev-', response); 97 | 98 | expect(response).toEqual({ 99 | Responses: { 100 | 'Table1': [], 101 | 'Table2': [] 102 | } 103 | }); 104 | }); 105 | 106 | it('should remove table prefix from UnprocessedKeys (used in BatchGetItem)', () => { 107 | let response: any = { 108 | UnprocessedKeys: { 109 | 'dev-Table1': {}, 110 | 'dev-Table2': {} 111 | } 112 | }; 113 | 114 | removeTablePrefixFromResponse('dev-', response); 115 | 116 | expect(response).toEqual({ 117 | UnprocessedKeys: { 118 | 'Table1': {}, 119 | 'Table2': {} 120 | } 121 | }); 122 | }); 123 | 124 | it('should remove table prefix from UnprocessedItems (used in BatchWriteItem)', () => { 125 | let response: any = { 126 | UnprocessedItems: { 127 | 'dev-Table1': [], 128 | 'dev-Table2': [] 129 | } 130 | }; 131 | 132 | removeTablePrefixFromResponse('dev-', response); 133 | 134 | expect(response).toEqual({ 135 | UnprocessedItems: { 136 | 'Table1': [], 137 | 'Table2': [] 138 | } 139 | }); 140 | }); 141 | 142 | it('should remove table prefix from ItemCollectionMetrics (used in BatchWriteItem)', () => { 143 | let response: any = { 144 | ItemCollectionMetrics: { 145 | 'dev-Table1': [], 146 | 'dev-Table2': [] 147 | } 148 | }; 149 | 150 | removeTablePrefixFromResponse('dev-', response); 151 | 152 | expect(response).toEqual({ 153 | ItemCollectionMetrics: { 154 | 'Table1': [], 155 | 'Table2': [] 156 | } 157 | }); 158 | }); 159 | 160 | it('should remove table prefix from ConsumedCapacity (used 
in most API methods)', () => { 161 | let response = { 162 | ConsumedCapacity: { 163 | TableName: 'dev-MyTable' 164 | } 165 | }; 166 | 167 | removeTablePrefixFromResponse('dev-', response); 168 | 169 | expect(response).toEqual({ 170 | ConsumedCapacity: { 171 | TableName: 'MyTable' 172 | } 173 | }); 174 | }); 175 | 176 | it('should remove table prefix from ConsumedCapacityMultiple (used in BatchWriteItem)', () => { 177 | let response = { 178 | ConsumedCapacity: [ 179 | { 180 | TableName: 'dev-Table1' 181 | }, 182 | { 183 | TableName: 'dev-Table2' 184 | } 185 | ] 186 | }; 187 | 188 | removeTablePrefixFromResponse('dev-', response); 189 | 190 | expect(response).toEqual({ 191 | ConsumedCapacity: [ 192 | { 193 | TableName: 'Table1' 194 | }, 195 | { 196 | TableName: 'Table2' 197 | } 198 | ] 199 | }); 200 | }); 201 | 202 | it('should be a noop if the table prefix is empty string', () => { 203 | let response: any = { 204 | Responses: { 205 | Table1: [], 206 | Table2: [] 207 | } 208 | }; 209 | 210 | removeTablePrefixFromResponse('', response); 211 | 212 | expect(response).toEqual({ 213 | Responses: { 214 | 'Table1': [], 215 | 'Table2': [] 216 | } 217 | }); 218 | }); 219 | 220 | }); 221 | 222 | }); -------------------------------------------------------------------------------- /lib/estimate-item-size.spec.ts: -------------------------------------------------------------------------------- 1 | import { 2 | estimateWriteCapacityUnits, 3 | estimateReadCapacityUnits, 4 | estimateItemSize 5 | } from './estimate-item-size'; 6 | 7 | describe('lib/estimate-item-size', () => { 8 | 9 | it('estimates write capacity units (1 WCU = 1 kilobyte)', () => { 10 | 11 | let item, numBytes, wcu; 12 | 13 | item = _makeTestItem(42); 14 | numBytes = estimateItemSize(item); 15 | wcu = estimateWriteCapacityUnits(item); 16 | expect(numBytes).toBe(42); 17 | expect(wcu).toBe(1); 18 | 19 | item = _makeTestItem(1024); 20 | numBytes = estimateItemSize(item); 21 | wcu = estimateWriteCapacityUnits(item); 22 
| expect(numBytes).toBe(1024); 23 | expect(wcu).toBe(1); 24 | 25 | item = _makeTestItem(1025); 26 | numBytes = estimateItemSize(item); 27 | wcu = estimateWriteCapacityUnits(item); 28 | expect(numBytes).toBe(1025); 29 | expect(wcu).toBe(2); 30 | 31 | item = _makeTestItem(2048); 32 | numBytes = estimateItemSize(item); 33 | wcu = estimateWriteCapacityUnits(item); 34 | expect(numBytes).toBe(2048); 35 | expect(wcu).toBe(2); 36 | 37 | item = _makeTestItem(2049); 38 | numBytes = estimateItemSize(item); 39 | wcu = estimateWriteCapacityUnits(item); 40 | expect(numBytes).toBe(2049); 41 | expect(wcu).toBe(3); 42 | 43 | }); 44 | 45 | it('estimates read capacity units (1 RCU = 4 kilobytes)', () => { 46 | 47 | let item, numBytes, rcu; 48 | 49 | item = _makeTestItem(42); 50 | numBytes = estimateItemSize(item); 51 | rcu = estimateReadCapacityUnits(item); 52 | expect(numBytes).toBe(42); 53 | expect(rcu).toBe(1); 54 | 55 | item = _makeTestItem(4096); 56 | numBytes = estimateItemSize(item); 57 | rcu = estimateReadCapacityUnits(item); 58 | expect(numBytes).toBe(4096); 59 | expect(rcu).toBe(1); 60 | 61 | item = _makeTestItem(4097); 62 | numBytes = estimateItemSize(item); 63 | rcu = estimateReadCapacityUnits(item); 64 | expect(numBytes).toBe(4097); 65 | expect(rcu).toBe(2); 66 | 67 | item = _makeTestItem(8192); 68 | numBytes = estimateItemSize(item); 69 | rcu = estimateReadCapacityUnits(item); 70 | expect(numBytes).toBe(8192); 71 | expect(rcu).toBe(2); 72 | 73 | item = _makeTestItem(8193); 74 | numBytes = estimateItemSize(item); 75 | rcu = estimateReadCapacityUnits(item); 76 | expect(numBytes).toBe(8193); 77 | expect(rcu).toBe(3); 78 | 79 | }); 80 | 81 | it('estimates the size of a Null attribute', () => { 82 | 83 | let item: TDynamoDBItem = { 84 | MyAttr: { NULL: true } 85 | }; 86 | 87 | let numBytes = estimateItemSize(item); 88 | 89 | // 'MyAttr' (6 bytes) + 1 byte to store a null 90 | expect(numBytes).toBe(6 + 1); 91 | 92 | }); 93 | 94 | it('estimates the size of a Boolean 
attribute', () => { 95 | 96 | let item: TDynamoDBItem = { 97 | MyAttr: { BOOL: true } 98 | }; 99 | 100 | let numBytes = estimateItemSize(item); 101 | 102 | // 'MyAttr' (6 bytes) + 1 byte to store a boolean 103 | expect(numBytes).toBe(6 + 1); 104 | 105 | }); 106 | 107 | it('estimates the size of a Number attribute', () => { 108 | 109 | let item: TDynamoDBItem = { 110 | MyAttr: { N: '42.123456789' } 111 | }; 112 | 113 | let numBytes = estimateItemSize(item); 114 | 115 | // 'MyAttr' (6 bytes) + string length of the number (12 bytes) 116 | expect(numBytes).toBe(6 + 12); 117 | 118 | }); 119 | 120 | it('estimates the size of a String attribute', () => { 121 | 122 | let item: TDynamoDBItem = { 123 | MyAttr: { S: 'Value' } 124 | }; 125 | 126 | let numBytes = estimateItemSize(item); 127 | 128 | // 'MyAttr' (6 bytes) + 'Value' (5 bytes) 129 | expect(numBytes).toBe(6 + 5); 130 | 131 | }); 132 | 133 | it('estimates the size of a NumberSet attribute', () => { 134 | 135 | let item: TDynamoDBItem = { 136 | MyAttr: { 137 | NS: ['10', '20', '30', '40', '50', '60', '70'] 138 | } 139 | }; 140 | 141 | let numBytes = estimateItemSize(item); 142 | 143 | // 'MyAttr' (6 bytes) + sum of all string lengths in the set (2 * 7 bytes) 144 | expect(numBytes).toBe(6 + 2 * 7); 145 | 146 | }); 147 | 148 | it('estimates the size of a StringSet attribute', () => { 149 | 150 | let item: TDynamoDBItem = { 151 | MyAttr: { 152 | SS: ['aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'] 153 | } 154 | }; 155 | 156 | let numBytes = estimateItemSize(item); 157 | 158 | // 'MyAttr' (6 bytes) + sum of all string lengths in the set (2 * 7 bytes) 159 | expect(numBytes).toBe(6 + 2 * 7); 160 | 161 | }); 162 | 163 | it('estimates the size of a List attribute', () => { 164 | 165 | let item: TDynamoDBItem = { 166 | MyAttr: { 167 | L: [ 168 | { S: 'berry' }, 169 | { N: '42' } 170 | ] 171 | } 172 | }; 173 | 174 | let numBytes = estimateItemSize(item); 175 | 176 | // 'MyAttr' (6 bytes) + list overhead (3 bytes) + 'berry' (5 bytes) 
+ '42' (2 bytes) 177 | expect(numBytes).toBe(6 + 3 + 5 + 2); 178 | 179 | }); 180 | 181 | it('estimates the size of a Map attribute', () => { 182 | 183 | let item: TDynamoDBItem = { 184 | MyAttr: { 185 | M: { 186 | FirstAttr: { S: 'berry' }, 187 | SecondAttr: { N: '42' } 188 | } 189 | } 190 | }; 191 | 192 | let numBytes = estimateItemSize(item); 193 | 194 | // 'MyAttr' (6 bytes) + map overhead (3 bytes) + 'berry' (5 bytes) + '42' (2 bytes) 195 | // + 'FirstAttr' (9 bytes) + 'SecondAttr' (10 bytes) 196 | expect(numBytes).toBe(6 + 3 + 5 + 2 + 9 + 10); 197 | 198 | }); 199 | 200 | it('throws a not yet implemented error when attempting to estimate the size of a Binary attribute', () => { 201 | 202 | let item: TDynamoDBItem = { 203 | MyAttr: { 204 | B: null 205 | } 206 | }; 207 | 208 | expect(() => { 209 | estimateItemSize(item); 210 | }).toThrow(new Error('NotYetImplementedException: DynamoDB Binary data type is not yet supported')); 211 | 212 | }); 213 | 214 | it('throws a not yet implemented error when attempting to estimate the size of a BinarySet attribute', () => { 215 | 216 | let item: TDynamoDBItem = { 217 | MyAttr: { 218 | BS: null 219 | } 220 | }; 221 | 222 | expect(() => { 223 | estimateItemSize(item); 224 | }).toThrow(new Error('NotYetImplementedException: DynamoDB BinarySet data type is not yet supported')); 225 | 226 | }); 227 | 228 | it('throws an error if an invalid data type is encountered', () => { 229 | 230 | let item: any = { 231 | MyAttr: { 232 | BAD_KEY: null 233 | } 234 | }; 235 | 236 | expect(() => { 237 | estimateItemSize(item); 238 | }).toThrow(new Error('ValidationException: Invalid attributeKey "BAD_KEY"')); 239 | 240 | }); 241 | 242 | function _makeTestItem(numBytes: number): TDynamoDBItem { 243 | let str = ''; 244 | for (let i = 3; i < numBytes; i++) { 245 | str += 'a'; 246 | } 247 | return { 248 | foo: { S: str } 249 | }; 250 | } 251 | 252 | }); -------------------------------------------------------------------------------- 
/test/mock-dynamodb.ts: -------------------------------------------------------------------------------- 1 | import { DynamoDB } from 'aws-sdk'; 2 | 3 | export interface IMockDynamoDBOptions { 4 | customResponses?: ICustomResponses; 5 | } 6 | 7 | export interface ICustomResponses { 8 | // 'ProvisionedThroughputExceededException' 9 | // 'SomeUnprocessedItems' 10 | // 'ValidationException' 11 | [responseNumber: number]: string; 12 | } 13 | 14 | export class MockDynamoDB { 15 | private _countRequests: number; 16 | private _customResponses: ICustomResponses; 17 | 18 | constructor(options?: any) { 19 | options = options || {}; 20 | 21 | this._customResponses = options.customResponses || {}; 22 | this._countRequests = 0; 23 | } 24 | 25 | public createTable() { 26 | return this._mockApiResult(); 27 | } 28 | 29 | public updateTable() { 30 | return this._mockApiResult(); 31 | } 32 | 33 | public describeTable() { 34 | return this._mockApiResult(); 35 | } 36 | 37 | public deleteTable() { 38 | return this._mockApiResult(); 39 | } 40 | 41 | public getItem() { 42 | return this._mockApiResult(); 43 | } 44 | 45 | public updateItem() { 46 | return this._mockApiResult(); 47 | } 48 | 49 | public deleteItem() { 50 | return this._mockApiResult(); 51 | } 52 | 53 | public batchGetItem() { 54 | return this._mockApiResult(); 55 | } 56 | 57 | public putItem(params: DynamoDB.PutItemInput) { 58 | return { 59 | promise: () => { 60 | return new Promise((resolve, reject) => { 61 | this._countRequests++; 62 | 63 | if (this._isAllValidationException()) { 64 | reject({ 65 | code: 'ValidationException', 66 | statusCode: 400 67 | }); 68 | } else if (this._isThroughputExceeded()) { 69 | reject({ 70 | code: 'ProvisionedThroughputExceededException', 71 | statusCode: 400 72 | }); 73 | } else { 74 | // case: put item successful 75 | resolve({ 76 | ConsumedCapacity: { 77 | TableName: params.TableName, 78 | CapacityUnits: 1 79 | } 80 | }); 81 | } 82 | }); 83 | } 84 | }; 85 | } 86 | 87 | public query(params: 
DynamoDB.QueryInput) { 88 | return this._mockQueryOrScanResponse(params); 89 | } 90 | 91 | public scan(params: DynamoDB.ScanInput) { 92 | return this._mockQueryOrScanResponse(params); 93 | } 94 | 95 | public batchWriteItem(params: DynamoDB.BatchWriteItemInput) { 96 | return { 97 | promise: () => { 98 | return new Promise((resolve, reject) => { 99 | this._countRequests++; 100 | 101 | if (this._isThroughputExceeded()) { 102 | reject({ 103 | code: 'ProvisionedThroughputExceededException', 104 | statusCode: 400 105 | }); 106 | } else { 107 | let tableNames = Object.keys(params.RequestItems); 108 | let firstTableName = tableNames[0]; 109 | let numPutItemSucceeded = 0; 110 | let response = {}; 111 | 112 | if (this._isSomeUnprocessedItems()) { 113 | // case: PutRequest succeeded for 1st item, failed for all the others 114 | numPutItemSucceeded = 1; 115 | response['UnprocessedItems'] = params.RequestItems; 116 | response['UnprocessedItems'][firstTableName] = response['UnprocessedItems'][firstTableName].slice(1); 117 | } else { 118 | // case: PutRequest succeeded for all items 119 | numPutItemSucceeded = params.RequestItems[firstTableName].length; 120 | } 121 | 122 | if (params.ReturnConsumedCapacity === 'INDEXES') { 123 | response['ConsumedCapacity'] = [ 124 | { 125 | CapacityUnits: 6 * numPutItemSucceeded, 126 | TableName: firstTableName, 127 | Table: { 128 | CapacityUnits: numPutItemSucceeded 129 | }, 130 | LocalSecondaryIndexes: { 131 | MyLocalIndex: { 132 | CapacityUnits: 3 * numPutItemSucceeded 133 | } 134 | }, 135 | GlobalSecondaryIndexes: { 136 | MyGlobalIndex: { 137 | CapacityUnits: 2 * numPutItemSucceeded 138 | } 139 | } 140 | } 141 | ]; 142 | } else if (params.ReturnConsumedCapacity === 'TOTAL') { 143 | response['ConsumedCapacity'] = [ 144 | { 145 | CapacityUnits: 6 * numPutItemSucceeded, 146 | TableName: firstTableName 147 | } 148 | ]; 149 | } 150 | 151 | resolve(response); 152 | } 153 | }); 154 | } 155 | }; 156 | } 157 | 158 | private 
_mockQueryOrScanResponse(params: any) { 159 | return { 160 | promise: () => { 161 | return new Promise(resolve => { 162 | this._countRequests++; 163 | 164 | // create mock response 165 | let response = { 166 | Items: [] 167 | }; 168 | 169 | // create mock items 170 | for (let i = 0; i < params.Limit; i++) { 171 | response.Items.push({}); 172 | } 173 | 174 | if (params.ReturnConsumedCapacity === 'INDEXES') { 175 | response['ConsumedCapacity'] = { 176 | CapacityUnits: 7, 177 | TableName: params.TableName, 178 | Table: { 179 | CapacityUnits: 2 180 | }, 181 | LocalSecondaryIndexes: { 182 | MyLocalIndex: { 183 | CapacityUnits: 4 184 | } 185 | }, 186 | GlobalSecondaryIndexes: { 187 | MyGlobalIndex: { 188 | CapacityUnits: 1 189 | } 190 | } 191 | }; 192 | } else if (params.ReturnConsumedCapacity === 'TOTAL') { 193 | response['ConsumedCapacity'] = { 194 | CapacityUnits: 7, 195 | TableName: params.TableName 196 | }; 197 | } 198 | 199 | if (this._countRequests < 3) { 200 | response['LastEvaluatedKey'] = 'foo'; 201 | } 202 | 203 | resolve(response); 204 | }); 205 | } 206 | }; 207 | } 208 | 209 | private _mockApiResult() { 210 | return { 211 | promise: () => { 212 | return new Promise(resolve => { 213 | resolve({}); 214 | }); 215 | } 216 | }; 217 | } 218 | 219 | private _isThroughputExceeded() { 220 | return this._customResponses[this._countRequests] === 'ProvisionedThroughputExceededException'; 221 | } 222 | 223 | private _isSomeUnprocessedItems() { 224 | return this._customResponses[this._countRequests] === 'SomeUnprocessedItems'; 225 | } 226 | 227 | private _isAllValidationException() { 228 | return this._customResponses[this._countRequests] === 'ValidationException'; 229 | } 230 | 231 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![NPM version][npm-image]][npm-url] [![Build Status][travis-image]][travis-url] [![Coverage 
Status][coveralls-image]][coveralls-url] [![License][license-image]][license-url] 2 | 3 | [npm-image]: https://img.shields.io/npm/v/dynamodb-wrapper.svg 4 | [npm-url]: https://www.npmjs.com/package/dynamodb-wrapper 5 | 6 | [travis-image]: https://img.shields.io/travis/Shadowblazen/dynamodb-wrapper.svg 7 | [travis-url]: https://travis-ci.org/Shadowblazen/dynamodb-wrapper 8 | 9 | [coveralls-image]: https://img.shields.io/coveralls/Shadowblazen/dynamodb-wrapper.svg 10 | [coveralls-url]: https://coveralls.io/github/Shadowblazen/dynamodb-wrapper?branch=master 11 | 12 | [license-image]: https://img.shields.io/npm/l/dynamodb-wrapper.svg 13 | [license-url]: https://opensource.org/licenses/MIT 14 | 15 | ## What is dynamodb-wrapper? 16 | 17 | - **Enhanced AWS SDK:** Public interface closely resembles the AWS SDK, making it easier to learn and use. 18 | - **Bulk I/O:** Easily read, write or delete entire collections of items in DynamoDB with a single API call. 19 | - **Events:** Add event hooks to be notified of important events, such as whenever read/write capacity is consumed, or requests are retried due to throttling. 20 | - **Table prefixes:** DynamoDBWrapper can add a table name prefix in requests and remove it in responses. This is helpful if you have multiple environments within the same AWS Account and region. 
21 | 22 | ## Installing 23 | 24 | ``` 25 | npm install dynamodb-wrapper 26 | ``` 27 | 28 | ## Usage 29 | 30 | ### Setup 31 | 32 | Construct the DynamoDBWrapper class 33 | 34 | ```js 35 | var AWS = require('aws-sdk'); 36 | var DynamoDBWrapper = require('dynamodb-wrapper'); 37 | 38 | var dynamoDB = new AWS.DynamoDB({ 39 | // optionally disable AWS retry logic - reasoning explained below 40 | maxRetries: 0 41 | }); 42 | 43 | // see the Configuration section of the README for more options 44 | var dynamoDBWrapper = new DynamoDBWrapper(dynamoDB, { 45 | // optionally enable DynamoDBWrapper retry logic 46 | maxRetries: 6, 47 | retryDelayOptions: { 48 | base: 100 49 | } 50 | }); 51 | ``` 52 | 53 | *(Optional)* If you use DynamoDBWrapper retry logic instead of AWS retry logic, you gain the following benefits: 54 | 55 | 1. Improved batch processing: DynamoDBWrapper will automatically retry any *UnprocessedItems* in your `batchWriteItem` requests. 56 | 2. You can add a `retry` event listener to be notified when requests are throttled. In your application, you can log these events, or even respond by increasing provisioned throughput on the affected table. 57 | 3. DynamoDBWrapper's `retryDelayOptions` actually work as documented (this functionality doesn't work in the AWS JavaScript SDK yet, but there's an [open ticket for this feature request](https://github.com/aws/aws-sdk-js/issues/1100)). 58 | 59 | ``` 60 | dynamoDBWrapper.events.on('retry', function (e) { 61 | console.log( 62 | 'An API call to DynamoDB.' + e.method + '() acting on table ' + 63 | e.tableName + ' was throttled. Retry attempt #' + e.retryCount + 64 | ' will occur after a delay of ' + e.retryDelayMs + 'ms.' 65 | ); 66 | }); 67 | 68 | // An API call to DynamoDB.batchWriteItem() acting on table MyTable 69 | // was throttled. Retry attempt #3 will occur after a delay of 800ms. 
70 | ``` 71 | 72 | *(Optional)* If you use the `ReturnConsumedCapacity` property in your AWS requests, the `consumedCapacity` event listener can notify you whenever read/write capacity is consumed. 73 | 74 | ``` 75 | dynamoDBWrapper.events.on('consumedCapacity', function (e) { 76 | console.log( 77 | 'An API call to DynamoDB.' + e.method + '() consumed ' + 78 | e.capacityType, JSON.stringify(e.consumedCapacity, null, 2) 79 | ); 80 | }); 81 | 82 | // An API call to DynamoDB.batchWriteItem() consumed WriteCapacityUnits 83 | // [ 84 | // { 85 | // "TableName": "MyTable", 86 | // "CapacityUnits": 20 87 | // } 88 | // ] 89 | ``` 90 | 91 | *(Optional)* When using the `DynamoDBWrapper.batchWriteItem()` API method, there is a `batchGroupWritten` event that will notify you of how many items have been processed so far. 92 | 93 | ``` 94 | dynamoDBWrapper.events.on('batchGroupWritten', function (e) { 95 | console.log(e.tableName, e.processedCount); 96 | }); 97 | ``` 98 | 99 | ### Example: Bulk Read 100 | 101 | Read large collections of data from a DynamoDB table with a single API call. Multiple pages of data are aggregated and returned in a single response. 
102 | 103 | @see http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html 104 | 105 | ```js 106 | // params for Query - same format as in the AWS SDK 107 | var sampleQueryParams = { 108 | TableName: 'MyTable', 109 | KeyConditionExpression: 'MyPartitionKey = :pk', 110 | ExpressionAttributeValues: { 111 | ':pk': { 112 | N: '1' 113 | } 114 | } 115 | }; 116 | 117 | // fetches all pages of data from DynamoDB 118 | // promise resolves with the aggregation of all pages, 119 | // or rejects immediately if an error occurs 120 | 121 | dynamoDBWrapper.query(sampleQueryParams) 122 | .then(function (response) { 123 | console.log(response.Items); 124 | }) 125 | .catch(function (err) { 126 | console.error(err); 127 | }); 128 | ``` 129 | 130 | ### Example: Bulk Write/Delete 131 | 132 | Insert or delete large collections of items in one or more DynamoDB tables with a single API call. DynamoDBWrapper batches your requests and aggregates the results into a single response. Use configuration values to fine tune throughput consumption for your use case. 133 | 134 | @see http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html 135 | 136 | ```js 137 | // params for BatchWriteItem - same format as in the AWS SDK 138 | var sampleParams = { 139 | RequestItems: { 140 | MyTable: [ 141 | { 142 | PutRequest: { 143 | Item: { 144 | MyPartitionKey: { N: '1' } 145 | } 146 | } 147 | }, 148 | { 149 | DeleteRequest: { 150 | Key: { 151 | MyPartitionKey: { N: '2' } 152 | } 153 | } 154 | }, 155 | // this array can have thousands of items ... 156 | ], 157 | AnotherTable: [ 158 | { 159 | PutRequest: { 160 | Item: { 161 | User: { S: 'Batman' } 162 | } 163 | } 164 | }, 165 | { 166 | DeleteRequest: { 167 | Key: { 168 | User: { S: 'Superman' } 169 | } 170 | } 171 | }, 172 | // this array can have thousands of items ... 
173 | ] 174 | } 175 | }; 176 | 177 | // performs all Puts/Deletes in the array 178 | // promise resolves when all items are written successfully, 179 | // or rejects immediately if an error occurs 180 | 181 | dynamoDBWrapper.batchWriteItem(sampleParams, { 182 | // use configuration to control and optimize throughput consumption 183 | 184 | // write 10 items to MyTable every 500 milliseconds 185 | // this strategy is best if you have known, consistent item sizes 186 | MyTable: { 187 | partitionStrategy: 'EqualItemCount', 188 | targetItemCount: 10, 189 | groupDelayMs: 500 190 | }, 191 | 192 | // write up to 50 WCU of data to AnotherTable every 1000 milliseconds 193 | // this strategy is best if you have unknown or variable item sizes, 194 | // because it evenly distributes the items across requests so as 195 | // to minimize throughput spikes (which can cause throttling) 196 | AnotherTable: { 197 | partitionStrategy: 'EvenlyDistributedGroupWCU', 198 | targetGroupWCU: 50, 199 | groupDelayMs: 1000 200 | } 201 | }) 202 | .then(function (response) { 203 | console.log(response); 204 | }) 205 | .catch(function (err) { 206 | console.error(err); 207 | }); 208 | ``` 209 | 210 | ### Example: Table Prefixes 211 | 212 | You may wish to work with duplicate copies of the same set of tables. For example: "dev-MyTable" and "stg-MyTable" if you have dev and stage environments under the same AWS account. DynamoDBWrapper supports this use-case via a configuration-driven `tableNamePrefix` option. 213 | 214 | ```js 215 | // load this "environment" variable from a config file 216 | // so it will have different values per environment 217 | // we'll just use dev for this example 218 | 219 | var environment = 'dev-'; 220 | 221 | // Configure DynamoDBWrapper with the environment-specific prefix 222 | // Note: we only do this here in one place; there's no need to 223 | // sprinkle the prefix throughout your codebase.
224 | 225 | var AWS = require('aws-sdk'); 226 | var DynamoDBWrapper = require('dynamodb-wrapper'); 227 | var dynamoDB = new AWS.DynamoDB(); 228 | var dynamoDBWrapper = new DynamoDBWrapper(dynamoDB, { 229 | tableNamePrefix: environment 230 | }); 231 | 232 | // Create the table like usual... 233 | 234 | dynamoDBWrapper.createTable({ 235 | TableName: 'MyTable', 236 | // more params... 237 | }); 238 | 239 | // The new table will be named "dev-MyTable" instead of "MyTable" 240 | // This works with all the other API methods too. 241 | 242 | var promise = dynamoDBWrapper.getItem({ 243 | TableName: 'MyTable', 244 | ReturnConsumedCapacity: 'TOTAL', 245 | // more params... 246 | }); 247 | 248 | // Although the real table name in AWS DynamoDB is "dev-MyTable", 249 | // the prefix will be stripped from the response for transparency: 250 | 251 | promise.then(function (response) { 252 | console.log(response); 253 | }); 254 | 255 | // { 256 | // ConsumedCapacity: { 257 | // TableName: 'MyTable', // <-- no prefix in the response 258 | // CapacityUnits: 1 259 | // }, 260 | // Item: { ... } 261 | // } 262 | 263 | // In summary: the prefix is prepended to all requests 264 | // and stripped from all responses 265 | ``` 266 | 267 | ## Configuration 268 | 269 | *This section is copied from the `IDynamoDBWrapperOptions` interface in the `index.d.ts` file.* 270 | 271 | The `DynamoDBWrapper` constructor accepts an optional configuration object with the following properties: 272 | - `tableNamePrefix` (string) - A prefix to add to all requests and remove from all responses. 273 | - `groupDelayMs` (number) - The delay (in milliseconds) between individual requests made by `query()`, `scan()`, and `batchWriteItem()`. Defaults to 100 ms. 274 | - `maxRetries` (number) - The maximum number of retries to attempt with a request. Note: this property is identical to the one described in [the AWS documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/DynamoDB.html#constructor-property).
275 | - `retryDelayOptions` (object) - A set of options to configure the retry delay on retryable errors. Note: this property is identical to the one described in [the AWS documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/DynamoDB.html#constructor-property). Currently supported options are: 276 | - `base` (number) - The base number of milliseconds to use in the exponential backoff for operation retries. Defaults to 100 ms. 277 | - `customBackoff` (Function) - A custom function that accepts a retry count and returns the amount of time to delay in milliseconds. The `base` option will be ignored if this option is supplied. 278 | 279 | ## API 280 | 281 | The `DynamoDBWrapper` class supports a Promise-based API with the following methods. These are wrappers around the AWS SDK method of the same name. Please refer to the AWS [API documentation](http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Operations.html) and [JavaScript SDK documentation](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/DynamoDB.html) for more details: 282 | 283 | The following methods are passed straight through to the AWS JavaScript SDK: 284 | 285 | - `createTable(params)` 286 | - `updateTable(params)` 287 | - `describeTable(params)` 288 | - `deleteTable(params)` 289 | - `getItem(params)` 290 | - `updateItem(params)` 291 | - `putItem(params)` 292 | - `deleteItem(params)` 293 | - `batchGetItem(params)` 294 | 295 | ## Enhanced API methods 296 | 297 | The following API methods have enhanced behavior to support bulk I/O: 298 | 299 | - `query(params, options)` - Fetches all pages of data from a DynamoDB query, making multiple requests and aggregating responses when necessary. 300 | - `options.groupDelayMs` (number) - the delay between individual requests. Overrides the configuration property of the same name in the constructor. Defaults to 100 ms. 
301 | - `scan(params, options)` - Fetches all pages of data from a DynamoDB scan, making multiple requests and aggregating responses when necessary. 302 | - `options.groupDelayMs` (number) - the delay between individual requests. Overrides the configuration property of the same name in the constructor. Defaults to 100 ms. 303 | - `batchWriteItem(params, options)` - Writes or deletes large collections of items in multiple DynamoDB tables, batching items and making multiple requests when necessary. 304 | - `options` is a mapping of table names to option hashes. Each option hash may have the following properties: 305 | - `groupDelayMs` (number) - the delay between individual requests. Overrides the configuration property of the same name in the constructor. Defaults to 100 ms. 306 | - `partitionStrategy` (string) - strategy to use when partitioning the write requests array. Possible values: *EqualItemCount* or *EvenlyDistributedGroupWCU*. 307 | - `targetItemCount` (number) - the number of items to put in each group when using the *EqualItemCount* partition strategy. 308 | - `targetGroupWCU` (number) - the size threshold (in WriteCapacityUnits) of each group when using the *EvenlyDistributedGroupWCU* partition strategy. 
309 | 310 | ## Roadmap 311 | 312 | - **Streams:** Add method signatures that return Streams (instead of Promises), allowing for better integration ecosystems such as gulp 313 | -------------------------------------------------------------------------------- /lib/dynamodb-wrapper.ts: -------------------------------------------------------------------------------- 1 | /// 2 | 3 | import { EventEmitter } from 'events'; 4 | import { DynamoDB } from 'aws-sdk'; 5 | import { ErrorCode, ErrorMessage, Exception } from './error-types'; 6 | import { getNextGroupByItemCount, getNextGroupByTotalWCU } from './partition-strategy'; 7 | import { addTablePrefixToRequest, removeTablePrefixFromResponse, removePrefix, removePrefixes } from './table-prefixes'; 8 | import { getNonNegativeInteger, getPositiveInteger, appendArray, wait } from './utils'; 9 | 10 | export class DynamoDBWrapper { 11 | public dynamoDB: any; 12 | public events: EventEmitter; 13 | 14 | public tableNamePrefix: string; 15 | public groupDelayMs: number; 16 | public maxRetries: number; 17 | public retryDelayOptions: any; 18 | 19 | constructor(dynamoDB: any, options?: IDynamoDBWrapperOptions) { 20 | this.dynamoDB = dynamoDB; 21 | this.events = new EventEmitter(); 22 | 23 | options = options || {}; 24 | options.retryDelayOptions = options.retryDelayOptions || {}; 25 | this.tableNamePrefix = typeof options.tableNamePrefix === 'string' ? options.tableNamePrefix : ''; 26 | this.groupDelayMs = getNonNegativeInteger([options.groupDelayMs, 100]); 27 | this.maxRetries = getNonNegativeInteger([options.maxRetries, 10]); 28 | this.retryDelayOptions = {}; 29 | this.retryDelayOptions.base = getNonNegativeInteger([options.retryDelayOptions.base, 100]); 30 | if (typeof options.retryDelayOptions.customBackoff === 'function') { 31 | this.retryDelayOptions.customBackoff = options.retryDelayOptions.customBackoff; 32 | } 33 | } 34 | 35 | /** 36 | * A lightweight wrapper around the DynamoDB CreateTable method. 
37 | * 38 | * @param params 39 | * @returns {Promise} 40 | */ 41 | 42 | public async createTable(params: DynamoDB.CreateTableInput): Promise { 43 | return await this._callDynamoDB('createTable', params); 44 | } 45 | 46 | /** 47 | * A lightweight wrapper around the DynamoDB UpdateTable method. 48 | * 49 | * @param params 50 | * @returns {Promise} 51 | */ 52 | 53 | public async updateTable(params: DynamoDB.UpdateTableInput): Promise { 54 | return await this._callDynamoDB('updateTable', params); 55 | } 56 | 57 | /** 58 | * A lightweight wrapper around the DynamoDB DescribeTable method. 59 | * 60 | * @param params 61 | * @returns {Promise} 62 | */ 63 | 64 | public async describeTable(params: DynamoDB.DescribeTableInput): Promise { 65 | return await this._callDynamoDB('describeTable', params); 66 | } 67 | 68 | /** 69 | * A lightweight wrapper around the DynamoDB DeleteTable method. 70 | * 71 | * @param params 72 | * @returns {Promise} 73 | */ 74 | 75 | public async deleteTable(params: DynamoDB.DeleteTableInput): Promise { 76 | return await this._callDynamoDB('deleteTable', params); 77 | } 78 | 79 | /** 80 | * A lightweight wrapper around the DynamoDB GetItem method. 81 | * 82 | * @param params 83 | * @returns {Promise} 84 | */ 85 | 86 | public async getItem(params: DynamoDB.GetItemInput): Promise { 87 | return await this._callDynamoDB('getItem', params); 88 | } 89 | 90 | /** 91 | * A lightweight wrapper around the DynamoDB UpdateItem method. 92 | * 93 | * @param params 94 | * @returns {Promise} 95 | */ 96 | 97 | public async updateItem(params: DynamoDB.UpdateItemInput): Promise { 98 | return await this._callDynamoDB('updateItem', params); 99 | } 100 | 101 | /** 102 | * A lightweight wrapper around the DynamoDB PutItem method. 
103 | * 104 | * @param params 105 | * @returns {Promise} 106 | */ 107 | 108 | public async putItem(params: DynamoDB.PutItemInput): Promise { 109 | return await this._callDynamoDB('putItem', params); 110 | } 111 | 112 | /** 113 | * A lightweight wrapper around the DynamoDB DeleteItem method. 114 | * 115 | * @param params 116 | * @returns {Promise} 117 | */ 118 | 119 | public async deleteItem(params: DynamoDB.DeleteItemInput): Promise { 120 | return await this._callDynamoDB('deleteItem', params); 121 | } 122 | 123 | /** 124 | * A lightweight wrapper around the DynamoDB BatchGetItem method. 125 | * 126 | * @param params 127 | * @returns {Promise} 128 | */ 129 | 130 | public async batchGetItem(params: DynamoDB.BatchGetItemInput): Promise { 131 | return await this._callDynamoDB('batchGetItem', params); 132 | } 133 | 134 | /** 135 | * A lightweight wrapper around the DynamoDB Query method. 136 | * 137 | * The underlying DynamoDB Query method always returns 1 page of data per call. This method will return all pages 138 | * of data, making multiple calls to the underlying DynamoDB Query method if there is more than 1 page of data. 139 | * All pages of data are aggregated and returned in the response. 140 | * 141 | * @param params 142 | * @param [options] 143 | * @returns {Promise} 144 | */ 145 | 146 | public async query(params: DynamoDB.QueryInput, options?: IQueryOptions): Promise { 147 | // set default options 148 | options = options || {}; 149 | options.groupDelayMs = getNonNegativeInteger([options.groupDelayMs, this.groupDelayMs]); 150 | 151 | let responses = await this._queryOrScanHelper('query', params, options.groupDelayMs); 152 | return _makeQueryOrScanResponse(responses); 153 | } 154 | 155 | /** 156 | * A lightweight wrapper around the DynamoDB Scan method. 157 | * 158 | * The underlying DynamoDB Scan method always returns 1 page of data per call. 
This method will return all pages 159 | * of data, making multiple calls to the underlying DynamoDB Scan method if there is more than 1 page of data. 160 | * All pages of data are aggregated and returned in the response. 161 | * 162 | * @param params 163 | * @param [options] 164 | * @returns {Promise} 165 | */ 166 | 167 | public async scan(params: DynamoDB.ScanInput, options?: IScanOptions): Promise { 168 | // set default options 169 | options = options || {}; 170 | options.groupDelayMs = getNonNegativeInteger([options.groupDelayMs, this.groupDelayMs]); 171 | 172 | let responses = await this._queryOrScanHelper('scan', params, options.groupDelayMs); 173 | return _makeQueryOrScanResponse(responses); 174 | } 175 | 176 | /** 177 | * A lightweight wrapper around the DynamoDB BatchWriteItem method. 178 | * 179 | * The underlying DynamoDB BatchWriteItem method only supports an array of at most 25 requests. 180 | * This method supports an unlimited number of requests, and will partition the requests into 181 | * groups of count <= 25, making multiple calls to the underlying DynamoDB BatchWriteItem method 182 | * if there is more than 1 group. 183 | * 184 | * See partition-strategy.ts for partition implementation details. 
185 | * 186 | * @param params 187 | * @param [options] 188 | * @returns {Promise} 189 | */ 190 | 191 | public async batchWriteItem(params: DynamoDB.BatchWriteItemInput, 192 | options?: IBatchWriteItemOptions): Promise { 193 | 194 | _validateBatchWriteItemParams(params); 195 | 196 | let tableNames = Object.keys(params.RequestItems); 197 | let totalRequestItems = 0; 198 | let promises = []; 199 | options = options || {}; 200 | 201 | for (let tableName of tableNames) { 202 | // set default options 203 | let option = options[tableName] || {}; 204 | option.partitionStrategy = option.partitionStrategy || options.partitionStrategy; 205 | option.groupDelayMs = getNonNegativeInteger([option.groupDelayMs, options.groupDelayMs, this.groupDelayMs]); 206 | option.targetGroupWCU = getPositiveInteger([option.targetGroupWCU, options.targetGroupWCU, 5]); 207 | option.targetItemCount = getPositiveInteger([option.targetItemCount, options.targetItemCount, 25]); 208 | 209 | totalRequestItems += params.RequestItems[tableName].length; 210 | promises.push(this._batchWriteItemHelper(tableName, params, option)); 211 | } 212 | 213 | let responsesPerTable = await Promise.all(promises); 214 | 215 | return _makeBatchWriteItemResponse(tableNames, responsesPerTable, totalRequestItems); 216 | } 217 | 218 | private async _queryOrScanHelper(method: string, params: any, groupDelayMs: number): Promise { 219 | let list = []; 220 | 221 | // first page of data 222 | let res = await this._callDynamoDB(method, params); 223 | list.push(res); 224 | 225 | // make subsequent requests to get remaining pages of data 226 | while (res.LastEvaluatedKey) { 227 | await wait(groupDelayMs); 228 | params.ExclusiveStartKey = res.LastEvaluatedKey; 229 | res = await this._callDynamoDB(method, params); 230 | list.push(res); 231 | } 232 | 233 | return list; 234 | } 235 | 236 | private async _batchWriteItemHelper(tableName: string, params: DynamoDB.BatchWriteItemInput, 237 | options: IBatchWriteItemOption): Promise { 238 | 
239 | let list = []; 240 | 241 | const writeRequests = params.RequestItems[tableName]; 242 | const getNextGroup = options.partitionStrategy === 'EvenlyDistributedGroupWCU' 243 | ? getNextGroupByTotalWCU 244 | : getNextGroupByItemCount; 245 | 246 | // construct params for the next group of items 247 | let groupParams: any = { 248 | RequestItems: {} 249 | }; 250 | if (params.ReturnConsumedCapacity) { 251 | groupParams.ReturnConsumedCapacity = params.ReturnConsumedCapacity; 252 | } 253 | 254 | // first group 255 | let groupStartIndex = 0; 256 | let nextGroup = getNextGroup(writeRequests, groupStartIndex, options); 257 | 258 | while (true) { 259 | // make batch write request 260 | groupParams.RequestItems[tableName] = nextGroup; 261 | let res = await this._callDynamoDB('batchWriteItem', groupParams); 262 | list.push(res); 263 | 264 | // update loop conditions 265 | groupStartIndex += nextGroup.length; 266 | nextGroup = getNextGroup(writeRequests, groupStartIndex, options); 267 | 268 | // notifies observers the number of items processed so far 269 | this.events.emit('batchGroupWritten', { 270 | tableName: tableName, 271 | processedCount: groupStartIndex 272 | }); 273 | 274 | // wait before processing the next group, or exit if nothing left to do 275 | if (nextGroup) { 276 | await wait(options.groupDelayMs); 277 | } else { 278 | break; 279 | } 280 | } 281 | 282 | return list; 283 | } 284 | 285 | /** 286 | * Entry point for calling into the AWS SDK. This is the centralized location for retry logic and events. 287 | * All DynamoDBWrapper public methods are funneled through here. 
288 | * 289 | * @param method 290 | * @param params 291 | * @returns {any} 292 | * @private 293 | */ 294 | 295 | private async _callDynamoDB(method: string, params: any): Promise { 296 | let retryCount = 0; 297 | let responses = []; 298 | let shouldRetry, result, error; 299 | 300 | while (true) { 301 | shouldRetry = false; 302 | result = null; 303 | 304 | try { 305 | addTablePrefixToRequest(this.tableNamePrefix, params); 306 | result = await this.dynamoDB[method](params).promise(); 307 | removeTablePrefixFromResponse(this.tableNamePrefix, result); 308 | responses.push(result); 309 | this._emitConsumedCapacitySummary(method, result); 310 | 311 | // BatchWriteItem: retry unprocessed items 312 | if (result.UnprocessedItems && Object.keys(result.UnprocessedItems).length > 0) { 313 | params.RequestItems = result.UnprocessedItems; 314 | shouldRetry = true; 315 | } 316 | } catch (e) { 317 | error = e; 318 | if (e.code === ErrorCode.ProvisionedThroughputExceededException || 319 | e.code === ErrorCode.ThrottlingException || 320 | e.code === ErrorCode.LimitExceededException || 321 | e.statusCode === 500 || e.statusCode === 503) { 322 | shouldRetry = true; 323 | } else { 324 | throw e; 325 | } 326 | } 327 | 328 | if (shouldRetry && ++retryCount <= this.maxRetries) { 329 | let waitMs = this._backoffFunction(retryCount); 330 | let tableName = removePrefix(this.tableNamePrefix, _extractTableNameFromRequest(params)); 331 | this.events.emit('retry', { 332 | tableName: tableName, 333 | method: method, 334 | retryCount: retryCount, 335 | retryDelayMs: waitMs 336 | }); 337 | await wait(waitMs); 338 | } else { 339 | break; 340 | } 341 | } 342 | 343 | if (method === 'batchWriteItem') { 344 | result = {}; 345 | _aggregateConsumedCapacityMultipleFromResponses(responses, result); 346 | } 347 | 348 | if (retryCount > this.maxRetries) { 349 | if (method === 'batchWriteItem') { 350 | // instead of throwing an error, always return UnprocessedItems and defer decision upstream 351 | // set 
UnprocessedItems equal to the UnprocessedItems from the last response 352 | // in the array, or use params.RequestItems if there were no successful responses 353 | result.UnprocessedItems = responses.length > 0 354 | ? responses[responses.length - 1].UnprocessedItems 355 | : removePrefixes(this.tableNamePrefix, params.RequestItems); 356 | } else { 357 | throw error; 358 | } 359 | } 360 | 361 | return result; 362 | } 363 | 364 | private _backoffFunction(retryCount: number): number { 365 | if (this.retryDelayOptions.customBackoff) { 366 | return this.retryDelayOptions.customBackoff(retryCount); 367 | } else { 368 | return this.retryDelayOptions.base * Math.pow(2, retryCount - 1); 369 | } 370 | } 371 | 372 | private _emitConsumedCapacitySummary(method: string, response: any) { 373 | if (response.ConsumedCapacity) { 374 | let capacityType = _getMethodCapacityUnitsType(method); 375 | this.events.emit('consumedCapacity', { 376 | method: method, 377 | capacityType: capacityType, 378 | consumedCapacity: response.ConsumedCapacity 379 | }); 380 | } 381 | } 382 | 383 | } 384 | 385 | function _extractTableNameFromRequest(params: any): string { 386 | return params.RequestItems ? 
Object.keys(params.RequestItems)[0] : params.TableName; 387 | } 388 | 389 | function _getMethodCapacityUnitsType(method: string): string { 390 | switch (method) { 391 | case 'getItem': 392 | case 'query': 393 | case 'scan': 394 | case 'batchGetItem': 395 | return 'ReadCapacityUnits'; 396 | case 'putItem': 397 | case 'updateItem': 398 | case 'deleteItem': 399 | case 'batchWriteItem': 400 | default: 401 | return 'WriteCapacityUnits'; 402 | } 403 | } 404 | 405 | function _makeQueryOrScanResponse(responses: any): any { 406 | let count = 0; 407 | let scannedCount = 0; 408 | let items = []; 409 | 410 | for (let res of responses) { 411 | count += res.Count; 412 | scannedCount += res.ScannedCount; 413 | appendArray(items, res.Items); 414 | } 415 | 416 | let result: any = { 417 | Count: count, 418 | ScannedCount: scannedCount, 419 | Items: items 420 | }; 421 | 422 | if (responses[0].ConsumedCapacity) { 423 | let listCC = responses.map(res => res.ConsumedCapacity); 424 | result.ConsumedCapacity = _aggregateConsumedCapacity(listCC); 425 | } 426 | 427 | return result; 428 | } 429 | 430 | function _makeBatchWriteItemResponse(tableNames: string[], responsesPerTable: DynamoDB.BatchWriteItemOutput[][], 431 | totalRequestItems: number): DynamoDB.BatchWriteItemOutput { 432 | 433 | let totalUnprocessedItems = 0; 434 | let unprocessedItemsHash = {}; 435 | 436 | for (let i = 0; i < tableNames.length; i++) { 437 | let unprocessedItems = []; 438 | let tableName = tableNames[i]; 439 | let listResponses = responsesPerTable[i]; 440 | 441 | // get a flat array of unprocessed items for this table 442 | for (let res of listResponses) { 443 | if (res.UnprocessedItems) { 444 | appendArray(unprocessedItems, res.UnprocessedItems[tableName]); 445 | } 446 | } 447 | 448 | // if there are any unprocessed items for this table, add them to the hash 449 | if (unprocessedItems.length > 0) { 450 | unprocessedItemsHash[tableName] = unprocessedItems; 451 | totalUnprocessedItems += unprocessedItems.length; 
452 | } 453 | } 454 | 455 | // if all items are unprocessed, throw an error 456 | if (totalUnprocessedItems === totalRequestItems) { 457 | throw new Exception( 458 | ErrorCode.ProvisionedThroughputExceededException, 459 | ErrorMessage.ProvisionedThroughputExceededException 460 | ); 461 | } 462 | 463 | // otherwise, construct a 200 OK response 464 | let result: any = {}; 465 | if (totalUnprocessedItems > 0) { 466 | result.UnprocessedItems = unprocessedItemsHash; 467 | } 468 | 469 | // aggregate consumed capacity for all tables 470 | let responses = []; 471 | for (let r of responsesPerTable) { 472 | appendArray(responses, r); 473 | } 474 | _aggregateConsumedCapacityMultipleFromResponses(responses, result); 475 | 476 | return result; 477 | } 478 | 479 | function _aggregateConsumedCapacityMultipleFromResponses(responses: any[], result: any): void { 480 | let listCCM = []; 481 | 482 | if (responses.length > 0) { 483 | for (let res of responses) { 484 | if (res.ConsumedCapacity) { 485 | listCCM.push(res.ConsumedCapacity); 486 | } 487 | } 488 | } 489 | 490 | if (listCCM.length > 0) { 491 | result.ConsumedCapacity = _aggregateConsumedCapacityMultiple(listCCM); 492 | } 493 | } 494 | 495 | function _aggregateConsumedCapacityMultiple(listCCM: DynamoDB.ConsumedCapacityMultiple[]): DynamoDB.ConsumedCapacityMultiple { 496 | let map: TDictionary = {}; 497 | 498 | for (let ccm of listCCM) { 499 | for (let cc of ccm) { 500 | if (!map[cc.TableName]) { 501 | map[cc.TableName] = []; 502 | } 503 | map[cc.TableName].push(cc); 504 | } 505 | } 506 | 507 | let result = []; 508 | 509 | for (let tableName in map) { 510 | /* tslint:disable:forin */ 511 | // noinspection JSUnfilteredForInLoop 512 | result.push(_aggregateConsumedCapacity(map[tableName])); 513 | /* tslint:enable:forin */ 514 | } 515 | 516 | return result; 517 | } 518 | 519 | function _aggregateConsumedCapacity(listCC: DynamoDB.ConsumedCapacity[]): DynamoDB.ConsumedCapacity { 520 | let totalCC: any = { 521 | CapacityUnits: 0, 522 
| TableName: listCC[0].TableName 523 | }; 524 | 525 | // total 526 | for (let cc of listCC) { 527 | totalCC.CapacityUnits += cc.CapacityUnits; 528 | } 529 | 530 | // table 531 | if (listCC[0].Table) { 532 | totalCC.Table = { 533 | CapacityUnits: 0 534 | }; 535 | 536 | for (let cc of listCC) { 537 | totalCC.Table.CapacityUnits += cc.Table.CapacityUnits; 538 | } 539 | } 540 | 541 | // local secondary indexes 542 | if (listCC[0].LocalSecondaryIndexes) { 543 | totalCC.LocalSecondaryIndexes = _aggregateConsumedCapacityForIndexes(listCC, 'LocalSecondaryIndexes'); 544 | } 545 | 546 | // global secondary indexes 547 | if (listCC[0].GlobalSecondaryIndexes) { 548 | totalCC.GlobalSecondaryIndexes = _aggregateConsumedCapacityForIndexes(listCC, 'GlobalSecondaryIndexes'); 549 | } 550 | 551 | return totalCC; 552 | } 553 | 554 | function _aggregateConsumedCapacityForIndexes(listCC: DynamoDB.ConsumedCapacity[], 555 | secondaryIndex: string): DynamoDB.SecondaryIndexesCapacityMap { 556 | let resultIndexes: any = {}; 557 | 558 | for (let cc of listCC) { 559 | let indexes = cc[secondaryIndex]; 560 | for (let key in indexes) { 561 | /* tslint:disable:forin */ 562 | // noinspection JSUnfilteredForInLoop 563 | resultIndexes[key] = resultIndexes[key] || { 564 | CapacityUnits: 0 565 | }; 566 | // noinspection JSUnfilteredForInLoop 567 | resultIndexes[key].CapacityUnits += indexes[key].CapacityUnits; 568 | /* tslint:enable:forin */ 569 | } 570 | } 571 | 572 | return resultIndexes; 573 | } 574 | 575 | function _validateBatchWriteItemParams(params: DynamoDB.BatchWriteItemInput): void { 576 | // ReturnItemCollectionMetrics not yet supported 577 | if (params.ReturnItemCollectionMetrics === 'SIZE') { 578 | throw new Exception( 579 | ErrorCode.NotYetImplementedError, 580 | ErrorMessage.ItemCollectionMetrics 581 | ); 582 | } 583 | } 584 | -------------------------------------------------------------------------------- /index.d.ts: 
-------------------------------------------------------------------------------- 1 | declare module "dynamodb-wrapper" { 2 | 3 | // ------------------------------------------------------------------------ 4 | // DynamoDBWrapper public API 5 | // ------------------------------------------------------------------------ 6 | 7 | class DynamoDBWrapper { 8 | constructor(dynamoDB: any, options?: DynamoDBWrapper.IDynamoDBWrapperOptions); 9 | dynamoDB: any; 10 | events: any; 11 | createTable(params: DynamoDBWrapper.CreateTableInput): Promise; 12 | updateTable(params: DynamoDBWrapper.UpdateTableInput): Promise; 13 | describeTable(params: DynamoDBWrapper.DescribeTableInput): Promise; 14 | deleteTable(params: DynamoDBWrapper.DeleteTableInput): Promise; 15 | getItem(params: DynamoDBWrapper.GetItemInput): Promise; 16 | updateItem(params: DynamoDBWrapper.UpdateItemInput): Promise; 17 | putItem(params: DynamoDBWrapper.PutItemInput): Promise; 18 | deleteItem(params: DynamoDBWrapper.DeleteItemInput): Promise; 19 | batchGetItem(params: DynamoDBWrapper.BatchGetItemInput): Promise; 20 | query(params: DynamoDBWrapper.QueryInput, options?: DynamoDBWrapper.IQueryOptions): Promise; 21 | scan(params: DynamoDBWrapper.ScanInput, options?: DynamoDBWrapper.IScanOptions): Promise; 22 | batchWriteItem(params: DynamoDBWrapper.BatchWriteItemInput, options?: DynamoDBWrapper.IBatchWriteItemOptions): Promise; 23 | } 24 | 25 | module DynamoDBWrapper { 26 | 27 | export interface IDynamoDBWrapperOptions { 28 | 29 | // A prefix to add to all requests and remove from all responses. 30 | tableNamePrefix?: string; 31 | 32 | // The DynamoDBWrapper methods query(), scan(), and batchWriteItem() make multiple requests when necessary. 33 | // This setting is the delay (in milliseconds) between individual requests made by these operations. 34 | groupDelayMs?: number; 35 | 36 | // The maximum amount of retries to attempt with a request.
37 | // @see http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/DynamoDB.html#constructor-property 38 | maxRetries?: number; 39 | 40 | // A set of options to configure the retry delay on retryable errors. Currently supported options are: 41 | // @see http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/DynamoDB.html#constructor-property 42 | retryDelayOptions?: { 43 | // The base number of milliseconds to use in the exponential backoff for operation retries. Defaults to 100 ms. 44 | base?: number; 45 | // A custom function that accepts a retry count and returns the amount of time to delay in milliseconds. 46 | // The base option will be ignored if this option is supplied. 47 | customBackoff?: Function; 48 | }; 49 | 50 | } 51 | 52 | export interface IBatchWriteItemOptions { 53 | [tableName: string]: IBatchWriteItemOption | string | number; 54 | } 55 | 56 | export interface IBatchWriteItemOption { 57 | partitionStrategy?: string; 58 | targetItemCount?: number; 59 | targetGroupWCU?: number; 60 | groupDelayMs?: number; 61 | } 62 | 63 | export interface IQueryOptions { 64 | groupDelayMs?: number; 65 | } 66 | 67 | export interface IScanOptions { 68 | groupDelayMs?: number; 69 | } 70 | 71 | // ------------------------------------------------------------------------ 72 | // Copy/pasted from aws-sdk typings (aws-dynamodb.d.ts) 73 | // ------------------------------------------------------------------------ 74 | 75 | export type AttributeAction = string; 76 | export type AttributeDefinitions = AttributeDefinition[]; 77 | export type AttributeMap = {[key: string]: AttributeValue}; 78 | export type AttributeName = string; // max: 65535 79 | export type AttributeNameList = AttributeName[]; // min: 1 80 | export type AttributeUpdates = {[key: string]: AttributeValueUpdate}; 81 | export type AttributeValueList = AttributeValue[]; 82 | export type Backfilling = boolean; 83 | export type BatchGetRequestMap = {[key: string]: KeysAndAttributes}; // max: 100, min: 1 84 | export 
type BatchGetResponseMap = {[key: string]: ItemList}; 85 | export type BatchWriteItemRequestMap = {[key: string]: WriteRequests}; // max: 25, min: 1 86 | export type BinaryAttributeValue = any; // type: blob 87 | export type BinarySetAttributeValue = BinaryAttributeValue[]; 88 | export type BooleanAttributeValue = boolean; 89 | export type BooleanObject = boolean; 90 | export type ComparisonOperator = string; 91 | export type ConditionExpression = string; 92 | export type ConditionalOperator = string; 93 | export type ConsistentRead = boolean; 94 | export type ConsumedCapacityMultiple = ConsumedCapacity[]; 95 | export type ConsumedCapacityUnits = number; 96 | export type Date = number; 97 | export type ErrorMessage = string; 98 | export type ExpectedAttributeMap = {[key: string]: ExpectedAttributeValue}; 99 | export type ExpressionAttributeNameMap = {[key: string]: AttributeName}; 100 | export type ExpressionAttributeNameVariable = string; 101 | export type ExpressionAttributeValueMap = {[key: string]: AttributeValue}; 102 | export type ExpressionAttributeValueVariable = string; 103 | export type FilterConditionMap = {[key: string]: Condition}; 104 | export type GlobalSecondaryIndexDescriptionList = GlobalSecondaryIndexDescription[]; 105 | export type GlobalSecondaryIndexList = GlobalSecondaryIndex[]; 106 | export type GlobalSecondaryIndexUpdateList = GlobalSecondaryIndexUpdate[]; 107 | export type IndexName = string; // pattern: "[a-zA-Z0-9_.-]+", max: 255, min: 3 108 | export type IndexStatus = string; 109 | export type Integer = number; 110 | export type ItemCollectionKeyAttributeMap = {[key: string]: AttributeValue}; 111 | export type ItemCollectionMetricsMultiple = ItemCollectionMetrics[]; 112 | export type ItemCollectionMetricsPerTable = {[key: string]: ItemCollectionMetricsMultiple}; 113 | export type ItemCollectionSizeEstimateBound = number; 114 | export type ItemCollectionSizeEstimateRange = ItemCollectionSizeEstimateBound[]; 115 | export type ItemList = 
AttributeMap[]; 116 | export type Key = {[key: string]: AttributeValue}; 117 | export type KeyConditions = {[key: string]: Condition}; 118 | export type KeyExpression = string; 119 | export type KeyList = Key[]; // max: 100, min: 1 120 | export type KeySchema = KeySchemaElement[]; // max: 2, min: 1 121 | export type KeySchemaAttributeName = string; // max: 255, min: 1 122 | export type KeyType = string; 123 | export type ListAttributeValue = AttributeValue[]; 124 | export type ListTablesInputLimit = number; // max: 100, min: 1 125 | export type LocalSecondaryIndexDescriptionList = LocalSecondaryIndexDescription[]; 126 | export type LocalSecondaryIndexList = LocalSecondaryIndex[]; 127 | export type Long = number; 128 | export type MapAttributeValue = {[key: string]: AttributeValue}; 129 | export type NonKeyAttributeName = string; // max: 255, min: 1 130 | export type NonKeyAttributeNameList = NonKeyAttributeName[]; // max: 20, min: 1 131 | export type NullAttributeValue = boolean; 132 | export type NumberAttributeValue = string; 133 | export type NumberSetAttributeValue = NumberAttributeValue[]; 134 | export type PositiveIntegerObject = number; // min: 1 135 | export type PositiveLongObject = number; // min: 1 136 | export type ProjectionExpression = string; 137 | export type ProjectionType = string; 138 | export type PutItemInputAttributeMap = {[key: string]: AttributeValue}; 139 | export type ReturnConsumedCapacity = string; 140 | export type ReturnItemCollectionMetrics = string; 141 | export type ReturnValue = string; 142 | export type ScalarAttributeType = string; 143 | export type ScanSegment = number; // max: 999999 144 | export type ScanTotalSegments = number; // max: 1000000, min: 1 145 | export type SecondaryIndexesCapacityMap = {[key: string]: Capacity}; 146 | export type Select = string; 147 | export type StreamArn = string; // max: 1024, min: 37 148 | export type StreamEnabled = boolean; 149 | export type StreamViewType = string; 150 | export type String 
= string; 151 | export type StringAttributeValue = string; 152 | export type StringSetAttributeValue = StringAttributeValue[]; 153 | export type TableName = string; // pattern: "[a-zA-Z0-9_.-]+", max: 255, min: 3 154 | export type TableNameList = TableName[]; 155 | export type TableStatus = string; 156 | export type UpdateExpression = string; 157 | export type WriteRequests = WriteRequest[]; // max: 25, min: 1 158 | 159 | export interface AttributeDefinition { 160 | AttributeName: KeySchemaAttributeName; 161 | AttributeType: ScalarAttributeType; 162 | } 163 | export interface AttributeValue { 164 | S?: StringAttributeValue; 165 | N?: NumberAttributeValue; 166 | B?: BinaryAttributeValue; 167 | SS?: StringSetAttributeValue; 168 | NS?: NumberSetAttributeValue; 169 | BS?: BinarySetAttributeValue; 170 | M?: MapAttributeValue; 171 | L?: ListAttributeValue; 172 | NULL?: NullAttributeValue; 173 | BOOL?: BooleanAttributeValue; 174 | } 175 | export interface AttributeValueUpdate { 176 | Value?: AttributeValue; 177 | Action?: AttributeAction; 178 | } 179 | export interface BatchGetItemInput { 180 | RequestItems: BatchGetRequestMap; 181 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 182 | } 183 | export interface BatchGetItemOutput { 184 | Responses?: BatchGetResponseMap; 185 | UnprocessedKeys?: BatchGetRequestMap; 186 | ConsumedCapacity?: ConsumedCapacityMultiple; 187 | } 188 | export interface BatchWriteItemInput { 189 | RequestItems: BatchWriteItemRequestMap; 190 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 191 | ReturnItemCollectionMetrics?: ReturnItemCollectionMetrics; 192 | } 193 | export interface BatchWriteItemOutput { 194 | UnprocessedItems?: BatchWriteItemRequestMap; 195 | ItemCollectionMetrics?: ItemCollectionMetricsPerTable; 196 | ConsumedCapacity?: ConsumedCapacityMultiple; 197 | } 198 | export interface Capacity { 199 | CapacityUnits?: ConsumedCapacityUnits; 200 | } 201 | export interface Condition { 202 | AttributeValueList?: AttributeValueList; 203 | 
ComparisonOperator: ComparisonOperator; 204 | } 205 | export interface ConditionalCheckFailedException { 206 | message?: ErrorMessage; 207 | } 208 | export interface ConsumedCapacity { 209 | TableName?: TableName; 210 | CapacityUnits?: ConsumedCapacityUnits; 211 | Table?: Capacity; 212 | LocalSecondaryIndexes?: SecondaryIndexesCapacityMap; 213 | GlobalSecondaryIndexes?: SecondaryIndexesCapacityMap; 214 | } 215 | export interface CreateGlobalSecondaryIndexAction { 216 | IndexName: IndexName; 217 | KeySchema: KeySchema; 218 | Projection: Projection; 219 | ProvisionedThroughput: ProvisionedThroughput; 220 | } 221 | export interface CreateTableInput { 222 | AttributeDefinitions: AttributeDefinitions; 223 | TableName: TableName; 224 | KeySchema: KeySchema; 225 | LocalSecondaryIndexes?: LocalSecondaryIndexList; 226 | GlobalSecondaryIndexes?: GlobalSecondaryIndexList; 227 | ProvisionedThroughput: ProvisionedThroughput; 228 | StreamSpecification?: StreamSpecification; 229 | } 230 | export interface CreateTableOutput { 231 | TableDescription?: TableDescription; 232 | } 233 | export interface DeleteGlobalSecondaryIndexAction { 234 | IndexName: IndexName; 235 | } 236 | export interface DeleteItemInput { 237 | TableName: TableName; 238 | Key: Key; 239 | Expected?: ExpectedAttributeMap; 240 | ConditionalOperator?: ConditionalOperator; 241 | ReturnValues?: ReturnValue; 242 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 243 | ReturnItemCollectionMetrics?: ReturnItemCollectionMetrics; 244 | ConditionExpression?: ConditionExpression; 245 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 246 | ExpressionAttributeValues?: ExpressionAttributeValueMap; 247 | } 248 | export interface DeleteItemOutput { 249 | Attributes?: AttributeMap; 250 | ConsumedCapacity?: ConsumedCapacity; 251 | ItemCollectionMetrics?: ItemCollectionMetrics; 252 | } 253 | export interface DeleteRequest { 254 | Key: Key; 255 | } 256 | export interface DeleteTableInput { 257 | TableName: TableName; 258 | } 
259 | export interface DeleteTableOutput { 260 | TableDescription?: TableDescription; 261 | } 262 | export interface DescribeTableInput { 263 | TableName: TableName; 264 | } 265 | export interface DescribeTableOutput { 266 | Table?: TableDescription; 267 | } 268 | export interface ExpectedAttributeValue { 269 | Value?: AttributeValue; 270 | Exists?: BooleanObject; 271 | ComparisonOperator?: ComparisonOperator; 272 | AttributeValueList?: AttributeValueList; 273 | } 274 | export interface GetItemInput { 275 | TableName: TableName; 276 | Key: Key; 277 | AttributesToGet?: AttributeNameList; 278 | ConsistentRead?: ConsistentRead; 279 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 280 | ProjectionExpression?: ProjectionExpression; 281 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 282 | } 283 | export interface GetItemOutput { 284 | Item?: AttributeMap; 285 | ConsumedCapacity?: ConsumedCapacity; 286 | } 287 | export interface GlobalSecondaryIndex { 288 | IndexName: IndexName; 289 | KeySchema: KeySchema; 290 | Projection: Projection; 291 | ProvisionedThroughput: ProvisionedThroughput; 292 | } 293 | export interface GlobalSecondaryIndexDescription { 294 | IndexName?: IndexName; 295 | KeySchema?: KeySchema; 296 | Projection?: Projection; 297 | IndexStatus?: IndexStatus; 298 | Backfilling?: Backfilling; 299 | ProvisionedThroughput?: ProvisionedThroughputDescription; 300 | IndexSizeBytes?: Long; 301 | ItemCount?: Long; 302 | IndexArn?: String; 303 | } 304 | export interface GlobalSecondaryIndexUpdate { 305 | Update?: UpdateGlobalSecondaryIndexAction; 306 | Create?: CreateGlobalSecondaryIndexAction; 307 | Delete?: DeleteGlobalSecondaryIndexAction; 308 | } 309 | export interface InternalServerError { 310 | message?: ErrorMessage; 311 | } 312 | export interface ItemCollectionMetrics { 313 | ItemCollectionKey?: ItemCollectionKeyAttributeMap; 314 | SizeEstimateRangeGB?: ItemCollectionSizeEstimateRange; 315 | } 316 | export interface 
ItemCollectionSizeLimitExceededException { 317 | message?: ErrorMessage; 318 | } 319 | export interface KeySchemaElement { 320 | AttributeName: KeySchemaAttributeName; 321 | KeyType: KeyType; 322 | } 323 | export interface KeysAndAttributes { 324 | Keys: KeyList; 325 | AttributesToGet?: AttributeNameList; 326 | ConsistentRead?: ConsistentRead; 327 | ProjectionExpression?: ProjectionExpression; 328 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 329 | } 330 | export interface LimitExceededException { 331 | message?: ErrorMessage; 332 | } 333 | export interface ListTablesInput { 334 | ExclusiveStartTableName?: TableName; 335 | Limit?: ListTablesInputLimit; 336 | } 337 | export interface ListTablesOutput { 338 | TableNames?: TableNameList; 339 | LastEvaluatedTableName?: TableName; 340 | } 341 | export interface LocalSecondaryIndex { 342 | IndexName: IndexName; 343 | KeySchema: KeySchema; 344 | Projection: Projection; 345 | } 346 | export interface LocalSecondaryIndexDescription { 347 | IndexName?: IndexName; 348 | KeySchema?: KeySchema; 349 | Projection?: Projection; 350 | IndexSizeBytes?: Long; 351 | ItemCount?: Long; 352 | IndexArn?: String; 353 | } 354 | export interface Projection { 355 | ProjectionType?: ProjectionType; 356 | NonKeyAttributes?: NonKeyAttributeNameList; 357 | } 358 | export interface ProvisionedThroughput { 359 | ReadCapacityUnits: PositiveLongObject; 360 | WriteCapacityUnits: PositiveLongObject; 361 | } 362 | export interface ProvisionedThroughputDescription { 363 | LastIncreaseDateTime?: Date; 364 | LastDecreaseDateTime?: Date; 365 | NumberOfDecreasesToday?: PositiveLongObject; 366 | ReadCapacityUnits?: PositiveLongObject; 367 | WriteCapacityUnits?: PositiveLongObject; 368 | } 369 | export interface ProvisionedThroughputExceededException { 370 | message?: ErrorMessage; 371 | } 372 | export interface PutItemInput { 373 | TableName: TableName; 374 | Item: PutItemInputAttributeMap; 375 | Expected?: ExpectedAttributeMap; 376 | 
ReturnValues?: ReturnValue; 377 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 378 | ReturnItemCollectionMetrics?: ReturnItemCollectionMetrics; 379 | ConditionalOperator?: ConditionalOperator; 380 | ConditionExpression?: ConditionExpression; 381 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 382 | ExpressionAttributeValues?: ExpressionAttributeValueMap; 383 | } 384 | export interface PutItemOutput { 385 | Attributes?: AttributeMap; 386 | ConsumedCapacity?: ConsumedCapacity; 387 | ItemCollectionMetrics?: ItemCollectionMetrics; 388 | } 389 | export interface PutRequest { 390 | Item: PutItemInputAttributeMap; 391 | } 392 | export interface QueryInput { 393 | TableName: TableName; 394 | IndexName?: IndexName; 395 | Select?: Select; 396 | AttributesToGet?: AttributeNameList; 397 | Limit?: PositiveIntegerObject; 398 | ConsistentRead?: ConsistentRead; 399 | KeyConditions?: KeyConditions; 400 | QueryFilter?: FilterConditionMap; 401 | ConditionalOperator?: ConditionalOperator; 402 | ScanIndexForward?: BooleanObject; 403 | ExclusiveStartKey?: Key; 404 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 405 | ProjectionExpression?: ProjectionExpression; 406 | FilterExpression?: ConditionExpression; 407 | KeyConditionExpression?: KeyExpression; 408 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 409 | ExpressionAttributeValues?: ExpressionAttributeValueMap; 410 | } 411 | export interface QueryOutput { 412 | Items?: ItemList; 413 | Count?: Integer; 414 | ScannedCount?: Integer; 415 | LastEvaluatedKey?: Key; 416 | ConsumedCapacity?: ConsumedCapacity; 417 | } 418 | export interface ResourceInUseException { 419 | message?: ErrorMessage; 420 | } 421 | export interface ResourceNotFoundException { 422 | message?: ErrorMessage; 423 | } 424 | export interface ScanInput { 425 | TableName: TableName; 426 | IndexName?: IndexName; 427 | AttributesToGet?: AttributeNameList; 428 | Limit?: PositiveIntegerObject; 429 | Select?: Select; 430 | ScanFilter?: 
FilterConditionMap; 431 | ConditionalOperator?: ConditionalOperator; 432 | ExclusiveStartKey?: Key; 433 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 434 | TotalSegments?: ScanTotalSegments; 435 | Segment?: ScanSegment; 436 | ProjectionExpression?: ProjectionExpression; 437 | FilterExpression?: ConditionExpression; 438 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 439 | ExpressionAttributeValues?: ExpressionAttributeValueMap; 440 | ConsistentRead?: ConsistentRead; 441 | } 442 | export interface ScanOutput { 443 | Items?: ItemList; 444 | Count?: Integer; 445 | ScannedCount?: Integer; 446 | LastEvaluatedKey?: Key; 447 | ConsumedCapacity?: ConsumedCapacity; 448 | } 449 | export interface StreamSpecification { 450 | StreamEnabled?: StreamEnabled; 451 | StreamViewType?: StreamViewType; 452 | } 453 | export interface TableDescription { 454 | AttributeDefinitions?: AttributeDefinitions; 455 | TableName?: TableName; 456 | KeySchema?: KeySchema; 457 | TableStatus?: TableStatus; 458 | CreationDateTime?: Date; 459 | ProvisionedThroughput?: ProvisionedThroughputDescription; 460 | TableSizeBytes?: Long; 461 | ItemCount?: Long; 462 | TableArn?: String; 463 | LocalSecondaryIndexes?: LocalSecondaryIndexDescriptionList; 464 | GlobalSecondaryIndexes?: GlobalSecondaryIndexDescriptionList; 465 | StreamSpecification?: StreamSpecification; 466 | LatestStreamLabel?: String; 467 | LatestStreamArn?: StreamArn; 468 | } 469 | export interface UpdateGlobalSecondaryIndexAction { 470 | IndexName: IndexName; 471 | ProvisionedThroughput: ProvisionedThroughput; 472 | } 473 | export interface UpdateItemInput { 474 | TableName: TableName; 475 | Key: Key; 476 | AttributeUpdates?: AttributeUpdates; 477 | Expected?: ExpectedAttributeMap; 478 | ConditionalOperator?: ConditionalOperator; 479 | ReturnValues?: ReturnValue; 480 | ReturnConsumedCapacity?: ReturnConsumedCapacity; 481 | ReturnItemCollectionMetrics?: ReturnItemCollectionMetrics; 482 | UpdateExpression?: UpdateExpression; 483 | 
ConditionExpression?: ConditionExpression; 484 | ExpressionAttributeNames?: ExpressionAttributeNameMap; 485 | ExpressionAttributeValues?: ExpressionAttributeValueMap; 486 | } 487 | export interface UpdateItemOutput { 488 | Attributes?: AttributeMap; 489 | ConsumedCapacity?: ConsumedCapacity; 490 | ItemCollectionMetrics?: ItemCollectionMetrics; 491 | } 492 | export interface UpdateTableInput { 493 | AttributeDefinitions?: AttributeDefinitions; 494 | TableName: TableName; 495 | ProvisionedThroughput?: ProvisionedThroughput; 496 | GlobalSecondaryIndexUpdates?: GlobalSecondaryIndexUpdateList; 497 | StreamSpecification?: StreamSpecification; 498 | } 499 | export interface UpdateTableOutput { 500 | TableDescription?: TableDescription; 501 | } 502 | export interface WriteRequest { 503 | PutRequest?: PutRequest; 504 | DeleteRequest?: DeleteRequest; 505 | } 506 | 507 | } 508 | 509 | export = DynamoDBWrapper; 510 | } -------------------------------------------------------------------------------- /lib/dynamodb-wrapper.spec.ts: -------------------------------------------------------------------------------- 1 | import { DynamoDB } from 'aws-sdk'; 2 | import { IMockDynamoDBOptions, MockDynamoDB } from '../test/mock-dynamodb'; 3 | import { DynamoDBWrapper } from './dynamodb-wrapper'; 4 | 5 | describe('lib/dynamodb-wrapper', () => { 6 | 7 | function _setupDynamoDBWrapper(options?: IMockDynamoDBOptions) { 8 | options = options || {}; 9 | let mockDynamoDB = new MockDynamoDB(options); 10 | return { 11 | dynamoDB: mockDynamoDB, 12 | dynamoDBWrapper: new DynamoDBWrapper(mockDynamoDB, { 13 | tableNamePrefix: 'local-', 14 | groupDelayMs: 0, 15 | maxRetries: 2, 16 | retryDelayOptions: { 17 | base: 0 18 | } 19 | }) 20 | }; 21 | } 22 | 23 | it('should initialize with default options', () => { 24 | let dynamoDB = new MockDynamoDB(); 25 | let dynamoDBWrapper = new DynamoDBWrapper(dynamoDB); 26 | 27 | expect(dynamoDBWrapper.tableNamePrefix).toBe(''); 28 | 
expect(dynamoDBWrapper.groupDelayMs).toBe(100); 29 | expect(dynamoDBWrapper.maxRetries).toBe(10); 30 | expect(dynamoDBWrapper.retryDelayOptions).toEqual({ 31 | base: 100 32 | }); 33 | }); 34 | 35 | it('should initialize with custom options', () => { 36 | let dynamoDB = new MockDynamoDB(); 37 | let dynamoDBWrapper = new DynamoDBWrapper(dynamoDB, { 38 | tableNamePrefix: 'local', 39 | groupDelayMs: 5, 40 | maxRetries: 3, 41 | retryDelayOptions: { 42 | base: 42, 43 | customBackoff: function (retryCount) { return 100 * retryCount; } 44 | } 45 | }); 46 | 47 | expect(dynamoDBWrapper.tableNamePrefix).toBe('local'); 48 | expect(dynamoDBWrapper.groupDelayMs).toBe(5); 49 | expect(dynamoDBWrapper.maxRetries).toBe(3); 50 | expect(dynamoDBWrapper.retryDelayOptions.base).toBe(42); 51 | expect(dynamoDBWrapper.retryDelayOptions.customBackoff).toBeDefined(); 52 | }); 53 | 54 | it('should initialize with custom options', () => { 55 | let dynamoDB = new MockDynamoDB(); 56 | let dynamoDBWrapper = new DynamoDBWrapper(dynamoDB, { 57 | tableNamePrefix: 'local', 58 | groupDelayMs: 5, 59 | maxRetries: 3, 60 | retryDelayOptions: { 61 | base: 42, 62 | customBackoff: function (retryCount) { return 100 * retryCount; } 63 | } 64 | }); 65 | 66 | expect(dynamoDBWrapper.tableNamePrefix).toBe('local'); 67 | expect(dynamoDBWrapper.groupDelayMs).toBe(5); 68 | expect(dynamoDBWrapper.maxRetries).toBe(3); 69 | expect(dynamoDBWrapper.retryDelayOptions.base).toBe(42); 70 | expect(dynamoDBWrapper.retryDelayOptions.customBackoff).toBeDefined(); 71 | }); 72 | 73 | [ 74 | 'createTable', 75 | 'updateTable', 76 | 'describeTable', 77 | 'deleteTable', 78 | 'getItem', 79 | 'putItem', 80 | 'updateItem', 81 | 'deleteItem', 82 | 'batchGetItem' 83 | ].forEach(method => { 84 | it('should pass ' + method + '() calls straight through to the AWS SDK', async () => { 85 | let params: any = { 86 | TableName: 'Test' 87 | }; 88 | let mock = _setupDynamoDBWrapper(); 89 | let dynamoDB = mock.dynamoDB; 90 | let dynamoDBWrapper = 
mock.dynamoDBWrapper; 91 | dynamoDBWrapper.tableNamePrefix = 'local-'; 92 | 93 | spyOn(dynamoDB, method).and.callThrough(); 94 | await dynamoDBWrapper[method](params); 95 | 96 | expect(params.TableName).toBe('local-Test'); 97 | expect(dynamoDB[method]).toHaveBeenCalledWith(params); 98 | }); 99 | }); 100 | 101 | describe('putItem()', () => { 102 | 103 | function _setupPutItemParams(options?): DynamoDB.PutItemInput { 104 | options = options || {}; 105 | 106 | let params: any = { 107 | TableName: 'Test', 108 | Item: { 109 | MyPartitionKey: { N: '1' } 110 | } 111 | }; 112 | 113 | if (options.ReturnConsumedCapacity) { 114 | params.ReturnConsumedCapacity = options.ReturnConsumedCapacity; 115 | } 116 | 117 | return params; 118 | } 119 | 120 | it('should put item', async () => { 121 | let params = _setupPutItemParams(); 122 | let mock = _setupDynamoDBWrapper(); 123 | let dynamoDB = mock.dynamoDB; 124 | let dynamoDBWrapper = mock.dynamoDBWrapper; 125 | 126 | spyOn(dynamoDB, 'putItem').and.callThrough(); 127 | await dynamoDBWrapper.putItem(params); 128 | 129 | expect(dynamoDB.putItem).toHaveBeenCalledWith(params); 130 | }); 131 | 132 | it('should emit a "consumedCapacity" event when a response contains a ConsumedCapacity object', async () => { 133 | let params = _setupPutItemParams({ ReturnConsumedCapacity: 'TOTAL' }); 134 | let mock = _setupDynamoDBWrapper(); 135 | let dynamoDB = mock.dynamoDB; 136 | let dynamoDBWrapper = mock.dynamoDBWrapper; 137 | 138 | let event = null; 139 | 140 | dynamoDBWrapper.events.on('consumedCapacity', function onConsumedCapacity(e) { 141 | event = e; 142 | }); 143 | 144 | await dynamoDBWrapper.putItem(params); 145 | 146 | expect(event).toEqual({ 147 | method: 'putItem', 148 | capacityType: 'WriteCapacityUnits', 149 | consumedCapacity: { 150 | TableName: 'Test', 151 | CapacityUnits: 1 152 | } 153 | }); 154 | }); 155 | 156 | it('should retry a failed request (throttled)', async () => { 157 | let params = _setupPutItemParams(); 158 | let mock = 
_setupDynamoDBWrapper({ 159 | customResponses: { 160 | 1: 'ProvisionedThroughputExceededException' 161 | } 162 | }); 163 | let dynamoDB = mock.dynamoDB; 164 | let dynamoDBWrapper = mock.dynamoDBWrapper; 165 | 166 | spyOn(dynamoDB, 'putItem').and.callThrough(); 167 | await dynamoDBWrapper.putItem(params); 168 | 169 | expect(dynamoDB.putItem).toHaveBeenCalledTimes(2); 170 | }); 171 | 172 | it('should emit a "retry" event when retrying a failed request', async () => { 173 | let params = _setupPutItemParams(); 174 | let mock = _setupDynamoDBWrapper({ 175 | customResponses: { 176 | 1: 'ProvisionedThroughputExceededException' 177 | } 178 | }); 179 | let dynamoDB = mock.dynamoDB; 180 | let dynamoDBWrapper = mock.dynamoDBWrapper; 181 | 182 | let event = null; 183 | 184 | dynamoDBWrapper.events.on('retry', function onRetry(e) { 185 | event = e; 186 | }); 187 | 188 | await dynamoDBWrapper.putItem(params); 189 | 190 | expect(event).toEqual({ 191 | tableName: 'Test', 192 | method: 'putItem', 193 | retryCount: 1, 194 | retryDelayMs: 0 195 | }); 196 | }); 197 | 198 | it('should throw a fatal exception when the maximum number of retries is exceeded', async () => { 199 | let params = _setupPutItemParams(); 200 | let mock = _setupDynamoDBWrapper({ 201 | customResponses: { 202 | 1: 'ProvisionedThroughputExceededException', 203 | 2: 'ProvisionedThroughputExceededException', 204 | 3: 'ProvisionedThroughputExceededException' 205 | } 206 | }); 207 | let dynamoDB = mock.dynamoDB; 208 | let dynamoDBWrapper = mock.dynamoDBWrapper; 209 | dynamoDBWrapper.retryDelayOptions.customBackoff = () => 0; 210 | 211 | spyOn(dynamoDB, 'putItem').and.callThrough(); 212 | 213 | let exception; 214 | try { 215 | await dynamoDBWrapper.putItem(params); 216 | } catch (e) { 217 | exception = e; 218 | } 219 | 220 | expect(dynamoDB.putItem).toHaveBeenCalledTimes(3); 221 | expect(exception.code).toBe('ProvisionedThroughputExceededException'); 222 | expect(exception.statusCode).toBe(400); 223 | }); 224 | 225 | 
it('should pass AWS non-retryable errors through', async () => { 226 | let params = _setupPutItemParams(); 227 | let mock = _setupDynamoDBWrapper({ 228 | customResponses: { 229 | 1: 'ValidationException' 230 | } 231 | }); 232 | let dynamoDB = mock.dynamoDB; 233 | let dynamoDBWrapper = mock.dynamoDBWrapper; 234 | 235 | let exception; 236 | try { 237 | await dynamoDBWrapper.putItem(params); 238 | } catch (e) { 239 | exception = e; 240 | } 241 | 242 | expect(exception.code).toBe('ValidationException'); 243 | expect(exception.statusCode).toBe(400); 244 | }); 245 | }); 246 | 247 | describe('query()', () => { 248 | 249 | function _setupQueryParams(returnConsumedCapacity?: string): DynamoDB.QueryInput { 250 | let params: any = { 251 | TableName: 'Test', 252 | KeyConditionExpression: 'MyPartitionKey = :pk', 253 | ExpressionAttributeValues: { 254 | ':pk': { 255 | N: '1' 256 | } 257 | }, 258 | Limit: 2 259 | }; 260 | 261 | if (returnConsumedCapacity) { 262 | params['ReturnConsumedCapacity'] = returnConsumedCapacity; 263 | } 264 | 265 | return params; 266 | } 267 | 268 | it('should query by LastEvaluatedKey to return all pages of data', async () => { 269 | let params = _setupQueryParams(); 270 | let mock = _setupDynamoDBWrapper(); 271 | let dynamoDB = mock.dynamoDB; 272 | let dynamoDBWrapper = mock.dynamoDBWrapper; 273 | 274 | spyOn(dynamoDB, 'query').and.callThrough(); 275 | let response = await dynamoDBWrapper.query(params); 276 | 277 | expect(dynamoDB.query).toHaveBeenCalledTimes(3); 278 | expect(response.Items.length).toBe(6); 279 | expect(response.ConsumedCapacity).not.toBeDefined(); 280 | }); 281 | 282 | it('should aggregate consumed capacity (TOTAL) from multiple responses', async () => { 283 | let params = _setupQueryParams('TOTAL'); 284 | let mock = _setupDynamoDBWrapper(); 285 | let dynamoDB = mock.dynamoDB; 286 | let dynamoDBWrapper = mock.dynamoDBWrapper; 287 | 288 | spyOn(dynamoDB, 'query').and.callThrough(); 289 | let response = await 
dynamoDBWrapper.query(params); 290 | 291 | expect(dynamoDB.query).toHaveBeenCalledTimes(3); 292 | expect(response.Items.length).toBe(6); 293 | expect(response.ConsumedCapacity).toEqual({ 294 | CapacityUnits: 21, 295 | TableName: 'Test' 296 | }); 297 | }); 298 | 299 | it('should aggregate consumed capacity (INDEXES) from multiple responses', async () => { 300 | let params = _setupQueryParams('INDEXES'); 301 | let mock = _setupDynamoDBWrapper(); 302 | let dynamoDB = mock.dynamoDB; 303 | let dynamoDBWrapper = mock.dynamoDBWrapper; 304 | 305 | spyOn(dynamoDB, 'query').and.callThrough(); 306 | let response = await dynamoDBWrapper.query(params); 307 | 308 | expect(dynamoDB.query).toHaveBeenCalledTimes(3); 309 | expect(response.Items.length).toBe(6); 310 | expect(response.ConsumedCapacity).toEqual({ 311 | CapacityUnits: 21, 312 | TableName: 'Test', 313 | Table: { 314 | CapacityUnits: 6 315 | }, 316 | LocalSecondaryIndexes: { 317 | MyLocalIndex: { 318 | CapacityUnits: 12 319 | } 320 | }, 321 | GlobalSecondaryIndexes: { 322 | MyGlobalIndex: { 323 | CapacityUnits: 3 324 | } 325 | } 326 | }); 327 | }); 328 | }); 329 | 330 | describe('scan()', () => { 331 | 332 | function _setupScanParams(returnConsumedCapacity?: string): DynamoDB.QueryInput { 333 | let params: any = { 334 | TableName: 'Test', 335 | KeyConditionExpression: 'MyPartitionKey = :pk', 336 | ExpressionAttributeValues: { 337 | ':pk': { 338 | N: '1' 339 | } 340 | }, 341 | Limit: 2 342 | }; 343 | 344 | if (returnConsumedCapacity) { 345 | params['ReturnConsumedCapacity'] = returnConsumedCapacity; 346 | } 347 | 348 | return params; 349 | } 350 | 351 | it('should scan by LastEvaluatedKey to return all pages of data', async () => { 352 | let params = _setupScanParams(); 353 | let mock = _setupDynamoDBWrapper(); 354 | let dynamoDB = mock.dynamoDB; 355 | let dynamoDBWrapper = mock.dynamoDBWrapper; 356 | 357 | spyOn(dynamoDB, 'scan').and.callThrough(); 358 | let response = await dynamoDBWrapper.scan(params); 359 | 360 | 
expect(dynamoDB.scan).toHaveBeenCalledTimes(3); 361 | expect(response.Items.length).toBe(6); 362 | expect(response.ConsumedCapacity).not.toBeDefined(); 363 | }); 364 | 365 | it('should aggregate consumed capacity (TOTAL) from multiple responses', async () => { 366 | let params = _setupScanParams('TOTAL'); 367 | let mock = _setupDynamoDBWrapper(); 368 | let dynamoDB = mock.dynamoDB; 369 | let dynamoDBWrapper = mock.dynamoDBWrapper; 370 | 371 | spyOn(dynamoDB, 'scan').and.callThrough(); 372 | let response = await dynamoDBWrapper.scan(params); 373 | 374 | expect(dynamoDB.scan).toHaveBeenCalledTimes(3); 375 | expect(response.Items.length).toBe(6); 376 | expect(response.ConsumedCapacity).toEqual({ 377 | CapacityUnits: 21, 378 | TableName: 'Test' 379 | }); 380 | }); 381 | 382 | it('should aggregate consumed capacity (INDEXES) from multiple responses', async () => { 383 | let params = _setupScanParams('INDEXES'); 384 | let mock = _setupDynamoDBWrapper(); 385 | let dynamoDB = mock.dynamoDB; 386 | let dynamoDBWrapper = mock.dynamoDBWrapper; 387 | 388 | spyOn(dynamoDB, 'scan').and.callThrough(); 389 | let response = await dynamoDBWrapper.scan(params); 390 | 391 | expect(dynamoDB.scan).toHaveBeenCalledTimes(3); 392 | expect(response.Items.length).toBe(6); 393 | expect(response.ConsumedCapacity).toEqual({ 394 | CapacityUnits: 21, 395 | TableName: 'Test', 396 | Table: { 397 | CapacityUnits: 6 398 | }, 399 | LocalSecondaryIndexes: { 400 | MyLocalIndex: { 401 | CapacityUnits: 12 402 | } 403 | }, 404 | GlobalSecondaryIndexes: { 405 | MyGlobalIndex: { 406 | CapacityUnits: 3 407 | } 408 | } 409 | }); 410 | }); 411 | }); 412 | 413 | describe('batchWriteItem()', () => { 414 | 415 | interface ISetupOpts { 416 | isMultipleTables?: boolean; 417 | returnConsumedCapacity?: string; 418 | } 419 | 420 | function _setupBatchWriteItemParams(opts?: ISetupOpts): DynamoDB.BatchWriteItemInput { 421 | let params: any = { 422 | RequestItems: { 423 | Test: [] 424 | } 425 | }; 426 | 427 | for (let i = 
0; i < 10; i++) { 428 | params.RequestItems['Test'].push({ 429 | PutRequest: { 430 | Item: { 431 | MyPartitionKey: { N: i.toString() } 432 | } 433 | } 434 | }); 435 | } 436 | 437 | if (opts && opts.isMultipleTables) { 438 | params.RequestItems['AnotherTest'] = []; 439 | for (let i = 0; i < 4; i++) { 440 | params.RequestItems['AnotherTest'].push({ 441 | PutRequest: { 442 | Item: { 443 | MyPartitionKey: { N: i.toString() } 444 | } 445 | } 446 | }); 447 | } 448 | } 449 | 450 | if (opts && opts.returnConsumedCapacity) { 451 | params['ReturnConsumedCapacity'] = opts.returnConsumedCapacity; 452 | } 453 | 454 | return params; 455 | } 456 | 457 | it('should throw a NotYetImplemented exception for item collection metrics', async () => { 458 | let mock = _setupDynamoDBWrapper(); 459 | let dynamoDBWrapper = mock.dynamoDBWrapper; 460 | let params: any = { 461 | ReturnItemCollectionMetrics: 'SIZE' 462 | }; 463 | 464 | let exception; 465 | try { 466 | await dynamoDBWrapper.batchWriteItem(params); 467 | } catch (e) { 468 | exception = e; 469 | } 470 | 471 | expect(exception.code).toBe('NotYetImplementedError'); 472 | expect(exception.message).toBe('ReturnItemCollectionMetrics is supported in the AWS DynamoDB API, ' + 473 | 'but this capability is not yet implemented by this wrapper library.'); 474 | }); 475 | 476 | it('should batch write items with default options', async () => { 477 | let params = _setupBatchWriteItemParams(); 478 | let mock = _setupDynamoDBWrapper(); 479 | let dynamoDB = mock.dynamoDB; 480 | let dynamoDBWrapper = mock.dynamoDBWrapper; 481 | 482 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 483 | 484 | await dynamoDBWrapper.batchWriteItem(params); 485 | 486 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(1); 487 | }); 488 | 489 | it('should emit batchGroupWritten after processing a group', async () => { 490 | let params = _setupBatchWriteItemParams({ 491 | isMultipleTables: true 492 | }); 493 | let mock = _setupDynamoDBWrapper(); 494 | let 
dynamoDB = mock.dynamoDB; 495 | let dynamoDBWrapper = mock.dynamoDBWrapper; 496 | 497 | let counts = new Map(); 498 | dynamoDBWrapper.events.on('batchGroupWritten', function onConsumedCapacity(e) { 499 | counts.set(e.tableName, e.processedCount); 500 | }); 501 | 502 | await dynamoDBWrapper.batchWriteItem(params); 503 | 504 | expect(counts.size).toEqual(2); 505 | expect(counts.get('Test')).toEqual(10); 506 | expect(counts.get('AnotherTest')).toEqual(4); 507 | }); 508 | 509 | it('should batch write items with custom options per table', async () => { 510 | let params = _setupBatchWriteItemParams(); 511 | let mock = _setupDynamoDBWrapper(); 512 | let dynamoDB = mock.dynamoDB; 513 | let dynamoDBWrapper = mock.dynamoDBWrapper; 514 | 515 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 516 | 517 | await dynamoDBWrapper.batchWriteItem(params, { 518 | Test: { 519 | partitionStrategy: 'EvenlyDistributedGroupWCU', 520 | targetGroupWCU: 4 521 | } 522 | }); 523 | 524 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 525 | }); 526 | 527 | it('should batch write items with custom options (legacy for backwards compatibility)', async () => { 528 | let params = _setupBatchWriteItemParams(); 529 | let mock = _setupDynamoDBWrapper(); 530 | let dynamoDB = mock.dynamoDB; 531 | let dynamoDBWrapper = mock.dynamoDBWrapper; 532 | 533 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 534 | 535 | await dynamoDBWrapper.batchWriteItem(params, { 536 | partitionStrategy: 'EvenlyDistributedGroupWCU', 537 | targetGroupWCU: 4 538 | }); 539 | 540 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 541 | }); 542 | 543 | it('should retry failed requests when there are UnprocessedItems', async () => { 544 | let params = _setupBatchWriteItemParams(); 545 | let mock = _setupDynamoDBWrapper({ 546 | customResponses: { 547 | 1: 'SomeUnprocessedItems' 548 | } 549 | }); 550 | let dynamoDB = mock.dynamoDB; 551 | let dynamoDBWrapper = mock.dynamoDBWrapper; 552 | 553 | spyOn(dynamoDB, 
'batchWriteItem').and.callThrough(); 554 | 555 | await dynamoDBWrapper.batchWriteItem(params, { 556 | partitionStrategy: 'EqualItemCount', 557 | targetItemCount: 10 558 | }); 559 | 560 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(2); 561 | }); 562 | 563 | it('should return a 200 OK response if some items were unprocessed', async () => { 564 | let params = _setupBatchWriteItemParams(); 565 | let mock = _setupDynamoDBWrapper({ 566 | customResponses: { 567 | 1: 'ProvisionedThroughputExceededException', 568 | 2: 'ProvisionedThroughputExceededException', 569 | 3: 'SomeUnprocessedItems' 570 | } 571 | }); 572 | let dynamoDB = mock.dynamoDB; 573 | let dynamoDBWrapper = mock.dynamoDBWrapper; 574 | 575 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 576 | 577 | let response = await dynamoDBWrapper.batchWriteItem(params, { 578 | partitionStrategy: 'EqualItemCount', 579 | targetItemCount: 10 580 | }); 581 | 582 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 583 | expect(response.UnprocessedItems['Test'].length).toEqual(9); 584 | }); 585 | 586 | it('should throw a ProvisionedThroughputExceededException if ALL items are unprocessed', async () => { 587 | let params = _setupBatchWriteItemParams(); 588 | let mock = _setupDynamoDBWrapper({ 589 | customResponses: { 590 | 1: 'ProvisionedThroughputExceededException', 591 | 2: 'ProvisionedThroughputExceededException', 592 | 3: 'ProvisionedThroughputExceededException' 593 | } 594 | }); 595 | let dynamoDB = mock.dynamoDB; 596 | let dynamoDBWrapper = mock.dynamoDBWrapper; 597 | 598 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 599 | 600 | let exception; 601 | try { 602 | await dynamoDBWrapper.batchWriteItem(params, { 603 | partitionStrategy: 'EqualItemCount', 604 | targetItemCount: 10 605 | }); 606 | } catch (e) { 607 | exception = e; 608 | } 609 | 610 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 611 | expect(exception.code).toBe('ProvisionedThroughputExceededException'); 612 | 
expect(exception.message).toBe('The level of configured provisioned throughput for the table was exceeded. ' + 613 | 'Consider increasing your provisioning level with the UpdateTable API'); 614 | }); 615 | 616 | it('should aggregate consumed capacity (TOTAL) from multiple responses', async () => { 617 | let params = _setupBatchWriteItemParams({ 618 | returnConsumedCapacity: 'TOTAL' 619 | }); 620 | let mock = _setupDynamoDBWrapper(); 621 | let dynamoDB = mock.dynamoDB; 622 | let dynamoDBWrapper = mock.dynamoDBWrapper; 623 | 624 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 625 | 626 | let response = await dynamoDBWrapper.batchWriteItem(params, { 627 | Test: { 628 | partitionStrategy: 'EqualItemCount', 629 | targetItemCount: 4 630 | } 631 | }); 632 | 633 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 634 | expect(response.ConsumedCapacity).toEqual([ 635 | { 636 | CapacityUnits: 60, 637 | TableName: 'Test' 638 | } 639 | ]); 640 | }); 641 | 642 | it('should aggregate consumed capacity (INDEXES) from multiple responses', async () => { 643 | let params = _setupBatchWriteItemParams({ 644 | returnConsumedCapacity: 'INDEXES' 645 | }); 646 | let mock = _setupDynamoDBWrapper(); 647 | let dynamoDB = mock.dynamoDB; 648 | let dynamoDBWrapper = mock.dynamoDBWrapper; 649 | 650 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 651 | 652 | let response = await dynamoDBWrapper.batchWriteItem(params, { 653 | Test: { 654 | partitionStrategy: 'EqualItemCount', 655 | targetItemCount: 4 656 | } 657 | }); 658 | 659 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(3); 660 | expect(response.ConsumedCapacity).toEqual([ 661 | { 662 | CapacityUnits: 60, 663 | TableName: 'Test', 664 | Table: { 665 | CapacityUnits: 10 666 | }, 667 | LocalSecondaryIndexes: { 668 | MyLocalIndex: { 669 | CapacityUnits: 30 670 | } 671 | }, 672 | GlobalSecondaryIndexes: { 673 | MyGlobalIndex: { 674 | CapacityUnits: 20 675 | } 676 | } 677 | } 678 | ]); 679 | }); 680 | 681 | it('should 
aggregate consumed capacity (TOTAL) when some requests are throttled', async () => { 682 | let params = _setupBatchWriteItemParams({ 683 | returnConsumedCapacity: 'TOTAL' 684 | }); 685 | let mock = _setupDynamoDBWrapper({ 686 | customResponses: { 687 | 1: 'SomeUnprocessedItems' 688 | } 689 | }); 690 | let dynamoDB = mock.dynamoDB; 691 | let dynamoDBWrapper = mock.dynamoDBWrapper; 692 | 693 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 694 | 695 | let response = await dynamoDBWrapper.batchWriteItem(params, { 696 | partitionStrategy: 'EqualItemCount', 697 | targetItemCount: 10 698 | }); 699 | 700 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(2); 701 | expect(response.ConsumedCapacity).toEqual([ 702 | { 703 | CapacityUnits: 60, 704 | TableName: 'Test' 705 | } 706 | ]); 707 | }); 708 | 709 | it('should support batch writing to multiple tables', async () => { 710 | let params = _setupBatchWriteItemParams({ 711 | isMultipleTables: true, 712 | returnConsumedCapacity: 'INDEXES' 713 | }); 714 | let mock = _setupDynamoDBWrapper(); 715 | let dynamoDB = mock.dynamoDB; 716 | let dynamoDBWrapper = mock.dynamoDBWrapper; 717 | 718 | spyOn(dynamoDB, 'batchWriteItem').and.callThrough(); 719 | 720 | let response = await dynamoDBWrapper.batchWriteItem(params, { 721 | Test: { 722 | partitionStrategy: 'EqualItemCount', 723 | targetItemCount: 6 724 | }, 725 | AnotherTest: { 726 | partitionStrategy: 'EqualItemCount', 727 | targetItemCount: 1 728 | } 729 | }); 730 | 731 | expect(dynamoDB.batchWriteItem).toHaveBeenCalledTimes(2 + 4); 732 | expect(response.ConsumedCapacity).toEqual([ 733 | { 734 | CapacityUnits: 60, 735 | TableName: 'Test', 736 | Table: { 737 | CapacityUnits: 10 738 | }, 739 | LocalSecondaryIndexes: { 740 | MyLocalIndex: { 741 | CapacityUnits: 30 742 | } 743 | }, 744 | GlobalSecondaryIndexes: { 745 | MyGlobalIndex: { 746 | CapacityUnits: 20 747 | } 748 | } 749 | }, 750 | { 751 | CapacityUnits: 24, 752 | TableName: 'AnotherTest', 753 | Table: { 754 | 
CapacityUnits: 4 755 | }, 756 | LocalSecondaryIndexes: { 757 | MyLocalIndex: { 758 | CapacityUnits: 12 759 | } 760 | }, 761 | GlobalSecondaryIndexes: { 762 | MyGlobalIndex: { 763 | CapacityUnits: 8 764 | } 765 | } 766 | } 767 | ]); 768 | 769 | }); 770 | 771 | }); 772 | 773 | }); 774 | --------------------------------------------------------------------------------