├── core ├── LogService.js ├── AutoScaleConfig.js ├── AutoScaleConfigRepository.js └── AutoScaleService.js ├── .gitignore ├── package.json ├── handlers ├── scale-down.js ├── scale-up.js └── manage-config.js ├── README.md └── serverless.yml /core/LogService.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | class LogService { 4 | 5 | constructor() { 6 | 7 | } 8 | 9 | log() { 10 | console.log.apply(console, arguments); 11 | } 12 | } 13 | 14 | module.exports = LogService; 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .idea 3 | .ds_store 4 | *.env* 5 | coverage 6 | sandbox.js 7 | 8 | # Logs 9 | logs 10 | *.log 11 | npm-debug.log 12 | 13 | # Dependency directory 14 | node_modules 15 | 16 | # IDE 17 | **/.idea 18 | 19 | # Serverless directory 20 | .serverless 21 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dynamodb-auto-scaling", 3 | "version": "1.0.0", 4 | "description": "", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1" 7 | }, 8 | "repository": { 9 | "type": "git", 10 | "url": "https://code.trek10.com/client-management/dynamodb-auto-scaling.git" 11 | }, 12 | "author": "", 13 | "license": "ISC", 14 | "dependencies": { 15 | "moment": "^2.17.1" 16 | }, 17 | "devDependencies": { 18 | "aws-sdk": "^2.60.0" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /handlers/scale-down.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const LogService = require('../core/LogService'); 4 | const AutoScaleService = require('../core/AutoScaleService'); 5 | const AutoScaleConfigRepository = require('../core/AutoScaleConfigRepository'); 6 | 7 | const awsOptions = { region: process.env.REGION }; 8 | 9 | module.exports.index = (event, context, callback) => { 10 | const service = new AutoScaleService( 11 | awsOptions, 12 | new AutoScaleConfigRepository(awsOptions), 13 | new LogService(awsOptions) 14 | ); 15 | 16 | service.scaleDownTables() 17 | .then(context.succeed) 18 | .catch(context.fail); 19 | }; -------------------------------------------------------------------------------- /handlers/scale-up.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const LogService = require('../core/LogService'); 4 | const AutoScaleService = require('../core/AutoScaleService'); 5 | const AutoScaleConfigRepository = require('../core/AutoScaleConfigRepository'); 6 | 7 | const awsOptions = { region: process.env.REGION }; 8 | 9 | module.exports.index = (event, context, callback) => { 10 | const service = new AutoScaleService( 11 | awsOptions, 12 | new AutoScaleConfigRepository(awsOptions), 13 | new LogService(awsOptions) 14 | ); 15 | 16 | service.scaleUpTables() 17 | .then(context.succeed) 18 | .catch(context.fail); 19 | }; 20 | -------------------------------------------------------------------------------- /handlers/manage-config.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const AutoScaleConfigRepository = require('../core/AutoScaleConfigRepository.js'); 4 | const ddbOptions = { region: process.env.REGION }; 5 | 6 | 
function response(callback) { 7 | return { 8 | send: function (promise) { 9 | promise.then(data => callback(null, { 10 | statusCode: 200, 11 | body: JSON.stringify(data), 12 | })).catch(err => callback(err)); 13 | } 14 | }; 15 | } 16 | 17 | module.exports.load = (event, context, callback) => { 18 | const repository = new AutoScaleConfigRepository(ddbOptions); 19 | response(callback).send(repository.load()); 20 | }; 21 | 22 | module.exports.fetch = (event, context, callback) => { 23 | const repository = new AutoScaleConfigRepository(ddbOptions); 24 | response(callback).send(repository.fetch(event.pathParameters.tableName)); 25 | }; 26 | 27 | module.exports.save = (event, context, callback) => { 28 | const body = JSON.parse(event.body); 29 | const repository = new AutoScaleConfigRepository(ddbOptions); 30 | response(callback).send(repository.save(body)); 31 | }; 32 | 33 | module.exports.delete = (event, context, callback) => { 34 | const repository = new AutoScaleConfigRepository(ddbOptions); 35 | response(callback).send(repository.delete(event.pathParameters.tableName)); 36 | }; -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # THERE IS AN AWS OFFICIALLY SUPPORTED METHOD FOR DOING THIS! 2 | 3 | As with all good ideas in AWS, the platform eventually expanded to include it: https://aws.amazon.com/blogs/aws/new-auto-scaling-for-amazon-dynamodb/. This project may issue critical bug fixes, but should be considered deprecated. 4 | 5 | ### Prerequisites 6 | 7 | Install the [Serverless framework](https://serverless.com/) globally: 8 | 9 | `$ npm install -g serverless` 10 | 11 | ### Deployment 12 | 13 | Install node modules and deploy the serverless stack to your AWS account: 14 | 15 | ``` 16 | $ npm install 17 | $ sls deploy --stage <stage> 18 | ``` 19 | 20 | The stage flag is optional and defaults to `dev`. 21 | 22 | ### Auto-Scale Lambdas 23 | 24 | Auto-scaling lambdas are deployed with scheduled events which run *every 5 minutes for scale up* and *every 6 hours for scale down* by default. Schedule settings can be adjusted in the serverless.yml file. 25 | 26 | ### Add an API Key 27 | 28 | All API endpoints are protected by default, and the quickest way to get started is with API Gateway API keys. Create a key and give it permission to hit your stage, then pass it in the `x-api-key` header on any request to the Configuration REST API. 29 | 30 | ### Configuration REST API 31 | 32 | API Gateway endpoints are deployed for maintaining the auto-scaling configuration: 33 | 34 | `GET https://api.domain.name/ddb-auto-scale/config` - Load all available configuration records. 35 | 36 | `GET https://api.domain.name/ddb-auto-scale/config/{tableName}` - Load the configuration record for the specified DynamoDB table. 37 | 38 | `POST https://api.domain.name/ddb-auto-scale/config` - Save a configuration record for a DynamoDB table (see the [Configuration Data Structure](#config) section below). 39 | 40 | `DELETE https://api.domain.name/ddb-auto-scale/config/{tableName}` - Delete the configuration record for the specified DynamoDB table.
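For example, a configuration record could be saved and then read back with requests like the following. The table name `orders`, index name `customerId-index`, capacity values, and `<your-api-key>` placeholder are illustrative only; see the Configuration Data Structure section below for the full field reference.

```
$ curl -X POST https://api.domain.name/ddb-auto-scale/config \
    -H "x-api-key: <your-api-key>" \
    -H "Content-Type: application/json" \
    -d '{"tableName": "orders", "min": 1, "max": 200, "threshold": 0.8, "increase": 20, "indexes": [{"indexName": "customerId-index", "max": 50}]}'

$ curl -H "x-api-key: <your-api-key>" https://api.domain.name/ddb-auto-scale/config/orders
```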
41 | 42 | ### Configuration Data Structure 43 | 44 | All auto-scaling properties for a specified DynamoDB table, along with its global secondary indexes, are described by a configuration JSON object with the following structure: 45 | 46 | ``` 47 | { 48 | tableName: name of the DDB table to auto-scale (required) 49 | 50 | min: minimum capacity units (default 1) 51 | max: maximum capacity units (default 100) 52 | 53 | threshold: value which determines whether we need to scale (scale up if consumed capacity units >= provisioned capacity units * threshold, default 0.8) 54 | increase: number of capacity units by which we increase (default 10) 55 | 56 | upTimeSpan: the time span over which we analyze consumed capacity units for scale up (defaults to 5 minutes) 57 | downTimeSpan: the time span over which we analyze consumed capacity units for scale down (defaults to 30 minutes) 58 | 59 | indexes: array of index scale configurations [ 60 | indexName: an index name to auto-scale (required) 61 | 62 | ... same configuration structure as for the table ... 63 | ] 64 | } 65 | ``` 66 | 67 | -------------------------------------------------------------------------------- /serverless.yml: -------------------------------------------------------------------------------- 1 | service: dynamodb-auto-scaling 2 | 3 | custom: 4 | stage: ${opt:stage, self:provider.stage} 5 | region: ${opt:region, self:provider.region} 6 | 7 | rates: 8 | up: ${self:custom.${self:custom.stage}.rates.up, self:provider.scalingSettings.rates.up} 9 | down: ${self:custom.${self:custom.stage}.rates.down, self:provider.scalingSettings.rates.down} 10 | 11 | provider: 12 | name: aws 13 | runtime: nodejs6.10 14 | 15 | stage: dev 16 | region: us-east-1 17 | 18 | scalingSettings: 19 | rates: 20 | up: 5 minutes 21 | down: 6 hours 22 | 23 | throughputs: 24 | min: 1 25 | max: 100 26 | increase: 10 27 | threshold: 0.8 28 | 29 | analyzeTimespans: 30 | up: 5 minutes 31 | down: 30 minutes 32 | 33 | configTableName: ddb-auto-scale-config 34 | 35 | iamRoleStatements: 36 | - Effect: Allow 37 | Action: 38 | - dynamodb:* 39 | Resource: arn:aws:dynamodb:${self:custom.region}:*:table/* 40 | - Effect: Allow 41 | Action: 42 | - cloudwatch:* 43 | Resource: "*" 44 | - Effect: Allow 45 | Action: 46 | - logs:CreateLogGroup 47 | - logs:CreateLogStream 48 | - logs:PutLogEvents 49 | Resource: arn:aws:logs:${self:custom.region}:*:* 50 | 51 | environment: 52 | STAGE: ${self:custom.stage} 53 | REGION: ${self:custom.region} 54 | TROUGHPUTS_MIN: ${self:provider.scalingSettings.throughputs.min} 55 | TROUGHPUTS_MAX: ${self:provider.scalingSettings.throughputs.max} 56 | TROUGHPUTS_INCREASE: ${self:provider.scalingSettings.throughputs.increase} 57 | TROUGHPUTS_THRESHOLD: ${self:provider.scalingSettings.throughputs.threshold} 58 | ANALYZE_TIMESPAN_UP: ${self:provider.scalingSettings.analyzeTimespans.up} 59 | ANALYZE_TIMESPAN_DOWN: ${self:provider.scalingSettings.analyzeTimespans.down} 60 | CONFIG_TABLE_NAME: ${self:provider.configTableName}-${self:custom.stage} 61 | 62 | functions: 63 | scaleUp: 64 | handler: handlers/scale-up.index 65 | events: 66 | - schedule: rate(${self:custom.rates.up}) 67 | 68 | scaleDown: 69 | handler: handlers/scale-down.index 70 | events: 71 | - schedule: rate(${self:custom.rates.down}) 72 | 73 | configLoad: 74 | handler: handlers/manage-config.load 75 | events: 76 | - http: 77 | path: ddb-auto-scale/config 78 | method: GET 79 | private: true 80 | 81 | configFetch: 82 | handler: handlers/manage-config.fetch 83 | events: 84 | - http: 85 | path:
ddb-auto-scale/config/{tableName} 86 | method: GET 87 | private: true 88 | 89 | configSave: 90 | handler: handlers/manage-config.save 91 | events: 92 | - http: 93 | path: ddb-auto-scale/config 94 | method: POST 95 | private: true 96 | 97 | configDelete: 98 | handler: handlers/manage-config.delete 99 | events: 100 | - http: 101 | path: ddb-auto-scale/config/{tableName} 102 | method: DELETE 103 | private: true 104 | 105 | resources: 106 | Resources: 107 | ddbAutoScaleConfigTable: 108 | Type: AWS::DynamoDB::Table 109 | Properties: 110 | TableName: ${self:provider.configTableName}-${self:custom.stage} 111 | AttributeDefinitions: 112 | - AttributeName: tableName 113 | AttributeType: S 114 | KeySchema: 115 | - AttributeName: tableName 116 | KeyType: HASH 117 | ProvisionedThroughput: 118 | ReadCapacityUnits: 5 119 | WriteCapacityUnits: 5 120 | -------------------------------------------------------------------------------- /core/AutoScaleConfig.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const moment = require('moment'); 4 | 5 | function clamp(min, max, value) { 6 | return Math.min(max, Math.max(min, value)); 7 | } 8 | 9 | function getConfigValue(self, subConfig) { 10 | return Object.assign({}, self || {}, self[subConfig] || {}); 11 | } 12 | 13 | function timeSpanToDuration(timeSpan) { 14 | const tokens = timeSpan.split(' '); 15 | return moment.duration(parseInt(tokens[0]), tokens[1]); 16 | } 17 | 18 | const DEFAULTS = { 19 | min: parseInt(process.env.TROUGHPUTS_MIN), 20 | max: parseInt(process.env.TROUGHPUTS_MAX), 21 | 22 | increase: parseInt(process.env.TROUGHPUTS_INCREASE), 23 | threshold: parseFloat(process.env.TROUGHPUTS_THRESHOLD), 24 | 25 | upTimeSpan: process.env.ANALYZE_TIMESPAN_UP, 26 | downTimeSpan: process.env.ANALYZE_TIMESPAN_DOWN, 27 | 28 | get upDuration() { 29 | return timeSpanToDuration(this.upTimeSpan); 30 | }, 31 | 32 | get downDuration() { 33 | return timeSpanToDuration(this.downTimeSpan); 34 | }, 35 | 36 | scaleUp(consumedUnits, provisionedUnits, subConfig) { 37 | const config = getConfigValue(this, subConfig); 38 | if (config.isDisabled) return provisionedUnits; 39 | 40 | const result = consumedUnits < provisionedUnits * config.threshold ? provisionedUnits : 41 | Math.max(provisionedUnits + config.increase, consumedUnits + config.increase); 42 | 43 | return clamp(config.min, config.max, result); 44 | }, 45 | 46 | scaleDown(consumedUnits, provisionedUnits, subConfig) { 47 | const config = getConfigValue(this, subConfig); 48 | if (config.isDisabled) return provisionedUnits; 49 | 50 | const result = (consumedUnits / config.threshold) - config.increase; 51 | return clamp(config.min, config.max, result); 52 | } 53 | }; 54 | 55 | class AutoScaleConfig { 56 | 57 | static create(rawData) { 58 | return rawData ? new AutoScaleConfig(rawData) : null; 59 | } 60 | 61 | /** 62 | * 63 | * @param rawData - config raw data 64 | */ 65 | constructor(rawData) { 66 | delete rawData.upDuration; 67 | delete rawData.downDuration; 68 | Object.assign(this, DEFAULTS, rawData); 69 | 70 | // table name is required 71 | if (!this.tableName) throw new Error('Table name is required'); 72 | 73 | // build indexes configuration 74 | this.indexes = Array.isArray(rawData.indexes) ? 
75 | 76 | // indexes can be supplied as array 77 | rawData.indexes.filter(_ => _.indexName).reduce((indexes, index) => { 78 | delete index.upDuration; 79 | delete index.downDuration; 80 | indexes[index.indexName] = Object.assign({}, DEFAULTS, index); 81 | return indexes; 82 | }, {}) : 83 | 84 | // or as dictionary object 85 | Object.getOwnPropertyNames(rawData.indexes || {}).reduce((indexes, indexName) => { 86 | delete rawData.indexes[indexName].upDuration; 87 | delete rawData.indexes[indexName].downDuration; 88 | indexes[indexName] = Object.assign({}, DEFAULTS, rawData.indexes[indexName]); 89 | return indexes; 90 | }, {}); 91 | } 92 | 93 | scale(indexName) { 94 | const self = this; 95 | 96 | const dir = direction => ({ 97 | method: subConfigName => ({ 98 | from: provisionedUnits => ({ 99 | to: consumedUnits => (self.indexes[indexName] || self)['scale' + direction](consumedUnits, provisionedUnits, subConfigName) 100 | }) 101 | }) 102 | }); 103 | 104 | return { 105 | get up() { return dir('Up'); }, 106 | get down() { return dir('Down'); } 107 | }; 108 | } 109 | } 110 | 111 | module.exports = AutoScaleConfig; -------------------------------------------------------------------------------- /core/AutoScaleConfigRepository.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const aws = require('aws-sdk'); 4 | const AutoScaleConfig = require('./AutoScaleConfig'); 5 | 6 | let AUTO_SCALE_CONFIG_CACHE = null; 7 | 8 | /** 9 | * Repository class to access configuration data 10 | */ 11 | class AutoScaleConfigRepository { 12 | 13 | /** 14 | * create instance of DB auto scale config repository 15 | * @param ddbOptions - options of DynamoDB 16 | * @param configTableName - table name which stores auto scale config data 17 | */ 18 | constructor(ddbOptions, configTableName) { 19 | //assign config table name and make it required 20 | this.tableName = configTableName || process.env.CONFIG_TABLE_NAME; 21 | if (!this.tableName) throw new Error('Config table name is required'); 22 | 23 | //create document DB instance 24 | this.db = new aws.DynamoDB.DocumentClient(ddbOptions || {}); 25 | } 26 | 27 | /** 28 | * load all config items from database 29 | * @returns {Promise.<[AutoScaleConfig]>} 30 | */ 31 | load() { 32 | // return from cache if we have cached values 33 | if (AUTO_SCALE_CONFIG_CACHE && AUTO_SCALE_CONFIG_CACHE.length) 34 | return Promise.resolve(AUTO_SCALE_CONFIG_CACHE); 35 | 36 | return this.db.scan({ 37 | TableName: this.tableName 38 | }).promise().then(result => { 39 | AUTO_SCALE_CONFIG_CACHE = result.Items.map(AutoScaleConfig.create); 40 | return AUTO_SCALE_CONFIG_CACHE; 41 | }); 42 | } 43 | 44 | /** 45 | * return config item for specified table 46 | * @param tableName - table name to get config for 47 | * @returns {Promise.} 48 | */ 49 | fetch(tableName) { 50 | // return from cache if we have cached values 51 | if (AUTO_SCALE_CONFIG_CACHE && AUTO_SCALE_CONFIG_CACHE.length) 52 | return Promise.resolve(AUTO_SCALE_CONFIG_CACHE.find(x => x.tableName === tableName)); 53 | 54 | return this.db.query({ 55 | TableName: this.tableName, 56 | KeyConditionExpression: 'tableName = :tableName', 57 | ExpressionAttributeValues: { ':tableName': tableName } 58 | }).promise().then(result => AutoScaleConfig.create(result.Items[0])); 59 | } 60 | 61 | /** 62 | * save config item to database 63 | * @param config - config item to be saved 64 | * @returns {Promise} 65 | */ 66 | save(config) { 67 | if (config.constructor.name === 'Object') 68 | config = 
AutoScaleConfig.create(config); 69 | 70 | // save config data as plain JSON object 71 | const item = Object.assign({}, config); 72 | delete item.downDuration; 73 | delete item.upDuration; 74 | 75 | if (item.indexes) Object.getOwnPropertyNames(item.indexes).forEach(indexName => { 76 | delete item.indexes[indexName].downDuration; 77 | delete item.indexes[indexName].upDuration; 78 | }); 79 | 80 | return this.db.put({ 81 | TableName: this.tableName, 82 | Item: item 83 | }).promise().then(() => { 84 | // empty cache 85 | AUTO_SCALE_CONFIG_CACHE = null; 86 | return true; 87 | }); 88 | } 89 | 90 | /** 91 | * delete config item for specified table 92 | * @param tableName - table name to delete config for 93 | * @returns {Promise} 94 | */ 95 | delete(tableName) { 96 | return this.db.delete({ 97 | TableName: this.tableName, 98 | Key: { tableName: tableName } 99 | }).promise().then(() => { 100 | // remove item from cache 101 | if (!AUTO_SCALE_CONFIG_CACHE || !AUTO_SCALE_CONFIG_CACHE.length) return true; 102 | const remove = AUTO_SCALE_CONFIG_CACHE.find(x => x.tableName === tableName); 103 | const removeIndex = AUTO_SCALE_CONFIG_CACHE.indexOf(remove); 104 | AUTO_SCALE_CONFIG_CACHE.splice(removeIndex, 1); 105 | return true; 106 | }); 107 | } 108 | } 109 | 110 | module.exports = AutoScaleConfigRepository; -------------------------------------------------------------------------------- /core/AutoScaleService.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const aws = require('aws-sdk'); 4 | const moment = require('moment'); 5 | 6 | function getAttribute(tableDescription, attribute, indexName) { 7 | return !indexName ? 8 | 9 | tableDescription[attribute.replace('?', 'Table')] : 10 | 11 | tableDescription.GlobalSecondaryIndexes.find( 12 | i => i.IndexName === indexName 13 | )[attribute.replace('?', 'Index')]; 14 | } 15 | 16 | class AutoScaleService { 17 | 18 | /** 19 | * create instance of DB auto scaler 20 | * @param awsOptions - AWS configuration options 21 | * @param configRepository - table scaling config repository 22 | * @param logService - logging service 23 | */ 24 | constructor(awsOptions, configRepository, logService) { 25 | this.db = new aws.DynamoDB(awsOptions || {}); 26 | this.cw = new aws.CloudWatch(awsOptions || {}); 27 | 28 | this.logger = logService; 29 | if (!this.logger) throw new Error('logService is required'); 30 | 31 | this.configRepository = configRepository; 32 | if (!this.configRepository) throw new Error('configRepository is required'); 33 | 34 | this.scaleUpTable = this.scaleUpTable.bind(this); 35 | this.scaleDownTable = this.scaleDownTable.bind(this); 36 | } 37 | 38 | /** 39 | * scale all configured tables up 40 | * @returns {Promise<*>} 41 | */ 42 | scaleUpTables() { 43 | this.logger.log('SCALE UP STARTED...'); 44 | 45 | // load all tables from configuration and scale up 46 | return this.configRepository.load() 47 | .then(tables => Promise.all(tables.map(this.scaleUpTable))) 48 | .then(res => this.logger.log('SCALE UP SUCCEEDED.')) 49 | .catch(err => this.logger.log('SCALE UP FAILED!', err)); 50 | } 51 | 52 | /** 53 | * scale all configured tables down 54 | * @returns {Promise<*>} 55 | */ 56 | scaleDownTables() { 57 | this.logger.log('SCALE DOWN STARTED...'); 58 | 59 | // load all tables from configuration and scale down 60 | return this.configRepository.load() 61 | .then(tables => Promise.all(tables.map(this.scaleDownTable))) 62 | .then(res => this.logger.log('SCALE DOWN SUCCEEDED.')) 63 | .catch(err => 
this.logger.log('SCALE DOWN FAILED!', err)); 64 | } 65 | 66 | /** 67 | * scale specified table up 68 | * @param config - table auto-scaling configuration 69 | * @returns {Promise<*>} 70 | */ 71 | scaleUpTable(config) { 72 | this.logger.log('SCALE UP TABLE:', config); 73 | 74 | return this.scaleTable(config, 'up') 75 | .then(res => this.logger.log('SCALE UP TABLE SUCCEEDED.', res)) 76 | .catch(err => this.logger.log('SCALE UP TABLE FAILED!', err)); 77 | } 78 | 79 | /** 80 | * scale specified table down 81 | * @param config - table auto-scaling configuration 82 | * @returns {Promise<*>} 83 | */ 84 | scaleDownTable(config) { 85 | this.logger.log('SCALE DOWN TABLE:', config); 86 | 87 | return this.scaleTable(config, 'down') 88 | .then(res => this.logger.log('SCALE DOWN TABLE SUCCEEDED.', res)) 89 | .catch(err => this.logger.log('SCALE DOWN TABLE FAILED!', err)); 90 | } 91 | 92 | /** 93 | * perform scale for specified table auto-scale config 94 | * @param config - table auto-scaling configuration 95 | * @param direction - scale direction 96 | * @returns {Promise<*>} 97 | */ 98 | scaleTable(config, direction) { 99 | this.logger.log('Getting table schema:', config.tableName); 100 | const analyzeDuration = config[direction + 'Duration']; 101 | const shared = {}; 102 | 103 | // get table schema from DDB 104 | return this.getTableSchema(config.tableName).then(table => { 105 | shared.table = table; 106 | // get get consumed capacity units for table for time period specified in configuration 107 | return this.getConsumedCapacityUnits().forTable(table).forTimespan(analyzeDuration); 108 | }).then(metrics => { 109 | const table = shared.table; 110 | 111 | // prepare table update request 112 | const tableUpdateRequest = { 113 | TableName: table.TableName, 114 | 115 | ProvisionedThroughput: { 116 | ReadCapacityUnits: 0, 117 | WriteCapacityUnits: 0 118 | }, 119 | 120 | // include global secondary indexes scale from configuration 121 | GlobalSecondaryIndexUpdates: (table.GlobalSecondaryIndexes || []).map(index => ({ 122 | Update: { 123 | IndexName: index.IndexName, 124 | ProvisionedThroughput: { 125 | ReadCapacityUnits: 0, 126 | WriteCapacityUnits: 0 127 | } 128 | } 129 | })) 130 | }; 131 | 132 | // update counters to determine if we 133 | // need to run table update operation 134 | const updateCounts = { 135 | total: 0, 136 | table: 0, 137 | indexes: { } 138 | }; 139 | 140 | // loop through consumed capacity metrics 141 | metrics.forEach(point => { 142 | this.logger.log('Received %s metric for %s:%s: Sum: %d Per second: %d', 143 | point.metricName, 144 | point.tableName, 145 | point.indexName || 'No index', 146 | point.Sum || 0, 147 | (point.Sum || 0) / analyzeDuration.asSeconds() 148 | ); 149 | 150 | // skip table/indexes that are not active 151 | const status = getAttribute(table, '?Status', point.indexName); 152 | if (status !== 'ACTIVE') return this.logger.log( 153 | `${point.tableName}:${point.indexName} is not ACTIVE, skipping...` 154 | ); 155 | 156 | // don't scale down if we just scaled up 157 | if (direction === 'down') { 158 | const lastIncreaseDateTime = getAttribute(table, 'ProvisionedThroughput', point.indexName).LastIncreaseDateTime; 159 | const lastIncrease = lastIncreaseDateTime ? 
moment(lastIncreaseDateTime) : null; 160 | 161 | if (lastIncrease && lastIncrease > moment().subtract(analyzeDuration)) 162 | return this.logger.log(`${point.tableName}:${point.indexName} was just scaled up, skipping...`); 163 | } 164 | 165 | // determine is it Read or Write metrics 166 | const method = point.metricName.replace('Consumed', '').replace('CapacityUnits', '').toLowerCase(); 167 | 168 | // get provisioned throughput for table or index 169 | const provisionedThroughput = (point.indexName ? 170 | table.GlobalSecondaryIndexes.find(_ => _.IndexName === point.indexName) : table 171 | ).ProvisionedThroughput; 172 | 173 | // get pointer to target throughput property 174 | const targetThroughput = (point.indexName ? 175 | tableUpdateRequest.GlobalSecondaryIndexUpdates.find(_ => _.Update.IndexName === point.indexName).Update : tableUpdateRequest 176 | ).ProvisionedThroughput; 177 | 178 | // add update counter for index 179 | if (point.indexName && !updateCounts.indexes[point.indexName]) 180 | updateCounts.indexes[point.indexName] = 0; 181 | 182 | // calculate target throughput units based on scale direction 183 | const unitsField = point.metricName.replace('Consumed', ''); 184 | const provisioned = provisionedThroughput[unitsField]; 185 | const consumed = (point.Sum || 0) / analyzeDuration.asSeconds(); 186 | const newProvisioned = Math.floor(config.scale(point.indexName)[direction].method(method).from(provisioned).to(consumed)); 187 | 188 | targetThroughput[unitsField] = newProvisioned; 189 | 190 | // log and increment counters if value changed 191 | if (newProvisioned !== provisioned) { 192 | updateCounts.total++; 193 | if (!point.indexName) updateCounts.table++; 194 | else updateCounts.indexes[point.indexName]++; 195 | 196 | // log results 197 | this.logger.log('Scaling: %j', { 198 | table: config.tableName, 199 | indexName: point.indexName, 200 | parameter: unitsField, 201 | action: 'SCALE ' + direction.toUpperCase(), 202 | consumed: consumed, 203 | provisioned: provisioned, 204 | scaled: newProvisioned 205 | }); 206 | } 207 | }); 208 | 209 | // remove unchanged sections from update request 210 | if (!updateCounts.table) delete tableUpdateRequest.ProvisionedThroughput; 211 | 212 | tableUpdateRequest.GlobalSecondaryIndexUpdates = tableUpdateRequest.GlobalSecondaryIndexUpdates 213 | .filter(index => updateCounts.indexes[index.Update.IndexName]); 214 | 215 | if (!tableUpdateRequest.GlobalSecondaryIndexUpdates.length) 216 | delete tableUpdateRequest.GlobalSecondaryIndexUpdates; 217 | 218 | // send table update request if we have updates 219 | return updateCounts.total === 0 ? 
Promise.resolve(table) : 220 | this.db.updateTable(tableUpdateRequest).promise().then(res => { 221 | return res.TableDescription; 222 | }); 223 | }); 224 | } 225 | 226 | /** 227 | * get table schema details 228 | * @param tableName - table name to load data for 229 | * @returns {*} 230 | */ 231 | getTableSchema(tableName) { 232 | return this.db.describeTable({ TableName: tableName }) 233 | .promise().then(res => res.Table); 234 | } 235 | 236 | /** 237 | * get consumed capacity units resolver 238 | * @param metrics - metrics to be resolved 239 | * @returns {*} 240 | */ 241 | getConsumedCapacityUnits(metrics) { 242 | const self = this; 243 | 244 | // get both read and write if missed 245 | if (!metrics) metrics = [ 246 | 'ConsumedReadCapacityUnits', 247 | 'ConsumedWriteCapacityUnits' 248 | ]; 249 | 250 | // make sure we always receive list of metrics 251 | if (typeof metrics === 'string') metrics = [metrics]; 252 | 253 | return { 254 | forTable(nameOrSchema) { 255 | return { 256 | forTimespan(timeSpanDuration) { 257 | const resolveTableSchema = typeof nameOrSchema === 'string' ? 258 | self.getTableSchema(nameOrSchema) : Promise.resolve(nameOrSchema); 259 | 260 | const endDate = moment(); 261 | const startDate = moment().subtract(timeSpanDuration); 262 | 263 | return resolveTableSchema.then(tableSchema => { 264 | // create get metrics request object 265 | const getRequest = (metricName, indexName) => { 266 | const result = { 267 | MetricName: metricName, 268 | Namespace: 'AWS/DynamoDB', 269 | 270 | Statistics: ['Average', 'Sum', 'Minimum', 'Maximum'], 271 | Unit: 'Count', 272 | 273 | Period: timeSpanDuration.asSeconds(), 274 | StartTime: startDate.toDate(), 275 | EndTime: endDate.toDate(), 276 | 277 | Dimensions: [{ 278 | Name: 'TableName', 279 | Value: tableSchema.TableName 280 | }] 281 | }; 282 | 283 | // add index name if exists 284 | if (indexName) result.Dimensions.push({ 285 | Name: 'GlobalSecondaryIndexName', 286 | Value: indexName 287 | }); 288 | 289 | return result; 290 | }; 291 | 292 | const getTableMetricsRequests = []; 293 | 294 | // build requests for supplied all metrics 295 | metrics.forEach(metricName => { 296 | // push table request for metric 297 | getTableMetricsRequests.push( 298 | getRequest(metricName) 299 | ); 300 | 301 | // push index requests for metric 302 | (tableSchema.GlobalSecondaryIndexes || []).forEach(index => getTableMetricsRequests.push( 303 | getRequest(metricName, index.IndexName) 304 | )) 305 | }); 306 | 307 | return Promise.all(getTableMetricsRequests.map(request => { 308 | return self.cw.getMetricStatistics(request).promise().then(response => { 309 | const data = response.Datapoints[0] || {}; 310 | 311 | // set metric and table names for data 312 | data.metricName = request.MetricName; 313 | data.tableName = request.Dimensions[0].Value; 314 | 315 | // set index name for data if it is for index 316 | if (request.Dimensions.length > 1) 317 | data.indexName = request.Dimensions[1].Value; 318 | 319 | return data; 320 | }); 321 | })); 322 | }); 323 | } 324 | } 325 | } 326 | }; 327 | } 328 | } 329 | 330 | module.exports = AutoScaleService; --------------------------------------------------------------------------------