├── .gitignore ├── CloudWatch_SaveAsCSV.jpg ├── LICENSE ├── README.md ├── aws-credentials.ts ├── burst-bucket.test.ts ├── burst-bucket.ts ├── cloudwatch-opener.ts ├── csv-ingestion.ts ├── ddb-sim.test.ts ├── ddb-sim.ts ├── example_main.ts ├── index.html ├── index.ts ├── jest.config.js ├── normalize.css ├── optimization-worker.ts ├── package-lock.json ├── package.json ├── plotting.ts ├── pricing.ts ├── scripts │   ├── dump-stats-for-region.ts │   ├── dump-table-details-for-region.ts │   ├── make-csv-for-table.sh │   ├── make-optimize-report-for-region.ts │   ├── make-updated-optimized-cost-report-for-region.ts │   └── print-table-names.sh ├── skeleton.css ├── table-consumption-fetcher.ts ├── tsconfig.json └── typings └── optimization-js └── index.d.ts /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .swp 3 | .parcel-cache/ 4 | .vscode/ 5 | *.csv 6 | dist/ 7 | node_modules/ 8 | xx* 9 | -------------------------------------------------------------------------------- /CloudWatch_SaveAsCSV.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gabehollombe/dynamodb-scaling-simulator/1e9298e0ec930687400dd57c5a7c64581c88fe35/CloudWatch_SaveAsCSV.jpg -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Gabe Hollombe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## This is Beta software. 3 | 4 | ## The tool might have bugs, but the info it provides has already proven helpful in its current state, even if it's not yet the best it can be. 5 | 6 | # DynamoDB Scaling Simulator 7 | 8 | ## What's this? 9 | This repo contains a tool to help you simulate how a provisioned-capacity DynamoDB table will perform (will it throttle requests or not?) under different auto-scaling configurations. 10 | 11 | It will also try to calculate the best config for you that results in the lowest cost and no throttles. 12 | 13 | ## How do I use it? 14 | 1. Clone this repo 15 | 16 | 1. Ensure you have Node.js and NPM installed 17 | 18 | 1. Run `npm install` to get the dependencies 19 | 20 | 1. Launch the GUI via `npm start` 21 | 22 | 1. 
Look for the URL where Parcel is hosting the build and open it in your browser. Defaults to http://localhost:1234 23 | 24 | 1. Follow the instructions in the GUI. Use the first form to get data from CloudWatch, then the second form to configure scaling simulator settings and generate graphs and recommended optimal scaling configs for reads and writes. 25 | 26 | 1. If you want to iterate on your own guessed configurations, check the graph for any simulated throttle events. Throttling is probably what you want to avoid. 27 | 28 | ## How does it work? 29 | This tool... 30 | 1. Uses historic CloudWatch metrics data for the table, for the configured time range (`ConsumedReadCapacityUnits`, `ConsumedWriteCapacityUnits`, `ReadThrottleEvents`, `WriteThrottleEvents`) 31 | 32 | 2. Instantiates a new simulated table with a given auto-scaling config (min capacity, max capacity, target utilization). 33 | 34 | 3. Calculates the average per-second demand based on each minute's total read/write demand for the table (summing Consumed + Throttled metrics for reads and writes, respectively) 35 | 36 | 4. Feeds each minute's average per-second demand into the simulated table 37 | 38 | 5. Records the results of serving that minute of demand (amount of capacity successfully consumed, amount of requested capacity that was throttled) 39 | 40 | 6. Graphs these metrics and calculates the average daily cost for your scaling config 41 | 42 | 7. Also attempts to 'solve' for an optimized scaling config that results in no throttles and has the lowest price. 43 | 44 | 45 | ## Important Caveats. This tool... 46 | - Does not consider hot partitions as a reason to throttle. It only compares total capacity available vs. requested. 47 | 48 | 49 | - Gets CloudWatch data at the minute level, and we simulate by calculating the average demand per second for that minute (total demand for the minute / 60). This means that if there was a 'micro burst' of a few seconds of super high demand on the table and the rest of the minute was relatively quiet, we'll see a low average here. In reality, some of that 'micro burst' should be throttled in the simulator, but we can't simulate at the second level of granularity because we lack the data. 50 | 51 | So just be aware that **while this simulation is still pretty helpful, it's not anything close to a promise of what actually happened**. 52 | 53 | - Won't give useful data if your total per-minute summed RCUs or WCUs are really low (less than 60). This is because, as the point immediately above mentions, we export the total RCU/WCU sum per minute from CloudWatch but then divide that number by 60 to calculate an average RCU/WCU per second, since this is what the tool simulates on. Low-usage tables will end up with their average-per-second values rounded down to 0. The good news is that if your usage is that low, it doesn't _really_ matter whether you optimize your DDB scaling config, because you're not spending more than a few cents a day either way. 54 | 55 | - Correctly simulates scaling up (when the last 2 minutes of usage are above the target utilization) and down (when the last 15 minutes are all at a utilization that is at least 20% lower than the target utilization), according to [this knowledge base article](https://aws.amazon.com/premiumsupport/knowledge-center/dynamodb-auto-scaling/). However, it requires you to configure how long of a delay you want to simulate between when the table _wants_ to scale and when that scaling event actually occurs. (A small usage sketch follows below.) 
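  A minimal sketch of exercising these scaling rules through the repo's `TableCapacity` class (the demand numbers and timestamps are made up for illustration; assumes you run it from the repo root, e.g. with `npx ts-node`):

  ```ts
  import { TableCapacity } from './ddb-sim'
  import dayjs from 'dayjs'

  // 100 units provisioned to start (the min), 50% target utilization, 2-minute scaling delay
  const sim = new TableCapacity({ min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 120 })

  let t = dayjs('2023-01-02T00:00:00.000Z')
  for (const minuteTotal of [5400, 5400, 5400, 5400, 5400]) { // per-minute Consumed + Throttled sums
    const perSecondAvg = minuteTotal / 60 // 90 units/sec, i.e. 90% utilization of the initial 100
    const { consumedCapacity, throttled, burstAvailable } = sim.process(t.valueOf(), perSecondAvg)
    console.log(t.toISOString(), sim.capacity, consumedCapacity, throttled, burstAvailable)
    t = t.add(1, 'minute')
  }
  // After two consecutive ticks above the 0.5 target, a scale-up to 90 / 0.5 = 180 units is
  // scheduled; it's realized once the 120-second scaling delay has elapsed.
  ```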
56 | 57 | - Doesn't know DDB's exact scaling algorithm, so it does something simple and just sets the new capacity to the most recently requested amount divided by the target utilization (e.g. a request for 150 units at a 0.5 target yields a new capacity of 300). 58 | 59 | DDB's real scaling algorithm _probably_ scales up more aggressively if it also sees recent throttles; this tool doesn't model that. 60 | 61 | - Currently only simulates the default scaledown limits (4 in the 1st hour, 1 in each additional hour) 62 | 63 | ## TODOs 64 | - [ ] Try calculating scaledown targets by looking at the last 60 minute average rather than the most recent requested amount (this may be more like what DDB actually does?) 65 | 66 | - [ ] Show warning if user tries to use < 20% or > 90% target utilization (DDB only supports values inside this range) 67 | 68 | - [ ] Show Standard and Infrequent Access costs (don't just assume Standard) 69 | 70 | - [ ] Integrate pricing API instead of asking users to put the prices in 71 | 72 | ## Ideas 73 | 74 | - Show a side-by-side view in the GUI between actual vs simulated 75 | 76 | - Let the tool look at _all_ of your tables in an account, decide on 'optimal' scaling configs for each table, and calculate potential total cost savings 😀 77 | -------------------------------------------------------------------------------- /aws-credentials.ts: -------------------------------------------------------------------------------- 1 | import { AssumeRoleCommand, AssumeRoleCommandOutput, Credentials } from "@aws-sdk/client-sts"; 2 | import { STSClient } from "@aws-sdk/client-sts"; 3 | import { fromIni } from "@aws-sdk/credential-providers"; 4 | import { AwsCredentialIdentity } from "@aws-sdk/types"; 5 | 6 | export async function getCredentialsFromAssumingRole(region: string, profile: string, roleArn: string): Promise<AwsCredentialIdentity> { 7 | const client = new STSClient({ region, credentials: fromIni({ profile }) }) 8 | const command = new AssumeRoleCommand({ 9 | RoleArn: roleArn, 10 | RoleSessionName: "session1", 11 | DurationSeconds: 60 * 60, 12 | }) 13 | const response: AssumeRoleCommandOutput = await client.send(command) 14 | return { 15 | accessKeyId: response.Credentials?.AccessKeyId!, 16 | secretAccessKey: response.Credentials?.SecretAccessKey!, 17 | sessionToken: response.Credentials?.SessionToken!, 18 | } 19 | } -------------------------------------------------------------------------------- /burst-bucket.test.ts: -------------------------------------------------------------------------------- 1 | import { BurstBuckets } from './burst-bucket'; 2 | 3 | describe('BurstBuckets', () => { 4 | let burstBuckets: BurstBuckets; 5 | 6 | beforeEach(() => { 7 | burstBuckets = new BurstBuckets(3); 8 | }); 9 | 10 | it('should add numbers to buckets in a sliding fashion', () => { 11 | burstBuckets.add(1); 12 | burstBuckets.add(2); 13 | expect(burstBuckets.buckets).toEqual([1, 2, 0]); 14 | burstBuckets.add(3); 15 | burstBuckets.add(4); 16 | expect(burstBuckets.buckets).toEqual([4, 2, 3]); 17 | }); 18 | 19 | it('should consume numbers from buckets', () => { 20 | burstBuckets.add(1) 21 | burstBuckets.add(3) 22 | burstBuckets.add(5) 23 | expect(burstBuckets.buckets).toEqual([1, 3, 5]); 24 | expect(burstBuckets.sum()).toEqual(9) 25 | 26 | burstBuckets.consume(2); 27 | expect(burstBuckets.buckets).toEqual([0, 2, 5]); 28 | expect(burstBuckets.sum()).toEqual(7) 29 | 30 | burstBuckets.add(1) 31 | burstBuckets.add(3) 32 | burstBuckets.add(5) 33 | expect(burstBuckets.sum()).toEqual(9) 34 | burstBuckets.consume(9); 35 | expect(burstBuckets.buckets).toEqual([0, 0, 0]); 36 
| }) 37 | 38 | it('consumes across buckets starting from the current index', () => { 39 | let b = new BurstBuckets(5) 40 | b.buckets = [ 41 | 1.0, 42 | 2.0, 43 | 3.0, 44 | 4.0, 45 | 5.0 46 | ] 47 | b.currentIndex = 2 48 | 49 | expect(b.sum()).toEqual(15.0) 50 | b.consume(15.0) 51 | expect(b.sum()).toEqual(0) 52 | }) 53 | 54 | it('should throw an error when consuming more than capacity', () => { 55 | burstBuckets.add(1) 56 | burstBuckets.add(2) 57 | burstBuckets.add(3) 58 | expect(() => burstBuckets.consume(7)).toThrowError('Not enough burst capacity!'); 59 | }); 60 | 61 | it('should sum the numbers in the buckets', () => { 62 | burstBuckets.add(1) 63 | burstBuckets.add(2) 64 | burstBuckets.add(3) 65 | expect(burstBuckets.sum()).toEqual(6); 66 | }); 67 | }); 68 | -------------------------------------------------------------------------------- /burst-bucket.ts: -------------------------------------------------------------------------------- 1 | export class BurstBuckets { 2 | public buckets: number[]; 3 | public currentIndex: number; 4 | 5 | constructor(private capacity: number) { 6 | this.buckets = Array(capacity).fill(0); 7 | this.currentIndex = 0; 8 | } 9 | 10 | public add(num: number): void { 11 | this.buckets[this.currentIndex] = num; 12 | this.currentIndex = (this.currentIndex + 1) % this.capacity; 13 | } 14 | 15 | public consume(amount: number): void { 16 | let remainingAmount = amount; 17 | let i = 0 18 | while (remainingAmount > 0 && i < this.capacity) { 19 | const bucketIndex = (this.currentIndex + i) % this.capacity; 20 | const bucket = this.buckets[bucketIndex]; 21 | 22 | if (bucket >= remainingAmount) { 23 | this.buckets[bucketIndex] = bucket - remainingAmount 24 | remainingAmount = 0 25 | } else { 26 | remainingAmount -= this.buckets[bucketIndex] 27 | this.buckets[bucketIndex] = 0 28 | } 29 | 30 | i++ 31 | } 32 | 33 | if (remainingAmount > 0) { 34 | throw new Error("Not enough burst capacity!"); 35 | } 36 | } 37 | 38 | 39 | public sum(): number { 40 | return this.buckets.reduce((total, num) => total + num, 0); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /cloudwatch-opener.ts: -------------------------------------------------------------------------------- 1 | import {spawn} from 'child_process' 2 | 3 | export function getCloudWatchUrl(region: string, table_name: string): string { 4 | return `https://${region}.console.aws.amazon.com/cloudwatch/home?region=${region}#metricsV2:graph=~(metrics~(~(~'AWS*2fDynamoDB~'ProvisionedReadCapacityUnits~'TableName~'${table_name}~(stat~'Average~label~'ProvisionedRCUAvg))~(~'.~'ConsumedReadCapacityUnits~'.~'.~(stat~'Sum~label~'ConsumedRCUSum))~(~'.~'ProvisionedWriteCapacityUnits~'.~'.~(stat~'Average~label~'ProvisionedWCUAvg))~(~'.~'ConsumedWriteCapacityUnits~'.~'.~(stat~'Sum~label~'ConsumedWCUSum))~(~'.~'ReadThrottleEvents~'.~'.~(label~'ReadThrottlesSum~stat~'Sum))~(~'.~'WriteThrottleEvents~'.~'.~(label~'WriteThrottlesSum~stat~'Sum)))~title~'Provisioned*2c*20Consumed*2c*20Throttled~view~'timeSeries~stacked~false~region~'${region}~period~60~yAxis~(left~(showUnits~false)))` 5 | } 6 | 7 | export async function openCloudwatchGraph(region: string, table_name: string): Promise<void> { 8 | let url = getCloudWatchUrl(region, table_name) 9 | let start = (process.platform == 'darwin'? 'open': process.platform == 'win32'? 
'start': 'xdg-open'); 10 | spawn(start, [url]) 11 | } 12 | 13 | // // Usage: openCloudwatchGraph(region, tableName) 14 | // if (!process.env.REGION || !process.env.TABLE_NAME) { 15 | // console.log('REGION and TABLE_NAME must be passed to this script as env vars') 16 | // } 17 | // else { 18 | // openCloudwatchGraph(process.env.REGION as string, process.env.TABLE_NAME as string) 19 | // console.log('Opened CloudWatch graph in a browser window.') 20 | // console.log(`Select "Actions -> Download as .csv" and save to "data.csv" in this same directory: ${__dirname}`) 21 | // console.log(`Then edit example_main_csv.ts in this directory to config scaling values.`) 22 | // console.log(`Then run "npx ts-node example_main_csv.ts" to get your graph.`) 23 | // } -------------------------------------------------------------------------------- /csv-ingestion.ts: -------------------------------------------------------------------------------- 1 | import { SimTimestepInput } from "./plotting"; 2 | 3 | function round(s: string): number { 4 | if (s == ''){ 5 | return 0 6 | } 7 | else { 8 | return Math.round(parseFloat(s)) 9 | } 10 | } 11 | 12 | export function csvRowToMetrics(r: string[]) { 13 | // cols are: datetime, provisioned read avg, consumed read, provisioned write avg, consumed write, read throttles, write throttles 14 | const timestamp = r[0].replace(/\//g, "-",).replace(" ", "T").replace(/00$/, "00.000Z") 15 | return { 16 | timestamp: new Date(Date.parse(timestamp)), 17 | provisionedRead: round(r[1]), 18 | consumedRead: round(r[2]), 19 | provisionedWrite: round(r[3]), 20 | consumedWrite: round(r[4]), 21 | throttledReads: round(r[5]), 22 | throttledWrites: round(r[6]) 23 | } 24 | } 25 | 26 | export function arrayToMetricsRecords(arr: string[][]): { timestamp: Date; provisionedRead: number; consumedRead: number; provisionedWrite: number; consumedWrite: number; throttledReads: number; throttledWrites: number }[] { 27 | return arr.map(csvRowToMetrics) 28 | } 29 | 30 | export function makeRecordsForSimulator(records: any[]): { readRecords: SimTimestepInput[], writeRecords: SimTimestepInput[] } { 31 | const readRecords = records.map(r => { return { timestamp: r.timestamp, consumed: r.consumedRead, throttled: r.throttledReads } }) 32 | const writeRecords = records.map(r => { return { timestamp: r.timestamp, consumed: r.consumedWrite, throttled: r.throttledWrites } }) 33 | return { readRecords, writeRecords } 34 | } -------------------------------------------------------------------------------- /ddb-sim.test.ts: -------------------------------------------------------------------------------- 1 | import {describe, expect, test} from '@jest/globals'; 2 | import { RingBuffer } from 'ring-buffer-ts'; 3 | import { BurstBuckets } from './burst-bucket'; 4 | import { TableCapacity, TableCapacityConfig } from './ddb-sim'; 5 | import dayjs from 'dayjs' 6 | 7 | describe('TableCapacity', () => { 8 | it('should initialize with correct properties', () => { 9 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 10 | const tableCapacity = new TableCapacity(config); 11 | 12 | expect(tableCapacity.config).toBe(config); 13 | expect(tableCapacity.capacity).toBe(config.min); 14 | expect(tableCapacity.burst_buckets).toBeInstanceOf(BurstBuckets); 15 | expect(tableCapacity.past_utilizations).toBeInstanceOf(RingBuffer); 16 | expect(tableCapacity.throttled_timestamps).toHaveLength(0); 17 | }); 18 | 19 | describe('process', () => { 20 | it('should keep last 5 ticks of unused capacity as burst', () => 
{ 21 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 22 | const tableCapacity = new TableCapacity(config); 23 | const timestamp = Date.now(); 24 | tableCapacity.capacity = 100 25 | 26 | expect(tableCapacity.burst_buckets.sum()).toEqual(0) 27 | 28 | // at each tick, we add unused burst, then the table might scale up/down 29 | 30 | // tick 1 31 | expect(tableCapacity.capacity).toEqual(100) 32 | tableCapacity.process(timestamp, 30); 33 | expect(tableCapacity.burst_buckets.sum()).toEqual(70) 34 | 35 | // tick 2 36 | expect(tableCapacity.capacity).toEqual(100) 37 | tableCapacity.process(timestamp, 20); 38 | expect(tableCapacity.burst_buckets.sum()).toEqual(150) 39 | 40 | // tick 3 41 | expect(tableCapacity.capacity).toEqual(100) 42 | tableCapacity.process(timestamp, 100); 43 | expect(tableCapacity.burst_buckets.sum()).toEqual(150) 44 | 45 | // tick 4 46 | expect(tableCapacity.capacity).toEqual(100) 47 | tableCapacity.process(timestamp, 80); 48 | expect(tableCapacity.burst_buckets.sum()).toEqual(170) 49 | 50 | // tick 5 51 | expect(tableCapacity.capacity).toEqual(160) // scaled up after prev request 52 | tableCapacity.process(timestamp, 100); 53 | expect(tableCapacity.burst_buckets.sum()).toEqual(230) 54 | 55 | // at tick 6, tick 1's 70 should fall off 56 | expect(tableCapacity.capacity).toEqual(200) // scaled up again after prev request 57 | tableCapacity.process(timestamp, 200); 58 | expect(tableCapacity.burst_buckets.sum()).toEqual(160) 59 | }); 60 | 61 | it('should consume from burst and throttle when capacity is not enough', () => { 62 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 63 | const tableCapacity = new TableCapacity(config); 64 | 65 | const timestamp = Date.now(); 66 | const amount_requested = 200; 67 | 68 | const { consumedCapacity, throttled } = tableCapacity.process(timestamp, amount_requested); 69 | 70 | expect(tableCapacity.burst_buckets.sum()).toBe(0); 71 | expect(throttled).toBe(100) 72 | }); 73 | it('returns the amount of capacity consumed and the amount of throttled requests and the burst available', () => { 74 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 75 | const tableCapacity = new TableCapacity(config); 76 | tableCapacity.capacity = 100; 77 | const timestamp = Date.now(); 78 | let results 79 | 80 | results = tableCapacity.process(timestamp, 150); 81 | // request 150, capacity 100, so... 
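// ...all 100 provisioned units are consumed, the remaining 50 are throttled, and no burst is available yet: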
82 | expect(results.consumedCapacity).toEqual(100); 83 | expect(results.throttled).toEqual(50); 84 | expect(results.burstAvailable).toEqual(0); 85 | 86 | results = tableCapacity.process(timestamp, 20); 87 | expect(results.consumedCapacity).toEqual(20); 88 | expect(results.throttled).toEqual(0); 89 | expect(results.burstAvailable).toEqual(80); 90 | }); 91 | 92 | it('should only scale up after two consecutive ticks over threshold', () => { 93 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 94 | const tableCapacity = new TableCapacity(config); 95 | 96 | const timestamp = Date.now(); 97 | const amount_requested = 100 98 | 99 | // first tick, no change 100 | tableCapacity.process(timestamp, amount_requested); 101 | expect(tableCapacity.capacity).toEqual(100); 102 | 103 | // second tick, should scale up 104 | tableCapacity.process(timestamp, amount_requested); 105 | expect(tableCapacity.capacity).toEqual(200); // 200 = the 100 requested divided by the 0.5 target utilization 106 | }); 107 | 108 | it('should only scale down after 15 consecutive ticks where the utilization is 20% lower than the target', () => { 109 | const config = { min: 100, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 110 | const tableCapacity = new TableCapacity(config); 111 | const initial_capacity = 200 112 | tableCapacity.capacity = initial_capacity // overriding capacity to 200 before we start lowball requests 113 | 114 | const timestamp = Date.now(); 115 | const amount_requested = 50 116 | 117 | // start us off with something that wouldn't trigger a scale down because at the beginning all 15 past utilization slots are 0... 118 | tableCapacity.process(timestamp, 200); 119 | expect(tableCapacity.capacity).toEqual(initial_capacity); 120 | 121 | // 14 more requests at a utilization well below the scale-down threshold; no scale down yet... 
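// (a scale-down needs 15 consecutive ticks below target - 20%, i.e. under 0.30 here; 50/200 = 0.25 qualifies)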
122 | tableCapacity.process(timestamp, amount_requested); 123 | expect(tableCapacity.capacity).toEqual(initial_capacity); 124 | tableCapacity.process(timestamp, amount_requested); 125 | expect(tableCapacity.capacity).toEqual(initial_capacity); 126 | tableCapacity.process(timestamp, amount_requested); 127 | expect(tableCapacity.capacity).toEqual(initial_capacity); 128 | tableCapacity.process(timestamp, amount_requested); 129 | expect(tableCapacity.capacity).toEqual(initial_capacity); 130 | tableCapacity.process(timestamp, amount_requested); 131 | expect(tableCapacity.capacity).toEqual(initial_capacity); 132 | tableCapacity.process(timestamp, amount_requested); 133 | expect(tableCapacity.capacity).toEqual(initial_capacity); 134 | tableCapacity.process(timestamp, amount_requested); 135 | expect(tableCapacity.capacity).toEqual(initial_capacity); 136 | tableCapacity.process(timestamp, amount_requested); 137 | expect(tableCapacity.capacity).toEqual(initial_capacity); 138 | tableCapacity.process(timestamp, amount_requested); 139 | expect(tableCapacity.capacity).toEqual(initial_capacity); 140 | tableCapacity.process(timestamp, amount_requested); 141 | expect(tableCapacity.capacity).toEqual(initial_capacity); 142 | tableCapacity.process(timestamp, amount_requested); 143 | expect(tableCapacity.capacity).toEqual(initial_capacity); 144 | tableCapacity.process(timestamp, amount_requested); 145 | expect(tableCapacity.capacity).toEqual(initial_capacity); 146 | tableCapacity.process(timestamp, amount_requested); 147 | expect(tableCapacity.capacity).toEqual(initial_capacity); 148 | tableCapacity.process(timestamp, amount_requested); 149 | expect(tableCapacity.capacity).toEqual(initial_capacity); 150 | 151 | // 15th consecutive tick below the scale-down threshold will adjust capacity lower 152 | tableCapacity.process(timestamp, amount_requested); 153 | expect(tableCapacity.capacity).toEqual(100); // We round capacity when we scale 154 | }); 155 | 156 | it('does not scale down more than 27 times in a 24 hour period beginning at 00:00:00.000Z (4 in first hour that downscaling begins, one per additional hour)', () => { 157 | const config = { min: 10, max: 1000, target: 0.5, scaling_delay_in_seconds: 0 }; 158 | const tableCapacity = new TableCapacity(config); 159 | const initial_capacity = 400 160 | tableCapacity.capacity = initial_capacity 161 | let datetime = dayjs('2000-01-02T00:00:00.000Z') 162 | 163 | expect(tableCapacity.capacity).toEqual(400); 164 | tableCapacity.process(datetime.valueOf(), 200); // right on target 165 | datetime = datetime.add(1, 'minute') 166 | 167 | tableCapacity.capacity = initial_capacity 168 | expect(tableCapacity.capacity).toEqual(400); 169 | tableCapacity.process(datetime.valueOf(), 200); 170 | datetime = datetime.add(1, 'minute') 171 | 172 | // let's wait a few hours, then try to trigger first downscale... 
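// (demand of 200 against capacity 400 sits exactly at the 0.5 target, so neither scale-up nor scale-down triggers while the clock advances)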
173 | for (let minute=1; minute<=4*60; minute++) { 174 | expect(tableCapacity.capacity).toEqual(400); 175 | datetime = datetime.add(1, 'minute') 176 | tableCapacity.process(datetime.valueOf(), 200); 177 | } 178 | 179 | // trigger first downscale 180 | let scaledownReq = tableCapacity.capacity * (tableCapacity.config.target - .22) 181 | for (let minute=1; minute<=15; minute++) { 182 | expect(tableCapacity.capacity).toEqual(400); 183 | datetime = datetime.add(1, 'minute') 184 | tableCapacity.process(datetime.valueOf(), scaledownReq); // utilization 0.28, more than 20 points below the 0.5 target 185 | } 186 | 187 | // first scale down should have happened 188 | expect(tableCapacity.capacity).toEqual(224); 189 | const firstDownscaleHappenedAt = dayjs(datetime).clone() 190 | 191 | // trigger 3 more downscales in consecutive minutes 192 | for (let minute=1; minute<=3; minute++) { 193 | let scaledownReq = tableCapacity.capacity * (tableCapacity.config.target - .22) 194 | let capacityBefore = tableCapacity.capacity 195 | datetime = datetime.add(1, 'minute') 196 | tableCapacity.process(datetime.valueOf(), scaledownReq); 197 | let capacityAfter = tableCapacity.capacity 198 | expect(capacityAfter).toBeLessThan(capacityBefore) 199 | } 200 | 201 | // no more downscales should happen until 60 minutes after the first downscale event of the day occurred 202 | scaledownReq = tableCapacity.capacity * (tableCapacity.config.target - .22) 203 | // TODO why 62 here not 60?! 204 | while (datetime < dayjs(firstDownscaleHappenedAt).add(62, 'minutes')) { 205 | let capacityBefore = tableCapacity.capacity 206 | datetime = datetime.add(1, 'minute') 207 | tableCapacity.process(datetime.valueOf(), scaledownReq); 208 | let capacityAfter = tableCapacity.capacity 209 | expect(capacityAfter).toEqual(capacityBefore) 210 | } 211 | 212 | // we should now be in the next hour, so another downscale can happen 213 | let capacityBefore = tableCapacity.capacity 214 | datetime = datetime.add(1, 'minute') 215 | tableCapacity.process(datetime.valueOf(), scaledownReq); 216 | let capacityAfter = tableCapacity.capacity 217 | expect(capacityAfter).toBeLessThan(capacityBefore) 218 | 219 | // but only 1. 
so next one should fail 220 | capacityBefore = tableCapacity.capacity 221 | datetime = datetime.add(1, 'minute') 222 | tableCapacity.process(datetime.valueOf(), scaledownReq); 223 | capacityAfter = tableCapacity.capacity 224 | expect(capacityAfter).toEqual(capacityBefore) 225 | 226 | // advance to the next hour, no scaling happening in the hour 227 | while (datetime < dayjs(firstDownscaleHappenedAt).add(120, 'minutes')) { 228 | scaledownReq = tableCapacity.capacity * (tableCapacity.config.target - .22) 229 | let capacityBefore = tableCapacity.capacity 230 | datetime = datetime.add(1, 'minute') 231 | tableCapacity.process(datetime.valueOf(), scaledownReq); 232 | let capacityAfter = tableCapacity.capacity 233 | expect(capacityAfter).toEqual(capacityBefore) 234 | } 235 | }); 236 | 237 | it('respects the scaling_delay_in_seconds config', () => { 238 | // TODO 239 | }) 240 | }); 241 | }); 242 | -------------------------------------------------------------------------------- /ddb-sim.ts: -------------------------------------------------------------------------------- 1 | import { RingBuffer } from 'ring-buffer-ts'; 2 | import { BurstBuckets } from './burst-bucket' 3 | import dayjs from 'dayjs' 4 | 5 | function initCircularBuffer(capacity: number, default_value: number) { 6 | let buf = new RingBuffer<number>(capacity) 7 | for (let i=0; i<capacity; i++) { 8 | buf.add(default_value) 9 | } 10 | return buf 11 | } 12 | 13 | export type TableCapacityConfig = { 14 | min: number 15 | max: number 16 | target: number 17 | scaling_delay_in_seconds: number 18 | } 19 | 20 | export class TableCapacity { 21 | config: TableCapacityConfig 22 | capacity: number 23 | burst_buckets: BurstBuckets 24 | past_utilizations: RingBuffer<number> 25 | throttled_timestamps: number[] 26 | last_process_at: number 27 | capacity_change_at: number 28 | capacity_change_to: number 29 | first_scaledown_happened_at: number 30 | scaledowns_remaining_in_first_batch: number; 31 | first_scaledown_batch_ends_at: number; 32 | most_recent_scaledown_happened_at: number; 33 | 34 | constructor(config:TableCapacityConfig) { 35 | this.config = config 36 | this.capacity = config.min 37 | this.burst_buckets = new BurstBuckets(5) 38 | this.past_utilizations = initCircularBuffer(15, 0) 39 | this.throttled_timestamps = [] 40 | this.last_process_at = -1 41 | this.capacity_change_at = -1 42 | this.capacity_change_to = -1 43 | this.first_scaledown_happened_at = -1 44 | this.scaledowns_remaining_in_first_batch = 4 // TODO make this configurable 45 | this.first_scaledown_batch_ends_at = -1 46 | this.most_recent_scaledown_happened_at = -1 47 | } 48 | 49 | resetScaledownTracking() { 50 | this.first_scaledown_happened_at = -1 51 | this.first_scaledown_batch_ends_at = -1 52 | this.most_recent_scaledown_happened_at = -1 53 | this.scaledowns_remaining_in_first_batch = 4 54 | } 55 | 56 | canScaleDown(timestamp: number): boolean { 57 | // If we have not scaled down this day yet, we can scaledown 58 | if (this.first_scaledown_happened_at == -1) { 59 | return true 60 | } 61 | 62 | // If timestamp is inside first batch window and we have any scaledowns remaining in the first batch (defaults to 60 minutes after first scaledown of the day), we can scaledown 63 | if (timestamp < this.first_scaledown_batch_ends_at && this.scaledowns_remaining_in_first_batch > 0) { 64 | return true 65 | } 66 | 67 | // Otherwise, we can only scaledown if it has been long enough (default 60 minutes) since last scaledown 68 | // TODO make the 60 minutes configurable 69 | if (timestamp >= dayjs(this.most_recent_scaledown_happened_at).add(60, 'minutes').valueOf()) { 70 | return true 71 | } 72 | 73 | return false 74 | } 75 | 76 | process(timestamp: number, amount_requested: number) { 77 | if (dayjs(timestamp).date() !== dayjs(this.last_process_at).date()) { 78 | this.resetScaledownTracking() 79 | } 80 | 81 | const amount_remaining = this.capacity - amount_requested 82 | let 
consumedCapacity = 0 83 | let throttled = 0 84 | if (amount_remaining < 0) { 85 | consumedCapacity += this.capacity 86 | 87 | // CONSUME FROM BURST IF WE CAN 88 | const amount_over = amount_remaining * -1 89 | const burst_consumed = Math.min(amount_over, this.burst_buckets.sum()) 90 | this.burst_buckets.consume(burst_consumed) 91 | consumedCapacity += burst_consumed 92 | 93 | const amount_remaining_after_burst_consumed = amount_over - burst_consumed 94 | if (amount_remaining_after_burst_consumed > 0) { 95 | // THROTTLE THE REST AFTER NO BURST LEFT 96 | throttled = amount_remaining_after_burst_consumed 97 | } 98 | } 99 | else { 100 | // ADD UNUSED CAPACITY TO BURST 101 | this.burst_buckets.add(amount_remaining) 102 | consumedCapacity += amount_requested 103 | } 104 | 105 | 106 | // track utilization 107 | const current_utilization = amount_requested / this.capacity 108 | this.past_utilizations.add(current_utilization) 109 | 110 | 111 | // handle scheduling a scaling event after some delay 112 | if (this.capacity_change_at == -1) { 113 | const last_two_mins_of_util = this.past_utilizations.toArray().slice(-2) 114 | if (last_two_mins_of_util[0] > 0 && last_two_mins_of_util[1] > 0 && last_two_mins_of_util[0] > this.config.target && last_two_mins_of_util[1] > this.config.target) { 115 | // scaling up... 116 | this.capacity_change_to = amount_requested / this.config.target 117 | // clamp to max value since this is a scale up 118 | this.capacity_change_to = Math.min(this.config.max, this.capacity_change_to) 119 | } 120 | 121 | const scale_down_threshold = this.config.target - .20 122 | if (this.past_utilizations.toArray().every(u => u < scale_down_threshold) && this.canScaleDown(timestamp)) { 123 | // scaling down... 124 | const hour = dayjs(timestamp).hour() 125 | this.capacity_change_to = amount_requested / this.config.target 126 | // clamp to min value since this is a scale down 127 | this.capacity_change_to = Math.max(this.config.min, this.capacity_change_to) 128 | } 129 | 130 | if (this.capacity_change_to !== -1) { 131 | this.capacity_change_at = dayjs(timestamp).add(this.config.scaling_delay_in_seconds, 'seconds').valueOf() 132 | } 133 | } 134 | 135 | // handle 'realizing' the scaling event if delay is over 136 | if (this.capacity_change_at != -1 && timestamp >= this.capacity_change_at) { 137 | // if this is a scale down, log it 138 | if (this.capacity_change_to < this.capacity) { 139 | if (this.first_scaledown_happened_at == -1) { 140 | this.first_scaledown_happened_at = timestamp 141 | this.first_scaledown_batch_ends_at = dayjs(timestamp).add(60, 'minutes').valueOf() 142 | } 143 | if (timestamp < this.first_scaledown_batch_ends_at && this.scaledowns_remaining_in_first_batch > 0) { 144 | this.scaledowns_remaining_in_first_batch -= 1 145 | } 146 | this.most_recent_scaledown_happened_at = timestamp 147 | } 148 | 149 | // we round capacity so we don't get super nasty floating point math inequalities when consuming burst 150 | this.capacity = Math.round(this.capacity_change_to) 151 | this.capacity_change_at = -1 152 | this.capacity_change_to = -1 153 | } 154 | 155 | 156 | this.last_process_at = timestamp 157 | return { consumedCapacity, throttled, burstAvailable: this.burst_buckets.sum() } 158 | } 159 | } 160 | 161 | -------------------------------------------------------------------------------- /example_main.ts: -------------------------------------------------------------------------------- 1 | import { fetchTableMetrics } from './table-consumption-fetcher' 2 | import { TableCapacity } from 
'./ddb-sim'; 3 | import { plot, Plot } from 'nodeplotlib'; 4 | 5 | async function main() { 6 | const startTime = new Date(Date.parse('2023-01-02T00:00:00.000Z')) 7 | const endTime = new Date(Date.parse('2023-01-03T00:00:00.000Z')) 8 | const stats = await fetchTableMetrics({ 9 | profile: 'some_profile_name', // from ~/.aws/credentials 10 | region: 'eu-central-1', 11 | tableName: 'my-table-name', // your ddb table name 12 | startTime, 13 | endTime, 14 | }) 15 | 16 | // set your min, max, and target util here, as well as how long before scaling takes effect after it is triggered 17 | const capSim = new TableCapacity({min: 5000, max: 15000, target: 0.7, scaling_delay_in_seconds: 2*60}) 18 | 19 | let timeXs: Date[] = [] 20 | let provisionedCapacityTraceYs: number[] = [] 21 | let consumedCapacityTraceYs: number[] = [] 22 | let throttledCapacityTraceYs: number[] = [] 23 | let burstAvailableTraceYs: number[] = [] 24 | 25 | for (let i=0; i 2 | 3 | 4 | 5 | DDB Scaling Simulator 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 |

DynamoDB Scaling Simulator

14 |

This is alpha software.

15 |

It probably has bugs.

16 |

This is a simple tool to help you simulate how a provisioned-capacity DynamoDB table will perform (will it throttle requests or not?) under different auto-scaling configurations.

17 |

More information on GitHub.

18 |
19 | 20 |
21 |

Step 1

22 |

This form takes you to CloudWatch, where you can download a CSV of your data for the timeframe you're interested in.

23 |
24 |
25 |
26 | 27 | 28 |
29 | 30 |
31 | 32 | 33 |
34 |
35 | 36 |
37 | 38 |
39 |
40 |
41 | 42 |
43 | 44 |

Step 2

45 |
    46 |
  1. Adjust the timeframe in CloudWatch for your desired simulation window
  47 |   2. Select Actions -> 'Download .csv' from the top-right menu. (see picture below)
  48 |
49 | 50 |

Done with the download? Go on to Step 3.

51 | 52 |
53 | 54 |

Step 3

55 |

Use this form to select your downloaded CSV file and run the simulator.

56 | 57 |
58 |
59 |
60 | Pick your CSV file from CloudWatch 61 |
62 |
63 | 64 | 65 |
66 | 67 |
68 | Configure your simulated RCU scaling 69 |
70 |
71 |
72 | 73 | 74 |
75 | 76 |
77 | 78 | 79 |
80 | 81 |
82 | 83 | 84 |
85 |
86 | 87 |
88 | Configure your simulated WCU scaling 89 |
90 |
91 |
92 | 93 | 94 |
95 | 96 |
97 | 98 | 99 |
100 | 101 |
102 | 103 | 104 |
105 |
106 | 107 |
108 | Configure the simulated scaling delay. 109 |
110 |
111 | This controls how long it takes to realize a change in capacity after the DDB simulator decides it wants to change capacity. 112 | 113 | 114 |
115 | 116 |
117 |
Pricing Data
118 | Enter the RCU and WCU cost per provisioned hour for your region. 119 |
120 |
121 |
122 | 123 | 124 |
125 |
126 | 127 | 128 |
129 |
130 | 131 |
132 | 133 |
134 |
135 |
136 | 137 | 191 |
192 | 193 | -------------------------------------------------------------------------------- /index.ts: -------------------------------------------------------------------------------- 1 | import { getCloudWatchUrl } from './cloudwatch-opener' 2 | import { getTraces, Trace, SimTimestepInput } from './plotting' 3 | import { newPlot } from 'plotly.js-dist' 4 | import { calculateCost, getCostPerUnit, optimize, ReadOrWrite, StorageClass, TableMode } from './pricing' 5 | import { arrayToMetricsRecords, makeRecordsForSimulator } from './csv-ingestion' 6 | 7 | import dayjs from 'dayjs' 8 | 9 | function getCloudwatchMetrics(region: string, tableName: string) { 10 | const url = getCloudWatchUrl(region, tableName) 11 | window.open(url) 12 | } 13 | 14 | function getScalingConfigsFromFormData(formData: FormData) { 15 | const readsConfig = { 16 | min: parseInt(formData.get('rcu_min') as any, 10), 17 | max: parseInt(formData.get('rcu_max') as any, 10), 18 | target: parseFloat(formData.get('rcu_target') as any), 19 | scaling_delay_in_seconds: parseInt(formData.get('delay') as any, 10) 20 | } 21 | const writesConfig = { 22 | min: parseInt(formData.get('wcu_min') as any, 10), 23 | max: parseInt(formData.get('wcu_max') as any, 10), 24 | target: parseFloat(formData.get('wcu_target') as any), 25 | scaling_delay_in_seconds: parseInt(formData.get('delay') as any, 10) 26 | } 27 | 28 | return { readsConfig, writesConfig } 29 | } 30 | 31 | function getTracesFromFormData(formData: FormData, readRecords: SimTimestepInput[], writeRecords: SimTimestepInput[]) { 32 | const { readsConfig, writesConfig } = getScalingConfigsFromFormData(formData) 33 | const readTraces = getTraces(readsConfig, readRecords) 34 | const writeTraces = getTraces(writesConfig, writeRecords) 35 | return { readTraces, writeTraces } 36 | } 37 | 38 | function addResultRows({tableId, description, min, max, target, cost}) { 39 | const tbody = document.querySelector(`#${tableId} tbody`); 40 | const template = document.querySelector('#resultRow'); 41 | 42 | if ('content' in document.createElement('template')) { 43 | const clone = template.content.cloneNode(true); 44 | let td = clone.querySelectorAll("td"); 45 | td[0].textContent = description 46 | td[1].textContent = min 47 | td[2].textContent = max 48 | td[3].textContent = target 49 | td[4].textContent = `$ ${Math.round(cost * 100)/100} USD `// round to 2 decimal places 50 | 51 | tbody.prepend(clone); 52 | 53 | } else { 54 | tbody.innerHTML = `${description} | ${min} | ${max} | ${target} | ${cost}` 55 | } 56 | } 57 | 58 | async function onCsvFileReady(formData: FormData, e) { 59 | const text = e.target!.result; 60 | const data = csvToArray(text as string, ",", 5) 61 | const metricsRecords = arrayToMetricsRecords(data) 62 | const { readRecords, writeRecords } = makeRecordsForSimulator(metricsRecords) 63 | 64 | const { readTraces, writeTraces } = getTracesFromFormData(formData, readRecords, writeRecords) 65 | makeGraphs(readTraces, writeTraces) 66 | document.querySelector('#results')?.setAttribute('style', 'display: block') 67 | document.querySelector('#readsGraph')?.scrollIntoView() 68 | 69 | const rcuPricing: number = parseFloat(formData.get('rcu_pricing')) 70 | const wcuPricing: number = parseFloat(formData.get('wcu_pricing')) 71 | 72 | // const rcuPricing: number = await getCostPerUnit('ap-southeast-1', ReadOrWrite.Read, TableMode.ProvisionedCapacity, StorageClass.Standard) 73 | // const wcuPricing: number = await getCostPerUnit('ap-southeast-1', ReadOrWrite.Write, TableMode.ProvisionedCapacity, 
StorageClass.Standard) 74 | // console.log(rcuPricing) 75 | // console.log(wcuPricing) 76 | 77 | const rcuCost = calculateCost(readTraces.provisionedCapacityTrace, rcuPricing) 78 | const wcuCost = calculateCost(writeTraces.provisionedCapacityTrace, wcuPricing) 79 | 80 | if (window.Worker) { 81 | const worker = new Worker(new URL("./optimization-worker.ts", import.meta.url), { type: 'module' }) 82 | 83 | const { readsConfig, writesConfig } = getScalingConfigsFromFormData(formData) 84 | worker.postMessage({ taskId: 'readOptimize', scalingConfig: readsConfig, records: readRecords, pricePerHour: rcuPricing }) 85 | worker.postMessage({ taskId: 'writeOptimize', scalingConfig: writesConfig, records: writeRecords, pricePerHour: wcuPricing }) 86 | 87 | addResultRows({ 88 | tableId: 'readsResults', 89 | description: 'Your manual config (entered above)', 90 | min: readsConfig.min, 91 | max: readsConfig.max, 92 | target: readsConfig.target, 93 | cost: rcuCost 94 | }) 95 | 96 | addResultRows({ 97 | tableId: 'writesResults', 98 | description: 'Your manual config (entered above)', 99 | min: writesConfig.min, 100 | max: writesConfig.max, 101 | target: writesConfig.target, 102 | cost: wcuCost 103 | }) 104 | 105 | 106 | worker.onmessage = (e) => { 107 | const { taskId, bestMin, bestMax, bestPrice, bestTarget } = e.data 108 | if (taskId == 'readOptimize') { 109 | document.querySelector('table#readsResults tbody tr.pleaseWait')?.setAttribute('hidden', 'true') 110 | 111 | addResultRows({ 112 | tableId: 'readsResults', 113 | description: 'Optimized config (auto tuning for no throttles)', 114 | min: bestMin, 115 | max: bestMax, 116 | target: bestTarget / 100, 117 | cost: bestPrice 118 | }) 119 | 120 | } 121 | if (taskId == 'writeOptimize') { 122 | document.querySelector('table#writesResults tbody tr.pleaseWait')?.setAttribute('hidden', 'true') 123 | 124 | addResultRows({ 125 | tableId: 'writesResults', 126 | description: 'Optimized config (auto tuning for no throttles)', 127 | min: bestMin, 128 | max: bestMax, 129 | target: bestTarget / 100, 130 | cost: bestPrice 131 | }) 132 | } 133 | } 134 | 135 | } 136 | else { 137 | document.querySelector('#readsResults tbody tr.pleaseWait td').innerHTML = `Error: no Web Worker support. Skipping optimization.` 138 | document.querySelector('#writesResults tbody tr.pleaseWait td').innerHTML = `Error: no Web Worker support. 
Skipping optimization.` 139 | } 140 | } 141 | 142 | 143 | 144 | function makeGraphs(readTraces: { provisionedCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; consumedCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; throttledCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; burstAvailableTrace: { x: string[]; y: number[]; type: string; name: string } }, writeTraces: { provisionedCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; consumedCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; throttledCapacityTrace: { x: string[]; y: number[]; type: string; name: string }; burstAvailableTrace: { x: string[]; y: number[]; type: string; name: string } }) { 145 | const layout = { 146 | height: 600, 147 | } 148 | const config = { 149 | responsive: true 150 | } 151 | readTraces.burstAvailableTrace.visible = 'legendonly' 152 | writeTraces.burstAvailableTrace.visible = 'legendonly' 153 | newPlot( 154 | 'readsGraph', 155 | [ 156 | readTraces.provisionedCapacityTrace, 157 | readTraces.consumedCapacityTrace, 158 | readTraces.throttledCapacityTrace, 159 | readTraces.burstAvailableTrace, 160 | ], 161 | { ...layout, title: 'Simulated Reads' }, 162 | config 163 | ) 164 | newPlot( 165 | 'writesGraph', 166 | [ 167 | writeTraces.provisionedCapacityTrace, 168 | writeTraces.consumedCapacityTrace, 169 | writeTraces.throttledCapacityTrace, 170 | writeTraces.burstAvailableTrace, 171 | ], 172 | { ...layout, title: 'Simulated Writes' }, 173 | config 174 | ) 175 | } 176 | 177 | function onTableFormSubmit(e) { 178 | e.preventDefault() 179 | const formData = new FormData(e.currentTarget) 180 | getCloudwatchMetrics(formData.get('region'), formData.get('tableName')) 181 | } 182 | 183 | async function onCsvFormSubmit(e) { 184 | e.preventDefault() 185 | const file = e.currentTarget.querySelector('input[type="file"]').files[0] 186 | const formData = new FormData(e.currentTarget) 187 | const reader = new FileReader() 188 | reader.onload = onCsvFileReady.bind(this, formData) 189 | reader.readAsText(file) 190 | } 191 | 192 | function onDomContentLoaded(e) { 193 | document.querySelector('#tableForm form')?.addEventListener('submit', onTableFormSubmit) 194 | document.querySelector('#csvForm form')?.addEventListener('submit', onCsvFormSubmit) 195 | } 196 | 197 | document.addEventListener('DOMContentLoaded', onDomContentLoaded, false) 198 | 199 | function csvToArray(str: string, delimiter = ",", skipLines = 0) { 200 | //skip lines 201 | const lines: string[] = str.split("\n") 202 | const rows = lines.slice(skipLines) 203 | 204 | const arr = rows.map(row => row.split(delimiter)) 205 | return arr; 206 | } 207 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('ts-jest').JestConfigWithTsJest} */ 2 | module.exports = { 3 | preset: 'ts-jest', 4 | testEnvironment: 'node', 5 | }; -------------------------------------------------------------------------------- /normalize.css: -------------------------------------------------------------------------------- 1 | /*! normalize.css v3.0.2 | MIT License | git.io/normalize */ 2 | 3 | /** 4 | * 1. Set default font family to sans-serif. 5 | * 2. Prevent iOS text size adjust after orientation change, without disabling 6 | * user zoom. 
7 | */ 8 | 9 | html { 10 | font-family: sans-serif; /* 1 */ 11 | -ms-text-size-adjust: 100%; /* 2 */ 12 | -webkit-text-size-adjust: 100%; /* 2 */ 13 | } 14 | 15 | /** 16 | * Remove default margin. 17 | */ 18 | 19 | body { 20 | margin: 0; 21 | } 22 | 23 | /* HTML5 display definitions 24 | ========================================================================== */ 25 | 26 | /** 27 | * Correct `block` display not defined for any HTML5 element in IE 8/9. 28 | * Correct `block` display not defined for `details` or `summary` in IE 10/11 29 | * and Firefox. 30 | * Correct `block` display not defined for `main` in IE 11. 31 | */ 32 | 33 | article, 34 | aside, 35 | details, 36 | figcaption, 37 | figure, 38 | footer, 39 | header, 40 | hgroup, 41 | main, 42 | menu, 43 | nav, 44 | section, 45 | summary { 46 | display: block; 47 | } 48 | 49 | /** 50 | * 1. Correct `inline-block` display not defined in IE 8/9. 51 | * 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera. 52 | */ 53 | 54 | audio, 55 | canvas, 56 | progress, 57 | video { 58 | display: inline-block; /* 1 */ 59 | vertical-align: baseline; /* 2 */ 60 | } 61 | 62 | /** 63 | * Prevent modern browsers from displaying `audio` without controls. 64 | * Remove excess height in iOS 5 devices. 65 | */ 66 | 67 | audio:not([controls]) { 68 | display: none; 69 | height: 0; 70 | } 71 | 72 | /** 73 | * Address `[hidden]` styling not present in IE 8/9/10. 74 | * Hide the `template` element in IE 8/9/11, Safari, and Firefox < 22. 75 | */ 76 | 77 | [hidden], 78 | template { 79 | display: none; 80 | } 81 | 82 | /* Links 83 | ========================================================================== */ 84 | 85 | /** 86 | * Remove the gray background color from active links in IE 10. 87 | */ 88 | 89 | a { 90 | background-color: transparent; 91 | } 92 | 93 | /** 94 | * Improve readability when focused and also mouse hovered in all browsers. 95 | */ 96 | 97 | a:active, 98 | a:hover { 99 | outline: 0; 100 | } 101 | 102 | /* Text-level semantics 103 | ========================================================================== */ 104 | 105 | /** 106 | * Address styling not present in IE 8/9/10/11, Safari, and Chrome. 107 | */ 108 | 109 | abbr[title] { 110 | border-bottom: 1px dotted; 111 | } 112 | 113 | /** 114 | * Address style set to `bolder` in Firefox 4+, Safari, and Chrome. 115 | */ 116 | 117 | b, 118 | strong { 119 | font-weight: bold; 120 | } 121 | 122 | /** 123 | * Address styling not present in Safari and Chrome. 124 | */ 125 | 126 | dfn { 127 | font-style: italic; 128 | } 129 | 130 | /** 131 | * Address variable `h1` font-size and margin within `section` and `article` 132 | * contexts in Firefox 4+, Safari, and Chrome. 133 | */ 134 | 135 | h1 { 136 | font-size: 2em; 137 | margin: 0.67em 0; 138 | } 139 | 140 | /** 141 | * Address styling not present in IE 8/9. 142 | */ 143 | 144 | mark { 145 | background: #ff0; 146 | color: #000; 147 | } 148 | 149 | /** 150 | * Address inconsistent and variable font size in all browsers. 151 | */ 152 | 153 | small { 154 | font-size: 80%; 155 | } 156 | 157 | /** 158 | * Prevent `sub` and `sup` affecting `line-height` in all browsers. 
159 | */ 160 | 161 | sub, 162 | sup { 163 | font-size: 75%; 164 | line-height: 0; 165 | position: relative; 166 | vertical-align: baseline; 167 | } 168 | 169 | sup { 170 | top: -0.5em; 171 | } 172 | 173 | sub { 174 | bottom: -0.25em; 175 | } 176 | 177 | /* Embedded content 178 | ========================================================================== */ 179 | 180 | /** 181 | * Remove border when inside `a` element in IE 8/9/10. 182 | */ 183 | 184 | img { 185 | border: 0; 186 | } 187 | 188 | /** 189 | * Correct overflow not hidden in IE 9/10/11. 190 | */ 191 | 192 | svg:not(:root) { 193 | overflow: hidden; 194 | } 195 | 196 | /* Grouping content 197 | ========================================================================== */ 198 | 199 | /** 200 | * Address margin not present in IE 8/9 and Safari. 201 | */ 202 | 203 | figure { 204 | margin: 1em 40px; 205 | } 206 | 207 | /** 208 | * Address differences between Firefox and other browsers. 209 | */ 210 | 211 | hr { 212 | -moz-box-sizing: content-box; 213 | box-sizing: content-box; 214 | height: 0; 215 | } 216 | 217 | /** 218 | * Contain overflow in all browsers. 219 | */ 220 | 221 | pre { 222 | overflow: auto; 223 | } 224 | 225 | /** 226 | * Address odd `em`-unit font size rendering in all browsers. 227 | */ 228 | 229 | code, 230 | kbd, 231 | pre, 232 | samp { 233 | font-family: monospace, monospace; 234 | font-size: 1em; 235 | } 236 | 237 | /* Forms 238 | ========================================================================== */ 239 | 240 | /** 241 | * Known limitation: by default, Chrome and Safari on OS X allow very limited 242 | * styling of `select`, unless a `border` property is set. 243 | */ 244 | 245 | /** 246 | * 1. Correct color not being inherited. 247 | * Known issue: affects color of disabled elements. 248 | * 2. Correct font properties not being inherited. 249 | * 3. Address margins set differently in Firefox 4+, Safari, and Chrome. 250 | */ 251 | 252 | button, 253 | input, 254 | optgroup, 255 | select, 256 | textarea { 257 | color: inherit; /* 1 */ 258 | font: inherit; /* 2 */ 259 | margin: 0; /* 3 */ 260 | } 261 | 262 | /** 263 | * Address `overflow` set to `hidden` in IE 8/9/10/11. 264 | */ 265 | 266 | button { 267 | overflow: visible; 268 | } 269 | 270 | /** 271 | * Address inconsistent `text-transform` inheritance for `button` and `select`. 272 | * All other form control elements do not inherit `text-transform` values. 273 | * Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera. 274 | * Correct `select` style inheritance in Firefox. 275 | */ 276 | 277 | button, 278 | select { 279 | text-transform: none; 280 | } 281 | 282 | /** 283 | * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` 284 | * and `video` controls. 285 | * 2. Correct inability to style clickable `input` types in iOS. 286 | * 3. Improve usability and consistency of cursor style between image-type 287 | * `input` and others. 288 | */ 289 | 290 | button, 291 | html input[type="button"], /* 1 */ 292 | input[type="reset"], 293 | input[type="submit"] { 294 | -webkit-appearance: button; /* 2 */ 295 | cursor: pointer; /* 3 */ 296 | } 297 | 298 | /** 299 | * Re-set default cursor for disabled elements. 300 | */ 301 | 302 | button[disabled], 303 | html input[disabled] { 304 | cursor: default; 305 | } 306 | 307 | /** 308 | * Remove inner padding and border in Firefox 4+. 
309 | */ 310 | 311 | button::-moz-focus-inner, 312 | input::-moz-focus-inner { 313 | border: 0; 314 | padding: 0; 315 | } 316 | 317 | /** 318 | * Address Firefox 4+ setting `line-height` on `input` using `!important` in 319 | * the UA stylesheet. 320 | */ 321 | 322 | input { 323 | line-height: normal; 324 | } 325 | 326 | /** 327 | * It's recommended that you don't attempt to style these elements. 328 | * Firefox's implementation doesn't respect box-sizing, padding, or width. 329 | * 330 | * 1. Address box sizing set to `content-box` in IE 8/9/10. 331 | * 2. Remove excess padding in IE 8/9/10. 332 | */ 333 | 334 | input[type="checkbox"], 335 | input[type="radio"] { 336 | box-sizing: border-box; /* 1 */ 337 | padding: 0; /* 2 */ 338 | } 339 | 340 | /** 341 | * Fix the cursor style for Chrome's increment/decrement buttons. For certain 342 | * `font-size` values of the `input`, it causes the cursor style of the 343 | * decrement button to change from `default` to `text`. 344 | */ 345 | 346 | input[type="number"]::-webkit-inner-spin-button, 347 | input[type="number"]::-webkit-outer-spin-button { 348 | height: auto; 349 | } 350 | 351 | /** 352 | * 1. Address `appearance` set to `searchfield` in Safari and Chrome. 353 | * 2. Address `box-sizing` set to `border-box` in Safari and Chrome 354 | * (include `-moz` to future-proof). 355 | */ 356 | 357 | input[type="search"] { 358 | -webkit-appearance: textfield; /* 1 */ 359 | -moz-box-sizing: content-box; 360 | -webkit-box-sizing: content-box; /* 2 */ 361 | box-sizing: content-box; 362 | } 363 | 364 | /** 365 | * Remove inner padding and search cancel button in Safari and Chrome on OS X. 366 | * Safari (but not Chrome) clips the cancel button when the search input has 367 | * padding (and `textfield` appearance). 368 | */ 369 | 370 | input[type="search"]::-webkit-search-cancel-button, 371 | input[type="search"]::-webkit-search-decoration { 372 | -webkit-appearance: none; 373 | } 374 | 375 | /** 376 | * Define consistent border, margin, and padding. 377 | */ 378 | 379 | fieldset { 380 | border: 1px solid #c0c0c0; 381 | margin: 0 2px; 382 | padding: 0.35em 0.625em 0.75em; 383 | } 384 | 385 | /** 386 | * 1. Correct `color` not being inherited in IE 8/9/10/11. 387 | * 2. Remove padding so people aren't caught out if they zero out fieldsets. 388 | */ 389 | 390 | legend { 391 | border: 0; /* 1 */ 392 | padding: 0; /* 2 */ 393 | } 394 | 395 | /** 396 | * Remove default vertical scrollbar in IE 8/9/10/11. 397 | */ 398 | 399 | textarea { 400 | overflow: auto; 401 | } 402 | 403 | /** 404 | * Don't inherit the `font-weight` (applied by a rule above). 405 | * NOTE: the default cannot safely be changed in Chrome and Safari on OS X. 406 | */ 407 | 408 | optgroup { 409 | font-weight: bold; 410 | } 411 | 412 | /* Tables 413 | ========================================================================== */ 414 | 415 | /** 416 | * Remove most spacing between table cells. 
417 | */ 418 | 419 | table { 420 | border-collapse: collapse; 421 | border-spacing: 0; 422 | } 423 | 424 | td, 425 | th { 426 | padding: 0; 427 | } -------------------------------------------------------------------------------- /optimization-worker.ts: -------------------------------------------------------------------------------- 1 | import { optimize } from "./pricing"; 2 | 3 | self.onmessage = function(e) { 4 | const { taskId, scalingConfig, records, pricePerHour } = e.data 5 | const { bestMin, bestMax, bestPrice, bestTarget } = optimize(scalingConfig, records, pricePerHour) 6 | self.postMessage({ taskId, bestMin, bestMax, bestPrice, bestTarget }) 7 | } 8 | 9 | export {} // make this a module -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ddb-scaling-sim", 3 | "version": "1.0.0", 4 | "description": "", 5 | "source": "index.html", 6 | "scripts": { 7 | "optimize-region-report": "npx ts-node ./scripts/make-optimize-report-for-region.ts", 8 | "start": "parcel", 9 | "test": "jest" 10 | }, 11 | "author": "", 12 | "license": "ISC", 13 | "dependencies": { 14 | "@aws-sdk/client-application-auto-scaling": "^3.301.0", 15 | "@aws-sdk/client-cloudwatch": "^3.292.0", 16 | "@aws-sdk/client-dynamodb": "^3.300.0", 17 | "@aws-sdk/credential-providers": "^3.292.0", 18 | "@aws-sdk/types": "^3.292.0", 19 | "aws-sdk": "^2.1336.0", 20 | "csv": "^6.2.8", 21 | "dayjs": "^1.11.7", 22 | "nodeplotlib": "^1.1.2", 23 | "optimization-js": "^1.5.0", 24 | "plotly.js-dist": "^2.20.0", 25 | "ring-buffer-ts": "^1.2.0" 26 | }, 27 | "devDependencies": { 28 | "@jest/globals": "^29.5.0", 29 | "@types/jest": "^29.4.4", 30 | "buffer": "^5.7.1", 31 | "events": "^3.3.0", 32 | "https-browserify": "^1.0.0", 33 | "jest": "^29.5.0", 34 | "os-browserify": "^0.3.0", 35 | "parcel": "latest", 36 | "path-browserify": "^1.0.1", 37 | "process": "^0.11.10", 38 | "punycode": "^1.4.1", 39 | "querystring-es3": "^0.2.1", 40 | "stream-browserify": "^3.0.0", 41 | "stream-http": "^3.2.0", 42 | "ts-jest": "^29.0.5", 43 | "ts-node": "^10.9.1", 44 | "url": "^0.11.0" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /plotting.ts: -------------------------------------------------------------------------------- 1 | import { TableCapacity, TableCapacityConfig } from './ddb-sim'; 2 | 3 | export type Trace = { 4 | x: string[] 5 | y: number[] 6 | type: string 7 | name: string 8 | } 9 | 10 | export type SimTimestepInput = { 11 | timestamp: Date, 12 | consumed: number, 13 | throttled: number, 14 | } 15 | 16 | 17 | export function getTraces(config: TableCapacityConfig, records: SimTimestepInput[]) { 18 | const capSim = new TableCapacity(config) 19 | 20 | let timeXs: string[] = [] 21 | let provisionedCapacityTraceYs: number[] = [] 22 | let consumedCapacityTraceYs: number[] = [] 23 | let throttledCapacityTraceYs: number[] = [] 24 | let burstAvailableTraceYs: number[] = [] 25 | 26 | for (let i=0; i { return sum + n }, 0) 44 | } 45 | 46 | function groupBy(arr: T[], fn: (item: T) => any) { 47 | return arr.reduce>((prev, curr) => { 48 | const groupKey = fn(curr); 49 | const group = prev[groupKey] || []; 50 | group.push(curr); 51 | return { ...prev, [groupKey]: group }; 52 | }, {}); 53 | } 54 | 55 | function hourKey(timestamp: number | Date): string { 56 | const d = dayjs(timestamp) 57 | return `${d.year()}|${d.month()}|${d.date()}|${d.hour()}` 58 | } 59 | 60 | export 
60 | export function calculateCost(trace: Trace, pricePerHour: number): number {
61 |     // DynamoDB actually bills by sampling a random minute in the hour and bills for the hour based on the provisioned capacity at that time
62 |     // For the purposes of estimating cost here, we will be conservative and use the max provisioned value for the hour to determine the cost.
63 | 
64 |     const xys = trace.x.map((x, i) => [x, trace.y[i]])
65 |     const byHour = groupBy(xys, (([x, y]) => hourKey(x)))
66 |     const justTheYsCollectedIntoArrays = Object.values(byHour).map(xys => xys.map(xy => parseFloat(xy[1] as string)))
67 |     const hourMaxes = justTheYsCollectedIntoArrays.map((ys) => Math.max(...ys))
68 |     return sum(hourMaxes) * pricePerHour / hourMaxes.length * 24
69 | }
70 | 
71 | export function calculateProvisionedCostFromCloudWatchMetrics(records: SimTimestepInput[], pricePerHour: number): number {
72 |     // DynamoDB actually bills by sampling a random minute in the hour and bills for the hour based on the provisioned capacity at that time
73 |     // For the purposes of estimating cost here, we will be conservative and use the max provisioned value for the hour to determine the cost.
74 | 
75 |     const byHour = groupBy(records, (r => hourKey(r.timestamp)))
76 |     const consumptionSumsInArrays = Object.values(byHour).map(recs => recs.map(r => r.consumed + r.throttled))
77 |     const hourMaxes = consumptionSumsInArrays.map((consumeds) => Math.max(...consumeds))
78 |     return sum(hourMaxes) * pricePerHour / hourMaxes.length * 24
79 | }
80 | 
81 | export function calculateOnDemandCostFromCloudwatchMetrics(records: SimTimestepInput[], pricePerUnit: number) {
82 |     // On-demand doesn't bill for throttles, so just sum consumed * pricePerUnit
83 |     const byHour = groupBy(records, (r => hourKey(r.timestamp)))
84 |     const consumptionsInArrays = Object.values(byHour).map(recs => recs.map(r => r.consumed))
85 |     const hourSums = consumptionsInArrays.map((consumeds) => sum(consumeds))
86 |     return sum(hourSums) * pricePerUnit / hourSums.length * 24
87 | }
88 | 
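To see how the two estimators above diverge, here is a hedged, self-contained comparison on synthetic records — one hour of steady demand. The record values are invented, and the rates only approximate us-east-1 read pricing (provisioned RCU-hours vs. on-demand read request units):

```ts
// Hypothetical comparison of the two estimators on synthetic minute-level records.
// Record values are per-minute sums, the shape CloudWatch exports at Period=60.
import { calculateProvisionedCostFromCloudWatchMetrics,
         calculateOnDemandCostFromCloudwatchMetrics } from './pricing'
import { SimTimestepInput } from './plotting'

const records: SimTimestepInput[] = Array.from({ length: 60 }, (_, i) => ({
  timestamp: new Date(Date.UTC(2023, 0, 1, 0, i)),
  consumed: 6000,   // ~100 units/sec sustained for the whole minute
  throttled: 0,
}))

// The provisioned estimate bills the hour at its max per-minute summed demand...
const provisioned = calculateProvisionedCostFromCloudWatchMetrics(records, 0.00013)
// ...while the on-demand estimate bills every request unit actually consumed.
const onDemand = calculateOnDemandCostFromCloudwatchMetrics(records, 0.00000025)
console.log({ provisioned, onDemand })
```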
89 | function makeObjectiveFn(scalingConfig: TableCapacityConfig, records: SimTimestepInput[], pricePerHour: number) {
90 |     return function(vals: any[]){
91 |         // make new config with adjusted target based on injected value from solver...
92 |         // vals comes in with a target util param as an integer, so make it a float...
93 |         let [
94 |             min,
95 |             max,
96 |             target,
97 |         ] = vals
98 |         target = target / 100.0
99 | 
100 |         const adjustedConfig = {...scalingConfig, min, max, target}
101 | 
102 |         // run the sim with our data
103 |         const traces = getTraces(adjustedConfig, records)
104 | 
105 |         // if we have any throttles, we want to return a prohibitively high number here because we don't even care about price if we throttle
106 |         const throttleCount = sum(traces.throttledCapacityTrace.y.slice(5)) // ignore first 5 minutes of throttles
107 |         if (throttleCount > 0) {
108 |             return 99999999999
109 |         }
110 |         else {
111 |             const numMinutes = traces.provisionedCapacityTrace.x.length
112 |             const numDays = numMinutes / (60 * 24)
113 |             const avgDailyCost = calculateCost(traces.provisionedCapacityTrace, pricePerHour)
114 |             return avgDailyCost
115 |         }
116 |     }
117 | }
118 | 
119 | export function optimize(scalingConfig: TableCapacityConfig, records: SimTimestepInput[], pricePerHour: number) {
120 |     const costObjFn = makeObjectiveFn(scalingConfig, records, pricePerHour)
121 | 
122 |     const summedDemands = records.map((_, i) => { return Math.round((records[i].consumed + records[i].throttled) / 60)})
123 | 
124 |     // Figure out sane values for min and max capacity config
125 |     const minBottom = 1
126 |     const minTop = Math.max(1, Math.max(...summedDemands))
127 |     const maxBottom = Math.max(minTop, Math.max(1, Math.max(...summedDemands) * 0.5))
128 |     const maxTop = Math.max(1, 3 * Math.max(...summedDemands))
129 | 
130 | 
131 |     // TODO: consider dropping min/max cap config from the optimization search. We can use our own brains for this value, right?
132 | 
133 |     const costDims = [
134 |         optimjs.Integer(minBottom, minTop), // min capacity
135 |         optimjs.Integer(maxBottom, maxTop), // max capacity
136 |         optimjs.Integer(20, 90), // target utilization
137 |     ]
138 |     const optimizationSteps = 256
139 |     const dummy_result = optimjs.rs_minimize(costObjFn, costDims, optimizationSteps)
140 |     const [bestMin, bestMax, bestTarget] = dummy_result.best_x
141 |     const bestPrice = dummy_result.best_y
142 | 
143 |     return {
144 |         bestMin,
145 |         bestMax,
146 |         bestTarget,
147 |         bestPrice
148 |     }
149 | }
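optimize() is exactly what optimization-worker.ts (shown earlier) wraps so the GUI can run the search off the main thread. Below is a sketch of a main-thread driver for that worker: the message shapes match the worker source above, but the Parcel-style `new URL(...)` instantiation and the promise wrapper are assumptions about how a caller would wire it up, not code from this repo:

```ts
// Hypothetical main-thread driver for optimization-worker.ts.
import { TableCapacityConfig } from './ddb-sim'
import { SimTimestepInput } from './plotting'

function optimizeInWorker(
  scalingConfig: TableCapacityConfig,
  records: SimTimestepInput[],
  pricePerHour: number,
): Promise<{ bestMin: number; bestMax: number; bestTarget: number; bestPrice: number }> {
  // Parcel resolves worker source referenced via URL relative to the current module (assumed pattern).
  const worker = new Worker(new URL('./optimization-worker.ts', import.meta.url), { type: 'module' })
  const taskId = Math.random().toString(36).slice(2)
  return new Promise((resolve) => {
    worker.onmessage = (e: MessageEvent) => {
      if (e.data.taskId !== taskId) return   // ignore replies addressed to other tasks
      worker.terminate()
      resolve(e.data)
    }
    worker.postMessage({ taskId, scalingConfig, records, pricePerHour })
  })
}
```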
150 | 
151 | 
152 | 
153 | async function getPricesByRegion() {
154 |     const url = `https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonDynamoDB/current/index.csv`
155 |     var s = new Readable()
156 |     const csv = await (await fetch(url)).text()
157 |     s.push(csv) // the string you want
158 |     s.push(null)
159 | 
160 |     let records = new Map()
161 | 
162 |     const parser = s
163 |         .pipe(parse({
164 |             from_line: 7, // first lines are meta data and headers
165 |         }));
166 |     for await (let r of parser) {
167 |         let termType
168 |         if (!["OnDemand", "Reserved"].includes(r[3])) {
169 |             throw new Error(`Can't parse table mode ${r[3]} for line ${r}`)
170 |         }
171 |         else {
172 |             termType = r[3] == "OnDemand" ? TermType.OnDemand : TermType.Reserved
173 |         }
174 | 
175 |         let mode
176 |         if (!["Amazon DynamoDB PayPerRequest Throughput", "Provisioned IOPS"].includes(r[14])) {
177 |             continue // we don't care about rows that are not about on-demand or provisioned-capacity table modes for now
178 |         }
179 |         else {
180 |             mode = r[14] == "Provisioned IOPS" ? TableMode.ProvisionedCapacity : TableMode.OnDemand
181 |         }
182 | 
183 |         const description = r[4]
184 |         const contractLength = r[11] // empty or 1yr or 3yr
185 |         const unit = r[8] // ReadCapacityUnit-Hrs or WriteCapacityUnit-Hrs or ReadRequestUnits or WriteRequestUnits
186 |         const pricePerUnit = parseFloat(r[9])
187 |         const currency = r[10] // USD
188 |         const region = r[23] as string
189 |         const storageClass = r[21].indexOf('IA-') !== -1 ? StorageClass.InfrequentAccess : StorageClass.Standard
190 | 
191 |         const record = {description, termType, contractLength, unit, pricePerUnit, currency, region, mode, storageClass, row: r}
192 |         let updated = records.get(region) || []
193 |         updated.push(record)
194 |         records.set(region, updated)
195 |     }
196 |     return records
197 | }
198 | 
199 | 
200 | export async function getCostPerUnit(region: string, op: ReadOrWrite, mode: TableMode, storageClass: StorageClass ) {
201 |     const prices = (await getPricesByRegion()).get(region)
202 |     if (!prices) {
203 |         throw new Error(`Can't get prices for ${region}`)
204 |     }
205 | 
206 |     const wantUnitByOpAndMode = {
207 |         [ReadOrWrite.Read]: {
208 |             [TableMode.OnDemand]: 'ReadRequestUnits',
209 |             [TableMode.ProvisionedCapacity]: 'ReadCapacityUnit-Hrs',
210 |         },
211 |         [ReadOrWrite.Write]: {
212 |             [TableMode.OnDemand]: 'WriteRequestUnits',
213 |             [TableMode.ProvisionedCapacity]: 'WriteCapacityUnit-Hrs',
214 |         },
215 |     }
216 |     const priceRecords: PriceRecord[] = prices
217 |         .filter(r => r.mode == mode)
218 |         .filter(r => r.pricePerUnit !== 0)
219 |         .filter(r => r.contractLength == '')
220 |         .filter(r => r.unit == wantUnitByOpAndMode[op][mode])
221 |         .filter(r => r.storageClass == storageClass)
222 | 
223 |     if (priceRecords.length === 0) {
224 |         throw new Error(`Can't find a price for ${region} ${op} ${mode} ${storageClass}`)
225 |     }
226 |     return priceRecords[0].pricePerUnit
227 | }
--------------------------------------------------------------------------------
/scripts/dump-stats-for-region.ts:
--------------------------------------------------------------------------------
1 | import { makeRecordsForSimulator } from "../csv-ingestion";
2 | import { getTraces } from "../plotting";
3 | import { ReadOrWrite, TableMode, StorageClass, getCostPerUnit, optimize, calculateOnDemandCostFromCloudwatchMetrics, calculateProvisionedCostFromCloudWatchMetrics } from "../pricing";
4 | import { fetchTableMetrics, getAllTableDetails, TableDetails } from "../table-consumption-fetcher";
5 | import { readFileSync, writeFileSync } from 'fs'
6 | 
7 | 
8 | const args = process.argv.slice(2)
9 | console.log(args)
10 | if (args.length < 7) {
11 |     console.log('Must pass: region profile roleArn start end table-details-dump.json destination-dir [startWithTableName]')
12 |     process.exit(1)
13 | }
14 | const [region, profile, roleArn, startTimeStr, endTimeStr, tableDetailsDumpPath, destinationDir, startWithTableName] = args
15 | const startTime = new Date(Date.parse(startTimeStr))
16 | const endTime = new Date(Date.parse(endTimeStr))
17 | const allTableDetails: TableDetails[] = JSON.parse(readFileSync(tableDetailsDumpPath, 'utf8'))
18 | 
19 | async function main(){
20 |     console.log("region,tableName,tableMode,readOrWrite,bestMin,bestMax,bestTarget,bestPrice,currentAvgDailyCost")
21 | 
22 |     var stopSkipping: boolean
23 |     stopSkipping = true
24 |     if (startWithTableName !== undefined) {
25 |         stopSkipping = false
26 |     }
27 | 
28 |     // For now, only process the on-demand tables
29 |     for (let tableDetails of allTableDetails.filter(t => t.mode == TableMode.OnDemand)) {
30 |         const tableName = tableDetails.name
31 |         if (stopSkipping == false && tableName != startWithTableName) {
32 |             process.stderr.write(`Skipping table: ${tableDetails.name}\n`)
33 |             continue
34 |         }
35 |         stopSkipping = true
36 |         process.stderr.write(`Processing table: ${tableDetails.name}\n`)
37 | 
38 |         const stats = await fetchTableMetrics({region, profile, roleArn, tableName, startTime, endTime})
39 |         const filename = [region, profile, tableName].join('_') + '.json'
40 |         writeFileSync(`${destinationDir}/${filename}`, JSON.stringify(stats))
41 |         process.stderr.write(`Done with table: ${tableDetails.name}\n`)
42 |     }
43 |     process.exit(0)
44 | }
45 | 
46 | main()
--------------------------------------------------------------------------------
/scripts/dump-table-details-for-region.ts:
--------------------------------------------------------------------------------
1 | import { getAllTableDetails } from "../table-consumption-fetcher";
2 | 
3 | const args = process.argv.slice(2)
4 | if (args.length !== 3) {
5 |     console.log('Must pass: region profile roleArn')
6 |     process.exit(1)
7 | }
8 | const [region, profile, roleArn] = args
9 | 
10 | async function main(){
11 |     process.stderr.write(`Fetching all table details for ${region} via role ${roleArn}\n`)
12 |     const allTableDetails = await getAllTableDetails({region, profile, roleArn})
13 | 
14 |     console.log(JSON.stringify(allTableDetails))
15 | }
16 | 
17 | main()
--------------------------------------------------------------------------------
/scripts/make-csv-for-table.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # set -x
3 | # set -e
4 | 
5 | # This script prints out a CSV file for the given profile/region/table
6 | # Useful for passing into the simulator to get an optimized config and cost estimate
7 | 
8 | # Usage: make-csv-for-table.sh PROFILE_NAME REGION TABLE START END
9 | # PROFILE_NAME: string matching a named section in your ~/.aws/credentials file
10 | # REGION: AWS region like us-east-1
11 | # TABLE: DDB table like my_ddb_table_name
12 | # START: ISO datetime string like 2001-02-03T11:22:33.000Z
13 | # END: ISO datetime string like 2001-02-03T11:22:33.000Z
14 | 
15 | # check if args exist
16 | if [ $# -ne 5 ]; then
17 |     echo "Usage: $0 <profile_name> <region> <table_name> <start> <end>"
18 |     exit 1
19 | fi
20 | 
21 | # assign args
22 | profile=$1
23 | region=$2
24 | table_name=$3
25 | from=$4
26 | to=$5
27 | 
28 | # setup working dir
29 | cd "$(dirname "$0")"
30 | pushd ..
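The script's header says it prints a CSV, but the embedded ts-node snippet that follows only fetches metrics — the emitting step isn't visible in this dump. A hypothetical version of that step, using the record fields that fetchTableMetrics declares in table-consumption-fetcher.ts (`printStatsCsv` is an invented name; it could be called on `stats` inside the heredoc below):

```ts
// Hypothetical CSV-emitting step; columns mirror fetchTableMetrics' declared return shape.
type TableMetricsRow = {
  timestamp: Date
  consumedRead: number
  consumedWrite: number
  throttledReads: number
  throttledWrites: number
}

function printStatsCsv(stats: TableMetricsRow[]) {
  console.log('timestamp,consumedRead,consumedWrite,throttledReads,throttledWrites')
  for (const s of stats) {
    console.log([
      s.timestamp.toISOString(),
      s.consumedRead, s.consumedWrite,
      s.throttledReads, s.throttledWrites,
    ].join(','))
  }
}
```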
31 | 32 | cat << EOF | npx ts-node 33 | import { fetchTableMetrics } from './table-consumption-fetcher' 34 | 35 | (async()=>{ 36 | 37 | const startTime = new Date(Date.parse('$from')) 38 | const endTime = new Date(Date.parse('$to')) 39 | const stats = await fetchTableMetrics({ 40 | profile: '$profile', 41 | region: '$region', 42 | tableName: '$table_name', 43 | startTime, 44 | endTime, 45 | }) 46 | 47 | })() 48 | EOF -------------------------------------------------------------------------------- /scripts/make-optimize-report-for-region.ts: -------------------------------------------------------------------------------- 1 | import { makeRecordsForSimulator } from "../csv-ingestion"; 2 | import { getTraces } from "../plotting"; 3 | import { ReadOrWrite, TableMode, StorageClass, getCostPerUnit, optimize, calculateOnDemandCostFromCloudwatchMetrics, calculateProvisionedCostFromCloudWatchMetrics } from "../pricing"; 4 | import { fetchTableMetrics, getAllTableDetails, TableDetails } from "../table-consumption-fetcher"; 5 | import { readFileSync } from 'fs' 6 | 7 | const args = process.argv.slice(2) 8 | console.log(args) 9 | if (args.length < 6) { 10 | console.log('Must pass: region profile roleArn start end table-details-dump.json [table-stats-dir]') 11 | process.exit(1) 12 | } 13 | const [region, profile, roleArn, startTimeStr, endTimeStr, tableDetailsDumpPath, tableStatsDirPath, startWithTableName] = args 14 | const startTime = new Date(Date.parse(startTimeStr)) 15 | const endTime = new Date(Date.parse(endTimeStr)) 16 | const allTableDetails: TableDetails[] = JSON.parse(readFileSync(tableDetailsDumpPath, 'utf8')) 17 | 18 | async function main(){ 19 | var stopSkipping: boolean 20 | stopSkipping = true 21 | if (startWithTableName !== undefined) { 22 | stopSkipping = false 23 | } 24 | 25 | console.log("region,tableName,tableMode,readOrWrite,bestMin,bestMax,bestTarget,bestPrice,currentAvgDailyCost") 26 | // For now, only process the on-demand tables 27 | for (let tableDetails of allTableDetails.filter(t => t.mode == TableMode.OnDemand)) { 28 | if (stopSkipping == false && tableDetails.name != startWithTableName) { 29 | process.stderr.write(`Skipping table: ${tableDetails.name}\n`) 30 | continue 31 | } 32 | 33 | stopSkipping = true 34 | 35 | process.stderr.write(`Processing table: ${tableDetails.name}\n`) 36 | const tableName = tableDetails.name 37 | 38 | let stats: any[] 39 | if (tableStatsDirPath == "") { 40 | process.stderr.write(`Fetching table metrics for: ${tableName}\n`) 41 | stats = await fetchTableMetrics({region, profile, roleArn, tableName, startTime, endTime}) 42 | } 43 | else { 44 | const filename = [region, profile, tableName].join('_') + '.json' 45 | stats = JSON.parse(readFileSync(`${tableStatsDirPath}/${filename}`, 'utf-8')) 46 | stats.forEach(s => s.timestamp = typeof s.timestamp == "string" ? 
new Date(Date.parse(s.timestamp)) : s.timestamp )
47 |         }
48 | 
49 |         const readUnitCost = await getCostPerUnit(region, ReadOrWrite.Read, tableDetails.mode, tableDetails.storageClass)
50 |         const writeUnitCost = await getCostPerUnit(region, ReadOrWrite.Write, tableDetails.mode, tableDetails.storageClass)
51 | 
52 |         const readUnitCostProvisioned = await getCostPerUnit(region, ReadOrWrite.Read, TableMode.ProvisionedCapacity, tableDetails.storageClass)
53 |         const writeUnitCostProvisioned = await getCostPerUnit(region, ReadOrWrite.Write, TableMode.ProvisionedCapacity, tableDetails.storageClass)
54 | 
55 |         const { readRecords, writeRecords } = makeRecordsForSimulator(stats)
56 | 
57 |         // TODO: refactor -- only the scaling delay matters below (the other values are overwritten by the optimizer)
58 |         const config = {min: 0, max: 0, target: 0.5, scaling_delay_in_seconds: 2*60}
59 | 
60 |         // If table is in OnDemand, try to project its avg daily cost
61 |         let readCost: number
62 |         let writeCost: number
63 |         if (tableDetails.mode == TableMode.OnDemand) {
64 |             readCost = calculateOnDemandCostFromCloudwatchMetrics(readRecords, readUnitCost)
65 |             writeCost = calculateOnDemandCostFromCloudwatchMetrics(writeRecords, writeUnitCost)
66 |         } else {
67 |             readCost = calculateProvisionedCostFromCloudWatchMetrics(readRecords, readUnitCost)
68 |             writeCost = calculateProvisionedCostFromCloudWatchMetrics(writeRecords, writeUnitCost)
69 |         }
70 | 
71 |         const writeLine = (mode: string, readWrite: string, o: any, currentAvgDailyCost: number) => console.log([region, tableName, mode, readWrite, o.bestMin, o.bestMax, o.bestTarget, o.bestPrice, currentAvgDailyCost].join(','))
72 | 
73 |         let o
74 |         o = optimize(config, readRecords, readUnitCostProvisioned)
75 |         writeLine(tableDetails.mode, 'read', o, readCost)
76 | 
77 |         o = optimize(config, writeRecords, writeUnitCostProvisioned)
78 |         writeLine(tableDetails.mode, 'write', o, writeCost)
79 |         process.stderr.write(`Done with table: ${tableDetails.name}\n`)
80 |     }
81 |     process.exit(0)
82 | }
83 | 
84 | main()
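The script above and the one below both re-hydrate `timestamp` fields by hand after JSON.parse, since JSON leaves Dates as ISO strings. The same idiom as a small shared helper — the `reviveTimestamps` name is hypothetical; no such helper exists in this repo:

```ts
// Hypothetical shared helper for the timestamp-revival idiom used by both region scripts.
type Stamped = { timestamp: string | Date }

function reviveTimestamps(rows: Stamped[]): void {
  for (const r of rows) {
    if (typeof r.timestamp === 'string') {
      // mutate in place, exactly as the scripts' forEach does
      r.timestamp = new Date(Date.parse(r.timestamp))
    }
  }
}
```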
"string" ? new Date(Date.parse(s.timestamp)) : s.timestamp ) 24 | 25 | const readUnitCostProvisioned = await getCostPerUnit(region, ReadOrWrite.Read, TableMode.ProvisionedCapacity, StorageClass.Standard) 26 | const writeUnitCostProvisioned = await getCostPerUnit(region, ReadOrWrite.Write, TableMode.ProvisionedCapacity, StorageClass.Standard) 27 | 28 | const { readRecords, writeRecords } = makeRecordsForSimulator(stats) 29 | 30 | const config: TableCapacityConfig = {min: parseInt(row.bestMin, 10), max: parseInt(row.bestMax, 10), scaling_delay_in_seconds: 120, target: parseInt(row.bestTarget, 10) / 100} 31 | 32 | const readTraces = getTraces(config, readRecords) 33 | const writeTraces = getTraces(config, writeRecords) 34 | 35 | let trace 36 | if (row.readOrWrite == ReadOrWrite.Read) { 37 | trace = readTraces.provisionedCapacityTrace 38 | console.log([row.region, row.tableName, row.tableMode, row.readOrWrite, row.bestMin, row.bestMax, row.bestTarget, calculateCost(trace, readUnitCostProvisioned), row.currentAvgDailyCost].join(",")) 39 | } else { 40 | trace = writeTraces.provisionedCapacityTrace 41 | console.log([row.region, row.tableName, row.tableMode, row.readOrWrite, row.bestMin, row.bestMax, row.bestTarget, calculateCost(trace, writeUnitCostProvisioned), row.currentAvgDailyCost].join(",")) 42 | } 43 | 44 | } 45 | 46 | 47 | // process.exit(0) 48 | } 49 | 50 | main() -------------------------------------------------------------------------------- /scripts/print-table-names.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -x 3 | # set -e 4 | 5 | # This script prints out a list of every DDB table name in a region. 6 | # Useful for the first step in a data pipeline for running scaling config optimization for every table... 7 | 8 | # Usage: print-table-names.sh REGION PROFILE_NAME 9 | # REGION: AWS region like us-east-1 10 | # PROFILE_NAME: string matching a named section in your ~/.aws/credentials file 11 | 12 | # check if region and profile name were provided 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | # get start and end times 19 | region=$1 20 | profile=$2 21 | 22 | cd "$(dirname "$0")" 23 | pushd .. 24 | 25 | cat << EOF | npx ts-node 26 | import { getAllTableDetails } from './table-consumption-fetcher' 27 | 28 | (async()=>{ 29 | const details = await getAllTableDetails({region: '$region', profile: '$profile'}) 30 | for (let d of details) { 31 | console.log(d) 32 | } 33 | })() 34 | EOF 35 | 36 | popd -------------------------------------------------------------------------------- /skeleton.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Skeleton V2.0.4 3 | * Copyright 2014, Dave Gamache 4 | * www.getskeleton.com 5 | * Free to use under the MIT license. 
6 | * http://www.opensource.org/licenses/mit-license.php 7 | * 12/29/2014 8 | */ 9 | 10 | 11 | /* Table of contents 12 | –––––––––––––––––––––––––––––––––––––––––––––––––– 13 | - Grid 14 | - Base Styles 15 | - Typography 16 | - Links 17 | - Buttons 18 | - Forms 19 | - Lists 20 | - Code 21 | - Tables 22 | - Spacing 23 | - Utilities 24 | - Clearing 25 | - Media Queries 26 | */ 27 | 28 | 29 | /* Grid 30 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 31 | .container { 32 | position: relative; 33 | width: 100%; 34 | max-width: 960px; 35 | margin: 0 auto; 36 | padding: 0 20px; 37 | box-sizing: border-box; } 38 | .column, 39 | .columns { 40 | width: 100%; 41 | float: left; 42 | box-sizing: border-box; } 43 | 44 | /* For devices larger than 400px */ 45 | @media (min-width: 400px) { 46 | .container { 47 | width: 85%; 48 | padding: 0; } 49 | } 50 | 51 | /* For devices larger than 550px */ 52 | @media (min-width: 550px) { 53 | .container { 54 | width: 80%; } 55 | .column, 56 | .columns { 57 | margin-left: 4%; } 58 | .column:first-child, 59 | .columns:first-child { 60 | margin-left: 0; } 61 | 62 | .one.column, 63 | .one.columns { width: 4.66666666667%; } 64 | .two.columns { width: 13.3333333333%; } 65 | .three.columns { width: 22%; } 66 | .four.columns { width: 30.6666666667%; } 67 | .five.columns { width: 39.3333333333%; } 68 | .six.columns { width: 48%; } 69 | .seven.columns { width: 56.6666666667%; } 70 | .eight.columns { width: 65.3333333333%; } 71 | .nine.columns { width: 74.0%; } 72 | .ten.columns { width: 82.6666666667%; } 73 | .eleven.columns { width: 91.3333333333%; } 74 | .twelve.columns { width: 100%; margin-left: 0; } 75 | 76 | .one-third.column { width: 30.6666666667%; } 77 | .two-thirds.column { width: 65.3333333333%; } 78 | 79 | .one-half.column { width: 48%; } 80 | 81 | /* Offsets */ 82 | .offset-by-one.column, 83 | .offset-by-one.columns { margin-left: 8.66666666667%; } 84 | .offset-by-two.column, 85 | .offset-by-two.columns { margin-left: 17.3333333333%; } 86 | .offset-by-three.column, 87 | .offset-by-three.columns { margin-left: 26%; } 88 | .offset-by-four.column, 89 | .offset-by-four.columns { margin-left: 34.6666666667%; } 90 | .offset-by-five.column, 91 | .offset-by-five.columns { margin-left: 43.3333333333%; } 92 | .offset-by-six.column, 93 | .offset-by-six.columns { margin-left: 52%; } 94 | .offset-by-seven.column, 95 | .offset-by-seven.columns { margin-left: 60.6666666667%; } 96 | .offset-by-eight.column, 97 | .offset-by-eight.columns { margin-left: 69.3333333333%; } 98 | .offset-by-nine.column, 99 | .offset-by-nine.columns { margin-left: 78.0%; } 100 | .offset-by-ten.column, 101 | .offset-by-ten.columns { margin-left: 86.6666666667%; } 102 | .offset-by-eleven.column, 103 | .offset-by-eleven.columns { margin-left: 95.3333333333%; } 104 | 105 | .offset-by-one-third.column, 106 | .offset-by-one-third.columns { margin-left: 34.6666666667%; } 107 | .offset-by-two-thirds.column, 108 | .offset-by-two-thirds.columns { margin-left: 69.3333333333%; } 109 | 110 | .offset-by-one-half.column, 111 | .offset-by-one-half.columns { margin-left: 52%; } 112 | 113 | } 114 | 115 | 116 | /* Base Styles 117 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 118 | /* NOTE 119 | html is set to 62.5% so that all the REM measurements throughout Skeleton 120 | are based on 10px sizing. 
So basically 1.5rem = 15px :) */ 121 | html { 122 | font-size: 62.5%; } 123 | body { 124 | font-size: 1.5em; /* currently ems cause chrome bug misinterpreting rems on body element */ 125 | line-height: 1.6; 126 | font-weight: 400; 127 | font-family: "Raleway", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif; 128 | color: #222; } 129 | 130 | 131 | /* Typography 132 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 133 | h1, h2, h3, h4, h5, h6 { 134 | margin-top: 0; 135 | margin-bottom: 2rem; 136 | font-weight: 300; } 137 | h1 { font-size: 4.0rem; line-height: 1.2; letter-spacing: -.1rem;} 138 | h2 { font-size: 3.6rem; line-height: 1.25; letter-spacing: -.1rem; } 139 | h3 { font-size: 3.0rem; line-height: 1.3; letter-spacing: -.1rem; } 140 | h4 { font-size: 2.4rem; line-height: 1.35; letter-spacing: -.08rem; } 141 | h5 { font-size: 1.8rem; line-height: 1.5; letter-spacing: -.05rem; } 142 | h6 { font-size: 1.5rem; line-height: 1.6; letter-spacing: 0; } 143 | 144 | /* Larger than phablet */ 145 | @media (min-width: 550px) { 146 | h1 { font-size: 5.0rem; } 147 | h2 { font-size: 4.2rem; } 148 | h3 { font-size: 3.6rem; } 149 | h4 { font-size: 3.0rem; } 150 | h5 { font-size: 2.4rem; } 151 | h6 { font-size: 1.5rem; } 152 | } 153 | 154 | p { 155 | margin-top: 0; } 156 | 157 | 158 | /* Links 159 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 160 | a { 161 | color: #1EAEDB; } 162 | a:hover { 163 | color: #0FA0CE; } 164 | 165 | 166 | /* Buttons 167 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 168 | .button, 169 | button, 170 | input[type="submit"], 171 | input[type="reset"], 172 | input[type="button"] { 173 | display: inline-block; 174 | height: 38px; 175 | padding: 0 30px; 176 | color: #555; 177 | text-align: center; 178 | font-size: 11px; 179 | font-weight: 600; 180 | line-height: 38px; 181 | letter-spacing: .1rem; 182 | text-transform: uppercase; 183 | text-decoration: none; 184 | white-space: nowrap; 185 | background-color: transparent; 186 | border-radius: 4px; 187 | border: 1px solid #bbb; 188 | cursor: pointer; 189 | box-sizing: border-box; } 190 | .button:hover, 191 | button:hover, 192 | input[type="submit"]:hover, 193 | input[type="reset"]:hover, 194 | input[type="button"]:hover, 195 | .button:focus, 196 | button:focus, 197 | input[type="submit"]:focus, 198 | input[type="reset"]:focus, 199 | input[type="button"]:focus { 200 | color: #333; 201 | border-color: #888; 202 | outline: 0; } 203 | .button.button-primary, 204 | button.button-primary, 205 | input[type="submit"].button-primary, 206 | input[type="reset"].button-primary, 207 | input[type="button"].button-primary { 208 | color: #FFF; 209 | background-color: #33C3F0; 210 | border-color: #33C3F0; } 211 | .button.button-primary:hover, 212 | button.button-primary:hover, 213 | input[type="submit"].button-primary:hover, 214 | input[type="reset"].button-primary:hover, 215 | input[type="button"].button-primary:hover, 216 | .button.button-primary:focus, 217 | button.button-primary:focus, 218 | input[type="submit"].button-primary:focus, 219 | input[type="reset"].button-primary:focus, 220 | input[type="button"].button-primary:focus { 221 | color: #FFF; 222 | background-color: #1EAEDB; 223 | border-color: #1EAEDB; } 224 | 225 | 226 | /* Forms 227 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 228 | input[type="email"], 229 | input[type="number"], 230 | input[type="search"], 231 | input[type="text"], 232 | input[type="tel"], 233 | input[type="url"], 234 | input[type="password"], 235 | textarea, 236 | 
select { 237 | height: 38px; 238 | padding: 6px 10px; /* The 6px vertically centers text on FF, ignored by Webkit */ 239 | background-color: #fff; 240 | border: 1px solid #D1D1D1; 241 | border-radius: 4px; 242 | box-shadow: none; 243 | box-sizing: border-box; } 244 | /* Removes awkward default styles on some inputs for iOS */ 245 | input[type="email"], 246 | input[type="number"], 247 | input[type="search"], 248 | input[type="text"], 249 | input[type="tel"], 250 | input[type="url"], 251 | input[type="password"], 252 | textarea { 253 | -webkit-appearance: none; 254 | -moz-appearance: none; 255 | appearance: none; } 256 | textarea { 257 | min-height: 65px; 258 | padding-top: 6px; 259 | padding-bottom: 6px; } 260 | input[type="email"]:focus, 261 | input[type="number"]:focus, 262 | input[type="search"]:focus, 263 | input[type="text"]:focus, 264 | input[type="tel"]:focus, 265 | input[type="url"]:focus, 266 | input[type="password"]:focus, 267 | textarea:focus, 268 | select:focus { 269 | border: 1px solid #33C3F0; 270 | outline: 0; } 271 | label, 272 | legend { 273 | display: block; 274 | margin-bottom: .5rem; 275 | font-weight: 600; } 276 | fieldset { 277 | padding: 0; 278 | border-width: 0; } 279 | input[type="checkbox"], 280 | input[type="radio"] { 281 | display: inline; } 282 | label > .label-body { 283 | display: inline-block; 284 | margin-left: .5rem; 285 | font-weight: normal; } 286 | 287 | 288 | /* Lists 289 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 290 | ul { 291 | list-style: circle inside; } 292 | ol { 293 | list-style: decimal inside; } 294 | ol, ul { 295 | padding-left: 0; 296 | margin-top: 0; } 297 | ul ul, 298 | ul ol, 299 | ol ol, 300 | ol ul { 301 | margin: 1.5rem 0 1.5rem 3rem; 302 | font-size: 90%; } 303 | li { 304 | margin-bottom: 1rem; } 305 | 306 | 307 | /* Code 308 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 309 | code { 310 | padding: .2rem .5rem; 311 | margin: 0 .2rem; 312 | font-size: 90%; 313 | white-space: nowrap; 314 | background: #F1F1F1; 315 | border: 1px solid #E1E1E1; 316 | border-radius: 4px; } 317 | pre > code { 318 | display: block; 319 | padding: 1rem 1.5rem; 320 | white-space: pre; } 321 | 322 | 323 | /* Tables 324 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 325 | th, 326 | td { 327 | padding: 12px 15px; 328 | text-align: left; 329 | border-bottom: 1px solid #E1E1E1; } 330 | th:first-child, 331 | td:first-child { 332 | padding-left: 0; } 333 | th:last-child, 334 | td:last-child { 335 | padding-right: 0; } 336 | 337 | 338 | /* Spacing 339 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 340 | button, 341 | .button { 342 | margin-bottom: 1rem; } 343 | input, 344 | textarea, 345 | select, 346 | fieldset { 347 | margin-bottom: 1.5rem; } 348 | pre, 349 | blockquote, 350 | dl, 351 | figure, 352 | table, 353 | p, 354 | ul, 355 | ol, 356 | form { 357 | margin-bottom: 2.5rem; } 358 | 359 | 360 | /* Utilities 361 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 362 | .u-full-width { 363 | width: 100%; 364 | box-sizing: border-box; } 365 | .u-max-full-width { 366 | max-width: 100%; 367 | box-sizing: border-box; } 368 | .u-pull-right { 369 | float: right; } 370 | .u-pull-left { 371 | float: left; } 372 | 373 | 374 | /* Misc 375 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 376 | hr { 377 | margin-top: 3rem; 378 | margin-bottom: 3.5rem; 379 | border-width: 0; 380 | border-top: 1px solid #E1E1E1; } 381 | 382 | 383 | /* Clearing 384 | –––––––––––––––––––––––––––––––––––––––––––––––––– */ 385 | 386 | /* Self 
Clearing Goodness */
387 | .container:after,
388 | .row:after,
389 | .u-cf {
390 |   content: "";
391 |   display: table;
392 |   clear: both; }
393 | 
394 | 
395 | /* Media Queries
396 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
397 | /*
398 | Note: The best way to structure the use of media queries is to create the queries
399 | near the relevant code. For example, if you wanted to change the styles for buttons
400 | on small devices, paste the mobile query code up in the buttons section and style it
401 | there.
402 | */
403 | 
404 | 
405 | /* Larger than mobile */
406 | @media (min-width: 400px) {}
407 | 
408 | /* Larger than phablet (also point when grid becomes active) */
409 | @media (min-width: 550px) {}
410 | 
411 | /* Larger than tablet */
412 | @media (min-width: 750px) {}
413 | 
414 | /* Larger than desktop */
415 | @media (min-width: 1000px) {}
416 | 
417 | /* Larger than Desktop HD */
418 | @media (min-width: 1200px) {}
419 | 
--------------------------------------------------------------------------------
/table-consumption-fetcher.ts:
--------------------------------------------------------------------------------
1 | import { getCredentialsFromAssumingRole } from "./aws-credentials";
2 | import { fromIni, fromTemporaryCredentials } from "@aws-sdk/credential-providers";
3 | import { CloudWatchClient, GetMetricDataCommand } from "@aws-sdk/client-cloudwatch";
4 | import { DynamoDBClient, ListTablesCommand, DescribeTableCommand, ExportConflictException } from "@aws-sdk/client-dynamodb";
5 | import { ApplicationAutoScalingClient, DescribeScalableTargetsCommand, DescribeScalingPoliciesCommand, ScalingPolicy } from "@aws-sdk/client-application-auto-scaling";
6 | import { TableMode, StorageClass } from "./pricing";
7 | 
8 | type FetchTableMetricsParams = {
9 |     region: string
10 |     profile: string
11 |     roleArn: string
12 |     tableName: string
13 |     startTime: Date
14 |     endTime: Date
15 | }
16 | 
17 | export type TableDetails = {
18 |     region: string
19 |     name: string
20 |     mode: TableMode
21 |     storageClass: StorageClass
22 |     provisionedRCUs: number
23 |     provisionedWCUs: number
24 |     scalingPolicies: { read: ScalingPolicy | undefined, write: ScalingPolicy | undefined }
25 | }
26 | 
27 | 
28 | async function getTableDetails(ddbClient: DynamoDBClient, scalingClient: ApplicationAutoScalingClient, name: string): Promise<TableDetails> {
29 |     let retryBackoff = 100
30 | 
31 |     let detailsResponse
32 |     while (detailsResponse == undefined) {
33 |         try {
34 |             detailsResponse = await ddbClient.send(new DescribeTableCommand({ TableName: name }))
35 |         }
36 |         catch (error: any) {
37 |             if (error.__type == 'ThrottlingException') {
38 |                 process.stderr.write(`Throttled. Sleeping ${retryBackoff} \n`)
39 |                 await sleep(retryBackoff)
40 |                 retryBackoff *= 2
41 |             }
42 |             else {
43 |                 throw error
44 |             }
45 |         }
46 |     }
47 |     retryBackoff = 100
48 | 
49 |     // console.log(detailsResponse)
50 | 
51 |     // Get scaling info for the table, if it exists
52 |     // 1. Get the ScalingPolicies for this table (for target utilization values)
53 |     let scalingResponse
54 |     while (scalingResponse == undefined) {
55 |         try {
56 |             scalingResponse = await scalingClient.send(new DescribeScalingPoliciesCommand({ServiceNamespace: 'dynamodb', ResourceId: `table/${name}`}))
57 |         }
58 |         catch (error: any) {
59 |             if (error.__type == 'ThrottlingException') {
60 |                 process.stderr.write(`Throttled. Sleeping ${retryBackoff} \n`)
61 |                 await sleep(retryBackoff)
62 |                 retryBackoff *= 2
63 |             }
64 |             else {
65 |                 throw error
66 |             }
67 |         }
68 |     }
69 |     retryBackoff = 100
70 | 
71 | 
72 |     let scalingPolicies: {read: ScalingPolicy | undefined, write: ScalingPolicy | undefined} = {
73 |         read: undefined,
74 |         write: undefined
75 |     }
76 |     if (scalingResponse.ScalingPolicies && scalingResponse.ScalingPolicies.length > 0) {
77 |         scalingPolicies.read = scalingResponse.ScalingPolicies.find(p => p.ScalableDimension == "dynamodb:table:ReadCapacityUnits")
78 |         scalingPolicies.write = scalingResponse.ScalingPolicies.find(p => p.ScalableDimension == "dynamodb:table:WriteCapacityUnits")
79 |     }
80 |     // 2. Get the ScalableTargets for this table (for min/max values)
81 |     // TODO...^ do we need this?
82 | 
83 |     let mode
84 |     if (detailsResponse.Table?.BillingModeSummary === undefined) {
85 |         // BillingModeSummary is empty if the table is really old, so it must be ProvisionedCapacity
86 |         mode = TableMode.ProvisionedCapacity
87 |     } else if (!['PROVISIONED', 'PAY_PER_REQUEST'].includes(detailsResponse.Table?.BillingModeSummary?.BillingMode as string)) {
88 |         throw new Error(`Can't parse table ${name} billing mode: ${detailsResponse.Table?.BillingModeSummary?.BillingMode}`)
89 |     } else {
90 |         mode = detailsResponse.Table?.BillingModeSummary?.BillingMode == 'PROVISIONED' ? TableMode.ProvisionedCapacity : TableMode.OnDemand
91 |     }
92 |     let storageClass
93 |     if (detailsResponse.Table?.TableClassSummary == undefined) {
94 |         // TableClassSummary is empty if the table is really old, so it must be Standard
95 |         storageClass = StorageClass.Standard
96 |     }
97 |     else if (!['STANDARD', 'STANDARD_INFREQUENT_ACCESS'].includes(detailsResponse.Table?.TableClassSummary?.TableClass as string)) {
98 |         throw new Error(`Can't parse table ${name} table class: ${detailsResponse.Table?.TableClassSummary?.TableClass}`)
99 |     } else {
100 |         storageClass = detailsResponse.Table?.TableClassSummary?.TableClass == 'STANDARD' ? StorageClass.Standard : StorageClass.InfrequentAccess
101 |     }
102 |     const provisionedRCUs = detailsResponse.Table?.ProvisionedThroughput?.ReadCapacityUnits as number
103 |     const provisionedWCUs = detailsResponse.Table?.ProvisionedThroughput?.WriteCapacityUnits as number
104 |     const region = ddbClient.config.region as string
105 | 
106 |     const details = { region, name, mode, storageClass, provisionedRCUs, provisionedWCUs, scalingPolicies }
107 |     return details
108 | }
109 | 
110 | function sleep(time: number) {
111 |     return new Promise(resolve => setTimeout(resolve, time));
112 | }
113 | 
114 | export async function getAllTableDetails({ region, profile, roleArn }: { region: string, profile: string, roleArn: string }): Promise<TableDetails[]> {
115 |     const credentials = await getCredentialsFromAssumingRole(region, profile, roleArn)
116 |     if (credentials === undefined) {
117 |         throw new Error("Couldn't get credentials")
118 |     }
119 | 
120 |     const ddbClient = new DynamoDBClient({
121 |         region,
122 |         credentials,
123 |     })
124 | 
125 |     const scalingClient = new ApplicationAutoScalingClient({
126 |         region,
127 |         credentials,
128 |     })
129 | 
130 |     let allDetails: Promise<TableDetails>[] = []
131 | 
132 |     let lastEvaluatedTableName
133 |     const DEBUG_MAX_RECORDS = -1
134 |     let record_count = 0
135 |     do {
136 |         process.stderr.write(`Fetching batch of table details from DynamoDB. 
Start Table Name: ${lastEvaluatedTableName}\n`) 137 | const listResponse = await ddbClient.send(new ListTablesCommand({ ExclusiveStartTableName: lastEvaluatedTableName })) 138 | lastEvaluatedTableName = listResponse.LastEvaluatedTableName as any 139 | if (listResponse.TableNames) { 140 | for (let name of listResponse.TableNames) { 141 | if (DEBUG_MAX_RECORDS > 0 && record_count > DEBUG_MAX_RECORDS) { continue } 142 | process.stderr.write(`Fetching table details from DynamoDB. ${name}\n`) 143 | allDetails.push(getTableDetails(ddbClient, scalingClient, name)) 144 | record_count += 1 145 | await sleep(250) 146 | } 147 | } 148 | // await sleep(1000) 149 | } while (lastEvaluatedTableName !== undefined) 150 | 151 | return Promise.all(allDetails) 152 | } 153 | 154 | 155 | export async function fetchTableMetrics(params: FetchTableMetricsParams): Promise<{ timestamp: Date, consumedRead: number, consumedWrite: number, throttledReads: number, throttledWrites: number }[]> { 156 | const credentials = await getCredentialsFromAssumingRole(params.region, params.profile, params.roleArn) 157 | if (credentials === undefined) { 158 | throw new Error("Couldn't get credentials") 159 | } 160 | 161 | const cloudwatch = new CloudWatchClient({ 162 | region: params.region, 163 | credentials, 164 | }) 165 | 166 | const namespace = "AWS/DynamoDB"; 167 | const period = 60; // seconds 168 | 169 | const consumedReads = { 170 | Id: "consumedRead", 171 | MetricStat: { 172 | Metric: { 173 | Dimensions: [ 174 | { 175 | Name: "TableName", 176 | Value: params.tableName 177 | } 178 | ], 179 | MetricName: "ConsumedReadCapacityUnits", 180 | Namespace: namespace 181 | }, 182 | Period: period, 183 | Stat: "Sum" 184 | }, 185 | ReturnData: true 186 | } 187 | 188 | const consumedWrites = { 189 | Id: "consumedWrite", 190 | MetricStat: { 191 | Metric: { 192 | Dimensions: [ 193 | { 194 | Name: "TableName", 195 | Value: params.tableName 196 | } 197 | ], 198 | MetricName: "ConsumedWriteCapacityUnits", 199 | Namespace: namespace 200 | }, 201 | Period: period, 202 | Stat: "Sum" 203 | }, 204 | ReturnData: true 205 | } 206 | 207 | const throttledReads = { 208 | Id: "throttledReads", 209 | MetricStat: { 210 | Metric: { 211 | Dimensions: [ 212 | { 213 | Name: "TableName", 214 | Value: params.tableName 215 | } 216 | ], 217 | MetricName: "ReadThrottleEvents", 218 | Namespace: namespace 219 | }, 220 | Period: period, 221 | Stat: "Sum" 222 | }, 223 | ReturnData: true 224 | } 225 | 226 | const throttledWrites = { 227 | Id: "throttledWrites", 228 | MetricStat: { 229 | Metric: { 230 | Dimensions: [ 231 | { 232 | Name: "TableName", 233 | Value: params.tableName 234 | } 235 | ], 236 | MetricName: "WriteThrottleEvents", 237 | Namespace: namespace 238 | }, 239 | Period: period, 240 | Stat: "Sum" 241 | }, 242 | ReturnData: true 243 | } 244 | 245 | const provisionedReads = { 246 | Id: "provisionedReads", 247 | MetricStat: { 248 | Metric: { 249 | Dimensions: [ 250 | { 251 | Name: "TableName", 252 | Value: params.tableName 253 | } 254 | ], 255 | MetricName: "ProvisionedReadCapacityUnits", 256 | Namespace: namespace 257 | }, 258 | Period: period, 259 | Stat: "Average" 260 | }, 261 | ReturnData: true 262 | } 263 | 264 | const provisionedWrites = { 265 | Id: "provisionedWrites", 266 | MetricStat: { 267 | Metric: { 268 | Dimensions: [ 269 | { 270 | Name: "TableName", 271 | Value: params.tableName 272 | } 273 | ], 274 | MetricName: "ProvisionedWriteCapacityUnits", 275 | Namespace: namespace 276 | }, 277 | Period: period, 278 | Stat: "Average" 279 | }, 280 | 
ReturnData: true 281 | } 282 | 283 | const queries = [ 284 | consumedReads, 285 | consumedWrites, 286 | throttledReads, 287 | throttledWrites, 288 | provisionedReads, 289 | provisionedWrites, 290 | ] 291 | 292 | // Our data gets pushed into here for each pagination call 293 | const data: { timestamp: Date, consumedRead: number, consumedWrite: number, throttledReads: number, throttledWrites: number, provisionedReads: number, provisionedWrites: number }[] = []; 294 | 295 | let nextToken = undefined 296 | do { 297 | const response = await cloudwatch.send(new GetMetricDataCommand({ 298 | MetricDataQueries: queries, 299 | StartTime: params.startTime, 300 | EndTime: params.endTime, 301 | NextToken: nextToken, 302 | ScanBy: "TimestampAscending" 303 | })) 304 | nextToken = response.NextToken as any 305 | 306 | // console.log(response) 307 | 308 | if (!response.MetricDataResults) { 309 | throw new Error("Unexpected API response: missing MetricDataResults"); 310 | } 311 | 312 | const consumedReadData = response.MetricDataResults[0]; 313 | const consumedWriteData = response.MetricDataResults[1]; 314 | const throttledReadsData = response.MetricDataResults[2]; 315 | const throttledWritesData = response.MetricDataResults[3]; 316 | const provisionedReadsData = response.MetricDataResults[4]; 317 | const provisionedWritesData = response.MetricDataResults[5]; 318 | 319 | 320 | for (let i = 0; i < consumedReadData.Timestamps!.length; i++) { 321 | const timestamp = new Date(consumedReadData.Timestamps![i]); 322 | const consumedRead = consumedReadData.Values![i] || 0; 323 | const consumedWrite = consumedWriteData.Values![i] || 0; 324 | const throttledReads = throttledReadsData.Values![i] || 0; 325 | const throttledWrites = throttledWritesData.Values![i] || 0; 326 | const provisionedReads = provisionedReadsData.Values![i] || 0; 327 | const provisionedWrites = provisionedWritesData.Values![i] || 0; 328 | 329 | data.push({ timestamp, consumedRead, consumedWrite, throttledReads, throttledWrites, provisionedReads, provisionedWrites }); 330 | } 331 | } 332 | while (nextToken !== undefined) 333 | 334 | return data; 335 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "Node 16", 4 | 5 | "compilerOptions": { 6 | "lib": ["es2021", "DOM"], 7 | "module": "ESNext", 8 | "target": "es2021", 9 | 10 | "strict": true, 11 | "esModuleInterop": true, 12 | "allowSyntheticDefaultImports": true, 13 | "skipLibCheck": true, 14 | "forceConsistentCasingInFileNames": true, 15 | "moduleResolution": "node", 16 | "typeRoots": ["./node_modules/@types", "./typings"] 17 | }, 18 | 19 | "ts-node": { 20 | // these options are overrides used only by ts-node 21 | // same as ts-node's --compilerOptions flag and the TS_NODE_COMPILER_OPTIONS environment variable 22 | "compilerOptions": { 23 | "module": "commonjs" 24 | } 25 | } 26 | } -------------------------------------------------------------------------------- /typings/optimization-js/index.d.ts: -------------------------------------------------------------------------------- 1 | declare module "optimization-js" 2 | --------------------------------------------------------------------------------
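For orientation, here is a hedged end-to-end sketch wiring the library pieces above together the same way the region scripts do: load a stats dump, revive timestamps, build simulator records, search for a config, then replay the winner. The file path, the seed config values, and the price literal are all hypothetical:

```ts
// Hypothetical end-to-end usage of csv-ingestion, plotting, and pricing.
import { readFileSync } from 'fs'
import { makeRecordsForSimulator } from './csv-ingestion'
import { getTraces } from './plotting'
import { optimize, calculateCost } from './pricing'

const stats: any[] = JSON.parse(readFileSync('./my-table-stats.json', 'utf8'))
stats.forEach(s => { if (typeof s.timestamp === 'string') s.timestamp = new Date(Date.parse(s.timestamp)) })

const { readRecords } = makeRecordsForSimulator(stats)

// Only scaling_delay_in_seconds survives into the search; min/max/target get overwritten.
const seedConfig = { min: 1, max: 10, target: 0.5, scaling_delay_in_seconds: 2 * 60 }
const readRcuHourPrice = 0.00013  // roughly the us-east-1 provisioned RCU-hour rate

const { bestMin, bestMax, bestTarget, bestPrice } = optimize(seedConfig, readRecords, readRcuHourPrice)
console.log({ bestMin, bestMax, bestTarget, bestPrice })

// Replay the winning config to inspect simulated throttling and the cost estimate.
// bestTarget comes back as an integer percentage, so divide by 100 before simulating.
const traces = getTraces({ ...seedConfig, min: bestMin, max: bestMax, target: bestTarget / 100 }, readRecords)
console.log('avg daily cost:', calculateCost(traces.provisionedCapacityTrace, readRcuHourPrice))
```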