├── awspricecalculator ├── s3 │ ├── __init__.py │ └── pricing.py ├── common │ ├── __init__.py │ ├── errors.py │ ├── phelper.py │ ├── utils.py │ └── consts.py ├── ec2 │ ├── __init__.py │ └── pricing.py ├── emr │ ├── __init__.py │ └── pricing.py ├── kinesis │ ├── __init__.py │ └── pricing.py ├── rds │ ├── __init__.py │ └── pricing.py ├── awslambda │ ├── __init__.py │ └── pricing.py ├── datatransfer │ ├── __init__.py │ └── pricing.py ├── dynamodb │ ├── __init__.py │ └── pricing.py ├── redshift │ ├── __init__.py │ └── pricing.py ├── data │ └── .gitignore └── __init__.py ├── requirements.txt ├── MANIFEST.in ├── requirements-dev.txt ├── test └── events │ └── constant-tag.json ├── install.sh ├── serverless.env.yml ├── .gitignore ├── setup.py ├── cloudformation ├── lambda-metric-filters.yml └── function-plus-schedule.json ├── scripts ├── README.md ├── redshift-pricing.py ├── emr-pricing.py ├── get-latest-index.py └── lambda-optimization.py ├── serverless.yml ├── README.md └── LICENSE.md /awspricecalculator/s3/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/ec2/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/emr/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/kinesis/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/rds/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/awslambda/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/datatransfer/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/dynamodb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /awspricecalculator/redshift/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tinydb==3.4.1 2 | numpy== 1.12.1 3 | tabulate 4 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include awspricecalculator/data *.csv *.json 2 | 3 | -------------------------------------------------------------------------------- 
/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | tinydb==3.4.1 2 | numpy== 1.12.1 3 | tabulate 4 | boto3 5 | python-lambda-local 6 | -------------------------------------------------------------------------------- /test/events/constant-tag.json: -------------------------------------------------------------------------------- 1 | { 2 | "tag": { 3 | "key": "", 4 | "value": "" 5 | } 6 | } 7 | 8 | -------------------------------------------------------------------------------- /awspricecalculator/data/.gitignore: -------------------------------------------------------------------------------- 1 | # Don't include the pricedata in the git repo - we want to download this 2 | # instead of working with stale cost data 3 | *.csv 4 | *.json -------------------------------------------------------------------------------- /awspricecalculator/__init__.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | 3 | __location__ = os.path.dirname(os.path.realpath(__file__)) 4 | sys.path.append(os.path.join(__location__, "../")) 5 | sys.path.append(os.path.join(__location__, "../vendored")) 6 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #Install application dependencies in /vendored folder 4 | pip install -r requirements.txt -t vendored 5 | 6 | #Install local dev environment & test dependencies in default site_packages path 7 | pip install -r requirements-dev.txt 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /serverless.env.yml: -------------------------------------------------------------------------------- 1 | # This is the Serverless Environment File 2 | # 3 | # It contains listing of your stages, and their regions 4 | # It also manages serverless variables at 3 levels: 5 | # - common variables: variables that apply to all stages/regions 6 | # - stage variables: variables that apply to a specific stage 7 | # - region variables: variables that apply to a specific region 8 | 9 | vars: 10 | stages: 11 | dev: 12 | vars: 13 | regions: 14 | us-east-1: 15 | vars: 16 | -------------------------------------------------------------------------------- /awspricecalculator/common/errors.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | 5 | 6 | class ValidationError(Exception): 7 | """Exception raised for errors in the input. 
8 | 9 | Attributes: 10 | message -- explanation of the error 11 | """ 12 | 13 | def __init__(self, message): 14 | self.message = message 15 | 16 | 17 | class NoDataFoundError(Exception): 18 | """Exception raised when no data could be found for a particular set of inputs 19 | 20 | Attributes: 21 | message -- explanation of the error 22 | """ 23 | 24 | def __init__(self, message): 25 | self.message = message 26 | 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Don't include anything installed into the virtualenv by pip 2 | .Python 3 | bin 4 | lib 5 | include 6 | pip-selfcheck.json 7 | # We don't need compiled Python artifacts in the repo 8 | __pycache__ 9 | *egg-info 10 | *.pyc 11 | *.pyo 12 | .idea/* 13 | .serverless/* 14 | 15 | awspricecalculator/data/* 16 | 17 | # ignore vendored files 18 | vendored 19 | 20 | #temporarily out 21 | awspricecalculator/s3* 22 | awspricecalculator/common/utils.py 23 | scripts/ec2-pricing.py 24 | scripts/rds-pricing.py 25 | scripts/s3-pricing.py 26 | scripts/lambda-pricing.py 27 | scripts/dynamodb-pricing.py 28 | scripts/kinesis-pricing.py 29 | scripts/context.py 30 | scripts/propagate-lambda-code.py 31 | 32 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup(name='awspricecalculator', 4 | version='0.1', 5 | description='AWS Price List calculations', 6 | url='https://github.com/ConcurrenyLabs/aws-pricing-tools/tree/master/awspricecalculator', 7 | author='Concurrency Labs', 8 | author_email='github@concurrencylabs.com', 9 | license='GNU', 10 | packages=['awspricecalculator','awspricecalculator.common', 11 | 'awspricecalculator.awslambda','awspricecalculator.ec2','awspricecalculator.rds', 'awspricecalculator.emr', 12 | 'awspricecalculator.redshift', 'awspricecalculator.s3','awspricecalculator.dynamodb', 13 | 'awspricecalculator.kinesis', 'awspricecalculator.datatransfer'], 14 | include_package_data=True, 15 | zip_safe=False) 16 | 17 | 18 | -------------------------------------------------------------------------------- /cloudformation/lambda-metric-filters.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: AWS CloudFormation Template for creating CloudWatch Logs Metric Filters that keep track of memory utilization (and in the future, possibly other data that can be extracted from Lambda output in CW Logs) 3 | 4 | Parameters: 5 | LambdaFunctionName: 6 | Default: "" 7 | Description: Name of the Lambda function to monitor 8 | Type: String 9 | 10 | 11 | Resources: 12 | LambdaMemoryUsed: 13 | Properties: 14 | FilterPattern: '[reportLabel=REPORT, requestIdLabel="RequestId:",..., maxMemoryUsedValue, maxMemoryUsedMbLabel]' 15 | LogGroupName: 16 | Fn::Join: 17 | - '' 18 | - - '/aws/lambda/' 19 | - Ref: LambdaFunctionName 20 | MetricTransformations: 21 | - MetricName: 22 | Fn::Join: 23 | - '' 24 | - - 'MemoryUsed-' 25 | - Ref: LambdaFunctionName 26 | MetricNamespace: ConcurrencyLabs/Lambda/ 27 | MetricValue: $maxMemoryUsedValue 28 | Type: AWS::Logs::MetricFilter 29 | 30 | LambdaMemorySize: 31 | Properties: 32 | FilterPattern: '[reportLabel=REPORT, requestIdLabel="RequestId:",..., memorySizeValue, memorySizeValueMbLabel, maxLabel, memoryLabel, usedLabel, maxMemoryUsedValue, 
maxMemoryUsedMbLabel]' 33 | LogGroupName: 34 | Fn::Join: 35 | - '' 36 | - - '/aws/lambda/' 37 | - Ref: LambdaFunctionName 38 | MetricTransformations: 39 | - MetricName: 40 | Fn::Join: 41 | - '' 42 | - - 'MemorySize-' 43 | - Ref: LambdaFunctionName 44 | MetricNamespace: ConcurrencyLabs/Lambda/ 45 | MetricValue: $memorySizeValue 46 | Type: AWS::Logs::MetricFilter 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | ## Concurrency Labs - aws-pricing-tools scripts 2 | 3 | This folder contains Python scripts used for various purposes in the repository. 4 | 5 | All scripts need to be executed from the `/scripts` folder. 6 | 7 | 8 | Make sure you have the following environment variables set: 9 | 10 | ``` 11 | export AWS_DEFAULT_PROFILE= 12 | export AWS_DEFAULT_REGION= 13 | ``` 14 | 15 | 16 | ### Get Latest Index 17 | 18 | The code needs a local copy of the AWS Price List API index file. 19 | The GitHub repo doesn't include the index file, so you have to 20 | download it the first time you run a test, and again every time AWS publishes a new 21 | Price List API index. 22 | 23 | In order to download the latest index file, go to the "scripts" folder and run: 24 | 25 | ``` 26 | python get-latest-index.py --service= 27 | ``` 28 | 29 | The script takes a few seconds to execute, since some index files are fairly large (like the EC2 one). 30 | 31 | I recommend executing with the option `--service=all` and subscribing to the AWS Price List API change notifications. 32 | 33 | 34 | ### Lambda Optimization Recommendations 35 | 36 | This script does the following: 37 | 38 | * It finds the function's execution records in CloudWatch Logs, for the 39 | given time window in minutes (e.g. the past 10 minutes) 40 | * It parses usage information and extracts memory used, execution time and allocated memory 41 | * It uses the Price List index to calculate pricing for the Lambda function 42 | under different scenarios and reports potential savings. 43 | 44 | 45 | ``` 46 | python lambda-optimization.py --function= --minutes= 47 | ``` 48 | 49 | This script requires the following IAM permissions: 50 | * `lambda:getFunction` 51 | * `logs:getLogEvents` 52 | 53 | Make sure the `AWS_DEFAULT_PROFILE` and `AWS_DEFAULT_REGION` environment variables are set.
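For example, to analyze the last 10 minutes of activity for a function (the function name below is just a placeholder):

```
python lambda-optimization.py --function=my-function --minutes=10
```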
54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /awspricecalculator/kinesis/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | import tinydb 7 | 8 | log = logging.getLogger() 9 | 10 | 11 | def calculate(pdim): 12 | 13 | log.info("Calculating Kinesis pricing with the following inputs: {}".format(str(pdim.__dict__))) 14 | 15 | ts = phelper.Timestamp() 16 | ts.start('totalCalculationKinesis') 17 | 18 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_KINESIS, phelper.get_partition_keys(consts.SERVICE_KINESIS, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 19 | 20 | cost = 0 21 | pricing_records = [] 22 | 23 | awsPriceListApiVersion = indexMetadata['Version'] 24 | priceQuery = tinydb.Query() 25 | 26 | kinesisDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_KINESIS_STREAMS])] 27 | 28 | #Shard Hours 29 | query = ((priceQuery['Group'] == 'Provisioned shard hour')) 30 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_KINESIS, kinesisDb, query, pdim.shardHours, pricing_records, cost) 31 | 32 | #PUT Payload Units 33 | query = ((priceQuery['Group'] == 'Payload Units')) 34 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_KINESIS, kinesisDb, query, pdim.putPayloadUnits, pricing_records, cost) 35 | 36 | #Extended Retention Hours 37 | query = ((priceQuery['Group'] == 'Addon shard hour')) 38 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_KINESIS, kinesisDb, query, pdim.extendedDataRetentionHours, pricing_records, cost) 39 | 40 | #TODO: add Enhanced (shard-level) metrics 41 | 42 | #Data Transfer - N/A 43 | #Note there is no charge for data transfer in Kinesis as per https://aws.amazon.com/kinesis/streams/pricing/ 44 | extraargs = {'priceDimensions':pdim} 45 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 46 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 47 | 48 | log.debug("Total time to compute: [{}]".format(ts.finish('totalCalculationKinesis'))) 49 | return pricing_result.__dict__ 50 | -------------------------------------------------------------------------------- /awspricecalculator/dynamodb/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | import tinydb 7 | 8 | log = logging.getLogger() 9 | regiondbs = {} 10 | indexMetadata = {} 11 | 12 | 13 | def calculate(pdim): 14 | 15 | log.info("Calculating DynamoDB pricing with the following inputs: {}".format(str(pdim.__dict__))) 16 | global regiondbs 17 | global indexMetadata 18 | 19 | ts = phelper.Timestamp() 20 | ts.start('totalCalculationDynamoDB') 21 | 22 | #Load On-Demand DBs 23 | dbs = regiondbs.get(consts.SERVICE_DYNAMODB+pdim.region+pdim.termType,{}) 24 | if not dbs: 25 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_DYNAMODB, phelper.get_partition_keys(consts.SERVICE_DYNAMODB, pdim.region,consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 26 | regiondbs[consts.SERVICE_DYNAMODB+pdim.region+pdim.termType]=dbs 27 | 28 | cost = 0 29 | pricing_records = [] 30 | 31 | awsPriceListApiVersion = indexMetadata['Version'] 32 | priceQuery = tinydb.Query() 33 | 34 | #TODO:add support for
free-tier flag (include or exclude from calculation) 35 | 36 | iopsDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DB_PIOPS])] 37 | 38 | #Read Capacity Units 39 | query = ((priceQuery['Group'] == 'DDB-ReadUnits')) 40 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DYNAMODB, iopsDb, query, pdim.readCapacityUnitHours, pricing_records, cost) 41 | 42 | #Write Capacity Units 43 | query = ((priceQuery['Group'] == 'DDB-WriteUnits')) 44 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DYNAMODB, iopsDb, query, pdim.writeCapacityUnitHours, pricing_records, cost) 45 | 46 | #DB Storage (TODO) 47 | 48 | #Data Transfer (TODO) 49 | #there is no additional charge for data transferred between Amazon DynamoDB and other Amazon Web Services within the same Region 50 | #data transferred across Regions (e.g., between Amazon DynamoDB in the US East (Northern Virginia) Region and Amazon EC2 in the EU (Ireland) Region), will be charged on both sides of the transfer. 51 | 52 | #API Requests (only applies for DDB Streams)(TODO) 53 | extraargs = {'priceDimensions':pdim} 54 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 55 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 56 | 57 | log.debug("Total time to compute: [{}]".format(ts.finish('totalCalculationDynamoDB'))) 58 | return pricing_result.__dict__ 59 | -------------------------------------------------------------------------------- /serverless.yml: -------------------------------------------------------------------------------- 1 | # Welcome to Serverless! 2 | # 3 | # This file is the main config file for your service. 4 | # It's very minimal at this point and uses default values. 5 | # You can always add more config options for more control. 6 | # We've included some commented out config examples here. 7 | # Just uncomment any of them to get that config option. 8 | # 9 | # For full config options, check the docs: 10 | # v1.docs.serverless.com 11 | # 12 | # Happy Coding! 
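#
# Note: the schedule event defined under 'functions' below reads the
# PRICING_TAG_KEY and PRICING_TAG_VALUE environment variables, so make
# sure both are exported before running 'serverless deploy'.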
13 | 14 | service: aws-pricing # NOTE: update this with your service name 15 | 16 | provider: 17 | name: aws 18 | runtime: python3.6 19 | 20 | # you can add statements to the Lambda function's IAM Role here 21 | iamRoleStatements: 22 | - Effect: "Allow" 23 | Action: 24 | - cloudwatch:* 25 | - ec2:Describe* 26 | - elasticloadbalancing:Describe* 27 | - autoscaling:Describe* 28 | - rds:Describe* 29 | - rds:List* 30 | - dynamodb:Describe* 31 | - dynamodb:List* 32 | - kinesis:Describe* 33 | - kinesis:List* 34 | - lambda:GetFunctionConfiguration 35 | - tag:getResources 36 | - tag:getTagKeys 37 | - tag:getTagValues 38 | Resource: "*" 39 | 40 | # you can overwrite defaults here 41 | #defaults: 42 | # stage: dev 43 | # region: us-east-1 44 | # you can add packaging information here 45 | 46 | package: 47 | exclude: 48 | - bin/* 49 | - lib/** 50 | - .git/** 51 | - .idea/** 52 | - include/** 53 | - pip-selfcheck.json 54 | - awspricecalculator/s3 55 | - awspricecalculator/data/ec2/index.* 56 | - awspricecalculator/data/ec2/*Dedicated* 57 | - awspricecalculator/data/ec2/*Host* 58 | - awspricecalculator/data/ec2/*Reserved* 59 | - awspricecalculator/data/ec2/*Savings* 60 | - awspricecalculator/data/rds/index.* 61 | - awspricecalculator/data/rds/*Reserved* 62 | - awspricecalculator/data/s3/index.* 63 | - awspricecalculator/data/lambda/index.* 64 | - awspricecalculator/data/dynamodb/index.* 65 | - awspricecalculator/data/kinesis/index.* 66 | - scripts/** 67 | - test/** 68 | - cloudformation/** 69 | - readme.txt 70 | 71 | include: 72 | - vendored 73 | 74 | functions: 75 | nearrealtimepricing: 76 | handler: functions/calculate-near-realtime.handler 77 | name: calculate-near-realtime-pricing 78 | timeout: 300 79 | memory: 1024 80 | events: 81 | - schedule: 82 | rate: rate(5 minutes) 83 | enabled: true 84 | input: 85 | tag: 86 | key: ${env:PRICING_TAG_KEY} 87 | value: ${env:PRICING_TAG_VALUE} 88 | 89 | # you can add CloudFormation resource templates here 90 | #resources: 91 | # Resources: 92 | # NewResource: 93 | # Type: AWS::S3::Bucket 94 | # Properties: 95 | # BucketName: my-new-bucket 96 | # Outputs: 97 | # NewOutput: 98 | # Description: "Description for the output" 99 | # Value: "Some output value" 100 | -------------------------------------------------------------------------------- /awspricecalculator/datatransfer/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | import tinydb 7 | 8 | log = logging.getLogger() 9 | regiondbs = {} 10 | indexMetadata = {} 11 | 12 | 13 | def calculate(pdim): 14 | 15 | log.info("Calculating AWSDataTransfer pricing with the following inputs: {}".format(str(pdim.__dict__))) 16 | 17 | ts = phelper.Timestamp() 18 | ts.start('totalCalculation') 19 | ts.start('tinyDbLoadOnDemand') 20 | 21 | awsPriceListApiVersion = '' 22 | cost = 0 23 | pricing_records = [] 24 | priceQuery = tinydb.Query() 25 | 26 | global regiondbs 27 | global indexMetadata 28 | 29 | #Load On-Demand DBs 30 | indexArgs = {} 31 | tmpDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType+pdim.tenancy 32 | dbs = regiondbs.get(tmpDbKey,{}) 33 | if not dbs: 34 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **indexArgs)) 35 | regiondbs[tmpDbKey]=dbs 36 | 37 | ts.finish('tinyDbLoadOnDemand') 38 | log.debug("Time to load 
OnDemand DB files: [{}]".format(ts.elapsed('tinyDbLoadOnDemand'))) 39 | 40 | #Data Transfer 41 | dataTransferDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 42 | 43 | #Out to the Internet 44 | if pdim.dataTransferOutInternetGb: 45 | ts.start('searchDataTransfer') 46 | query = ((priceQuery['To Location'] == 'External') & (priceQuery['Transfer Type'] == 'AWS Outbound')) 47 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 48 | log.debug("Time to search AWSDataTransfer data transfer Out: [{}]".format(ts.finish('searchDataTransfer'))) 49 | 50 | #Intra-regional data transfer - in/out/between AZs or using EIPs or ELB 51 | if pdim.dataTransferOutIntraRegionGb: 52 | query = ((priceQuery['Transfer Type'] == 'IntraRegion')) 53 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutIntraRegionGb, pricing_records, cost) 54 | 55 | #Inter-regional data transfer - out to other AWS regions 56 | if pdim.dataTransferOutInterRegionGb: 57 | query = ((priceQuery['Transfer Type'] == 'InterRegion Outbound') & (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion])) 58 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInterRegionGb, pricing_records, cost) 59 | 60 | 61 | log.debug("regiondbs:[{}]".format(regiondbs.keys())) 62 | awsPriceListApiVersion = indexMetadata['Version'] 63 | extraargs = {'priceDimensions':pdim} 64 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 65 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 66 | 67 | log.debug("Total time: [{}]".format(ts.finish('totalCalculation'))) 68 | return pricing_result.__dict__ 69 | -------------------------------------------------------------------------------- /awspricecalculator/awslambda/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | import tinydb 7 | 8 | log = logging.getLogger() 9 | regiondbs = {} 10 | indexMetadata = {} 11 | 12 | 13 | def calculate(pdim): 14 | 15 | log.info("Calculating Lambda pricing with the following inputs: {}".format(str(pdim.__dict__))) 16 | 17 | global regiondbs 18 | global indexMetadata 19 | 20 | ts = phelper.Timestamp() 21 | ts.start('totalCalculationAwsLambda') 22 | 23 | #Load On-Demand DB 24 | dbs = regiondbs.get(consts.SERVICE_LAMBDA+pdim.region+pdim.termType,{}) 25 | if not dbs: 26 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_LAMBDA, phelper.get_partition_keys(consts.SERVICE_LAMBDA, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 27 | regiondbs[consts.SERVICE_LAMBDA+pdim.region+pdim.termType]=dbs 28 | 29 | cost = 0 30 | pricing_records = [] 31 | 32 | awsPriceListApiVersion = indexMetadata['Version'] 33 | priceQuery = tinydb.Query() 34 | 35 | #TODO: add support to include/ignore free-tier (include a flag) 36 | 37 | serverlessDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SERVERLESS])] 38 | 39 | #Requests 40 | if pdim.requestCount: 41 | query = ((priceQuery['Group'] == 'AWS-Lambda-Requests')) 42 | pricing_records, cost = 
phelper.calculate_price(consts.SERVICE_LAMBDA, serverlessDb, query, pdim.requestCount, pricing_records, cost) 43 | 44 | #GB-s (aka compute time) 45 | if pdim.avgDurationMs: 46 | query = ((priceQuery['Group'] == 'AWS-Lambda-Duration')) 47 | usageUnits = pdim.GBs 48 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_LAMBDA, serverlessDb, query, usageUnits, pricing_records, cost) 49 | 50 | #Data Transfer 51 | dataTransferDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER])] 52 | 53 | #To internet 54 | if pdim.dataTransferOutInternetGb: 55 | query = ((priceQuery['To Location'] == 'External') & (priceQuery['Transfer Type'] == 'AWS Outbound')) 56 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_LAMBDA, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 57 | 58 | #Intra-regional data transfer - in/out/between EC2 AZs or using IPs or ELB 59 | if pdim.dataTransferOutIntraRegionGb: 60 | query = ((priceQuery['Transfer Type'] == 'IntraRegion')) 61 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_LAMBDA, dataTransferDb, query, pdim.dataTransferOutIntraRegionGb, pricing_records, cost) 62 | 63 | #Inter-regional data transfer - out to other AWS regions 64 | if pdim.dataTransferOutInterRegionGb: 65 | query = ((priceQuery['Transfer Type'] == 'InterRegion Outbound') & (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion])) 66 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_LAMBDA, dataTransferDb, query, pdim.dataTransferOutInterRegionGb, pricing_records, cost) 67 | 68 | 69 | extraargs = {'priceDimensions':pdim} 70 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 71 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 72 | 73 | log.debug("Total time to compute: [{}]".format(ts.finish('totalCalculationAwsLambda'))) 74 | return pricing_result.__dict__ 75 | 76 | 77 | -------------------------------------------------------------------------------- /awspricecalculator/emr/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | from ..common.models import Ec2PriceDimension 7 | from ..ec2 import pricing as ec2pricing 8 | import tinydb 9 | 10 | log = logging.getLogger() 11 | regiondbs = {} 12 | indexMetadata = {} 13 | 14 | 15 | def calculate(pdim): 16 | 17 | log.info("Calculating EMR pricing with the following inputs: {}".format(str(pdim.__dict__))) 18 | 19 | ts = phelper.Timestamp() 20 | ts.start('totalCalculation') 21 | ts.start('tinyDbLoadOnDemand') 22 | 23 | awsPriceListApiVersion = '' 24 | cost = 0 25 | pricing_records = [] 26 | priceQuery = tinydb.Query() 27 | 28 | global regiondbs 29 | global indexMetadata 30 | 31 | 32 | #DBs for Data Transfer 33 | tmpDtDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType 34 | dtdbs = regiondbs.get(tmpDtDbKey,{}) 35 | if not dtdbs: 36 | dtdbs, dtIndexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **{})) 37 | regiondbs[tmpDtDbKey]=dtdbs 38 | 39 | #_/_/_/_/_/ ON-DEMAND PRICING _/_/_/_/_/ 40 | #Load On-Demand EMR DBs 41 | dbs = regiondbs.get(consts.SERVICE_EMR+pdim.region+consts.TERM_TYPE_ON_DEMAND,{}) 42 | if not dbs: 43 | dbs, 
indexMetadata = phelper.loadDBs(consts.SERVICE_EMR, phelper.get_partition_keys(consts.SERVICE_EMR, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 44 | regiondbs[consts.SERVICE_EMR+pdim.region+pdim.termType]=dbs 45 | 46 | ts.finish('tinyDbLoadOnDemand') 47 | log.debug("Time to load OnDemand DB files: [{}]".format(ts.elapsed('tinyDbLoadOnDemand'))) 48 | 49 | #EMR Compute Instance 50 | if pdim.instanceHours: 51 | #The EMR component in the calculation always uses OnDemand (Reserved it's not supported yet for EMR) 52 | computeDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[consts.SCRIPT_TERM_TYPE_ON_DEMAND], consts.PRODUCT_FAMILY_EMR_INSTANCE))] 53 | ts.start('tinyDbSearchComputeFile') 54 | #TODO: add support for Hunk Software Type 55 | query = ((priceQuery['Instance Type'] == pdim.instanceType) & (priceQuery['Software Type'] == 'EMR')) 56 | 57 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EMR, computeDb, query, pdim.instanceHours, pricing_records, cost) 58 | log.debug("Time to search compute:[{}]".format(ts.finish('tinyDbSearchComputeFile'))) 59 | 60 | 61 | #EC2 Pricing - the EC2 component takes into consideration either OnDemand or Reserved. 62 | ec2_pricing = ec2pricing.calculate(Ec2PriceDimension(**pdim.ec2PriceDims)) 63 | log.info("pdim.ec2PriceDims:[{}]".format(pdim.ec2PriceDims)) 64 | log.info("ec2_pricing:[{}]".format(ec2_pricing)) 65 | if ec2_pricing.get('pricingRecords',[]): pricing_records.extend(ec2_pricing['pricingRecords']) 66 | cost += ec2_pricing.get('totalCost',0) 67 | 68 | 69 | 70 | 71 | 72 | log.debug("regiondbs:[{}]".format(regiondbs.keys())) 73 | awsPriceListApiVersion = indexMetadata['Version'] 74 | extraargs = {'priceDimensions':pdim} 75 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 76 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 77 | 78 | #proc = psutil.Process() 79 | #log.debug("open_files: {}".format(proc.open_files())) 80 | 81 | log.debug("Total time: [{}]".format(ts.finish('totalCalculation'))) 82 | return pricing_result.__dict__ 83 | -------------------------------------------------------------------------------- /awspricecalculator/s3/pricing.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from ..common import consts, phelper 4 | from ..common.models import PricingResult 5 | import tinydb 6 | 7 | 8 | 9 | log = logging.getLogger() 10 | regiondbs = {} 11 | indexMetadata = {} 12 | 13 | def calculate(pdim): 14 | ts = phelper.Timestamp() 15 | ts.start('totalS3Calculation') 16 | 17 | global regiondbs 18 | global indexMetadata 19 | 20 | log.info("Calculating S3 pricing with the following inputs: {}".format(str(pdim.__dict__))) 21 | 22 | #DBs for Data Transfer 23 | tmpDtDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType 24 | dtdbs = regiondbs.get(tmpDtDbKey,{}) 25 | if not dtdbs: 26 | dtdbs, dtIndexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **{})) 27 | regiondbs[tmpDtDbKey]=dtdbs 28 | 29 | #DBs for S3 Pricing 30 | dbs = regiondbs.get(consts.SERVICE_S3+pdim.region+pdim.termType,{}) 31 | if not dbs: 32 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_S3, phelper.get_partition_keys(consts.SERVICE_S3, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 33 | regiondbs[consts.SERVICE_S3+pdim.region+pdim.termType]=dbs 34 | 35 | 
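    # Note: regiondbs is a module-level cache keyed by service+region+term type, so
    # the TinyDB price files only have to be loaded from disk once per process and
    # can be reused on subsequent (e.g. warm Lambda) invocations.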
cost = 0 36 | pricing_records = [] 37 | 38 | awsPriceListApiVersion = indexMetadata['Version'] 39 | priceQuery = tinydb.Query() 40 | 41 | 42 | #Storage 43 | if pdim.storageSizeGb: 44 | storageDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_STORAGE])] 45 | query = ((priceQuery['Storage Class'] == consts.S3_STORAGE_CLASS_MAP[pdim.storageClass]) & (priceQuery['Volume Type'] == consts.S3_VOLUME_TYPE_DICT[pdim.storageClass])) 46 | 47 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_S3, storageDb, query, pdim.storageSizeGb, pricing_records, cost) 48 | 49 | 50 | #Data Transfer 51 | 52 | if pdim.dataTransferOutInternetGb: 53 | dataTransferDb = dtdbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 54 | 55 | #Out to the internet 56 | query = ((priceQuery['To Location'] == 'External') & (priceQuery['Transfer Type'] == 'AWS Outbound')) 57 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 58 | #TODO: Intra region (regular and accelerated) 59 | #TODO: Out to the internet (Accelerated transfer) 60 | 61 | #TODO: Fee 62 | 63 | #API Request 64 | #TODO: add support for S3 Select 65 | requestDb = None 66 | if pdim.requestNumber: 67 | requestDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_API_REQUEST])] 68 | group = '' 69 | if pdim.storageClass in (consts.SCRIPT_STORAGE_CLASS_STANDARD, consts.SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY): 70 | if pdim.requestType in ['PUT','COPY','POST','LIST']: group=consts.S3_USAGE_GROUP_REQUESTS_TIER_1 71 | if pdim.requestType in ['GET']: group=consts.S3_USAGE_GROUP_REQUESTS_TIER_2 72 | if pdim.storageClass == consts.SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS: 73 | if pdim.requestType in ['PUT','COPY','POST','LIST']: group=consts.S3_USAGE_GROUP_REQUESTS_SIA_TIER1 74 | if pdim.requestType in ['GET']: group=consts.S3_USAGE_GROUP_REQUESTS_SIA_TIER2 75 | if pdim.storageClass == consts.SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS: 76 | if pdim.requestType in ['PUT','COPY','POST','LIST']: group=consts.S3_USAGE_GROUP_REQUESTS_ZIA_TIER1 77 | if pdim.requestType in ['GET']: group=consts.S3_USAGE_GROUP_REQUESTS_ZIA_TIER2 78 | 79 | query = ((priceQuery['Group'] == group)) 80 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_S3, requestDb, query, pdim.requestNumber, pricing_records, cost) 81 | 82 | #Data Retrieval: Standard and One Zone Infrequent Access 83 | if pdim.dataRetrievalGb and pdim.storageClass in (consts.SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS, consts.SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS): 84 | if not requestDb: requestDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_API_REQUEST])] 85 | group = "" 86 | if pdim.storageClass == consts.SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS: 87 | group = consts.S3_USAGE_GROUP_REQUESTS_SIA_RETRIEVAL 88 | if pdim.storageClass == consts.SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS: 89 | group = consts.S3_USAGE_GROUP_REQUESTS_ZIA_RETRIEVAL 90 | query = ((priceQuery['Group'] == group)) 91 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_S3, requestDb, query, pdim.dataRetrievalGb, pricing_records, cost) 92 | 93 | 94 | 95 | #TODO:Glacier (Bulk, Expedited, Glacier Requests) 96 | 97 | 
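    # Assemble the accumulated pricing records, the running total cost and the
    # input price dimensions into a single PricingResult payload for the caller.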
extraargs = {'priceDimensions':pdim} 98 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 99 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 100 | 101 | print ("Total time to compute S3 pricing: [{}]".format(ts.finish('totalS3Calculation'))) 102 | return pricing_result.__dict__ 103 | 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /cloudformation/function-plus-schedule.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS CloudFormation Template for deploying a Lambda function that calculates EC2 pricing in near real-time", 4 | "Parameters": { 5 | "TagKey" : { 6 | "Type": "String", 7 | "Description" : "Tag key that will be used to find AWS resources. Mandatory", 8 | "MinLength": "1", 9 | "ConstraintDescription": "Tag key is mandatory." 10 | }, 11 | "TagValue" : { 12 | "Type": "String", 13 | "MinLength": "1", 14 | "Description" : "Tag value that will be used to find AWS resources. Mandatory", 15 | "ConstraintDescription": "Tag value is mandatory." 16 | } 17 | 18 | }, 19 | "Resources": { 20 | 21 | "LambdaRealtimeCalculatePricingRole": { 22 | "Type": "AWS::IAM::Role", 23 | "Properties": { 24 | "AssumeRolePolicyDocument": { 25 | "Version": "2012-10-17", 26 | "Statement": [{ 27 | "Effect": "Allow", 28 | "Principal": {"Service": ["lambda.amazonaws.com"]}, 29 | "Action": ["sts:AssumeRole"] 30 | }] 31 | }, 32 | "Path": "/", 33 | "Policies": [{ 34 | "PolicyName": "root", 35 | "PolicyDocument": { 36 | "Version": "2012-10-17", 37 | "Statement": [{ 38 | "Effect": "Allow", 39 | "Action": ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"], 40 | "Resource": "arn:aws:logs:*:*:*" 41 | }, 42 | { 43 | "Effect": "Allow", 44 | "Action": ["cloudwatch:*"], 45 | "Resource": "*" 46 | }, 47 | { 48 | "Effect": "Allow", 49 | "Action": ["ec2:Describe*", 50 | "elasticloadbalancing:Describe*", 51 | "autoscaling:Describe*"], 52 | "Resource": "*" 53 | }, 54 | { 55 | "Effect": "Allow", 56 | "Action": ["rds:Describe*", 57 | "rds:List*"], 58 | "Resource": "*" 59 | }, 60 | { 61 | "Effect": "Allow", 62 | "Action": ["dynamodb:Describe*", 63 | "dynamodb:List*"], 64 | "Resource": "*" 65 | }, 66 | { 67 | "Effect": "Allow", 68 | "Action": ["kinesis:Describe*", 69 | "kinesis:List*"], 70 | "Resource": "*" 71 | }, 72 | { 73 | "Effect": "Allow", 74 | "Action": ["lambda:GetFunctionConfiguration"], 75 | "Resource": "*" 76 | }, 77 | { 78 | "Effect": "Allow", 79 | "Action": ["tag:getResources", "tag:getTagKeys", "tag:getTagValues"], 80 | "Resource": "*" 81 | } 82 | 83 | ] 84 | } 85 | }] 86 | } 87 | }, 88 | 89 | "LambdaRealtimeCalculatePricingFunction": { 90 | "Type": "AWS::Lambda::Function", 91 | "DependsOn" : ["LambdaRealtimeCalculatePricingRole"], 92 | "Properties": { 93 | "Handler": "functions/calculate-near-realtime.handler", 94 | "Role": { "Fn::GetAtt" : ["LambdaRealtimeCalculatePricingRole", "Arn"] }, 95 | "Code": { 96 | "S3Bucket": { "Fn::Join" : [ "", ["concurrencylabs-deployment-artifacts-public-", { "Ref" : "AWS::Region" }] ] }, 97 | "S3Key": "lambda-near-realtime-pricing/calculate-near-realtime-pricing-v3.10.zip" 98 | }, 99 | "Runtime": "python3.6", 100 | "Timeout": "300", 101 | "MemorySize" : 1024 102 | } 103 | }, 104 | 105 | 106 | "ScheduledPricingCalculationRule": { 107 | "Type": "AWS::Events::Rule", 108 | "Properties": { 109 | "Description": "Invoke Pricing Calculator 
Lambda function every 5 minutes", 110 | "ScheduleExpression": "rate(5 minutes)", 111 | "State": "ENABLED", 112 | "Targets": [{ 113 | "Arn": { "Fn::GetAtt": ["LambdaRealtimeCalculatePricingFunction", "Arn"] }, 114 | "Id": "NearRealTimePriceCalculatorFunctionv1", 115 | "Input":{"Fn::Join":["", ["{\"tag\":{\"key\":\"",{"Ref":"TagKey"},"\",\"value\":\"",{"Ref":"TagValue"},"\"}}"]]} 116 | }] 117 | } 118 | }, 119 | 120 | 121 | "PermissionForEventsToInvokePricingCalculationLambda": { 122 | "Type": "AWS::Lambda::Permission", 123 | "Properties": { 124 | "FunctionName": { "Ref": "LambdaRealtimeCalculatePricingFunction" }, 125 | "Action": "lambda:InvokeFunction", 126 | "Principal": "events.amazonaws.com", 127 | "SourceArn": { "Fn::GetAtt": ["ScheduledPricingCalculationRule", "Arn"] } 128 | } 129 | } 130 | 131 | }, 132 | 133 | "Outputs": { 134 | 135 | "Documentation": { 136 | "Description": "For more details, see this blog post", 137 | "Value": "https://www.concurrencylabs.com/blog/aws-pricing-lambda-realtime-calculation-function/" 138 | }, 139 | 140 | 141 | "LambdaFunction": { 142 | "Description": "Lambda function that calculates pricing in near real-time", 143 | "Value": { 144 | "Ref": "LambdaRealtimeCalculatePricingFunction" 145 | } 146 | }, 147 | 148 | "ScheduledEvent": { 149 | "Description": "CloudWatch Events schedule that will trigger the Lambda function", 150 | "Value": { 151 | "Ref": "ScheduledPricingCalculationRule" 152 | } 153 | } 154 | 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /awspricecalculator/redshift/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper 5 | from ..common.models import PricingResult 6 | from ..common.models import Ec2PriceDimension 7 | from ..ec2 import pricing as ec2pricing 8 | import tinydb 9 | 10 | log = logging.getLogger() 11 | regiondbs = {} 12 | indexMetadata = {} 13 | 14 | 15 | def calculate(pdim): 16 | 17 | log.info("Calculating Redshift pricing with the following inputs: {}".format(str(pdim.__dict__))) 18 | 19 | ts = phelper.Timestamp() 20 | ts.start('totalCalculation') 21 | ts.start('tinyDbLoadOnDemand') 22 | ts.start('tinyDbLoadReserved') 23 | 24 | awsPriceListApiVersion = '' 25 | cost = 0 26 | pricing_records = [] 27 | priceQuery = tinydb.Query() 28 | 29 | global regiondbs 30 | global indexMetadata 31 | 32 | #DBs for Data Transfer 33 | tmpDtDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType 34 | dtdbs = regiondbs.get(tmpDtDbKey,{}) 35 | if not dtdbs: 36 | dtdbs, dtIndexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **{})) 37 | regiondbs[tmpDtDbKey]=dtdbs 38 | #_/_/_/_/_/ ON-DEMAND PRICING _/_/_/_/_/ 39 | #Load On-Demand Redshift DBs 40 | if pdim.termType == consts.SCRIPT_TERM_TYPE_ON_DEMAND: 41 | 42 | dbs = regiondbs.get(consts.SERVICE_REDSHIFT+pdim.region+consts.TERM_TYPE_ON_DEMAND,{}) 43 | if not dbs: 44 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_REDSHIFT, phelper.get_partition_keys(consts.SERVICE_REDSHIFT, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 45 | regiondbs[consts.SERVICE_REDSHIFT+pdim.region+pdim.termType]=dbs 46 | 47 | ts.finish('tinyDbLoadOnDemand') 48 | log.debug("Time to load OnDemand DB files: [{}]".format(ts.elapsed('tinyDbLoadOnDemand'))) 49 | 50 | #Redshift Compute Instance 51 | if pdim.instanceHours: 52 | computeDb = 
dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[consts.SCRIPT_TERM_TYPE_ON_DEMAND], consts.PRODUCT_FAMILY_COMPUTE_INSTANCE))] 53 | ts.start('tinyDbSearchComputeFile') 54 | query = ((priceQuery['Instance Type'] == pdim.instanceType)) 55 | 56 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_REDSHIFT, computeDb, query, pdim.instanceHours, pricing_records, cost) 57 | log.debug("Time to search compute:[{}]".format(ts.finish('tinyDbSearchComputeFile'))) 58 | 59 | 60 | #TODO: move Data Transfer to a common file (since now it's a separate index file) 61 | """ 62 | #Data Transfer 63 | dataTransferDb = dtdbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 64 | 65 | #Out to the Internet 66 | if pdim.dataTransferOutInternetGb: 67 | ts.start('searchDataTransfer') 68 | query = ((priceQuery['To Location'] == 'External') & (priceQuery['Transfer Type'] == 'AWS Outbound')) 69 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 70 | log.debug("Time to search AWS Data Transfer Out: [{}]".format(ts.finish('searchDataTransfer'))) 71 | 72 | #Intra-regional data transfer - in/out/between EC2 AZs or using EIPs or ELB 73 | if pdim.dataTransferOutIntraRegionGb: 74 | query = ((priceQuery['Transfer Type'] == 'IntraRegion')) 75 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutIntraRegionGb, pricing_records, cost) 76 | 77 | 78 | #Inter-regional data transfer - out to other AWS regions 79 | if pdim.dataTransferOutInterRegionGb: 80 | query = ((priceQuery['Transfer Type'] == 'InterRegion Outbound') & (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion])) 81 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInterRegionGb, pricing_records, cost) 82 | """ 83 | 84 | #_/_/_/_/_/ RESERVED PRICING _/_/_/_/_/ 85 | 86 | log.debug("regiondbs:[{}]".format(regiondbs.keys())) 87 | 88 | #Load Reserved DBs 89 | if pdim.termType == consts.SCRIPT_TERM_TYPE_RESERVED: 90 | 91 | indexArgs = {'offeringClasses':consts.EC2_OFFERING_CLASS_MAP.values(), 92 | 'tenancies':[consts.EC2_TENANCY_SHARED], 'purchaseOptions':consts.EC2_PURCHASE_OPTION_MAP.values()} 93 | 94 | dbs = regiondbs.get(consts.SERVICE_REDSHIFT+pdim.region+pdim.termType,{}) 95 | if not dbs: 96 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_REDSHIFT, phelper.get_partition_keys(consts.SERVICE_REDSHIFT, pdim.region, consts.SCRIPT_TERM_TYPE_RESERVED, **indexArgs)) 97 | regiondbs[consts.SERVICE_REDSHIFT+pdim.region+pdim.termType]=dbs 98 | ts.finish('tinyDbLoadReserved') 99 | log.debug("Time to load Reserved DB files: [{}]".format(ts.elapsed('tinyDbLoadReserved'))) 100 | log.debug("regiondbs keys:[{}]".format(regiondbs.keys())) 101 | 102 | #Redshift only supports the standard offering class 103 | log.debug("dbs:[{}]".format(dbs)) 104 | computeDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], 105 | consts.PRODUCT_FAMILY_COMPUTE_INSTANCE, consts.EC2_OFFERING_CLASS_STANDARD, 106 | consts.EC2_TENANCY_SHARED, consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]))] 107 | 108 | 109 | 110 | ts.start('tinyDbSearchComputeFileReserved') 111 | query = ((priceQuery['Instance Type'] == pdim.instanceType) & 112 | (priceQuery['LeaseContractLength'] ==
consts.EC2_RESERVED_YEAR_MAP["{}".format(pdim.years)])) 113 | 114 | hrsQuery = query & (priceQuery['Unit'] == 'Hrs' ) 115 | qtyQuery = query & (priceQuery['Unit'] == 'Quantity' ) 116 | 117 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 118 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_REDSHIFT, computeDb, qtyQuery, pdim.instanceCount, pricing_records, cost) 119 | 120 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 121 | reservedInstanceHours = pdim.instanceCount * consts.HOURS_IN_MONTH * 12 * pdim.years #TODO: move to common function 122 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_REDSHIFT, computeDb, hrsQuery, reservedInstanceHours, pricing_records, cost) 123 | 124 | log.debug("Time to search:[{}]".format(ts.finish('tinyDbSearchComputeFileReserved'))) 125 | 126 | awsPriceListApiVersion = indexMetadata['Version'] 127 | extraargs = {'priceDimensions':pdim} 128 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 129 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 130 | 131 | log.debug("Total time: [{}]".format(ts.finish('totalCalculation'))) 132 | return pricing_result.__dict__ 133 | -------------------------------------------------------------------------------- /scripts/redshift-pricing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys, os, getopt, json, logging 3 | import argparse 4 | import traceback 5 | sys.path.insert(0, os.path.abspath('..')) 6 | 7 | import awspricecalculator.redshift.pricing as redshiftpricing 8 | import awspricecalculator.common.consts as consts 9 | import awspricecalculator.common.models as data 10 | import awspricecalculator.common.utils as utils 11 | from awspricecalculator.common.errors import ValidationError 12 | from awspricecalculator.common.errors import NoDataFoundError 13 | 14 | log = logging.getLogger() 15 | logging.basicConfig() 16 | log.setLevel(logging.DEBUG) 17 | 18 | def main(argv): 19 | 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--region', help='', required=False) 22 | parser.add_argument('--regions', help='', required=False) 23 | parser.add_argument('--sort-criteria', help='', required=False) 24 | parser.add_argument('--instance-type', help='', required=False) 25 | parser.add_argument('--instance-types', help='', required=False) 26 | parser.add_argument('--instance-hours', help='', type=int, required=False) 27 | parser.add_argument('--ebs-volume-type', help='', required=False) 28 | parser.add_argument('--ebs-storage-gb-month', help='', required=False) 29 | parser.add_argument('--piops', help='', type=int, required=False) 30 | parser.add_argument('--data-transfer-out-internet-gb', help='', required=False) 31 | parser.add_argument('--data-transfer-out-intraregion-gb', help='', required=False) 32 | parser.add_argument('--data-transfer-out-interregion-gb', help='', required=False) 33 | parser.add_argument('--to-region', help='', required=False) 34 | parser.add_argument('--term-type', help='', required=False) 35 | parser.add_argument('--offering-class', help='', required=False) 36 | parser.add_argument('--offering-classes', help='', required=False) 37 | parser.add_argument('--instance-count', help='', type=int, required=False) 38 | parser.add_argument('--years', help='', required=False) 39 | 
parser.add_argument('--offering-type', help='', required=False) 40 | parser.add_argument('--offering-types', help='', required=False) 41 | 42 | if len(sys.argv) == 1: 43 | parser.print_help() 44 | sys.exit(1) 45 | args = parser.parse_args() 46 | 47 | region = '' 48 | regions = '' 49 | instanceType = '' 50 | instanceTypes = '' 51 | instanceHours = 0 52 | instanceCount = 0 53 | sortCriteria = '' 54 | ebsVolumeType = '' 55 | ebsStorageGbMonth = 0 56 | pIops = 0 57 | dataTransferOutInternetGb = 0 58 | dataTransferOutIntraRegionGb = 0 59 | dataTransferOutInterRegionGb = 0 60 | toRegion = '' 61 | termType = consts.SCRIPT_TERM_TYPE_ON_DEMAND 62 | offeringClass = '' 63 | offeringClasses = consts.SUPPORTED_REDSHIFT_OFFERING_CLASSES #only used for Reserved comparisons (standard, convertible) 64 | offeringType = '' 65 | offeringTypes = consts.EC2_SUPPORTED_PURCHASE_OPTIONS #only used for Reserved comparisons (all-upfront, partial-upfront, no-upfront) 66 | years = 1 67 | 68 | if args.region: region = args.region 69 | if args.regions: regions = args.regions 70 | if args.sort_criteria: sortCriteria = args.sort_criteria 71 | if args.instance_type: instanceType = args.instance_type 72 | if args.instance_types: instanceTypes = args.instance_types 73 | if args.instance_hours: instanceHours = int(args.instance_hours) 74 | if args.ebs_volume_type: ebsVolumeType = args.ebs_volume_type 75 | if args.ebs_storage_gb_month: ebsStorageGbMonth = int(args.ebs_storage_gb_month) 76 | if args.piops: pIops = int(args.piops) 77 | if args.data_transfer_out_internet_gb: dataTransferOutInternetGb = int(args.data_transfer_out_internet_gb) 78 | if args.data_transfer_out_intraregion_gb: dataTransferOutIntraRegionGb = int(args.data_transfer_out_intraregion_gb) 79 | if args.data_transfer_out_interregion_gb: dataTransferOutInterRegionGb = int(args.data_transfer_out_interregion_gb) 80 | if args.to_region: toRegion = args.to_region 81 | if args.term_type: termType = args.term_type 82 | if args.offering_class: offeringClass = args.offering_class 83 | if args.offering_classes: offeringClasses = args.offering_classes.split(',') 84 | if args.instance_count: instanceCount = args.instance_count 85 | if args.offering_type: offeringType = args.offering_type 86 | if args.offering_types: offeringTypes = args.offering_types.split(',') 87 | if args.years: years = str(args.years) 88 | 89 | #TODO: Implement comparison between a subset of regions by entering an array of regions to compare 90 | #TODO: Implement a sort by target region (for data transfer) 91 | #TODO: For Reserved pricing, include a payment plan throughout the whole period, and a monthly average and savings 92 | 93 | #TODO: remove EBS for Redshift 94 | 95 | 96 | try: 97 | 98 | kwargs = {'sortCriteria':sortCriteria, 'instanceType':instanceType, 'instanceTypes':instanceTypes, 99 | 'instanceHours':instanceHours, 'dataTransferOutInternetGb':dataTransferOutInternetGb, 'pIops':pIops, 100 | 'dataTransferOutIntraRegionGb':dataTransferOutIntraRegionGb, 'dataTransferOutInterRegionGb':dataTransferOutInterRegionGb, 101 | 'toRegion':toRegion, 'termType':termType, 'instanceCount': instanceCount, 'years': years, 'offeringType':offeringType, 102 | 'offeringClass':offeringClass 103 | } 104 | 105 | if region: kwargs['region'] = region 106 | 107 | if sortCriteria: 108 | if sortCriteria in (consts.SORT_CRITERIA_TERM_TYPE, consts.SORT_CRITERIA_TERM_TYPE_REGION): 109 | if sortCriteria == consts.SORT_CRITERIA_TERM_TYPE_REGION: 110 | #TODO: validate that region list is comma-separated 111 | #TODO: move this 
list to utils.compare_term_types 112 | if regions: kwargs['regions'] = regions.split(',') 113 | else: kwargs['regions']=consts.SUPPORTED_REGIONS 114 | kwargs['purchaseOptions'] = offeringTypes #purchase options are referred to as offering types in the EC2 API 115 | kwargs['offeringClasses']=offeringClasses 116 | validate (kwargs) 117 | termPricingAnalysis = utils.compare_term_types(service=consts.SERVICE_REDSHIFT, **kwargs) 118 | tabularData = termPricingAnalysis.pop('tabularData') 119 | print ("Redshift termPricingAnalysis: [{}]".format(json.dumps(termPricingAnalysis,sort_keys=False, indent=4))) 120 | print("csvData:\n{}\n".format(termPricingAnalysis['csvData'])) 121 | print("tabularData:\n{}".format(tabularData)) 122 | 123 | else: 124 | validate (kwargs) 125 | pricecomparisons = utils.compare(service=consts.SERVICE_REDSHIFT,**kwargs) 126 | print("Price comparisons:[{}]".format(json.dumps(pricecomparisons, indent=4))) 127 | else: 128 | validate (kwargs) 129 | redshift_pricing = redshiftpricing.calculate(data.RedshiftPriceDimension(**kwargs)) 130 | print(json.dumps(redshift_pricing,sort_keys=False,indent=4)) 131 | 132 | 133 | 134 | except NoDataFoundError as ndf: 135 | print ("NoDataFoundError args:[{}]".format(args)) 136 | 137 | except Exception as e: 138 | traceback.print_exc() 139 | print("Exception message:["+str(e)+"]") 140 | 141 | 142 | """ 143 | This function contains validations at the script level. No need to validate Redshift parameters, since 144 | class RedshiftPriceDimension already contains a validation function. 145 | """ 146 | def validate (args): 147 | #TODO: add - if termType sort criteria is specified, don't include offeringClass (singular) 148 | #TODO: add - if offeringTypes is included, have at least one valid offeringType (purchase option) 149 | #TODO: move to models or a common place that can be used by both CLI and API 150 | validation_msg = "" 151 | if args.get('sortCriteria','') == consts.SORT_CRITERIA_TERM_TYPE: 152 | if args.get('instanceHours',False): 153 | validation_msg = "instance-hours cannot be set when sort-criteria=term-type" 154 | if args.get('offeringType',False): 155 | validation_msg = "offering-type cannot be set when sort-criteria=term-type - try offering-types (plural) instead" 156 | if not args.get('years',''): 157 | validation_msg = "years cannot be empty" 158 | if args.get('sortCriteria','') == consts.SORT_CRITERIA_TERM_TYPE_REGION: 159 | if not args.get('offeringClasses',''): 160 | validation_msg = "offering-classes cannot be empty" 161 | if not args.get('purchaseOptions',''): 162 | validation_msg = "offering-types cannot be empty" 163 | 164 | if validation_msg: 165 | print("Error: [{}]".format(validation_msg)) 166 | raise ValidationError(validation_msg) 167 | 168 | return 169 | 170 | if __name__ == "__main__": 171 | main(sys.argv[1:]) 172 | -------------------------------------------------------------------------------- /scripts/emr-pricing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys, os, getopt, json, logging 3 | import argparse 4 | import traceback 5 | sys.path.insert(0, os.path.abspath('..')) 6 | 7 | import awspricecalculator.emr.pricing as emrpricing 8 | import awspricecalculator.common.consts as consts 9 | import awspricecalculator.common.models as data 10 | import awspricecalculator.common.utils as utils 11 | from awspricecalculator.common.errors import ValidationError 12 | from awspricecalculator.common.errors import NoDataFoundError 13 | 14 | log =
logging.getLogger() 15 | logging.basicConfig() 16 | log.setLevel(logging.DEBUG) 17 | 18 | def main(argv): 19 | 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--region', help='', required=False) 22 | parser.add_argument('--regions', help='', required=False) 23 | parser.add_argument('--sort-criteria', help='', required=False) 24 | parser.add_argument('--instance-type', help='', required=False) 25 | parser.add_argument('--instance-types', help='', required=False) 26 | parser.add_argument('--instance-hours', help='', type=int, required=False) 27 | parser.add_argument('--ebs-volume-type', help='', required=False) 28 | parser.add_argument('--ebs-storage-gb-month', help='', required=False) 29 | parser.add_argument('--piops', help='', type=int, required=False) 30 | parser.add_argument('--data-transfer-out-internet-gb', help='', required=False) 31 | parser.add_argument('--data-transfer-out-intraregion-gb', help='', required=False) 32 | parser.add_argument('--data-transfer-out-interregion-gb', help='', required=False) 33 | parser.add_argument('--to-region', help='', required=False) 34 | parser.add_argument('--term-type', help='', required=False) 35 | parser.add_argument('--offering-class', help='', required=False) 36 | parser.add_argument('--offering-classes', help='', required=False) 37 | parser.add_argument('--instance-count', help='', type=int, required=False) 38 | parser.add_argument('--years', help='', required=False) 39 | parser.add_argument('--offering-type', help='', required=False) 40 | parser.add_argument('--offering-types', help='', required=False) 41 | 42 | if len(sys.argv) == 1: 43 | parser.print_help() 44 | sys.exit(1) 45 | args = parser.parse_args() 46 | 47 | region = '' 48 | regions = '' 49 | instanceType = '' 50 | instanceTypes = '' 51 | instanceHours = 0 52 | instanceCount = 0 53 | sortCriteria = '' 54 | ebsVolumeType = '' 55 | ebsStorageGbMonth = 0 56 | pIops = 0 57 | dataTransferOutInternetGb = 0 58 | dataTransferOutIntraRegionGb = 0 59 | dataTransferOutInterRegionGb = 0 60 | toRegion = '' 61 | termType = consts.SCRIPT_TERM_TYPE_ON_DEMAND 62 | offeringClass = '' 63 | offeringClasses = consts.SUPPORTED_EMR_OFFERING_CLASSES #only used for Reserved comparisons (standard, convertible) 64 | offeringType = '' 65 | offeringTypes = consts.EC2_SUPPORTED_PURCHASE_OPTIONS #only used for Reserved comparisons (all-upfront, partial-upfront, no-upfront) 66 | years = 1 67 | 68 | if args.region: region = args.region 69 | if args.regions: regions = args.regions 70 | if args.sort_criteria: sortCriteria = args.sort_criteria 71 | if args.instance_type: instanceType = args.instance_type 72 | if args.instance_types: instanceTypes = args.instance_types 73 | if args.instance_hours: instanceHours = int(args.instance_hours) 74 | if args.ebs_volume_type: ebsVolumeType = args.ebs_volume_type 75 | if args.ebs_storage_gb_month: ebsStorageGbMonth = int(args.ebs_storage_gb_month) 76 | if args.piops: pIops = int(args.piops) 77 | if args.data_transfer_out_internet_gb: dataTransferOutInternetGb = int(args.data_transfer_out_internet_gb) 78 | if args.data_transfer_out_intraregion_gb: dataTransferOutIntraRegionGb = int(args.data_transfer_out_intraregion_gb) 79 | if args.data_transfer_out_interregion_gb: dataTransferOutInterRegionGb = int(args.data_transfer_out_interregion_gb) 80 | if args.to_region: toRegion = args.to_region 81 | if args.term_type: termType = args.term_type 82 | if args.offering_class: offeringClass = args.offering_class 83 | if args.offering_classes: offeringClasses = 
args.offering_classes.split(',') 84 | if args.instance_count: instanceCount = args.instance_count 85 | if args.offering_type: offeringType = args.offering_type 86 | if args.offering_types: offeringTypes = args.offering_types.split(',') 87 | if args.years: years = str(args.years) 88 | 89 | #TODO: Implement comparison between a subset of regions by entering an array of regions to compare 90 | #TODO: Implement a sort by target region (for data transfer) 91 | #TODO: For Reserved pricing, include a payment plan throughout the whole period, and a monthly average and savings 92 | 93 | 94 | try: 95 | 96 | #TODO: not working for EBS Snapshots! 97 | kwargs = {'sortCriteria':sortCriteria, 'instanceType':instanceType, 'instanceTypes':instanceTypes, 98 | 'instanceHours':instanceHours, 'dataTransferOutInternetGb':dataTransferOutInternetGb, 99 | 'ebsVolumeType':ebsVolumeType, 'ebsStorageGbMonth':ebsStorageGbMonth, 'pIops':pIops, 100 | 'dataTransferOutIntraRegionGb':dataTransferOutIntraRegionGb, 'dataTransferOutInterRegionGb':dataTransferOutInterRegionGb, 101 | 'toRegion':toRegion, 'termType':termType, 'instanceCount': instanceCount, 'years': years, 'offeringType':offeringType, 102 | 'offeringClass':offeringClass 103 | } 104 | 105 | if region: kwargs['region'] = region 106 | 107 | if sortCriteria: 108 | if sortCriteria in (consts.SORT_CRITERIA_TERM_TYPE, consts.SORT_CRITERIA_TERM_TYPE_REGION): 109 | if sortCriteria == consts.SORT_CRITERIA_TERM_TYPE_REGION: 110 | #TODO: validate that region list is comma-separated 111 | #TODO: move this list to utils.compare_term_types 112 | if regions: kwargs['regions'] = regions.split(',') 113 | else: kwargs['regions']=consts.SUPPORTED_REGIONS 114 | kwargs['purchaseOptions'] = offeringTypes #purchase options are referred to as offering types in the EC2 API 115 | kwargs['offeringClasses']=offeringClasses 116 | validate (kwargs) 117 | termPricingAnalysis = utils.compare_term_types(service=consts.SERVICE_EMR, **kwargs) 118 | tabularData = termPricingAnalysis.pop('tabularData') 119 | print ("EMR termPricingAnalysis: [{}]".format(json.dumps(termPricingAnalysis,sort_keys=False, indent=4))) 120 | print("csvData:\n{}\n".format(termPricingAnalysis['csvData'])) 121 | #print("tabularData:\n{}".format(tabularData).replace("reserved","rsv").replace("standard","std"). 122 | # replace("convertible","conv").replace("-upfront","").replace("partial","par").replace("demand","dmd")) 123 | print("\n{}".format(tabularData)) 124 | 125 | else: 126 | validate (kwargs) 127 | pricecomparisons = utils.compare(service=consts.SERVICE_EMR,**kwargs) 128 | print("Price comparisons:[{}]".format(json.dumps(pricecomparisons, indent=4))) 129 | #tabularData = termPricingAnalysis.pop('tabularData') 130 | #print("tabularData:\n{}".format(tabularData)) 131 | 132 | else: 133 | validate (kwargs) 134 | emr_pricing = emrpricing.calculate(data.EmrPriceDimension(**kwargs)) 135 | print(json.dumps(emr_pricing,sort_keys=False,indent=4)) 136 | 137 | 138 | 139 | 140 | 141 | 142 | except NoDataFoundError as ndf: 143 | print ("NoDataFoundError args:[{}]".format(args)) 144 | 145 | except Exception as e: 146 | traceback.print_exc() 147 | print("Exception message:["+str(e)+"]") 148 | 149 | 150 | """ 151 | This function contains validations at the script level. No need to validate EMR parameters, since 152 | class EmrPriceDimension already contains a validation function. 
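For example, a hypothetical call such as validate({'sortCriteria': consts.SORT_CRITERIA_TERM_TYPE, 'instanceHours': 100, 'years': 1}) raises ValidationError with the message "instance-hours cannot be set when sort-criteria=term-type". 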
153 | """ 154 | def validate (args): 155 | #TODO: add - if termType sort criteria is specified, don't include offeringClass (singular) 156 | #TODO: add - if offeringTypes is included, have at least one valid offeringType (purchase option) 157 | #TODO: move to models 158 | validation_msg = "" 159 | if args.get('sortCriteria','') == consts.SORT_CRITERIA_TERM_TYPE: 160 | if args.get('instanceHours',False): 161 | validation_msg = "instance-hours cannot be set when sort-criteria=term-type" 162 | if args.get('offeringType',False): 163 | validation_msg = "offering-type cannot be set when sort-criteria=term-type - try offering-types (plural) instead" 164 | if not args.get('years',''): 165 | validation_msg = "years cannot be empty" 166 | if args.get('sortCriteria','') == consts.SORT_CRITERIA_TERM_TYPE_REGION: 167 | if not args.get('offeringClasses',''): 168 | validation_msg = "offering-classes cannot be empty" 169 | if not args.get('purchaseOptions',''): 170 | validation_msg = "offering-types cannot be empty" 171 | 172 | if validation_msg: 173 | print("Error: [{}]".format(validation_msg)) 174 | raise ValidationError(validation_msg) 175 | 176 | return 177 | 178 | if __name__ == "__main__": 179 | main(sys.argv[1:]) 180 | -------------------------------------------------------------------------------- /awspricecalculator/common/phelper.py: -------------------------------------------------------------------------------- 1 | 2 | from . import consts 3 | import os, sys 4 | import datetime 5 | import logging 6 | import csv, json 7 | from .models import PricingRecord, PricingResult 8 | from .errors import NoDataFoundError 9 | 10 | 11 | log = logging.getLogger() 12 | log.setLevel(consts.LOG_LEVEL) 13 | 14 | __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) 15 | site_pkgs = os.path.abspath(os.path.join(__location__, os.pardir, os.pardir,"lib", "python3.7", "site-packages" )) 16 | sys.path.append(site_pkgs) 17 | #print "site_pkgs: [{}]".format(site_pkgs) 18 | 19 | import tinydb 20 | 21 | 22 | def get_data_directory(service): 23 | result = os.path.split(__location__)[0] + '/data/' + service + '/' 24 | return result 25 | 26 | 27 | 28 | def getBillableBand(priceDimensions, usageAmount): 29 | billableBand = 0 30 | beginRange = int(priceDimensions['beginRange']) 31 | endRange = priceDimensions['endRange'] 32 | pricePerUnit = priceDimensions['pricePerUnit']['USD'] 33 | if endRange == consts.INFINITY: 34 | if beginRange < usageAmount: 35 | billableBand = usageAmount - beginRange 36 | else: 37 | endRange = int(endRange) 38 | if endRange >= usageAmount and beginRange < usageAmount: 39 | billableBand = usageAmount - beginRange 40 | if endRange < usageAmount: 41 | billableBand = endRange - beginRange 42 | return billableBand 43 | 44 | 45 | def getBillableBandCsv(row, usageAmount): 46 | billableBand = 0 47 | pricePerUnit = 0 48 | amt = 0 49 | 50 | if not row['StartingRange']:beginRange = 0 51 | else: beginRange = int(row['StartingRange']) 52 | if not row['EndingRange']:endRange = consts.INFINITY 53 | else: endRange = row['EndingRange'] 54 | 55 | pricePerUnit = float(row['PricePerUnit']) 56 | if endRange == consts.INFINITY: 57 | if beginRange < usageAmount: 58 | billableBand = usageAmount - beginRange 59 | else: 60 | endRange = int(endRange) 61 | if endRange >= usageAmount and beginRange < usageAmount: 62 | billableBand = usageAmount - beginRange 63 | if endRange < usageAmount: 64 | billableBand = endRange - beginRange 65 | 66 | if billableBand > 0: amt = pricePerUnit * billableBand 67 
| 68 | return billableBand, pricePerUnit, amt 69 | 70 | 71 | 72 | #Creates a table with all the SKUs that are part of the total price 73 | def buildSkuTable(evaluated_sku_desc): 74 | result = {} 75 | sorted_descriptions = sorted(evaluated_sku_desc) 76 | result_table_header = "Price | Description | Price Per Unit | Usage | Rate Code" 77 | result_records = "" 78 | total = 0 79 | for s in sorted_descriptions: 80 | result_records = result_records + "$" + str(s[0]) + "|" + str(s[1]) + "|" + str(s[2]) + "|" + str(s[3]) + "|" + s[4]+"\n" 81 | total = total + s[0] 82 | 83 | result['header']=result_table_header 84 | result['records']=result_records 85 | result['total']=total 86 | return result 87 | 88 | 89 | 90 | """ 91 | Calculates the keys that will be used to partition big index files into smaller pieces. 92 | If no term is specified, the function will consider On-Demand and Reserved 93 | """ 94 | #TODO: merge all 3 load balancers into a single file (to speed up DB file loading and number of open files 95 | def get_partition_keys(service, region, term, **extraArgs): 96 | result = [] 97 | if region: 98 | regions = [consts.REGION_MAP[region]] 99 | else: 100 | regions = consts.REGION_MAP.values() 101 | 102 | if term: terms = [consts.TERM_TYPE_MAP[term]] 103 | else: terms = consts.TERM_TYPE_MAP.values() 104 | 105 | #productFamilies = consts.SUPPORTED_PRODUCT_FAMILIES 106 | productFamilies = consts.SUPPORTED_PRODUCT_FAMILIES_BY_SERVICE_DICT[service] 107 | 108 | #EC2 & RDS Reserved 109 | offeringClasses = extraArgs.get('offeringClasses',consts.EC2_OFFERING_CLASS_MAP.values()) 110 | tenancies = extraArgs.get('tenancies',consts.EC2_TENANCY_MAP.values()) 111 | purchaseOptions = extraArgs.get('purchaseOptions',consts.EC2_PURCHASE_OPTION_MAP.values()) 112 | 113 | indexDict = {} 114 | #TODO: filter by service, to speed up file loading and to avoid max open files limit 115 | for r in regions: 116 | for t in terms: 117 | for pf in productFamilies: 118 | #Reserved EC2 & DB instances have more dimensions for index creation 119 | if t == consts.TERM_TYPE_RESERVED: 120 | if pf in consts.SUPPORTED_RESERVED_PRODUCT_FAMILIES: 121 | for oc in offeringClasses: 122 | for ten in tenancies: 123 | for po in purchaseOptions: 124 | result.append(create_file_key((r,t,pf,oc,ten, po))) 125 | else: 126 | #OnDemand EC2 Instances use Tenancy as a dimension for index creation 127 | if service == consts.SERVICE_EC2 and pf == consts.PRODUCT_FAMILY_COMPUTE_INSTANCE: 128 | for ten in tenancies: 129 | result.append(create_file_key((r,t,pf,ten))) 130 | else: 131 | result.append(create_file_key((r,t,pf))) 132 | 133 | return result 134 | 135 | 136 | #Creates a file key that identifies a data partition 137 | def create_file_key(indexDimensions): 138 | result = "" 139 | for d in indexDimensions: result += d 140 | return result.replace(' ','') 141 | 142 | 143 | 144 | def loadDBs(service, indexFiles): 145 | 146 | dBs = {} 147 | datadir = get_data_directory(service) 148 | indexMetadata = getIndexMetadata(service) 149 | 150 | #Files in Lambda can only be created in the /tmp filesystem - If it doesn't exist, create it. 151 | lambdaFileSystem = '/tmp/'+service+'/data' 152 | if not os.path.exists(lambdaFileSystem): 153 | os.makedirs(lambdaFileSystem) 154 | 155 | for i in indexFiles: 156 | db = tinydb.TinyDB(lambdaFileSystem+'/'+i+'.json') 157 | #TODO: remove circular dependency from utils, so I can use the method get_index_file_name 158 | #TODO: initial tests show that is faster (by a few milliseconds) to populate the file from scratch). 
See if I should load from scratch all the time 159 | #TODO: Create a file that is an index of those files that have been generated, so the code knows which files to look for and avoid creating unnecessary empty .json files 160 | if len(db) == 0: 161 | try: 162 | #with open(datadir+i+'.csv', 'rb') as csvfile: 163 | with open(datadir+i+'.csv', 'r') as csvfile: 164 | pricelist = csv.DictReader(csvfile, delimiter=',', quotechar='"') 165 | db.insert_multiple(pricelist) 166 | #csvfile.close()#avoid " [Errno 24] Too many open files" exception 167 | except IOError: 168 | pass 169 | dBs[i]=db 170 | #db.close()#avoid " [Errno 24] Too many open files" exception 171 | 172 | 173 | return dBs, indexMetadata 174 | 175 | 176 | 177 | def getIndexMetadata(service): 178 | ts = Timestamp() 179 | ts.start('getIndexMetadata') 180 | result = {} 181 | #datadir = get_data_directory(service) 182 | with open(get_data_directory(service)+"index_metadata.json") as index_metadata: 183 | result = json.load(index_metadata) 184 | index_metadata.close() 185 | ts.finish('getIndexMetadata') 186 | log.debug("Time to load indexMetadata: [{}]".format(ts.elapsed('getIndexMetadata'))) 187 | return result 188 | 189 | 190 | def calculate_price(service, db, query, usageAmount, pricingRecords, cost): 191 | ts = Timestamp() 192 | ts.start('tinyDbSearchCalculatePrice') 193 | 194 | resultSet = db.search(query) 195 | 196 | ts.finish('tinyDbSearchCalculatePrice') 197 | log.debug("Time to search {} pricing DB for query [{}] : [{}] ".format(service, query, ts.elapsed('tinyDbSearchCalculatePrice'))) 198 | 199 | if not resultSet: raise NoDataFoundError("Could not find data for service:[{}] - query:[{}]".format(service, query)) 200 | #print("resultSet:[{}]".format(json.dumps(resultSet,indent=4))) 201 | for r in resultSet: 202 | billableUsage, pricePerUnit, amt = getBillableBandCsv(r, usageAmount) 203 | cost = cost + amt 204 | if billableUsage: 205 | #TODO: calculate rounding dynamically - don't set to 4 - use description to set the right rounding 206 | pricing_record = PricingRecord(service,round(amt,4),r['PriceDescription'],pricePerUnit,billableUsage,r['RateCode']) 207 | pricingRecords.append(vars(pricing_record)) 208 | 209 | return pricingRecords, cost 210 | 211 | 212 | 213 | 214 | class Timestamp(): 215 | 216 | def __init__(self): 217 | self.eventdict = {} 218 | 219 | def start(self,event): 220 | self.eventdict[event] = {} 221 | self.eventdict[event]['start'] = datetime.datetime.now() 222 | 223 | def finish(self, event): 224 | #elapsed = datetime.timedelta(self.eventdict[event]['start']) * 1000 #return milliseconds 225 | elapsed = datetime.datetime.now() - self.eventdict[event]['start'] 226 | self.eventdict[event]['elapsed'] = elapsed 227 | return elapsed 228 | 229 | def elapsed(self,event): 230 | return self.eventdict[event]['elapsed'] 231 | 232 | -------------------------------------------------------------------------------- /scripts/get-latest-index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os, sys, getopt, json, csv, ssl 3 | import urllib.request 4 | 5 | sys.path.insert(0, os.path.abspath('..')) 6 | from awspricecalculator.common import consts as consts 7 | from awspricecalculator.common import phelper as phelper 8 | 9 | 10 | if (not os.environ.get('PYTHONHTTPSVERIFY', '') and 11 | getattr(ssl, '_create_unverified_context', None)): 12 | ssl._create_default_https_context = ssl._create_unverified_context 13 | 14 | 15 | __location__ = 
os.path.dirname(os.path.realpath(__file__)) 16 | dataindexpath = os.path.join(os.path.split(__location__)[0],"awspricecalculator", "data") 17 | 18 | """ 19 | This script gets the latest index files from the AWS Price List API. 20 | """ 21 | #TODO: add support for term-type = onDemand, Reserved or both 22 | def main(argv): 23 | 24 | 25 | SUPPORTED_SERVICES = (consts.SERVICE_S3, consts.SERVICE_EC2, consts.SERVICE_RDS, consts.SERVICE_LAMBDA, 26 | consts.SERVICE_DYNAMODB, consts.SERVICE_KINESIS, consts.SERVICE_DATA_TRANSFER, consts.SERVICE_EMR, 27 | consts.SERVICE_REDSHIFT, consts.SERVICE_ALL) 28 | SUPPORTED_FORMATS = ('json','csv') 29 | OFFER_INDEX_URL = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/{serviceIndex}/current/index.' 30 | 31 | 32 | service = '' 33 | format = '' 34 | region = '' 35 | tenancy = '' 36 | 37 | help_message = 'Script usage: \nget-latest-index.py --service= --format=' 38 | 39 | try: 40 | opts, args = getopt.getopt(argv,"hr:s:f:t:",["region=","service=","format=","tenancy="]) #the short -t option takes a value, hence the trailing colon 41 | print ('opts: ' + str(opts)) 42 | except getopt.GetoptError: 43 | print (help_message) 44 | sys.exit(2) 45 | 46 | for opt in opts: 47 | if opt[0] == '-h': 48 | print (help_message) 49 | sys.exit() 50 | if opt[0] in ("-s","--service"): 51 | service = opt[1] 52 | if opt[0] in ("-f","--format"): 53 | format = opt[1] 54 | if opt[0] in ("-r","--region"): 55 | region = opt[1] 56 | if opt[0] in ("-t","--tenancy"): #comma-separated tenancies (host, dedicated, shared) 57 | tenancy= opt[1].split(',') 58 | 59 | 60 | if not format: format = 'csv' 61 | 62 | validation_ok = True 63 | 64 | 65 | if service not in SUPPORTED_SERVICES: 66 | validation_ok = False 67 | if format not in SUPPORTED_FORMATS: 68 | validation_ok = False 69 | 70 | if not validation_ok: 71 | print (help_message) 72 | sys.exit(2) 73 | 74 | services = [] 75 | if service == 'all': services = SUPPORTED_SERVICES 76 | else: services = [service] 77 | 78 | term = '' #all terms 79 | 80 | extraArgs = {} 81 | if tenancy: extraArgs['tenancies']=tenancy #tenancy is already a list, produced by split(',') above 82 | else: extraArgs['tenancies']=consts.EC2_TENANCY_MAP.keys() 83 | 84 | 85 | for s in services: 86 | if s != 'all': 87 | offerIndexUrl = OFFER_INDEX_URL.replace('{serviceIndex}',consts.SERVICE_INDEX_MAP[s]) + format 88 | print ('Downloading offerIndexUrl:['+offerIndexUrl+']...') 89 | 90 | servicedatapath = dataindexpath + "/" + s 91 | print ("servicedatapath:[{}]".format(servicedatapath)) 92 | 93 | if not os.path.exists(servicedatapath): os.mkdir(servicedatapath) 94 | filename = servicedatapath+"/index."+format 95 | 96 | with open(filename, "wb") as f: f.write(urllib.request.urlopen(offerIndexUrl).read()) 97 | 98 | if format == 'csv': 99 | remove_metadata(filename) 100 | split_index(s, region, term, **extraArgs) 101 | 102 | 103 | """ 104 | The first rows in the PriceList index.csv are metadata. 105 | This method removes the metadata from the index files and writes it in a separate .json file, 106 | so the metadata can be accessed by other modules. For example, the PriceList Version is returned 107 | in every price calculation. 
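Each metadata row is a two-column CSV record; for example (values illustrative), a row like "Version","20170419234557" becomes the JSON entry {"Version": "20170419234557"}. The 'Version' entry in the resulting index_metadata.json is the one the pricing modules later return as awsPriceListApiVersion. 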
108 | """ 109 | 110 | def remove_metadata(index_filename): 111 | print ("Removing metadata from file [{}]".format(index_filename)) 112 | metadata_filename = index_filename.replace('.csv','_metadata.json') 113 | metadata_dict = {} 114 | with open(index_filename,"r") as rf: 115 | lines = rf.readlines() 116 | with open(index_filename,"w") as wf: 117 | i = 0 118 | for l in lines: 119 | #The first 5 records in the CSV file are metadata 120 | if i <= 4: 121 | config_record = l.replace('","','"|"').strip("\n").split("|") 122 | metadata_dict[config_record[0].strip('\"')] = config_record[1].strip('\"') 123 | else: 124 | wf.write(l) 125 | i += 1 126 | with open(metadata_filename,"w") as mf: 127 | print ("Creating metadata file [{}]".format(metadata_filename)) 128 | metadata_json = json.dumps(metadata_dict,sort_keys=False,indent=4) 129 | print ("metadata_json: [{}]".format(metadata_json)) 130 | mf.write(metadata_json) 131 | 132 | """ 133 | Some index files are too large. For example, the one for EC2 has more than 460K records. 134 | In order to make price lookup more efficient, awspricecalculator splits the 135 | index based on a combination of region, term type and product family. Each partition 136 | has a key, which is used by tinydb to load smaller files as databases that can be 137 | queried. This increases performance significantly. 138 | 139 | """ 140 | 141 | def split_index(service, region, term, **args): 142 | #Split index format: region -> term type -> product family 143 | indexDict = {}#contains the keys of the files that will be created 144 | productFamilies = {} 145 | usageGroupings=[] 146 | partition_keys = phelper.get_partition_keys(service, region, term, **args)#All regions and all term types (On-Demand + Reserved) 147 | #print("partition_keys:[{}]".format(partition_keys)) 148 | for pk in partition_keys: 149 | indexDict[pk]=[] 150 | 151 | fieldnames = [] 152 | 153 | #with open(get_index_file_name(service, 'index', 'csv'), 'rb') as csvfile: 154 | with open(get_index_file_name(service, 'index', 'csv'), 'r') as csvfile: 155 | pricelist = csv.DictReader(csvfile, delimiter=',', quotechar='"') 156 | indexRegion = '' 157 | x = 0 158 | for row in pricelist: 159 | indexKey = '' 160 | if x==0: fieldnames=row.keys() 161 | if row.get('Location Type','') == 'AWS Region': 162 | indexRegion = row['Location'] 163 | if row.get('Product Family','')== consts.PRODUCT_FAMILY_DATA_TRANSFER: 164 | indexRegion = row['From Location'] 165 | 166 | #Determine the index partition the current row belongs to and append it to the corresponding array 167 | if row.get('TermType','') == consts.TERM_TYPE_RESERVED: 168 | #TODO:move the creation of the index dimensions to a common function 169 | if service == consts.SERVICE_EC2: 170 | indexDimensions = (indexRegion,row['TermType'],row['Product Family'],row['OfferingClass'],row['Tenancy'], row['PurchaseOption']) 171 | elif service in (consts.SERVICE_RDS, consts.SERVICE_REDSHIFT):#'Tenancy' is not part of the RDS/Redshift index, therefore default it to Shared 172 | indexDimensions = (indexRegion,row['TermType'],row['Product Family'],row['OfferingClass'],row.get('Tenancy',consts.EC2_TENANCY_SHARED),row['PurchaseOption']) 173 | else: 174 | if service == consts.SERVICE_EC2: 175 | indexDimensions = (indexRegion,row['TermType'],row['Product Family'],row['Tenancy']) 176 | else: 177 | indexDimensions = (indexRegion,row['TermType'],row['Product Family']) 178 | 179 | #print ("TermType:[{}] - service:[{}] - indexDimensions:[{}]".format(row.get('TermType',''), service, 
indexDimensions)) 180 | 181 | indexKey = phelper.create_file_key(indexDimensions) 182 | if indexKey in indexDict: 183 | indexDict[indexKey].append(remove_fields(service, row)) 184 | 185 | #Get a list of distinct product families in the index file 186 | productFamily = row['Product Family'] 187 | if productFamily not in productFamilies: 188 | productFamilies[productFamily] = [] 189 | usageGroup = row.get('Group','') 190 | if usageGroup not in productFamilies[productFamily]: 191 | productFamilies[productFamily].append(usageGroup) 192 | 193 | x += 1 194 | if x % 1000 == 0: print("Processed row [{}]".format(x)) 195 | 196 | print ("productFamilies:{}".format(productFamilies)) 197 | 198 | i = 0 199 | #Create csv files based on the partitions that were calculated when scanning the main index.csv file 200 | for f in indexDict.keys(): 201 | if indexDict[f]: 202 | i += 1 203 | print ("Writing file for key: [{}]".format(f)) 204 | with open(get_index_file_name(service, f, 'csv'),'w') as csvfile: 205 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel', quoting=csv.QUOTE_ALL) 206 | writer.writeheader() 207 | for r in indexDict[f]: 208 | writer.writerow(r) 209 | 210 | print ("Number of records in main index file: [{}]".format(x)) 211 | print ("Number of files written: [{}]".format(i)) 212 | return 213 | 214 | 215 | def get_index_file_name(service, name, format): 216 | result = '../awspricecalculator/data/'+service+'/'+name+'.'+format 217 | return result 218 | 219 | 220 | """ 221 | This method removes unnecessary fields from each row in the index file. This is necessary since large index files 222 | become a problem when they're too large and result in Lambda functions exceeding package size or in slower warm-up 223 | times for Lambda. 224 | """ 225 | 226 | def remove_fields(service, row): 227 | #don't exclude: 'Product Family', 'operation' (used by ELB) 228 | EXCLUDE_FIELD_DICT = { 229 | consts.SERVICE_EC2:['Location Type', 'Storage', 'Location', 'Memory', 'Physical Processor', 230 | 'Dedicated EBS Throughput', 'Processor Features', 'ECU', 'serviceName', 'Network Performance', 231 | 'Instance Family', 'Current Generation', 232 | 'serviceCode','TermType','Tenancy','OfferingClass','PurchaseOption' #these fields are implicit in the DB file name, therefore they're not necessary in the file itself 233 | ] 234 | } 235 | 236 | for f in EXCLUDE_FIELD_DICT.get(service, []): 237 | row.pop(f,'') 238 | 239 | return row 240 | 241 | 242 | 243 | #TODO: remove consolidated index.csv file after it has been split into smaller files 244 | 245 | 246 | 247 | 248 | 249 | 250 | if __name__ == "__main__": 251 | main(sys.argv[1:]) 252 | -------------------------------------------------------------------------------- /awspricecalculator/rds/pricing.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import json 3 | import logging 4 | from ..common import consts, phelper, utils 5 | from ..common.models import PricingResult 6 | import tinydb 7 | 8 | log = logging.getLogger() 9 | regiondbs = {} 10 | indexMetadata = {} 11 | 12 | 13 | def calculate(pdim): 14 | ts = phelper.Timestamp() 15 | ts.start('totalCalculation') 16 | ts.start('tinyDbLoadOnDemand') 17 | ts.start('tinyDbLoadReserved') 18 | 19 | global regiondbs 20 | global indexMetadata 21 | 22 | 23 | log.info("Calculating RDS pricing with the following inputs: {}".format(str(pdim.__dict__))) 24 | 25 | #Load On-Demand DBs 26 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_RDS, 
phelper.get_partition_keys(consts.SERVICE_RDS, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 27 | cost = 0 28 | pricing_records = [] 29 | 30 | awsPriceListApiVersion = indexMetadata['Version'] 31 | priceQuery = tinydb.Query() 32 | 33 | 34 | 35 | skuEngine = '' 36 | skuEngineEdition = '' 37 | skuLicenseModel = '' 38 | 39 | if pdim.engine in consts.RDS_ENGINE_MAP: 40 | skuEngine = consts.RDS_ENGINE_MAP[pdim.engine]['engine'] 41 | skuEngineEdition = consts.RDS_ENGINE_MAP[pdim.engine]['edition'] 42 | skuLicenseModel = consts.RDS_LICENSE_MODEL_MAP[pdim.licenseModel] 43 | 44 | deploymentOptionCondition = pdim.deploymentOption 45 | 46 | #'Multi-AZ (SQL Server Mirror)' is no longer available in pricing index 47 | #if 'sqlserver' in pdim.engine and pdim.deploymentOption == consts.RDS_DEPLOYMENT_OPTION_MULTI_AZ: 48 | # deploymentOptionCondition = consts.RDS_DEPLOYMENT_OPTION_MULTI_AZ_MIRROR 49 | 50 | #DBs for Data Transfer 51 | tmpDtDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType 52 | dtdbs = regiondbs.get(tmpDtDbKey,{}) 53 | if not dtdbs: 54 | dtdbs, dtIndexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **{})) 55 | regiondbs[tmpDtDbKey]=dtdbs 56 | 57 | 58 | #_/_/_/_/_/ ON-DEMAND PRICING _/_/_/_/_/ 59 | if pdim.termType == consts.SCRIPT_TERM_TYPE_ON_DEMAND: 60 | #Load On-Demand DBs 61 | dbs = regiondbs.get(consts.SERVICE_RDS+pdim.region+pdim.termType,{}) 62 | if not dbs: 63 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_RDS, phelper.get_partition_keys(consts.SERVICE_RDS, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND)) 64 | regiondbs[consts.SERVICE_RDS+pdim.region+pdim.termType]=dbs 65 | 66 | ts.finish('tinyDbLoadOnDemand') 67 | log.debug("Time to load OnDemand DB files: [{}]".format(ts.elapsed('tinyDbLoadOnDemand'))) 68 | 69 | #DB Instance 70 | if pdim.instanceHours: 71 | instanceDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATABASE_INSTANCE))] 72 | 73 | 74 | ts.start('tinyDbSearchComputeFile') 75 | query = ((priceQuery['Product Family'] == consts.PRODUCT_FAMILY_DATABASE_INSTANCE) & 76 | (priceQuery['Instance Type'] == pdim.dbInstanceClass) & 77 | (priceQuery['Database Engine'] == skuEngine) & 78 | (priceQuery['Database Edition'] == skuEngineEdition) & 79 | (priceQuery['License Model'] == skuLicenseModel) & 80 | (priceQuery['Deployment Option'] == deploymentOptionCondition) 81 | ) 82 | 83 | log.debug("Time to search DB instance compute:[{}]".format(ts.finish('tinyDbSearchComputeFile'))) 84 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, instanceDb, query, pdim.instanceHours, pricing_records, cost) 85 | 86 | #Data Transfer 87 | #To internet 88 | if pdim.dataTransferOutInternetGb: 89 | dataTransferDb = dtdbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 90 | query = ((priceQuery['serviceCode'] == consts.SERVICE_CODE_AWS_DATA_TRANSFER) & 91 | (priceQuery['To Location'] == 'External') & 92 | (priceQuery['Transfer Type'] == 'AWS Outbound')) 93 | 94 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 95 | 96 | 97 | #Inter-regional data transfer - to other AWS regions 98 | if pdim.dataTransferOutInterRegionGb: 99 | dataTransferDb = 
dtdbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 100 | query = ((priceQuery['serviceCode'] == consts.SERVICE_CODE_AWS_DATA_TRANSFER) & 101 | (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion]) & 102 | (priceQuery['Transfer Type'] == 'InterRegion Outbound')) 103 | 104 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInterRegionGb, pricing_records, cost) 105 | 106 | #Storage (magnetic, SSD, PIOPS) 107 | if pdim.storageGbMonth: 108 | engineCondition = 'Any' 109 | if skuEngine == consts.RDS_DB_ENGINE_SQL_SERVER: engineCondition = consts.RDS_DB_ENGINE_SQL_SERVER 110 | storageDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DB_STORAGE])] 111 | query = ((priceQuery['Volume Type'] == pdim.volumeType) & 112 | (priceQuery['Database Engine'] == engineCondition) & 113 | (priceQuery['Deployment Option'] == pdim.deploymentOption)) 114 | 115 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, storageDb, query, pdim.storageGbMonth, pricing_records, cost) 116 | 117 | #Provisioned IOPS 118 | if pdim.storageType == consts.SCRIPT_RDS_STORAGE_TYPE_IO1: 119 | iopsDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DB_PIOPS])] 120 | query = ((priceQuery['Deployment Option'] == pdim.deploymentOption)) 121 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, iopsDb, query, pdim.iops, pricing_records, cost) 122 | 123 | #Consumed IOPS (I/O rate) 124 | if pdim.ioRequests: 125 | sysopsDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SYSTEM_OPERATION])] 126 | dbEngineCondition = 'Any' 127 | if pdim.engine in (consts.RDS_DB_ENGINE_POSTGRESQL, consts.RDS_DB_ENGINE_AURORA_MYSQL): 128 | dbEngineCondition = pdim.engine 129 | 130 | query = ((priceQuery['Group'] == 'Aurora I/O Operation')& 131 | (priceQuery['Database Engine'] == dbEngineCondition) 132 | ) 133 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, sysopsDb, query, pdim.ioRequests, pricing_records, cost) 134 | 135 | 136 | #Snapshot Storage 137 | if pdim.backupStorageGbMonth: 138 | snapshotDb = dbs[phelper.create_file_key([consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SNAPSHOT])] 139 | query = ((priceQuery['usageType'] == 'RDS:ChargedBackupUsage')) 140 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, snapshotDb, query, pdim.backupStorageGbMonth, pricing_records, cost) 141 | 142 | 143 | 144 | 145 | #_/_/_/_/_/ RESERVED PRICING _/_/_/_/_/ 146 | if pdim.termType == consts.SCRIPT_TERM_TYPE_RESERVED: 147 | #Load Reserved DBs 148 | 149 | indexArgs = {'offeringClasses':consts.EC2_OFFERING_CLASS_MAP.values(), 150 | 'tenancies':[consts.EC2_TENANCY_SHARED], 'purchaseOptions':consts.EC2_PURCHASE_OPTION_MAP.values()} 151 | 152 | dbs = regiondbs.get(consts.SERVICE_RDS+pdim.region+pdim.termType,{}) 153 | if not dbs: 154 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_RDS, phelper.get_partition_keys(consts.SERVICE_RDS, pdim.region, consts.SCRIPT_TERM_TYPE_RESERVED, **indexArgs)) 155 | regiondbs[consts.SERVICE_RDS+pdim.region+pdim.termType]=dbs 156 | ts.finish('tinyDbLoadReserved') 157 | log.debug("Time to load Reserved DB files: 
[{}]".format(ts.elapsed('tinyDbLoadReserved'))) 158 | log.debug("regiondbs keys:[{}]".format(regiondbs)) 159 | 160 | #DB Instance 161 | #RDS only supports standard 162 | instanceDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], 163 | consts.PRODUCT_FAMILY_DATABASE_INSTANCE, consts.EC2_OFFERING_CLASS_STANDARD, 164 | consts.EC2_TENANCY_SHARED, consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]))] 165 | 166 | 167 | ts.start('tinyDbSearchComputeFileReserved') 168 | query = ((priceQuery['Product Family'] == consts.PRODUCT_FAMILY_DATABASE_INSTANCE) & 169 | (priceQuery['Instance Type'] == pdim.dbInstanceClass) & 170 | (priceQuery['Database Engine'] == skuEngine) & 171 | (priceQuery['Database Edition'] == skuEngineEdition) & 172 | (priceQuery['License Model'] == skuLicenseModel) & 173 | (priceQuery['Deployment Option'] == deploymentOptionCondition) & 174 | (priceQuery['OfferingClass'] == consts.EC2_OFFERING_CLASS_MAP[pdim.offeringClass]) & 175 | (priceQuery['PurchaseOption'] == consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]) & 176 | (priceQuery['LeaseContractLength'] == consts.EC2_RESERVED_YEAR_MAP["{}".format(pdim.years)]) 177 | ) 178 | 179 | hrsQuery = query & (priceQuery['Unit'] == 'Hrs' ) 180 | qtyQuery = query & (priceQuery['Unit'] == 'Quantity' ) 181 | 182 | #TODO: use RDS-specific constants, not EC2 constants 183 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 184 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, instanceDb, qtyQuery, pdim.instanceCount, pricing_records, cost) 185 | 186 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 187 | reservedInstanceHours = utils.calculate_instance_hours_year(pdim.instanceCount, pdim.years) 188 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_RDS, instanceDb, hrsQuery, reservedInstanceHours, pricing_records, cost) 189 | 190 | log.debug("Time to search DB instance compute:[{}]".format(ts.finish('tinyDbSearchComputeFileReserved'))) 191 | 192 | 193 | 194 | 195 | 196 | log.debug("Total time to calculate price: [{}]".format(ts.finish('totalCalculation'))) 197 | extraargs = {'priceDimensions':pdim} 198 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 199 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 200 | return pricing_result.__dict__ 201 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Concurrency Labs - AWS Price Calculator tool 4 | 5 | This repository uses the AWS Price List API to implement price calculation utilities. 6 | 7 | Supported services: 8 | * EC2 9 | * ELB 10 | * EBS 11 | * RDS 12 | * Lambda 13 | * Dynamo DB 14 | * Kinesis 15 | 16 | Visit the following URLs for more details: 17 | 18 | https://www.concurrencylabs.com/blog/aws-pricing-lambda-realtime-calculation-function/ 19 | https://www.concurrencylabs.com/blog/aws-lambda-cost-optimization-tools/ 20 | https://www.concurrencylabs.com/blog/calculate-near-realtime-pricing-serverless-applications/ 21 | 22 | The code is structured in the following way: 23 | 24 | **awspricecalculator**. The modules in this package search data within the AWS Price List API index files. 
25 | They take price dimension parameters as inputs and return results in JSON format. This package 26 | is called by Lambda functions or other Python scripts. 27 | 28 | **functions**. This is where our Lambda functions live. Functions are packaged using the Serverless framework. 29 | 30 | **scripts**. Here are some Python scripts to help with management and price optimizations. See README.md in the scripts 31 | folder for more details. 32 | 33 | 34 | ## Available Lambda functions: 35 | 36 | ### calculate-near-realtime 37 | This function is called by a schedule configured using CloudWatch Events. 38 | The function receives a JSON object configured in the schedule. The JSON object supports the following format: 39 | 40 | Tag-based: ```{"tag":{"key":"mykey","value":"myvalue"}}```. 41 | The function finds resources with the corresponding tag, gets current usage using CloudWatch metrics, 42 | projects usage into a longer time period (a month), calls pricecalculator to calculate price 43 | and puts results in CloudWatch metrics under the namespace ```ConcurrencyLabs/Pricing/NearRealTimeForecast```. 44 | Supported services are EC2, EBS, ELB, RDS and Lambda. Not all price dimensions are supported for all services, though. 45 | 46 | You can configure as many CloudWatch Events as you want, each one with a different tag. 47 | 48 | 49 | **Rules:** 50 | * The function only considers for price calculation those resources that are tagged. For example, if there is an untagged ELB 51 | with tagged EC2 instances, the function will only consider the EC2 instances for the calculation. 52 | If there is a tagged ELB with untagged EC2 instances, the function will only calculate price 53 | for the ELB. 54 | * The behavior described above is intended for simplicity, otherwise the function would have to 55 | cover a number of combinations that might or might not be suitable to all users of the function. 56 | * To keep it simple, if you want a resource to be included in the calculation, then tag it. Otherwise 57 | leave it untagged. 58 | 59 | 60 | **Limitations** 61 | The function doesn't support cost estimations for the following: 62 | * EC2 data transfer for instances not registered to an ELB 63 | * EC2 Reserved Instances 64 | * EBS Snapshots 65 | * RDS data transfer 66 | * Lambda data transfer 67 | * Kinesis PUT Payload Units are partially calculated based on CloudWatch metrics (there's no 100% accuracy for this price dimension) 68 | * Dynamo DB storage 69 | * Dynamo DB data transfer 70 | 71 | 72 | 73 | 74 | ## Install - using CloudFormation (recommended) 75 | 76 | 77 | I created a CloudFormation template that deploys the Lambda function, as well as the CloudWatch Events 78 | schedule. All you have to do is specify the tag key and value you want to calculate pricing for. 79 | For example: TagKey:stack, TagValue:mywebapp 80 | 81 | Click here to get started: 82 | 83 | Launch Stack 84 | 85 | 86 | ### Metrics 87 | 88 | The function publishes a metric named `EstimatedCharges` to CloudWatch, under namespace `ConcurrencyLabs/Pricing/NearRealTimeForecast` and it uses 89 | the following dimensions: 90 | 91 | * Currency: USD 92 | * ForecastPeriod: monthly 93 | * ServiceName: ec2, rds, lambda, kinesis, dynamodb 94 | * Tag: mykey=myvalue 95 | 96 | 97 | 98 | 99 | ### Updating to the latest version using CloudFormation 100 | 101 | This function will be updated regularly in order to fix bugs, update AWS Price List data and also to add more functionality. 
103 | This means you will likely have to update the function at some point. I recommend installing 104 | the function using the CloudFormation template, since it will simplify the update process. 105 | 106 | To update the function, just go to the CloudFormation console, select the stack you've created 107 | and click on Actions -> Update Stack: 108 | 109 | ![Update CF stack](https://www.concurrencylabs.com/img/posts/11-ec2-pricing-lambda/update-stack.png) 110 | 111 | 112 | Then select "Specify an Amazon S3 template URL" and enter the following value: 113 | 114 | 115 | ``` 116 | http://concurrencylabs-cfn-templates.s3.amazonaws.com/lambda-near-realtime-pricing/function-plus-schedule.json 117 | ``` 118 | 119 | ![Select template](https://www.concurrencylabs.com/img/posts/11-ec2-pricing-lambda/update-function-select-template.png) 120 | 121 | And that's it. CloudFormation will update the function with the latest code. 122 | 123 | 124 | 125 | ## Install Locally (if you want to modify it) - Manual steps 126 | 127 | If you only want to install the Lambda function, you don't need to follow the steps below, just follow 128 | the instructions in the "Install - Using CloudFormation" section above. 129 | 130 | If you want to setup a dev environment, run a local copy, make some modifications and then install in your AWS account, then keep reading... 131 | 132 | 133 | ### Clone the repo 134 | 135 | ``` 136 | git clone https://github.com/concurrencylabs/aws-pricing-tools aws-pricing-tools 137 | ``` 138 | 139 | 140 | ### (Optional, but recommended) Create an isolated Python environment using virtualenv 141 | 142 | It's always a good practice to create an isolated environment so we have greater control over 143 | the dependencies in our project, including the Python runtime. 144 | 145 | If you don't have virtualenv installed, run: 146 | 147 | ``` 148 | pip install virtualenv 149 | ``` 150 | 151 | For more details on virtualenv, click here. 152 | 153 | Now, create a Python 2.7 virtual environment in the location where you cloned the repo into. If you want to name your project 154 | aws-pricing-tools, then run (one level up from the dir, use the same local name you used when you cloned 155 | the repo): 156 | 157 | ``` 158 | virtualenv aws-pricing-tools -p python2.7 159 | ``` 160 | 161 | After your environment is created, it's time to activate it. Go to the recently created 162 | folder of your project (i.e. aws-pricing-tools) and from there run: 163 | 164 | ``` 165 | source bin/activate 166 | ``` 167 | 168 | 169 | ### Install Requirements 170 | 171 | From your project root folder, run: 172 | 173 | ``` 174 | ./install.sh 175 | ``` 176 | 177 | This will install the following dependencies to the ```vendored``` directory: 178 | 179 | * **tinydb** - The code in this repo queries the Price List API csv records using the tinydb library. 180 | * **numpy** - Used for statistics in the lambda optimization script 181 | 182 | ... and the following dependencies in your default site-packages location: 183 | 184 | * **python-local-lambda** - lets me test my Lambda functions locally using test events in my workstation. 185 | * **boto3** - AWS Python SDK to call AWS APIs. 186 | 187 | 188 | ### Install the Serverless Framework 189 | 190 | ![ServerlessLogo](https://www.concurrencylabs.com/img/posts/11-ec2-pricing-lambda/serverless_logo.png) 191 | 192 | 193 | Since the pricing tool runs on AWS Lambda, I decided to use the Serverless Framework. 
194 | This framework enormously simplifies the development, configuration and deployment of Function as a Service (a.k.a. FaaS, or "serverless") 195 | code into AWS Lambda. 196 | 197 | 198 | You should follow the official Serverless installation instructions, 199 | which can be summarized in the following steps: 200 | 201 | 1. Make sure you have Node.js installed in your workstation. 202 | ``` 203 | node --version 204 | ``` 205 | 206 | 2. Install the Serverless Framework 207 | ``` 208 | npm install -g serverless 209 | ``` 210 | 211 | 212 | 3. Confirm Serverless has been installed 213 | ``` 214 | serverless --version 215 | ``` 216 | These steps were tested using version ```1.6.1``` 217 | 218 | 219 | 4. Serverless needs access to your AWS account, so it can create and update AWS Lambda 220 | functions, among other operations. Therefore, you have to make sure Serverless can access 221 | a set of IAM User credentials. Follow the credentials setup instructions in the Serverless documentation. 222 | In the long term, you should make sure these credentials are limited to only the API operations 223 | Serverless requires - avoid Administrator access, which is a bad security and operational practice. 224 | 225 | 226 | 5. Check out the code from this repo into your virtualenv folder. 227 | 228 | 229 | ### Set environment variables 230 | 231 | ``` 232 | export AWS_DEFAULT_PROFILE= 233 | export AWS_DEFAULT_REGION= 234 | ``` 235 | 236 | 237 | ### How to test the function locally 238 | 239 | 240 | **Download the latest AWS Price List API Index file** 241 | 242 | The code needs a local copy of the AWS Price List API index file. 243 | The GitHub repo doesn't come with the index file, therefore you have to 244 | download it the first time you run your code and every time AWS publishes a new 245 | Price List API index. 246 | 247 | Also, this index file is constantly updated by AWS. I recommend subscribing to the AWS Price List API 248 | change notifications. 249 | 250 | In order to download the latest index file, go to the ```scripts``` folder and run: 251 | 252 | ``` 253 | python get-latest-index.py --service=all 254 | ``` 255 | 256 | The script takes a few seconds to execute since some index files are a little heavy (like the EC2 one). 257 | 258 | **Run a test** 259 | 260 | Once you have the virtualenv activated, all dependencies installed, environment 261 | variables set and the latest AWS Price List index file, it's time to run a test. 262 | 263 | Update ```test/events/constant-tag.json``` with a tag key/value pair that exists in your AWS account. 264 | 265 | 266 | Then run the following from the **root** location in the local repo, filling in the region and account ID placeholders in the function ARN: 267 | 268 | ``` 269 | python-lambda-local functions/calculate-near-realtime.py test/events/constant-tag.json -l lib/ -l . 
-f handler -t 30 -a arn:aws:lambda:::function:calculate-near-realtime 270 | ``` 271 | 272 | 273 | ### Deploy the Serverless Project 274 | 275 | From your project root folder, run: 276 | 277 | ``` 278 | serverless deploy 279 | ``` 280 | -------------------------------------------------------------------------------- /awspricecalculator/ec2/pricing.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | import logging 4 | from ..common import consts, phelper, utils 5 | from ..common.models import PricingResult 6 | #import psutil 7 | import tinydb 8 | 9 | log = logging.getLogger() 10 | regiondbs = {} 11 | indexMetadata = {} 12 | 13 | 14 | def calculate(pdim): 15 | 16 | log.info("Calculating EC2 pricing with the following inputs: {}".format(str(pdim.__dict__))) 17 | 18 | ts = phelper.Timestamp() 19 | ts.start('totalCalculation') 20 | ts.start('tinyDbLoadOnDemand') 21 | ts.start('tinyDbLoadReserved') 22 | 23 | awsPriceListApiVersion = '' 24 | cost = 0 25 | pricing_records = [] 26 | priceQuery = tinydb.Query() 27 | 28 | global regiondbs 29 | global indexMetadata 30 | 31 | 32 | #DBs for Data Transfer 33 | tmpDtDbKey = consts.SERVICE_DATA_TRANSFER+pdim.region+pdim.termType 34 | dtdbs = regiondbs.get(tmpDtDbKey,{}) 35 | if not dtdbs: 36 | dtdbs, dtIndexMetadata = phelper.loadDBs(consts.SERVICE_DATA_TRANSFER, phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **{})) 37 | regiondbs[tmpDtDbKey]=dtdbs 38 | 39 | #_/_/_/_/_/ ON-DEMAND PRICING _/_/_/_/_/ 40 | if pdim.termType == consts.SCRIPT_TERM_TYPE_ON_DEMAND: 41 | #Load On-Demand DBs 42 | indexArgs = {'tenancies':[consts.EC2_TENANCY_MAP[pdim.tenancy]]} 43 | tmpDbKey = consts.SERVICE_EC2+pdim.region+pdim.termType+pdim.tenancy 44 | 45 | dbs = regiondbs.get(tmpDbKey,{}) 46 | if not dbs: 47 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_EC2, phelper.get_partition_keys(consts.SERVICE_EC2, pdim.region, consts.SCRIPT_TERM_TYPE_ON_DEMAND, **indexArgs)) 48 | regiondbs[tmpDbKey]=dbs 49 | 50 | ts.finish('tinyDbLoadOnDemand') 51 | log.debug("Time to load OnDemand DB files: [{}]".format(ts.elapsed('tinyDbLoadOnDemand'))) 52 | 53 | #TODO: Move common operations to a common module, and leave only EC2-specific operations in ec2/pricing.py (create a class) 54 | #TODO: support all tenancy types (Host and Dedicated) 55 | #Compute Instance 56 | if pdim.instanceHours: 57 | dbFileKey = phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], 58 | consts.PRODUCT_FAMILY_COMPUTE_INSTANCE, consts.EC2_TENANCY_MAP[pdim.tenancy])) 59 | log.debug('DB File key: [{}]'.format(dbFileKey)) 60 | computeDb = dbs[dbFileKey] 61 | ts.start('tinyDbSearchComputeFile') 62 | query = ((priceQuery['Instance Type'] == pdim.instanceType) & 63 | (priceQuery['Operating System'] == consts.EC2_OPERATING_SYSTEMS_MAP[pdim.operatingSystem]) & 64 | #(priceQuery['Tenancy'] == consts.EC2_TENANCY_SHARED) & #removed since it's redundant with the file name 65 | (priceQuery['Pre Installed S/W'] == pdim.preInstalledSoftware) & 66 | (priceQuery['CapacityStatus'] == consts.EC2_CAPACITY_RESERVATION_STATUS_MAP[pdim.capacityReservationStatus]) & 67 | (priceQuery['License Model'] == consts.EC2_LICENSE_MODEL_MAP[pdim.licenseModel]))# & 68 | #(priceQuery['OfferingClass'] == pdim.offeringClass) & 69 | #(priceQuery['PurchaseOption'] == purchaseOption )) 70 | 71 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EC2, computeDb, query, pdim.instanceHours, 
pricing_records, cost) 72 | log.debug("Time to search compute:[{}]".format(ts.finish('tinyDbSearchComputeFile'))) 73 | 74 | 75 | #Data Transfer 76 | dataTransferDb = dtdbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_DATA_TRANSFER))] 77 | 78 | #Out to the Internet 79 | if pdim.dataTransferOutInternetGb: 80 | ts.start('searchDataTransfer') 81 | query = ((priceQuery['To Location'] == 'External') & (priceQuery['Transfer Type'] == 'AWS Outbound')) 82 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInternetGb, pricing_records, cost) 83 | log.debug("Time to search AWS Data Transfer Out: [{}]".format(ts.finish('searchDataTransfer'))) 84 | 85 | #Intra-regional data transfer - in/out/between EC2 AZs or using EIPs or ELB 86 | if pdim.dataTransferOutIntraRegionGb: 87 | query = ((priceQuery['Transfer Type'] == 'IntraRegion')) 88 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutIntraRegionGb, pricing_records, cost) 89 | 90 | 91 | #Inter-regional data transfer - out to other AWS regions 92 | if pdim.dataTransferOutInterRegionGb: 93 | query = ((priceQuery['Transfer Type'] == 'InterRegion Outbound') & (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion])) 94 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_DATA_TRANSFER, dataTransferDb, query, pdim.dataTransferOutInterRegionGb, pricing_records, cost) 95 | 96 | 97 | #EBS Storage 98 | if pdim.ebsStorageGbMonth: 99 | #storageDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_STORAGE)] 100 | storageDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_STORAGE))] 101 | query = ((priceQuery['Volume Type'] == pdim.volumeType)) 102 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EBS, storageDb, query, pdim.ebsStorageGbMonth, pricing_records, cost) 103 | 104 | 105 | #System Operation (pIOPS) 106 | if pdim.volumeType == consts.EBS_VOLUME_TYPE_PIOPS and pdim.pIops: 107 | #storageDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SYSTEM_OPERATION)] 108 | storageDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SYSTEM_OPERATION))] 109 | query = ((priceQuery['Group'] == 'EBS IOPS')) 110 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EBS, storageDb, query, pdim.pIops, pricing_records, cost) 111 | 112 | #Snapshot Storage 113 | if pdim.ebsSnapshotGbMonth: 114 | #snapshotDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SNAPSHOT)] 115 | snapshotDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SNAPSHOT))] 116 | query = ((priceQuery['usageType'] == consts.REGION_PREFIX_MAP[pdim.region]+'EBS:SnapshotUsage'))#EBS:SnapshotUsage comes with a prefix in the PriceList API file (i.e. 
EU-EBS:SnapshotUsage) 117 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EBS, snapshotDb, query, pdim.ebsSnapshotGbMonth, pricing_records, cost) 118 | 119 | #Classic Load Balancer 120 | if pdim.elbHours: 121 | #elbDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER)] 122 | elbDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER))] 123 | query = ((priceQuery['usageType'] == consts.REGION_PREFIX_MAP[pdim.region]+'LoadBalancerUsage') & (priceQuery['operation'] == 'LoadBalancing')) 124 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_ELB, elbDb, query, pdim.elbHours, pricing_records, cost) 125 | 126 | if pdim.elbDataProcessedGb: 127 | #elbDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER)] 128 | elbDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER))] 129 | query = ((priceQuery['usageType'] == consts.REGION_PREFIX_MAP[pdim.region]+'DataProcessing-Bytes') & (priceQuery['operation'] == 'LoadBalancing')) 130 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_ELB, elbDb, query, pdim.elbDataProcessedGb, pricing_records, cost) 131 | 132 | #Application Load Balancer 133 | #TODO: add support for Network Load Balancer 134 | if pdim.albHours: 135 | albDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER))] 136 | query = ((priceQuery['usageType'] == consts.REGION_PREFIX_MAP[pdim.region]+'LoadBalancerUsage') & (priceQuery['operation'] == 'LoadBalancing:Application')) 137 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_ELB, albDb, query, pdim.albHours, pricing_records, cost) 138 | 139 | if pdim.albLcus: 140 | albDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER))] 141 | query = ((priceQuery['usageType'] == consts.REGION_PREFIX_MAP[pdim.region]+'LCUUsage') & (priceQuery['operation'] == 'LoadBalancing:Application')) 142 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_ELB, albDb, query, pdim.albLcus, pricing_records, cost) 143 | 144 | 145 | 146 | #TODO: EIP 147 | #TODO: NAT Gateway 148 | #TODO: Fee 149 | 150 | #_/_/_/_/_/ RESERVED PRICING _/_/_/_/_/ 151 | #Load Reserved DBs 152 | if pdim.termType == consts.SCRIPT_TERM_TYPE_RESERVED: 153 | indexArgs = {'offeringClasses':[consts.EC2_OFFERING_CLASS_MAP[pdim.offeringClass]], 154 | 'tenancies':[consts.EC2_TENANCY_MAP[pdim.tenancy]], 'purchaseOptions':[consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]]} 155 | #Load all values for offeringClasses, tenancies and purchaseOptions 156 | #indexArgs = {'offeringClasses':consts.EC2_OFFERING_CLASS_MAP.values(), 157 | # 'tenancies':consts.EC2_TENANCY_MAP.values(), 'purchaseOptions':consts.EC2_PURCHASE_OPTION_MAP.values()} 158 | tmpDbKey = consts.SERVICE_EC2+pdim.region+pdim.termType+pdim.offeringClass+consts.EC2_TENANCY_MAP[pdim.tenancy]+pdim.offeringType 159 | #tmpDbKey = consts.SERVICE_EC2+pdim.region+pdim.termType 160 | dbs = regiondbs.get(tmpDbKey,{}) 161 | if not dbs: 162 | dbs, indexMetadata = phelper.loadDBs(consts.SERVICE_EC2, phelper.get_partition_keys(consts.SERVICE_EC2, pdim.region, 
consts.SCRIPT_TERM_TYPE_RESERVED, **indexArgs)) 163 | #regiondbs[consts.SERVICE_EC2+pdim.region+pdim.termType]=dbs 164 | regiondbs[tmpDbKey]=dbs 165 | 166 | log.debug("dbs keys:{}".format(dbs.keys())) 167 | 168 | ts.finish('tinyDbLoadReserved') 169 | log.debug("Time to load Reserved DB files: [{}]".format(ts.elapsed('tinyDbLoadReserved'))) 170 | 171 | 172 | computeDb = dbs[phelper.create_file_key((consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], 173 | consts.PRODUCT_FAMILY_COMPUTE_INSTANCE, pdim.offeringClass, 174 | consts.EC2_TENANCY_MAP[pdim.tenancy], consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]))] 175 | 176 | ts.start('tinyDbSearchComputeFileReserved') 177 | query = ((priceQuery['Instance Type'] == pdim.instanceType) & 178 | (priceQuery['Operating System'] == consts.EC2_OPERATING_SYSTEMS_MAP[pdim.operatingSystem]) & 179 | #(priceQuery['Tenancy'] == consts.EC2_TENANCY_SHARED) & #removed since it's redundant with the DB file name 180 | (priceQuery['Pre Installed S/W'] == pdim.preInstalledSoftware) & 181 | (priceQuery['License Model'] == consts.EC2_LICENSE_MODEL_MAP[pdim.licenseModel]) & 182 | #(priceQuery['OfferingClass'] == consts.EC2_OFFERING_CLASS_MAP[pdim.offeringClass]) & 183 | #(priceQuery['PurchaseOption'] == consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType] ) & 184 | (priceQuery['LeaseContractLength'] == consts.EC2_RESERVED_YEAR_MAP["{}".format(pdim.years)] )) 185 | 186 | hrsQuery = query & (priceQuery['Unit'] == 'Hrs' ) 187 | qtyQuery = query & (priceQuery['Unit'] == 'Quantity' ) 188 | 189 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 190 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EC2, computeDb, qtyQuery, pdim.instanceCount, pricing_records, cost) 191 | 192 | if pdim.offeringType in (consts.SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT, consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT): 193 | #reservedInstanceHours = pdim.instanceCount * consts.HOURS_IN_MONTH * 12 * pdim.years 194 | reservedInstanceHours = utils.calculate_instance_hours_year(pdim.instanceCount, pdim.years) 195 | pricing_records, cost = phelper.calculate_price(consts.SERVICE_EC2, computeDb, hrsQuery, reservedInstanceHours, pricing_records, cost) 196 | 197 | 198 | log.debug("Time to search:[{}]".format(ts.finish('tinyDbSearchComputeFileReserved'))) 199 | 200 | 201 | log.debug("regiondbs:[{}]".format(regiondbs.keys())) 202 | awsPriceListApiVersion = indexMetadata['Version'] 203 | extraargs = {'priceDimensions':pdim} 204 | pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost, pricing_records, **extraargs) 205 | log.debug(json.dumps(vars(pricing_result),sort_keys=False,indent=4)) 206 | 207 | #proc = psutil.Process() 208 | #log.debug("open_files: {}".format(proc.open_files())) 209 | 210 | log.debug("Total time: [{}]".format(ts.finish('totalCalculation'))) 211 | return pricing_result.__dict__ 212 | -------------------------------------------------------------------------------- /scripts/lambda-optimization.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys, os, json, time 3 | import argparse 4 | import traceback 5 | import boto3 6 | from botocore.exceptions import ClientError 7 | import math 8 | sys.path.insert(0, os.path.abspath('..')) 9 | sys.path.insert(0, os.path.abspath('../vendored')) 10 | 11 | import numpy 12 | 13 | import awspricecalculator.awslambda.pricing as lambdapricing 14 | import 
awspricecalculator.common.models as data
15 | import awspricecalculator.common.consts as consts
16 | import awspricecalculator.common.errors as errors
17 | 
18 | logsclient = boto3.client('logs')
19 | lambdaclient = boto3.client('lambda')
20 | 
21 | 
22 | MONTHLY = "MONTHLY"
23 | MS_MAP = {MONTHLY:(3.6E6)*720} #milliseconds in the projection period: 3.6E6 ms per hour * 720 hours per month
24 | 
25 | 
26 | 
27 | 
28 | def main(argv):
29 | 
30 | region = os.environ['AWS_DEFAULT_REGION']
31 | 
32 | parser = argparse.ArgumentParser()
33 | parser.add_argument('--function', help='Name of the Lambda function to analyze', required=True)
34 | parser.add_argument('--minutes', help='Size of the time window to analyze, in minutes, counting back from now', required=True)
35 | 
36 | if len(sys.argv) == 1:
37 | parser.print_help()
38 | sys.exit(1)
39 | args = parser.parse_args()
40 | 
41 | function = ''
42 | minutes = 0 #in minutes
43 | 
44 | 
45 | if args.function: function = args.function
46 | if args.minutes: minutes = int(args.minutes)
47 | 
48 | try:
49 | validate(function, minutes)
50 | except errors.ValidationError as error:
51 | print(error.message)
52 | sys.exit(1)
53 | 
54 | mem_used_array = []
55 | duration_array = []
56 | prov_mem_size = 0
57 | firstEventTs = 0
58 | lastEventTs = 0
59 | ts_format = "%Y-%m-%d %H:%M:%S UTC"
60 | log_group_name = '/aws/lambda/'+function
61 | 
62 | try:
63 | i = 0
64 | windowStartTime = (int(time.time()) - minutes * 60) * 1000
65 | firstEventTs = windowStartTime #temporary value, it will be updated once (if) we get results from the CW Logs get_log_events API
66 | lastEventTs = int(time.time() * 1000) #this will also be updated once (if) we get results from the CW Logs get_log_events API
67 | nextLogstreamToken = True
68 | logstreamsargs = {'logGroupName':log_group_name, 'orderBy':'LastEventTime', 'descending':True}
69 | while nextLogstreamToken:
70 | logstreams = logsclient.describe_log_streams(**logstreamsargs)
71 | """
72 | Read through CW Logs entries and extract information from them.
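The Lambda runtime writes one REPORT entry per invocation at the end of each execution, so these entries give a complete sample of recent activity.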
73 | We're interested in entries that look like this:
74 | REPORT RequestId: 7686bf2c-2f79-11e7-b693-97868a5db36b Duration: 5793.53 ms Billed Duration: 5800 ms Memory Size: 448 MB Max Memory Used: 24 MB
75 | """
76 | 
77 | if 'logStreams' in logstreams:
78 | print("Number of logstreams found:[{}]".format(len(logstreams['logStreams'])))
79 | 
80 | nextLogstreamToken = logstreams.get('nextToken',False)
81 | if nextLogstreamToken: logstreamsargs['nextToken']=nextLogstreamToken
82 | else: logstreamsargs.pop('nextToken',False)
83 | 
84 | #Go through all logstreams in descending order
85 | for ls in logstreams['logStreams']:
86 | nextEventsForwardToken = True
87 | logeventsargs = {'logGroupName':log_group_name, 'logStreamName':ls['logStreamName'],
88 | 'startFromHead':True, 'startTime':windowStartTime}
89 | while nextEventsForwardToken:
90 | logevents = logsclient.get_log_events(**logeventsargs)
91 | if 'events' in logevents:
92 | if len(logevents['events']):
93 | print ("\nEvents for logGroup:[{}] - logstream:[{}] - nextForwardToken:[{}]".format(log_group_name, ls['logStreamName'],nextEventsForwardToken))
94 | for e in logevents['events']:
95 | #Extract lambda execution duration and memory utilization from "REPORT" log events
96 | if 'REPORT RequestId:' in e['message']:
97 | mem_used = e['message'].split('Max Memory Used: ')[1].split()[0]
98 | mem_used_array.append(int(mem_used))
99 | duration = e['message'].split('Billed Duration: ')[1].split()[0]
100 | duration_array.append(int(duration))
101 | if i == 0:
102 | prov_mem_size = int(e['message'].split('Memory Size: ')[1].split()[0])
103 | firstEventTs = e['timestamp']
104 | lastEventTs = e['timestamp']
105 | else:
106 | if e['timestamp'] < firstEventTs: firstEventTs = e['timestamp']
107 | if e['timestamp'] > lastEventTs: lastEventTs = e['timestamp']
108 | print ("mem_used:[{}] - mem_size:[{}] - timestampMs:[{}] - timestamp:[{}]".format(mem_used,prov_mem_size, e['timestamp'], time.strftime(ts_format, time.gmtime(e['timestamp']/1000))))
109 | print(e['message'])
110 | i += 1
111 | else: break
112 | 
113 | nextEventsForwardToken = logevents.get('nextForwardToken',False)
114 | if nextEventsForwardToken: logeventsargs['nextToken']=nextEventsForwardToken
115 | else: logeventsargs.pop('nextToken',False)
116 | 
117 | 
118 | 
119 | #Once we've iterated through all log streams and log events, calculate averages, cost and optimization scenarios
120 | avg_used_mem = 0
121 | avg_duration_ms = 0
122 | p90_duration_ms = 0
123 | p99_duration_ms = 0
124 | p100_duration_ms = 0
125 | 
126 | if mem_used_array: avg_used_mem = math.ceil(numpy.average(mem_used_array))
127 | if duration_array:
128 | avg_duration_ms = round(math.ceil(numpy.average(duration_array)),0)
129 | p90_duration_ms = round(math.ceil(numpy.percentile(duration_array, 90)),0)
130 | p99_duration_ms = round(math.ceil(numpy.percentile(duration_array, 99)),0)
131 | p100_duration_ms = round(math.ceil(numpy.percentile(duration_array, 100)),0)
132 | base_usage = LambdaSampleUsage(region, i, avg_duration_ms, avg_used_mem, prov_mem_size, firstEventTs, lastEventTs, MONTHLY)
133 | memoptims = []
134 | durationoptims = []
135 | current_cost = 0
136 | 
137 | for m in get_lower_possible_memory_ranges(avg_used_mem, prov_mem_size):
138 | #TODO: add target memory % utilization (i.e.
I want to use 60% of memory and see how much that'll save me) 139 | memoptims.append(LambdaUtilScenario(base_usage, base_usage.avgDurationMs, m).__dict__) 140 | 141 | 142 | for d in get_lower_possible_durations(avg_duration_ms, 100): 143 | durationoptims.append(LambdaUtilScenario(base_usage, d, base_usage.memSizeMb).__dict__) 144 | 145 | 146 | optim_info = {"sampleUsage":base_usage.__dict__, 147 | "memoryOptimizationScenarios":memoptims, 148 | "durationOptimizationScenarios":durationoptims 149 | } 150 | #print(json.dumps(optim_info,sort_keys=False,indent=4)) 151 | 152 | print ("avg_duration_ms:[{}] avg_used_mem:[{}] prov_mem_size:[{}] records:[{}]".format(avg_duration_ms, avg_used_mem,prov_mem_size,i)) 153 | print ("p90_duration_ms:[{}] p99_duration_ms:[{}] p100_duration_ms:[{}]".format(p90_duration_ms, p99_duration_ms, p100_duration_ms)) 154 | 155 | print ("------------------------------------------------------------------------------------") 156 | print ("OPTIMIZATION SUMMARY\n") 157 | print ("**Data sample used for calculation:**") 158 | print ("CloudWatch Log Group: [{}]\n" \ 159 | "First Event time:[{}]\n" \ 160 | "Last Event time:[{}]\n" \ 161 | "Number of executions:[{}]\n" \ 162 | "Average executions per second:[{}]".\ 163 | format(log_group_name, 164 | time.strftime(ts_format, time.gmtime(base_usage.startTs/1000)), 165 | time.strftime(ts_format, time.gmtime(base_usage.endTs/1000)), 166 | base_usage.requestCount, base_usage.avgTps)) 167 | print ("\n**Usage for Lambda function [{}] in the sample period is the following:**".format(function)) 168 | print ("Average duration per Lambda execution: {}ms\n" \ 169 | "Average consumed memory per execution: {}MB\n" \ 170 | "Configured memory in your Lambda function: {}MB\n" \ 171 | "Memory utilization (used/allocated): {}%\n" \ 172 | "Total projected cost: ${}USD - {}".\ 173 | format(base_usage.avgDurationMs, base_usage.avgMemUsedMb,base_usage.memSizeMb, 174 | base_usage.memUsedPct,base_usage.projectedCost, base_usage.projectedPeriod)) 175 | 176 | if memoptims: 177 | print ("\nThe following Lambda memory configurations could save you money (assuming constant execution time)") 178 | labels = ['memSizeMb', 'memUsedPct', 'cost', 'timePeriod', 'savingsAmt'] 179 | print ("\n"+ResultsTable(memoptims,labels).dict2md()) 180 | if durationoptims: 181 | print ("\n\nCan you make your function execute faster? 
The following Lambda execution durations will save you money (assuming memory allocation remains constant):")
182 | labels = ['durationMs', 'cost', 'timePeriod', 'savingsAmt']
183 | print ("\n"+ResultsTable(durationoptims,labels).dict2md())
184 | print ("------------------------------------------------------------------------------------")
185 | 
186 | 
187 | 
188 | except Exception as e:
189 | traceback.print_exc()
190 | print("Exception message:["+str(e)+"]")
191 | 
192 | 
193 | """
194 | Get the possible Lambda memory configuration values that would:
195 | - Be lower than the current provisioned value (thus, cheaper)
196 | - Be greater than the current average used memory (therefore won't result in memory errors)
197 | """
198 | 
199 | def get_lower_possible_memory_ranges(usedMem, provMem):
200 | result = []
201 | for m in consts.LAMBDA_MEM_SIZES:
202 | if usedMem < float(m) and m < provMem:
203 | result.append(m)
204 | return result
205 | 
206 | 
207 | """
208 | Get the possible Lambda execution duration values that would:
209 | - Be lower than the current average duration (thus, cheaper)
210 | - Be greater than a lower limit set by the user of the script (there's only so much one can do to make a function run faster)
211 | """
212 | def get_lower_possible_durations(usedDurationMs, lowestDuration):
213 | result = []
214 | initBilledDurationMs = math.floor(usedDurationMs / 100) * 100
215 | d = int(initBilledDurationMs)
216 | while d >= lowestDuration:
217 | result.append(d)
218 | d -= 100
219 | return result
220 | 
221 | 
222 | 
223 | #TODO:Move to a different file
224 | #This class models the usage for a Lambda function within a time window defined by startTs and endTs
225 | class LambdaSampleUsage():
226 | def __init__(self, region, requestCount, avgDurationMs, avgMemUsedMb, memSizeMb, startTs, endTs, projectedPeriod):
227 | 
228 | self.region = region
229 | 
230 | self.requestCount = 0
231 | if requestCount: self.requestCount = requestCount
232 | 
233 | self.avgDurationMs = 0
234 | if avgDurationMs: self.avgDurationMs = int(avgDurationMs)
235 | 
236 | self.avgMemUsedMb = int(avgMemUsedMb)
237 | self.memSizeMb = memSizeMb
238 | 
239 | self.memUsedPct = 0.00
240 | if memSizeMb: self.memUsedPct = round(float(100 * avgMemUsedMb)/memSizeMb,2)
241 | 
242 | self.startTs = startTs
243 | self.endTs = endTs
244 | self.elapsedMs = endTs - startTs
245 | 
246 | self.avgTps = 0
247 | if self.elapsedMs:
248 | self.avgTps = round((1000 * float(self.requestCount) / float(self.elapsedMs)),4)
249 | 
250 | self.projectedPeriod = projectedPeriod
251 | self.projectedRequestCount = self.get_projected_request_count(requestCount)
252 | 
253 | args = {'region':region, 'requestCount':self.projectedRequestCount,
254 | 'avgDurationMs':math.ceil(avgDurationMs/100)*100, 'memoryMb':memSizeMb}
255 | print ("args: {}".format(args))
256 | self.projectedCost = lambdapricing.calculate(data.LambdaPriceDimension(**args))['totalCost']
257 | 
258 | 
259 | def get_projected_request_count(self,requestCount):
260 | result = 0
261 | print ("elapsed_ms:[{}] - period: [{}]".format(self.elapsedMs, self.projectedPeriod))
262 | if self.elapsedMs:
263 | result = float(requestCount)*(MS_MAP[self.projectedPeriod]/self.elapsedMs)
264 | return int(result)
265 | 
266 | 
267 | 
268 | """
269 | This class represents usage scenarios that will be modeled and displayed as possibilities, so the user can decide
270 | if they're good options (or not).
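For example (illustrative numbers only, not the output of a real run): a base usage of 24MB average used memory and a 5800ms average billed duration on a function provisioned with 448MB, evaluated at a proposed memory size of 128MB, would produce a scenario along the lines of {'memSizeMb': 128, 'memUsedPct': 18.75, 'durationMs': 5800, 'cost': <projected monthly cost>, 'savingsAmt': <base cost minus scenario cost>, 'savingsPct': <savings as a % of the base cost>, 'timePeriod': 'MONTHLY'}.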
271 | """ 272 | class LambdaUtilScenario(): 273 | def __init__(self, base_usage, proposedDurationMs, proposedMemSizeMb): 274 | 275 | self.memSizeMb = proposedMemSizeMb 276 | 277 | self.memUsedPct = 0 278 | self.memUsedPct = float(100 * base_usage.avgMemUsedMb/proposedMemSizeMb) 279 | 280 | self.durationMs = proposedDurationMs 281 | 282 | args = {'region':base_usage.region, 'requestCount':base_usage.projectedRequestCount, 283 | 'avgDurationMs':self.durationMs, 'memoryMb':proposedMemSizeMb} 284 | self.cost= lambdapricing.calculate(data.LambdaPriceDimension(**args))['totalCost'] 285 | self.savingsAmt = round((base_usage.projectedCost - self.cost),2) 286 | 287 | self.savingsPct = 0.00 288 | if base_usage.projectedCost: 289 | self.savingsPct = round((100 * self.savingsAmt / base_usage.projectedCost),2) 290 | self.timePeriod = MONTHLY 291 | 292 | def get_next_mem(self, memUsedMb): 293 | result = 0 294 | for m in consts.LAMBDA_MEM_SIZES: 295 | if memUsedMb <= float(m): 296 | result = m 297 | break 298 | return result 299 | 300 | 301 | 302 | 303 | 304 | """ 305 | This class takes an array of dictionary objects, so they can be converted to table format using Markdown syntax. 306 | It also takes an optional array of labels, if you want to limit the output to a subset of keys in each dictionary. 307 | The output is something like this: 308 | 309 | 310 | key1| key2| key3 311 | ---| ---| --- 312 | 01| 02| 03 313 | 04| 05| 06 314 | 07| 08| 09 315 | 316 | 317 | """ 318 | 319 | class ResultsTable(): 320 | def __init__(self, records,labels): 321 | self.records = records 322 | self.labels = [] 323 | if labels: 324 | self.labels = labels 325 | 326 | 327 | #Converts an array of dictionaries to Markdown format. 328 | 329 | def dict2md(self): 330 | result = "" 331 | keys = [] 332 | if self.labels: 333 | keys = self.labels 334 | else: 335 | if self.records: keys = self.records[0].keys() 336 | 337 | rc = 0 #rowcount 338 | mdrow = "" #markdown headers row 339 | self.records.insert(0,[])#insert dummy record at the beginning, since first record in loop is used to create row headers 340 | for r in self.records: 341 | #if rc==0: keys = r.keys() 342 | cc = 0 #column count 343 | for k in keys: 344 | cc += 1 345 | if rc==0: 346 | result += k 347 | mdrow += self.addpadding(k,'---') 348 | else: 349 | result += self.addpadding(k,r[k]) 350 | if cc == len(keys): 351 | result += "\n" 352 | mdrow += "\n" 353 | else: 354 | result += "|" 355 | mdrow += "|" 356 | if rc==0: result += mdrow 357 | rc += 1 358 | return result 359 | 360 | 361 | 362 | """ 363 | def dict2md(self): 364 | result = "" 365 | keys = [] 366 | if self.labels: 367 | keys = self.labels 368 | else: 369 | if self.records: keys = self.records[0].keys() 370 | 371 | rc = 0 #rowcount 372 | mdrow = "" #markdown headers row 373 | for r in self.records: 374 | #if rc==0: keys = r.keys() 375 | cc = 0 #column count 376 | for k in keys: 377 | cc += 1 378 | if rc==0: 379 | result += k 380 | mdrow += self.addpadding(k,'---') 381 | else: 382 | result += self.addpadding(k,r[k]) 383 | if cc == len(keys): 384 | result += "\n" 385 | mdrow += "\n" 386 | else: 387 | result += "|" 388 | mdrow += "|" 389 | if rc==0: result += mdrow 390 | rc += 1 391 | return result 392 | """ 393 | 394 | 395 | def addpadding(self,label,value): 396 | padding = "" 397 | i = 0 398 | while i < (len(label)-len(str(value))): 399 | padding += " " 400 | i += 1 401 | return padding + str(value) 402 | 403 | 404 | def validate(function, minutes): 405 | validation_ok = True 406 | validation_message = 
"\nValidationError:\n" 407 | 408 | try: 409 | lambdaclient.get_function(FunctionName=function) 410 | except ClientError as e: 411 | if e.response['Error']['Code'] == 'ResourceNotFoundException': 412 | validation_message += "Function [{}] does not exist, please enter a valid Lambda function\n".format(function) 413 | else: 414 | validation_message += "Boto3 client error when calling lambda.get_function" 415 | validation_ok = False 416 | 417 | if minutes <1: 418 | validation_message += "Minutes must be greater than 0\n" 419 | validation_ok = False 420 | 421 | if not validation_ok: 422 | raise errors.ValidationError(validation_message) 423 | 424 | return validation_ok 425 | 426 | 427 | 428 | if __name__ == "__main__": 429 | main(sys.argv[1:]) 430 | -------------------------------------------------------------------------------- /awspricecalculator/common/utils.py: -------------------------------------------------------------------------------- 1 | import json, logging 2 | from . import consts, models 3 | import datetime 4 | 5 | from ..ec2 import pricing as ec2pricing 6 | from ..s3 import pricing as s3pricing 7 | from ..rds import pricing as rdspricing 8 | from ..emr import pricing as emrpricing 9 | from ..redshift import pricing as redshiftpricing 10 | from ..awslambda import pricing as lambdapricing 11 | from ..dynamodb import pricing as ddbpricing 12 | from ..kinesis import pricing as kinesispricing 13 | from . errors import NoDataFoundError 14 | 15 | log = logging.getLogger() 16 | 17 | #Creates a table with all the SKUs that are part of the total price 18 | def buildSkuTable(evaluated_sku_desc): 19 | result = {} 20 | sorted_descriptions = sorted(evaluated_sku_desc) 21 | result_table_header = "Price | Description | Price Per Unit | Usage | Rate Code" 22 | result_records = "" 23 | total = 0 24 | for s in sorted_descriptions: 25 | result_records = result_records + "$" + str(s[0]) + "|" + str(s[1]) + "|" + str(s[2]) + "|" + str(s[3]) + "|" + s[4]+"\n" 26 | total = total + s[0] 27 | 28 | result['header']=result_table_header 29 | result['records']=result_records 30 | result['total']=total 31 | return result 32 | 33 | 34 | #It calculates price based on a variable price dimension. For example: by region, os, instance type, etc. 
35 | #TODO: include sortCriteria in the parameters for this function, instead of having it in kwargs (which are meant for priceDimensions only)
36 | def compare(**kwargs):
37 | service = kwargs['service']
38 | sortCriteria = kwargs['sortCriteria']
39 | result = []
40 | cheapest_price = 0
41 | criteria_array = ()
42 | kwargs_key = ""
43 | origkwargs = kwargs #we'll keep track of the original parameters
44 | scenarioArray = []
45 | 
46 | 
47 | 
48 | #Sort by AWS Region - Total Cost and To-region (for sorting by destination - find which regions are cheaper for backups)
49 | if sortCriteria in [consts.SORT_CRITERIA_REGION, consts.SORT_CRITERIA_TO_REGION]:
50 | tableCriteriaHeader = "Sorted by total cost by region\nRegion code\tRegion name\t"
51 | if sortCriteria == consts.SORT_CRITERIA_TO_REGION:
52 | tableCriteriaHeader = "Sorted by data transfer destination from region ["+kwargs['region']+"] to other regions\nTo-Region code\tTo-Region name\t"
53 | 
54 | for r in consts.SUPPORTED_REGIONS:
55 | kwargs = dict(origkwargs) #revert to original parameters at the beginning of each loop
56 | if sortCriteria == consts.SORT_CRITERIA_TO_REGION:
57 | kwargs['toRegion']=r
58 | else:
59 | kwargs['region']=r
60 | 
61 | #avoid a situation where the source and destination regions are the same for dataTransferOutInterRegionGb
62 | if kwargs.get('dataTransferOutInterRegionGb',0) > 0 and kwargs['region'] == kwargs['toRegion']:
63 | kwargs.pop('dataTransferOutInterRegionGb',0)
64 | 
65 | try:
66 | if service == consts.SERVICE_EC2:
67 | p = ec2pricing.calculate(models.Ec2PriceDimension(**kwargs))
68 | if service == consts.SERVICE_EMR:
69 | p = emrpricing.calculate(models.EmrPriceDimension(**kwargs))
70 | if service == consts.SERVICE_REDSHIFT:
71 | p = redshiftpricing.calculate(models.RedshiftPriceDimension(**kwargs))
72 | if service == consts.SERVICE_S3:
73 | p = s3pricing.calculate(models.S3PriceDimension(**kwargs))
74 | if service == consts.SERVICE_RDS:
75 | p = rdspricing.calculate(models.RdsPriceDimension(**kwargs))
76 | if service == consts.SERVICE_LAMBDA:
77 | p = lambdapricing.calculate(models.LambdaPriceDimension(**kwargs))
78 | if service == consts.SERVICE_DYNAMODB:
79 | p = ddbpricing.calculate(models.DynamoDBPriceDimension(**kwargs))
80 | if service == consts.SERVICE_KINESIS:
81 | p = kinesispricing.calculate(models.KinesisPriceDimension(**kwargs))
82 | 
83 | except NoDataFoundError:
84 | continue
85 | 
86 | log.debug ("PricingResult: [{}]".format(json.dumps(p)))
87 | #Only append records for those combinations that exist in the PriceList API
88 | if p['pricingRecords']: result.append((p['totalCost'],r,p))
89 | 
90 | #Sort by Instance Type (EC2, EMR, Redshift, etc.)
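#For example (hypothetical values), passing instanceTypes='m4.large,c4.xlarge' limits the
#comparison to those two types; if instanceTypes is omitted, every entry in
#consts.SUPPORTED_INSTANCE_TYPES_MAP for the service is evaluated, as the code below shows.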
91 | if sortCriteria == consts.SORT_CRITERIA_INSTANCE_TYPE:
92 | tableCriteriaHeader = "Total cost sorted by {} Instance Type in region [{}]\nType\t".format(service, kwargs['region'])
93 | instanceTypes = kwargs.get('instanceTypes','')
94 | if instanceTypes: instanceTypes = instanceTypes.split(',')
95 | else: instanceTypes=consts.SUPPORTED_INSTANCE_TYPES_MAP[service]
96 | log.info("instanceTypes: [{}]".format(instanceTypes))
97 | for t in instanceTypes:
98 | kwargs['instanceType']=t
99 | try:
100 | if service == consts.SERVICE_EC2: p = ec2pricing.calculate(models.Ec2PriceDimension(**kwargs))
101 | if service == consts.SERVICE_EMR: p = emrpricing.calculate(models.EmrPriceDimension(**kwargs))
102 | if service == consts.SERVICE_REDSHIFT: p = redshiftpricing.calculate(models.RedshiftPriceDimension(**kwargs))
103 | result.append((p['totalCost'],t,p))
104 | except NoDataFoundError:
105 | pass
106 | 
107 | #Sort by EC2 Operating System
108 | if sortCriteria == consts.SORT_CRITERIA_OS:
109 | tableCriteriaHeader = "Total cost sorted by Operating System in region ["+kwargs['region']+"]\nOS\t"
110 | for o in consts.SUPPORTED_EC2_OPERATING_SYSTEMS:
111 | kwargs['operatingSystem']=o
112 | try:
113 | p = ec2pricing.calculate(models.Ec2PriceDimension(**kwargs))
114 | result.append((p['totalCost'],o,p))
115 | except NoDataFoundError:
116 | pass
117 | 
118 | 
119 | #Sort by RDS DB Instance Class
120 | if sortCriteria == consts.SORT_CRITERIA_DB_INSTANCE_CLASS:
121 | tableCriteriaHeader = "Total cost sorted by DB Instance Class in region ["+kwargs['region']+"]\nDB Instance Class\t"
122 | for ic in consts.SUPPORTED_RDS_INSTANCE_CLASSES:
123 | kwargs['dbInstanceClass']=ic
124 | try:
125 | p = rdspricing.calculate(models.RdsPriceDimension(**kwargs))
126 | result.append((p['totalCost'],ic,p))
127 | except NoDataFoundError:
128 | pass
129 | 
130 | #Sort by RDS DB Engine
131 | if sortCriteria == consts.SORT_CRITERIA_DB_ENGINE:
132 | tableCriteriaHeader = "Total cost sorted by DB Engine in region ["+kwargs['region']+"]\nDB Engine - License Model\t"
133 | for e in consts.RDS_SUPPORTED_DB_ENGINES:
134 | kwargs['engine']=e
135 | for lm in consts.RDS_SUPPORTED_LICENSE_MODELS:
136 | if 'sqlserver' in e or 'oracle' in e:
137 | kwargs['licenseModel']=lm
138 | else:
139 | #SCRIPT_RDS_LICENSE_MODEL_PUBLIC is the only applicable license model for open source engines
140 | if lm == consts.SCRIPT_RDS_LICENSE_MODEL_PUBLIC: kwargs['licenseModel'] = consts.SCRIPT_RDS_LICENSE_MODEL_PUBLIC
141 | else: continue
142 | try:
143 | p = rdspricing.calculate(models.RdsPriceDimension(**kwargs))
144 | result.append((p['totalCost'],"{} - {}".format(e,lm),p))
145 | except NoDataFoundError:
146 | pass
147 | 
148 | 
149 | #Sort by Lambda memory
150 | if sortCriteria == consts.SORT_CRITERIA_LAMBDA_MEMORY:
151 | tableCriteriaHeader = "Total cost sorted by Allocated Memory in region ["+kwargs['region']+"]\nMemory\t"
152 | for m in consts.LAMBDA_MEM_SIZES:
153 | kwargs['memoryMb']=m
154 | p = lambdapricing.calculate(models.LambdaPriceDimension(**kwargs))
155 | if p['pricingRecords']: result.append((p['totalCost'],m,p))
156 | 
157 | 
158 | #Sort by S3 Storage Class
159 | if sortCriteria == consts.SORT_CRITERIA_S3_STORAGE_CLASS:
160 | #TODO: Use criteria_array for all sort calculations
161 | tableCriteriaHeader = "Total cost sorted by S3 Storage Class in region ["+kwargs['region']+"]\nStorage Class\t"
162 | criteria_array = consts.SUPPORTED_S3_STORAGE_CLASSES
163 | for c in criteria_array:
164 | kwargs['storageClass']=c
165 | try:
166 | p =
s3pricing.calculate(models.S3PriceDimension(**kwargs))
167 | if p['pricingRecords']: result.append((p['totalCost'],c,p))
168 | except NoDataFoundError:
169 | pass
170 | 
171 | #Sort by S3 Storage Size (this implies that a comma-separated list of values is supplied for storage-size-gb)
172 | if sortCriteria == consts.SORT_CRITERIA_S3_STORAGE_SIZE_GB:
173 | tableCriteriaHeader = "Total cost sorted by S3 Storage Size (GB) in region ["+kwargs['region']+"]\nStorage Size GB\t"
174 | criteria_array = kwargs.get('storageSizeGb','').split(consts.SORT_CRITERIA_VALUE_SEPARATOR)
175 | for c in criteria_array:
176 | kwargs['storageSizeGb']=int(c)
177 | try:
178 | p = s3pricing.calculate(models.S3PriceDimension(**kwargs))
179 | if p['pricingRecords']: result.append((p['totalCost'],c,p))
180 | except NoDataFoundError:
181 | pass
182 | 
183 | #Sort by S3 Data Retrieval GB (this implies that a comma-separated list of values is supplied for data-retrieval-gb)
184 | #For now, it excludes data transfer out to the internet. #TODO: include a parameter for data transfer out, proportional to data retrieval
185 | if sortCriteria == consts.SORT_CRITERIA_S3_DATA_RETRIEVAL_GB:
186 | tableCriteriaHeader = "Total cost sorted by S3 Data Retrieval (GB) for Storage Class [{}] in region [{}]\nData Retrieval GB\t".format(kwargs['storageClass'],kwargs['region'])
187 | criteria_array = kwargs.get('dataRetrievalGb','').split(consts.SORT_CRITERIA_VALUE_SEPARATOR)
188 | for c in criteria_array:
189 | kwargs['dataRetrievalGb']=int(c)
190 | try:
191 | p = s3pricing.calculate(models.S3PriceDimension(**kwargs))
192 | if p['pricingRecords']: result.append((p['totalCost'],c,p))
193 | except NoDataFoundError:
194 | pass
195 | 
196 | 
197 | #Sort by S3 Data Retrieval GB AND Storage Class (this implies that a comma-separated list of values is supplied for data-retrieval-gb)
198 | #For now, it excludes data transfer out to the internet.
#TODO: include a parameter for data transfer out, proportional to data retrieval
199 | if sortCriteria == consts.SORT_CRITERIA_S3_STORAGE_CLASS_DATA_RETRIEVAL_GB:
200 | tableCriteriaHeader = "Total cost sorted by S3 Data Retrieval (GB) and all Storage Classes in region [{}]\nStorage Class + Data Retrieval GB\t".format(kwargs['region'])
201 | criteria_array = kwargs.get('dataRetrievalGb','').split(consts.SORT_CRITERIA_VALUE_SEPARATOR)
202 | for sc in consts.SUPPORTED_S3_STORAGE_CLASSES:
203 | for c in criteria_array:
204 | kwargs['storageClass'] = sc
205 | kwargs['dataRetrievalGb']=int(c)
206 | try:
207 | p = s3pricing.calculate(models.S3PriceDimension(**kwargs))
208 | if p['pricingRecords']: result.append((p['totalCost'],"{}_{}GB".format(sc,c),p))
209 | except NoDataFoundError:
210 | pass
211 | 
212 | sorted_result = sorted(result)
213 | log.debug ("sorted_result: {}".format(sorted_result))
214 | if sorted_result: cheapest_price = sorted_result[0][0]
215 | result = []
216 | i = 0
217 | awsPriceListApiVersion = ''
218 | pricingScenarios = []
219 | #TODO: use a structured object (Class or dict) instead of using indexes for each field in the table
220 | for r in sorted_result:
221 | if i == 0: awsPriceListApiVersion = r[2]['awsPriceListApiVersion']
222 | if sorted_result[i][0]>0:
223 | #Calculate the current record relative to the last record
224 | delta_cheapest = r[0]-cheapest_price
225 | delta_last = 0
226 | pct_to_last = 0
227 | pct_to_cheapest = 0
228 | if i >= 1:
229 | delta_last = sorted_result[i][0]-sorted_result[i-1][0]
230 | if sorted_result[i-1][0] > 0:
231 | pct_to_last = ((sorted_result[i][0]-sorted_result[i-1][0])/sorted_result[i-1][0])*100
232 | if cheapest_price > 0:
233 | pct_to_cheapest = ((r[0]-cheapest_price)/cheapest_price)*100
234 | 
235 | 
236 | result.append((r[0], r[1],pct_to_cheapest, pct_to_last,delta_cheapest,delta_last))
237 | 
238 | #TODO:populate price dimensions in PricingScenario instance
239 | pricingScenario = models.PricingScenario(i, r[1], {}, r[2], r[0], sortCriteria)
240 | pricingScenario.deltaCheapest = round(delta_cheapest,2)
241 | pricingScenario.deltaPrevious = round(delta_last,2)
242 | pricingScenario.pctToCheapest = round(pct_to_cheapest,2)
243 | pricingScenario.pctToPrevious = round(pct_to_last,2)
244 | pricingScenarios.append(pricingScenario.__dict__)
245 | 
246 | i = i+1
247 | 
248 | pricecomparison = models.PriceComparison(awsPriceListApiVersion, service, sortCriteria)
249 | pricecomparison.pricingScenarios = pricingScenarios
250 | 
251 | print("Sorted cost table:")
252 | #TODO: convert to table format using Python existing libraries (i.e. prettytable, etc.)
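#The loop below prints one tab-separated row per scenario. For a region sort the output looks
#roughly like this (region names and figures are illustrative, not real prices):
#
# Sorted cost table:
# Sorted by total cost by region
# Region code  Region name            Cost(USD)  % compared to cheapest  % compared to previous  delta cheapest  delta previous
# us-east-1    US East (N. Virginia)  100.0      0                       0                       0               0
# eu-west-1    EU (Ireland)           110.0      10.0                    10.0                    10.0            10.0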
253 | #TODO: move table string to PriceComparison and choose to print it from calling methods (or not) 254 | print(tableCriteriaHeader+"Cost(USD)\t% compared to cheapest\t% compared to previous\tdelta cheapest\tdelta previous") 255 | for r in result: 256 | rowCriteriaValues = "" 257 | if sortCriteria in [consts.SORT_CRITERIA_REGION, consts.SORT_CRITERIA_TO_REGION]: 258 | rowCriteriaValues = r[1]+"\t"+consts.REGION_MAP[r[1]]+"\t" 259 | else: 260 | rowCriteriaValues = str(r[1])+"\t" 261 | print(rowCriteriaValues+str(r[0])+"\t"+str(r[2])+"\t"+str(r[3])+"\t"+str(r[4])+"\t"+str(r[5])) 262 | 263 | return pricecomparison.__dict__ 264 | 265 | 266 | def compare_term_types(service, **kwargs): 267 | log.info("kwargs:[{}]".format(kwargs)) 268 | years = kwargs['years'] 269 | regions = [] 270 | if kwargs.get('regions',[]): regions = kwargs['regions'] 271 | else: regions = [kwargs['region']] 272 | kwargs.pop('sortCriteria','') 273 | scenarioArray = [] 274 | priceCalc = {} 275 | calcKey = '' 276 | awsPriceListApiVersion = "" 277 | onDemandTotal = 0 278 | 279 | #TODO: move this logic to models.TermPricingAnalysis 280 | #Iterate through applicable combinations of term types, purchase options and years 281 | termTypes = kwargs.get('termTypes',consts.SUPPORTED_TERM_TYPES) 282 | purchaseOptions = kwargs.get('purchaseOptions',consts.EC2_SUPPORTED_PURCHASE_OPTIONS) 283 | offeringClasses = kwargs.get('offeringClasses',consts.SUPPORTED_OFFERING_CLASSES_MAP.get(service,consts.SUPPORTED_EC2_OFFERING_CLASSES)) 284 | log.info("compare_term_types - offeringClasses:[{}]".format(offeringClasses)) 285 | for r in regions: 286 | kwargs['region'] = r 287 | for t in termTypes: 288 | i = 0 289 | for oc in offeringClasses: 290 | for p in purchaseOptions: 291 | addFlag = False 292 | kwargs['instanceHours'] = calculate_instance_hours_year(kwargs['instanceCount'], years) 293 | kwargs['termType']=t 294 | 295 | if t == consts.SCRIPT_TERM_TYPE_RESERVED: 296 | if len(regions)>1: calcKey = "{}-{}-{}-{}-{}yr".format(r,t,oc,p,years) 297 | else: calcKey = "{}-{}-{}-{}yr".format(t,oc,p,years) 298 | kwargs['offeringType']=p 299 | kwargs['offeringClass']=oc 300 | if p == consts.SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT: kwargs.pop('instanceHours',0) 301 | addFlag = True 302 | if t == consts.SCRIPT_TERM_TYPE_ON_DEMAND and i == 0:#only calculate price once for OnDemand 303 | if len(regions)>1: calcKey = "{}-{}-{}yr".format(r,t,years) 304 | else: calcKey = "{}-{}yr".format(t,years) 305 | kwargs['offeringType']='' 306 | addFlag = True 307 | i += 1 308 | try: 309 | #This flag ensures there are no duplicate OnDemand entries 310 | pdims = {} 311 | priceCalc = {} 312 | if addFlag: 313 | if service == consts.SERVICE_EC2: 314 | pdims = models.Ec2PriceDimension(**kwargs) 315 | pdims.region=r 316 | priceCalc = ec2pricing.calculate(pdims) 317 | elif service == consts.SERVICE_RDS: 318 | pdims = models.RdsPriceDimension(**kwargs) 319 | pdims.region=r 320 | priceCalc = rdspricing.calculate(pdims) 321 | elif service == consts.SERVICE_EMR: 322 | pdims = models.EmrPriceDimension(**kwargs) 323 | pdims.region=r 324 | priceCalc = emrpricing.calculate(pdims) 325 | elif service == consts.SERVICE_REDSHIFT: 326 | pdims = models.RedshiftPriceDimension(**kwargs) 327 | pdims.region=r 328 | priceCalc = redshiftpricing.calculate(pdims) 329 | 330 | 331 | log.info("priceCalc: {}".format(json.dumps(priceCalc))) 332 | #pricingScenario = models.TermPricingScenario(calcKey, dict(kwargs), priceCalc['pricingRecords'], priceCalc['totalCost'], onDemandTotal) 333 | if t == 
consts.SCRIPT_TERM_TYPE_ON_DEMAND: onDemandTotal = priceCalc['totalCost']
334 | pricingScenario = models.TermPricingScenario(calcKey, pdims.__dict__, priceCalc['pricingRecords'], priceCalc['totalCost'], onDemandTotal)
335 | scenarioArray.append([pricingScenario.totalCost,pricingScenario])
336 | 
337 | awsPriceListApiVersion = priceCalc['awsPriceListApiVersion']
338 | 
339 | except NoDataFoundError as ndf:
340 | log.debug ("NoDataFoundError pdims:[{}]".format(pdims))
341 | 
342 | 
343 | if len(scenarioArray)==0: raise NoDataFoundError("NoDataFoundError for term type comparison: [{}]".format(kwargs))
344 | sortedPricingScenarios = calculate_sorted_results(scenarioArray)
345 | #print "calculation results:[{}]".format(json.dumps(sortedPricingScenarios, indent=4))
346 | 
347 | pricingAnalysis = models.TermPricingAnalysis(awsPriceListApiVersion, regions, service, years)
348 | pricingAnalysis.pricingScenarios = sortedPricingScenarios
349 | #TODO: move the next 4 calls to a single method
350 | pricingAnalysis.calculate_months_to_recover()
351 | pricingAnalysis.calculate_monthly_breakdown()
352 | pricingAnalysis.get_csv_data()
353 | pricingAnalysis.get_tabular_data()
354 | return pricingAnalysis.__dict__
355 | 
356 | 
357 | 
358 | #TODO: use for sortCriteria calculations too (so we only have this logic once)
359 | #TODO: modify such that items in unsortedScenarioArray are not a tuple, but simply a pricingScenario object
360 | def calculate_sorted_results(unsortedScenarioArray):
361 | sorted_result = sorted(unsortedScenarioArray, key=lambda scenario: scenario[0])
362 | if sorted_result: cheapest_price = sorted_result[0][0]
363 | result = []
364 | i = 0
365 | for r in sorted_result:
366 | #print "sorting the following pricing scenario:[{}]".format(json.dumps(r[1].__dict__, indent=4))
367 | if sorted_result[i][0]>0:
368 | #Calculate the current record relative to the last record
369 | delta_last = 0
370 | pct_to_last = 0
371 | pct_to_cheapest = 0
372 | if i >= 1:
373 | delta_last = sorted_result[i][0]-sorted_result[i-1][0]
374 | if sorted_result[i-1][0] > 0:
375 | pct_to_last = ((sorted_result[i][0]-sorted_result[i-1][0])/sorted_result[i-1][0])*100
376 | if cheapest_price > 0:
377 | pct_to_cheapest = ((r[0]-cheapest_price)/cheapest_price)*100
378 | 
379 | pricingScenario = r[1]
380 | pricingScenario.index = i
381 | pricingScenario.deltaPrevious = round(delta_last,2)
382 | pricingScenario.deltaCheapest = round(r[0]-cheapest_price,2)
383 | pricingScenario.pctToPrevious = round(pct_to_last,2)
384 | pricingScenario.pctToCheapest = round(pct_to_cheapest,2)
385 | pricingScenario.calculateOnDemandSavings()
386 | result.append(pricingScenario.__dict__)
387 | 
388 | i = i+1
389 | 
390 | return result
391 | 
392 | 
393 | def get_index_file_name(service, name, format):
394 | result = '../awspricecalculator/'+service+'/data/'+name+'.'+format
395 | return result
396 | 
397 | """
398 | Common method to calculate instance hours in a year - for now, it ignores leap years. For example, 2 instances over 3 years = 365 * 24 * 2 * 3 = 52,560 instance hours.
399 | """
400 | #TODO: remove circular dependency with models, so this method can be called from models too instead of duplicating it there
401 | def calculate_instance_hours_year(instanceCount, years):
402 | return 365 * 24 * int(instanceCount) * int(years)
403 | 404 | 405 | 406 | 407 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | 3 | Version 3, 29 June 2007 4 | 5 | Copyright (C) 2007 Free
Software Foundation, Inc. 6 | 7 | Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | The GNU General Public License is a free, copyleft license for software and other kinds of works. 11 | 12 | The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. 13 | 14 | When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. 15 | 16 | To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. 17 | 18 | For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 19 | 20 | Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 21 | 22 | For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 23 | 24 | Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. 25 | 26 | Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. 27 | 28 | The precise terms and conditions for copying, distribution and modification follow. 
29 | 30 | TERMS AND CONDITIONS 31 | 0. Definitions. 32 | "This License" refers to version 3 of the GNU General Public License. 33 | 34 | "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. 35 | 36 | "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. 37 | 38 | To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. 39 | 40 | A "covered work" means either the unmodified Program or a work based on the Program. 41 | 42 | To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. 43 | 44 | To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. 45 | 46 | An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 47 | 48 | 1. Source Code. 49 | The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. 50 | 51 | A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 52 | 53 | The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 54 | 55 | The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. 56 | 57 | The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. 58 | 59 | The Corresponding Source for a work in source code form is that same work. 60 | 61 | 2. Basic Permissions. 62 | All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. 63 | 64 | You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 65 | 66 | Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 67 | 68 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 69 | No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 70 | 71 | When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 72 | 73 | 4. Conveying Verbatim Copies. 74 | You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. 75 | 76 | You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 77 | 78 | 5. Conveying Modified Source Versions. 
79 | You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: 80 | 81 | a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 82 | 83 | b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". 84 | 85 | c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 86 | 87 | d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 88 | 89 | A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 90 | 91 | 6. Conveying Non-Source Forms. 92 | You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: 93 | 94 | a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 95 | 96 | b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 97 | 98 | c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 99 | 100 | d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. 
You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. 101 | 102 | e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 103 | 104 | A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 105 | 106 | A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. 107 | 108 | "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 109 | 110 | If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 111 | 112 | The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
113 | 114 | Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 115 | 116 | 7. Additional Terms. 117 | "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 118 | 119 | When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 120 | 121 | Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: 122 | 123 | a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or 124 | 125 | b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or 126 | 127 | c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or 128 | 129 | d) Limiting the use for publicity purposes of names of licensors or authors of the material; or 130 | 131 | e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or 132 | 133 | f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. 134 | 135 | All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 136 | 137 | If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. 
138 | 139 | Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 140 | 141 | 8. Termination. 142 | You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). 143 | 144 | However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 145 | 146 | Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. 147 | 148 | Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 149 | 150 | 9. Acceptance Not Required for Having Copies. 151 | You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 152 | 153 | 10. Automatic Licensing of Downstream Recipients. 154 | Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. 155 | 156 | An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. 157 | 158 | You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 159 | 160 | 11. Patents. 161 | A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 162 | 163 | A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. 164 | 165 | Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. 166 | 167 | In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 168 | 169 | If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. 170 | 171 | If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. 172 | 173 | A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 174 | 175 | Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 176 | 177 | 12. No Surrender of Others' Freedom. 178 | If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 179 | 180 | 13. Use with the GNU Affero General Public License. 181 | Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 182 | 183 | 14. Revised Versions of this License. 184 | The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. 185 | 186 | Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 187 | 188 | If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. 189 | 190 | Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 191 | 192 | 15. Disclaimer of Warranty. 
193 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 194 | 195 | 16. Limitation of Liability. 196 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 197 | 198 | 17. Interpretation of Sections 15 and 16. 199 | If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. 200 | 201 | END OF TERMS AND CONDITIONS 202 | -------------------------------------------------------------------------------- /awspricecalculator/common/consts.py: -------------------------------------------------------------------------------- 1 | import os, logging 2 | 3 | # COMMON 4 | #_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ 5 | AWS_PRICE_CALCULATOR_VERSION = "v2.0" 6 | 7 | LOG_LEVEL = os.environ.get('LOG_LEVEL',logging.INFO) 8 | DEFAULT_CURRENCY = "USD" 9 | FORECAST_PERIOD_MONTHLY = "monthly" 10 | FORECAST_PERIOD_YEARLY = "yearly" 11 | 12 | HOURS_IN_MONTH = 720 13 | 14 | SERVICE_CODE_AWS_DATA_TRANSFER = 'AWSDataTransfer' 15 | 16 | REGION_MAP = {'us-east-1':'US East (N. Virginia)', 17 | 'us-east-2':'US East (Ohio)', 18 | 'us-west-1':'US West (N. 
California)', 19 | 'us-west-2':'US West (Oregon)', 20 | 'ca-central-1':'Canada (Central)', 21 | 'eu-west-1':'EU (Ireland)', 22 | 'eu-west-2':'EU (London)', 23 | 'eu-west-3':'EU (Paris)', 24 | 'eu-north-1':'EU (Stockholm)', 25 | 'eu-central-1':'EU (Frankfurt)', 26 | 'ap-northeast-1':'Asia Pacific (Tokyo)', 27 | 'ap-northeast-2':'Asia Pacific (Seoul)', 28 | 'ap-northeast-3':'Asia Pacific (Osaka-Local)', 29 | 'ap-southeast-1':'Asia Pacific (Singapore)', 30 | 'ap-southeast-2':'Asia Pacific (Sydney)', 31 | 'sa-east-1':'South America (Sao Paulo)', 32 | 'ap-south-1':'Asia Pacific (Mumbai)', 33 | 'cn-northwest-1':'China (Ningxia)', 34 | 'ap-east-1':'Asia Pacific (Hong Kong)' 35 | } 36 | 37 | #TODO: update for China region 38 | REGION_PREFIX_MAP = {'us-east-1':'', 39 | 'us-east-2':'USE2-', 40 | 'us-west-1':'USW1-', 41 | 'us-west-2':'USW2-', 42 | 'ca-central-1':'CAN1-', 43 | 'eu-west-1':'EU-', 44 | 'eu-west-2':'EUW2-', 45 | 'eu-west-3':'EUW3-', 46 | 'eu-north-1':'EUN1-', 47 | 'eu-central-1':'EUC1-', 48 | 'ap-east-1':'APE1-' , 49 | 'ap-northeast-1':'APN1-', 50 | 'ap-northeast-2':'APN2-', 51 | 'ap-northeast-3':'APN3-', 52 | 'ap-southeast-1':'APS1-', 53 | 'ap-southeast-2':'APS2-', 54 | 'sa-east-1':'SAE1-', 55 | 'ap-south-1':'APS3-', 56 | 'cn-northwest-1':'', 57 | 'US East (N. Virginia)':'', 58 | 'US East (Ohio)':'USE2-', 59 | 'US West (N. California)':'USW1-', 60 | 'US West (Oregon)':'USW2-', 61 | 'Canada (Central)':'CAN1-', 62 | 'EU (Ireland)':'EU-', 63 | 'EU (London)':'EUW2-', 64 | 'EU (Paris)':'EUW3-', 65 | 'EU (Stockholm)':'EUN1-', 66 | 'EU (Frankfurt)':'EUC1-', 67 | 'Asia Pacific (Tokyo)':'APN1-', 68 | 'Asia Pacific (Seoul)':'APN2-', 69 | 'Asia Pacific (Singapore)':'APS1-', 70 | 'Asia Pacific (Sydney)':'APS2-', 71 | 'South America (Sao Paulo)':'SAE1-', 72 | 'Asia Pacific (Mumbai)':'APS3-', 73 | 'AWS GovCloud (US)':'UGW1-', 74 | 'External':'', 75 | 'Any': '' 76 | } 77 | 78 | 79 | 80 | REGION_REPORT_MAP = {'us-east-1':'N. Virginia', 81 | 'us-east-2':'Ohio', 82 | 'us-west-1':'N. 
California', 83 | 'us-west-2':'Oregon', 84 | 'ca-central-1':'Canada', 85 | 'eu-west-1':'Ireland', 86 | 'eu-west-2':'London', 87 | 'eu-north-1':'Stockholm', 88 | 'eu-central-1':'Frankfurt', 89 | 'ap-east-1':'Hong Kong', 90 | 'ap-northeast-1':'Tokyo', 91 | 'ap-northeast-2':'Seoul', 92 | 'ap-northeast-3':'Osaka', 93 | 'ap-southeast-1':'Singapore', 94 | 'ap-southeast-2':'Sydney', 95 | 'sa-east-1':'Sao Paulo', 96 | 'ap-south-1':'Mumbai', 97 | 'cn-northwest-1':'Ningxia', 98 | 'eu-west-3':'Paris' 99 | } 100 | 101 | 102 | 103 | SERVICE_EC2 = 'ec2' 104 | SERVICE_ELB = 'elb' 105 | SERVICE_EBS = 'ebs' 106 | SERVICE_S3 = 's3' 107 | SERVICE_RDS = 'rds' 108 | SERVICE_LAMBDA = 'lambda' 109 | SERVICE_DYNAMODB= 'dynamodb' 110 | SERVICE_KINESIS = 'kinesis' 111 | SERVICE_DATA_TRANSFER = 'datatransfer' 112 | SERVICE_EMR = 'emr' 113 | SERVICE_REDSHIFT = 'redshift' 114 | SERVICE_ALL = 'all' 115 | 116 | NOT_APPLICABLE = 'NA' 117 | 118 | 119 | SUPPORTED_SERVICES = (SERVICE_S3, SERVICE_EC2, SERVICE_RDS, SERVICE_LAMBDA, SERVICE_DYNAMODB, SERVICE_KINESIS, 120 | SERVICE_EMR, SERVICE_REDSHIFT) 121 | 122 | SUPPORTED_REGIONS = ('us-east-1','us-east-2', 'us-west-1', 'us-west-2','ca-central-1', 'eu-west-1','eu-west-2', 123 | 'eu-central-1', 'ap-east-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 124 | 'sa-east-1','ap-south-1', 'eu-west-3', 'eu-north-1' 125 | ) 126 | 127 | SUPPORTED_EC2_INSTANCE_TYPES = ('a1.2xlarge','a1.4xlarge','a1.large','a1.medium','a1.xlarge','c1.medium','c1.xlarge','c3.2xlarge', 128 | 'c3.4xlarge','c3.8xlarge','c3.large','c3.xlarge','c4.2xlarge','c4.4xlarge','c4.8xlarge','c4.large', 129 | 'c4.xlarge','c5.18xlarge','c5.2xlarge','c5.4xlarge','c5.9xlarge','c5.large','c5.xlarge','c5d.18xlarge', 130 | 'c5d.2xlarge','c5d.4xlarge','c5d.9xlarge','c5d.large','c5d.xlarge','c5n.18xlarge','c5n.2xlarge', 131 | 'c5n.4xlarge','c5n.9xlarge','c5n.large','c5n.xlarge','cc2.8xlarge','cr1.8xlarge','d2.2xlarge', 132 | 'd2.4xlarge','d2.8xlarge','d2.xlarge','f1.16xlarge','f1.2xlarge','f1.4xlarge','g2.2xlarge', 133 | 'g2.8xlarge','g3.16xlarge','g3.4xlarge','g3.8xlarge','g3s.xlarge','h1.16xlarge','h1.2xlarge', 134 | 'h1.4xlarge','h1.8xlarge','hs1.8xlarge','i2.2xlarge','i2.4xlarge','i2.8xlarge','i2.xlarge', 135 | 'i3.16xlarge','i3.2xlarge','i3.4xlarge','i3.8xlarge','i3.large','i3.xlarge','m1.large', 136 | 'm1.medium','m1.small','m1.xlarge','m2.2xlarge','m2.4xlarge','m2.xlarge','m3.2xlarge', 137 | 'm3.large','m3.medium','m3.xlarge','m4.10xlarge','m4.16xlarge','m4.2xlarge','m4.4xlarge', 138 | 'm4.large','m4.xlarge','m5.12xlarge','m5.24xlarge','m5.2xlarge','m5.4xlarge','m5.large', 139 | 'm5.metal','m5.xlarge','m5a.12xlarge','m5a.24xlarge','m5a.2xlarge','m5a.4xlarge','m5a.large', 140 | 'm5a.xlarge','m5d.12xlarge','m5d.24xlarge','m5d.2xlarge','m5d.4xlarge','m5d.large','m5d.metal', 141 | 'm5d.xlarge','p2.16xlarge','p2.8xlarge','p2.xlarge','p3.16xlarge','p3.2xlarge','p3.8xlarge', 142 | 'p3dn.24xlarge','r3.2xlarge','r3.4xlarge','r3.8xlarge','r3.large','r3.xlarge','r4.16xlarge', 143 | 'r4.2xlarge','r4.4xlarge','r4.8xlarge','r4.large','r4.xlarge', 144 | 'r5.12xlarge','r5.24xlarge', 'r5.8xlarge', 145 | 'r5.2xlarge','r5.4xlarge','r5.large','r5.xlarge','r5a.12xlarge','r5a.24xlarge','r5a.2xlarge', 146 | 'r5a.4xlarge','r5a.large','r5a.xlarge','r5d.12xlarge','r5d.24xlarge','r5d.2xlarge','r5d.4xlarge', 147 | 'r5d.large','r5d.xlarge','t1.micro','t2.2xlarge','t2.large','t2.medium','t2.micro','t2.nano', 148 | 't2.small','t2.xlarge', 149 | 
't3.2xlarge','t3.large','t3.medium','t3.micro','t3.nano','t3.small','t3.xlarge', 150 | 't3a.nano', 't3a.micro','t3a.small','t3a.medium','t3a.large','t3a.xlarge','t3a.2xlarge', 151 | 'x1.16xlarge','x1.32xlarge','x1e.16xlarge','x1e.2xlarge','x1e.32xlarge','x1e.4xlarge', 152 | 'x1e.8xlarge','x1e.xlarge','z1d.12xlarge','z1d.2xlarge','z1d.3xlarge','z1d.6xlarge','z1d.large','z1d.xlarge') 153 | 154 | 155 | SUPPORTED_EMR_INSTANCE_TYPES = ('c1.medium','c1.xlarge','c3.2xlarge','c3.4xlarge','c3.8xlarge','c3.large','c3.xlarge','c4.2xlarge', 156 | 'c4.4xlarge','c4.8xlarge','c4.large','c4.xlarge','c5.18xlarge','c5.2xlarge','c5.4xlarge', 157 | 'c5.9xlarge','c5.xlarge','c5d.18xlarge','c5d.2xlarge','c5d.4xlarge','c5d.9xlarge','c5d.xlarge', 158 | 'c5n.18xlarge','c5n.2xlarge','c5n.4xlarge','c5n.9xlarge','c5n.xlarge', 159 | 'cc2.8xlarge', 160 | 'cr1.8xlarge','d2.2xlarge','d2.4xlarge','d2.8xlarge','d2.xlarge','g2.2xlarge','g3.16xlarge', 161 | 'g3.4xlarge','g3.8xlarge','g3s.xlarge','h1.16xlarge','h1.2xlarge','h1.4xlarge','h1.8xlarge', 162 | 'hs1.8xlarge','i2.2xlarge','i2.4xlarge','i2.8xlarge','i2.xlarge','i3.16xlarge', 163 | 'i3.2xlarge','i3.4xlarge','i3.8xlarge','i3.xlarge','m1.large','m1.medium','m1.small','m1.xlarge', 164 | 'm2.2xlarge','m2.4xlarge','m2.xlarge','m3.2xlarge','m3.large','m3.medium','m3.xlarge','m4.10xlarge', 165 | 'm4.16xlarge','m4.2xlarge','m4.4xlarge','m4.large','m4.xlarge','m5.12xlarge','m5.24xlarge', 166 | 'm5.2xlarge','m5.4xlarge','m5.xlarge','m5a.12xlarge','m5a.24xlarge','m5a.2xlarge','m5a.4xlarge', 167 | 'm5a.xlarge', 168 | 'm5d.12xlarge','m5d.24xlarge','m5d.2xlarge','m5d.4xlarge','m5d.xlarge','p2.16xlarge','p2.8xlarge', 169 | 'p2.xlarge','p3.16xlarge','p3.2xlarge','p3.8xlarge','r3.2xlarge','r3.4xlarge','r3.8xlarge', 170 | 'r3.xlarge','r4.16xlarge','r4.2xlarge','r4.4xlarge','r4.8xlarge','r4.large','r4.xlarge', 171 | 'r5.12xlarge','r5.24xlarge','r5.2xlarge','r5.4xlarge','r5.xlarge','r5a.12xlarge','r5a.24xlarge', 172 | 'r5a.2xlarge','r5a.4xlarge','r5a.xlarge', 173 | 'r5d.2xlarge','r5d.4xlarge','r5d.xlarge','z1d.12xlarge','z1d.2xlarge','z1d.3xlarge', 174 | 'z1d.6xlarge','z1d.xlarge') 175 | 176 | SUPPORTED_REDSHIFT_INSTANCE_TYPES = ('ds1.xlarge','dc1.8xlarge','dc1.large','ds2.8xlarge', 177 | 'ds1.8xlarge','ds2.xlarge','dc2.8xlarge','dc2.large') 178 | 179 | SUPPORTED_INSTANCE_TYPES_MAP = {SERVICE_EC2:SUPPORTED_EC2_INSTANCE_TYPES, SERVICE_EMR:SUPPORTED_EMR_INSTANCE_TYPES , 180 | SERVICE_REDSHIFT:SUPPORTED_REDSHIFT_INSTANCE_TYPES} 181 | 182 | 183 | SERVICE_INDEX_MAP = {SERVICE_S3:'AmazonS3', SERVICE_EC2:'AmazonEC2', SERVICE_RDS:'AmazonRDS', 184 | SERVICE_LAMBDA:'AWSLambda', SERVICE_DYNAMODB:'AmazonDynamoDB', 185 | SERVICE_KINESIS:'AmazonKinesis', SERVICE_EMR:'ElasticMapReduce', SERVICE_REDSHIFT:'AmazonRedshift', 186 | SERVICE_DATA_TRANSFER:'AWSDataTransfer'} 187 | 188 | 189 | SCRIPT_TERM_TYPE_ON_DEMAND = 'on-demand' 190 | SCRIPT_TERM_TYPE_RESERVED = 'reserved' 191 | 192 | TERM_TYPE_RESERVED = 'Reserved' 193 | TERM_TYPE_ON_DEMAND = 'OnDemand' 194 | 195 | SUPPORTED_TERM_TYPES = (SCRIPT_TERM_TYPE_ON_DEMAND, SCRIPT_TERM_TYPE_RESERVED) 196 | 197 | 198 | TERM_TYPE_MAP = {SCRIPT_TERM_TYPE_ON_DEMAND:'OnDemand', SCRIPT_TERM_TYPE_RESERVED:'Reserved'} 199 | 200 | 201 | PRODUCT_FAMILY_COMPUTE_INSTANCE = 'Compute Instance' 202 | PRODUCT_FAMILY_DATABASE_INSTANCE = 'Database Instance' 203 | PRODUCT_FAMILY_DATA_TRANSFER = 'Data Transfer' 204 | PRODUCT_FAMILY_FEE = 'Fee' 205 | PRODUCT_FAMILY_API_REQUEST = 'API Request' 206 | PRODUCT_FAMILY_STORAGE = 'Storage' 207 | PRODUCT_FAMILY_SYSTEM_OPERATION = 'System 
Operation' 208 | PRODUCT_FAMILY_LOAD_BALANCER = 'Load Balancer' 209 | PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER = 'Load Balancer-Application' 210 | PRODUCT_FAMILY_NETWORK_LOAD_BALANCER = 'Load Balancer-Network' 211 | PRODUCT_FAMILY_SNAPSHOT = "Storage Snapshot" 212 | PRODUCT_FAMILY_SERVERLESS = "Serverless" 213 | PRODUCT_FAMILY_DB_STORAGE = "Database Storage" 214 | PRODUCT_FAMILY_DB_PIOPS = "Provisioned IOPS" 215 | PRODUCT_FAMILY_KINESIS_STREAMS = "Kinesis Streams" 216 | PRODUCT_FAMILY_EMR_INSTANCE = "Elastic Map Reduce Instance" 217 | PRODUCT_FAMILIY_BUNDLE = 'Bundle' 218 | PRODUCT_FAMILIY_REDSHIFT_CONCURRENCY_SCALING = 'Redshift Concurrency Scaling' 219 | PRODUCT_FAMILIY_REDSHIFT_DATA_SCAN = 'Redshift Data Scan' 220 | PRODUCT_FAMILIY_STORAGE_SNAPSHOT = 'Storage Snapshot' 221 | 222 | 223 | SUPPORTED_PRODUCT_FAMILIES = (PRODUCT_FAMILY_COMPUTE_INSTANCE, PRODUCT_FAMILY_DATABASE_INSTANCE, 224 | PRODUCT_FAMILY_DATA_TRANSFER,PRODUCT_FAMILY_FEE, PRODUCT_FAMILY_API_REQUEST, 225 | PRODUCT_FAMILY_STORAGE, PRODUCT_FAMILY_SYSTEM_OPERATION, PRODUCT_FAMILY_LOAD_BALANCER, 226 | PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER, PRODUCT_FAMILY_NETWORK_LOAD_BALANCER, 227 | PRODUCT_FAMILY_SNAPSHOT,PRODUCT_FAMILY_SERVERLESS,PRODUCT_FAMILY_DB_STORAGE, 228 | PRODUCT_FAMILY_DB_PIOPS,PRODUCT_FAMILY_KINESIS_STREAMS, PRODUCT_FAMILY_EMR_INSTANCE, 229 | PRODUCT_FAMILIY_BUNDLE, PRODUCT_FAMILIY_REDSHIFT_CONCURRENCY_SCALING, PRODUCT_FAMILIY_REDSHIFT_DATA_SCAN, 230 | PRODUCT_FAMILIY_STORAGE_SNAPSHOT 231 | ) 232 | 233 | SUPPORTED_RESERVED_PRODUCT_FAMILIES = (PRODUCT_FAMILY_COMPUTE_INSTANCE, PRODUCT_FAMILY_DATABASE_INSTANCE) 234 | 235 | SUPPORTED_PRODUCT_FAMILIES_BY_SERVICE_DICT = { 236 | SERVICE_EC2:[PRODUCT_FAMILY_COMPUTE_INSTANCE,PRODUCT_FAMILY_DATA_TRANSFER, PRODUCT_FAMILY_FEE, 237 | PRODUCT_FAMILY_STORAGE,PRODUCT_FAMILY_SYSTEM_OPERATION,PRODUCT_FAMILY_LOAD_BALANCER, 238 | PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER,PRODUCT_FAMILY_NETWORK_LOAD_BALANCER, 239 | PRODUCT_FAMILY_SNAPSHOT], 240 | SERVICE_RDS:[PRODUCT_FAMILY_DATABASE_INSTANCE, PRODUCT_FAMILY_DATA_TRANSFER,PRODUCT_FAMILY_FEE, 241 | PRODUCT_FAMILY_DB_STORAGE,PRODUCT_FAMILY_DB_PIOPS,PRODUCT_FAMILY_SNAPSHOT ], 242 | SERVICE_S3:[PRODUCT_FAMILY_STORAGE, PRODUCT_FAMILY_FEE,PRODUCT_FAMILY_API_REQUEST,PRODUCT_FAMILY_SYSTEM_OPERATION, PRODUCT_FAMILY_DATA_TRANSFER ], 243 | SERVICE_LAMBDA:[PRODUCT_FAMILY_SERVERLESS, PRODUCT_FAMILY_DATA_TRANSFER, PRODUCT_FAMILY_FEE, 244 | PRODUCT_FAMILY_API_REQUEST], 245 | SERVICE_KINESIS:[PRODUCT_FAMILY_KINESIS_STREAMS], 246 | SERVICE_DYNAMODB:[PRODUCT_FAMILY_DB_STORAGE, PRODUCT_FAMILY_DB_PIOPS, PRODUCT_FAMILY_FEE ], 247 | SERVICE_EMR:[PRODUCT_FAMILY_EMR_INSTANCE], 248 | SERVICE_REDSHIFT:[PRODUCT_FAMILY_COMPUTE_INSTANCE, PRODUCT_FAMILIY_BUNDLE, PRODUCT_FAMILIY_REDSHIFT_CONCURRENCY_SCALING, 249 | PRODUCT_FAMILIY_REDSHIFT_DATA_SCAN, PRODUCT_FAMILIY_STORAGE_SNAPSHOT], 250 | SERVICE_DATA_TRANSFER:[PRODUCT_FAMILY_DATA_TRANSFER] 251 | } 252 | 253 | 254 | INFINITY = 'Inf' 255 | 256 | SORT_CRITERIA_REGION = 'region' 257 | SORT_CRITERIA_INSTANCE_TYPE = 'instance-type' 258 | SORT_CRITERIA_OS = 'os' 259 | SORT_CRITERIA_DB_INSTANCE_CLASS = 'db-instance-class' 260 | SORT_CRITERIA_DB_ENGINE = 'engine' 261 | SORT_CRITERIA_S3_STORAGE_CLASS = 'storage-class' 262 | SORT_CRITERIA_S3_STORAGE_SIZE_GB = 'storage-size-gb' 263 | SORT_CRITERIA_S3_DATA_RETRIEVAL_GB = 'data-retrieval-gb' 264 | SORT_CRITERIA_S3_STORAGE_CLASS_DATA_RETRIEVAL_GB = 'storage-class-data-retrieval-gb' 265 | SORT_CRITERIA_TO_REGION = 'to-region' 266 | SORT_CRITERIA_LAMBDA_MEMORY = 'memory' 267 | 
SORT_CRITERIA_TERM_TYPE = 'term-type' 268 | SORT_CRITERIA_TERM_TYPE_REGION = 'term-type-region' 269 | 270 | 271 | SORT_CRITERIA_VALUE_SEPARATOR = ',' 272 | 273 | #_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ 274 | #EC2 275 | EC2_OPERATING_SYSTEM_LINUX = 'Linux' 276 | EC2_OPERATING_SYSTEM_BYOL = 'Windows BYOL' 277 | EC2_OPERATING_SYSTEM_WINDOWS = 'Windows' 278 | EC2_OPERATING_SYSTEM_SUSE = 'Suse' 279 | #EC2_OPERATING_SYSTEM_SQL_WEB = 'SQL Web' 280 | EC2_OPERATING_SYSTEM_RHEL = 'RHEL' 281 | 282 | SCRIPT_EC2_TENANCY_SHARED = 'shared' 283 | SCRIPT_EC2_TENANCY_DEDICATED = 'dedicated' 284 | SCRIPT_EC2_TENANCY_HOST = 'host' 285 | 286 | EC2_TENANCY_SHARED = 'Shared' 287 | EC2_TENANCY_DEDICATED = 'Dedicated' 288 | EC2_TENANCY_HOST = 'Host' 289 | 290 | EC2_TENANCY_MAP = {SCRIPT_EC2_TENANCY_SHARED:EC2_TENANCY_SHARED, 291 | SCRIPT_EC2_TENANCY_DEDICATED:EC2_TENANCY_DEDICATED, 292 | SCRIPT_EC2_TENANCY_HOST:EC2_TENANCY_HOST} 293 | 294 | 295 | SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_USED = 'used' 296 | SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_UNUSED = 'unused' 297 | SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_ALLOCATED = 'allocated' 298 | 299 | EC2_CAPACITY_RESERVATION_STATUS_USED = 'Used' 300 | EC2_CAPACITY_RESERVATION_STATUS_UNUSED = 'UnusedCapacityReservation' 301 | EC2_CAPACITY_RESERVATION_STATUS_ALLOCATED = 'AllocatedCapacityReservation' 302 | 303 | 304 | 305 | EC2_CAPACITY_RESERVATION_STATUS_MAP = {SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_USED: EC2_CAPACITY_RESERVATION_STATUS_USED, 306 | SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_UNUSED: EC2_CAPACITY_RESERVATION_STATUS_UNUSED, 307 | SCRIPT_EC2_CAPACITY_RESERVATION_STATUS_ALLOCATED: EC2_CAPACITY_RESERVATION_STATUS_ALLOCATED} 308 | 309 | 310 | 311 | 312 | 313 | STORAGE_MEDIA_SSD = "SSD-backed" 314 | STORAGE_MEDIA_HDD = "HDD-backed" 315 | STORAGE_MEDIA_S3 = "AmazonS3" 316 | 317 | EBS_VOLUME_TYPE_MAGNETIC = "Magnetic" 318 | EBS_VOLUME_TYPE_GENERAL_PURPOSE = "General Purpose" 319 | EBS_VOLUME_TYPE_PIOPS = "Provisioned IOPS" 320 | EBS_VOLUME_TYPE_THROUGHPUT_OPTIMIZED = "Throughput Optimized HDD" 321 | EBS_VOLUME_TYPE_COLD_HDD = "Cold HDD" 322 | 323 | #Values that are valid in the calling script (which could be a Lambda function or any Python module) 324 | 325 | #OS 326 | SCRIPT_OPERATING_SYSTEM_LINUX = 'linux' 327 | SCRIPT_OPERATING_SYSTEM_WINDOWS_BYOL = 'windowsbyol' 328 | SCRIPT_OPERATING_SYSTEM_WINDOWS = 'windows' 329 | SCRIPT_OPERATING_SYSTEM_SUSE = 'suse' 330 | #SCRIPT_OPERATING_SYSTEM_SQL_WEB = 'sqlweb' 331 | SCRIPT_OPERATING_SYSTEM_RHEL = 'rhel' 332 | 333 | #License Model 334 | SCRIPT_EC2_LICENSE_MODEL_BYOL = 'byol' 335 | SCRIPT_EC2_LICENSE_MODEL_INCLUDED = 'included' 336 | SCRIPT_EC2_LICENSE_MODEL_NONE_REQUIRED = 'none-required' 337 | 338 | #EBS 339 | SCRIPT_EBS_VOLUME_TYPE_STANDARD = 'standard' 340 | SCRIPT_EBS_VOLUME_TYPE_IO1 = 'io1' 341 | SCRIPT_EBS_VOLUME_TYPE_GP2 = 'gp2' 342 | SCRIPT_EBS_VOLUME_TYPE_SC1 = 'sc1' 343 | SCRIPT_EBS_VOLUME_TYPE_ST1 = 'st1' 344 | 345 | 346 | #Reserved Instances 347 | SCRIPT_EC2_OFFERING_CLASS_STANDARD = 'standard' 348 | SCRIPT_EC2_OFFERING_CLASS_CONVERTIBLE = 'convertible' 349 | 350 | EC2_OFFERING_CLASS_STANDARD = 'standard' 351 | EC2_OFFERING_CLASS_CONVERTIBLE = 'convertible' 352 | 353 | SUPPORTED_EC2_OFFERING_CLASSES = [SCRIPT_EC2_OFFERING_CLASS_STANDARD, SCRIPT_EC2_OFFERING_CLASS_CONVERTIBLE] 354 | SUPPORTED_RDS_OFFERING_CLASSES = [SCRIPT_EC2_OFFERING_CLASS_STANDARD] 355 | SUPPORTED_EMR_OFFERING_CLASSES = [SCRIPT_EC2_OFFERING_CLASS_STANDARD, SCRIPT_EC2_OFFERING_CLASS_CONVERTIBLE] 356 | SUPPORTED_REDSHIFT_OFFERING_CLASSES = 
[SCRIPT_EC2_OFFERING_CLASS_STANDARD] 357 | 358 | SUPPORTED_OFFERING_CLASSES_MAP = {SERVICE_EC2:SUPPORTED_EC2_OFFERING_CLASSES, SERVICE_RDS: SUPPORTED_RDS_OFFERING_CLASSES, 359 | SERVICE_EMR:SUPPORTED_EMR_OFFERING_CLASSES, 360 | SERVICE_REDSHIFT: SUPPORTED_REDSHIFT_OFFERING_CLASSES } 361 | 362 | EC2_OFFERING_CLASS_MAP = {SCRIPT_EC2_OFFERING_CLASS_STANDARD:EC2_OFFERING_CLASS_STANDARD, 363 | SCRIPT_EC2_OFFERING_CLASS_CONVERTIBLE: EC2_OFFERING_CLASS_CONVERTIBLE} 364 | 365 | 366 | 367 | SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT = 'partial-upfront' 368 | SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT = 'all-upfront' 369 | SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT = 'no-upfront' 370 | 371 | EC2_PURCHASE_OPTION_PARTIAL_UPFRONT = 'Partial Upfront' 372 | EC2_PURCHASE_OPTION_ALL_UPFRONT = 'All Upfront' 373 | EC2_PURCHASE_OPTION_NO_UPFRONT = 'No Upfront' 374 | 375 | SCRIPT_EC2_RESERVED_YEARS_1 = '1' 376 | SCRIPT_EC2_RESERVED_YEARS_3 = '3' 377 | 378 | EC2_SUPPORTED_RESERVED_YEARS = (SCRIPT_EC2_RESERVED_YEARS_1, SCRIPT_EC2_RESERVED_YEARS_3) 379 | 380 | EC2_RESERVED_YEAR_MAP = {SCRIPT_EC2_RESERVED_YEARS_1:'1yr', SCRIPT_EC2_RESERVED_YEARS_3:'3yr'} 381 | 382 | EC2_SUPPORTED_PURCHASE_OPTIONS = (SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT, SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT, SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT) 383 | 384 | 385 | 386 | EC2_PURCHASE_OPTION_MAP = {SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT:EC2_PURCHASE_OPTION_PARTIAL_UPFRONT, 387 | SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT: EC2_PURCHASE_OPTION_ALL_UPFRONT, SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT: EC2_PURCHASE_OPTION_NO_UPFRONT 388 | } 389 | 390 | SUPPORTED_EC2_OPERATING_SYSTEMS = (SCRIPT_OPERATING_SYSTEM_LINUX, 391 | SCRIPT_OPERATING_SYSTEM_WINDOWS, 392 | SCRIPT_OPERATING_SYSTEM_WINDOWS_BYOL, 393 | SCRIPT_OPERATING_SYSTEM_SUSE, 394 | #SCRIPT_OPERATING_SYSTEM_SQL_WEB, 395 | SCRIPT_OPERATING_SYSTEM_RHEL) 396 | 397 | SUPPORTED_EC2_LICENSE_MODELS = (SCRIPT_EC2_LICENSE_MODEL_BYOL, SCRIPT_EC2_LICENSE_MODEL_INCLUDED, SCRIPT_EC2_LICENSE_MODEL_NONE_REQUIRED) 398 | 399 | EC2_LICENSE_MODEL_MAP = {SCRIPT_EC2_LICENSE_MODEL_BYOL: 'Bring your own license', 400 | SCRIPT_EC2_LICENSE_MODEL_INCLUDED: 'License Included', 401 | SCRIPT_EC2_LICENSE_MODEL_NONE_REQUIRED: 'No License required' 402 | } 403 | 404 | 405 | EC2_OPERATING_SYSTEMS_MAP = {SCRIPT_OPERATING_SYSTEM_LINUX:'Linux', 406 | SCRIPT_OPERATING_SYSTEM_WINDOWS_BYOL:'Windows', 407 | SCRIPT_OPERATING_SYSTEM_WINDOWS:'Windows', 408 | SCRIPT_OPERATING_SYSTEM_SUSE:'SUSE', 409 | #SCRIPT_OPERATING_SYSTEM_SQL_WEB:'SQL Web', 410 | SCRIPT_OPERATING_SYSTEM_RHEL:'RHEL'} 411 | 412 | SUPPORTED_EBS_VOLUME_TYPES = (SCRIPT_EBS_VOLUME_TYPE_STANDARD, 413 | SCRIPT_EBS_VOLUME_TYPE_IO1, 414 | SCRIPT_EBS_VOLUME_TYPE_GP2, 415 | SCRIPT_EBS_VOLUME_TYPE_SC1, 416 | SCRIPT_EBS_VOLUME_TYPE_ST1 417 | ) 418 | 419 | EBS_VOLUME_TYPES_MAP = { 420 | SCRIPT_EBS_VOLUME_TYPE_STANDARD : {'storageMedia':STORAGE_MEDIA_HDD , 'volumeType':EBS_VOLUME_TYPE_MAGNETIC}, 421 | SCRIPT_EBS_VOLUME_TYPE_IO1 : {'storageMedia':STORAGE_MEDIA_SSD , 'volumeType':EBS_VOLUME_TYPE_PIOPS}, 422 | SCRIPT_EBS_VOLUME_TYPE_GP2 : {'storageMedia':STORAGE_MEDIA_SSD , 'volumeType':EBS_VOLUME_TYPE_GENERAL_PURPOSE}, 423 | SCRIPT_EBS_VOLUME_TYPE_SC1 : {'storageMedia':STORAGE_MEDIA_HDD , 'volumeType':EBS_VOLUME_TYPE_COLD_HDD}, 424 | SCRIPT_EBS_VOLUME_TYPE_ST1 : {'storageMedia':STORAGE_MEDIA_HDD , 'volumeType':EBS_VOLUME_TYPE_THROUGHPUT_OPTIMIZED} 425 | } 426 | 427 | #_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ 428 | 429 | #RDS 430 | 431 | SUPPORTED_RDS_INSTANCE_CLASSES = ('db.t1.micro', 
'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 432 | 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 433 | 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 434 | 'db.m4.large', 'db.m4.xlarge', 'db.m4.2xlarge', 'db.m4.4xlarge', 'db.m4.10xlarge', 'db.m4.16xlarge', 435 | 'db.m5.large', 'db.m5.xlarge', 'db.m5.2xlarge', 'db.m5.4xlarge', 'db.m5.12xlarge', 'db.m5.24xlarge', 436 | 'db.r3.large', 'db.r3.xlarge', 'db.r3.2xlarge', 'db.r3.4xlarge', 'db.r3.8xlarge', 437 | 'db.r4.large', 'db.r4.xlarge', 'db.r4.2xlarge', 'db.r4.4xlarge', 'db.r4.8xlarge', 'db.r4.16xlarge', 438 | 'db.r5.large', 'db.r5.xlarge', 'db.r5.2xlarge', 'db.r5.4xlarge', 'db.r5.12xlarge', 'db.r5.24xlarge', 439 | 'db.t2.micro', 'db.t2.small', 'db.t2.2xlarge', 'db.t2.large', 'db.t2.xlarge', 'db.t2.medium', 440 | 'db.t3.micro', 'db.t3.small', 'db.t3.medium', 'db.t3.large', 'db.t3.xlarge', 'db.t3.2xlarge', 441 | 'db.x1.16xlarge', 'db.x1.32xlarge', 'db.x1e.16xlarge', 'db.x1e.2xlarge', 'db.x1e.32xlarge', 'db.x1e.4xlarge', 'db.x1e.8xlarge', 'db.x1e.xlarge' 442 | ) 443 | 444 | 445 | SCRIPT_RDS_STORAGE_TYPE_STANDARD = 'standard' 446 | SCRIPT_RDS_STORAGE_TYPE_AURORA = 'aurora' #Aurora has its own type of storage, which is billed by IO operations and size 447 | SCRIPT_RDS_STORAGE_TYPE_GP2 = 'gp2' 448 | SCRIPT_RDS_STORAGE_TYPE_IO1 = 'io1' 449 | 450 | RDS_VOLUME_TYPE_MAGNETIC = 'Magnetic' 451 | RDS_VOLUME_TYPE_AURORA = 'General Purpose-Aurora' 452 | RDS_VOLUME_TYPE_GP2 = 'General Purpose' 453 | RDS_VOLUME_TYPE_IO1 = 'Provisioned IOPS' 454 | 455 | 456 | 457 | 458 | RDS_VOLUME_TYPES_MAP = { 459 | SCRIPT_RDS_STORAGE_TYPE_STANDARD : RDS_VOLUME_TYPE_MAGNETIC, 460 | SCRIPT_RDS_STORAGE_TYPE_AURORA : RDS_VOLUME_TYPE_AURORA, 461 | SCRIPT_RDS_STORAGE_TYPE_GP2 : RDS_VOLUME_TYPE_GP2, 462 | SCRIPT_RDS_STORAGE_TYPE_IO1 : RDS_VOLUME_TYPE_IO1 463 | } 464 | 465 | 466 | 467 | SUPPORTED_RDS_STORAGE_TYPES = (SCRIPT_RDS_STORAGE_TYPE_STANDARD, SCRIPT_RDS_STORAGE_TYPE_AURORA, SCRIPT_RDS_STORAGE_TYPE_GP2, SCRIPT_RDS_STORAGE_TYPE_IO1) 468 | 469 | 470 | RDS_DEPLOYMENT_OPTION_SINGLE_AZ = 'Single-AZ' 471 | RDS_DEPLOYMENT_OPTION_MULTI_AZ = 'Multi-AZ' 472 | RDS_DEPLOYMENT_OPTION_MULTI_AZ_MIRROR = 'Multi-AZ (SQL Server Mirror)' 473 | 474 | RDS_DB_ENGINE_MYSQL = 'MySQL' 475 | RDS_DB_ENGINE_MARIADB = 'MariaDB' 476 | RDS_DB_ENGINE_ORACLE = 'Oracle' 477 | RDS_DB_ENGINE_SQL_SERVER = 'SQL Server' 478 | RDS_DB_ENGINE_POSTGRESQL = 'PostgreSQL' 479 | RDS_DB_ENGINE_AURORA_MYSQL = 'Aurora MySQL' 480 | RDS_DB_ENGINE_AURORA_POSTGRESQL = 'Aurora PostgreSQL' 481 | 482 | RDS_DB_EDITION_ENTERPRISE = 'Enterprise' 483 | RDS_DB_EDITION_STANDARD = 'Standard' 484 | RDS_DB_EDITION_STANDARD_ONE = 'Standard One' 485 | RDS_DB_EDITION_STANDARD_TWO = 'Standard Two' 486 | RDS_DB_EDITION_EXPRESS = 'Express' 487 | RDS_DB_EDITION_WEB = 'Web' 488 | 489 | 490 | SCRIPT_RDS_DATABASE_ENGINE_MYSQL = 'mysql' 491 | SCRIPT_RDS_DATABASE_ENGINE_MARIADB = 'mariadb' 492 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD = 'oracle-se' 493 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_ONE = 'oracle-se1' 494 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_TWO = 'oracle-se2' 495 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_ENTERPRISE = 'oracle-ee' 496 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_ENTERPRISE = 'sqlserver-ee' 497 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_STANDARD = 'sqlserver-se' 498 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_EXPRESS = 'sqlserver-ex' 499 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_WEB = 'sqlserver-web' 500 | SCRIPT_RDS_DATABASE_ENGINE_POSTGRESQL = 'postgres' #to be consistent with RDS API - 
https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html 501 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL = 'aurora' 502 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL_LONG = 'aurora-mysql' #some items in the RDS API now return aurora-mysql as a valid engine (instead of just aurora) 503 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_POSTGRESQL = 'aurora-postgresql' 504 | 505 | RDS_SUPPORTED_DB_ENGINES = (SCRIPT_RDS_DATABASE_ENGINE_MYSQL,SCRIPT_RDS_DATABASE_ENGINE_MARIADB, 506 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD, SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_ONE, 507 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_TWO,SCRIPT_RDS_DATABASE_ENGINE_ORACLE_ENTERPRISE, 508 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_ENTERPRISE, SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_STANDARD, 509 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_EXPRESS, SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_WEB, 510 | SCRIPT_RDS_DATABASE_ENGINE_POSTGRESQL, SCRIPT_RDS_DATABASE_ENGINE_AURORA_POSTGRESQL, 511 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL, SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL_LONG 512 | ) 513 | 514 | SCRIPT_RDS_LICENSE_MODEL_INCLUDED = 'license-included' 515 | SCRIPT_RDS_LICENSE_MODEL_BYOL = 'bring-your-own-license' 516 | SCRIPT_RDS_LICENSE_MODEL_PUBLIC = 'general-public-license' 517 | RDS_SUPPORTED_LICENSE_MODELS = (SCRIPT_RDS_LICENSE_MODEL_INCLUDED, SCRIPT_RDS_LICENSE_MODEL_BYOL, SCRIPT_RDS_LICENSE_MODEL_PUBLIC) 518 | RDS_LICENSE_MODEL_MAP = {SCRIPT_RDS_LICENSE_MODEL_INCLUDED:'License included', 519 | SCRIPT_RDS_LICENSE_MODEL_BYOL:'Bring your own license', 520 | SCRIPT_RDS_LICENSE_MODEL_PUBLIC:'No license required'} 521 | 522 | RDS_ENGINE_MAP = {SCRIPT_RDS_DATABASE_ENGINE_MYSQL:{'engine':RDS_DB_ENGINE_MYSQL,'edition':''}, 523 | SCRIPT_RDS_DATABASE_ENGINE_MARIADB:{'engine':RDS_DB_ENGINE_MARIADB ,'edition':''}, 524 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD:{'engine':RDS_DB_ENGINE_ORACLE ,'edition':RDS_DB_EDITION_STANDARD}, 525 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_ONE:{'engine':RDS_DB_ENGINE_ORACLE ,'edition':RDS_DB_EDITION_STANDARD_ONE}, 526 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_STANDARD_TWO:{'engine':RDS_DB_ENGINE_ORACLE ,'edition':RDS_DB_EDITION_STANDARD_TWO}, 527 | SCRIPT_RDS_DATABASE_ENGINE_ORACLE_ENTERPRISE:{'engine':RDS_DB_ENGINE_ORACLE ,'edition':RDS_DB_EDITION_ENTERPRISE}, 528 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_ENTERPRISE:{'engine':RDS_DB_ENGINE_SQL_SERVER ,'edition':RDS_DB_EDITION_ENTERPRISE}, 529 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_STANDARD:{'engine':RDS_DB_ENGINE_SQL_SERVER ,'edition':RDS_DB_EDITION_STANDARD}, 530 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_EXPRESS:{'engine':RDS_DB_ENGINE_SQL_SERVER ,'edition':RDS_DB_EDITION_EXPRESS}, 531 | SCRIPT_RDS_DATABASE_ENGINE_SQL_SERVER_WEB:{'engine':RDS_DB_ENGINE_SQL_SERVER ,'edition':RDS_DB_EDITION_WEB}, 532 | SCRIPT_RDS_DATABASE_ENGINE_POSTGRESQL:{'engine':RDS_DB_ENGINE_POSTGRESQL ,'edition':''}, 533 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL:{'engine':RDS_DB_ENGINE_AURORA_MYSQL ,'edition':''}, 534 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_MYSQL_LONG:{'engine':RDS_DB_ENGINE_AURORA_MYSQL ,'edition':''}, 535 | SCRIPT_RDS_DATABASE_ENGINE_AURORA_POSTGRESQL:{'engine':RDS_DB_ENGINE_AURORA_POSTGRESQL ,'edition':''} 536 | } 537 | 538 | 539 | 540 | #_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ 541 | 542 | #S3 543 | 544 | S3_USAGE_GROUP_REQUESTS_TIER_1 = 'S3-API-Tier1' 545 | S3_USAGE_GROUP_REQUESTS_TIER_2 = 'S3-API-Tier2' 546 | S3_USAGE_GROUP_REQUESTS_TIER_3 = 'S3-API-Tier3' 547 | S3_USAGE_GROUP_REQUESTS_SIA_TIER1 = 'S3-API-SIA-Tier1' 548 | 
S3_USAGE_GROUP_REQUESTS_SIA_TIER2 = 'S3-API-SIA-Tier2' 549 | S3_USAGE_GROUP_REQUESTS_SIA_RETRIEVAL = 'S3-API-SIA-Retrieval' 550 | S3_USAGE_GROUP_REQUESTS_ZIA_TIER1 = 'S3-API-ZIA-Tier1' 551 | S3_USAGE_GROUP_REQUESTS_ZIA_TIER2 = 'S3-API-ZIA-Tier2' 552 | S3_USAGE_GROUP_REQUESTS_ZIA_RETRIEVAL = 'S3-API-ZIA-Retrieval' 553 | 554 | 555 | S3_STORAGE_CLASS_STANDARD = 'General Purpose' 556 | S3_STORAGE_CLASS_SIA = 'Infrequent Access' 557 | S3_STORAGE_CLASS_ZIA = 'Infrequent Access' 558 | S3_STORAGE_CLASS_GLACIER = 'Archive' 559 | S3_STORAGE_CLASS_REDUCED_REDUNDANCY = 'Non-Critical Data' 560 | 561 | 562 | SUPPORTED_REQUEST_TYPES = ('PUT','COPY','POST','LIST','GET') 563 | 564 | SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS = 'STANDARD_IA' 565 | SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS = 'ONEZONE_IA' 566 | SCRIPT_STORAGE_CLASS_STANDARD = 'STANDARD' 567 | SCRIPT_STORAGE_CLASS_GLACIER = 'GLACIER' 568 | SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY = 'REDUCED_REDUNDANCY' 569 | 570 | SUPPORTED_S3_STORAGE_CLASSES = (SCRIPT_STORAGE_CLASS_STANDARD, 571 | SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS, 572 | SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS, 573 | SCRIPT_STORAGE_CLASS_GLACIER, 574 | SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY) 575 | 576 | S3_STORAGE_CLASS_MAP = {SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS:S3_STORAGE_CLASS_SIA, 577 | SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS:S3_STORAGE_CLASS_ZIA, 578 | SCRIPT_STORAGE_CLASS_STANDARD:S3_STORAGE_CLASS_STANDARD, 579 | SCRIPT_STORAGE_CLASS_GLACIER:S3_STORAGE_CLASS_GLACIER, 580 | SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY:S3_STORAGE_CLASS_REDUCED_REDUNDANCY} 581 | 582 | S3_USAGE_TYPE_DICT = { 583 | SCRIPT_STORAGE_CLASS_STANDARD:'TimedStorage-ByteHrs', 584 | SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS:'TimedStorage-SIA-ByteHrs', 585 | SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS:'TimedStorage-ZIA-ByteHrs', 586 | SCRIPT_STORAGE_CLASS_GLACIER:'TimedStorage-GlacierByteHrs', 587 | SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY:'TimedStorage-RRS-ByteHrs' 588 | } 589 | 590 | S3_VOLUME_TYPE_DICT = { 591 | SCRIPT_STORAGE_CLASS_STANDARD:'Standard', 592 | SCRIPT_STORAGE_CLASS_INFREQUENT_ACCESS:'Standard - Infrequent Access', 593 | SCRIPT_STORAGE_CLASS_ONE_ZONE_INFREQUENT_ACCESS:'One Zone - Infrequent Access', 594 | SCRIPT_STORAGE_CLASS_GLACIER:'Amazon Glacier', 595 | SCRIPT_STORAGE_CLASS_REDUCED_REDUNDANCY:'Reduced Redundancy' 596 | } 597 | 598 | 599 | 600 | 601 | #_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/ 602 | 603 | #LAMBDA 604 | 605 | LAMBDA_MEM_SIZES = [64,128,192,256,320,384,448,512,576,640,704,768,832,896,960,1024,1088,1152,1216,1280,1344,1408, 606 | 1472,1536,1600,1664,1728,1792,1856,1920,1984,2048,2112,2176,2240,2304,2368,2432,2496,2560,2624,2688, 607 | 2752,2816,2880,2944,3008] 608 | --------------------------------------------------------------------------------
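For illustration only — a minimal sketch, not taken from the repository, of how a calling script might use the constants in consts.py to validate its inputs and translate them into the vocabulary used in the AWS price list files. The helper name build_filter and the shape of the returned dictionary are assumptions made for this example (the real lookup logic lives in each service's pricing.py), and it is assumed that ValidationError from common/errors.py accepts a single message string.

import awspricecalculator.common.consts as consts
from awspricecalculator.common.errors import ValidationError

def build_filter(service, region, term_type):
    # Hypothetical helper: reject values outside the supported sets
    # before touching the price index.
    if service not in consts.SUPPORTED_SERVICES:
        raise ValidationError("unsupported service: {}".format(service))
    if region not in consts.SUPPORTED_REGIONS:
        raise ValidationError("unsupported region: {}".format(region))
    if term_type not in consts.SUPPORTED_TERM_TYPES:
        raise ValidationError("unsupported term type: {}".format(term_type))
    # Translate script-level values into price-list attribute values.
    return {
        'offerCode': consts.SERVICE_INDEX_MAP[service],   # e.g. 'AmazonEC2'
        'location': consts.REGION_MAP[region],            # e.g. 'US East (N. Virginia)'
        'termType': consts.TERM_TYPE_MAP[term_type],      # e.g. 'OnDemand'
    }

# build_filter('ec2', 'us-east-1', 'on-demand')
# -> {'offerCode': 'AmazonEC2', 'location': 'US East (N. Virginia)',
#     'termType': 'OnDemand'}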
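REGION_PREFIX_MAP exists because many price-list usageType values are region-qualified: outside us-east-1 (whose prefix is the empty string), the usage type carries a short region prefix. A small sketch of the composition, assuming plain string concatenation matches how the pricing modules build usage types:

import awspricecalculator.common.consts as consts

# 'USW2-' + 'TimedStorage-ByteHrs' -> 'USW2-TimedStorage-ByteHrs'
prefix = consts.REGION_PREFIX_MAP['us-west-2']
usage_type = prefix + consts.S3_USAGE_TYPE_DICT[consts.SCRIPT_STORAGE_CLASS_STANDARD]

# In us-east-1 the prefix is '', so the same expression yields the
# unprefixed form 'TimedStorage-ByteHrs'.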
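LAMBDA_MEM_SIZES enumerates the 64 MB memory increments that the calculator prices Lambda against. A hedged sketch of snapping an arbitrary configured value to a tier in that list — the helper is hypothetical and relies only on the list being in ascending order:

import awspricecalculator.common.consts as consts
from awspricecalculator.common.errors import ValidationError

def nearest_lambda_memory(mb):
    # Return the smallest supported memory size >= mb (hypothetical helper).
    for size in consts.LAMBDA_MEM_SIZES:
        if mb <= size:
            return size
    raise ValidationError("memory size {}MB exceeds the largest supported tier".format(mb))

# nearest_lambda_memory(200) -> 256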