├── diagram.png
├── policies
│   ├── trust-policy.json
│   └── trust-policy-mod.json
├── config
│   ├── deployment-config.json
│   └── config.json
├── LICENSE.txt
├── README.md
├── lambda_function.py
└── deploy-wrapper.py

/diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dbnegative/lambda-cloudfront-log-ingester/HEAD/diagram.png

--------------------------------------------------------------------------------
/policies/trust-policy.json:
--------------------------------------------------------------------------------
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Allow",
      "Principal": {
        "Service": "lambda.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}

--------------------------------------------------------------------------------
/config/deployment-config.json:
--------------------------------------------------------------------------------
{
  "S3_CONFIG_BUCKET": "lambda-cloudfront-log-ingester-config",
  "LAMBDA_DEPLOY_BUCKET": "lambda-cloudfront-log-ingester",
  "CONFIG_FILE": "config.json",
  "LAMBDA_FUNC_NAME": "cloudfront-log-ingester",
  "LAMBDA_HANDLER": "lambda_function.lambda_handler",
  "LAMBDA_ROLE_ARN": "arn:aws:iam:::role/lambda-cloudfront-log-ingester",
  "LAMBDA_TIMEOUT": "300",
  "LAMBDA_MEMORY_SIZE": "512"
}

--------------------------------------------------------------------------------
/policies/trust-policy-mod.json:
--------------------------------------------------------------------------------
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Allow",
      "Principal": {
        "Service": "lambda.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    },
    {
      "Sid": "AllowLambdaToAssumeRole",
      "Effect": "Allow",
      "Principal": {
        "AWS": "arn:aws:iam:::role/lambda-cloudfront-log-ingester"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}

--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
Copyright (c) 2017 Jason Witting.

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

--------------------------------------------------------------------------------
/config/config.json:
--------------------------------------------------------------------------------
{
  "es_host": "YOUR AWS ES ENDPOINT",
  "es_region": "eu-west-1",
  "es_connection_timeout": 60,
  "es_bulk_timeout": "60s",
  "es_bulk_chunk_size": 1000,
  "sts_role_arn": "YOUR LAMBDA ROLE ARN",
  "sts_session_name": "lambdastsassume",
  "es_mapping": {
    "mappings": {
      "logs": {
        "properties": {
          "host-header": {
            "type": "string",
            "index": "not_analyzed"
          },
          "ip": {
            "type": "string",
            "index": "not_analyzed"
          },
          "host": {
            "type": "string",
            "index": "not_analyzed"
          },
          "uri-stem": {
            "type": "string",
            "index": "not_analyzed"
          }
        }
      }
    }
  }
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Push CloudFront logs to Elasticsearch with Lambda and S3

Lambda function to ingest and push CloudFront logs that have been placed on S3.

![Alt text](/diagram.png?raw=true "Layout")

## Things to know before starting:
This function pulls the CloudFront .gz log files from S3 and creates a record (dict) for each log line. It also strips the separate date and time fields and merges them into a new timestamp field. The records are then written using the Elasticsearch client via the bulk API.
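The date/time merge works like this minimal sketch (stdlib only, on a fabricated log row; the real logic lives in lambda_function.py):
```
from datetime import datetime

# a fabricated CloudFront log row (only the relevant fields shown)
row = {'logdate': '2017-01-31', 'logtime': '12:34:56', 'ip': '203.0.113.10'}

# strip the two fields and merge them into one ISO-8601 timestamp
row['timestamp'] = datetime.strptime(
    row.pop('logdate') + ' ' + row.pop('logtime'),
    '%Y-%m-%d %H:%M:%S').isoformat()

print(row)  # {'ip': '203.0.113.10', 'timestamp': '2017-01-31T12:34:56'}
```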
A new Elasticsearch index is created for each day. PLEASE MAKE SURE YOU HAVE AN INDEX CLEANING POLICY IN PLACE!
I have created a custom index mapping scheme that works for me. Please change it to suit your needs.

deploy-wrapper.py is generic and can be used for other functions. All settings can be changed in the deployment-config.json file in the config folder.

* I'm not a Python expert, so pull requests are welcome!!
* AWS Elasticsearch is a pain to connect to. You need to sign all your requests with AWSRequestsAuth
* My default Lambda settings are not finely tuned - however they are working for me - YMMV
* Always use aliases when working with Lambda in prod - trust me...

## Prerequisites
* Admin access to: AWS S3, Elasticsearch, Lambda, IAM
* aws cli
* python 2.7+
* boto3
* virtualenv
* jq

## Setup
### IAM
* create the lambda IAM role
```
aws iam create-role --role-name lambda-cloudfront-log-ingester --assume-role-policy-document="$(cat policies/trust-policy.json|jq -c '.')"
```
* modify the role so that it can assume itself for STS token generation
```
aws iam update-assume-role-policy --policy-document="$(cat policies/trust-policy-mod.json|jq -c '.')" --role-name lambda-cloudfront-log-ingester
```
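The same two steps can also be scripted with boto3. A sketch, assuming it is run from the repo root so the policy files resolve:
```
import boto3

iam = boto3.client('iam')

# create the role with the base Lambda trust policy
with open('policies/trust-policy.json') as f:
    iam.create_role(RoleName='lambda-cloudfront-log-ingester',
                    AssumeRolePolicyDocument=f.read())

# swap in the modified trust policy so the role can also assume itself
with open('policies/trust-policy-mod.json') as f:
    iam.update_assume_role_policy(RoleName='lambda-cloudfront-log-ingester',
                                  PolicyDocument=f.read())
```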
* add policies to allow access to S3, Elasticsearch and CloudWatch Logs. You can use the pre-generated AWS policies in the IAM console as follows:
```
S3 Readonly Access
ES full access
Lambda Basic execution role
```
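Attaching the policies can be scripted too. The ARNs below are what I believe to be the matching AWS-managed policies, so verify them in your console before relying on this sketch:
```
import boto3

iam = boto3.client('iam')
for policy_arn in ('arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess',
                   'arn:aws:iam::aws:policy/AmazonESFullAccess',
                   'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'):
    iam.attach_role_policy(RoleName='lambda-cloudfront-log-ingester',
                           PolicyArn=policy_arn)
```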
### S3
* create the bucket where the lambda function config will be stored
```
aws s3 mb s3://lambda-cloudfront-log-ingester-config --region eu-west-1
```
* create the bucket where the lambda function deployment zip will be stored
```
aws s3 mb s3://lambda-cloudfront-log-ingester --region eu-west-1
```
* create 4 folders in the config bucket to hold the config files for the different deployment stages, through the AWS S3 console (or with boto3 - see the sketch below):
```
$LATEST
DEV
STAGE
PROD
```
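S3 "folders" are just key prefixes, so the console step can be replaced by putting zero-byte marker objects. A sketch:
```
import boto3

s3 = boto3.client('s3')
for stage in ('$LATEST', 'DEV', 'STAGE', 'PROD'):
    # a zero-byte object whose key ends in '/' shows up as a folder in the console
    s3.put_object(Bucket='lambda-cloudfront-log-ingester-config', Key=stage + '/')
```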
### Elasticsearch
The Elasticsearch permissions policy should allow calls from the Lambda role; in my case, however, I have it open to my AWS account ID.
You will also need to get your ES endpoint URL.

### Deploy
* install the needed python dependencies
```
pip install virtualenv boto3
```
* clone the repo
```
git clone https://github.com/dbnegative/lambda-cloudfront-log-ingester
cd lambda-cloudfront-log-ingester
```
* edit config/deployment-config.json if needed:
```
{
  "S3_CONFIG_BUCKET": "lambda-cloudfront-log-ingester-config",
  "LAMBDA_DEPLOY_BUCKET": "lambda-cloudfront-log-ingester",
  "CONFIG_FILE": "config.json",
  "LAMBDA_FUNC_NAME": "cloudfront-log-ingester",
  "LAMBDA_HANDLER": "lambda_function.lambda_handler",
  "LAMBDA_ROLE_ARN": "arn:aws:iam:::role/lambda-cloudfront-log-ingester",
  "LAMBDA_TIMEOUT": "300",
  "LAMBDA_MEMORY_SIZE": "512"
}
```
* set up the build environment
```
deploy-wrapper.py setup
```
* edit config/config.json with your own settings, at a minimum the following:
```
"es_host": "YOUR AWS ES ENDPOINT",
"es_region": "eu-west-1",
"sts_role_arn": "YOUR LAMBDA ROLE ARN",
"sts_session_name": "lambdastsassume",
```
* create the initial version of the function using deploy-wrapper.py
```
deploy-wrapper.py init
```
* create 3 Lambda aliases for continuous deployments and tests (the function name comes from LAMBDA_FUNC_NAME in deployment-config.json)
```
aws lambda create-alias --name DEV --function-name cloudfront-log-ingester --function-version=1
aws lambda create-alias --name STAGE --function-name cloudfront-log-ingester --function-version=1
aws lambda create-alias --name PROD --function-name cloudfront-log-ingester --function-version=1
```
* create an S3 trigger on the PROD alias. You can then deploy and test to DEV and STAGE without affecting your production version (a boto3 sketch follows these steps)
```
1. go to the Lambda console
2. select the cloudfront-log-ingester function
3. press the "Qualifiers" button and select the PROD alias
4. select the "Triggers" tab
5. add an S3 trigger
6. set the bucket to where your CloudFront logs sit
7. set the event to "Object Create (All) - Put"
8. enable the trigger and save
```
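The same trigger can be created programmatically. In the following sketch the account ID, region and log bucket are placeholders you must replace with your own:
```
import boto3

region, account = 'eu-west-1', '123456789012'   # placeholders
log_bucket = 'my-cloudfront-log-bucket'         # placeholder
func_arn = ('arn:aws:lambda:%s:%s:function:cloudfront-log-ingester:PROD'
            % (region, account))

# allow S3 to invoke the PROD alias
boto3.client('lambda').add_permission(
    FunctionName=func_arn,
    StatementId='s3-invoke-prod',
    Action='lambda:InvokeFunction',
    Principal='s3.amazonaws.com',
    SourceArn='arn:aws:s3:::' + log_bucket)

# fire the PROD alias whenever a new log object is put in the bucket
boto3.client('s3').put_bucket_notification_configuration(
    Bucket=log_bucket,
    NotificationConfiguration={'LambdaFunctionConfigurations': [{
        'LambdaFunctionArn': func_arn,
        'Events': ['s3:ObjectCreated:Put']}]})
```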
* deploying a new build to the DEV alias
```
deploy-wrapper.py deploy --env DEV
```
* promoting that version to the STAGE alias
```
deploy-wrapper.py promote DEV STAGE
```
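To smoke-test a freshly deployed alias without waiting for a real log delivery, you can invoke it with a hand-built S3 event. A sketch, where the bucket and key are placeholders that must point at a real CloudFront .gz log object:
```
import json
import boto3

event = {'Records': [{'s3': {
    'bucket': {'name': 'my-cloudfront-log-bucket'},                    # placeholder
    'object': {'key': 'EXXXXXXXXXXXXX.2017-01-31-12.abcd1234.gz'}}}]}  # placeholder

resp = boto3.client('lambda').invoke(FunctionName='cloudfront-log-ingester',
                                     Qualifier='DEV',
                                     Payload=json.dumps(event))
print(resp['StatusCode'])
```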
# Deploy-wrapper.py usage
```
Deploy and manipulate lambda function

positional arguments:
  {promote,deploy,config,init,clean,setup}
                        [CMDS...]
    promote             promote <source> version to <target>
    deploy              deploy function to s3
    config              deploy config to s3
    init                creates the base lambda function
    clean               clean local build environment
    setup               create local build environment

optional arguments:
  -h, --help            show this help message and exit
```

## TODO
* aws policy files - S3, ELASTICSEARCH, LOG (see the sketch below for a starting point)
* improve instructions aka this file
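Until those policy files exist, an inline policy along these lines could be a starting point. This is a sketch only: the bucket, account ID and ES domain are placeholders, and the actions should be scoped to your own setup:
```
import json
import boto3

policy = {
    'Version': '2012-10-17',
    'Statement': [
        {'Effect': 'Allow', 'Action': 's3:GetObject',
         'Resource': 'arn:aws:s3:::my-cloudfront-log-bucket/*'},               # placeholder
        {'Effect': 'Allow', 'Action': 'es:ESHttp*',
         'Resource': 'arn:aws:es:eu-west-1:123456789012:domain/my-domain/*'},  # placeholder
        {'Effect': 'Allow',
         'Action': ['logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:PutLogEvents'],
         'Resource': 'arn:aws:logs:*:*:*'}]}

boto3.client('iam').put_role_policy(RoleName='lambda-cloudfront-log-ingester',
                                    PolicyName='cloudfront-log-ingester-inline',
                                    PolicyDocument=json.dumps(policy))
```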
--------------------------------------------------------------------------------
/lambda_function.py:
--------------------------------------------------------------------------------
'''
Lambda function to ingest CloudFront logs from S3 and bulk insert
them into Elasticsearch. This Lambda function needs to do an STS
assume-role in order to create an AWSRequestsAuth object to connect
to the Elasticsearch cluster.

Author: Jason Witting
Version: 0.1

Copyright (c) 2016 Jason Witting

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import csv
import gzip
import json
from datetime import datetime

import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import helpers
from aws_requests_auth.aws_auth import AWSRequestsAuth

# Global vars
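# NB: the order of FIELDNAMES must match the tab-separated column order of the
# CloudFront web-distribution access log format exactly - csv.DictReader below
# assigns columns to names purely by position.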
FIELDNAMES = (
    'logdate',  # this gets stripped and merged into a new timestamp field
    'logtime',  # this gets stripped and merged into a new timestamp field
    'edge-location',
    'src-bytes',
    'ip',
    'method',
    'host',
    'uri-stem',
    'status',
    'referer',
    'user-agent',
    'uri-query',
    'cookie',
    'edge-result-type',
    'edge-request-id',
    'host-header',
    'protocol',
    'resp-bytes',
    'time-taken',
    'forwarded-for',
    'ssl-protocol',
    'ssl-cipher',
    'edge-response-result-type'
)


# config_bucket='lambda-cf-es-config'
CONFIG_BUCKET = 'lambda-cloudfront-log-ingester-config'
# config s3 location and filename
CONFIG_FILE = 'config.json'


def sts_auth(config):
    '''Generate an auth request to connect to AWS ES'''
    sts = boto3.client('sts')

    creds = sts.assume_role(
        RoleArn=config['sts_role_arn'], RoleSessionName=config['sts_session_name'])

    auth = AWSRequestsAuth(aws_access_key=creds['Credentials']['AccessKeyId'],
                           aws_secret_access_key=creds['Credentials']['SecretAccessKey'],
                           aws_host=config['es_host'],
                           aws_region=config['es_region'],
                           aws_service='es',
                           aws_token=creds['Credentials']['SessionToken'])
    return auth


def parse_log(filename):
    '''Parse the log file into a list of records'''
    # init
    idx = 1
    recordset = []

    with gzip.open(filename) as data:

        result = csv.DictReader(data, fieldnames=FIELDNAMES, dialect="excel-tab")

        for row in result:
            # skip header rows - cruft
            if idx > 2:
                # cloudfront events are logged to the second only; date and time are separate
                # fields which we remove and merge into a new timestamp field
                date = row.pop('logdate')
                row['timestamp'] = datetime.strptime(
                    date + " " + row.pop('logtime'), '%Y-%m-%d %H:%M:%S').isoformat()
                # add to new record dict
                record = {
                    "_index": "cloudfrontlog-" + date,
                    "_type": "logs",
                    "_source": row
                }
                # append to recordset
                recordset.append(record)
            idx = idx + 1

    return recordset


def write_bulk(record_set, es_client, config):
    '''Write the record set to ES; chunk size has been increased to improve performance'''
    print "Writing data to ES"
    resp = helpers.bulk(es_client,
                        record_set,
                        chunk_size=config['es_bulk_chunk_size'],
                        timeout=config['es_bulk_timeout'])
    return resp


def load_config(context):
    '''Load config file from S3'''
    config = ''

    # check which alias (version) was invoked
    function_name = context.function_name
    alias = context.invoked_function_arn.split(':').pop()

    if function_name == alias:
        alias = '$LATEST'
        print "No Version Set - Default to $LATEST"

    s3_client = boto3.client('s3')

    # set the file path
    file_path = '/tmp/config.json'

    # download the config file from s3
    s3_client.download_file(CONFIG_BUCKET, alias + "/" + CONFIG_FILE, file_path)

    with open(file_path) as f:
        config = json.load(f)

    print "Successfully loaded config file"
    return config


def lambda_handler(event, context):
    '''Invoke Lambda'''
    # load config from json file in s3 bucket
    config = load_config(context)

    # create ES connection with sts auth
    es_client = Elasticsearch(host=config['es_host'],
                              port=80,
                              connection_class=RequestsHttpConnection,
                              http_auth=sts_auth(config),
                              timeout=config['es_connection_timeout'])

    # create a new index with custom mappings from config, ignore the error if
    # it already exists - a new index is created for each day, YMMV
    suffix = datetime.strftime(datetime.now(), '%Y-%m-%d')
    resp = es_client.indices.create(index="cloudfrontlog-" + suffix,
                                    body=config['es_mapping'],
                                    ignore=400)
    print resp

    # create a s3 boto client
    s3_client = boto3.client('s3')

    # split bucket and filepath to variables
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']

    # set the file path
    file_path = '/tmp/cflogfile.gz'

    # download the gzip log from s3
    s3_client.download_file(bucket, key, file_path)

    # parse the log
    record_set = parse_log(file_path)

    # write the records to ES
    resp = write_bulk(record_set, es_client, config)
    print resp

--------------------------------------------------------------------------------
/deploy-wrapper.py:
--------------------------------------------------------------------------------
#!/usr/local/bin/python
'''
Deployment and build script to build and deploy lambda functions
Author: Jason Witting
Version: 0.1
'''

import argparse
import json
import logging
import subprocess
import sys

import boto3

BASE_DIR = subprocess.check_output(['pwd']).strip('\n')
BUILD_SCRIPT = BASE_DIR + '/' + 'build.sh'

CONFIG_FILE = 'config/deployment-config.json'

# setup logger
LOGGER = logging.getLogger('DEPLOY')
LOGGER.setLevel(logging.DEBUG)
STDOUT_LOG_HANDLER = logging.StreamHandler(sys.stdout)
STDOUT_LOG_HANDLER.setLevel(logging.INFO)

FORMATTER = logging.Formatter('\
[%(asctime)s] \
[%(levelname)s] \
[%(name)s] %(message)s')

STDOUT_LOG_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(STDOUT_LOG_HANDLER)


def load_config(filename):
    '''loads the script config variables'''
    config = ''
    with open(filename) as f:
        config = json.load(f)
    return config


def create_deployment_bundle():
    '''create the lambda deployment zip file and return its name'''
    pkg = subprocess.check_output([BUILD_SCRIPT, '-b']).strip('\n')
    LOGGER.info("Created deployment bundle: " + pkg)
    return pkg


def publish_s3(filename, bucket, key):
    '''
    push a file to s3

    args:
        filename - (string) the file to publish to s3
        bucket - (string) s3 bucket
        key - (string) s3 object key
    '''
    s3 = boto3.client('s3')
    with open(filename, 'rb') as data:
        s3.upload_fileobj(data, bucket, key)
    LOGGER.info("Uploaded " + filename + " as " + key + " to S3://" + bucket)


def update_config(env, lambda_config_file, config):
    '''
    push the config file to a specific env folder on s3

    args:
        env - (string) the environment to push to, e.g. DEV, STAGE, PROD
        lambda_config_file - (string) the config.json file which contains
                             the lambda config
        config - (dict) the deployment config
    '''
    print ("FILE: " + lambda_config_file +
           " BUCKET: " + config['S3_CONFIG_BUCKET'] +
           " KEY: " + env + '/' +
           config['CONFIG_FILE'])
    return publish_s3(lambda_config_file,
                      config['S3_CONFIG_BUCKET'],
                      env + '/' +
                      config['CONFIG_FILE'])


def create_lambda_function(function_name, role_arn, handler, bucket, key,
                           timeout, memory_size, description=''):
    '''
    create a lambda function
    returns the entire response as a dict

    args:
        function_name - (string) name of the lambda function to create
        role_arn - (string) the role for the lambda function to assume
        handler - (string) the handler within the package to invoke
        bucket - (string) the s3 bucket where the deployment package resides
        key - (string) the s3 object key AKA the lambda deployment package filename
        timeout - (string) the timeout value for the lambda function
        memory_size - (string) the memory size of the lambda function

    opt args:
        description - description of the function
    '''
    aws_lambda = boto3.client('lambda')
    resp = aws_lambda.create_function(
        FunctionName=function_name,
        Runtime='python2.7',
        Role=role_arn,
        Handler=handler,
        Code={
            'S3Bucket': bucket,
            'S3Key': key,
        },
        Description=description,
        Timeout=int(timeout),
        MemorySize=int(memory_size),
        Publish=True
    )
    LOGGER.info("Created Lambda function: " + function_name)
    return resp


def update_lambda_alias(alias, version, function_name, description=''):
    '''
    update a lambda alias to the specified version
    returns the entire response as a dict

    args:
        alias - (string) name of the lambda alias to update
        version - (string) version number the alias should point to
        function_name - (string) the lambda function name

    opt args:
        description - description of the function
    '''
    aws_lambda = boto3.client('lambda')
    resp = aws_lambda.update_alias(
        FunctionName=function_name,
        Name=alias,
        FunctionVersion=version,
        Description=description
    )
    LOGGER.info("Updated Alias: " +
                alias +
                " to version: " +
                version)
    return resp


def publish_lambda(function_name, bucket, key):
    '''
    publish the lambda function code
    returns the new version number as a string

    args:
        function_name - (string) the function name
        bucket - (string) the s3 bucket
        key - (string) the s3 object key
    '''
    aws_lambda = boto3.client('lambda')
    resp = aws_lambda.update_function_code(
        FunctionName=function_name,
        S3Bucket=bucket,
        S3Key=key,
        Publish=True
    )
    LOGGER.info("Published version: " + resp['Version'])
    return resp['Version']


def get_alias_version(alias, function_name):
    '''
    get the version number associated with an alias
    returns the version number as a string

    args:
        alias - (string) the lambda alias to query
        function_name - (string) the lambda function name
    '''
    aws_lambda = boto3.client('lambda')
    response = aws_lambda.get_alias(
        FunctionName=function_name,
        Name=alias
    )
    return response['FunctionVersion']


def promote_version(source, target, config):
    '''
    promote the version tied to an alias to another environment (alias)
    returns the entire response as a dict

    args:
        source - (string) the source alias from which to get the version
        target - (string) the target alias to set the version on
    '''
    resp = ''
    version = get_alias_version(source, config['LAMBDA_FUNC_NAME'])

    print("promote " + source +
          " version: " + version + " to " +
          target + " (Y/N):")

    reply = raw_input().strip('\n').upper()

    if reply == 'Y':
        LOGGER.info('Updating Alias: ' + target +
                    ' to version: ' + version)
        resp = update_lambda_alias(target, version, config['LAMBDA_FUNC_NAME'])
        LOGGER.debug(resp)
    return resp


def main():
    '''
    main method
    '''

    config = load_config(CONFIG_FILE)

    parser = argparse.ArgumentParser(
        description='Deploy and manipulate lambda function')
    subparsers = parser.add_subparsers(
        dest='subparsers_name', help="[CMDS...]")

    parser_promote = subparsers.add_parser(
        'promote', help='promote <source> version to <target>')
    parser_promote.add_argument('source', choices=['DEV', 'STAGE', 'PROD'],
                                help='the source environment')
    parser_promote.add_argument('target', choices=['DEV', 'STAGE', 'PROD'],
                                help='the target environment')

    parser_deploy = subparsers.add_parser(
        'deploy', help='deploy function to s3')
    parser_deploy.add_argument(
        '--env', choices=['DEV', 'STAGE', 'PROD'],
        help='the target environment')

    parser_config = subparsers.add_parser('config',
                                          help='deploy config to s3')
    parser_config.add_argument('env',
                               choices=['DEV', 'STAGE', 'PROD'],
                               help='set config for a specific environment')
    subparsers.add_parser(
        'init', help='creates the base lambda function')

    subparsers.add_parser(
        'clean', help='clean local build environment')

    subparsers.add_parser(
        'setup', help='create local build environment')

    args = parser.parse_args()

    # deploy
    if args.subparsers_name == 'deploy':
        pkg = create_deployment_bundle()
        publish_s3(pkg, config['LAMBDA_DEPLOY_BUCKET'], pkg.split('/').pop())

        LOGGER.info("Uploaded lambda zip: " + pkg.split('/').pop() +
                    " to S3://" + config['LAMBDA_DEPLOY_BUCKET'])

        version = publish_lambda(config['LAMBDA_FUNC_NAME'],
                                 config['LAMBDA_DEPLOY_BUCKET'],
                                 pkg.split('/').pop())

        if args.env:
            LOGGER.debug(update_lambda_alias(args.env, version,
                                             config['LAMBDA_FUNC_NAME'],
                                             description=''))

    # promote
    if args.subparsers_name == 'promote':
        LOGGER.debug(promote_version(args.source, args.target, config))

    # config
    if args.subparsers_name == 'config':
        LOGGER.debug(update_config(args.env, BASE_DIR +
                                   "/config/" + config['CONFIG_FILE'], config))

    # init
    if args.subparsers_name == 'init':
        pkg = create_deployment_bundle()
        publish_s3(pkg, config['LAMBDA_DEPLOY_BUCKET'], pkg.split('/').pop())

        LOGGER.info("Uploaded lambda zip: " + pkg.split('/').pop() +
                    " to S3://" + config['LAMBDA_DEPLOY_BUCKET'])

        LOGGER.debug(create_lambda_function(config['LAMBDA_FUNC_NAME'],
                                            config['LAMBDA_ROLE_ARN'],
                                            config['LAMBDA_HANDLER'],
                                            config['LAMBDA_DEPLOY_BUCKET'],
                                            pkg.split('/').pop(),
                                            config['LAMBDA_TIMEOUT'],
                                            config['LAMBDA_MEMORY_SIZE']))

    # clean
    if args.subparsers_name == 'clean':
        LOGGER.info(subprocess.check_output([BUILD_SCRIPT, '-c']).strip('\n'))

    # setup
    if args.subparsers_name == 'setup':
        LOGGER.info("creating build environment " +
                    subprocess.check_output([BUILD_SCRIPT, '-s']).strip('\n'))


# launch main
if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------