├── LICENSE.txt ├── NOTICE.txt ├── README.md ├── cloudformation ├── aws-cloudformation-validation-pipeline.template ├── central-microservices.template ├── clone-repo.template └── main-pipeline.template ├── code ├── examples │ └── config.yml ├── lambda_functions │ ├── ami_check │ │ ├── ami_check.py │ │ └── requirements.txt │ ├── anon_reporting │ │ ├── anon_reporting.py │ │ └── requirements.txt │ ├── cleanup_on_stack_delete │ │ ├── cleanup_on_stack_delete.py │ │ └── requirements.txt │ ├── create_stack │ │ ├── create_stack.py │ │ └── requirements.txt │ ├── deploy_to_s3 │ │ ├── deploy_to_s3.py │ │ └── requirements.txt │ ├── email_notification │ │ ├── email_notification.py │ │ └── requirements.txt │ ├── generate_report │ │ ├── generate_report.py │ │ └── requirements.txt │ ├── git_pull │ │ ├── git_pull.py │ │ └── requirements.txt │ ├── lib │ │ ├── awsclients │ │ │ ├── __init__.py │ │ │ └── awsclients.py │ │ ├── cfnpipeline │ │ │ ├── __init__.py │ │ │ └── cfnpipeline.py │ │ ├── crhelper.py │ │ └── logger │ │ │ ├── __init__.py │ │ │ └── logger.py │ ├── lint_template │ │ ├── lint_template.py │ │ └── requirements.txt │ ├── stack_cleanup │ │ └── stack_cleanup.py │ ├── subnet_name │ │ ├── requirements.txt │ │ └── subnet_name.py │ ├── tcp_connect │ │ ├── requirements.txt │ │ └── tcp_connect.py │ └── validate_template │ │ ├── requirements.txt │ │ └── validate_template.py ├── scripts │ ├── cfn-validation-pipeline-cleanup │ ├── cfn-validation-pipeline-deploy │ ├── cfn-validation-pipeline-rollback │ └── cfn-validation-pipeline-skeleton └── tests │ ├── __init__.py │ ├── test_ami_check.py │ ├── test_anon_reporting.py │ ├── test_awsclients.py │ ├── test_cfnpipeline.py │ ├── test_cleanup_on_stack_delete.py │ ├── test_create_stack.py │ ├── test_deploy_to_s3.py │ ├── test_email_notification.py │ ├── test_generate_report.py │ ├── test_git_pull.py │ ├── test_lint_template.py │ ├── test_logger.py │ ├── test_stack_cleanup.py │ ├── test_subnet_name.py │ ├── test_tcp_connect.py │ └── test_validate_template.py ├── demo_source ├── LICENSE.txt ├── NOTICE.txt ├── README.md ├── ci │ ├── aws-vpc-3az-complete.json │ ├── aws-vpc-3az-public.json │ ├── aws-vpc-3az.json │ ├── aws-vpc-4az-complete.json │ ├── aws-vpc-4az-public.json │ ├── aws-vpc-4az.json │ ├── aws-vpc-complete.json │ ├── aws-vpc-defaults.json │ ├── aws-vpc-public.json │ └── config.yml └── templates │ └── aws-vpc.template └── setup.py /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Amazon Software License 1.0 2 | 3 | This Amazon Software License ("License") governs your use, reproduction, and 4 | distribution of the accompanying software as specified below. 5 | 6 | 1. Definitions 7 | 8 | "Licensor" means any person or entity that distributes its Work. 9 | 10 | "Software" means the original work of authorship made available under this 11 | License. 12 | 13 | "Work" means the Software and any additions to or derivative works of the 14 | Software that are made available under this License. 15 | 16 | The terms "reproduce," "reproduction," "derivative works," and 17 | "distribution" have the meaning as provided under U.S. copyright law; 18 | provided, however, that for the purposes of this License, derivative works 19 | shall not include works that remain separable from, or merely link (or bind 20 | by name) to the interfaces of, the Work. 
21 | 22 | Works, including the Software, are "made available" under this License by 23 | including in or with the Work either (a) a copyright notice referencing the 24 | applicability of this License to the Work, or (b) a copy of this License. 25 | 26 | 2. License Grants 27 | 28 | 2.1 Copyright Grant. Subject to the terms and conditions of this License, 29 | each Licensor grants to you a perpetual, worldwide, non-exclusive, 30 | royalty-free, copyright license to reproduce, prepare derivative works of, 31 | publicly display, publicly perform, sublicense and distribute its Work and 32 | any resulting derivative works in any form. 33 | 34 | 2.2 Patent Grant. Subject to the terms and conditions of this License, each 35 | Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free 36 | patent license to make, have made, use, sell, offer for sale, import, and 37 | otherwise transfer its Work, in whole or in part. The foregoing license 38 | applies only to the patent claims licensable by Licensor that would be 39 | infringed by Licensor's Work (or portion thereof) individually and 40 | excluding any combinations with any other materials or technology. 41 | 42 | 3. Limitations 43 | 44 | 3.1 Redistribution. You may reproduce or distribute the Work only if 45 | (a) you do so under this License, (b) you include a complete copy of this 46 | License with your distribution, and (c) you retain without modification 47 | any copyright, patent, trademark, or attribution notices that are present 48 | in the Work. 49 | 50 | 3.2 Derivative Works. You may specify that additional or different terms 51 | apply to the use, reproduction, and distribution of your derivative works 52 | of the Work ("Your Terms") only if (a) Your Terms provide that the use 53 | limitation in Section 3.3 applies to your derivative works, and (b) you 54 | identify the specific derivative works that are subject to Your Terms. 55 | Notwithstanding Your Terms, this License (including the redistribution 56 | requirements in Section 3.1) will continue to apply to the Work itself. 57 | 58 | 3.3 Use Limitation. The Work and any derivative works thereof only may be 59 | used or intended for use with the web services, computing platforms or 60 | applications provided by Amazon.com, Inc. or its affiliates, including 61 | Amazon Web Services, Inc. 62 | 63 | 3.4 Patent Claims. If you bring or threaten to bring a patent claim against 64 | any Licensor (including any claim, cross-claim or counterclaim in a 65 | lawsuit) to enforce any patents that you allege are infringed by any Work, 66 | then your rights under this License from such Licensor (including the 67 | grants in Sections 2.1 and 2.2) will terminate immediately. 68 | 69 | 3.5 Trademarks. This License does not grant any rights to use any 70 | Licensor's or its affiliates' names, logos, or trademarks, except as 71 | necessary to reproduce the notices described in this License. 72 | 73 | 3.6 Termination. If you violate any term of this License, then your rights 74 | under this License (including the grants in Sections 2.1 and 2.2) will 75 | terminate immediately. 76 | 77 | 4. Disclaimer of Warranty. 78 | 79 | THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 80 | EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF 81 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR 82 | NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER 83 | THIS LICENSE. 
SOME STATES' CONSUMER LAWS DO NOT ALLOW EXCLUSION OF AN 84 | IMPLIED WARRANTY, SO THIS DISCLAIMER MAY NOT APPLY TO YOU. 85 | 86 | 5. Limitation of Liability. 87 | 88 | EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL 89 | THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE 90 | SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, 91 | INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR 92 | RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK (INCLUDING 93 | BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS 94 | OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES 95 | OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF 96 | SUCH DAMAGES. 97 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | AWS CloudFormation Validation Pipeline 2 | 3 | Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | Licensed under the Amazon Software License (the "License"). You may not use this file except 5 | in compliance with the License. A copy of the License is located at http://aws.amazon.com/asl/ 6 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 7 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the 8 | specific language governing permissions and limitations under the License. 9 | 10 | ********************** 11 | THIRD PARTY COMPONENTS 12 | ********************** 13 | This software includes third party software subject to the following copyrights: 14 | 15 | pygit2 under the GNU GPL, version 2.0 16 | pyyaml under the GNU GPL, version 2.0 17 | 18 | The licenses for these third party components are included in LICENSE.txt 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Usage 2 | ----- 3 | 4 | A demo and deployment guide describing this solution are available on the AWS Answers website. 5 | 6 | To start writing your own tests and pipelines to test your templates, it's likely you're going to want to customize which tests are run and even write your own tests. 7 | 8 | To ensure that any binary dependencies you may have are built for the correct platform, it is recommended to create a new EC2 instance using the amzn-ami-hvm-2016.03.3.x86_64-gp2 AMI and run the build process from there.
9 | 10 | The following S3 permissions are required for the IAM user/role configured in the AWS CLI: 11 | 12 | ```json 13 | { 14 | "Version": "2012-10-17", 15 | "Statement": [ 16 | { 17 | "Effect": "Allow", 18 | "Action": [ 19 | "s3:CreateBucket", 20 | "s3:PutObject", 21 | "s3:PutObjectAcl", 22 | "s3:ListBucket", 23 | "s3:GetObject" 24 | ], 25 | "Resource": [ 26 | "*" 27 | ] 28 | } 29 | ] 30 | } 31 | ``` 32 | 33 | Ensure that pip and setuptools are up to date: 34 | ```bash 35 | sudo -H pip install --upgrade setuptools pip 36 | ``` 37 | 38 | Clone the validation pipeline repo: 39 | ```bash 40 | git clone https://github.com/awslabs/aws-cloudformation-validation-pipeline.git 41 | ``` 42 | 43 | Install: 44 | ```bash 45 | cd aws-cloudformation-validation-pipeline 46 | sudo -H python setup.py install 47 | ``` 48 | 49 | Now you're ready to start building tests and pipelines. To get going, create a project skeleton: 50 | ```bash 51 | cd ~/ 52 | cfn-validation-pipeline-skeleton 53 | cd validation_pipeline 54 | ``` 55 | 56 | In the validation_pipeline folder you will find all the required CloudFormation templates, Lambda function code, HTML docs and unit tests. 57 | 58 | Building/deploying dependencies, Lambda function zips and CloudFormation templates to S3 is done using the deployment utility: 59 | ```bash 60 | cfn-validation-pipeline-deploy my-cfn-pipeline-bucket 61 | ``` 62 | 63 | ***Note:*** this command has several configurable options; to view help for the deployment tool, run the command with the --help argument. 64 | 65 | Now you can launch stacks from the CloudFormation console using the S3 location for the desired CloudFormation template in the output of the previous command. 66 | 67 | Creating your own tests 68 | ----------------------- 69 | 70 | This can be done by creating a new folder in the 'lambda_functions' folder. The folder should contain: 71 | * python file(s) - these are installed into the Lambda function and executed when the pipeline runs. 72 | * requirements.txt [optional] - a list of all python packages that are required. Can include pip modules or custom modules stored in the 'lambda_functions/lib/' folder. 73 | 74 | Using the CFNPipeline class in your function greatly simplifies writing tests and is recommended. The included tests can be used as examples of usage for both pre-create and post-create tests. The validate_template and ami_check functions are fairly simple examples of pre-create and post-create tests respectively; a minimal skeleton is sketched at the end of this README. HTML API docs for usage of CFNPipeline are available in the 'docs' folder. 75 | 76 | Adding tests to the template and pipeline 77 | ----------------------------------------- 78 | Custom tests can be added to the 'cloudformation/central-microservices.template' file and added to the pipeline by modifying the 'cloudformation/main-pipeline.template' file. 79 | 80 | Once you've authored your new tests and pipeline, it can be deployed to S3 using the cfn-validation-pipeline-deploy command.
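As a starting point for your own test, the sketch below follows the same shape as the included ami_check function: configure logging, consume the CodePipeline event, inspect the fetched templates, then report success or failure. The check_has_description helper and the assumption that get_templates() returns a dict of template bodies are illustrative only; consult the HTML API docs for the real CFNPipeline interface.

```python
from awsclients import AwsClients
from cfnpipeline import CFNPipeline
from logger import Logger

loglevel = 'debug'
logger = Logger(loglevel=loglevel)
clients = AwsClients(logger)
pipeline_run = CFNPipeline(logger, clients)


def check_has_description(templates):
    # Hypothetical test logic: flag any template body missing a Description
    return [name for name, body in templates.items() if 'Description' not in body]


def lambda_handler(event, context):
    try:
        logger.config(context.aws_request_id)
        pipeline_run.consume_event(event, context, loglevel=loglevel)
        failures = check_has_description(pipeline_run.get_templates())
        if failures:
            pipeline_run.put_job_failure(str(failures))
        else:
            pipeline_run.put_job_success("all templates have a Description")
    except Exception as e:
        logger.error("unhandled exception!", exc_info=1)
        pipeline_run.put_job_failure(str(e))
```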
81 | -------------------------------------------------------------------------------- /cloudformation/clone-repo.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Creates a new CodeCommit repository and populates it with contents from S3 or another git repo.", 4 | "Mappings": { 5 | "Functions": { 6 | "PropagateRepo": { 7 | "S3Key": "lambda_functions/git_pull.zip", 8 | "LogLevel": "debug", 9 | "Handler": "git_pull" 10 | } 11 | }, 12 | "General": { 13 | "CodeRepo": { 14 | "S3BucketPrefix": "${bucket_prefix}", 15 | "S3KeyPrefix": "${key_prefix}${version_number}/" 16 | }, 17 | "AbsoluteBucketName": { 18 | "Enabled": "${absolute_bucket}" 19 | } 20 | } 21 | }, 22 | "Parameters": { 23 | "SourceRepoUrl": { 24 | "Description": "Source repository https clone url; if specified, the SourceS3 parameters will be ignored and source will be fetched from this git repo", 25 | "Type": "String", 26 | "Default": "" 27 | }, 28 | "SourceRepoBranch": { 29 | "Description": "Source repository branch", 30 | "Type": "String", 31 | "Default": "" 32 | }, 33 | "SourceS3Bucket": { 34 | "Description": "Source S3 Bucket", 35 | "Type": "String", 36 | "Default": "" 37 | }, 38 | "SourceS3Key": { 39 | "Description": "Source S3 key", 40 | "Type": "String", 41 | "Default": "" 42 | } 43 | }, 44 | "Conditions": { 45 | "UseRelativeBucket": { 46 | "Fn::Equals": [ 47 | { 48 | "Fn::FindInMap": [ 49 | "General", 50 | "AbsoluteBucketName", 51 | "Enabled" 52 | ] 53 | }, 54 | "No" 55 | ] 56 | } 57 | }, 58 | "Resources": { 59 | "PropagateRepoRole": { 60 | "Type": "AWS::IAM::Role", 61 | "Properties": { 62 | "AssumeRolePolicyDocument": { 63 | "Version": "2012-10-17", 64 | "Statement": [ 65 | { 66 | "Effect": "Allow", 67 | "Principal": { 68 | "Service": "lambda.amazonaws.com" 69 | }, 70 | "Action": "sts:AssumeRole" 71 | } 72 | ] 73 | }, 74 | "Path": "/", 75 | "Policies": [ 76 | { 77 | "PolicyName": "root", 78 | "PolicyDocument": { 79 | "Version": "2012-10-17", 80 | "Statement": [ 81 | { 82 | "Effect": "Allow", 83 | "Action": [ 84 | "logs:CreateLogGroup", 85 | "logs:CreateLogStream", 86 | "logs:PutLogEvents" 87 | ], 88 | "Resource": [ 89 | { "Fn::Sub": "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/*" } 90 | ] 91 | }, 92 | { 93 | "Effect": "Allow", 94 | "Action": [ 95 | "iam:AttachUserPolicy", 96 | "iam:CreateUser", 97 | "iam:DetachUserPolicy", 98 | "iam:DeleteUser" 99 | ], 100 | "Resource": [ 101 | { "Fn::Sub": "arn:aws:iam::${AWS::AccountId}:user/${CodeCommitRepo.Name}" } 102 | ] 103 | }, 104 | { 105 | "Effect": "Allow", 106 | "Action": [ 107 | "iam:CreateServiceSpecificCredential", 108 | "iam:DeleteServiceSpecificCredential" 109 | ], 110 | "Resource": [ 111 | "*" 112 | ] 113 | }, 114 | { 115 | "Effect": "Allow", 116 | "Action": [ 117 | "codecommit:GetRepository" 118 | ], 119 | "Resource": [ 120 | { "Fn::Sub": "arn:aws:codecommit:${AWS::Region}:${AWS::AccountId}:${CodeCommitRepo.Name}" } 121 | ] 122 | } 123 | ] 124 | } 125 | } 126 | ] 127 | } 128 | }, 129 | "PropagateRepoLambda": { 130 | "Type": "AWS::Lambda::Function", 131 | "Properties": { 132 | "Handler": { 133 | "Fn::Join": [ 134 | ".", 135 | [ 136 | { 137 | "Fn::FindInMap": [ 138 | "Functions", 139 | "PropagateRepo", 140 | "Handler" 141 | ] 142 | }, 143 | "lambda_handler" 144 | ] 145 | ] 146 | }, 147 | "MemorySize": "128", 148 | "Role": { 149 | "Fn::GetAtt": [ 150 | "PropagateRepoRole", 151 | "Arn" 152 | ] 153 | }, 154 | "Runtime": "python2.7", 155
| "Timeout": "300", 156 | "Code": { 157 | "S3Bucket": { 158 | "Fn::Join": [ 159 | "", 160 | [ 161 | { 162 | "Fn::FindInMap": [ 163 | "General", 164 | "CodeRepo", 165 | "S3BucketPrefix" 166 | ] 167 | }, 168 | { 169 | "Fn::If": [ 170 | "UseRelativeBucket", 171 | { 172 | "Ref": "AWS::Region" 173 | }, 174 | { 175 | "Ref": "AWS::NoValue" 176 | } 177 | ] 178 | } 179 | ] 180 | ] 181 | }, 182 | "S3Key": { 183 | "Fn::Join": [ 184 | "", 185 | [ 186 | { 187 | "Fn::FindInMap": [ 188 | "General", 189 | "CodeRepo", 190 | "S3KeyPrefix" 191 | ] 192 | }, 193 | { 194 | "Fn::FindInMap": [ 195 | "Functions", 196 | "PropagateRepo", 197 | "S3Key" 198 | ] 199 | } 200 | ] 201 | ] 202 | } 203 | } 204 | } 205 | }, 206 | "PropagateRepo": { 207 | "DependsOn": "CodeCommitRepo", 208 | "Type": "AWS::CloudFormation::CustomResource", 209 | "Version": "1.0", 210 | "Properties": { 211 | "ServiceToken": { 212 | "Fn::GetAtt": [ 213 | "PropagateRepoLambda", 214 | "Arn" 215 | ] 216 | }, 217 | "SourceRepoUrl": { 218 | "Ref": "SourceRepoUrl" 219 | }, 220 | "SourceRepoBranch": { 221 | "Ref": "SourceRepoBranch" 222 | }, 223 | "SourceS3Bucket": { 224 | "Ref": "SourceS3Bucket" 225 | }, 226 | "SourceS3Key": { 227 | "Ref": "SourceS3Key" 228 | }, 229 | "DestRepoName": { 230 | "Fn::GetAtt": [ 231 | "CodeCommitRepo", 232 | "Name" 233 | ] 234 | }, 235 | "DestRepoUrl": { 236 | "Fn::GetAtt": [ 237 | "CodeCommitRepo", 238 | "CloneUrlHttp" 239 | ] 240 | }, 241 | "loglevel": { 242 | "Fn::FindInMap": [ 243 | "Functions", 244 | "PropagateRepo", 245 | "LogLevel" 246 | ] 247 | } 248 | } 249 | }, 250 | "CodeCommitRepo": { 251 | "Type": "AWS::CodeCommit::Repository", 252 | "Properties": { 253 | "RepositoryName": { 254 | "Ref": "AWS::StackName" 255 | }, 256 | "RepositoryDescription": { 257 | "Fn::Sub": "Clone of ${SourceRepoUrl}" 258 | } 259 | } 260 | } 261 | }, 262 | "Outputs": { 263 | "CodeCommitRepoName": { 264 | "Value": { 265 | "Fn::GetAtt": [ 266 | "CodeCommitRepo", 267 | "Name" 268 | ] 269 | } 270 | } 271 | } 272 | } 273 | -------------------------------------------------------------------------------- /code/examples/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | regions: 3 | - ap-northeast-1 4 | - eu-central-1 5 | - eu-west-1 6 | - us-east-1 7 | tests: 8 | public: 9 | parameter_input: aws-vpc-public.json 10 | template_file: aws-vpc.template 11 | -------------------------------------------------------------------------------- /code/lambda_functions/ami_check/ami_check.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients 2 | from cfnpipeline import CFNPipeline 3 | from datetime import datetime 4 | from logger import Logger 5 | import re 6 | 7 | loglevel = 'debug' 8 | logger = Logger(loglevel=loglevel) 9 | logger.info('New Lambda container initialised, logging configured.') 10 | clients = AwsClients(logger) 11 | pipeline_run = CFNPipeline(logger, clients) 12 | 13 | 14 | def get_latest_ami(ami_id, regions): 15 | latest_ami = False 16 | for region in regions: 17 | ec2_client = clients.get('ec2', region) 18 | try: 19 | name = ec2_client.describe_images(ImageIds=[ami_id])['Images'][0]['Name'] 20 | latest_ami = ami_id 21 | except Exception as e: 22 | if "The image id '[%s]' does not exist" % ami_id in e.args[0]: 23 | if not latest_ami: 24 | latest_ami = 'invalid' 25 | continue 26 | else: 27 | raise 28 | for m in re.findall('20[1-2][0-9]\.[0-9][0-9]\.[0-9]*', name): 29 | name = name.replace(m, '*') 30 | 
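# A name like "amzn-ami-hvm-2016.03.3.x86_64-gp2" becomes "amzn-ami-hvm-*.x86_64-gp2" here,
# so the describe_images name filter below matches every release in the same AMI lineage.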
logger.info("Searching for newer AMI using name filter: %s" % name) 31 | results = ec2_client.describe_images(Filters=[{'Name': 'name', 'Values': [name]}]) 32 | latest = datetime.strptime('1970-01-01T00:00:00.000Z', "%Y-%m-%dT%H:%M:%S.%fZ") 33 | for result in results['Images']: 34 | if datetime.strptime(result['CreationDate'], "%Y-%m-%dT%H:%M:%S.%fZ") > latest: 35 | latest = datetime.strptime(result['CreationDate'], "%Y-%m-%dT%H:%M:%S.%fZ") 36 | latest_ami = result['ImageId'] 37 | return latest_ami 38 | 39 | 40 | def match_startswith(obj, val): 41 | return obj.startswith(val) 42 | 43 | 44 | def find_ami_ids(templates): 45 | matches = [] 46 | pipeline_run.find_in_obj(templates, 'ami-', match_startswith, matches) 47 | return matches 48 | 49 | 50 | def lambda_handler(event, context): 51 | try: 52 | logger.config(context.aws_request_id) 53 | logger.debug("Handler starting...") 54 | logger.debug(event) 55 | pipeline_run.consume_event(event, context, loglevel=loglevel) 56 | logger.info({'event': 'new_invoke'}) 57 | logger.debug(pipeline_run.ci_configs) 58 | templates = pipeline_run.get_templates() 59 | matches = find_ami_ids(templates) 60 | logger.debug(matches) 61 | success = [] 62 | failure = [] 63 | for match in matches: 64 | logger.info("checking ami %s" % match['value']) 65 | if len(match['regions']) == 0: 66 | match['error'] = "cannot map a region from the ami-id; this likely indicates that this template will only be able to launch in one region" 67 | failure.append(match) 68 | else: 69 | latest = get_latest_ami(match['value'], match['regions']) 70 | if latest == 'invalid': 71 | match['error'] = "ami %s cannot be found in regions %s" % (match['value'], match['regions']) 72 | failure.append(match) 73 | elif latest == match['value']: 74 | success.append(match) 75 | else: 76 | match['error'] = "ami is out of date, latest ami is %s" % latest 77 | failure.append(match) 78 | if len(failure) > 0: 79 | logger.error(failure) 80 | pipeline_run.put_job_failure(str(failure)) 81 | else: 82 | logger.info(success) 83 | pipeline_run.put_job_success(str(success)) 84 | except Exception as e: 85 | logger.error("unhandled exception!", exc_info=1) 86 | pipeline_run.put_job_failure(str(e)) 87 | -------------------------------------------------------------------------------- /code/lambda_functions/ami_check/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | -------------------------------------------------------------------------------- /code/lambda_functions/anon_reporting/anon_reporting.py: -------------------------------------------------------------------------------- 1 | import crhelper 2 | from datetime import datetime 3 | from hashlib import sha256 4 | import json 5 | import uuid 6 | import urllib2 7 | 8 | # initialise logger 9 | logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"}) 10 | logger.info('Logging configured') 11 | init_failed = False 12 | 13 | 14 | def send_data(uuid, solution_id, stack_event, region, stack_id): 15 | logger.info("Sending anonymous data") 16 | data_dict = { 17 | 'TimeStamp': str(datetime.utcnow().isoformat()), 18 | 'UUID': uuid, 19 | 'Data': { 20 | "status": "stack_" + stack_event.lower(), 21 | "stack_hash": sha256(stack_id).hexdigest(), 22 | "region": region 23 | }, 24 | 'Solution': solution_id, 25 | } 26 | data_json = json.dumps(data_dict) 27 | logger.info("Data: %s", data_json) 28 | url = 'https://oszclq8tyh.execute-api.us-east-1.amazonaws.com/prod/generic' 29 |
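# urllib2.Request issues an HTTP POST whenever a data argument is supplied,
# so the request below pushes data_json to the metrics endpoint above.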
headers = {'content-type': 'application/json'} 30 | req = urllib2.Request(url, data_json, headers) 31 | rsp = urllib2.urlopen(req) 32 | rspcode = rsp.getcode() 33 | content = rsp.read() 34 | logger.info("Response from APIGateway: %s, %s", rspcode, content) 35 | 36 | 37 | def create(event, context): 38 | physical_resource_id = str(uuid.uuid4()) 39 | response_data = {} 40 | send_data( 41 | physical_resource_id, 42 | event['ResourceProperties']['SolutionID'], 43 | event['RequestType'], 44 | event['StackId'].split(':')[3], 45 | event['StackId'] 46 | ) 47 | return physical_resource_id, response_data 48 | 49 | 50 | def update(event, context): 51 | physical_resource_id = event['PhysicalResourceId'] 52 | response_data = {} 53 | send_data( 54 | physical_resource_id, 55 | event['ResourceProperties']['SolutionID'], 56 | event['RequestType'], 57 | event['StackId'].split(':')[3], 58 | event['StackId'] 59 | ) 60 | return physical_resource_id, response_data 61 | 62 | 63 | def delete(event, context): 64 | physical_resource_id = event['PhysicalResourceId'] 65 | send_data( 66 | physical_resource_id, 67 | event['ResourceProperties']['SolutionID'], 68 | event['RequestType'], 69 | event['StackId'].split(':')[3], 70 | event['StackId'] 71 | ) 72 | return 73 | 74 | 75 | def lambda_handler(event, context): 76 | # update the logger with event info 77 | global logger 78 | logger = crhelper.log_config(event, loglevel='debug') 79 | return crhelper.cfn_handler(event, context, create, update, delete, logger, 80 | init_failed) 81 | -------------------------------------------------------------------------------- /code/lambda_functions/anon_reporting/requirements.txt: -------------------------------------------------------------------------------- 1 | crhelper 2 | -------------------------------------------------------------------------------- /code/lambda_functions/cleanup_on_stack_delete/cleanup_on_stack_delete.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import crhelper 3 | import re 4 | import uuid 5 | 6 | # initialise logger 7 | logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"}) 8 | logger.info('Logging configured') 9 | init_failed = False 10 | 11 | 12 | def get_regions(region, service): 13 | if region == 'ALL': 14 | s = boto3.session.Session(region_name='us-east-1') 15 | return s.get_available_regions(service) 16 | else: 17 | return [region] 18 | 19 | 20 | def get_all_stacks(): 21 | regions = get_regions('ALL', 'cloudformation') 22 | stacks = {} 23 | for region in regions: 24 | stacks[region] = [] 25 | cfn_client = boto3.client('cloudformation', region_name=region) 26 | response = cfn_client.list_stacks(StackStatusFilter=[ 27 | 'CREATE_FAILED', 'CREATE_COMPLETE', 'ROLLBACK_COMPLETE', 28 | 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'DELETE_FAILED']) 29 | for stack in response['StackSummaries']: 30 | describe_response = cfn_client.describe_stacks(StackName=stack['StackName']) 31 | for tag in describe_response['Stacks'][0]['Tags']: 32 | if tag['Key'] == 'cfn_cicd_pipeline': 33 | stacks[region].append({ 34 | 'name': stack['StackName'], 'id': stack['StackId'], 'pipeline': tag['Value'], 35 | 'status': stack['StackStatus'], 'created': stack['CreationTime'].replace(tzinfo=None), 36 | 'tags': describe_response['Stacks'][0]['Tags'], 'region': region}) 37 | return stacks 38 | 39 | 40 | def iter_stacks(stacks, filter_func, filter_val): 41 | filtered_stacks = {} 42 | for region in stacks.keys(): 43 | filtered_stacks[region] = [] 44 | for stack in 
stacks[region]: 45 | if filter_func(stack, filter_val): 46 | filtered_stacks[region].append(stack) 47 | return filtered_stacks 48 | 49 | 50 | def filter_pipeline_name(stack, pipeline_name): 51 | for tag in stack['tags']: 52 | if tag['Key'] == 'cfn_cicd_pipeline' and tag['Value'] == pipeline_name: 53 | return True 54 | return False 55 | 56 | 57 | def delete_provisioned_product(provisioned_product_id, stack_id, region, cfn_client): 58 | """Deletes a particular provisioned product and all the other related Service Catalog artifacts. 59 | 60 | Args: 61 | Provisioned product (str): provisioned_product_id for provisioned product to be deleted 62 | """ 63 | sc_client = boto3.client('servicecatalog', region_name=region) 64 | try: 65 | logger.debug("Deleting provisioned product %s in %s" % (provisioned_product_id, region)) 66 | response = sc_client.scan_provisioned_products(AccessLevelFilter={'Key': 'Account', 'Value': 'self'}) 67 | while 'PageToken' in response: 68 | for item in response['ProvisionedProducts']: 69 | if item['Id'] == provisioned_product_id: 70 | record_id = item['LastRecordId'] 71 | response = sc_client.scan_provisioned_products(AccessLevelFilter={'Key': 'Account', 'Value': 'self'}, 72 | PageSize=1, 73 | PageToken=response['PageToken'] 74 | ) 75 | for item in response['ProvisionedProducts']: 76 | if item['Id'] == provisioned_product_id: 77 | record_id = item['LastRecordId'] 78 | response = sc_client.describe_record(Id=record_id) 79 | product_id = response['RecordDetail']['ProductId'] 80 | try: 81 | response = sc_client.terminate_provisioned_product(ProvisionedProductId=provisioned_product_id, 82 | TerminateToken=str(uuid.uuid4())) 83 | logger.debug("Provisioned product %s terminated" % provisioned_product_id) 84 | except Exception as e: 85 | logger.error("Failed to terminate provisioned product %s. 
Falling back to deleting the CloudFormation stack instead" % (provisioned_product_id), exc_info=1) 86 | try: 87 | logger.debug("Deleting stack %s in %s" % (stack_id, region)) 88 | cfn_client.delete_stack(StackName=stack_id) 89 | except Exception as e: 90 | logger.error("Failed to delete stack %s in %s" % (stack_id, region), exc_info=1) 91 | raise e 92 | response = sc_client.list_portfolios_for_product(ProductId=product_id) 93 | portfolio_id = response['PortfolioDetails'][0]['Id'] 94 | response = sc_client.list_principals_for_portfolio(PortfolioId=portfolio_id) 95 | principal_arn = response['Principals'][0]['PrincipalARN'] 96 | response = sc_client.disassociate_principal_from_portfolio(PortfolioId=portfolio_id, PrincipalARN=principal_arn) 97 | logger.debug("Principal %s disassociated from portfolio %s" % (principal_arn, portfolio_id)) 98 | response = sc_client.list_constraints_for_portfolio(PortfolioId=portfolio_id) 99 | constraint_id = response['ConstraintDetails'][0]['ConstraintId'] 100 | response = sc_client.delete_constraint(Id=constraint_id) 101 | logger.debug("Constraint %s deleted from portfolio %s" % (constraint_id, portfolio_id)) 102 | response = sc_client.disassociate_product_from_portfolio(ProductId=product_id, PortfolioId=portfolio_id) 103 | logger.debug("Product %s disassociated from portfolio %s" % (product_id, portfolio_id)) 104 | response = sc_client.delete_product(Id=product_id) 105 | logger.debug("Product %s deleted from %s" % (product_id, region)) 106 | response = sc_client.delete_portfolio(Id=portfolio_id) 107 | logger.debug("Portfolio %s deleted from %s" % (portfolio_id, region)) 108 | return {'status': "success", 'stackid': stack_id, 'region': region, 'detail': None} 109 | except Exception as e: 110 | logger.error("Failed to delete provisioned product %s with stack %s in %s" % (provisioned_product_id, 111 | stack_id, 112 | region), exc_info=1) 113 | 114 | 115 | def delete_stacks(stacks): 116 | for region in stacks.keys(): 117 | if len(stacks[region]) > 0: 118 | cfn_client = boto3.client('cloudformation', region_name=region) 119 | for stack in stacks[region]: 120 | # Check for Service Catalog stacks 121 | match = re.search(r'(pp-)\w+', stack['id']) 122 | if match is not None: 123 | logger.debug('Service Catalog product detected: %s with provisioned product id %s' % (stack, match.group())) 124 | delete_provisioned_product(match.group(), stack['id'], region, cfn_client) 125 | else: 126 | print('deleting stack %s in %s from pipeline %s' % (stack['name'], region, stack['pipeline'])) 127 | cfn_client.delete_stack(StackName=stack['name']) 128 | 129 | 130 | def create(event, context): 131 | physical_resource_id = "cleanup_stacks_on_delete" 132 | response_data = {} 133 | return physical_resource_id, response_data 134 | 135 | 136 | def update(event, context): 137 | physical_resource_id = event['PhysicalResourceId'] 138 | response_data = {} 139 | return physical_resource_id, response_data 140 | 141 | 142 | def delete(event, context): 143 | pipeline = event['ResourceProperties']['Pipeline'] 144 | stacks = get_all_stacks() 145 | filtered_stacks = iter_stacks(stacks, filter_pipeline_name, pipeline) 146 | delete_stacks(filtered_stacks) 147 | return 148 | 149 | 150 | def lambda_handler(event, context): 151 | # update the logger with event info 152 | global logger 153 | logger = crhelper.log_config(event, loglevel='debug') 154 | return crhelper.cfn_handler(event, context, create, update, delete, logger, 155 | init_failed) 156 | 
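# Hypothetical local usage sketch of the helpers above (the pipeline name is
# illustrative): fetch every tagged stack, keep only those created by one
# pipeline, then delete them, mirroring what delete() does on a Delete event.
if __name__ == '__main__':
    all_stacks = get_all_stacks()
    pipeline_stacks = iter_stacks(all_stacks, filter_pipeline_name, 'my-validation-pipeline')
    delete_stacks(pipeline_stacks)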
-------------------------------------------------------------------------------- /code/lambda_functions/cleanup_on_stack_delete/requirements.txt: -------------------------------------------------------------------------------- 1 | crhelper 2 | -------------------------------------------------------------------------------- /code/lambda_functions/create_stack/create_stack.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients 2 | from cfnpipeline import CFNPipeline 3 | from logger import Logger 4 | 5 | 6 | loglevel = 'debug' 7 | logger = Logger(loglevel=loglevel) 8 | logger.info('New Lambda container initialised, logging configured.') 9 | clients = AwsClients(logger) 10 | pipeline_run = CFNPipeline(logger, clients) 11 | 12 | 13 | def lambda_handler(event, context): 14 | try: 15 | logger.config(context.aws_request_id) 16 | logger.debug("Handler starting...") 17 | logger.debug(event) 18 | pipeline_run.consume_event(event, context, loglevel=loglevel) 19 | logger.info({'event': 'new_invoke'}) 20 | if pipeline_run.cleanup_previous: 21 | deleted_stacks = None 22 | if not pipeline_run.continuation_event: 23 | logger.info({'event': 'pre-delete_start'}) 24 | deleted_stacks = pipeline_run.cleanup_previous_stacks() 25 | elif 'pre-delete' in pipeline_run.continuation_data['message'].keys(): 26 | logger.info({'event': 'pre-delete_continuation'}) 27 | deleted_stacks = pipeline_run.check_statuses( 28 | pipeline_run.continuation_data['message']['deleting'] 29 | ) 30 | if deleted_stacks: 31 | if pipeline_run.handle_deletes(deleted_stacks): 32 | # delete handler sent a failure or continuation, so exit 33 | return 34 | try: 35 | stacks_in_event = 'stacks' in pipeline_run.continuation_data['message'].keys() 36 | except Exception: 37 | stacks_in_event = False 38 | if not stacks_in_event: 39 | logger.info({'event': 'create_stacks'}) 40 | stacks = pipeline_run.create_stacks() 41 | else: 42 | logger.info({'event': 'create_stacks_continuation'}) 43 | stacks = pipeline_run.continuation_data['message']['stacks'] 44 | updated_stacks = pipeline_run.check_statuses(stacks['inprogress']) 45 | stacks['inprogress'] = updated_stacks['inprogress'] 46 | stacks['success'] += updated_stacks['success'] 47 | stacks['error'] += updated_stacks['error'] 48 | 49 | if len(stacks['inprogress']) > 0: 50 | msg = "Stack creation still in progress..." 
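# The dict below is handed to continue_job_later(), which stores it as the
# CodePipeline continuation token so the next invocation can resume polling
# these stacks instead of creating them again.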
51 | data = { 52 | "status": msg, 53 | 'configs': pipeline_run.ci_configs, 54 | 'stacks': stacks 55 | } 56 | logger.info({'event': 'stack_create_continuation'}) 57 | pipeline_run.continue_job_later(data) 58 | logger.debug(msg) 59 | return 60 | elif len(stacks['error']) > 0: 61 | deleted_stacks = None 62 | stacks_to_delete = [] 63 | if pipeline_run.cleanup_non_failed and pipeline_run.cleanup_failed: 64 | stacks_to_delete = stacks['success'] + stacks['inprogress'] + stacks['error'] 65 | elif pipeline_run.cleanup_failed: 66 | stacks_to_delete = stacks['error'] 67 | elif pipeline_run.cleanup_non_failed: 68 | stacks_to_delete = stacks['success'] + stacks['inprogress'] 69 | try: 70 | delete_continuation = 'post-delete' in pipeline_run.continuation_data['message'].keys() 71 | except Exception: 72 | delete_continuation = False 73 | if not delete_continuation: 74 | if stacks_to_delete != []: 75 | logger.info({'event': 'post-delete_start'}) 76 | deleted_stacks = pipeline_run.delete_stacks(stacks_to_delete) 77 | else: 78 | logger.info({'event': 'post-delete_continuation'}) 79 | deleted_stacks = pipeline_run.check_statuses( 80 | pipeline_run.continuation_data['message']['deleting'] 81 | ) 82 | if deleted_stacks: 83 | if pipeline_run.handle_deletes(deleted_stacks, stacks, 'post'): 84 | # delete handler sent a failure or continuation, so exit 85 | logger.info({'event': 'post-delete_complete'}) 86 | return 87 | msg = "%s StackCreate failures %s" % (len(stacks['error']), stacks['error']) 88 | pipeline_run.put_job_failure(msg) 89 | logger.debug("%s StackCreate failures %s" % (len(stacks['error']), stacks['error'])) 90 | return 91 | logger.info({'event': 'stack_create_complete', "stacks": stacks['success']}) 92 | pipeline_run.upload_output_artifact(stacks['success']) 93 | msg = "%s StackCreate success %s" % (len(stacks['success']), stacks['success']) 94 | pipeline_run.put_job_success(msg) 95 | return 96 | 97 | except Exception as e: 98 | logger.error("Unhandled exception!", exc_info=1) 99 | pipeline_run.put_job_failure(str(e)) 100 | -------------------------------------------------------------------------------- /code/lambda_functions/create_stack/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | -------------------------------------------------------------------------------- /code/lambda_functions/deploy_to_s3/deploy_to_s3.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients 2 | from cfnpipeline import CFNPipeline 3 | from logger import Logger 4 | 5 | 6 | loglevel = 'debug' 7 | logger = Logger(loglevel=loglevel) 8 | logger.info('New Lambda container initialised, logging configured.') 9 | clients = AwsClients(logger) 10 | pipeline_run = CFNPipeline(logger, clients) 11 | 12 | 13 | def lambda_handler(event, context): 14 | try: 15 | logger.config(context.aws_request_id, loglevel=loglevel) 16 | logger.debug("Handler starting...") 17 | logger.debug(event) 18 | pipeline_run.consume_event(event, context, loglevel=loglevel) 19 | logger.info({'event': 'new_invoke'}) 20 | pipeline_run.deploy_to_s3() 21 | pipeline_run.put_job_success("S3 bucket updated successfully") 22 | return 23 | 24 | except Exception as e: 25 | logger.error("Unhandled exception!", exc_info=1) 26 | pipeline_run.put_job_failure(str(e)) 27 | -------------------------------------------------------------------------------- /code/lambda_functions/deploy_to_s3/requirements.txt: 
-------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | -------------------------------------------------------------------------------- /code/lambda_functions/email_notification/email_notification.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients 2 | import boto3 3 | from cfnpipeline import CFNPipeline 4 | from logger import Logger 5 | import os 6 | 7 | 8 | message = """\ 9 | A pipeline action has failed: 10 | 11 | Pipeline: %s 12 | Region: %s 13 | Stage: %s 14 | Action: %s 15 | Link: %s 16 | Error: %s 17 | 18 | """ 19 | link_template = "https://%s.console.aws.amazon.com/codepipeline/home?region=%s#/view/%s" 20 | 21 | table_name = os.environ['table_name'] 22 | 23 | loglevel = 'debug' 24 | logger = Logger(loglevel=loglevel) 25 | logger.info('New Lambda container initialised, logging configured.') 26 | clients = AwsClients(logger) 27 | pipeline_run = CFNPipeline(logger, clients) 28 | 29 | 30 | def get_pipeline_failures(pipeline, region): 31 | cp_client = boto3.client('codepipeline', region_name=region) 32 | pl_state = cp_client.get_pipeline_state(name=pipeline) 33 | print(pl_state) 34 | issues = [] 35 | for stage in pl_state['stageStates']: 36 | for action in stage['actionStates']: 37 | if 'latestExecution' in action.keys(): 38 | if action['latestExecution']['status'] == 'Failed': 39 | stage_name = stage['stageName'] 40 | action_name = action['actionName'] 41 | error = action['latestExecution']['errorDetails']['message'] 42 | lastchange = action['latestExecution']['lastStatusChange'].replace( 43 | tzinfo=None 44 | ).strftime('%s') 45 | issues.append([stage_name, action_name, error, lastchange]) 46 | if len(issues) > 0: 47 | return issues 48 | return None 49 | 50 | 51 | def is_new_issue(error_id, lastchange): 52 | ddb_table = boto3.resource('dynamodb').Table(table_name) 53 | response = ddb_table.get_item(Key={"FailureId": error_id}) 54 | if 'Item' in response.keys(): 55 | if response['Item']['LastChange'] == lastchange: 56 | return False 57 | ddb_table.put_item(Item={'FailureId': error_id, 'LastChange': lastchange}) 58 | return True 59 | 60 | 61 | def lambda_handler(event, context): 62 | print(event) 63 | pipeline = event['pipeline'] 64 | region = event['region'] 65 | topic = event['topic'] 66 | sns_region = topic.split(':')[3] 67 | 68 | issues = get_pipeline_failures(pipeline, region) 69 | if issues: 70 | for stage, action, error, lastchange in issues: 71 | error_id = "%s::%s::%s::%s" % (region, pipeline, stage, action) 72 | if is_new_issue(error_id, lastchange): 73 | sns_client = boto3.client('sns', region_name=sns_region) 74 | subject = "Pipeline Failure - %s - %s" % (pipeline, stage) 75 | link = link_template % (region, region, pipeline) 76 | body = message % (pipeline, region, stage, action, link, error) 77 | body += '\n\n' 78 | body += pipeline_run.build_execution_report( 79 | pipeline_id=pipeline, region=region, 80 | sns_topic=None, s3_bucket=None, s3_prefix='', 81 | s3_region=None, execution_id=None 82 | ) 83 | sns_client.publish(TopicArn=topic, Subject=subject[:100], Message=body) 84 | -------------------------------------------------------------------------------- /code/lambda_functions/email_notification/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | 
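# A hypothetical direct-invoke sketch for email_notification.lambda_handler,
# showing the event shape the handler expects (all values illustrative):
#
#   lambda_handler({
#       "pipeline": "my-validation-pipeline",
#       "region": "us-east-1",
#       "topic": "arn:aws:sns:us-east-1:123456789012:pipeline-alerts"
#   }, None)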
-------------------------------------------------------------------------------- /code/lambda_functions/generate_report/generate_report.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients 2 | from cfnpipeline import CFNPipeline 3 | from logger import Logger 4 | 5 | 6 | loglevel = 'debug' 7 | logger = Logger(loglevel=loglevel) 8 | logger.info('New Lambda container initialised, logging configured.') 9 | clients = AwsClients(logger) 10 | pipeline_run = CFNPipeline(logger, clients) 11 | 12 | 13 | def lambda_handler(event, context): 14 | try: 15 | logger.config(context.aws_request_id, loglevel=loglevel) 16 | logger.debug("Handler starting...") 17 | logger.debug(event) 18 | pipeline_run.consume_event(event, context, loglevel=loglevel) 19 | logger.info({'event': 'new_invoke'}) 20 | pipeline_run.build_execution_report( 21 | pipeline_id=pipeline_run.pipeline_name, region=pipeline_run.region, 22 | sns_topic=pipeline_run.report_sns_topic, s3_bucket=pipeline_run.report_s3_bucket, 23 | s3_prefix=pipeline_run.report_s3_prefix, s3_region=pipeline_run.region, 24 | execution_id=pipeline_run.pipeline_execution_id 25 | ) 26 | pipeline_run.put_job_success("report generated successfully") 27 | return 28 | 29 | except Exception as e: 30 | logger.error("Unhandled exception!", exc_info=1) 31 | pipeline_run.put_job_failure(str(e)) 32 | -------------------------------------------------------------------------------- /code/lambda_functions/generate_report/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | -------------------------------------------------------------------------------- /code/lambda_functions/git_pull/git_pull.py: -------------------------------------------------------------------------------- 1 | import os 2 | from boto3 import client 3 | from boto3 import session 4 | from botocore.vendored import requests 5 | from pygit2 import clone_repository 6 | from pygit2 import init_repository 7 | from pygit2 import GitError 8 | from pygit2 import RemoteCallbacks 9 | from pygit2 import Signature 10 | from pygit2 import UserPass 11 | import shutil 12 | from time import sleep 13 | import crhelper 14 | import zipfile 15 | 16 | # initialise logger 17 | logger = crhelper.log_config({"RequestId": "CONTAINER_INIT"}) 18 | logger.info('Logging configured') 19 | # set global to track init failures 20 | init_failed = False 21 | 22 | ci_config = """global: 23 | regions: 24 | - ap-southeast-1 25 | - ca-central-1 26 | - eu-west-2 27 | - us-east-2 28 | tests: 29 | defaults: 30 | parameter_input: aws-vpc-defaults.json 31 | template_file: aws-vpc.template""" 32 | 33 | try: 34 | # Place initialization code here 35 | s3 = client('s3') 36 | kms = client('kms') 37 | iam_client = client('iam') 38 | cc_client = client('codecommit') 39 | logger.info("Container initialization completed") 40 | except Exception as e: 41 | logger.error(e, exc_info=True) 42 | init_failed = e 43 | 44 | 45 | def get_codecommit_credentials(username, repo_name): 46 | iam_client.create_user(UserName=username) 47 | iam_client.attach_user_policy(UserName=username, 48 | PolicyArn="arn:aws:iam::aws:policy/AWSCodeCommitFullAccess") 49 | response = iam_client.create_service_specific_credential(UserName=username, 50 | ServiceName="codecommit.amazonaws.com") 51 | user_id = response['ServiceSpecificCredential']['ServiceSpecificCredentialId'] 52 | username = 
response['ServiceSpecificCredential']['ServiceUserName'] 53 | print([user_id, username]) 54 | password = response['ServiceSpecificCredential']['ServicePassword'] 55 | while not policy_propagated(repo_name): 56 | logger.info("waiting for role propagation...") 57 | sleep(15) 58 | return user_id, username, password 59 | 60 | 61 | def policy_propagated(repo_name): 62 | try: 63 | cc_client.get_repository(repositoryName=repo_name) 64 | return True 65 | except Exception as e: 66 | logger.debug(str(e), exc_info=1) 67 | return False 68 | 69 | 70 | def delete_codecommit_credentials(credential_id, username): 71 | try: 72 | iam_client.delete_service_specific_credential(UserName=username, 73 | ServiceSpecificCredentialId=credential_id) 74 | except Exception: 75 | pass 76 | try: 77 | iam_client.detach_user_policy(UserName=username, 78 | PolicyArn="arn:aws:iam::aws:policy/AWSCodeCommitFullAccess") 79 | except Exception: 80 | pass 81 | iam_client.delete_user(UserName=username) 82 | 83 | 84 | def create_repo(repo_path): 85 | if os.path.exists(repo_path): 86 | logger.info('Cleaning up repo path...') 87 | shutil.rmtree(repo_path) 88 | repo = init_repository(repo_path) 89 | return repo 90 | 91 | 92 | def pull_repo(source_url, remote_branch): 93 | src_prefix = ".".join(source_url.split('.')[:-1]) 94 | src_suffix = source_url.split('/')[-1] 95 | repo = clone_repository(src_prefix, "/tmp/" + src_suffix, bare=False, remote=init_remote) 96 | branch_ref = repo.lookup_reference('refs/heads/%s' % remote_branch) 97 | repo.checkout_tree(repo.get(branch_ref.target)) 98 | branch_ref.set_target(branch_ref.target) 99 | repo.head.set_target(branch_ref.target) 100 | return repo 101 | 102 | 103 | def setup_ci_config(repo, remote_branch): 104 | f = open('/tmp/src/ci/config.yml', 'w') 105 | f.write(ci_config) 106 | f.close() 107 | index = repo.index 108 | index.add_all() 109 | index.write() 110 | tree = index.write_tree() 111 | author = Signature('Template Validation Pipeline Clone', 'demo@solutions.amazonaws') 112 | repo.create_commit('refs/heads/%s' % remote_branch, author, author, 'limit ci to defaults', tree, [repo.head.get_object().hex]) 113 | 114 | 115 | def push_repo(repo, remote_url, creds, remote_branch): 116 | try: 117 | repo.remotes.set_url('origin', remote_url) 118 | repo.remotes.set_push_url('origin', remote_url) 119 | repo.remotes[0].push(['refs/heads/%s' % remote_branch], creds) 120 | return True 121 | except GitError as e: 122 | logger.info(e) 123 | if e.args[0] == 'Unexpected HTTP status code: 403': 124 | return False 125 | 126 | 127 | def init_remote(repo, name, url): 128 | remote = repo.remotes.create(name, url, "+refs/*:refs/*") 129 | mirror_var = "remote.{}.mirror".format(name) 130 | repo.config[mirror_var] = True 131 | return remote 132 | 133 | 134 | def s3_region_url(): 135 | region_session = session.Session() 136 | region = region_session.region_name 137 | if region == 'us-east-1': 138 | return 's3.amazonaws.com' 139 | else: 140 | return 's3-' + region + '.amazonaws.com' 141 | 142 | 143 | def create(event, context): 144 | """ 145 | Place your code to handle Create events here 146 | """ 147 | logger.info(event) 148 | physical_resource_id = 'myResourceId' 149 | response_data = {} 150 | source_url = event['ResourceProperties']['SourceRepoUrl'] 151 | source_branch = event['ResourceProperties']['SourceRepoBranch'] 152 | source_bucket = event['ResourceProperties']['SourceS3Bucket'] 153 | source_key = event['ResourceProperties']['SourceS3Key'] 154 | s3_zip_filename = source_key.split('/')[-1] 155 | dest_url = 
event['ResourceProperties']['DestRepoUrl'] 156 | repo_name = event['ResourceProperties']['DestRepoName'] 157 | username = event['ResourceProperties']['DestRepoName'] 158 | if len(username) >= 64: 159 | raise Exception('Username is longer than 64 chars') 160 | user_id, codecommit_username, password = get_codecommit_credentials(username, repo_name) 161 | try: 162 | creds = RemoteCallbacks(credentials=UserPass(codecommit_username, password)) 163 | if source_url != "": 164 | repo = pull_repo(source_url, source_branch) 165 | # Uncomment the next line if you want to update your ci files to a minimal default 166 | # setup_ci_config(repo, source_branch) 167 | else: 168 | # Fetch source from S3 169 | repo = create_repo('/tmp/s3source') 170 | r = requests.get('https://' + s3_region_url() + '/' + source_bucket + '/' + source_key, stream=True) 171 | if r.status_code == 200: 172 | with open('/tmp/' + s3_zip_filename, 'wb') as f: 173 | for chunk in r: 174 | f.write(chunk) 175 | else: 176 | raise Exception("cannot fetch zip, s3 returned %s: %s" % (r.status_code, r.reason)) 177 | zip = zipfile.ZipFile('/tmp/' + s3_zip_filename) 178 | zip.extractall(path='/tmp/s3source') 179 | author = Signature('Template Validation Pipeline Clone', 'demo@solutions.amazonaws') 180 | tree = repo.TreeBuilder().write() 181 | repo.create_commit('HEAD', author, author, 'initial commit', tree, []) 182 | index = repo.index 183 | index.add_all() 184 | index.write() 185 | tree = index.write_tree() 186 | repo.create_commit('refs/heads/%s' % source_branch, author, author, 'initial commit', tree, 187 | [repo.head.get_object().hex]) 188 | while not push_repo(repo, dest_url, creds, source_branch): 189 | logger.info("waiting for git credential propagation...") 190 | sleep(5) 191 | except Exception: 192 | logger.error("Unhandled exception: ", exc_info=1) 193 | raise 194 | delete_codecommit_credentials(user_id, username) 195 | return physical_resource_id, response_data 196 | 197 | 198 | def update(event, context): 199 | """ 200 | Place your code to handle Update events here 201 | """ 202 | physical_resource_id = event['PhysicalResourceId'] 203 | response_data = {} 204 | return physical_resource_id, response_data 205 | 206 | 207 | def delete(event, context): 208 | """ 209 | Place your code to handle Delete events here 210 | """ 211 | username = event['ResourceProperties']['DestRepoName'] 212 | try: 213 | response = iam_client.list_service_specific_credentials(UserName=username, 214 | ServiceName="codecommit.amazonaws.com") 215 | credential_id = response['ServiceSpecificCredentials'][0]['ServiceSpecificCredentialId'] 216 | except Exception: 217 | pass 218 | try: 219 | iam_client.delete_service_specific_credential(UserName=username, 220 | ServiceSpecificCredentialId=credential_id) 221 | except Exception: 222 | pass 223 | try: 224 | iam_client.detach_user_policy(UserName=username, 225 | PolicyArn="arn:aws:iam::aws:policy/AWSCodeCommitFullAccess") 226 | except Exception: 227 | pass 228 | try: 229 | iam_client.delete_user(UserName=username) 230 | except Exception: 231 | pass 232 | return 233 | 234 | 235 | def lambda_handler(event, context): 236 | """ 237 | Main handler function, passes off its work to crhelper's cfn_handler 238 | """ 239 | # update the logger with event info 240 | global logger 241 | logger = crhelper.log_config(event) 242 | return crhelper.cfn_handler(event, context, create, update, delete, logger, 243 | init_failed) 244 | --------------------------------------------------------------------------------
/code/lambda_functions/git_pull/requirements.txt: -------------------------------------------------------------------------------- 1 | crhelper 2 | pygit2 3 | -------------------------------------------------------------------------------- /code/lambda_functions/lib/awsclients/__init__.py: -------------------------------------------------------------------------------- 1 | from awsclients import AwsClients -------------------------------------------------------------------------------- /code/lambda_functions/lib/awsclients/awsclients.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import botocore 3 | import os 4 | from threading import Lock 5 | from time import sleep 6 | 7 | 8 | class AwsClients(object): 9 | 10 | """Manages creating and caching boto3 clients""" 11 | 12 | def __init__(self, logger): 13 | """Sets up the cache dict, a locking mechanism and the logging object 14 | 15 | Args: 16 | logger (obj): a logging instance 17 | """ 18 | 19 | self._clients = {"default_role": {}} 20 | self._lock = Lock() 21 | self.logger = logger 22 | return 23 | 24 | def get(self, service, region=None, role='default_role', access_key=None, secret_key=None, 25 | session_token=None, s3v4=False): 26 | """Gets a client for a given service and region, optionally with specific role, credentials and/or sig version 27 | 28 | Args: 29 | service (str): service name 30 | region (str): [optional] region name, defaults to current region 31 | role (str): [optional] descriptive role name used to separate different sets of credentials for the same service/region, defaults to default_role which uses the AWS Lambda execution role 32 | access_key (str): [optional] IAM access key, defaults to None (uses execution role creds) 33 | secret_key (str): [optional] IAM secret key, defaults to None (uses execution role creds) 34 | session_token (str): [optional] IAM session token, defaults to None (uses execution role creds) 35 | s3v4 (bool): [optional] when True enables signature_version=s3v4 which is required for SSE protected buckets/objects 36 | 37 | Returns: 38 | class: boto3 client 39 | """ 40 | 41 | if not region: 42 | self.logger.debug("Region not set explicitly, getting default region") 43 | region = os.environ['AWS_DEFAULT_REGION'] 44 | s3v4 = 's3v4' if s3v4 else 'default_sig_version' 45 | try: 46 | self.logger.debug("Trying to get [%s][%s][%s][%s]" % (role, region, service, s3v4)) 47 | client = self._clients[role][region][service][s3v4] 48 | if access_key: 49 | if self._clients[role][region]['session'].get_credentials().access_key != access_key: 50 | self.logger.debug("credentials changed, forcing update...") 51 | raise KeyError("New credentials for this role, need a new session.") 52 | return client 53 | except KeyError: 54 | self.logger.debug("Couldn't return an existing client, making a new one...") 55 | if role not in self._clients.keys(): 56 | self._clients[role] = {} 57 | if region not in self._clients[role].keys(): 58 | self._clients[role][region] = {} 59 | if service not in self._clients[role][region].keys(): 60 | self._clients[role][region][service] = {} 61 | if 'session' not in self._clients[role][region].keys(): 62 | self._clients[role][region]['session'] = self._create_session(region, access_key, secret_key, 63 | session_token) 64 | self._clients[role][region][service][s3v4] = self._create_client(role, region, service, s3v4) 65 | return self._clients[role][region][service][s3v4] 66 | 67 | def _create_session(self, region, access_key, secret_key, session_token):
68 | """creates a boto3 session object 69 | 70 | Args: 71 | region (str): region name 72 | access_key (str): [optional] IAM access key, defaults to None (uses execution role creds) 73 | secret_key (str): [optional] IAM secret key, defaults to None (uses execution role creds) 74 | session_token (str): [optional] IAM session token, defaults to None (uses execution role creds) 75 | """ 76 | session = None 77 | retry = 0 78 | max_retries = 4 79 | while not session: 80 | try: 81 | with self._lock: 82 | if access_key and secret_key and session_token: 83 | session = boto3.session.Session( 84 | aws_access_key_id=access_key, 85 | aws_secret_access_key=secret_key, 86 | aws_session_token=session_token, 87 | region_name=region 88 | ) 89 | else: 90 | session = boto3.session.Session(region_name=region) 91 | return session 92 | except Exception: 93 | self.logger.debug("failed to create session", exc_info=1) 94 | retry += 1 95 | if retry >= max_retries: 96 | raise 97 | sleep(5*(retry**2)) 98 | 99 | def _create_client(self, role, region, service, s3v4): 100 | """creates a boto3 client object 101 | 102 | Args: 103 | role (str): role descriptor 104 | region (str): region name 105 | service (str): AWS service name 106 | s3v4 (bool): when True enables signature_version=s3v4 which is required for SSE protected buckets/objects 107 | """ 108 | client = None 109 | retry = 0 110 | max_retries = 4 111 | while not client: 112 | try: 113 | with self._lock: 114 | if s3v4 == 's3v4': 115 | client = self._clients[role][region]['session'].client( 116 | service, 117 | config=botocore.client.Config(signature_version='s3v4') 118 | ) 119 | else: 120 | client = self._clients[role][region]['session'].client(service) 121 | return client 122 | except Exception: 123 | self.logger.debug("failed to create client", exc_info=1) 124 | retry += 1 125 | if retry >= max_retries: 126 | raise 127 | sleep(5*(retry**2)) 128 | 129 | def get_available_regions(self, service): 130 | """Fetches available regions for a service 131 | 132 | Args: 133 | service (str): AWS service name 134 | 135 | Returns: 136 | list: AWS Region name strings 137 | """ 138 | 139 | for role in self._clients.keys(): 140 | for region in self._clients[role].keys(): 141 | if 'session' in self._clients[role][region].keys(): 142 | return self._clients[role][region]['session'].get_available_regions(service) 143 | session = boto3.session.Session() 144 | return session.get_available_regions(service) 145 | -------------------------------------------------------------------------------- /code/lambda_functions/lib/cfnpipeline/__init__.py: -------------------------------------------------------------------------------- 1 | from cfnpipeline import CFNPipeline 2 | -------------------------------------------------------------------------------- /code/lambda_functions/lib/crhelper.py: -------------------------------------------------------------------------------- 1 | ################################################################################################### 2 | #### Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | #### 4 | #### Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file 5 | #### except in compliance with the License. A copy of the License is located at 6 | #### 7 | #### http://aws.amazon.com/apache2.0/ 8 | #### 9 | #### or in the "license" file accompanying this file.

from __future__ import print_function
import traceback
import logging
import threading
from time import sleep
from datetime import datetime
from botocore.vendored import requests
import json


def log_config(event, loglevel=None, botolevel=None):
    if 'ResourceProperties' in event.keys():
        if 'loglevel' in event['ResourceProperties'] and not loglevel:
            loglevel = event['ResourceProperties']['loglevel']
        if 'botolevel' in event['ResourceProperties'] and not botolevel:
            botolevel = event['ResourceProperties']['botolevel']
    if not loglevel:
        loglevel = 'warning'
    if not botolevel:
        botolevel = 'error'
    # Set log verbosity levels
    loglevel = getattr(logging, loglevel.upper(), 20)
    botolevel = getattr(logging, botolevel.upper(), 40)
    mainlogger = logging.getLogger()
    mainlogger.setLevel(loglevel)
    logging.getLogger('boto3').setLevel(botolevel)
    logging.getLogger('botocore').setLevel(botolevel)
    # Set log message format
    #logfmt = '[%(requestid)s][%(asctime)s][%(levelname)s] %(message)s \n'
    #if len(mainlogger.handlers) == 0:
    #    mainlogger.addHandler(logging.StreamHandler())
    #mainlogger.handlers[0].setFormatter(logging.Formatter(logfmt))
    return logging.LoggerAdapter(mainlogger, {'requestid': event['RequestId']})


def send(event, context, responseStatus, responseData, physicalResourceId,
         logger, reason=None):

    responseUrl = event['ResponseURL']
    logger.debug("CFN response URL: " + responseUrl)

    responseBody = {}
    responseBody['Status'] = responseStatus
    msg = 'See details in CloudWatch Log Stream: ' + context.log_stream_name
    if not reason:
        responseBody['Reason'] = msg
    else:
        responseBody['Reason'] = str(reason)[0:255] + '... ' + msg
    responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
    responseBody['StackId'] = event['StackId']
    responseBody['RequestId'] = event['RequestId']
    responseBody['LogicalResourceId'] = event['LogicalResourceId']
    if responseData and responseData != {} and responseData != [] and isinstance(responseData, dict):
        responseBody['Data'] = responseData

    json_responseBody = json.dumps(responseBody)

    logger.debug("Response body:\n" + json_responseBody)

    headers = {
        'content-type': '',
        'content-length': str(len(json_responseBody))
    }

    try:
        response = requests.put(responseUrl,
                                data=json_responseBody,
                                headers=headers)
        logger.info("CloudFormation returned status code: " + response.reason)
    except Exception as e:
        logger.error("send(..) failed executing requests.put(..): " + str(e))
        raise


# Function that executes just before lambda execution times out
def timeout(event, context, logger):
    logger.error("Execution is about to time out, sending failure message")
    send(event, context, "FAILED", None, None, reason="Execution timed out",
         logger=logger)


# Handler function
def cfn_handler(event, context, create, update, delete, logger, init_failed):

    logger.info("Lambda RequestId: %s CloudFormation RequestId: %s" %
                (context.aws_request_id, event['RequestId']))

    # Define an object to place any response information you would like to send
    # back to CloudFormation (these keys can then be used by Fn::GetAtt)
    responseData = {}

    # Define a physicalId for the resource; if the event is an update and the
    # returned physical id changes, CloudFormation will then issue a delete
    # against the old id
    physicalResourceId = None

    logger.debug("EVENT: " + str(event))
    # handle init failures
    if init_failed:
        send(event, context, "FAILED", responseData, physicalResourceId,
             logger, reason=init_failed)
        raise Exception("Initialization failed: %s" % str(init_failed))

    # Setup timer to catch timeouts
    t = threading.Timer((context.get_remaining_time_in_millis()/1000.00)-0.5,
                        timeout, args=[event, context, logger])
    t.start()

    try:
        # Execute custom resource handlers
        logger.info("Received a %s Request" % event['RequestType'])
        if event['RequestType'] == 'Create':
            physicalResourceId, responseData = create(event, context)
        elif event['RequestType'] == 'Update':
            physicalResourceId, responseData = update(event, context)
        elif event['RequestType'] == 'Delete':
            delete(event, context)

        # Send response back to CloudFormation
        logger.info("Completed successfully, sending response to cfn")
        send(event, context, "SUCCESS", responseData, physicalResourceId,
             logger=logger)

    # Catch any exceptions, log the stacktrace, send a failure back to
    # CloudFormation and then raise an exception
    except Exception as e:
        logger.error(e, exc_info=True)
        send(event, context, "FAILED", responseData, physicalResourceId,
             reason=e, logger=logger)
        raise
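
# Usage sketch (illustrative): a custom resource lambda built on this helper
# defines create/update/delete callables, each returning a physical resource
# id and a data dict for Fn::GetAtt, and hands them to cfn_handler:
#
#   import crhelper
#
#   def create(event, context):
#       return "my-physical-id", {"SomeAttribute": "value"}
#
#   def update(event, context):
#       return event['PhysicalResourceId'], {}
#
#   def delete(event, context):
#       return
#
#   def lambda_handler(event, context):
#       logger = crhelper.log_config(event)
#       crhelper.cfn_handler(event, context, create, update, delete, logger,
#                            init_failed=False)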
--------------------------------------------------------------------------------
/code/lambda_functions/lib/logger/__init__.py:
--------------------------------------------------------------------------------
from logger import Logger
--------------------------------------------------------------------------------
/code/lambda_functions/lib/logger/logger.py:
--------------------------------------------------------------------------------
import json
import logging


class Logger(object):
    """Wrapper for a logging object that logs in json and appends AWS CodePipeline identifiers to each log message"""

    def __init__(self, request_id='CONTAINER_INIT', original_job_id=None, job_id=None,
                 loglevel='warning', botolevel='critical'):
        """Initializes logging with minimal settings of request_id, original_job_id and job_id"""
        self.request_id = request_id
        self.original_job_id = original_job_id
        self.job_id = job_id
        self.config(request_id, original_job_id, job_id, loglevel=loglevel, botolevel=botolevel)
        return

    def config(self, request_id='CONTAINER_INIT', original_job_id=None, job_id=None,
               artifact_revision_id=None, 
pipeline_execution_id=None, pipeline_action=None, 19 | stage_name=None, pipeline_name=None, loglevel='warning', botolevel='critical'): 20 | """Configures logging object 21 | 22 | Args: 23 | request_id (str): lambda request id. 24 | original_job_id (str): [optional] pipeline job_id from first request in this run. 25 | job_id (str): [optional] pipeline job_id for the current invocation (differs from original_job_id if this is 26 | a continuation invocation). 27 | artifact_revision_id (str): [optional] commit id for current revision. 28 | pipeline_execution_id (str): [optional] pipeline execution id (same for all actions/stages in this pipeline 29 | run). 30 | pipeline_action (str): [optional] pipeline action name. 31 | stage_name (str): [optional] pipeline stage name. 32 | pipeline_name (str): [optional] pipeline name. 33 | loglevel (str): [optional] logging verbosity, defaults to warning. 34 | botolevel (str): [optional] boto logging verbosity, defaults to critical. 35 | """ 36 | 37 | loglevel = getattr(logging, loglevel.upper(), 20) 38 | botolevel = getattr(logging, botolevel.upper(), 40) 39 | mainlogger = logging.getLogger() 40 | mainlogger.setLevel(loglevel) 41 | logging.getLogger('boto3').setLevel(botolevel) 42 | logging.getLogger('botocore').setLevel(botolevel) 43 | logging.getLogger('nose').setLevel(botolevel) 44 | logging.getLogger('s3transfer').setLevel(botolevel) 45 | logfmt = '{"time_stamp": "%(asctime)s", "log_level": "%(levelname)s", "data": %(message)s}\n' 46 | if len(mainlogger.handlers) == 0: 47 | mainlogger.addHandler(logging.StreamHandler()) 48 | mainlogger.handlers[0].setFormatter(logging.Formatter(logfmt)) 49 | self.log = logging.LoggerAdapter(mainlogger, {}) 50 | self.request_id = request_id 51 | self.original_job_id = original_job_id 52 | self.job_id = job_id 53 | self.pipeline_execution_id = pipeline_execution_id 54 | self.artifact_revision_id = artifact_revision_id 55 | self.pipeline_action = pipeline_action 56 | self.stage_name = stage_name 57 | self.pipeline_name = pipeline_name 58 | 59 | def set_boto_level(self, botolevel): 60 | """Sets boto logging level 61 | 62 | Args: 63 | botolevel (str): boto3 logging verbosity (critical|error|warning|info|debug) 64 | """ 65 | 66 | botolevel = getattr(logging, botolevel.upper(), 40) 67 | logging.getLogger('boto3').setLevel(botolevel) 68 | logging.getLogger('botocore').setLevel(botolevel) 69 | logging.getLogger('nose').setLevel(botolevel) 70 | logging.getLogger('s3transfer').setLevel(botolevel) 71 | return 72 | 73 | def _format(self, message): 74 | """formats log message in json 75 | 76 | Args: 77 | message (str): log message, can be a dict, list, string, or json blob 78 | """ 79 | 80 | try: 81 | message = json.loads(message) 82 | except Exception: 83 | pass 84 | try: 85 | return json.dumps({ 86 | 'request_id': self.request_id, 'original_job_id': self.original_job_id, 87 | 'pipeline_execution_id': self.pipeline_execution_id, 'pipeline_name': self.pipeline_name, 88 | 'stage_name': self.stage_name, 'artifact_revision_id': self.artifact_revision_id, 89 | 'pipeline_action': self.pipeline_action, 'job_id': self.job_id, "message": message 90 | }) 91 | except Exception: 92 | return json.dumps({ 93 | 'request_id': self.request_id, 'original_job_id': self.original_job_id, 94 | 'pipeline_execution_id': self.pipeline_execution_id, 'pipeline_name': self.pipeline_name, 95 | 'stage_name': self.stage_name, 'artifact_revision_id': self.artifact_revision_id, 96 | 'pipeline_action': self.pipeline_action, 'job_id': self.job_id, "message": 
str(message) 97 | }) 98 | 99 | def debug(self, message, **kwargs): 100 | """Wrapper for logging.debug call""" 101 | self.log.debug(self._format(message), **kwargs) 102 | 103 | def info(self, message, **kwargs): 104 | """Wrapper for logging.info call""" 105 | self.log.info(self._format(message), **kwargs) 106 | 107 | def warning(self, message, **kwargs): 108 | """Wrapper for logging.warning call""" 109 | self.log.warning(self._format(message), **kwargs) 110 | 111 | def error(self, message, **kwargs): 112 | """Wrapper for logging.error call""" 113 | self.log.error(self._format(message), **kwargs) 114 | 115 | def critical(self, message, **kwargs): 116 | """Wrapper for logging.critical call""" 117 | self.log.critical(self._format(message), **kwargs) 118 | -------------------------------------------------------------------------------- /code/lambda_functions/lint_template/lint_template.py: -------------------------------------------------------------------------------- 1 | import time 2 | from awsclients import AwsClients 3 | import botocore 4 | from cfnpipeline import CFNPipeline 5 | from logger import Logger 6 | import os 7 | 8 | loglevel = 'debug' 9 | logger = Logger(loglevel=loglevel) 10 | logger.info('New Lambda container initialised, logging configured.') 11 | clients = AwsClients(logger) 12 | pipeline_run = CFNPipeline(logger, clients) 13 | 14 | 15 | def get_templates(configs): 16 | templates = [] 17 | for artifact in configs.keys(): 18 | for config in configs[artifact]: 19 | for test in config['tests'].keys(): 20 | template = [artifact, config['tests'][test]['template_file']] 21 | if template not in templates: 22 | templates.append(template) 23 | return templates 24 | 25 | 26 | def lint_template(artifact, template_name, pipeline_name, cfn_nag_version): 27 | codebuild_client = clients.get('codebuild') 28 | try: 29 | build_response = codebuild_client.start_build(projectName='CFN-Lint-' + pipeline_name, 30 | buildspecOverride="""version: 0.1 31 | 32 | phases: 33 | install: 34 | commands: 35 | - apt-get -y update 36 | - apt-get -y install ruby-full 37 | - apt-get -y install jq 38 | - gem install cfn-nag""" + cfn_nag_version + """ 39 | pre_build: 40 | commands: 41 | - echo Nothing to do in the pre_build phase... 
42 | build: 43 | commands: 44 | - echo Build started on `date` 45 | - cfn_nag_scan --input-path templates/""" + template_name + """ --debug 46 | post_build: 47 | commands: 48 | - echo Build completed on `date`""") 49 | 50 | build_id = build_response['build']['id'] 51 | build_status = build_response['build']['buildStatus'] 52 | 53 | while build_status == 'IN_PROGRESS': 54 | time.sleep(5) 55 | check_response = {'builds': [{}]} 56 | retry = 0 57 | while not ('phases' in check_response['builds'][0] and 'buildStatus' in check_response['builds'][0]): 58 | if retry > 4: 59 | raise KeyError("Cannot get buildStatus or phases from CodeBuild response") 60 | elif retry > 0: 61 | time.sleep(10) 62 | retry += 1 63 | check_response = codebuild_client.batch_get_builds(ids=[build_id]) 64 | build_status = check_response['builds'][0]['buildStatus'] 65 | phases = check_response['builds'][0]['phases'] 66 | print(check_response) 67 | 68 | if build_status != 'SUCCEEDED': 69 | error_message = 'linting of template ' + template_name + ' failed' 70 | for phase in phases: 71 | if 'phaseStatus' in phase and phase['phaseStatus'] != 'SUCCEEDED': 72 | for context in phase['contexts']: 73 | error_message += context['message'] + ' - ' + context['statusCode'] 74 | return error_message 75 | 76 | except botocore.exceptions.ClientError as exception: 77 | return exception.message 78 | 79 | 80 | def lambda_handler(event, context): 81 | try: 82 | logger.config(context.aws_request_id) 83 | logger.debug("Handler starting...") 84 | logger.debug(event) 85 | pipeline_run.consume_event(event, context, loglevel=loglevel) 86 | logger.info({'event': 'new_invoke'}) 87 | errors = [] 88 | successes = [] 89 | cfn_nag_version = os.environ['TOOLVERSION'] 90 | if cfn_nag_version.lower() == 'latest': 91 | cfn_nag_version = "" 92 | else: 93 | cfn_nag_version = ' -v %s' % cfn_nag_version 94 | for artifact, template_name in get_templates(pipeline_run.ci_configs): 95 | lint_failed = lint_template(artifact, template_name, pipeline_run.pipeline_name, cfn_nag_version) 96 | if lint_failed: 97 | errors.append([artifact, template_name, lint_failed]) 98 | else: 99 | successes.append('%s/%s' % (artifact, template_name)) 100 | if len(errors) > 0: 101 | msg = "%s lint failures %s" % (len(errors), errors) 102 | pipeline_run.put_job_failure(msg) 103 | logger.error(msg) 104 | else: 105 | pipeline_run.put_job_success("Successfully linted: %s" % successes) 106 | logger.info("Successfully linted: %s" % successes) 107 | except Exception as exception: 108 | logger.error("unhandled exception!", exc_info=1) 109 | pipeline_run.put_job_failure(str(exception)) 110 | -------------------------------------------------------------------------------- /code/lambda_functions/lint_template/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 | logger 4 | pyyaml 5 | -------------------------------------------------------------------------------- /code/lambda_functions/stack_cleanup/stack_cleanup.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from datetime import datetime 3 | from datetime import timedelta 4 | import json 5 | from random import randrange 6 | from time import sleep 7 | 8 | 9 | iam_client = boto3.client('iam') 10 | 11 | 12 | def get_regions(region, service): 13 | if region == 'ALL': 14 | s = boto3.session.Session(region_name='us-east-1') 15 | return s.get_available_regions(service) 16 | else: 17 | return [region] 18 | 19 | 20 | def 
get_all_pipelines(region): 21 | regions = get_regions(region, 'codepipeline') 22 | pipelines = {} 23 | for region in regions: 24 | cp_client = boto3.client('codepipeline', region_name=region) 25 | response = cp_client.list_pipelines() 26 | for pipeline in response['pipelines']: 27 | matched = False 28 | pl_detail = cp_client.get_pipeline(name=pipeline['name']) 29 | for stage in pl_detail['pipeline']['stages']: 30 | for action in stage['actions']: 31 | if 'UserParameters' in action['configuration']: 32 | try: 33 | params = json.loads(action['configuration']['UserParameters']).keys() 34 | if 'CleanupNonFailed' in params and 'StackCreationRoleArn' in params: 35 | matched = True 36 | except ValueError as e: 37 | if e.args[0] != 'No JSON object could be decoded': 38 | raise 39 | 40 | if matched: 41 | pipelines[pipeline['name']] = region 42 | return pipelines 43 | 44 | 45 | def _describe_stacks(cfn_client, stackname, retries=10, backoff=1.2, delay=5, jitter=True): 46 | while retries > 0: 47 | retries -= 1 48 | try: 49 | return cfn_client.describe_stacks(StackName=stackname) 50 | except Exception as e: 51 | if "Rate exceeded" in e.response['Error']['Message']: 52 | if jitter: 53 | delay = int(delay * backoff) + randrange(0, 10) 54 | else: 55 | delay = int(delay * backoff) 56 | sleep(delay) 57 | else: 58 | raise 59 | 60 | 61 | def get_all_stacks(): 62 | regions = get_regions('ALL', 'cloudformation') 63 | stacks = {} 64 | for region in regions: 65 | stacks[region] = [] 66 | cfn_client = boto3.client('cloudformation', region_name=region) 67 | response = cfn_client.list_stacks(StackStatusFilter=[ 68 | 'CREATE_FAILED', 'CREATE_COMPLETE', 'ROLLBACK_COMPLETE', 69 | 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'DELETE_FAILED']) 70 | for stack in response['StackSummaries']: 71 | describe_response = _describe_stacks(cfn_client, stack['StackName']) 72 | for tag in describe_response['Stacks'][0]['Tags']: 73 | if tag['Key'] == 'cfn_cicd_pipeline': 74 | stacks[region].append({ 75 | 'name': stack['StackName'], 'pipeline': tag['Value'], 76 | 'status': stack['StackStatus'], 'created': stack['CreationTime'].replace(tzinfo=None), 77 | 'tags': describe_response['Stacks'][0]['Tags'], 'region': region}) 78 | return stacks 79 | 80 | 81 | def get_all_keypairs(): 82 | regions = get_regions('ALL', 'ec2') 83 | key_pairs = {} 84 | for region in regions: 85 | key_pairs[region] = [] 86 | ec2_client = boto3.client('ec2', region_name=region) 87 | response = ec2_client.describe_key_pairs() 88 | for kp in response['KeyPairs']: 89 | if kp['KeyName'].startswith('ci-'): 90 | key_pairs[region].append(kp['KeyName']) 91 | return key_pairs 92 | 93 | 94 | def iter_stacks(stacks, filter_func, filter_val): 95 | filtered_stacks = {} 96 | for region in stacks.keys(): 97 | filtered_stacks[region] = [] 98 | for stack in stacks[region]: 99 | if filter_func(stack, filter_val): 100 | filtered_stacks[region].append(stack) 101 | return filtered_stacks 102 | 103 | 104 | def filter_pipeline_name(stack, pipeline_name): 105 | for tag in stack['tags']: 106 | if tag['Key'] == 'cfn_cicd_pipeline' and tag['Value'] == pipeline_name: 107 | return True 108 | return False 109 | 110 | 111 | def filter_failed(stack, failed): 112 | if stack['status'] not in ['CREATE_FAILED', 'ROLLBACK_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']: 113 | return True 114 | return False 115 | 116 | 117 | def filter_age(stack, age): 118 | min_age = datetime.utcnow() - timedelta(days=age) 119 | if stack['created'] < min_age: 120 | return True 121 | return False 122 | 123 | 124 | def 
filter_latest(stack, pipelines):
    pipeline_name = None
    execution_id = None
    for tag in stack['tags']:
        if tag['Key'] == 'cfn_cicd_pipeline':
            pipeline_name = tag['Value']
        elif tag['Key'] == 'cfn_cicd_executionid':
            execution_id = tag['Value']
    if pipeline_name not in pipelines.keys():
        return False
    cp_client = boto3.client('codepipeline', region_name=pipelines[pipeline_name])
    response = cp_client.get_pipeline_state(name=pipeline_name)
    if response['stageStates'][0]['latestExecution']['pipelineExecutionId'] == execution_id:
        return False
    return True


def delete_stacks(stacks):
    for region in stacks.keys():
        if len(stacks[region]) > 0:
            cfn_client = boto3.client('cloudformation', region_name=region)
            for stack in stacks[region]:
                print('deleting stack %s in %s from pipeline %s' % (stack['name'], region, stack['pipeline']))
                try:
                    cfn_client.delete_stack(StackName=stack['name'])
                except Exception as e:
                    if e.args[0].endswith('is invalid or cannot be assumed'):
                        try:
                            arn = get_role_arn()
                            cfn_client.delete_stack(StackName=stack['name'], RoleARN=arn)
                        except Exception as e:
                            print('Failed to delete stack %s' % (str(stack)))
                            print(str(e))
                    else:
                        print('Failed to delete stack %s' % (str(stack)))
                        print(str(e))


def get_role_arn():
    try:
        return iam_client.get_role(RoleName='TemplateCI-StackCleanUp')['Role']['Arn']
    except Exception:
        return "NoValidRoles"


def delete_keypairs(region, keypairs):
    ec2_client = boto3.client('ec2', region_name=region)
    for kp in keypairs:
        ec2_client.delete_key_pair(KeyName=kp)


def lambda_handler(event, context):
    print(event)
    pipeline = event['pipeline']
    region = event['region']
    age = int(event['age'])
    failed = event['failed']
    latest = event['latest']

    print('Getting stacks...')
    stacks = get_all_stacks()

    print("Cleanup orphaned stacks...")
    orphaned = {}
    pipelines = get_all_pipelines('ALL')
    # use separate loop variables so the region requested in the event is preserved
    for r in stacks.keys():
        for stack in stacks[r]:
            if stack['pipeline'] not in pipelines.keys():
                try:
                    orphaned[r].append(stack)
                except Exception:
                    orphaned[r] = [stack]
                print(stack['pipeline'], pipelines.keys())
                print("stack %s is orphaned" % stack['name'])
    delete_stacks(orphaned)

    print("Cleanup keypairs...")
    key_pairs = get_all_keypairs()
    for r in key_pairs.keys():
        kp_to_delete = []
        for kp in key_pairs[r]:
            stack_list = [s['name'] for s in stacks[r]]
            if kp not in stack_list:
                kp_to_delete.append(kp)
        if len(kp_to_delete) > 0:
            delete_keypairs(r, kp_to_delete)

    print('getting pipelines...')
    pipelines = get_all_pipelines(region)
    filtered_stacks = stacks
    if pipeline:
        print('Filtering results to specific pipeline')
        filtered_stacks = iter_stacks(filtered_stacks, filter_pipeline_name, pipeline)
    if not failed:
        print('Filtering results to exclude failed stacks')
        filtered_stacks = iter_stacks(filtered_stacks, filter_failed, failed)
    if age > 0:
        print('Filtering results to exclude stacks younger than %s days' % str(age))
        filtered_stacks = iter_stacks(filtered_stacks, filter_age, age)
    if latest:
        print('Filtering results to exclude most recent pipeline execution')
        filtered_stacks = iter_stacks(filtered_stacks, filter_latest, pipelines)
    delete_stacks(filtered_stacks)
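
# Example invocation event (illustrative) showing the keys lambda_handler
# expects:
#
#   {
#       "pipeline": "",     # limit cleanup to one pipeline name, "" for all
#       "region": "ALL",    # region containing the pipeline(s), or "ALL"
#       "age": 2,           # only delete stacks older than this many days
#       "failed": false,    # when false, stacks in a failed state are kept
#       "latest": true      # when true, stacks from the most recent pipeline
#                           # execution are kept
#   }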
--------------------------------------------------------------------------------
/code/lambda_functions/subnet_name/requirements.txt:
--------------------------------------------------------------------------------
awsclients
cfnpipeline
logger
pyyaml
--------------------------------------------------------------------------------
/code/lambda_functions/subnet_name/subnet_name.py:
--------------------------------------------------------------------------------
from awsclients import AwsClients
from cfnpipeline import CFNPipeline
from logger import Logger

# Initialise logging
loglevel = 'debug'
logger = Logger(loglevel=loglevel)
logger.info('New Lambda container initialised, logging configured.')

# Initialise boto client factory
clients = AwsClients(logger)

# Initialise CFNPipeline helper library
pipeline_run = CFNPipeline(logger, clients)


def test_subnet_name(region, stackid, logical_resource_id, physical_resource_id):

    logger.debug({"test_subnet_name": "starting"})
    # Create an empty list to put errors into (so you can have more than 1 error per test)
    error_msg = []

    # Setup our output object, if there are errors we will flip the "success" key to False
    results = {"success": True,
               "logical_resource_id": logical_resource_id,
               "physical_resource_id": physical_resource_id,
               "region": region,
               "stackid": stackid}

    try:
        # Get a boto3 client and describe the subnet
        ec2_client = clients.get('ec2', region)
        response = ec2_client.describe_subnets(SubnetIds=[physical_resource_id])
        logger.debug({"test_subnet_name:describe_subnets": response})

        # Initialise subnet_type
        subnet_type = None

        # If there are no tags the subnet has no name, so raise an exception
        if 'Tags' not in response['Subnets'][0].keys():
            raise ValueError('Subnet name is not defined')

        # Loop through subnet tags looking for the "Name" tag
        for tag in response['Subnets'][0]['Tags']:
            if tag['Key'] == 'Name':
                # Check if "PRIV" or "DMZ" are in the name
                if 'PRIV' in tag['Value'] and 'DMZ' not in tag['Value']:
                    subnet_type = 'PRIV'
                elif 'DMZ' in tag['Value'] and 'PRIV' not in tag['Value']:
                    subnet_type = 'DMZ'
                # If the name contains both, that's wrong, so raise an exception
                elif 'PRIV' in tag['Value'] and 'DMZ' in tag['Value']:
                    raise ValueError('Cannot have both "PRIV" and "DMZ" in subnet name')
        # If there were no "Name" tags containing 'DMZ' or 'PRIV' raise an exception
        if not subnet_type:
            raise ValueError('Subnet name does not contain "PRIV" or "DMZ"')

    # catch all exceptions and treat them as test failures
    except Exception as e:
        # Log the exception, including a traceback
        logger.error({"test_subnet_name": str(e)}, exc_info=1)
        # If we're here the test failed, flip success to False and add exception message to the error list
        results["success"] = False
        error_msg.append({"exception": str(e)})
    finally:
        # If there are error messages, append them to the output
        if len(error_msg) > 0:
            results["error_msg"] = error_msg

    # return our findings
    return results


def lambda_handler(event, context):
    # Wrap everything in try/except to make sure we always respond to CodePipeline
    try:
        # Add pipeline execution details to logging
        logger.config(context.aws_request_id, loglevel=loglevel)
        logger.debug("Handler starting...")
        logger.debug(event)
        pipeline_run.consume_event(event, context, loglevel=loglevel)
        logger.info({'event': 'new_invoke'})
        logger.debug(pipeline_run.artifacts)
        logger.debug({'ci_configs': pipeline_run.ci_configs})

        # Call the test runner, specifying the name of the test function and the types of resources to test
        results = pipeline_run.test_stacks(test_subnet_name, resource_types=["AWS::EC2::Subnet"])

        # if there are any tests that failed return a failure to CodePipeline, else return success
        if len(results['error']) > 0:
            pipeline_run.put_job_failure("%s tests failed: %s" % (len(results['error']), results['error']))
        else:
            pipeline_run.put_job_success(results['success'])
        return

    # something went wrong that we weren't expecting, probably a fault in the test logic
    except Exception as e:
        logger.error("Unhandled exception!", exc_info=1)
        pipeline_run.put_job_failure(str(e))
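
# Wiring sketch: test_stacks() above already filters to AWS::EC2::Subnet
# resources, so a ci config in the tested repo only needs a test entry
# pointing at a template. template_file is the key this solution's config
# consumers read; the other keys shown follow the demo_source ci files and
# are illustrative:
#
#   tests:
#     defaults:
#       parameter_input: aws-vpc-defaults.json
#       template_file: aws-vpc.template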
--------------------------------------------------------------------------------
/code/lambda_functions/tcp_connect/requirements.txt:
--------------------------------------------------------------------------------
awsclients
cfnpipeline
logger
pyyaml
--------------------------------------------------------------------------------
/code/lambda_functions/tcp_connect/tcp_connect.py:
--------------------------------------------------------------------------------
from awsclients import AwsClients
import botocore
from cfnpipeline import CFNPipeline
from cStringIO import StringIO
from datetime import datetime
import json
from logger import Logger
import random
import string
from time import sleep
from zipfile import ZIP_DEFLATED
from zipfile import ZipFile
from zipfile import ZipInfo


loglevel = 'debug'
logger = Logger(loglevel=loglevel)
logger.info('New Lambda container initialised, logging configured.')
clients = AwsClients(logger)
pipeline_run = CFNPipeline(logger, clients)


def random_string(length, alphanum=True):
    additional = ''
    if not alphanum:
        additional = ';:=+!@#%%^&*()[]{}'
    chars = string.ascii_uppercase + string.ascii_lowercase + string.digits + additional
    return ''.join(random.SystemRandom().choice(chars) for _ in range(length))


function_code = """import socket
import sys

def tcp_connect(address, port):
    s = socket.socket()
    s.settimeout(4)
    print("Attempting to connect to %s on port %s" % (address, port))
    s.connect((address, port))
    print("Connection to %s on port %s succeeded" % (address, port))
    return {"success": True}

def lambda_handler(event, context):
    print(event)
    address = event['address']
    port = int(event['port'])
    try:
        return tcp_connect(address, port)
    except Exception as e:
        print("%s:%s %s" % (address, port, str(e)))
        raise
"""
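
# The inline probe above is zipped and deployed into the subnet under test by
# test_subnet_connectivity below; its invoke payload is built from the
# endpoints argument and looks like (illustrative):
#
#   {"address": "www.amazon.com", "port": "80"}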
"2012-10-17", 71 | "Statement": [ 72 | { 73 | "Sid": "VpcRequirements", 74 | "Effect": "Allow", 75 | "Action": [ 76 | "ec2:CreateNetworkInterface", 77 | "ec2:DeleteNetworkInterface", 78 | "ec2:DescribeNetworkInterfaces", 79 | "ec2:DetachNetworkInterface" 80 | ], 81 | "Resource": [ 82 | "*" 83 | ] 84 | }, 85 | { 86 | "Sid": "LogRequirements", 87 | "Effect": "Allow", 88 | "Action": [ 89 | "logs:CreateLogGroup", 90 | "logs:CreateLogStream", 91 | "logs:PutLogEvents" 92 | ], 93 | "Resource": [ 94 | "*" 95 | ] 96 | } 97 | ] 98 | }\ 99 | """ 100 | 101 | iam_role_arn = None 102 | iam_policy_arn = None 103 | 104 | 105 | def get_iam_role(): 106 | global iam_role_arn 107 | recreate = False 108 | iam_client = clients.get("iam") 109 | if not iam_role_arn: 110 | recreate = True 111 | iam_name = 'test_subnet_%s' % random_string(8) 112 | else: 113 | try: 114 | iam_name = iam_role_arn.split('/')[-1] 115 | iam_client.get_role(RoleName=iam_name) 116 | except Exception as e: 117 | logger.debug({"get_iam_role:get_role": str(e)}) 118 | recreate = True 119 | if recreate: 120 | iam_role_arn = iam_client.create_role( 121 | RoleName=iam_name, 122 | AssumeRolePolicyDocument=assume_role_policy 123 | )['Role']['Arn'] 124 | logger.debug({"get_iam_role:iam_role": iam_role_arn}) 125 | iam_client.put_role_policy(RoleName=iam_name, PolicyName=iam_name, 126 | PolicyDocument=iam_policy) 127 | return iam_role_arn 128 | 129 | 130 | def delete_iam_role(): 131 | global iam_role_arn 132 | if iam_role_arn: 133 | iam_client = clients.get("iam") 134 | iam_name = iam_role_arn.split('/')[-1] 135 | iam_client.delete_role_policy(RoleName=iam_name, PolicyName=iam_name) 136 | iam_client.delete_role(RoleName=iam_name) 137 | iam_role_arn = None 138 | return 139 | 140 | 141 | def test_subnet_connectivity(region, stackid, logical_resource_id, physical_resource_id, 142 | endpoints=[['www.amazon.com', "80"]]): 143 | logger.debug({"test_subnet_connectivity": "starting"}) 144 | error_msg = [] 145 | if region not in clients.get_available_regions('lambda'): 146 | msg = "Test for %s %s skipped, %s not supprted by lambda" % (stackid, logical_resource_id, region) 147 | logger.warning(msg) 148 | return {"success": True, 149 | "logical_resource_id": logical_resource_id, 150 | "physical_resource_id": physical_resource_id, 151 | "warning": "Test skipped, region %s not supprted by lambda" % region, 152 | "region": region, 153 | "stackid": stackid} 154 | try: 155 | function_name = 'test_subnet_%s_%s' % (physical_resource_id, random_string(8)) 156 | iam_name = function_name.replace('_', '-') 157 | lambda_client = clients.get("lambda", region=region) 158 | ec2_client = clients.get("ec2", region=region) 159 | iam_role_arn = get_iam_role() 160 | response = ec2_client.describe_subnets(SubnetIds=[physical_resource_id]) 161 | logger.debug({"test_subnet_connectivity:describe_subnets": response}) 162 | vpc_id = response['Subnets'][0]['VpcId'] 163 | logger.debug({"test_subnet_connectivity:vpc_id": vpc_id}) 164 | security_group_id = ec2_client.create_security_group( 165 | GroupName=iam_name, 166 | Description=iam_name, 167 | VpcId=vpc_id 168 | )['GroupId'] 169 | logger.debug({"test_subnet_connectivity:security_group_id": security_group_id}) 170 | now = datetime.now() 171 | zi_timestamp = (now.year, now.month, now.day, now.hour, now.minute, now.second) 172 | zinfo = ZipInfo('lambda_function.py', zi_timestamp) 173 | zinfo.external_attr = 0x0744 << 16 174 | f = StringIO() 175 | z = ZipFile(f, 'w', ZIP_DEFLATED) 176 | z.writestr(zinfo, function_code) 177 | z.close() 178 | 
zip_bytes = f.getvalue() 179 | logger.debug({"test_subnet_connectivity:create_function_input": {"FunctionName": function_name, 180 | "Role": iam_role_arn, "Code": {'ZipFile': zip_bytes}, 181 | "VpcConfig": {'SubnetIds': [physical_resource_id], 182 | 'SecurityGroupIds': [security_group_id]} 183 | }}) 184 | lambda_function = False 185 | retries = 0 186 | max_retries = 4 187 | while not lambda_function: 188 | try: 189 | lambda_function = lambda_client.create_function( 190 | FunctionName=function_name, 191 | Runtime='python2.7', 192 | Role=iam_role_arn, 193 | Handler='lambda_function.lambda_handler', 194 | Code={'ZipFile': zip_bytes}, 195 | Timeout=120, 196 | MemorySize=128, 197 | VpcConfig={ 198 | 'SubnetIds': [physical_resource_id], 199 | 'SecurityGroupIds': [security_group_id] 200 | } 201 | ) 202 | except botocore.exceptions.ClientError as e: 203 | codes = ['InvalidParameterValueException', 'AccessDeniedException'] 204 | logger.debug("boto exception: ", exc_info=1) 205 | logger.debug(e.response) 206 | if "The provided subnets contain availability zone Lambda doesn't support." in e.response['Error']['Message']: 207 | raise 208 | if e.response['Error']['Code'] in codes and retries < max_retries: 209 | logger.debug({"test_subnet_connectivity:create_function": str(e)}, exc_info=1) 210 | msg = "role not propagated yet, sleeping a bit and then retrying" 211 | logger.debug({"test_subnet_connectivity:create_function_retry": msg}) 212 | retries += 1 213 | sleep(10*(retries**2)) 214 | else: 215 | raise 216 | for endpoint in endpoints: 217 | f = StringIO() 218 | f.write(json.dumps({"address": endpoint[0], "port": endpoint[1]})) 219 | payload = f.getvalue() 220 | f.close() 221 | response = lambda_client.invoke(FunctionName=function_name, InvocationType='RequestResponse', 222 | Payload=payload) 223 | response['Payload'] = response['Payload'].read() 224 | try: 225 | response['Payload'] = json.loads(response['Payload']) 226 | except Exception: 227 | pass 228 | logger.debug({"test_subnet_connectivity:response": response}) 229 | 230 | if response['StatusCode'] != 200 or 'FunctionError' in response.keys(): 231 | results = {"success": False, 232 | "logical_resource_id": logical_resource_id, 233 | "physical_resource_id": physical_resource_id, 234 | "region": region, 235 | "stackid": stackid} 236 | error_msg.append({"endpoint": endpoint, "response": response['Payload']}) 237 | elif response['StatusCode'] == 200 and len(error_msg) == 0: 238 | results = {"success": True, 239 | "logical_resource_id": logical_resource_id, 240 | "physical_resource_id": physical_resource_id, 241 | "region": region, 242 | "stackid": stackid} 243 | except Exception as e: 244 | logger.error({"test_subnet_connectivity": str(e)}, exc_info=1) 245 | if "subnets contain availability zone Lambda doesn't support" in str(e): 246 | results = {"success": True, 247 | "logical_resource_id": logical_resource_id, 248 | "physical_resource_id": physical_resource_id, 249 | "region": region, 250 | "stackid": stackid} 251 | logger.warning("test skipped as lambda is not supported in the subnet's az. 
%s" % str(results)) 252 | else: 253 | results = {"success": False, 254 | "logical_resource_id": logical_resource_id, 255 | "physical_resource_id": physical_resource_id, 256 | "region": region, 257 | "stackid": stackid} 258 | error_msg.append({"exception": str(e)}) 259 | finally: 260 | try: 261 | lambda_client.delete_function(FunctionName=function_name) 262 | except Exception: 263 | logger.warning("Failed to cleanup lambda function", exc_info=1) 264 | try: 265 | logger.debug({"test_subnet_connectivity:security_group_id": security_group_id}) 266 | enis = ec2_client.describe_network_interfaces(Filters=[{'Name': 'group-id', 267 | 'Values': [security_group_id]}]) 268 | for eni in enis['NetworkInterfaces']: 269 | if 'Attachment' in eni.keys(): 270 | logger.debug("Detaching ENI...") 271 | ec2_client.detach_network_interface(AttachmentId=eni['Attachment']['AttachmentId']) 272 | while 'Attachment' in ec2_client.describe_network_interfaces( 273 | NetworkInterfaceIds=[eni['NetworkInterfaceId']] 274 | )['NetworkInterfaces'][0].keys(): 275 | logger.debug("eni still attached, waiting 5 seconds...") 276 | sleep(5) 277 | logger.debug("Deleting ENI %s" % eni['NetworkInterfaceId']) 278 | ec2_client.delete_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId']) 279 | sg = False 280 | retries = 0 281 | max_retries = 3 282 | while not sg: 283 | try: 284 | sg = ec2_client.delete_security_group(GroupId=security_group_id) 285 | except botocore.exceptions.ClientError as e: 286 | msg = "has a dependent object" 287 | dep_violation = e.response['Error']['Code'] == 'DependencyViolation' 288 | logger.debug("boto exception: ", exc_info=1) 289 | if dep_violation and msg in str(e) and retries < max_retries: 290 | msg = "eni deletion not propagated yet, sleeping a bit and then retrying" 291 | logger.debug({"test_subnet_connectivity:delete_sg_retry": security_group_id}) 292 | retries += 1 293 | sleep(5*(retries**2)) 294 | else: 295 | raise 296 | logger.debug({"test_subnet_connectivity:security_group_id_response": response}) 297 | except Exception: 298 | logger.warning("Failed to cleanup security group", exc_info=1) 299 | if len(error_msg) > 0: 300 | results["error_msg"] = error_msg 301 | return results 302 | 303 | 304 | def lambda_handler(event, context): 305 | try: 306 | logger.config(context.aws_request_id, loglevel=loglevel) 307 | logger.debug("Handler starting...") 308 | logger.debug(event) 309 | pipeline_run.consume_event(event, context, loglevel=loglevel) 310 | logger.info({'event': 'new_invoke'}) 311 | logger.debug(pipeline_run.artifacts) 312 | logger.debug({'ci_configs': pipeline_run.ci_configs}) 313 | results = pipeline_run.test_stacks(test_subnet_connectivity, 314 | resource_types=["AWS::EC2::Subnet"], 315 | logical_resource_id_prefix="PrivateSubnet") 316 | logger.info(results) 317 | if len(results['error']) > 0: 318 | pipeline_run.put_job_failure("%s tests failed: %s" % (len(results['error']), results['error'])) 319 | pipeline_run.put_job_success(results['success']) 320 | return 321 | 322 | except Exception as e: 323 | logger.error("Unhandled exception!", exc_info=1) 324 | pipeline_run.put_job_failure(str(e)) 325 | 326 | finally: 327 | try: 328 | delete_iam_role() 329 | except Exception: 330 | logger.warning("Failed to cleanup IAM role", exc_info=1) 331 | -------------------------------------------------------------------------------- /code/lambda_functions/validate_template/requirements.txt: -------------------------------------------------------------------------------- 1 | awsclients 2 | cfnpipeline 3 
--------------------------------------------------------------------------------
/code/lambda_functions/validate_template/requirements.txt:
--------------------------------------------------------------------------------
awsclients
cfnpipeline
logger
pyyaml
--------------------------------------------------------------------------------
/code/lambda_functions/validate_template/validate_template.py:
--------------------------------------------------------------------------------
from awsclients import AwsClients
from botocore.exceptions import ClientError
from cfnpipeline import CFNPipeline
from logger import Logger


loglevel = 'debug'
logger = Logger(loglevel=loglevel)
logger.info('New Lambda container initialised, logging configured.')
clients = AwsClients(logger)
pipeline_run = CFNPipeline(logger, clients)


def get_templates(configs):
    templates = []
    for artifact in configs.keys():
        for config in configs[artifact]:
            for test in config['tests'].keys():
                t = [artifact, config['tests'][test]['template_file']]
                if t not in templates:
                    templates.append(t)
    logger.debug(templates)
    return templates


def validate_template(artifact, template_name):
    url = pipeline_run.upload_template(artifact, template_name, pipeline_run.user_params["ScratchBucket"],
                                       pipeline_run.region)
    cfn_client = clients.get('cloudformation')
    try:
        cfn_client.validate_template(TemplateURL=url)
    except ClientError as e:
        return str(e)


def lambda_handler(event, context):
    try:
        logger.config(context.aws_request_id)
        logger.debug("Handler starting...")
        logger.debug(event)
        pipeline_run.consume_event(event, context, loglevel=loglevel)
        logger.info({'event': 'new_invoke'})
        errors = []
        successes = []
        for a, t in get_templates(pipeline_run.ci_configs):
            validation_failed = validate_template(a, t)
            if validation_failed:
                errors.append([a, t, validation_failed])
            else:
                successes.append('%s/%s' % (a, t))
        if len(errors) > 0:
            msg = "%s validation failures %s" % (len(errors), errors)
            pipeline_run.put_job_failure(msg)
            logger.error(msg)
        else:
            pipeline_run.put_job_success("Successfully validated: %s" % successes)
            logger.info("Successfully validated: %s" % successes)
    except Exception as e:
        logger.error("unhandled exception!", exc_info=1)
        pipeline_run.put_job_failure(str(e))
--------------------------------------------------------------------------------
/code/scripts/cfn-validation-pipeline-cleanup:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import argparse
import boto3
from datetime import datetime
from datetime import timedelta
import json
import logging
from time import sleep


def get_regions(region, service):
    if region == 'ALL':
        # NB: by this point boto3 has been rebound to a boto3.Session in
        # __main__, so get_available_regions can be called on it directly
        return boto3.get_available_regions(service)
    else:
        return [region]


def get_all_pipelines(region):
    regions = get_regions(region, 'codepipeline')
    pipelines = {}
    for region in regions:
        cp_client = boto3.client('codepipeline', region_name=region)
        response = cp_client.list_pipelines()
        for pipeline in response['pipelines']:
            matched = False
            pl_detail = cp_client.get_pipeline(name=pipeline['name'])
            for stage in pl_detail['pipeline']['stages']:
                for action in stage['actions']:
                    if 'UserParameters' in action['configuration']:
                        try:
                            params = json.loads(action['configuration']['UserParameters']).keys()
                            if 'CleanupNonFailed' 
in params and 'StackCreationRoleArn' in params: 34 | matched = True 35 | except ValueError as e: 36 | if e.args[0] != 'No JSON object could be decoded': 37 | raise 38 | 39 | if matched: 40 | pipelines[pipeline['name']] = region 41 | return pipelines 42 | 43 | 44 | def get_all_stacks(): 45 | regions = get_regions('ALL', 'cloudformation') 46 | stacks = {} 47 | for region in regions: 48 | stacks[region] = [] 49 | cfn_client = boto3.client('cloudformation', region_name=region) 50 | response = cfn_client.list_stacks(StackStatusFilter=[ 51 | 'CREATE_FAILED', 'CREATE_COMPLETE', 'ROLLBACK_COMPLETE', 52 | 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'DELETE_FAILED']) 53 | for stack in response['StackSummaries']: 54 | describe_response = cfn_client.describe_stacks(StackName=stack['StackName']) 55 | for tag in describe_response['Stacks'][0]['Tags']: 56 | if tag['Key'] == 'cfn_cicd_pipeline': 57 | stacks[region].append({ 58 | 'name': stack['StackName'], 'pipeline': tag['Value'], 59 | 'status': stack['StackStatus'], 'created': stack['CreationTime'].replace(tzinfo=None), 60 | 'tags': describe_response['Stacks'][0]['Tags'], 'region': region}) 61 | return stacks 62 | 63 | 64 | def get_all_keypairs(): 65 | regions = get_regions('ALL', 'ec2') 66 | key_pairs = {} 67 | for region in regions: 68 | key_pairs[region] = [] 69 | ec2_client = boto3.client('ec2', region_name=region) 70 | response = ec2_client.describe_key_pairs() 71 | for kp in response['KeyPairs']: 72 | if kp['KeyName'].startswith('ci-'): 73 | key_pairs[region].append(kp['KeyName']) 74 | return key_pairs 75 | 76 | 77 | def iter_stacks(stacks, filter_func, filter_val): 78 | filtered_stacks = {} 79 | for region in stacks.keys(): 80 | filtered_stacks[region] = [] 81 | for stack in stacks[region]: 82 | if filter_func(stack, filter_val): 83 | filtered_stacks[region].append(stack) 84 | return filtered_stacks 85 | 86 | 87 | def filter_pipeline_name(stack, pipeline_name): 88 | for tag in stack['tags']: 89 | if tag['Key'] == 'cfn_cicd_pipeline' and tag['Value'] == pipeline_name: 90 | return True 91 | return False 92 | 93 | 94 | def filter_failed(stack, failed): 95 | if stack['status'] not in ['CREATE_FAILED', 'ROLLBACK_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']: 96 | return True 97 | return False 98 | 99 | 100 | def filter_age(stack, age): 101 | min_age = datetime.utcnow() - timedelta(days=age) 102 | if stack['created'] < min_age: 103 | return True 104 | return False 105 | 106 | 107 | def filter_latest(stack, pipelines): 108 | pipeline_name = None 109 | execution_id = None 110 | for tag in stack['tags']: 111 | if tag['Key'] == 'cfn_cicd_pipeline': 112 | pipeline_name = tag['Value'] 113 | elif tag['Key'] == 'cfn_cicd_executionid': 114 | execution_id = tag['Value'] 115 | if pipeline_name not in pipelines.keys(): 116 | return False 117 | cp_client = boto3.client('codepipeline', region_name=pipelines[pipeline_name]) 118 | response = cp_client.get_pipeline_state(name=pipeline_name) 119 | if response['stageStates'][0]['latestExecution']['pipelineExecutionId'] == execution_id: 120 | return False 121 | return True 122 | 123 | 124 | def delete_stacks(stacks): 125 | for region in stacks.keys(): 126 | if len(stacks[region]) > 0: 127 | cfn_client = boto3.client('cloudformation', region_name=region) 128 | for stack in stacks[region]: 129 | print('deleting stack %s in %s from pipeline %s' % (stack['name'], region, stack['pipeline'])) 130 | try: 131 | cfn_client.delete_stack(StackName=stack['name']) 132 | except Exception as e: 133 | if e.args[0].endswith('is invalid or 
cannot be assumed'): 134 | try: 135 | arn = get_role_arn() 136 | cfn_client.delete_stack(StackName=stack['name'], RoleARN=arn) 137 | except Exception as e: 138 | print('Failed to delete stack %s' % (str(stack))) 139 | print(str(e)) 140 | else: 141 | print('Failed to delete stack %s' % (str(stack))) 142 | print(str(e)) 143 | 144 | 145 | def get_role_arn(): 146 | try: 147 | return iam_client.get_role(RoleName='TemplateCI-StackCleanUp')['Role']['Arn'] 148 | except Exception: 149 | return "" 150 | 151 | 152 | def delete_keypairs(region, keypairs): 153 | ec2_client = boto3.client('ec2', region_name=region) 154 | for kp in keypairs: 155 | ec2_client.delete_key_pair(KeyName=kp) 156 | 157 | 158 | if __name__ == "__main__": 159 | parser = argparse.ArgumentParser() 160 | 161 | parser.add_argument("-p", "--pipeline-name", dest="pipeline", help="Name of the pipeline") 162 | parser.add_argument("-r", "--region", dest="region", help="aws region that contains the pipeline(s) to cleanup", default='ALL') 163 | parser.add_argument("-pr", "--profile", dest="profile", help="aws cli profile that you want to run as") 164 | parser.add_argument("-a", "--age", dest="age", help="minimum stack age in days to delete", default=2, type=int) 165 | parser.add_argument("-l", "--latest", dest="latest", action="store_true", help="skip stacks associated with most recent pipeline execution") 166 | parser.add_argument("-f", "--failed", dest="failed", action="store_true", help="include stacks that have a failed status") 167 | parser.add_argument("-d", "--debug", dest="debug", help="enable debugging output", action="store_true") 168 | 169 | args = parser.parse_args() 170 | 171 | if args.debug: 172 | logging.basicConfig(level=logging.DEBUG) 173 | logging.getLogger('boto3').setLevel(logging.ERROR) 174 | logging.getLogger('botocore').setLevel(logging.ERROR) 175 | logging.getLogger('nose').setLevel(logging.ERROR) 176 | logging.getLogger('s3transfer').setLevel(logging.ERROR) 177 | 178 | if args.profile: 179 | boto3 = boto3.Session(profile_name=args.profile, region_name='us-east-1') 180 | else: 181 | boto3 = boto3.Session(region_name='us-east-1') 182 | iam_client = boto3.client('iam') 183 | print('Getting stacks...') 184 | stacks = get_all_stacks() 185 | 186 | print("Cleanup orphaned stacks...") 187 | orphaned = {} 188 | pipelines = get_all_pipelines('ALL') 189 | for region in stacks.keys(): 190 | for stack in stacks[region]: 191 | if stack['pipeline'] not in pipelines.keys(): 192 | try: 193 | orphaned[region].append(stack) 194 | except Exception: 195 | orphaned[region] = [stack] 196 | logging.debug([stack['pipeline'], pipelines.keys()]) 197 | logging.debug("stack %s is orphaned" % stack['name']) 198 | delete_stacks(orphaned) 199 | 200 | print("Cleanup keypairs...") 201 | key_pairs = get_all_keypairs() 202 | for region in key_pairs.keys(): 203 | kp_to_delete = [] 204 | for kp in key_pairs[region]: 205 | stack_list = [s['name'] for s in stacks[region]] 206 | if kp not in stack_list: 207 | kp_to_delete.append(kp) 208 | if len(kp_to_delete) > 0: 209 | delete_keypairs(region, kp_to_delete) 210 | 211 | print('getting pipelines...') 212 | pipelines = get_all_pipelines(args.region) 213 | logging.debug(pipelines) 214 | if args.pipeline: 215 | stacks = iter_stacks(stacks, filter_pipeline_name, args.pipeline) 216 | if not args.failed: 217 | stacks = iter_stacks(stacks, filter_failed, args.failed) 218 | if args.age > 0: 219 | stacks = iter_stacks(stacks, filter_age, args.age) 220 | if args.latest: 221 | stacks = iter_stacks(stacks, 
filter_latest, pipelines) 222 | logging.debug(stacks) 223 | delete_stacks(stacks) 224 | -------------------------------------------------------------------------------- /code/scripts/cfn-validation-pipeline-rollback: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import argparse 3 | import boto3 4 | from datetime import datetime 5 | import json 6 | import logging 7 | from pprint import pprint 8 | from operator import itemgetter 9 | 10 | 11 | def extract_commits(s3_objects, prefix): 12 | commits = {} 13 | for o in s3_objects: 14 | commit_id = o['Key'].replace(prefix, '').split('/')[1] 15 | if commit_id != 'latest': 16 | commits[commit_id] = o['LastModified'] 17 | return commits 18 | 19 | 20 | def get_latest(bucket, key, region): 21 | if region: 22 | s3_client = boto3.client('s3', region_name=region) 23 | s3_resource = boto3.resource('s3', region_name=region) 24 | else: 25 | s3_client = boto3.client('s3') 26 | s3_resource = boto3.resource('s3') 27 | results = s3_client.list_objects_v2(Bucket=bucket, Prefix=key+'/latest/', MaxKeys=1) 28 | key = results['Contents'][0]['Key'] 29 | s3_object = s3_resource.Object(bucket, key) 30 | if 'git_revision_id' in s3_object.metadata: 31 | return s3_object.metadata['git_revision_id'] 32 | else: 33 | return None 34 | 35 | 36 | def list_commits(pipeline_name, region): 37 | bucket, key = get_s3_path(pipeline_name, region) 38 | latest = get_latest(bucket, key, region) 39 | if region: 40 | s3_client = boto3.client('s3', region_name=region) 41 | else: 42 | s3_client = boto3.client('s3') 43 | results = s3_client.list_objects_v2(Bucket=bucket, Prefix=key) 44 | commits = extract_commits(results['Contents'], key) 45 | while results['IsTruncated']: 46 | logging.debug("result is truncated, fetching more objects...") 47 | results = s3_client.list_objects_v2(Bucket=bucket, Prefix=key, ContinuationToken=results['NextContinuationToken']) 48 | commits.update(extract_commits(results['Contents'], key)) 49 | return commits, latest, bucket, key 50 | 51 | 52 | def get_s3_path(pipeline_name, region): 53 | if region: 54 | cp_client = boto3.client('codepipeline', region_name=region) 55 | else: 56 | cp_client = boto3.client('codepipeline') 57 | response = cp_client.get_pipeline(name=pipeline_name) 58 | bucket, key = [None, None] 59 | for stage in response['pipeline']['stages']: 60 | logging.debug("checking stage %s" % stage['name']) 61 | for action in stage['actions']: 62 | logging.debug("checking action %s in stage %s" % (action['name'], stage['name'])) 63 | if 'UserParameters' in action['configuration'].keys(): 64 | try: 65 | user_params = json.loads(action['configuration']['UserParameters']) 66 | if 'DeployKey' in user_params.keys() and 'DeployBucket' in user_params.keys(): 67 | logging.debug("matched action %s in stage %s" % (action['name'], stage['name'])) 68 | bucket = user_params['DeployBucket'] 69 | key = user_params['DeployKey'] 70 | except Exception: 71 | logging.debug("failed to get UserParameters", exc_info=1) 72 | pass 73 | if bucket and key: 74 | return bucket, key 75 | else: 76 | raise ValueError("could not find a deploy to S3 action (requires DeployBucket and DeployKey in UserParameters json)") 77 | 78 | 79 | def delete_latest(bucket, key, region): 80 | if region: 81 | s3_client = boto3.client('s3', region_name=region) 82 | else: 83 | s3_client = boto3.client('s3') 84 | prefix = key + '/latest/' 85 | logging.debug("Deleting %s prefix" % prefix) 86 | response = s3_client.list_objects(Bucket=bucket, 
Prefix=prefix) 87 | if 'Contents' in response.keys(): 88 | s3_client.delete_objects( 89 | Bucket=bucket, 90 | Delete={'Objects': [{"Key": dkey['Key']} for dkey in response['Contents']]} 91 | ) 92 | while 'NextMarker' in response: 93 | response = s3_client.list_objects(Bucket=bucket, Prefix=prefix, Marker=response['NextMarker']) 94 | if 'Contents' in response.keys(): 95 | s3_client.delete_objects( 96 | Bucket=bucket, 97 | Delete={'Objects': [{"Key": dkey['Key']} for dkey in response['Contents']]} 98 | ) 99 | return 100 | 101 | 102 | def new_latest(commit, bucket, key, region): 103 | delete_latest(bucket, key, region) 104 | if region: 105 | s3_client = boto3.client('s3', region_name=region) 106 | else: 107 | s3_client = boto3.client('s3') 108 | logging.debug("copying %s prefix to latest" % key+'/'+commit+'/') 109 | results = s3_client.list_objects_v2(Bucket=bucket, Prefix=key+'/'+commit+'/') 110 | objects_to_latest(s3_client, results['Contents'], commit, bucket, key) 111 | while results['IsTruncated']: 112 | logging.debug("result is truncated, fetching more objects...") 113 | results = s3_client.list_objects_v2(Bucket=bucket, Prefix=key+'/'+commit+'/', ContinuationToken=results['NextContinuationToken']) 114 | objects_to_latest(s3_client, results['Contents'], commit, bucket, key) 115 | 116 | 117 | def objects_to_latest(s3_client, objects, commit, bucket, key): 118 | for o in objects: 119 | src_key = o['Key'] 120 | dst_key = src_key.replace(commit, 'latest') 121 | s3_client.copy_object(Bucket=bucket, Key=dst_key, CopySource={'Bucket': bucket, 'Key': src_key}) 122 | return 123 | 124 | if __name__ == "__main__": 125 | parser = argparse.ArgumentParser() 126 | 127 | parser.add_argument("-p", "--pipeline-name", dest="pipeline", help="Name of the pipeline", required=True) 128 | parser.add_argument("-c", "--commit", dest="commit", help="commit to revert to") 129 | parser.add_argument("-r", "--region", dest="region", help="aws region that contains the pipeline") 130 | parser.add_argument("-l", "--list", dest="list", help="list available commits", action="store_true") 131 | parser.add_argument("-d", "--debug", dest="debug", help="enable debugging output", action="store_true") 132 | 133 | args = parser.parse_args() 134 | # --commit is required if --list is not specified 135 | if not args.list and args.commit is None: 136 | parser.error('argument -c/--commit is required') 137 | 138 | parser.parse_args() 139 | 140 | if args.debug: 141 | logging.basicConfig(level=logging.DEBUG) 142 | logging.getLogger('boto3').setLevel(logging.ERROR) 143 | logging.getLogger('botocore').setLevel(logging.ERROR) 144 | logging.getLogger('nose').setLevel(logging.ERROR) 145 | logging.getLogger('s3transfer').setLevel(logging.ERROR) 146 | 147 | commits, latest, bucket, key = list_commits(args.pipeline, args.region) 148 | if args.list: 149 | commit_list = [] 150 | for c in commits.keys(): 151 | commit_list.append([c, commits[c]]) 152 | commit_list.sort(key=itemgetter(1), reverse=True) 153 | for c in commit_list: 154 | if c[0] == latest: 155 | print("%s %s [LATEST]" % (c[0], c[1])) 156 | else: 157 | print("%s %s" % (c[0], c[1])) 158 | else: 159 | if latest == args.commit: 160 | parser.error('commit %s is already set to latest' % args.commit) 161 | else: 162 | new_latest(args.commit, bucket, key, args.region) 163 | -------------------------------------------------------------------------------- /code/scripts/cfn-validation-pipeline-skeleton: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python
2 | import argparse
3 | from distutils.version import LooseVersion
4 | import os
5 | import shutil
6 | import sys
7 | import zipfile
8 | # e.g. cfn-validation-pipeline-skeleton my-project-dir
9 | if __name__ == "__main__":
10 |     parser = argparse.ArgumentParser()
11 |
12 |     parser.add_argument('path', type=str, nargs='?', default='./',
13 |                         help='path to create a project skeleton in, defaults to current directory')
14 |
15 |     args = parser.parse_args()
16 |
17 |     if not os.path.isdir(args.path):  # args.path is a plain string (nargs='?'); indexing [0] would only take its first character
18 |         os.mkdir(args.path)
19 |     skeleton_path = '0'  # sentinel that sorts below any real path under LooseVersion
20 |     for p in sys.path:
21 |         if 'aws_cloudformation_validation_pipeline' in p and LooseVersion(p) >= LooseVersion(skeleton_path):
22 |             skeleton_path = p
23 |     if skeleton_path == '0':
24 |         print("ERROR: could not find skeleton files in python path")
25 |         sys.exit(1)
26 |     shutil.copytree(
27 |         skeleton_path + '/project-skeleton/validation_pipeline',
28 |         args.path + '/validation_pipeline',
29 |         ignore=shutil.ignore_patterns('*.pyc')
30 |     )
31 |     orig_path = os.getcwd()
32 |     os.chdir(args.path + '/validation_pipeline')
33 |     basepath = './'
34 |     if not os.path.exists(basepath + 'demo_source.zip'):
35 |         ziph = zipfile.ZipFile('./demo_source.zip', 'w', zipfile.ZIP_DEFLATED)
36 |         os.chdir('./demo_source')
37 |         for root, dirs, files in os.walk('./'):
38 |             for f in files:
39 |                 ziph.write(os.path.join(root, f))
40 |         ziph.close()
41 |         os.chdir('../')
42 |         shutil.rmtree('./demo_source')
43 |     os.chdir(orig_path)
44 |
-------------------------------------------------------------------------------- /code/tests/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazon-archives/cloudformation-validation-pipeline/c45e4dbcaa338659e1c5b247a195673c3e789eef/code/tests/__init__.py
-------------------------------------------------------------------------------- /code/tests/test_ami_check.py: --------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 | import sys
4 | import os
5 |
6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
7 |
8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'):
9 |     relative_path_prefix = '../lambda_functions'
10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'):
11 |     relative_path_prefix = '../code/lambda_functions'
12 | elif 'lambda_functions' in os.listdir('./'):
13 |     relative_path_prefix = './lambda_functions'
14 | else:
15 |     relative_path_prefix = './code/lambda_functions'
16 | sys.path.append(relative_path_prefix + "/lib/")
17 | sys.path.append(relative_path_prefix + "/ami_check/")
18 |
19 | from ami_check import lambda_handler
20 |
21 | def raise_exception(*args, **kwargs):
22 |     raise Exception("Test Exception")
23 |
24 | event = {
25 |     "CodePipeline.job": {
26 |         "id": str(''),
27 |         "data": {
28 |             "inputArtifacts": [
29 |                 {
30 |                     'location': {
31 |                         's3Location': {
32 |                             'bucketName': "bucketName",
33 |                             'objectKey': "objectKey"
34 |                         }
35 |                     },
36 |                     "name": "TemplateArtifact"
37 |                 }
38 |             ],
39 |             "outputArtifacts": [
40 |                 {
41 |                     'location': {
42 |                         's3Location': {
43 |                             'bucketName': "bucketName",
44 |                             'objectKey': "objectKey"
45 |                         }
46 |                     },
47 |                     "name": "StackArtifact"
48 |                 }
49 |             ],
50 |             'actionConfiguration': {
51 |                 'configuration': {
52 |                     'UserParameters': None
53 |                 }
54 |             },
55 |             'artifactCredentials': {
56 |                 'accessKeyId': "xxx",
57 |                 'secretAccessKey': "yyy",
58 |                 'sessionToken': "zzz"
59 |             }
60 |         }
61 |     }
62 | }
63 |
64 | class MockContext(object):
65 |
66 |     def __init__(self):
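        # minimal stand-in for the Lambda context object; the handlers under test only read aws_request_id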
self.aws_request_id = 'some-request-id' 68 | 69 | class MockEC2Client(object): 70 | 71 | def __init__(self): 72 | pass 73 | 74 | def describe_images(self, ImageIds): 75 | if ImageIds == ['ami-test']: 76 | return { 77 | "Images": [ 78 | {"Name": "ami-test"} 79 | ] 80 | } 81 | elif ImageIds == ['ami-testDated']: 82 | return { 83 | "Images": [ 84 | {"Name": "ami-testLatest"} 85 | ] 86 | } 87 | else: 88 | raise Exception("The image id '[%s]' does not exist" % ImageIds[0]) 89 | 90 | class MockBotoClient(object): 91 | def __init__(self): 92 | pass 93 | def get(self, service, region=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 94 | if service == 'ec2': 95 | return MockEC2Client() 96 | else: 97 | raise ValueError("no api mock available for %s" % service) 98 | 99 | class MockCFNPipeline(object): 100 | 101 | def __init__(self): 102 | self.ci_configs = {"ScratchBucket": "scratch_bucket_name"} 103 | self.user_params = {"ScratchBucket": "scratch_bucket_name" } 104 | 105 | pipeline_run = mock.MagicMock(pipeline_run=None) 106 | 107 | 108 | class TestLambdaHandler(unittest.TestCase): 109 | 110 | @mock.patch('ami_check.pipeline_run') 111 | @mock.patch('ami_check.find_ami_ids') 112 | @mock.patch('ami_check.clients', MockBotoClient()) 113 | @mock.patch('cfnpipeline.CFNPipeline.put_job_success') 114 | def test_handler_success(self, put_job_success, find_ami_ids, cfnpl): 115 | cfnpl.put_job_success.return_value = None 116 | cfnpl.get_templates.return_value = None 117 | find_ami_ids.return_value = [{'value': "ami-test", 'regions': ['us-east-1']}] 118 | self.assertEqual(lambda_handler(event, MockContext()), None) 119 | cfnpl.put_job_success.assert_called() 120 | cfnpl.put_job_success.return_value = None 121 | find_ami_ids.return_value = [{'value': "ami-testDated", 'regions': ['us-east-1']}] 122 | self.assertEqual(lambda_handler(event, MockContext()), None) 123 | cfnpl.put_job_success.assert_called() 124 | cfnpl.put_job_failure.return_value = None 125 | find_ami_ids.return_value = [{'value': "ami-test", 'regions': []}] 126 | self.assertEqual(lambda_handler(event, MockContext()), None) 127 | cfnpl.put_job_failure.assert_called() 128 | cfnpl.put_job_failure.return_value = None 129 | find_ami_ids.return_value = [{'value': "ami-test-fail", 'regions': ['us-east-1']}] 130 | self.assertEqual(lambda_handler(event, MockContext()), None) 131 | cfnpl.put_job_failure.assert_called() 132 | 133 | 134 | @mock.patch('ami_check.pipeline_run.put_job_failure') 135 | @mock.patch('ami_check.pipeline_run.consume_event') 136 | def test_handler_failure(self, consume_event, put_job_failure): 137 | consume_event.side_effect = raise_exception 138 | self.assertEqual(lambda_handler(event, MockContext()), None) 139 | put_job_failure.assert_called() 140 | 141 | if __name__ == '__main__': 142 | unittest.main() 143 | -------------------------------------------------------------------------------- /code/tests/test_anon_reporting.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | 6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 7 | 8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 9 | relative_path_prefix = '../lambda_functions' 10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 11 | relative_path_prefix = '../code/lambda_functions' 12 | elif 'lambda_functions' in os.listdir('./'): 13 | relative_path_prefix = './lambda_functions' 14 | else: 15 | 
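    # none of the known layouts matched, so assume we are running from the repository root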
relative_path_prefix = './code/lambda_functions' 16 | sys.path.append(relative_path_prefix + "/lib/") 17 | sys.path.append(relative_path_prefix + "/anon_reporting/") 18 | 19 | from anon_reporting import lambda_handler 20 | from anon_reporting import send_data 21 | 22 | def raise_exception(*args, **kwargs): 23 | raise Exception("Test Exception") 24 | 25 | event = { 26 | "RequestId": 'a-request-id', 27 | "ResponseURL": "http://a.response.url", 28 | "StackId": "arn:::a-stack-id", 29 | "LogicalResourceId": "a-logical-resource-id", 30 | "PhysicalResourceId": "a-physical-resource-id", 31 | "ResourceProperties": { 32 | "loglevel": "error", 33 | "botolevel": "error", 34 | "SolutionID": "a-solution-id" 35 | } 36 | } 37 | 38 | class MockContext(object): 39 | 40 | def __init__(self): 41 | self.aws_request_id = 'some-request-id' 42 | self.log_stream_name = 'a-logstream' 43 | 44 | def get_remaining_time_in_millis(self): 45 | return 15000 46 | 47 | class MockPut(object): 48 | def __init__(self, url, data, headers): 49 | self.reason = 'a-response' 50 | 51 | class MockUrlopen(object): 52 | def __init__(self, req): 53 | pass 54 | def getcode(self): 55 | return 200 56 | def read(self): 57 | return '' 58 | 59 | class TestLambdaHandler(unittest.TestCase): 60 | 61 | @mock.patch("anon_reporting.urllib2.Request", mock.Mock(return_value=None)) 62 | @mock.patch("anon_reporting.urllib2.urlopen", MockUrlopen) 63 | def test_send_data(self): 64 | self.assertEqual(None, send_data('uuid', 'solution_id', 'stack_event', 'region', 'stack_id')) 65 | 66 | @mock.patch("anon_reporting.send_data", mock.Mock(return_value=None)) 67 | @mock.patch("botocore.vendored.requests.put", MockPut) 68 | @mock.patch("crhelper.timeout",mock.Mock(return_value=None)) 69 | def test_handler_success(self): 70 | event['RequestType'] = "Create" 71 | self.assertEqual(None, lambda_handler(event, MockContext())) 72 | event['RequestType'] = "Update" 73 | self.assertEqual(None, lambda_handler(event, MockContext())) 74 | event['RequestType'] = "Delete" 75 | self.assertEqual(None, lambda_handler(event, MockContext())) 76 | 77 | 78 | if __name__ == '__main__': 79 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_awsclients.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import shutil 4 | import zipfile 5 | 6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 7 | 8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 9 | relative_path_prefix = '../lambda_functions' 10 | artifact_path = './test_artifact.zip' 11 | basepath = './' 12 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 13 | relative_path_prefix = '../code/lambda_functions' 14 | artifact_path = './test_artifact.zip' 15 | basepath = './' 16 | elif 'lambda_functions' in os.listdir('./'): 17 | relative_path_prefix = './lambda_functions' 18 | artifact_path = './tests/test_artifact.zip' 19 | basepath = './tests/' 20 | else: 21 | relative_path_prefix = './code/lambda_functions' 22 | artifact_path = './code/tests/test_artifact.zip' 23 | basepath = './code/tests/' 24 | 25 | sys.path.append(relative_path_prefix + "/lib/") 26 | 27 | if not os.path.exists(basepath + 'test_artifact.zip'): 28 | orig_path = os.getcwd() 29 | os.chdir(basepath) 30 | ziph = zipfile.ZipFile('./test_artifact.zip', 'w', zipfile.ZIP_DEFLATED) 31 | if 'demo_source' in os.listdir('../'): 32 | os.chdir('../demo_source') 33 | else: 34 | 
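        # when building the artifact from code/tests, demo_source sits two directories up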
os.chdir('../../demo_source') 35 | for root, dirs, files in os.walk('./'): 36 | for f in files: 37 | if f == 'config.yml': 38 | shutil.copyfile(os.path.join(root, f), '/tmp/config.yml') 39 | fh = open('/tmp/config.yml', 'a') 40 | fh.write(""" 41 | built_stacks: 42 | "eu-west-1": 43 | - test1 44 | "us-east-1": 45 | - test2 46 | """) 47 | fh.close() 48 | ziph.write('/tmp/config.yml', 'ci/config.yml') 49 | else: 50 | ziph.write(os.path.join(root, f)) 51 | ziph.close() 52 | os.chdir(orig_path) 53 | 54 | 55 | import unittest 56 | from threading import Lock 57 | import mock 58 | from awsclients import AwsClients 59 | from logger import Logger 60 | import boto3 61 | 62 | logger = Logger() 63 | 64 | class MockClientConfig(object): 65 | def __init__(self): 66 | self.region_name = "us-east-2" 67 | 68 | class MockS3Client(object): 69 | 70 | def __init__(self): 71 | self._client_config = MockClientConfig() 72 | pass 73 | 74 | def get_object(self, Bucket, Key): 75 | return { 76 | "Body": open(artifact_path, 'rb') 77 | } 78 | 79 | def put_object(self,Bucket, Key, Body): 80 | return None 81 | 82 | def upload_file(self, fname, bucket, key, ExtraArgs=None): 83 | return None 84 | 85 | def list_objects(self, Bucket, Prefix): 86 | return { 87 | "Contents": [ 88 | { 89 | "Key": "an_s3_key" 90 | } 91 | ] 92 | } 93 | 94 | def delete_objects(self, Bucket, Delete): 95 | return None 96 | 97 | class MockBotoClient(object): 98 | def __init__(self): 99 | self.session = MockBotoSession() 100 | pass 101 | 102 | def client(self, service, region_name=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 103 | if service == 's3': 104 | return MockS3Client() 105 | else: 106 | raise ValueError("no api mock available for %s" % service) 107 | def session(self, *args): 108 | return None 109 | 110 | 111 | class MockBotoSession(object): 112 | def __init__(self): 113 | pass 114 | def Session(self, *args, **kwargs): 115 | return MockBotoSessionClass() 116 | def get_session(self): 117 | return MockBotoSessionClass() 118 | 119 | 120 | class MockBotoSessionClass(object): 121 | def __init__(self): 122 | pass 123 | def get_available_regions(self, *args, **kwargs): 124 | return ['us-east-1'] 125 | def client(self, service): 126 | return MockBotoClient().client(service) 127 | 128 | 129 | class TestAwsClients(unittest.TestCase): 130 | def test___init__(self): 131 | aws_clients = AwsClients(logger) 132 | self.assertEqual({"default_role": {}}, aws_clients._clients) 133 | self.assertEqual(type(Lock()),type(aws_clients._lock)) 134 | self.assertEqual(logger, aws_clients.logger) 135 | 136 | @mock.patch("awsclients.AwsClients._create_client", mock.MagicMock(return_value=MockBotoClient().client('s3'))) 137 | def test_get(self): 138 | aws_clients = AwsClients(logger) 139 | self.assertEqual('us-east-2', aws_clients.get('s3', region='us-east-2')._client_config.region_name) 140 | self.assertIn('default_sig_version', aws_clients._clients['default_role']['us-east-2']['s3'].keys()) 141 | self.assertIn('session', aws_clients._clients['default_role']['us-east-2'].keys()) 142 | 143 | def test_get_available_regions(self): 144 | aws_clients = AwsClients(logger) 145 | session = boto3.session.Session() 146 | regions = session.get_available_regions('s3') 147 | self.assertEqual(regions, aws_clients.get_available_regions('s3')) 148 | 149 | if __name__ == '__main__': 150 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_create_stack.py: 
--------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 | import sys
4 | import os
5 |
6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
7 |
8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'):
9 |     relative_path_prefix = '../lambda_functions'
10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'):
11 |     relative_path_prefix = '../code/lambda_functions'
12 | elif 'lambda_functions' in os.listdir('./'):
13 |     relative_path_prefix = './lambda_functions'
14 | else:
15 |     relative_path_prefix = './code/lambda_functions'
16 |
17 |
18 |
19 | sys.path.append(relative_path_prefix + "/lib/")
20 | sys.path.append(relative_path_prefix + "/create_stack/")
21 |
22 | from create_stack import lambda_handler
23 |
24 | def raise_exception(*args, **kwargs):
25 |     raise Exception("Test Exception")
26 |
27 | event = {
28 |     "CodePipeline.job": {
29 |         "id": str(''),
30 |         "data": {
31 |             "inputArtifacts": [
32 |                 {
33 |                     'location': {
34 |                         's3Location': {
35 |                             'bucketName': "bucketName",
36 |                             'objectKey': "objectKey"
37 |                         }
38 |                     },
39 |                     "name": "TemplateArtifact"
40 |                 }
41 |             ],
42 |             "outputArtifacts": [
43 |                 {
44 |                     'location': {
45 |                         's3Location': {
46 |                             'bucketName': "bucketName",
47 |                             'objectKey': "objectKey"
48 |                         }
49 |                     },
50 |                     "name": "StackArtifact"
51 |                 }
52 |             ],
53 |             'actionConfiguration': {
54 |                 'configuration': {
55 |                     'UserParameters': None
56 |                 }
57 |             },
58 |             'artifactCredentials': {
59 |                 'accessKeyId': "xxx",
60 |                 'secretAccessKey': "yyy",
61 |                 'sessionToken': "zzz"
62 |             }
63 |         }
64 |     }
65 | }
66 |
67 | class MockContext(object):
68 |
69 |     def __init__(self):
70 |         self.aws_request_id = 'some-request-id'
71 |
72 | pipeline_run = mock.MagicMock(pipeline_run=None)
73 |
74 |
75 | class TestLambdaHandler(unittest.TestCase):
76 |
77 |     @mock.patch('create_stack.pipeline_run')
78 |     def test_handler_success(self, cfnpl):
79 |         cfnpl.continue_job_later.return_value = None
80 |         cfnpl.create_stacks.return_value = {'inprogress': ['inprogress'], 'success': [], 'error': []}
81 |         self.assertEqual(lambda_handler(event, MockContext()), None)
82 |         cfnpl.continue_job_later.assert_called()
83 |         cfnpl.continue_job_later.return_value = None
84 |         cfnpl.create_stacks.return_value = {'inprogress': [], 'success': [], 'error': ['error']}
85 |         self.assertEqual(lambda_handler(event, MockContext()), None)
86 |         cfnpl.continue_job_later.assert_called()
87 |         cfnpl.cleanup_failed = False
88 |         cfnpl.continue_job_later.return_value = None
89 |         self.assertEqual(lambda_handler(event, MockContext()), None)
90 |         cfnpl.continue_job_later.assert_called()
91 |         cfnpl.put_job_success.return_value = None
92 |         cfnpl.create_stacks.return_value = {'inprogress': [], 'success': ['success'], 'error': []}
93 |         self.assertEqual(lambda_handler(event, MockContext()), None)
94 |         cfnpl.put_job_success.assert_called()
95 |         cfnpl.continuation_data = {'message': {"stacks": {'inprogress': ['inprogress'], 'success': [], 'error': []}}}
96 |         cfnpl.put_job_success.return_value = None
97 |         self.assertEqual(lambda_handler(event, MockContext()), None)
98 |         cfnpl.put_job_success.assert_called()
99 |         cfnpl.continuation_data = {'message': {"pre-delete": True, "deleting": ['inprogress']}}
100 |         cfnpl.check_statuses.return_value = []
101 |         cfnpl.put_job_success.return_value = None
102 |         self.assertEqual(lambda_handler(event, MockContext()), None)
103 |         cfnpl.put_job_success.assert_called()
104 |         cfnpl.continuation_data = {'message': {'invalid':
True}} 105 | cfnpl.check_statuses.return_value = [] 106 | cfnpl.put_job_success.return_value = None 107 | self.assertEqual(lambda_handler(event, MockContext()), None) 108 | cfnpl.put_job_success.assert_called() 109 | cfnpl.continuation_event = None 110 | cfnpl.put_job_success.return_value = None 111 | self.assertEqual(lambda_handler(event, MockContext()), None) 112 | cfnpl.put_job_success.assert_called() 113 | 114 | 115 | @mock.patch('create_stack.pipeline_run.put_job_failure') 116 | @mock.patch('create_stack.pipeline_run.consume_event') 117 | def test_handler_failure(self, consume_event, put_job_failure): 118 | consume_event.side_effect = raise_exception 119 | self.assertEqual(lambda_handler(event, MockContext()), None) 120 | put_job_failure.assert_called() 121 | 122 | if __name__ == '__main__': 123 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_deploy_to_s3.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | 6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 7 | 8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 9 | relative_path_prefix = '../lambda_functions' 10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 11 | relative_path_prefix = '../code/lambda_functions' 12 | elif 'lambda_functions' in os.listdir('./'): 13 | relative_path_prefix = './lambda_functions' 14 | else: 15 | relative_path_prefix = './code/lambda_functions' 16 | sys.path.append(relative_path_prefix + "/lib/") 17 | sys.path.append(relative_path_prefix + "/deploy_to_s3/") 18 | 19 | from deploy_to_s3 import lambda_handler 20 | 21 | def raise_exception(*args, **kwargs): 22 | raise Exception("Test Exception") 23 | 24 | event = { 25 | "CodePipeline.job": { 26 | "id": str(''), 27 | "data": { 28 | "inputArtifacts": [ 29 | { 30 | 'location': { 31 | 's3Location': { 32 | 'bucketName': "bucketName", 33 | 'objectKey': "objectKey" 34 | } 35 | }, 36 | "name": "TemplateArtifact" 37 | } 38 | ], 39 | "outputArtifacts": [ 40 | { 41 | 'location': { 42 | 's3Location': { 43 | 'bucketName': "bucketName", 44 | 'objectKey': "objectKey" 45 | } 46 | }, 47 | "name": "StackArtifact" 48 | } 49 | ], 50 | 'actionConfiguration': { 51 | 'configuration': { 52 | 'UserParameters': None 53 | } 54 | }, 55 | 'artifactCredentials': { 56 | 'accessKeyId': "xxx", 57 | 'secretAccessKey': "yyy", 58 | 'sessionToken': "zzz" 59 | } 60 | } 61 | } 62 | } 63 | 64 | class MockContext(object): 65 | 66 | def __init__(self): 67 | self.aws_request_id = 'some-request-id' 68 | 69 | pipeline_run = mock.MagicMock(pipeline_run=None) 70 | 71 | 72 | class TestLambdaHandler(unittest.TestCase): 73 | 74 | @mock.patch('deploy_to_s3.pipeline_run') 75 | def test_handler_success(self, cfnpl): 76 | cfnpl.put_job_success.return_value = None 77 | self.assertEqual(lambda_handler(event, MockContext()), None) 78 | cfnpl.put_job_success.assert_called() 79 | 80 | 81 | @mock.patch('deploy_to_s3.pipeline_run.put_job_failure') 82 | @mock.patch('deploy_to_s3.pipeline_run.consume_event') 83 | def test_handler_failure(self, consume_event, put_job_failure): 84 | consume_event.side_effect = raise_exception 85 | self.assertEqual(lambda_handler(event, MockContext()), None) 86 | put_job_failure.assert_called() 87 | 88 | if __name__ == '__main__': 89 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_email_notification.py: 
-------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | from datetime import datetime 6 | 7 | os.environ['table_name'] = 'some-table-name' 8 | 9 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 10 | 11 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 12 | relative_path_prefix = '../lambda_functions' 13 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 14 | relative_path_prefix = '../code/lambda_functions' 15 | elif 'lambda_functions' in os.listdir('./'): 16 | relative_path_prefix = './lambda_functions' 17 | else: 18 | relative_path_prefix = './code/lambda_functions' 19 | sys.path.append(relative_path_prefix + "/lib/") 20 | sys.path.append(relative_path_prefix + "/email_notification/") 21 | 22 | from email_notification import lambda_handler 23 | 24 | def raise_exception(*args, **kwargs): 25 | raise Exception("Test Exception") 26 | 27 | class MockCodePipelineClient(object): 28 | 29 | def __init__(self): 30 | pass 31 | 32 | def put_job_failure_result(self, jobId, failureDetails): 33 | return None 34 | 35 | def put_job_success_result(self, jobId, continuationToken=None): 36 | return None 37 | 38 | def get_job_details(self, jobId): 39 | return { 40 | 'jobDetails': { 41 | 'data': { 42 | 'pipelineContext': { 43 | "pipelineName": str("pipelineName"), 44 | "stage": { 45 | "name": str("stage_name") 46 | }, 47 | 'action': { 48 | 'name': "action_name" 49 | } 50 | } 51 | } 52 | } 53 | } 54 | 55 | def get_pipeline_state(self, name): 56 | return { 57 | 'stageStates': [ 58 | { 59 | "stageName": "stage_name", 60 | 'latestExecution': { 61 | 'pipelineExecutionId': "pipelineExecutionId" 62 | }, 63 | "actionStates": [ 64 | { 65 | "actionName": "actionName", 66 | 'latestExecution': { 67 | 'pipelineExecutionId': "pipelineExecutionId", 68 | "status": "Failed", 69 | "errorDetails": { 70 | "message": "an error message" 71 | }, 72 | "lastStatusChange": datetime.now() 73 | } 74 | } 75 | ] 76 | } 77 | ] 78 | } 79 | 80 | def get_pipeline_execution(self, pipelineName, pipelineExecutionId): 81 | return { 82 | 'pipelineExecution': { 83 | 'artifactRevisions': [ 84 | { 85 | 'revisionId': "revisionId" 86 | } 87 | ] 88 | } 89 | } 90 | 91 | def get_pipeline(self, name): 92 | return { 93 | "pipeline": { 94 | "stages": [ 95 | { 96 | "actions": [ 97 | { 98 | "actionTypeId": { 99 | "category": "Invoke" 100 | }, 101 | "configuration": { 102 | "FunctionName": "TestLambda" 103 | } 104 | }, 105 | { 106 | "actionTypeId": { 107 | "category": "Source" 108 | }, 109 | "configuration": { 110 | "RepositoryName": "TestRepo" 111 | } 112 | } 113 | ] 114 | } 115 | ] 116 | } 117 | } 118 | 119 | class MockSNSClient(object): 120 | 121 | def __init__(self): 122 | pass 123 | 124 | def publish(self, TopicArn, Subject, Message): 125 | return None 126 | 127 | class MockDDBResource(object): 128 | 129 | def __init__(self): 130 | pass 131 | 132 | def Table(self, table_name): 133 | return MockDDBTable() 134 | 135 | class MockDDBTable(object): 136 | 137 | def __init__(self): 138 | pass 139 | 140 | def get_item(self, Key): 141 | return {} 142 | 143 | def put_item(self, Item): 144 | return None 145 | 146 | class MockBotoClient(object): 147 | def __init__(self): 148 | pass 149 | def client(self, service, region_name=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 150 | if service == 'codepipeline': 151 | return MockCodePipelineClient() 152 | elif service == 'sns': 153 | return MockSNSClient() 
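        # any other service is deliberately left unmocked so tests fail fast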
154 |         else:
155 |             raise ValueError("no api mock available for %s" % service)
156 |     def resource(self, service, region_name=None, access_key=None, secret_key=None, session_token=None, s3v4=True):
157 |         if service == 'dynamodb':
158 |             return MockDDBResource()
159 |         else:
160 |             raise ValueError("no api mock available for %s" % service)
161 |
162 | event = {
163 |     "pipeline": "test-pipeline",
164 |     "topic": "arn:::test-sns-topic",
165 |     "region": "us-east-1"
166 | }
167 |
168 | class MockContext(object):
169 |
170 |     def __init__(self):
171 |         self.aws_request_id = 'some-request-id'
172 |
173 | pipeline_run = mock.MagicMock(pipeline_run=None)
174 |
175 |
176 |
177 | class TestLambdaHandler(unittest.TestCase):
178 |
179 |     @mock.patch('email_notification.boto3', MockBotoClient())
180 |     @mock.patch('email_notification.pipeline_run')
181 |     def test_handler_success(self, cfnpl):
182 |         self.assertEqual(lambda_handler(event, MockContext()), None)
183 |
184 | if __name__ == '__main__':
185 |     unittest.main()
-------------------------------------------------------------------------------- /code/tests/test_generate_report.py: --------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 | import sys
4 | import os
5 |
6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
7 |
8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'):
9 |     relative_path_prefix = '../lambda_functions'
10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'):
11 |     relative_path_prefix = '../code/lambda_functions'
12 | elif 'lambda_functions' in os.listdir('./'):
13 |     relative_path_prefix = './lambda_functions'
14 | else:
15 |     relative_path_prefix = './code/lambda_functions'
16 | sys.path.append(relative_path_prefix + "/lib/")
17 | sys.path.append(relative_path_prefix + "/generate_report/")
18 |
19 | from generate_report import lambda_handler
20 |
21 | def raise_exception(*args, **kwargs):
22 |     raise Exception("Test Exception")
23 |
24 | event = {
25 |     "CodePipeline.job": {
26 |         "id": str(''),
27 |         "data": {
28 |             "inputArtifacts": [
29 |                 {
30 |                     'location': {
31 |                         's3Location': {
32 |                             'bucketName': "bucketName",
33 |                             'objectKey': "objectKey"
34 |                         }
35 |                     },
36 |                     "name": "TemplateArtifact"
37 |                 }
38 |             ],
39 |             "outputArtifacts": [
40 |                 {
41 |                     'location': {
42 |                         's3Location': {
43 |                             'bucketName': "bucketName",
44 |                             'objectKey': "objectKey"
45 |                         }
46 |                     },
47 |                     "name": "StackArtifact"
48 |                 }
49 |             ],
50 |             'actionConfiguration': {
51 |                 'configuration': {
52 |                     'UserParameters': None
53 |                 }
54 |             },
55 |             'artifactCredentials': {
56 |                 'accessKeyId': "xxx",
57 |                 'secretAccessKey': "yyy",
58 |                 'sessionToken': "zzz"
59 |             }
60 |         }
61 |     }
62 | }
63 |
64 | class MockContext(object):
65 |
66 |     def __init__(self):
67 |         self.aws_request_id = 'some-request-id'
68 |
69 | pipeline_run = mock.MagicMock(pipeline_run=None)
70 |
71 |
72 | class TestLambdaHandler(unittest.TestCase):
73 |
74 |     @mock.patch('generate_report.pipeline_run')
75 |     def test_handler_success(self, cfnpl):
76 |         cfnpl.put_job_success.return_value = None
77 |         self.assertEqual(lambda_handler(event, MockContext()), None)
78 |         cfnpl.put_job_success.assert_called()
79 |
80 |
81 |     @mock.patch('generate_report.pipeline_run.put_job_failure')
82 |     @mock.patch('generate_report.pipeline_run.consume_event')
83 |     def test_handler_failure(self, consume_event, put_job_failure):
84 |         consume_event.side_effect = raise_exception
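        # the handler is expected to trap the exception and report failure to CodePipeline rather than raise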
self.assertEqual(lambda_handler(event, MockContext()), None)
86 |         put_job_failure.assert_called()
87 |
88 | if __name__ == '__main__':
89 |     unittest.main()
-------------------------------------------------------------------------------- /code/tests/test_lint_template.py: --------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 | import sys
4 | import os
5 | import zipfile
6 | import shutil  # was missing; used below to copy config.yml into the test artifact
7 | os.environ['TOOLVERSION'] = 'latest'
8 |
9 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
10 |
11 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'):
12 |     relative_path_prefix = '../lambda_functions'
13 |     artifact_path = './test_artifact.zip'
14 |     basepath = './'
15 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'):
16 |     relative_path_prefix = '../code/lambda_functions'
17 |     artifact_path = './test_artifact.zip'
18 |     basepath = './'
19 | elif 'lambda_functions' in os.listdir('./'):
20 |     relative_path_prefix = './lambda_functions'
21 |     artifact_path = './tests/test_artifact.zip'
22 |     basepath = './tests/'
23 | else:
24 |     relative_path_prefix = './code/lambda_functions'
25 |     artifact_path = './code/tests/test_artifact.zip'
26 |     basepath = './code/tests/'
27 | sys.path.append(relative_path_prefix + "/lib/")
28 | sys.path.append(relative_path_prefix + "/lint_template/")
29 |
30 | if not os.path.exists(basepath + 'test_artifact.zip'):
31 |     orig_path = os.getcwd()
32 |     os.chdir(basepath)
33 |     ziph = zipfile.ZipFile('./test_artifact.zip', 'w', zipfile.ZIP_DEFLATED)
34 |     if 'demo_source' in os.listdir('../'):
35 |         os.chdir('../demo_source')
36 |     else:
37 |         os.chdir('../../demo_source')
38 |     for root, dirs, files in os.walk('./'):
39 |         for f in files:
40 |             if f == 'config.yml':
41 |                 shutil.copyfile(os.path.join(root, f), '/tmp/config.yml')
42 |                 fh = open('/tmp/config.yml', 'a')
43 |                 fh.write("""
44 | built_stacks:
45 |   "eu-west-1":
46 |     - test1
47 |   "us-east-1":
48 |     - test2
49 | """)
50 |                 fh.close()
51 |                 ziph.write('/tmp/config.yml', 'ci/config.yml')
52 |             else:
53 |                 ziph.write(os.path.join(root, f))
54 |     ziph.close()
55 |     os.chdir(orig_path)
56 |
57 | from lint_template import lambda_handler
58 |
59 | def raise_exception(*args, **kwargs):
60 |     raise Exception("Test Exception")
61 |
62 | event = {
63 |     "CodePipeline.job": {
64 |         "id": str(''),
65 |         "data": {
66 |             "inputArtifacts": [
67 |                 {
68 |                     'location': {
69 |                         's3Location': {
70 |                             'bucketName': "bucketName",
71 |                             'objectKey': "objectKey"
72 |                         }
73 |                     },
74 |                     "name": "TemplateArtifact"
75 |                 }
76 |             ],
77 |             "outputArtifacts": [
78 |                 {
79 |                     'location': {
80 |                         's3Location': {
81 |                             'bucketName': "bucketName",
82 |                             'objectKey': "objectKey"
83 |                         }
84 |                     },
85 |                     "name": "StackArtifact"
86 |                 }
87 |             ],
88 |             'actionConfiguration': {
89 |                 'configuration': {
90 |                     'UserParameters': None
91 |                 }
92 |             },
93 |             'artifactCredentials': {
94 |                 'accessKeyId': "xxx",
95 |                 'secretAccessKey': "yyy",
96 |                 'sessionToken': "zzz"
97 |             }
98 |         }
99 |     }
100 | }
101 |
102 | class MockContext(object):
103 |
104 |     def __init__(self):
105 |         self.aws_request_id = 'some-request-id'
106 |
107 | pipeline_run = mock.MagicMock(pipeline_run=None)
108 |
109 | class MockCodeBuildClient(object):
110 |
111 |     def __init__(self):
112 |         pass
113 |
114 |     def start_build(self, projectName, buildspecOverride):
115 |         return {
116 |             "build": {"id": "test_id", "buildStatus": "IN_PROGRESS"}
117 |         }
118 |
119 |     def batch_get_builds(self, ids):
120 |         return {
121 |             "builds": [{"buildStatus": "SUCCEEDED", "phases": None}]
122 | } 123 | 124 | 125 | class MockBotoClient(object): 126 | def __init__(self): 127 | pass 128 | def get(self, service, region=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 129 | if service == 'codebuild': 130 | return MockCodeBuildClient() 131 | else: 132 | raise ValueError("no api mock available for %s" % service) 133 | 134 | 135 | class TestLambdaHandler(unittest.TestCase): 136 | 137 | @mock.patch('lint_template.clients', MockBotoClient()) 138 | @mock.patch('lint_template.pipeline_run') 139 | def test_handler_success(self, cfnpl): 140 | cfnpl.put_job_success.return_value = None 141 | cfnpl.ci_configs = {"TemplateArtifact": [{"tests": {"default": {"template_file": "template_file.template"}}}]} 142 | cfnpl.consume_event(event, MockContext(), 'critical') 143 | self.assertEqual(lambda_handler(event, MockContext()), None) 144 | cfnpl.put_job_success.assert_called() 145 | 146 | 147 | @mock.patch('lint_template.pipeline_run.put_job_failure') 148 | @mock.patch('lint_template.pipeline_run.consume_event') 149 | def test_handler_failure(self, consume_event, put_job_failure): 150 | consume_event.side_effect = raise_exception 151 | self.assertEqual(lambda_handler(event, MockContext()), None) 152 | put_job_failure.assert_called() 153 | 154 | if __name__ == '__main__': 155 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_logger.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import logging 4 | import json 5 | from datetime import datetime 6 | import sys 7 | import os 8 | 9 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 10 | 11 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 12 | relative_path_prefix = '../lambda_functions' 13 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 14 | relative_path_prefix = '../code/lambda_functions' 15 | elif 'lambda_functions' in os.listdir('./'): 16 | relative_path_prefix = './lambda_functions' 17 | else: 18 | relative_path_prefix = './code/lambda_functions' 19 | sys.path.append(relative_path_prefix + "/lib/") 20 | 21 | from logger import Logger 22 | 23 | 24 | class Tests(unittest.TestCase): 25 | 26 | @mock.patch('logger.Logger.config') 27 | def test_init(self, config): 28 | config.return_value = None 29 | instance = Logger() 30 | self.assertEqual(instance.job_id, None) 31 | self.assertEqual(instance.request_id, 'CONTAINER_INIT') 32 | self.assertEqual(instance.original_job_id, None) 33 | config.assert_called() 34 | 35 | def test_config(self): 36 | instance = Logger() 37 | self.assertEqual(instance.config(request_id='request_id', original_job_id="original_job_id", job_id='job_id', 38 | artifact_revision_id='artifact_revision_id', pipeline_execution_id='pipeline_execution_id', 39 | pipeline_action='pipeline_action', stage_name='stage_name', pipeline_name='pipeline_name', 40 | loglevel='loglevel', botolevel='botolevel'), None) 41 | self.assertEqual(type(instance.log), logging.LoggerAdapter) 42 | self.assertEqual(logging.getLogger('boto3').level, 40) 43 | self.assertEqual(instance.log.logger.level, 20) 44 | self.assertEqual(instance.request_id, 'request_id') 45 | self.assertEqual(instance.original_job_id, 'original_job_id') 46 | self.assertEqual(instance.job_id, 'job_id') 47 | self.assertEqual(instance.pipeline_execution_id, 'pipeline_execution_id') 48 | self.assertEqual(instance.artifact_revision_id, 'artifact_revision_id') 49 | 
self.assertEqual(instance.pipeline_action, 'pipeline_action') 50 | self.assertEqual(instance.stage_name, 'stage_name') 51 | 52 | def test_set_boto_level(self): 53 | instance = Logger() 54 | self.assertEqual(instance.set_boto_level('debug'), None) 55 | self.assertEqual(logging.getLogger('boto3').level, 10) 56 | 57 | def test__format(self): 58 | instance = Logger() 59 | log_msg = json.loads(instance._format('test message')) 60 | self.assertEqual(log_msg['message'], 'test message') 61 | log_msg = json.loads(instance._format('{"test": "message"}')) 62 | self.assertEqual(log_msg['message']['test'], 'message') 63 | log_msg = json.loads(instance._format({"test": datetime(2016, 01, 01)})) 64 | self.assertEqual(log_msg['message'], "{\'test\': datetime.datetime(2016, 1, 1, 0, 0)}") 65 | 66 | @mock.patch('logging.LoggerAdapter') 67 | def test_log_levels(self, logger): 68 | instance = Logger() 69 | instance.log = logger 70 | instance.debug('I called a debug') 71 | instance.info('I called a info') 72 | instance.warning('I called a warning') 73 | instance.error('I called a error') 74 | instance.critical('I called a critical') 75 | logger.debug.assert_called() 76 | logger.info.assert_called() 77 | logger.warning.assert_called() 78 | logger.error.assert_called() 79 | logger.critical.assert_called() 80 | 81 | if __name__ == '__main__': 82 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_subnet_name.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | 6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 7 | 8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 9 | relative_path_prefix = '../lambda_functions' 10 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 11 | relative_path_prefix = '../code/lambda_functions' 12 | elif 'lambda_functions' in os.listdir('./'): 13 | relative_path_prefix = './lambda_functions' 14 | else: 15 | relative_path_prefix = './code/lambda_functions' 16 | sys.path.append(relative_path_prefix + "/lib/") 17 | sys.path.append(relative_path_prefix + "/subnet_name/") 18 | 19 | from subnet_name import lambda_handler 20 | from subnet_name import test_subnet_name 21 | 22 | 23 | def raise_exception(*args, **kwargs): 24 | raise Exception("Test Exception") 25 | 26 | event = { 27 | "CodePipeline.job": { 28 | "id": str(''), 29 | "data": { 30 | "inputArtifacts": [ 31 | { 32 | 'location': { 33 | 's3Location': { 34 | 'bucketName': "bucketName", 35 | 'objectKey': "objectKey" 36 | } 37 | }, 38 | "name": "TemplateArtifact" 39 | } 40 | ], 41 | "outputArtifacts": [ 42 | { 43 | 'location': { 44 | 's3Location': { 45 | 'bucketName': "bucketName", 46 | 'objectKey': "objectKey" 47 | } 48 | }, 49 | "name": "StackArtifact" 50 | } 51 | ], 52 | 'actionConfiguration': { 53 | 'configuration': { 54 | 'UserParameters': None 55 | } 56 | }, 57 | 'artifactCredentials': { 58 | 'accessKeyId': "xxx", 59 | 'secretAccessKey': "yyy", 60 | 'sessionToken': "zzz" 61 | } 62 | } 63 | } 64 | } 65 | 66 | class MockContext(object): 67 | 68 | def __init__(self): 69 | self.aws_request_id = 'some-request-id' 70 | 71 | pipeline_run = mock.MagicMock(pipeline_run=None) 72 | 73 | class MockEC2Client(object): 74 | 75 | def __init__(self): 76 | pass 77 | 78 | def describe_subnets(self, SubnetIds): 79 | return { 80 | "Subnets": [ 81 | { 82 | "Tags": [ 83 | { 84 | "Key": "Name", 85 | "Value": "PRIV_Subnet_DMZ" 86 | } 87 | ] 88 | } 
89 | ] 90 | } 91 | 92 | class MockBotoClient(object): 93 | def __init__(self): 94 | pass 95 | def get(self, service, region=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 96 | if service == 'ec2': 97 | return MockEC2Client() 98 | else: 99 | raise ValueError("no api mock available for %s" % service) 100 | 101 | 102 | def mock_test_stacks(test_function, resource_types): 103 | test_subnet_name('us-east-1', 'stackid', 'LogicalResourceId', "PhysicalResourceId") 104 | 105 | class TestLambdaHandler(unittest.TestCase): 106 | 107 | @mock.patch('subnet_name.clients', MockBotoClient()) 108 | @mock.patch('subnet_name.pipeline_run') 109 | def test_handler_success(self, cfnpl): 110 | cfnpl.put_job_failure.return_value = None 111 | cfnpl.test_stacks = mock_test_stacks 112 | cfnpl.ci_configs = {"TemplateArtifact": [{"tests": {"default": {"template_file": "template_file.template"}}}]} 113 | cfnpl.consume_event(event, MockContext(), 'critical') 114 | self.assertEqual(lambda_handler(event, MockContext()), None) 115 | cfnpl.put_job_failure.assert_called() 116 | 117 | 118 | @mock.patch('subnet_name.pipeline_run.put_job_failure') 119 | @mock.patch('subnet_name.pipeline_run.consume_event') 120 | def test_handler_failure(self, consume_event, put_job_failure): 121 | consume_event.side_effect = raise_exception 122 | self.assertEqual(lambda_handler(event, MockContext()), None) 123 | put_job_failure.assert_called() 124 | 125 | if __name__ == '__main__': 126 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_tcp_connect.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | import StringIO 6 | 7 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 8 | 9 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 10 | relative_path_prefix = '../lambda_functions' 11 | elif os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 12 | relative_path_prefix = '../code/lambda_functions' 13 | elif 'lambda_functions' in os.listdir('./'): 14 | relative_path_prefix = './lambda_functions' 15 | else: 16 | relative_path_prefix = './code/lambda_functions' 17 | sys.path.append(relative_path_prefix + "/lib/") 18 | sys.path.append(relative_path_prefix + "/tcp_connect/") 19 | 20 | from tcp_connect import lambda_handler 21 | from tcp_connect import test_subnet_connectivity 22 | 23 | def raise_exception(*args, **kwargs): 24 | raise Exception("Test Exception") 25 | 26 | event = { 27 | "CodePipeline.job": { 28 | "id": str(''), 29 | "data": { 30 | "inputArtifacts": [ 31 | { 32 | 'location': { 33 | 's3Location': { 34 | 'bucketName': "bucketName", 35 | 'objectKey': "objectKey" 36 | } 37 | }, 38 | "name": "TemplateArtifact" 39 | } 40 | ], 41 | "outputArtifacts": [ 42 | { 43 | 'location': { 44 | 's3Location': { 45 | 'bucketName': "bucketName", 46 | 'objectKey': "objectKey" 47 | } 48 | }, 49 | "name": "StackArtifact" 50 | } 51 | ], 52 | 'actionConfiguration': { 53 | 'configuration': { 54 | 'UserParameters': None 55 | } 56 | }, 57 | 'artifactCredentials': { 58 | 'accessKeyId': "xxx", 59 | 'secretAccessKey': "yyy", 60 | 'sessionToken': "zzz" 61 | } 62 | } 63 | } 64 | } 65 | 66 | class MockContext(object): 67 | 68 | def __init__(self): 69 | self.aws_request_id = 'some-request-id' 70 | 71 | pipeline_run = mock.MagicMock(pipeline_run=None) 72 | 73 | class MockEC2Client(object): 74 | 75 | def __init__(self): 76 | pass 77 | 78 | def 
describe_subnets(self, SubnetIds): 79 | return { 80 | "Subnets": [ 81 | { 82 | "Tags": [ 83 | { 84 | "Key": "Name", 85 | "Value": "PRIV_Subnet_DMZ" 86 | } 87 | ], 88 | "VpcId": "test-vpcid" 89 | } 90 | ] 91 | } 92 | 93 | def create_security_group(self, GroupName, Description, VpcId): 94 | return {"GroupId": "test-security-group"} 95 | 96 | def describe_network_interfaces(self, Filters): 97 | return {"NetworkInterfaces": [ 98 | {"NetworkInterfaceId": "test-eni-id"} 99 | ]} 100 | 101 | def delete_network_interface(self, NetworkInterfaceId): 102 | return None 103 | 104 | def delete_security_group(self, GroupId): 105 | return True 106 | 107 | class MockLambdaClient(object): 108 | 109 | def __init__(self): 110 | pass 111 | 112 | def create_function(self, **kwargs): 113 | return {"success": True} 114 | 115 | def invoke(self, **kwargs): 116 | resp = StringIO.StringIO() 117 | resp.write('{}') 118 | resp.seek(0) 119 | return {"Payload": resp, "StatusCode": 200} 120 | 121 | def delete_function(self, FunctionName): 122 | return None 123 | 124 | class MockIAMClient(object): 125 | 126 | def __init__(self): 127 | pass 128 | 129 | def create_role(self, RoleName, AssumeRolePolicyDocument): 130 | return {"Role": {"Arn": "arn:::iamRole"}} 131 | 132 | def put_role_policy(self, RoleName, PolicyName, PolicyDocument): 133 | return None 134 | 135 | def delete_role_policy(self, RoleName, PolicyName): 136 | return None 137 | 138 | def delete_role(self, RoleName): 139 | return None 140 | 141 | class MockBotoClient(object): 142 | def __init__(self): 143 | pass 144 | def get(self, service, region=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 145 | if service == 'ec2': 146 | return MockEC2Client() 147 | elif service == 'lambda': 148 | return MockLambdaClient() 149 | elif service == 'iam': 150 | return MockIAMClient() 151 | else: 152 | raise ValueError("no api mock available for %s" % service) 153 | def get_available_regions(self, service): 154 | return ['us-east-1'] 155 | 156 | def mock_test_stacks(test_function, resource_types, logical_resource_id_prefix): 157 | test_subnet_connectivity('us-east-1', 'stackid', 'LogicalResourceId', "PhysicalResourceId") 158 | return {"error": [], "success": ["some-id"]} 159 | 160 | class TestLambdaHandler(unittest.TestCase): 161 | 162 | @mock.patch('tcp_connect.clients', MockBotoClient()) 163 | @mock.patch('tcp_connect.pipeline_run') 164 | def test_handler_success(self, cfnpl): 165 | cfnpl.put_job_success.return_value = None 166 | cfnpl.test_stacks = mock_test_stacks 167 | self.assertEqual(lambda_handler(event, MockContext()), None) 168 | cfnpl.put_job_success.assert_called() 169 | 170 | 171 | @mock.patch('tcp_connect.pipeline_run.put_job_failure') 172 | @mock.patch('tcp_connect.pipeline_run.consume_event') 173 | def test_handler_failure(self, consume_event, put_job_failure): 174 | consume_event.side_effect = raise_exception 175 | self.assertEqual(lambda_handler(event, MockContext()), None) 176 | put_job_failure.assert_called() 177 | 178 | if __name__ == '__main__': 179 | unittest.main() -------------------------------------------------------------------------------- /code/tests/test_validate_template.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import unittest 3 | import sys 4 | import os 5 | 6 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 7 | 8 | if os.getcwd().endswith('tests') and 'lambda_functions' in os.listdir('../'): 9 | relative_path_prefix = '../lambda_functions' 10 | elif 
os.getcwd().endswith('tests') and 'source' in os.listdir('../'): 11 | relative_path_prefix = '../code/lambda_functions' 12 | elif 'lambda_functions' in os.listdir('./'): 13 | relative_path_prefix = './lambda_functions' 14 | else: 15 | relative_path_prefix = './code/lambda_functions' 16 | sys.path.append(relative_path_prefix + "/lib/") 17 | sys.path.append(relative_path_prefix + "/validate_template/") 18 | 19 | from validate_template import lambda_handler 20 | 21 | 22 | def raise_exception(*args, **kwargs): 23 | raise Exception("Test Exception") 24 | 25 | event = { 26 | "CodePipeline.job": { 27 | "id": str(''), 28 | "data": { 29 | "inputArtifacts": [ 30 | { 31 | 'location': { 32 | 's3Location': { 33 | 'bucketName': "bucketName", 34 | 'objectKey': "objectKey" 35 | } 36 | }, 37 | "name": "TemplateArtifact" 38 | } 39 | ], 40 | "outputArtifacts": [ 41 | { 42 | 'location': { 43 | 's3Location': { 44 | 'bucketName': "bucketName", 45 | 'objectKey': "objectKey" 46 | } 47 | }, 48 | "name": "StackArtifact" 49 | } 50 | ], 51 | 'actionConfiguration': { 52 | 'configuration': { 53 | 'UserParameters': None 54 | } 55 | }, 56 | 'artifactCredentials': { 57 | 'accessKeyId': "xxx", 58 | 'secretAccessKey': "yyy", 59 | 'sessionToken': "zzz" 60 | } 61 | } 62 | } 63 | } 64 | 65 | class MockContext(object): 66 | 67 | def __init__(self): 68 | self.aws_request_id = 'some-request-id' 69 | 70 | class MockCfnClient(object): 71 | 72 | def __init__(self): 73 | pass 74 | 75 | def validate_template(self, TemplateUrl): 76 | return None 77 | 78 | class MockBotoClient(object): 79 | def __init__(self): 80 | pass 81 | def get(self, service, region=None, access_key=None, secret_key=None, session_token=None, s3v4=True): 82 | if service == 'cloudformation': 83 | return MockCfnClient() 84 | else: 85 | raise ValueError("no api mock available for %s" % service) 86 | 87 | class MockCFNPipeline(object): 88 | 89 | def __init__(self): 90 | self.ci_configs = {"ScratchBucket": "scratch_bucket_name"} 91 | self.user_params = {"ScratchBucket": "scratch_bucket_name" } 92 | 93 | pipeline_run = mock.MagicMock(pipeline_run=None) 94 | 95 | 96 | class TestLambdaHandler(unittest.TestCase): 97 | 98 | @mock.patch('validate_template.pipeline_run') 99 | @mock.patch('validate_template.clients', mock.Mock(return_value=MockBotoClient())) 100 | @mock.patch('cfnpipeline.CFNPipeline.put_job_success') 101 | def test_handler_success(self, put_job_success, cfnpl): 102 | cfnpl.user_params = {"ScratchBucket": "scratch_bucket_name" } 103 | cfnpl.enable_anon_usage_reporting = 'No' 104 | cfnpl.ci_configs = {"artifact": [{"tests": {"test": {"template_file": "file.template"}}}]} 105 | cfnpl.upload_template.return_value = 'https://my-template.url/hahaha.template' 106 | cfnpl.put_job_success.return_value = None 107 | self.assertEqual(lambda_handler(event, MockContext()), None) 108 | cfnpl.put_job_success.assert_called() 109 | 110 | 111 | @mock.patch('validate_template.pipeline_run.put_job_failure') 112 | @mock.patch('validate_template.pipeline_run.consume_event') 113 | def test_handler_failure(self, consume_event, put_job_failure): 114 | consume_event.side_effect = raise_exception 115 | self.assertEqual(lambda_handler(event, MockContext()), None) 116 | put_job_failure.assert_called() 117 | 118 | if __name__ == '__main__': 119 | unittest.main() -------------------------------------------------------------------------------- /demo_source/LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 
2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /demo_source/NOTICE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 4 | 5 | http://aws.amazon.com/apache2.0/ 6 | 7 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | -------------------------------------------------------------------------------- /demo_source/README.md: -------------------------------------------------------------------------------- 1 | # quickstart-aws-vpc 2 | 3 | **This is a modified version of the [VPC QuickStart](https://github.com/aws-quickstart/quickstart-aws-vpc) that is used in the demo pipeline.**
4 | 5 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-3az-complete.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_3]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "true" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "3" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-3az-public.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_3]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "false" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "3" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 
| }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-3az.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_3]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "3" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-4az-complete.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_4]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "true" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "4" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | 
"ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-4az-public.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_4]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "false" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "4" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-4az.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_4]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 
| { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "4" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-complete.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_2]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "true" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "2" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-defaults.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_2]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "true" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "2" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": "PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/aws-vpc-public.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ParameterKey": "AvailabilityZones", 4 | "ParameterValue": "$[alfred_genaz_2]" 5 | }, 6 | { 7 | "ParameterKey": "CreateAdditionalPrivateSubnets", 8 | "ParameterValue": "false" 9 | }, 10 | { 11 | "ParameterKey": "CreatePrivateSubnets", 12 | "ParameterValue": "false" 13 | }, 14 | { 15 | "ParameterKey": "KeyPairName", 16 | "ParameterValue": "$[alfred_getkeypair]" 17 | }, 18 | { 19 | "ParameterKey": "NATInstanceType", 20 | "ParameterValue": "t2.small" 21 | }, 22 | { 23 | "ParameterKey": "NumberOfAZs", 24 | "ParameterValue": "2" 25 | }, 26 | { 27 | "ParameterKey": "PrivateSubnet1ACIDR", 28 | "ParameterValue": "10.0.0.0/19" 29 | }, 30 | { 31 | "ParameterKey": "PrivateSubnet1BCIDR", 32 | "ParameterValue": "10.0.192.0/21" 33 | }, 34 | { 35 | "ParameterKey": "PrivateSubnet2ACIDR", 36 | "ParameterValue": "10.0.32.0/19" 37 | }, 38 | { 39 | "ParameterKey": "PrivateSubnet2BCIDR", 40 | "ParameterValue": "10.0.200.0/21" 41 | }, 42 | { 43 | "ParameterKey": "PrivateSubnet3ACIDR", 44 | "ParameterValue": "10.0.64.0/19" 45 | }, 46 | { 47 | "ParameterKey": "PrivateSubnet3BCIDR", 48 | "ParameterValue": "10.0.208.0/21" 49 | }, 50 | { 51 | "ParameterKey": "PrivateSubnet4ACIDR", 52 | "ParameterValue": "10.0.96.0/19" 53 | }, 54 | { 55 | "ParameterKey": "PrivateSubnet4BCIDR", 56 | "ParameterValue": "10.0.216.0/21" 57 | }, 58 | { 59 | "ParameterKey": "PublicSubnet1CIDR", 60 | "ParameterValue": "10.0.128.0/20" 61 | }, 62 | { 63 | "ParameterKey": 
"PublicSubnet2CIDR", 64 | "ParameterValue": "10.0.144.0/20" 65 | }, 66 | { 67 | "ParameterKey": "PublicSubnet3CIDR", 68 | "ParameterValue": "10.0.160.0/20" 69 | }, 70 | { 71 | "ParameterKey": "PublicSubnet4CIDR", 72 | "ParameterValue": "10.0.176.0/20" 73 | }, 74 | { 75 | "ParameterKey": "VPCCIDR", 76 | "ParameterValue": "10.0.0.0/16" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /demo_source/ci/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | regions: 3 | - ap-northeast-1 4 | - eu-central-1 5 | - eu-west-1 6 | - us-east-1 7 | - us-east-2 8 | tests: 9 | defaults: 10 | regions: 11 | - eu-west-1 12 | - us-east-1 13 | parameter_input: aws-vpc-defaults.json 14 | template_file: aws-vpc.template -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup 3 | from setuptools.command.test import test as TestCommand 4 | from setuptools import Command 5 | import subprocess 6 | import sys 7 | 8 | 9 | def get_data_files(directories): 10 | build_docs() 11 | paths = {} 12 | for directory in directories: 13 | for (path, directories, filenames) in os.walk(directory): 14 | for filename in filenames: 15 | if not path.startswith('code/scripts'): 16 | if path not in paths.keys(): 17 | paths[path] = [] 18 | paths[path].append(os.path.join(path, filename)) 19 | data_files = [] 20 | for k in paths.keys(): 21 | dest_path = k 22 | if dest_path.startswith('code/'): 23 | dest_path = dest_path[len('code/'):] 24 | if not dest_path.startswith('scripts/'): 25 | data_files.append(('project-skeleton/validation_pipeline/' + dest_path, paths[k])) 26 | return data_files 27 | 28 | 29 | def build_docs(dest_path='code/docs'): 30 | orig_path = os.getcwd() 31 | for lib in ['awsclients', 'cfnpipeline', 'logger']: 32 | sys.path.append('%s/code/lambda_functions/lib/%s' % (orig_path, lib)) 33 | 34 | try: 35 | import pdoc 36 | except ImportError: 37 | import pip 38 | pip.main(['install', 'pdoc']) 39 | import pdoc 40 | 41 | try: 42 | import boto3 43 | except ImportError: 44 | import pip 45 | pip.main(['install', 'boto3']) 46 | import boto3 47 | 48 | try: 49 | os.mkdir(dest_path) 50 | except OSError as e: 51 | if e.errno == 17: 52 | pass 53 | else: 54 | raise 55 | 56 | for lib in ['awsclients', 'cfnpipeline', 'logger']: 57 | f = open('%s/%s/%s.html' % (orig_path, dest_path, lib), 'w') 58 | f.write(pdoc.html(lib)) 59 | f.close() 60 | 61 | 62 | class DocGen(Command): 63 | user_options = [] 64 | description = 'generate html docs with pdoc' 65 | 66 | def initialize_options(self): 67 | pass 68 | 69 | def finalize_options(self): 70 | pass 71 | 72 | @staticmethod 73 | def run(): 74 | build_docs('docs') 75 | 76 | 77 | class CustomTestCommand(TestCommand): 78 | description = 'run tests' 79 | user_options = [] 80 | 81 | def run_tests(self): 82 | self._run([sys.executable, '-m', 'unittest', 'discover', '-s', './code/tests']) 83 | 84 | @staticmethod 85 | def _run(command): 86 | try: 87 | subprocess.check_call(command) 88 | except subprocess.CalledProcessError as error: 89 | print('Command failed with exit code', error.returncode) 90 | sys.exit(error.returncode) 91 | 92 | 93 | setup( 94 | name="aws_cloudformation_validation_pipeline", 95 | version="0.0.2", 96 | author="AWS Solutions Builder", 97 | author_email="aws-solutions-builder@amazon.com", 98 | description="Authoring package 
for AWS CloudFormation Template Validation Pipeline", 99 | license="Amazon Software License 1.0", 100 | keywords="cloudformation cicd aws pipeline", 101 | url="https://github.com/awslabs/aws_cloudformation_validation_pipeline/", 102 | scripts=[ 103 | 'code/scripts/cfn-validation-pipeline-skeleton', 104 | 'code/scripts/cfn-validation-pipeline-deploy', 105 | 'code/scripts/cfn-validation-pipeline-cleanup', 106 | 'code/scripts/cfn-validation-pipeline-rollback' 107 | ], 108 | data_files=get_data_files( 109 | [ 110 | 'code', 111 | 'cloudformation', 112 | 'demo_source' 113 | ] 114 | ), 115 | classifiers=[ 116 | "Development Status :: 4 - Beta", 117 | "Programming Language :: Python :: 2.7", 118 | "Natural Language :: English", 119 | "Operating System :: POSIX :: Linux", 120 | "Topic :: Software Development :: Testing", 121 | "Environment :: Console", 122 | "Intended Audience :: Information Technology", 123 | "License :: Amazon Software License :: 1.0", 124 | ], 125 | zip_safe=False, 126 | setup_requires=[ 127 | 'pdoc' 128 | ], 129 | install_requires=[ 130 | 'boto3' 131 | ], 132 | tests_require=[ 133 | 'six', 134 | 'mock', 135 | 'boto3', 136 | 'pyyaml' 137 | ], 138 | cmdclass={ 139 | 'test': CustomTestCommand, 140 | 'build_docs': DocGen 141 | } 142 | ) --------------------------------------------------------------------------------
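For reference, a minimal sketch of how a pipeline step might feed one of the ci/*.json parameter files above into a CloudFormation test-stack launch with boto3. This is illustrative only: it assumes the $[alfred_genaz_*] and $[alfred_getkeypair] tokens have already been resolved, and the stack name and template URL are hypothetical placeholders.

```python
import json

import boto3

# Load a parameter set from the demo source; the file is already in the
# [{"ParameterKey": ..., "ParameterValue": ...}, ...] shape that the
# CloudFormation API expects. Any $[alfred_*] tokens are assumed to have
# been substituted with real values before this point.
with open('demo_source/ci/aws-vpc-defaults.json') as f:
    parameters = json.load(f)

# us-east-1 is one of the regions listed for the "defaults" test in ci/config.yml.
cfn = boto3.client('cloudformation', region_name='us-east-1')
cfn.create_stack(
    StackName='aws-vpc-defaults-test',                                    # hypothetical name
    TemplateURL='https://s3.amazonaws.com/my-bucket/aws-vpc.template',    # hypothetical bucket
    Parameters=parameters,
)
```

Within the pipeline itself this role belongs to the create_stack Lambda function under code/lambda_functions/; the sketch above only mirrors the shape of its inputs.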