├── .gitignore ├── LICENSE.txt ├── NOTICE.txt ├── README.md ├── cr-backend-substack-template.template └── examples ├── ami-lookup ├── example.template └── impl │ ├── custom-resource-runner.template │ ├── example-manifest.json │ └── lookup-ami.py ├── dns-mapping ├── example.template └── impl │ ├── bin │ └── cr-dns-processor │ ├── custom-resource-runner.template │ ├── dns-processor-handler.py │ ├── dnsprocessor.py │ ├── init │ └── cr-dns-processor │ └── runner.py ├── eip-lookup ├── README.md ├── example.template └── impl │ ├── custom-resource-runner.template │ └── lookup-eip.py ├── mount ├── example.template └── impl │ ├── create.sh │ ├── delete.sh │ └── update.sh └── schema ├── example.template └── impl ├── custom_resource_runner.template └── liquify.py /.gitignore: -------------------------------------------------------------------------------- 1 | .project 2 | .idea/ -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | 4 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 5 | 6 | 1. Definitions. 7 | 8 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 9 | 10 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 11 | 12 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 13 | 14 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 15 | 16 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 17 | 18 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 19 | 20 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 21 | 22 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 23 | 24 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 25 | 26 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 27 | 28 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 29 | 30 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 31 | 32 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 33 | 34 | You must give any other recipients of the Work or Derivative Works a copy of this License; and 35 | 36 | You must cause any modified files to carry prominent notices stating that You changed the files; and 37 | 38 | You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 39 | 40 | If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 41 | 42 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 43 | 44 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 45 | 46 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 47 | 48 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 49 | 50 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 51 | 52 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
53 | 54 | END OF TERMS AND CONDITIONS 55 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | aws-cfn-resource-bridge 2 | Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Custom resource examples for CloudFormation -------------------------------------------------------------------------------- /cr-backend-substack-template.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Builds out a backend infrastructure for handling the messaging for CloudFormation Custom Resources.", 4 | "Resources" : { 5 | "CustomResourceQueue" : { 6 | "Type": "AWS::SQS::Queue", 7 | "Properties": { 8 | "ReceiveMessageWaitTimeSeconds": "20", 9 | "VisibilityTimeout": "30" 10 | } 11 | }, 12 | "CustomResourceTopic" : { 13 | "Type" : "AWS::SNS::Topic", 14 | "Properties" : { 15 | "Subscription" : [ 16 | { "Endpoint" : { "Fn::GetAtt" : [ "CustomResourceQueue", "Arn" ] }, "Protocol" : "sqs" } 17 | ] 18 | } 19 | }, 20 | "CustomResourceQueuePolicy" : { 21 | "Type" : "AWS::SQS::QueuePolicy", 22 | "Properties" : { 23 | "PolicyDocument" : { 24 | "Version": "2008-10-17", 25 | "Id": { "Fn::Join" : [ "/", [ { "Fn::GetAtt" : [ "CustomResourceQueue", "Arn" ] }, "CustomResourceQueuePolicy" ] ] }, 26 | "Statement": [ 27 | { 28 | "Sid": "AllowTopicToPublishMessages", 29 | "Effect": "Allow", 30 | "Principal": { 31 | "AWS": "*" 32 | }, 33 | "Action": "SQS:SendMessage", 34 | "Resource": { "Fn::GetAtt" : [ "CustomResourceQueue", "Arn" ] }, 35 | "Condition": { 36 | "ArnEquals": { 37 | "aws:SourceArn": { "Ref" : "CustomResourceTopic" } 38 | } 39 | } 40 | } 41 | ] 42 | }, 43 | "Queues" : [ 44 | { "Ref" : "CustomResourceQueue" } 45 | ] 46 | } 47 | } 48 | }, 49 | "Outputs" : { 50 | "CustomResourceTopicARN" : { 51 | "Value" : { "Ref" : "CustomResourceTopic" } 52 | }, 53 | "CustomResourceQueueURL" : { 54 | "Description" : "URL of newly created SQS Queue", 55 | "Value" : { "Ref" : "CustomResourceQueue" } 56 | }, 57 | "CustomResourceQueueARN" : { 58 | "Description" : "ARN of newly created SQS Queue", 59 | "Value" : { "Fn::GetAtt" : ["CustomResourceQueue", "Arn"]} 60 | } 61 | } 62 | } -------------------------------------------------------------------------------- /examples/ami-lookup/example.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "An example of the AMI Lookup custom resource", 5 | 6 | "Parameters" : { 7 | "InstanceType" : { 8 | "Description" : "Example instance types", 9 | "Type" : "String", 10 | "Default" : "t1.micro", 11 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 12 | "ConstraintDescription" : "must be a valid EC2 instance type." 
13 | }, 14 | "AmiLookupServiceToken" : { 15 | "Description" : "ServiceToken of AMI Lookup Custom Resource", 16 | "Type" : "String", 17 | "AllowedPattern" : "arn:aws:sns:.*", 18 | "ConstraintDescription" : "must be an SNS topic ARN" 19 | } 20 | }, 21 | 22 | "Resources" : { 23 | "UbuntuInstance" : { 24 | "Type" : "AWS::EC2::Instance", 25 | "Properties" : { 26 | "ImageId" : { "Ref" : "UbuntuAmi"}, 27 | "InstanceType" : { "Ref" : "InstanceType" } 28 | } 29 | }, 30 | "WindowsInstance" : { 31 | "Type" : "AWS::EC2::Instance", 32 | "Properties" : { 33 | "ImageId" : { "Ref" : "WindowsAmi"}, 34 | "InstanceType" : { "Ref" : "InstanceType" } 35 | } 36 | }, 37 | "UbuntuAmi" : { 38 | "Type" : "Custom::AmiLookup", 39 | "Version" : "1.0", 40 | "Properties" : { 41 | "ServiceToken" : { "Ref" : "AmiLookupServiceToken" }, 42 | "os": "ubuntu", 43 | "arch": "64" 44 | } 45 | }, 46 | "WindowsAmi" : { 47 | "Type" : "Custom::AmiLookup", 48 | "Version" : "1.0", 49 | "Properties" : { 50 | "ServiceToken" : { "Ref" : "AmiLookupServiceToken" }, 51 | "os": "windows", 52 | "arch": "32", 53 | "version": "Server 2008" 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/ami-lookup/impl/custom-resource-runner.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description" : "Example stack for AMI Lookup Custom Resource Backend.", 4 | 5 | "Parameters" : { 6 | "KeyName" : { 7 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 8 | "Type" : "String" 9 | }, 10 | "InstanceType" : { 11 | "Description" : "Custom resource runner instance type", 12 | "Type" : "String", 13 | "Default" : "t1.micro", 14 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 15 | "ConstraintDescription" : "must be a valid EC2 instance type." 16 | }, 17 | "MinSize" : { 18 | "Description" : "Minimum number of custom resource runners", 19 | "Type" : "Number", 20 | "MinValue" : "1", 21 | "Default" : "1", 22 | "ConstraintDescription" : "Must have at least one runner" 23 | }, 24 | "MaxSize" : { 25 | "Description" : "Maximum number of custom resource runners", 26 | "Type" : "Number", 27 | "MinValue" : "1", 28 | "Default" : "1", 29 | "ConstraintDescription" : "Must have at least one runner" 30 | }, 31 | "SSHLocation" : { 32 | "Description" : "The IP address range that can be used to SSH to the custom resource runners", 33 | "Type" : "String", 34 | "MinLength" : "9", 35 | "MaxLength" : "18", 36 | "Default" : "0.0.0.0/0", 37 | "AllowedPattern" : "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 38 | "ConstraintDescription" : "must be a valid IP CIDR range of the form x.x.x.x/x." 
39 | }, 40 | "AmiManifestUrl": { 41 | "Description" : "URL of the ami manifest file to use for lookups", 42 | "Type" : "String", 43 | "Default" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/ami-lookup/impl/example-manifest.json" 44 | } 45 | }, 46 | 47 | "Mappings" : { 48 | "AwsRegionToAMI" : { 49 | "us-east-1" : { "id" : "ami-35792c5c" }, 50 | "us-west-2" : { "id" : "ami-d03ea1e0" }, 51 | "us-west-1" : { "id" : "ami-687b4f2d" }, 52 | "eu-west-1" : { "id" : "ami-149f7863" }, 53 | "ap-southeast-1" : { "id" : "ami-14f2b946" }, 54 | "ap-northeast-1" : { "id" : "ami-3561fe34" }, 55 | "ap-southeast-2" : { "id" : "ami-a148d59b" }, 56 | "sa-east-1" : { "id" : "ami-9f6ec982" } 57 | } 58 | }, 59 | 60 | "Resources" : { 61 | "CustomResourcePipeline" : { 62 | "Type" : "AWS::CloudFormation::Stack", 63 | "Properties" : { 64 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 65 | } 66 | }, 67 | 68 | "AuditLogTable" : { 69 | "Type" : "AWS::DynamoDB::Table", 70 | "Properties" : { 71 | "KeySchema" : { 72 | "HashKeyElement": { 73 | "AttributeName" : "Ami", 74 | "AttributeType" : "S" 75 | }, 76 | "RangeKeyElement" : { 77 | "AttributeName" : "RequestId", 78 | "AttributeType" : "S" 79 | } 80 | }, 81 | "ProvisionedThroughput" : { 82 | "ReadCapacityUnits" : "1", 83 | "WriteCapacityUnits" : "3" 84 | } 85 | } 86 | }, 87 | 88 | "RunnerRole" : { 89 | "Type" : "AWS::IAM::Role", 90 | "Properties" : { 91 | "AssumeRolePolicyDocument" : { 92 | "Version": "2008-10-17", 93 | "Statement": [{ 94 | "Effect": "Allow", 95 | "Principal": { 96 | "Service": [ "ec2.amazonaws.com" ] 97 | }, 98 | "Action": [ "sts:AssumeRole" ] 99 | }] 100 | }, 101 | "Path" : "/", 102 | "Policies" : [ 103 | { 104 | "PolicyName" : "CustomResourceRunner", 105 | "PolicyDocument" : { 106 | "Statement" : [ 107 | { 108 | "Effect" : "Allow", 109 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", "sqs:ReceiveMessage"], 110 | "Resource" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueARN"] } 111 | }, 112 | { 113 | "Effect" : "Allow", 114 | "Action" : ["dynamodb:PutItem"], 115 | "Resource" : { "Fn::Join" : ["", ["arn:aws:dynamodb:", {"Ref" : "AWS::Region"}, ":*:table/", { "Ref" : "AuditLogTable" }]] } 116 | } 117 | ] 118 | } 119 | } 120 | ] 121 | } 122 | }, 123 | 124 | "RunnerInstanceProfile" : { 125 | "Type" : "AWS::IAM::InstanceProfile", 126 | "Properties" : { 127 | "Path" : "/", 128 | "Roles" : [ { "Ref" : "RunnerRole" } ] 129 | } 130 | }, 131 | 132 | "RunnerLaunchConfig" : { 133 | "Type" : "AWS::AutoScaling::LaunchConfiguration", 134 | "Properties" : { 135 | "IamInstanceProfile" : { "Ref" : "RunnerInstanceProfile" }, 136 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 137 | "InstanceType" : { "Ref" : "InstanceType" }, 138 | "KeyName" : { "Ref" : "KeyName" }, 139 | "SecurityGroups" : [ { "Ref" : "RunnerSecurityGroup" } ], 140 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 141 | "#!/bin/bash -x\n", 142 | "exec &> /home/ec2-user/userdata.log\n", 143 | "/opt/aws/bin/cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackId" }, " -r RunnerLaunchConfig -v\n", 144 | "/opt/aws/bin/cfn-signal -e $? 
", { "Fn::Base64" : { "Ref" : "RunnerWaitConditionHandle" }}, "\n" 145 | ]] } } 146 | }, 147 | "Metadata" : { 148 | "AWS::CloudFormation::Init" : { 149 | "config" : { 150 | "packages" : { 151 | "rpm" : { 152 | "aws-cfn-resource-bridge" : "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-resource-bridge-0.1-4.noarch.rpm" 153 | } 154 | }, 155 | "files" : { 156 | "/etc/cfn/bridge.d/ami-lookup.conf" : { 157 | "content" : { "Fn::Join" : ["", [ 158 | "[ami-lookup]\n", 159 | "resource_type=Custom::AmiLookup\n", 160 | "queue_url=", { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 161 | "timeout=60\n", 162 | "default_action=/home/ec2-user/lookup-ami.py -s ", { "Ref" : "AmiManifestUrl"}, " -r ", { "Ref" : "AWS::Region" }, " -t ", { "Ref" : "AuditLogTable" } 163 | ]]} 164 | }, 165 | "/home/ec2-user/lookup-ami.py" : { 166 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/ami-lookup/impl/lookup-ami.py", 167 | "mode" : "000755", 168 | "owner" : "ec2-user" 169 | } 170 | }, 171 | "services" : { 172 | "sysvinit" : { 173 | "cfn-resource-bridge" : { 174 | "enabled" : "true", 175 | "ensureRunning" : "true", 176 | "files" : ["/etc/cfn/bridge.d/ami-lookup.conf", 177 | "/home/ec2-user/lookup-ami.py"] 178 | } 179 | } 180 | } 181 | } 182 | } 183 | } 184 | }, 185 | 186 | "RunnerAutoScalingGroup" : { 187 | "Type" : "AWS::AutoScaling::AutoScalingGroup", 188 | "Properties" : { 189 | "AvailabilityZones" : { "Fn::GetAZs" : ""}, 190 | "LaunchConfigurationName" : { "Ref" : "RunnerLaunchConfig" }, 191 | "MinSize" : { "Ref" : "MinSize" }, 192 | "MaxSize" : { "Ref" : "MaxSize" } 193 | } 194 | }, 195 | 196 | "RunnerSecurityGroup" : { 197 | "Type" : "AWS::EC2::SecurityGroup", 198 | "Properties" : { 199 | "GroupDescription" : "SSH to the runner instances", 200 | "SecurityGroupIngress" : [ 201 | { 202 | "CidrIp" : { "Ref" : "SSHLocation" }, 203 | "FromPort" : "22", 204 | "ToPort" : "22", 205 | "IpProtocol" : "tcp" 206 | } 207 | ] 208 | } 209 | }, 210 | 211 | "RunnerWaitConditionHandle" : { 212 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 213 | }, 214 | 215 | "RunnerWaitCondition" : { 216 | "Type" : "AWS::CloudFormation::WaitCondition", 217 | "DependsOn" : "RunnerAutoScalingGroup", 218 | "Properties" : { 219 | "Count" : "1", 220 | "Handle" : { "Ref" : "RunnerWaitConditionHandle" }, 221 | "Timeout" : "600" 222 | } 223 | } 224 | }, 225 | "Outputs" : { 226 | "ServiceToken" : { 227 | "Value" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceTopicARN"] }, 228 | "Description" : "Service token to use in CustomResource definitions" 229 | } 230 | } 231 | } -------------------------------------------------------------------------------- /examples/ami-lookup/impl/example-manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu": { 3 | "64": { 4 | "latest": "13.04", 5 | "13.04": { 6 | "ap-northeast-1": "ami-6dea746c", 7 | "ap-southeast-1": "ami-141d5746", 8 | "ap-southeast-2": "ami-f377eac9", 9 | "eu-west-1": "ami-5e03e429", 10 | "sa-east-1": "ami-ed47e0f0", 11 | "us-east-1": "ami-ad83d7c4", 12 | "us-west-1": "ami-7e37033b", 13 | "us-west-2": "ami-12fe6022" 14 | }, 15 | "12.04": { 16 | "ap-northeast-1": "ami-d9118cd8", 17 | "ap-southeast-1": "ami-e07239b2", 18 | "ap-southeast-2": "ami-6be67b51", 19 | "eu-west-1": "ami-78cf2a0f", 20 | "sa-east-1": "ami-bdcd6aa0", 21 | "us-east-1": "ami-53b1ff3a", 22 | "us-west-1": "ami-40350005", 23 | "us-west-2": "ami-8635a9b6" 24 | } 
25 | }, 26 | "32": { 27 | "latest": "12.04", 28 | "12.04": { 29 | "ap-northeast-1": "ami-d7118cd6", 30 | "ap-southeast-1": "ami-e27239b0", 31 | "ap-southeast-2": "ami-75e67b4f", 32 | "eu-west-1": "ami-7ccf2a0b", 33 | "sa-east-1": "ami-83cd6a9e", 34 | "us-east-1": "ami-51b1ff38", 35 | "us-west-1": "ami-44350001", 36 | "us-west-2": "ami-8435a9b4" 37 | } 38 | } 39 | }, 40 | "windows": { 41 | "64": { 42 | "latest": "server 2012", 43 | "server 2012": { 44 | "ap-northeast-1": "ami-cddf43cc", 45 | "ap-southeast-1": "ami-268cc774", 46 | "ap-southeast-2": "ami-c99904f3", 47 | "eu-west-1": "ami-a63edbd1", 48 | "sa-east-1": "ami-dfd374c2", 49 | "us-east-1": "ami-173d747e", 50 | "us-west-1": "ami-a02015e5", 51 | "us-west-2": "ami-60dc4350" 52 | }, 53 | "server 2008": { 54 | "ap-northeast-1": "ami-0fdf430e", 55 | "ap-southeast-1": "ami-628cc730", 56 | "ap-southeast-2": "ami-9d9904a7", 57 | "eu-west-1": "ami-463edb31", 58 | "sa-east-1": "ami-97d3748a", 59 | "us-east-1": "ami-7f236a16", 60 | "us-west-1": "ami-1a20155f", 61 | "us-west-2": "ami-1b3d7472" 62 | } 63 | }, 64 | "32": { 65 | "latest": "server 2008", 66 | "server 2008": { 67 | "ap-northeast-1": "ami-3dd8443c", 68 | "ap-southeast-1": "ami-ca8cc798", 69 | "ap-southeast-2": "ami-2b980511", 70 | "eu-west-1": "ami-803edbf7", 71 | "sa-east-1": "ami-cbd374d6", 72 | "us-east-1": "ami-1b3d7472", 73 | "us-west-1": "ami-ba2015ff", 74 | "us-west-2": "ami-b0dc4380" 75 | } 76 | } 77 | } 78 | } -------------------------------------------------------------------------------- /examples/ami-lookup/impl/lookup-ami.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | #============================================================================== 17 | import os 18 | import sys 19 | import re 20 | import time 21 | import logging 22 | from argparse import ArgumentParser 23 | 24 | import requests 25 | import botocore.session 26 | 27 | 28 | handler = logging.StreamHandler(sys.stderr) 29 | handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")) 30 | logging.getLogger().addHandler(handler) 31 | 32 | log = logging.getLogger('lookup-ami') 33 | log.setLevel(logging.INFO) 34 | 35 | parser = ArgumentParser(prog='lookup-ami') 36 | parser.add_argument("-s", "--source", help="The source url for the ami information", dest="source") 37 | parser.add_argument("-r", "--region", help="The region the audit trail will be written to in DynamoDB", dest="region") 38 | parser.add_argument("-t", "--audit-table", help="The DynamoDB table name to write the audit trail", dest="table_name") 39 | 40 | options = parser.parse_args() 41 | 42 | 43 | class FatalError(SystemExit): 44 | def __init__(self, reason): 45 | super(FatalError, self).__init__(-1) 46 | log.error('Failing resource: %s', reason) 47 | print u'{ "Reason": "%s" }' % reason 48 | 49 | 50 | def required(dict_in, key, field): 51 | """Ensures the key is in the dictionary, otherwise fails the resource.""" 52 | if key.lower() not in dict_in: 53 | raise FatalError(u"%s is not a recognized value for the '%s' property." % (key, field)) 54 | return dict_in[key.lower()] 55 | 56 | # Make sure we have a source to download from. 57 | if not options.source: 58 | raise FatalError(u"Service not configured to handle requests.") 59 | 60 | # Determine whether auditing should be enabled. 61 | audit = True if options.region and options.table_name else False 62 | if options.table_name and not options.region: 63 | raise FatalError(u"Region is a required parameter for auditing. Use -r/--region to specify.") 64 | 65 | # Get the request type for this 66 | request_type = os.getenv('Event_RequestType') 67 | if not request_type: 68 | raise FatalError(u"Event_RequestType was not valid.") 69 | 70 | stack_id = os.getenv('Event_StackId') 71 | if not stack_id: 72 | raise FatalError(u"Event_StackId is a required attribute.") 73 | 74 | if request_type != 'Delete': 75 | # Download the AMI manifest 76 | try: 77 | r = requests.get(options.source, verify=True) 78 | r.raise_for_status() 79 | os_bundle = r.json() 80 | except Exception, e: 81 | raise FatalError(u"Service not configured: %s" % (str(e))) 82 | 83 | # Pull in our options 84 | operating_system = os.getenv('Event_ResourceProperties_os') 85 | architecture = os.getenv('Event_ResourceProperties_arch', '64') 86 | version = os.getenv('Event_ResourceProperties_version') 87 | region = os.getenv('Event_ResourceProperties_region') 88 | 89 | # Validate we have the required fields. 90 | if not operating_system: 91 | raise FatalError(u"'os' is a required property") 92 | 93 | # Try to parse region from stackId if not provided by stack 94 | if not region: 95 | match = re.match(r"arn:aws:cloudformation:([^:]+):.*", stack_id, re.I) 96 | if not match: 97 | raise FatalError(u"Unable to determine region. Provide 'region' property for this resource.") 98 | region = match.group(1) 99 | 100 | # Locate our operating system/architecture. 101 | arch_map = required(required(os_bundle, operating_system, 'os'), architecture, 'arch') 102 | 103 | # Handle our version in a special manner, since they can specify 'latest'. 
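# Worked example, based on the example-manifest.json shipped with this example: a
# request with os=ubuntu and arch=64 but no 'version' resolves the version via
# manifest["ubuntu"]["64"]["latest"] == "13.04", and the AMI is then taken from
# manifest["ubuntu"]["64"]["13.04"][region].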
104 | if not version or version.lower() == 'latest': 105 | if 'latest' not in arch_map: 106 | raise FatalError(u"Latest version not defined for %s/%s. 'version' property is required.") 107 | version = arch_map['latest'] 108 | 109 | # Locate our ami based on version/region. 110 | ami = required(required(arch_map, version, 'version'), region, 'region') 111 | else: 112 | # When auditing for delete, we want the existing AMI, not a new one (if manifest has changed) 113 | ami = os.getenv('Event_PhysicalResourceId') 114 | 115 | # Write our audit trail to DynamoDB to track ami usage 116 | if audit: 117 | try: 118 | request_id = os.getenv('Event_RequestId') 119 | logical_id = os.getenv('Event_LogicalResourceId') 120 | if not (request_id and logical_id): 121 | raise FatalError(u"Event_RequestId and Event_LogicalResourceId are required for auditing.") 122 | 123 | ddb = botocore.session.get_session().get_service("dynamodb") 124 | put = ddb.get_operation("PutItem") 125 | http_response, response_data = put.call(ddb.get_endpoint(options.region), 126 | table_name=options.table_name, 127 | item={ 128 | "Ami": {"S": ami}, 129 | "RequestId": {"S": request_id}, 130 | "StackId": {"S": stack_id}, 131 | "LogicalResourceId": {"S": logical_id}, 132 | "Time": {"N": str(time.time())}, 133 | "RequestType": {"S": request_type} 134 | }) 135 | 136 | # If we are auditing and can't log, fail the request. 137 | if http_response.status_code != 200: 138 | raise FatalError(u"Failed putting audit log (%s): %s: " % (http_response.status_code, response_data)) 139 | except Exception, e: 140 | raise FatalError(u"Unhandled exception creating audit log: %s" % e) 141 | 142 | # Write out our successful response! 143 | if request_type != 'Delete': 144 | print u'{ "PhysicalResourceId" : "%s", "Data": { "os": "%s", "arch": "%s", "version": "%s", "region": "%s", ' \ 145 | u'"ami": "%s" } }' % (ami, operating_system.lower(), architecture.lower(), version.lower(), region.lower(), 146 | ami) 147 | else: 148 | print u"{}" 149 | -------------------------------------------------------------------------------- /examples/dns-mapping/example.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "Runs a VNC server fleet that binds instances to domain names.", 5 | 6 | "Parameters" : { 7 | "KeyName" : { 8 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 9 | "Type" : "String" 10 | }, 11 | "InstanceType" : { 12 | "Description" : "VNC host instance type", 13 | "Type" : "String", 14 | "Default" : "t1.micro", 15 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 16 | "ConstraintDescription" : "must be a valid EC2 instance type." 
17 | }, 18 | "MinSize" : { 19 | "Description" : "Minimum number of VNC hosts", 20 | "Type" : "Number", 21 | "MinValue" : "1", 22 | "Default" : "1", 23 | "ConstraintDescription" : "Must have at least one host" 24 | }, 25 | "MaxSize" : { 26 | "Description" : "Maximum number of VNC hosts", 27 | "Type" : "Number", 28 | "MinValue" : "1", 29 | "Default" : "1", 30 | "ConstraintDescription" : "Must have at least one host" 31 | }, 32 | "SSHLocation" : { 33 | "Description" : "The IP address range that can be used to SSH into hosts", 34 | "Type": "String", 35 | "MinLength": "9", 36 | "MaxLength": "18", 37 | "Default": "0.0.0.0/0", 38 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 39 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 40 | }, 41 | "HostedZoneId" : { 42 | "Description" : "The Hosted Zone Id to use when assigning DNS mappings", 43 | "Type" : "String" 44 | }, 45 | "DNSProcessorServiceToken" : { 46 | "Description" : "ServiceToken of DNS Processor Custom Resource", 47 | "Type" : "String", 48 | "AllowedPattern" : "arn:aws:sns:.*", 49 | "ConstraintDescription" : "must be an SNS topic ARN" 50 | } 51 | }, 52 | 53 | "Mappings" : { 54 | "AwsRegionToAMI" : { 55 | "ap-northeast-1" : { "id" : "ami-6dea746c" }, 56 | "ap-southeast-1" : { "id" : "ami-141d5746" }, 57 | "ap-southeast-2" : { "id" : "ami-f377eac9" }, 58 | "eu-west-1" : { "id" : "ami-5e03e429" }, 59 | "us-east-1" : { "id" : "ami-ad83d7c4" }, 60 | "us-west-1" : { "id" : "ami-7e37033b" }, 61 | "us-west-2" : { "id" : "ami-12fe6022" }, 62 | "sa-east-1" : { "id" : "ami-ed47e0f0" } 63 | } 64 | }, 65 | 66 | "Resources" : { 67 | "DNSProcessor" : { 68 | "Type" : "Custom::DNSProcessor", 69 | "Version" : "1.0", 70 | "Properties" : { 71 | "ServiceToken" : { "Ref" : "DNSProcessorServiceToken" }, 72 | "DNSPattern" : { "Fn::Join" : [".", ["{{simpsons_name}}", { "Ref" : "AWS::Region" }, "{{hosted_zone_name}}"]] }, 73 | "HostedZoneId" : { "Ref" : "HostedZoneId" } 74 | } 75 | }, 76 | 77 | "VncLaunchConfig" : { 78 | "Type" : "AWS::AutoScaling::LaunchConfiguration", 79 | "Properties" : { 80 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 81 | "InstanceType" : { "Ref" : "InstanceType" }, 82 | "KeyName" : { "Ref" : "KeyName" }, 83 | "SecurityGroups" : [ { "Ref" : "VncSecurityGroup" } ], 84 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 85 | "#!/bin/bash -x\n", 86 | "exec &> /home/ubuntu/userdata.log\n", 87 | "function error_exit\n", 88 | "{\n", 89 | " cfn-signal -e 1 -r \"$1\" '", { "Ref" : "VncWaitConditionHandle" }, "'\n", 90 | " exit 1\n", 91 | "}\n", 92 | "apt-get update || error_exit 'Failed to update package repository'\n", 93 | "apt-get -y install python-pip || error_exit 'Failed to install python-pip'\n", 94 | "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n", 95 | "cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackName" }, " -r VncLaunchConfig || error_exit 'Failed to run cfn-init'\n", 96 | "/etc/init.d/vncserver start || error_exit 'Failed to start vnc server'\n", 97 | "cfn-signal -e $? 
", { "Fn::Base64" : { "Ref" : "VncWaitConditionHandle" }}, "\n" 98 | ]] } } 99 | }, 100 | "Metadata" : { 101 | "AWS::CloudFormation::Init" : { 102 | "config" : { 103 | "packages" : { 104 | "apt" : { 105 | "tightvncserver" : [], 106 | "fluxbox" : [] 107 | } 108 | }, 109 | "files" : { 110 | "/etc/init.d/vncserver" : { 111 | "content" : { "Fn::Join" : ["\n", [ 112 | "#!/bin/sh -e", 113 | "### BEGIN INIT INFO", 114 | "# Provides: vncserver", 115 | "# Required-Start: networking", 116 | "# Default-Start: S", 117 | "# Default-Stop: 0 6", 118 | "### END INIT INFO", 119 | "export USER=\"ubuntu\"", 120 | "OPTIONS=\"-depth 16 -geometry 1024x768 :1\"", 121 | 122 | ". /lib/lsb/init-functions", 123 | 124 | "case \"$1\" in", 125 | "start)", 126 | "su ${USER} -c \"/usr/bin/vncserver ${OPTIONS}\"", 127 | ";;", 128 | 129 | "stop)", 130 | "su ${USER} -c \"/usr/bin/vncserver -kill :1\"", 131 | ";;", 132 | "restart)", 133 | "su ${USER} -c \"/usr/bin/vncserver -kill :1\"", 134 | "su ${USER} -c \"/usr/bin/vncserver ${OPTIONS}\"", 135 | ";;", 136 | "esac" 137 | ]]}, 138 | "owner" : "root", 139 | "group" : "root", 140 | "mode" : "000755" 141 | 142 | }, 143 | "/home/ubuntu/.vnc/passwd" : { 144 | "content" : "X6670O8KJBM=", 145 | "encoding" : "base64", 146 | "mode" : "000600", 147 | "owner" : "ubuntu", 148 | "group" : "ubuntu" 149 | }, 150 | "/home/ubuntu/.vnc/xstartup" : { 151 | "content" : { "Fn::Join" : ["\n", [ 152 | "#!/bin/sh", 153 | "xrdb $HOME/.Xresources", 154 | "xsetroot -solid grey", 155 | "/usr/bin/startfluxbox &", 156 | "# Fix to make GNOME work", 157 | "export XKL_XMODMAP_DISABLE=1", 158 | "/etc/X11/Xsession" 159 | ]]}, 160 | "mode" : "000755", 161 | "owner" : "ubuntu", 162 | "group" : "ubuntu" 163 | } 164 | }, 165 | "commands" : { 166 | "01-start-vnc-on-boot": { 167 | "command" : "update-rc.d vncserver defaults" 168 | }, 169 | "02-change-ownership-of-.vnc": { 170 | "command" : "chown ubuntu:ubuntu /home/ubuntu/.vnc" 171 | }, 172 | "03-set-permissions-of-.vnc": { 173 | "command" : "chmod 700 /home/ubuntu/.vnc" 174 | } 175 | } 176 | } 177 | } 178 | } 179 | }, 180 | 181 | "VncAutoScalingGroup" : { 182 | "Type" : "AWS::AutoScaling::AutoScalingGroup", 183 | "Properties" : { 184 | "AvailabilityZones" : { "Fn::GetAZs" : ""}, 185 | "LaunchConfigurationName" : { "Ref" : "VncLaunchConfig" }, 186 | "MinSize" : { "Ref" : "MinSize" }, 187 | "MaxSize" : { "Ref" : "MaxSize" }, 188 | "NotificationConfiguration" : { 189 | "TopicARN" : { "Fn::GetAtt" : ["DNSProcessor", "Topic"] }, 190 | "NotificationTypes" : [ "autoscaling:EC2_INSTANCE_LAUNCH","autoscaling:EC2_INSTANCE_TERMINATE"] 191 | }, 192 | "Tags" : [ 193 | { 194 | "Key" : "ProcessorId", 195 | "Value" : { "Ref" : "DNSProcessor" }, 196 | "PropagateAtLaunch" : false 197 | } 198 | ] 199 | } 200 | }, 201 | 202 | "VncSecurityGroup" : { 203 | "Type" : "AWS::EC2::SecurityGroup", 204 | "Properties" : { 205 | "GroupDescription" : "SSH to the vnc instances", 206 | "SecurityGroupIngress" : [ 207 | { 208 | "CidrIp" : { "Ref" : "SSHLocation" }, 209 | "FromPort" : "22", 210 | "ToPort" : "22", 211 | "IpProtocol" : "tcp" 212 | } 213 | ] 214 | } 215 | }, 216 | 217 | "VncWaitConditionHandle" : { 218 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 219 | }, 220 | 221 | "VncWaitCondition" : { 222 | "Type" : "AWS::CloudFormation::WaitCondition", 223 | "DependsOn" : "VncAutoScalingGroup", 224 | "Properties" : { 225 | "Count" : { "Ref" : "MinSize"}, 226 | "Handle" : { "Ref" : "VncWaitConditionHandle" }, 227 | "Timeout" : "600" 228 | } 229 | } 230 | }, 231 | 232 | "Outputs" : { 233 
| "SecurityGroup" : { 234 | "Description" : "Security group of the vnc hosts", 235 | "Value" : { "Ref" : "VncSecurityGroup" } 236 | } 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /examples/dns-mapping/impl/bin/cr-dns-processor: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #============================================================================== 4 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | #============================================================================== 18 | from argparse import ArgumentParser 19 | import ConfigParser 20 | import StringIO 21 | import logging 22 | import logging.config 23 | import os 24 | import threading 25 | import datetime 26 | import sys 27 | 28 | from runner import AutoScalingNotificationRunner 29 | 30 | default_confdir = '/etc/cfn' 31 | 32 | parser = ArgumentParser(prog='cr-dns-processor') 33 | 34 | parser.add_argument("-c", "--config", help="The configuration directory (default: %s)" % default_confdir, 35 | dest="config_path", default=default_confdir) 36 | parser.add_argument("--no-daemon", help="Do not daemonize", dest="no_daemon", action="store_true") 37 | parser.add_argument("-v", "--verbose", help="Enables verbose logging", action="store_true", dest="verbose") 38 | parser.add_argument("-t", "--threads", help="Configure the number of threads to use", type=int, dest="threads") 39 | 40 | options = parser.parse_args() 41 | 42 | def _parse_config(config_file): 43 | """Parses the provided configuration; returns options from all sections merged 44 | 45 | When provided with a valid configuration file, will load all of the sections and single dictionary 46 | of all the options merged together. 47 | """ 48 | config = ConfigParser.SafeConfigParser() 49 | config.read(config_file) 50 | 51 | options = {} 52 | for section in config.sections(): 53 | # Convert configuration options into dictionary (lowercasing all keys) 54 | options.update(dict((i[0].lower(), i[1]) for i in config.items(section))) 55 | 56 | return options 57 | 58 | def _load_configuration(config_dir): 59 | """Locates and parses configuration files 60 | 61 | Given a configuration directory, reads in the cr-dns-processor.conf file. 62 | """ 63 | config_file = os.path.join(config_dir, 'cr-dns-processor.conf') 64 | 65 | # Add the default configuration file if it exists 66 | if not os.path.isfile(config_file): 67 | raise ValueError(u"Could not find default configuration file, %s" % config_file) 68 | 69 | # Load our configuration 70 | options = _parse_config(config_file) 71 | 72 | # Fail if we have not found any options. 
73 | if not options: 74 | raise ValueError(u"No configuration options were defined in %s" % config_file) 75 | 76 | if not options.get('queue_url') or not options.get('region'): 77 | raise ValueError(u"Configuration must contain a queue_url and region option") 78 | 79 | if not options.get('table'): 80 | raise ValueError(u"Configuration must contain a table option") 81 | 82 | return options 83 | 84 | def main(): 85 | # Configure our logger 86 | _config = """[loggers] 87 | keys=root,crdnsprocessor 88 | [handlers] 89 | keys=default 90 | [formatters] 91 | keys=amzn 92 | [logger_root] 93 | level=NOTSET 94 | handlers=default 95 | [logger_crdnsprocessor] 96 | level=NOTSET 97 | handlers=default 98 | qualname=cr.dnsprocessor 99 | propagate=0 100 | [handler_default] 101 | class=handlers.RotatingFileHandler 102 | level=%(conf_level)s 103 | formatter=amzn 104 | args=('/var/log/cr-dns-processor.log', 'a', 5242880, 5, 'UTF-8') 105 | [formatter_amzn] 106 | format=%(asctime)s [%(levelname)s] %(message)s 107 | datefmt= 108 | class=logging.Formatter 109 | """ 110 | logging.config.fileConfig(StringIO.StringIO(_config), {'conf_level': "DEBUG" if options.verbose else "INFO"}) 111 | 112 | # Require there to be a configuration path, default should handle when not specified. 113 | if not options.config_path: 114 | print >> sys.stderr, u"Error: A configuration path must be specified" 115 | parser.print_help(sys.stderr) 116 | sys.exit(1) 117 | 118 | # Ensure that the configuration path really exists, since we expect the config file to be there. 119 | if not os.path.isdir(options.config_path): 120 | print >> sys.stderr, u"Error: Could not find configuration at %s" % options.config_path 121 | sys.exit(1) 122 | 123 | try: 124 | config = _load_configuration(options.config_path) 125 | except Exception, ex: 126 | logging.exception("Failed to load configuration") 127 | print >> sys.stderr, u"Error: Failed to load configuration: %s" % str(ex) 128 | sys.exit(1) 129 | 130 | # Construct our runner to monitor queue 131 | runner = AutoScalingNotificationRunner(config['queue_url'], config['region'], config['table'], num_threads=options.threads) 132 | 133 | # Start processing messages 134 | runner.process_messages() 135 | 136 | wait_event = threading.Event() 137 | 138 | # Wait until process is killed 139 | while True: 140 | try: 141 | # do this instead of wait() without timeout 142 | # as for some reason interrupts will not happen unless you wait for a specified time 143 | # (even if the wait is for a long time, the interrupt comes immediately) 144 | wait_event.wait(60) 145 | except KeyboardInterrupt: 146 | sys.exit(0) 147 | 148 | if options.no_daemon: 149 | main() 150 | else: 151 | try: 152 | import daemon 153 | except ImportError: 154 | print >> sys.stderr, u"Daemon library was not installed; please install python-daemon" 155 | sys.exit(1) 156 | 157 | try: 158 | from daemon import pidlockfile 159 | except ImportError: 160 | from daemon import pidfile as pidlockfile 161 | 162 | with daemon.DaemonContext(pidfile=pidlockfile.TimeoutPIDLockFile('/var/run/cr-dns-processor.pid', 300)): 163 | try: 164 | main() 165 | except Exception, e: 166 | logging.exception("Unhandled exception") 167 | sys.exit(1) 168 | -------------------------------------------------------------------------------- /examples/dns-mapping/impl/custom-resource-runner.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description" : "Example stack for AMI Lookup Custom 
Resource Backend.", 4 | 5 | "Parameters" : { 6 | "KeyName" : { 7 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 8 | "Type" : "String" 9 | }, 10 | "InstanceType" : { 11 | "Description" : "Custom resource runner instance type", 12 | "Type" : "String", 13 | "Default" : "t1.micro", 14 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 15 | "ConstraintDescription" : "must be a valid EC2 instance type." 16 | }, 17 | "MinSize" : { 18 | "Description" : "Minimum number of custom resource runners", 19 | "Type" : "Number", 20 | "MinValue" : "1", 21 | "Default" : "1", 22 | "ConstraintDescription" : "Must have at least one runner" 23 | }, 24 | "MaxSize" : { 25 | "Description" : "Maximum number of custom resource runners", 26 | "Type" : "Number", 27 | "MinValue" : "1", 28 | "Default" : "1", 29 | "ConstraintDescription" : "Must have at least one runner" 30 | }, 31 | "SSHLocation" : { 32 | "Description" : "The IP address range that can be used to SSH to the custom resource runners", 33 | "Type" : "String", 34 | "MinLength" : "9", 35 | "MaxLength" : "18", 36 | "Default" : "0.0.0.0/0", 37 | "AllowedPattern" : "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 38 | "ConstraintDescription" : "must be a valid IP CIDR range of the form x.x.x.x/x." 39 | } 40 | }, 41 | 42 | "Mappings" : { 43 | "AwsRegionToAMI" : { 44 | "us-east-1" : { "id" : "ami-35792c5c" }, 45 | "us-west-2" : { "id" : "ami-d03ea1e0" }, 46 | "us-west-1" : { "id" : "ami-687b4f2d" }, 47 | "eu-west-1" : { "id" : "ami-149f7863" }, 48 | "ap-southeast-1" : { "id" : "ami-14f2b946" }, 49 | "ap-northeast-1" : { "id" : "ami-3561fe34" }, 50 | "ap-southeast-2" : { "id" : "ami-a148d59b" }, 51 | "sa-east-1" : { "id" : "ami-9f6ec982" } 52 | } 53 | }, 54 | 55 | "Resources" : { 56 | "CustomResourcePipeline" : { 57 | "Type" : "AWS::CloudFormation::Stack", 58 | "Properties" : { 59 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 60 | } 61 | }, 62 | 63 | "AutoScalingNotificationPipeline" : { 64 | "Type" : "AWS::CloudFormation::Stack", 65 | "Properties" : { 66 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 67 | } 68 | }, 69 | 70 | "NameStatusTable" : { 71 | "Type" : "AWS::DynamoDB::Table", 72 | "Properties" : { 73 | "KeySchema" : { 74 | "HashKeyElement": { 75 | "AttributeName" : "ProcessorId", 76 | "AttributeType" : "S" 77 | } 78 | }, 79 | "ProvisionedThroughput" : { 80 | "ReadCapacityUnits" : "3", 81 | "WriteCapacityUnits" : "3" 82 | } 83 | } 84 | }, 85 | 86 | "RunnerRole" : { 87 | "Type" : "AWS::IAM::Role", 88 | "Properties" : { 89 | "AssumeRolePolicyDocument" : { 90 | "Version": "2008-10-17", 91 | "Statement": [{ 92 | "Effect": "Allow", 93 | "Principal": { 94 | "Service": [ "ec2.amazonaws.com" ] 95 | }, 96 | "Action": [ "sts:AssumeRole" ] 97 | }] 98 | }, 99 | "Path" : "/", 100 | "Policies" : [ 101 | { 102 | "PolicyName" : "CustomResourceRunner", 103 | "PolicyDocument" : { 104 | "Statement" : [ 105 | { 106 | "Effect" : "Allow", 107 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", "sqs:ReceiveMessage"], 108 | "Resource" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueARN"] } 109 | }, 110 | { 111 | "Effect" : "Allow", 112 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", 
"sqs:ReceiveMessage"], 113 | "Resource" : { "Fn::GetAtt" : ["AutoScalingNotificationPipeline", "Outputs.CustomResourceQueueARN"] } 114 | }, 115 | { 116 | "Effect" : "Allow", 117 | "Action" : ["dynamodb:PutItem", "dynamodb:GetItem", "dynamodb:DeleteItem"], 118 | "Resource" : { "Fn::Join" : ["", ["arn:aws:dynamodb:", {"Ref" : "AWS::Region"}, ":*:table/", { "Ref" : "NameStatusTable" }]] } 119 | }, 120 | { 121 | "Effect" : "Allow", 122 | "Action" : ["route53:ChangeResourceRecordSets", "route53:GetHostedZone", "autoscaling:DescribeTags", "ec2:DescribeInstances"], 123 | "Resource" : "*" 124 | } 125 | ] 126 | } 127 | } 128 | ] 129 | } 130 | }, 131 | 132 | "RunnerInstanceProfile" : { 133 | "Type" : "AWS::IAM::InstanceProfile", 134 | "Properties" : { 135 | "Path" : "/", 136 | "Roles" : [ { "Ref" : "RunnerRole" } ] 137 | } 138 | }, 139 | 140 | "RunnerLaunchConfig" : { 141 | "Type" : "AWS::AutoScaling::LaunchConfiguration", 142 | "Properties" : { 143 | "IamInstanceProfile" : { "Ref" : "RunnerInstanceProfile" }, 144 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 145 | "InstanceType" : { "Ref" : "InstanceType" }, 146 | "KeyName" : { "Ref" : "KeyName" }, 147 | "SecurityGroups" : [ { "Ref" : "RunnerSecurityGroup" } ], 148 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 149 | "#!/bin/bash -x\n", 150 | "exec &> /home/ec2-user/userdata.log\n", 151 | "/opt/aws/bin/cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackId" }, " -r RunnerLaunchConfig -v\n", 152 | "/opt/aws/bin/cfn-signal -e $? ", { "Fn::Base64" : { "Ref" : "RunnerWaitConditionHandle" }}, "\n" 153 | ]] } } 154 | }, 155 | "Metadata" : { 156 | "AWS::CloudFormation::Init" : { 157 | "config" : { 158 | "packages" : { 159 | "rpm" : { 160 | "aws-cfn-resource-bridge" : "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-resource-bridge-0.1-4.noarch.rpm" 161 | } 162 | }, 163 | "files" : { 164 | "/etc/cfn/bridge.d/dns-processor.conf" : { 165 | "content" : { "Fn::Join" : ["", [ 166 | "[dns-processor]\n", 167 | "resource_type=Custom::DNSProcessor\n", 168 | "flatten=false\n", 169 | "queue_url=", { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 170 | "timeout=30\n", 171 | "default_action=/home/ec2-user/dns-processor-handler.py -r ", { "Ref" : "AWS::Region" }, " -t ", { "Ref" : "NameStatusTable" }, " -p \"", { "Fn::GetAtt" : ["AutoScalingNotificationPipeline", "Outputs.CustomResourceTopicARN"] }, "\"\n" 172 | ]]} 173 | }, 174 | "/etc/cfn/cr-dns-processor.conf" : { 175 | "content" : { "Fn::Join" : ["", [ 176 | "[cr-dns-processor]\n", 177 | "queue_url=", { "Fn::GetAtt" : ["AutoScalingNotificationPipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 178 | "region=", { "Ref" : "AWS::Region" }, "\n", 179 | "table=", { "Ref" : "NameStatusTable" }, "\n" 180 | ]]} 181 | }, 182 | "/home/ec2-user/dns-processor-handler.py" : { 183 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/dns-mapping/impl/dns-processor-handler.py", 184 | "mode" : "000755", 185 | "owner" : "ec2-user" 186 | }, 187 | "/home/ec2-user/dnsprocessor.py" : { 188 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/dns-mapping/impl/dnsprocessor.py", 189 | "mode" : "000755", 190 | "owner" : "ec2-user" 191 | }, 192 | "/home/ec2-user/runner.py" : { 193 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/dns-mapping/impl/runner.py", 194 | "mode" : "000755", 195 | "owner" 
: "ec2-user" 196 | }, 197 | "/etc/init.d/cr-dns-processor" : { 198 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/dns-mapping/impl/init/cr-dns-processor", 199 | "mode" : "000755", 200 | "owner" : "root", 201 | "group" : "root" 202 | }, 203 | "/home/ec2-user/cr-dns-processor" : { 204 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/dns-mapping/impl/bin/cr-dns-processor", 205 | "mode" : "000755", 206 | "owner" : "ec2-user" 207 | } 208 | }, 209 | "services" : { 210 | "sysvinit" : { 211 | "cfn-resource-bridge" : { 212 | "enabled" : "true", 213 | "ensureRunning" : "true", 214 | "files" : ["/etc/cfn/bridge.d/dns-processor.conf", 215 | "/home/ec2-user/dns-processor-handler.py"] 216 | }, 217 | "cr-dns-processor" : { 218 | "enabled" : "true", 219 | "ensureRunning" : "true", 220 | "files" : ["/etc/cfn/cr-dns-processor.conf", 221 | "/home/ec2-user/dnsprocessor.py", 222 | "/home/ec2-user/runner.py", 223 | "/home/ec2-user/cr-dns-processor"] 224 | } 225 | } 226 | } 227 | } 228 | } 229 | } 230 | }, 231 | 232 | "RunnerAutoScalingGroup" : { 233 | "Type" : "AWS::AutoScaling::AutoScalingGroup", 234 | "Properties" : { 235 | "AvailabilityZones" : { "Fn::GetAZs" : ""}, 236 | "LaunchConfigurationName" : { "Ref" : "RunnerLaunchConfig" }, 237 | "MinSize" : { "Ref" : "MinSize" }, 238 | "MaxSize" : { "Ref" : "MaxSize" } 239 | } 240 | }, 241 | 242 | "RunnerSecurityGroup" : { 243 | "Type" : "AWS::EC2::SecurityGroup", 244 | "Properties" : { 245 | "GroupDescription" : "SSH to the runner instances", 246 | "SecurityGroupIngress" : [ 247 | { 248 | "CidrIp" : { "Ref" : "SSHLocation" }, 249 | "FromPort" : "22", 250 | "ToPort" : "22", 251 | "IpProtocol" : "tcp" 252 | } 253 | ] 254 | } 255 | }, 256 | 257 | "RunnerWaitConditionHandle" : { 258 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 259 | }, 260 | 261 | "RunnerWaitCondition" : { 262 | "Type" : "AWS::CloudFormation::WaitCondition", 263 | "DependsOn" : "RunnerAutoScalingGroup", 264 | "Properties" : { 265 | "Count" : "1", 266 | "Handle" : { "Ref" : "RunnerWaitConditionHandle" }, 267 | "Timeout" : "600" 268 | } 269 | } 270 | }, 271 | "Outputs" : { 272 | "ServiceToken" : { 273 | "Value" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceTopicARN"] }, 274 | "Description" : "Service token to use in CustomResource definitions" 275 | } 276 | } 277 | } -------------------------------------------------------------------------------- /examples/dns-mapping/impl/dns-processor-handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | #============================================================================== 17 | import os 18 | import sys 19 | import logging 20 | 21 | from argparse import ArgumentParser 22 | from dnsprocessor import DNSProcessor 23 | from dnsprocessor import FatalError 24 | 25 | try: 26 | import simplejson as json 27 | except ImportError: 28 | import json 29 | 30 | handler = logging.StreamHandler(sys.stderr) 31 | handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")) 32 | logging.getLogger().addHandler(handler) 33 | 34 | log = logging.getLogger('dns-processor-handler') 35 | log.setLevel(logging.INFO) 36 | 37 | parser = ArgumentParser(prog='dns-processor-handler') 38 | parser.add_argument("-r", "--region", help="The region the DynamoDB table lives in", dest="region") 39 | parser.add_argument("-t", "--table", help="The DynamoDB table name to write name states", dest="table_name") 40 | parser.add_argument("-p", "--topic", help="The SNS Topic to send ASG notifications to", dest="topic") 41 | 42 | options = parser.parse_args() 43 | 44 | if not options.topic: 45 | raise FatalError(u"Topic is a required parameter. Use -p/--topic to specify.") 46 | 47 | if not options.table_name: 48 | raise FatalError(u"Name table is a required parameter. Use -t/--table to specify.") 49 | 50 | if not options.region: 51 | raise FatalError(u"Region is a required parameter. Use -r/--region to specify.") 52 | 53 | try: 54 | event_obj = json.loads(os.environ.get('EventProperties')) 55 | log.info(u"Received event: %s", json.dumps(event_obj, indent=4)) 56 | except ValueError: 57 | raise FatalError(u"Could not parse properties as JSON") 58 | 59 | resource_properties = event_obj.get('ResourceProperties') 60 | 61 | if not resource_properties: 62 | raise FatalError(u"ResourceProperties not found.") 63 | 64 | stack_id = event_obj['StackId'] 65 | logical_id = event_obj['LogicalResourceId'] 66 | request_id = event_obj['RequestId'] 67 | request_type = event_obj['RequestType'] 68 | 69 | hosted_zone_id = resource_properties.get('HostedZoneId') 70 | dns_pattern = resource_properties.get('DNSPattern') 71 | 72 | if not hosted_zone_id: 73 | raise FatalError(u"HostedZoneId is a required property.") 74 | 75 | if not dns_pattern: 76 | raise FatalError(u"DNSPattern is a required property.") 77 | 78 | processor = DNSProcessor(options.topic, options.table_name, options.region) 79 | 80 | if request_type == 'Update': 81 | old_resource_properties = event_obj.get('OldResourceProperties') 82 | old_physical_id = event_obj['PhysicalResourceId'] 83 | 84 | if not old_resource_properties: 85 | raise FatalError(u"OldResourceProperties not found.") 86 | 87 | print processor.update_processor(old_physical_id, stack_id, logical_id, request_id, hosted_zone_id, dns_pattern) 88 | elif request_type == 'Create': 89 | print processor.create_processor(stack_id, logical_id, request_id, hosted_zone_id, dns_pattern) 90 | elif request_type == 'Delete': 91 | physical_id = event_obj['PhysicalResourceId'] 92 | print processor.delete_processor(physical_id, hosted_zone_id) -------------------------------------------------------------------------------- /examples/dns-mapping/impl/dnsprocessor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | import logging 18 | import hashlib 19 | import sys 20 | import pystache 21 | 22 | import botocore.session 23 | 24 | try: 25 | import simplejson as json 26 | except ImportError: 27 | import json 28 | 29 | handler = logging.StreamHandler(sys.stderr) 30 | handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")) 31 | logging.getLogger().addHandler(handler) 32 | 33 | log = logging.getLogger('dnsprocessor') 34 | log.setLevel(logging.INFO) 35 | 36 | _NAMES = sorted(['bart', 'lisa', 'homer', 'marge', 'maggie', 'patty', 'selma', 'abe', 'mona', 'moe', 'artie']) 37 | 38 | 39 | class FatalError(SystemExit): 40 | def __init__(self, reason): 41 | super(FatalError, self).__init__(-1) 42 | log.error(u"Failing resource: %s", reason) 43 | print u'{ "Reason": "%s" }' % reason 44 | 45 | 46 | class DNSProcessor(object): 47 | def __init__(self, topic, table_name, region): 48 | self._topic = topic 49 | self._table_name = table_name 50 | self._region = region 51 | self._ddb = botocore.session.get_session().get_service("dynamodb") 52 | self._r53 = botocore.session.get_session().get_service("route53") 53 | 54 | def _create_processor_id(self, stack_id, logical_id, hosted_zone_id, dns_pattern): 55 | """Constructs a repeated id to help with idempotency.""" 56 | return str(hashlib.sha256(stack_id + "|" + logical_id + "|" + hosted_zone_id + "|" + dns_pattern).hexdigest()) 57 | 58 | def _create_processor_record(self, processor_id, request_id, hosted_zone_id, dns_pattern): 59 | """Creates a record that can be stored in DynamoDB""" 60 | record = { 61 | "ProcessorId": {'S': processor_id}, 62 | "HostedZoneId": {'S': hosted_zone_id}, 63 | "DnsPattern": {'S': dns_pattern}, 64 | "RequestId": {'S': request_id}, 65 | "UpdateVersion": {'N': '1'} 66 | } 67 | 68 | # Write our names for this new resource into the DynamoDB table to track status. 69 | for name in _NAMES: 70 | record['name_' + name] = {'S': 'A'} 71 | 72 | return record 73 | 74 | def _get_processor_record(self, processor_id, attributes=None, default=None, error_type=FatalError): 75 | # Check if this is the same request. 
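# A consistent GetItem keyed on ProcessorId ensures that a record written by a
# duplicate Create request is visible immediately.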
76 | get = self._ddb.get_operation("GetItem") 77 | kwargs = { 78 | 'consistent_read': True, 79 | 'key': { 80 | "ProcessorId": {'S': processor_id} 81 | } 82 | } 83 | kwargs['table_name'] = self._table_name 84 | if attributes: 85 | kwargs['attributes_to_get'] = attributes 86 | 87 | get_response, get_data = get.call(self._ddb.get_endpoint(self._region), **kwargs) 88 | if get_response.status_code != 200: 89 | raise error_type(u"Failed reading entry (%s): %s: " % (get_response.status_code, get_data)) 90 | 91 | try: 92 | return get_data.get('Item', default) 93 | except: 94 | return default 95 | 96 | def _delete_processor_record(self, processor_id): 97 | delete = self._ddb.get_operation("DeleteItem") 98 | 99 | response, data = delete.call(self._ddb.get_endpoint(self._region), 100 | table_name=self._table_name, 101 | key={ 102 | "ProcessorId": {'S': processor_id} 103 | }) 104 | 105 | if response.status_code != 200: 106 | raise FatalError(u"Failed deleting DynamoDB entry (%s): %s: " % (response.status_code, data)) 107 | 108 | def _processor_create_success(self, processor_id): 109 | return u'{ "PhysicalResourceId" : "%s", "Data": { "Topic": "%s" } }' % (processor_id, self._topic) 110 | 111 | def _store_processor_record(self, processor_id, record, request_id): 112 | try: 113 | put = self._ddb.get_operation("PutItem") 114 | 115 | put_response, put_data = put.call(self._ddb.get_endpoint(self._region), 116 | table_name=self._table_name, 117 | item=record, 118 | expected={ 119 | "ProcessorId": { 120 | "Exists": "false" 121 | } 122 | }) 123 | # If we couldn't store the row, check why 124 | if put_response.status_code != 200: 125 | valid_request = False 126 | for error in put_data.get('Errors', []): 127 | # If the row already exists, handle duplicate requests as a success. 128 | if error.get('Code', '') == 'ConditionalCheckFailedException': 129 | existing_record = self._get_processor_record(processor_id, ['RequestId']) 130 | # Determine if this is a duplicate request. 131 | if existing_record and existing_record.get('RequestId', {}).get('S') != request_id: 132 | raise FatalError(u"Resource already exists for the specified hosted zone.") 133 | 134 | # This was a valid duplicate request. 
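# Re-delivered messages reuse the original RequestId, so a conditional-check
# failure with a matching RequestId means the Create already succeeded.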
135 | valid_request = True 136 | break 137 | 138 | if not valid_request: 139 | raise FatalError(u"Failed creating DynamoDB entry (%s): %s: " % (put_response.status_code, put_data)) 140 | 141 | except Exception, e: 142 | log.exception("Unexpected exception creating entry") 143 | raise FatalError(u"Unhandled exception creating entry: %s" % e) 144 | 145 | def _update_processor_record(self, record): 146 | try: 147 | expected_version = record.get('UpdateVersion', {}).get('N', '1') 148 | record['UpdateVersion'] = {'N': str(long(expected_version) + 1)} 149 | 150 | put = self._ddb.get_operation("PutItem") 151 | 152 | put_response, put_data = put.call(self._ddb.get_endpoint(self._region), 153 | table_name=self._table_name, 154 | item=record, 155 | expected={ 156 | "UpdateVersion": { 157 | "Value": {"N": expected_version} 158 | } 159 | }) 160 | # If we couldn't update the row, fail and let it be retried later 161 | if put_response.status_code != 200: 162 | log.error(u"Failed creating DynamoDB entry (%s): %s: ", put_response.status_code, put_data) 163 | return False 164 | 165 | except Exception: 166 | log.exception(u"Unexpected exception updating entry") 167 | return False 168 | 169 | return True 170 | 171 | def create_processor(self, stack_id, logical_id, request_id, hosted_zone_id, dns_pattern): 172 | processor_id = self._create_processor_id(stack_id, logical_id, hosted_zone_id, dns_pattern) 173 | self._store_processor_record(processor_id, 174 | self._create_processor_record(processor_id, request_id, 175 | hosted_zone_id, dns_pattern), 176 | request_id) 177 | return self._processor_create_success(processor_id) 178 | 179 | def delete_processor(self, processor_id, hosted_zone_id): 180 | try: 181 | # Locate the resource we are trying to delete. 182 | existing_item = self._get_processor_record(processor_id, default={}) 183 | 184 | # 185 | # Delete all of the route53 entries for this custom resource 186 | # 187 | changes = [] 188 | for key, value in existing_item.iteritems(): 189 | # Find names that are not available and add to r53 request to delete entry. 190 | if key.startswith('name_') and value.get('S', '') != 'A': 191 | domain_data = value.get('S', '').split('|', 3) 192 | if len(domain_data) == 3: 193 | try: 194 | log.info(u"Preparing to delete record set: %s", domain_data[2]) 195 | record_set = json.loads(domain_data[2]) 196 | except ValueError: 197 | raise FatalError(u"Could not parse DNS record entry: %s: %s" % (key, value)) 198 | 199 | changes.append({ 200 | 'Action': 'DELETE', 201 | 'ResourceRecordSet': record_set 202 | }) 203 | 204 | if len(changes) != 0: 205 | change_records = self._r53.get_operation("ChangeResourceRecordSets") 206 | change_response, change_data = change_records.call(self._r53.get_endpoint(self._region), 207 | hosted_zone_id=hosted_zone_id, 208 | change_batch={ 209 | 'Comment': u"Deleting DNSProcessor %s." 210 | % processor_id, 211 | 'Changes': changes 212 | }) 213 | 214 | if change_response.status_code != 200: 215 | valid_delete = True 216 | for error in change_data.get('Errors', []): 217 | # If the error isn't that the value doesn't match, we will say the update failed. 
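# Route53 reports record sets that are already gone (or already changed) as an
# InvalidChangeBatch error; those cases are treated as a successful delete.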
218 | if (error.get('Code', '') != 'InvalidChangeBatch' 219 | or (u"values provided do not match the current values" not in error.get('Message', '') 220 | and u"it was not found" not in error.get('Message', ''))): 221 | valid_delete = False 222 | break 223 | 224 | if not valid_delete: 225 | raise FatalError(u"Failed deleting DNS entries (%s): %s: " % (change_response.status_code, 226 | change_data)) 227 | 228 | # Lastly, delete our custom resource from our DynamoDB table 229 | self._delete_processor_record(processor_id) 230 | 231 | except Exception, e: 232 | log.exception(u"Unexpected exception deleting dns processor") 233 | raise FatalError(u"Unhandled exception deleting dns processor: %s" % e) 234 | 235 | return "{}" 236 | 237 | def _name_status(self, status, instance_id, record_set): 238 | try: 239 | if isinstance(record_set, basestring): 240 | record_set = json.loads(record_set) 241 | 242 | return "%s|%s|%s" % (status, instance_id, json.dumps(record_set)) 243 | except ValueError: 244 | raise FatalError(u"Could not serialize status record set: %s" % record_set) 245 | 246 | def _get_hosted_zone_domain(self, hosted_zone_id, required=True, error_type=FatalError): 247 | """Retrieves the domain for a hosted zone""" 248 | try: 249 | get_zone = self._r53.get_operation("GetHostedZone") 250 | response, data = get_zone.call(self._r53.get_endpoint(self._region), 251 | id=hosted_zone_id) 252 | if response.status_code != 200: 253 | log.exception(u"Failed to retrieve hosted zone (%s) information" % hosted_zone_id) 254 | raise error_type(u"Failed to retrieve hosted zone (%s): %s: " % (response.status_code, data)) 255 | 256 | domain = data.get('HostedZone', {}).get('Name') 257 | if not domain: 258 | if required: 259 | raise error_type(u"HostedZone (%s) was not found." % hosted_zone_id) 260 | return None 261 | 262 | return domain 263 | except Exception, e: 264 | log.exception(u"Unexpected exception listing hosted zone") 265 | raise error_type(u"Unhandled exception listing hosted zone: %s" % e) 266 | 267 | def _generate_domain_name(self, dns_pattern, name, domain, error_type=FatalError): 268 | try: 269 | return pystache.render(dns_pattern, {'simpsons_name': name, 'hosted_zone_name': domain}) 270 | except: 271 | raise error_type(u"Failed to generate domain name using pattern %s" % dns_pattern) 272 | 273 | def _find_instance_entry(self, processor, instance_id): 274 | """Locates the processor entry for an instance; returns parsed entry""" 275 | for key, value in processor.iteritems(): 276 | if key.startswith('name_') and value.get('S', '') != 'A': 277 | domain_data = value.get('S', '').split('|', 3) 278 | if len(domain_data) != 3 or domain_data[1] != instance_id: 279 | continue 280 | 281 | return key.split("name_", 2)[1], domain_data 282 | 283 | return None 284 | 285 | def _create_r53_record(self, entry_name, ip_address): 286 | return { 287 | 'Name': entry_name, 288 | 'Type': 'A', 289 | 'TTL': 300, 290 | 'ResourceRecords': [{'Value': ip_address}] 291 | } 292 | 293 | def add_instance_mapping(self, processor_id, instance_id, ip_address): 294 | try: 295 | processor = self._get_processor_record(processor_id, error_type=ValueError) 296 | except ValueError: 297 | # Retry exceptions while trying to retrieve processor record 298 | return False 299 | 300 | # This should only happen if the processor is unregistered, so fail hard. 301 | if not processor: 302 | raise ValueError(u"Processor %s no longer exists." 
% processor_id) 303 | 304 | hosted_zone_id = processor['HostedZoneId'].get('S') 305 | dns_pattern = processor['DnsPattern'].get('S') 306 | 307 | # Check if instance is registered 308 | entry = self._find_instance_entry(processor, instance_id) 309 | 310 | if not entry: 311 | # Find a name to use, alphabetically so the same names keep being used. 312 | for key in sorted(processor): 313 | if key.startswith('name_') and processor[key].get('S', '') == 'A': 314 | name = key.split('name_', 2)[1] 315 | 316 | # Locate the root domain name 317 | try: 318 | domain = self._get_hosted_zone_domain(hosted_zone_id, False, error_type=ValueError) 319 | if not domain: 320 | raise ValueError(u"Hosted Zone %s does not exist" % hosted_zone_id) 321 | except ValueError: 322 | # Failures trying to get domain should be retried. 323 | return False 324 | 325 | dns_name = self._generate_domain_name(dns_pattern, name, domain, error_type=ValueError) 326 | 327 | entry_str = self._name_status('A', instance_id, self._create_r53_record(dns_name, ip_address)) 328 | entry = (name, entry_str.split('|', 3)) 329 | break 330 | 331 | if not entry: 332 | # No names are currently available 333 | return False 334 | 335 | # If the name hasn't been reserved, update processor to mark the name as reserved. 336 | if entry[1][0] == 'A': 337 | # Set the name status to reserved. 338 | entry[1][0] = 'R' 339 | processor['name_' + entry[0]] = {'S': self._name_status(*(entry[1]))} 340 | 341 | # Attempt to reserve the entry. 342 | if not self._update_processor_record(processor): 343 | # Name couldn't be reserved, retry later 344 | return False 345 | 346 | # If the name is reserved, we need to create the r53 record. 347 | if entry[1][0] == 'R': 348 | 349 | try: 350 | record_set = json.loads(entry[1][2]) 351 | except ValueError: 352 | log.exception("Failed to parse recordset %s" % entry[1][2]) 353 | return False 354 | 355 | change_records = self._r53.get_operation("ChangeResourceRecordSets") 356 | change_response, change_data = change_records.call(self._r53.get_endpoint(self._region), 357 | hosted_zone_id=hosted_zone_id, 358 | change_batch={ 359 | 'Comment': u"Registering for DNSProcessor %s." 360 | % processor_id, 361 | 'Changes': [ 362 | { 363 | 'Action': 'CREATE', 364 | 'ResourceRecordSet': record_set 365 | }] 366 | }) 367 | if change_response.status_code != 200: 368 | valid_create = True 369 | for error in change_data.get('Errors', []): 370 | if (error.get('Code', '') != 'InvalidChangeBatch' 371 | or u"but it already exists" not in error.get('Message', '')): 372 | valid_create = False 373 | break 374 | 375 | if not valid_create: 376 | log.error(u"Failed creating DNS entry (%s): %s: ", change_response.status_code, change_data) 377 | return False 378 | 379 | # Set the name status to used. 380 | entry[1][0] = 'U' 381 | processor['name_' + entry[0]] = {'S': self._name_status(*(entry[1]))} 382 | 383 | # Attempt to reserve the entry. 
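# The conditional write on UpdateVersion also guards this final 'U' (used) state.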
384 | return self._update_processor_record(processor) 385 | 386 | return True 387 | 388 | def remove_instance_mapping(self, processor_id, instance_id): 389 | try: 390 | processor = self._get_processor_record(processor_id, error_type=ValueError) 391 | except ValueError: 392 | # Retry exceptions while trying to retrieve processor record 393 | return False 394 | 395 | # This should only happen if the processor is unregistered, so the record should be gone 396 | if not processor: 397 | return True 398 | 399 | hosted_zone_id = processor['HostedZoneId'].get('S') 400 | 401 | # Find the entry for our instance 402 | entry = self._find_instance_entry(processor, instance_id) 403 | if not entry: 404 | return True 405 | 406 | try: 407 | record_set = json.loads(entry[1][2]) 408 | except ValueError: 409 | log.exception("Failed to parse recordset %s" % entry[1][2]) 410 | return False 411 | 412 | # Delete r53 entry for instance 413 | change_records = self._r53.get_operation("ChangeResourceRecordSets") 414 | change_response, change_data = change_records.call(self._r53.get_endpoint(self._region), 415 | hosted_zone_id=hosted_zone_id, 416 | change_batch={ 417 | 'Comment': u"Deleting DNSProcessor %s." 418 | % processor_id, 419 | 'Changes': [ 420 | { 421 | 'Action': 'DELETE', 422 | 'ResourceRecordSet': record_set 423 | }] 424 | }) 425 | 426 | if change_response.status_code != 200: 427 | valid_delete = True 428 | for error in change_data.get('Errors', []): 429 | # If the error isn't that the value doesn't match, we will say the update failed. 430 | if (error.get('Code', '') != 'InvalidChangeBatch' 431 | or (u"values provided do not match the current values" not in error.get('Message', '') 432 | and u"it was not found" not in error.get('Message', ''))): 433 | valid_delete = False 434 | break 435 | 436 | if not valid_delete: 437 | log.error(u"Failed deleting DNS entries (%s): %s: ", change_response.status_code, change_data) 438 | return False 439 | 440 | # Update the processor to indicate name is available. 441 | processor['name_' + entry[0]] = {'S': 'A'} 442 | return self._update_processor_record(processor) 443 | 444 | def update_processor(self, old_processor_id, stack_id, logical_id, request_id, hosted_zone_id, dns_pattern): 445 | try: 446 | processor_id = self._create_processor_id(stack_id, logical_id, hosted_zone_id, dns_pattern) 447 | 448 | # If the processor id matches, no work needs to be done, so short-circuit. 449 | if old_processor_id == processor_id: 450 | return self._processor_create_success(processor_id) 451 | 452 | # Check if processor already exists 453 | record = self._get_processor_record(processor_id) 454 | if record: 455 | return self._processor_create_success(processor_id) 456 | 457 | # Get existing processor entries 458 | existing_record = self._get_processor_record(old_processor_id) 459 | 460 | # If there is no existing record, just create a default record. 461 | if not existing_record: 462 | return self.create_processor(stack_id, logical_id, request_id, hosted_zone_id, dns_pattern) 463 | 464 | # Construct a default entry for new processor 465 | new_record = self._create_processor_record(processor_id, request_id, hosted_zone_id, dns_pattern) 466 | 467 | domain = self._get_hosted_zone_domain(hosted_zone_id) 468 | 469 | # Copy over any existing entries and prepare to call route53 470 | changes = [] 471 | unmatched = {} 472 | for key, value in existing_record.iteritems(): 473 | # Find names that are not available and add to r53 request to delete entry. 
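# Each in-use name is re-rendered with the new DNSPattern, recorded as used in
# the new record, and queued as a CREATE change for Route53.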
474 | if key.startswith('name_') and value.get('S', '') != 'A': 475 | current_name = key.split('name_', 2)[1] 476 | domain_data = value.get('S', '').split('|', 3) 477 | if len(domain_data) == 3: 478 | try: 479 | log.info(u"Preparing to copy record set: %s", domain_data[2]) 480 | record_set = json.loads(domain_data[2]) 481 | except ValueError: 482 | raise FatalError(u"Could not parse DNS record entry: %s: %s" % (key, value)) 483 | 484 | # Store the unmatched entry to locate a new name later. 485 | if key not in new_record: 486 | unmatched[key] = (domain_data[1], record_set) 487 | continue 488 | 489 | record_set['Name'] = self._generate_domain_name(dns_pattern, current_name, domain) 490 | new_record[key] = {'S': self._name_status('U', domain_data[1], record_set)} 491 | changes.append({ 492 | 'Action': 'CREATE', 493 | 'ResourceRecordSet': record_set 494 | }) 495 | 496 | for unmatched_key, unmatched_tuple in unmatched.iteritems(): 497 | inserted = False 498 | for key in sorted(new_record): 499 | if key.startswith('name_') and new_record[key].get('S', '') == 'A': 500 | inserted = True 501 | new_name = key.split('name_', 2)[1] 502 | 503 | unmatched_record_set = unmatched_tuple[1] 504 | unmatched_record_set['Name'] = self._generate_domain_name(dns_pattern, new_name, domain) 505 | new_record[key] = {'S': self._name_status('U', unmatched_tuple[0], unmatched_record_set)} 506 | changes.append({ 507 | 'Action': 'CREATE', 508 | 'ResourceRecordSet': unmatched_record_set 509 | }) 510 | break 511 | if not inserted: 512 | raise FatalError(u"Unable to create DNS mapping for %s" % unmatched_key) 513 | 514 | # Write entries to route53 515 | if len(changes) != 0: 516 | change_records = self._r53.get_operation("ChangeResourceRecordSets") 517 | change_response, change_data = change_records.call(self._r53.get_endpoint(self._region), 518 | hosted_zone_id=hosted_zone_id, 519 | change_batch={ 520 | 'Comment': u"Updating DNSProcessor %s." 521 | % processor_id, 522 | 'Changes': changes 523 | }) 524 | 525 | if change_response.status_code != 200: 526 | valid_change = True 527 | for error in change_data.get('Errors', []): 528 | # If the error isn't that the value doesn't match, we will say the update failed. 529 | if (error.get('Code', '') != 'InvalidChangeBatch' 530 | or u"but it already exists" not in error.get('Message', '')): 531 | valid_change = False 532 | break 533 | if not valid_change: 534 | raise FatalError(u"Failed to create DNS entries (%s): %s: " % (change_response.status_code, 535 | change_data)) 536 | 537 | # Store the new processor record 538 | self._store_processor_record(processor_id, new_record, request_id) 539 | return self._processor_create_success(processor_id) 540 | except Exception, e: 541 | log.exception(u"Unexpected exception updating dns processor") 542 | raise FatalError(u"Unhandled exception updating dns processor: %s" % e) 543 | -------------------------------------------------------------------------------- /examples/dns-mapping/impl/init/cr-dns-processor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # cr-dns-processor DNS processor custom resource daemon 4 | # 5 | # chkconfig: - 80 20 6 | # 7 | # description: Creates a DNS entry when an AutoScaling event is received 8 | # processname: cr-dns-processor 9 | # config: /etc/cfn/cr-dns-processor.conf 10 | # pidfile: /var/run/cr-dns-processor.pid 11 | # 12 | # source function library 13 | . 
/etc/rc.d/init.d/functions 14 | 15 | RETVAL=0 16 | 17 | start() { 18 | echo -n $"Starting cr-dns-processor: " 19 | cd /home/ec2-user; daemon /home/ec2-user/cr-dns-processor 20 | RETVAL=$? 21 | echo 22 | [ $RETVAL -eq 0 ] && touch /var/lock/subsys/cr-dns-processor 23 | } 24 | 25 | stop() { 26 | echo -n $"Stopping cr-dns-processor: " 27 | killproc -p /var/run/cr-dns-processor.pid cr-dns-processor 28 | echo 29 | [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/cr-dns-processor 30 | } 31 | 32 | restart() { 33 | stop 34 | start 35 | } 36 | 37 | case "$1" in 38 | start) 39 | start 40 | ;; 41 | stop) 42 | stop 43 | ;; 44 | restart|force-reload|reload) 45 | restart 46 | ;; 47 | condrestart|try-restart) 48 | [ -f /var/lock/subsys/cr-dns-processor ] && restart 49 | ;; 50 | status) 51 | status -p /var/run/cr-dns-processor.pid cr-dns-processor 52 | RETVAL=$? 53 | ;; 54 | *) 55 | echo $"Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}" 56 | exit 1 57 | esac 58 | 59 | exit $RETVAL -------------------------------------------------------------------------------- /examples/dns-mapping/impl/runner.py: -------------------------------------------------------------------------------- 1 | #============================================================================== 2 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | #============================================================================== 16 | from threading import Thread 17 | from Queue import Queue 18 | import logging 19 | import botocore.session 20 | import dnsprocessor 21 | from dnsprocessor import DNSProcessor 22 | 23 | try: 24 | import simplejson as json 25 | except ImportError: 26 | import json 27 | 28 | log = logging.getLogger("cr.dnsprocessor") 29 | dnsprocessor.log = log 30 | 31 | 32 | class AutoScalingNotificationRunner(object): 33 | def __init__(self, queue_url, region, table, num_threads=2): 34 | # The SQS queue to poll for events. 
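# Notifications arrive SNS-wrapped, hence the double JSON decode in
# Message.parse_message() below.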
35 | self._queue_url = queue_url 36 | 37 | # Construct an unbounded Queue to hold our pending tasks 38 | self._task_queue = Queue() 39 | 40 | # Construct a task to poll the new queue 41 | self._task_queue.put(QueuePollTask(queue_url, region, table)) 42 | 43 | # Determine the maximum number of threads to use 44 | self._num_threads = max(1, num_threads) 45 | 46 | def process_messages(self): 47 | for i in range(self._num_threads): 48 | worker = Thread(target=self.task_worker) 49 | worker.daemon = True 50 | worker.start() 51 | 52 | def task_worker(self): 53 | while True: 54 | task = self._task_queue.get() 55 | try: 56 | new_tasks = task.execute_task() 57 | if new_tasks: 58 | for t in new_tasks: 59 | self._task_queue.put(t) 60 | except: 61 | log.exception(u"Failed executing task") 62 | finally: 63 | self._task_queue.task_done() 64 | 65 | # Reschedule the polling tasks 66 | if isinstance(task, QueuePollTask): 67 | self._task_queue.put(task) 68 | 69 | 70 | class Message(object): 71 | def __init__(self, queue_url, region, message): 72 | self._queue_url = queue_url 73 | self._message = message 74 | self._region = region 75 | 76 | def parse_message(self): 77 | return json.loads(json.loads(self._message["Body"])["Message"]) 78 | 79 | @property 80 | def region(self): 81 | return self._region 82 | 83 | def delete(self): 84 | sqs = botocore.session.get_session().get_service("sqs") 85 | delete = sqs.get_operation("DeleteMessage") 86 | http_response, response_data = delete.call(sqs.get_endpoint(self._region), 87 | queue_url=self._queue_url, 88 | receipt_handle=self._message.get("ReceiptHandle")) 89 | 90 | # Swallow up any errors/issues, logging them out 91 | if http_response.status_code != 200: 92 | log.error(u"Failed to delete message from queue %s with status_code %s: %s" % 93 | (self._queue_url, http_response.status_code, response_data)) 94 | 95 | def change_message_visibility(self, timeout): 96 | sqs = botocore.session.get_session().get_service("sqs") 97 | delete = sqs.get_operation("ChangeMessageVisibility") 98 | http_response, response_data = delete.call(sqs.get_endpoint(self._region), 99 | queue_url=self._queue_url, 100 | receipt_handle=self._message.get("ReceiptHandle"), 101 | visibility_timeout=timeout) 102 | 103 | # Swallow up any errors/issues, logging them out 104 | if http_response.status_code != 200: 105 | log.error(u"Failed to change visibility of message from queue %s with status_code %s: %s" % 106 | (self._queue_url, http_response.status_code, response_data)) 107 | 108 | 109 | class ScalingNotification(object): 110 | def _validate_property(self, property_name): 111 | if not property_name in self._notification: 112 | raise ValueError(u"ScalingNotification requires %s" % property_name) 113 | 114 | def __init__(self, message): 115 | self._message = message 116 | self._notification = self._message.parse_message() 117 | 118 | # Ensure the notification has some required fields. 
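# AutoScalingGroupName is later used to look up the ProcessorId tag, and
# EC2InstanceId to resolve the instance's public IP address.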
119 | self._validate_property("AutoScalingGroupName") 120 | self._validate_property("EC2InstanceId") 121 | self._validate_property("Event") 122 | 123 | valid_events = ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE'] 124 | event = self._notification["Event"] 125 | if event not in valid_events: 126 | raise ValueError(u"ScalingNotification requires Event to be %s", valid_events) 127 | 128 | @property 129 | def event(self): 130 | return self._notification["Event"] 131 | 132 | @property 133 | def instance_id(self): 134 | return self._notification["EC2InstanceId"] 135 | 136 | @property 137 | def auto_scaling_group(self): 138 | return self._notification["AutoScalingGroupName"] 139 | 140 | @property 141 | def region(self): 142 | return self._message.region 143 | 144 | def increase_timeout(self, timeout): 145 | """Attempts to increase the message visibility timeout.""" 146 | self._message.change_message_visibility(timeout) 147 | 148 | def delete(self): 149 | self._message.delete() 150 | 151 | def __repr__(self): 152 | return str(self._notification) 153 | 154 | 155 | class BaseTask(object): 156 | def execute_task(self): 157 | pass 158 | 159 | 160 | class QueuePollTask(BaseTask): 161 | def __init__(self, queue_url, region, table): 162 | self._queue_url = queue_url 163 | self._region = region 164 | self._table = table 165 | 166 | def retrieve_notifications(self, max_notifications=1): 167 | """Attempts to retrieve notifications from the provided SQS queue""" 168 | session = botocore.session.get_session() 169 | sqs = session.get_service("sqs") 170 | receive = sqs.get_operation("ReceiveMessage") 171 | http_response, response_data = receive.call(sqs.get_endpoint(self._region), 172 | queue_url=self._queue_url, 173 | wait_time_seconds=20, 174 | max_number_of_messages=max_notifications) 175 | 176 | # Swallow up any errors/issues, logging them out 177 | if http_response.status_code != 200 or not "Messages" in response_data: 178 | log.error(u"Failed to retrieve messages from queue %s with status_code %s: %s" % 179 | (self._queue_url, http_response.status_code, response_data)) 180 | return [] 181 | 182 | notifications = [] 183 | for msg in response_data.get("Messages", []): 184 | # Construct a message that we can parse into events. 
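# A hypothetical (abbreviated) SQS body this loop handles:
#   Body = '{"Message": "{\"Event\": \"autoscaling:EC2_INSTANCE_LAUNCH\", ...}"}'
# Anything that cannot be parsed into a ScalingNotification (for example the
# test notification sent when notifications are first configured) is deleted
# instead of being retried.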
185 | message = Message(self._queue_url, self._region, msg) 186 | try: 187 | notifications.append(ScalingNotification(message)) 188 | except Exception: 189 | log.exception(u"Invalid message received; will delete from queue: %s", msg) 190 | message.delete() 191 | 192 | return notifications 193 | 194 | def execute_task(self): 195 | log.debug(u"Checking queue %s", self._queue_url) 196 | notifications = self.retrieve_notifications() 197 | 198 | tasks = [] 199 | for notification in notifications: 200 | if notification.event == 'autoscaling:EC2_INSTANCE_LAUNCH': 201 | # Create a mapping 202 | tasks.append(CreateMappingTask(notification, self._table)) 203 | else: 204 | # Delete a mapping 205 | tasks.append(DeleteMappingTask(notification, self._table)) 206 | 207 | return tasks 208 | 209 | 210 | class MappingTask(BaseTask): 211 | def __init__(self, notification, table): 212 | self._notification = notification 213 | self._table = table 214 | 215 | def get_processor_id(self): 216 | session = botocore.session.get_session() 217 | autoscaling = session.get_service("autoscaling") 218 | describe = autoscaling.get_operation("DescribeTags") 219 | response, data = describe.call(autoscaling.get_endpoint(self._notification.region), 220 | filters=[{ 221 | "Name": "auto-scaling-group", 222 | "Values": [self._notification.auto_scaling_group] 223 | }]) 224 | 225 | # Swallow up any error responses and return nothing. 226 | if response.status_code != 200 or 'Tags' not in data: 227 | log.error(u"Failed to retrieve tags for ASG %s with status_code %s: %s" % 228 | (self._notification.auto_scaling_group, response.status_code, data)) 229 | return None 230 | 231 | for tag in data.get('Tags', []): 232 | if tag.get('Key', '') == 'ProcessorId': 233 | return tag.get('Value') 234 | 235 | return None 236 | 237 | 238 | class CreateMappingTask(MappingTask): 239 | def __init__(self, notification, table): 240 | super(CreateMappingTask, self).__init__(notification, table) 241 | 242 | def _get_instance_ip(self, instance_id): 243 | session = botocore.session.get_session() 244 | ec2 = session.get_service("ec2") 245 | describe = ec2.get_operation("DescribeInstances") 246 | response, data = describe.call(ec2.get_endpoint(self._notification.region), 247 | instance_ids=[instance_id]) 248 | 249 | # Swallow up any error responses and return nothing. 250 | if response.status_code != 200 or 'Reservations' not in data: 251 | log.error(u"Failed to describe instance %s with status_code %s: %s" % 252 | (instance_id, response.status_code, data)) 253 | return None 254 | 255 | for reservation in data.get('Reservations', []): 256 | for instance in reservation.get('Instances', []): 257 | if instance.get('InstanceId', '') == instance_id: 258 | return instance.get('PublicIpAddress') 259 | 260 | return None 261 | 262 | def execute_task(self): 263 | log.debug(u"CreateMapping for notification %s" % self._notification) 264 | # Determine the public ip address of our instance. 265 | ip_address = self._get_instance_ip(self._notification.instance_id) 266 | log.error("IP: %s", ip_address) 267 | if not ip_address: 268 | return [] 269 | 270 | # Determine the processor_id that is handling our naming 271 | processor_id = self.get_processor_id() 272 | # If we couldn't find processor_id, keep message and we'll retry message later. 273 | log.error('Processor Id: %s', processor_id) 274 | if not processor_id: 275 | return [] 276 | 277 | # Create the new route. 
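# No SNS topic is needed here; mapping an instance only touches the DynamoDB
# table and Route53.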
278 | processor = DNSProcessor(None, self._table, self._notification.region) 279 | try: 280 | delete_notification = processor.add_instance_mapping(processor_id, 281 | self._notification.instance_id, 282 | ip_address) 283 | except ValueError: 284 | log.exception("Invalid mapping to add") 285 | delete_notification = True 286 | 287 | # Delete our notification 288 | if delete_notification: 289 | self._notification.delete() 290 | 291 | 292 | class DeleteMappingTask(MappingTask): 293 | def __init__(self, notification, table): 294 | super(DeleteMappingTask, self).__init__(notification, table) 295 | 296 | def execute_task(self): 297 | log.debug(u"DeleteMapping for notification %s" % self._notification) 298 | processor_id = self.get_processor_id() 299 | # If we couldn't find processor_id, keep message and we'll retry message later. 300 | if not processor_id: 301 | return [] 302 | 303 | # Delete the route. 304 | processor = DNSProcessor(None, self._table, self._notification.region) 305 | try: 306 | delete_notification = processor.remove_instance_mapping(processor_id, self._notification.instance_id) 307 | except ValueError: 308 | log.exception("Invalid mapping to remove") 309 | delete_notification = True 310 | 311 | # Delete our notification 312 | if delete_notification: 313 | self._notification.delete() 314 | -------------------------------------------------------------------------------- /examples/eip-lookup/README.md: -------------------------------------------------------------------------------- 1 | # EIP Lookup Custom Resource 2 | This custom resource returns a pre-provisioned Elastic IP address that is stored in a DynamoDB table. It addresses the scenario where an EC2 instance needs a known EIP, usually because the address has been white-listed with a 3rd party. This resource supports associating an EIP with a pool. For example, you might have a default pool of EIPs, and another pool used for interacting with a specific 3rd party API. 3 | 4 | EIPs and their pool association are stored in DynamoDB. An example DynamoDB table is provisioned by`impl/custom-resource-runner.template`. The table schema designates the EIP pool name as the Hash Key, and the EIP address as the Range Key, allowing all addresses in a given pool to be efficiently retrieved. The custom resource uses consistent reads and conditional updates to ensure consistency, and supports updating the pool name of a resource. 5 | 6 | Although the above template provisions the required DynamoDB table to store EIPs and their pools, you must populate the table with values for existing EIP addresses. Use _default_ for the pool name if you do not require pooling. -------------------------------------------------------------------------------- /examples/eip-lookup/example.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "An example of the EIP Lookup custom resource", 5 | 6 | "Parameters" : { 7 | "InstanceType" : { 8 | "Description" : "Example instance types", 9 | "Type" : "String", 10 | "Default" : "t1.micro", 11 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 12 | "ConstraintDescription" : "must be a valid EC2 instance type." 
13 | }, 14 | "EipLookupServiceToken" : { 15 | "Description" : "ServiceToken of EIP Lookup Custom Resource", 16 | "Type" : "String", 17 | "AllowedPattern" : "arn:aws:sns:.*", 18 | "ConstraintDescription" : "must be an SNS topic ARN" 19 | } 20 | }, 21 | 22 | "Mappings" : { 23 | "RegionMap" : { 24 | "us-east-1" : { "AMI" : "ami-7f418316" }, 25 | "us-west-1" : { "AMI" : "ami-951945d0" }, 26 | "us-west-2" : { "AMI" : "ami-16fd7026" }, 27 | "eu-west-1" : { "AMI" : "ami-24506250" }, 28 | "sa-east-1" : { "AMI" : "ami-3e3be423" }, 29 | "ap-southeast-1" : { "AMI" : "ami-74dda626" }, 30 | "ap-southeast-2" : { "AMI" : "ami-b3990e89" }, 31 | "ap-northeast-1" : { "AMI" : "ami-dcfa4edd" } 32 | } 33 | }, 34 | 35 | "Resources" : { 36 | "Ec2Instance" : { 37 | "Type" : "AWS::EC2::Instance", 38 | "Properties" : { 39 | "UserData" : { "Fn::Base64" : { "Fn::Join" : [ "", [ "IPAddress=", {"Ref" : "IPAddress"}]]}}, 40 | "ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "AMI" ]} 41 | } 42 | }, 43 | 44 | "IPAddress" : { 45 | "Type" : "Custom::EipLookup", 46 | "Version" : "1.0", 47 | "Properties" : { 48 | "ServiceToken" : { "Ref" : "EipLookupServiceToken" }, 49 | "pool" : "default" 50 | } 51 | }, 52 | 53 | "IPAssoc" : { 54 | "Type" : "AWS::EC2::EIPAssociation", 55 | "Properties" : { 56 | "InstanceId" : { "Ref" : "Ec2Instance" }, 57 | "EIP" : { "Ref" : "IPAddress" } 58 | } 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /examples/eip-lookup/impl/custom-resource-runner.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description" : "Example stack for EIP Lookup Custom Resource Backend.", 4 | 5 | "Parameters" : { 6 | "KeyName" : { 7 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 8 | "Type" : "String" 9 | }, 10 | "InstanceType" : { 11 | "Description" : "Custom resource runner instance type", 12 | "Type" : "String", 13 | "Default" : "t1.micro", 14 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 15 | "ConstraintDescription" : "must be a valid EC2 instance type." 16 | }, 17 | "MinSize" : { 18 | "Description" : "Minimum number of custom resource runners", 19 | "Type" : "Number", 20 | "MinValue" : "1", 21 | "Default" : "1", 22 | "ConstraintDescription" : "Must have at least one runner" 23 | }, 24 | "MaxSize" : { 25 | "Description" : "Maximum number of custom resource runners", 26 | "Type" : "Number", 27 | "MinValue" : "1", 28 | "Default" : "1", 29 | "ConstraintDescription" : "Must have at least one runner" 30 | }, 31 | "SSHLocation" : { 32 | "Description" : "The IP address range that can be used to SSH to the custom resource runners", 33 | "Type" : "String", 34 | "MinLength" : "9", 35 | "MaxLength" : "18", 36 | "Default" : "0.0.0.0/0", 37 | "AllowedPattern" : "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 38 | "ConstraintDescription" : "must be a valid IP CIDR range of the form x.x.x.x/x." 
39 | }, 40 | "LookupEipScriptUrl" : { 41 | "Description" : "The URL of the lookup-eip.py script", 42 | "Type" : "String", 43 | "Default" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/eip-lookup/impl/lookup-eip.py" 44 | } 45 | }, 46 | 47 | "Mappings" : { 48 | "AwsRegionToAMI" : { 49 | "us-east-1" : { "id" : "ami-35792c5c" }, 50 | "us-west-2" : { "id" : "ami-d03ea1e0" }, 51 | "us-west-1" : { "id" : "ami-687b4f2d" }, 52 | "eu-west-1" : { "id" : "ami-149f7863" }, 53 | "ap-southeast-1" : { "id" : "ami-14f2b946" }, 54 | "ap-northeast-1" : { "id" : "ami-3561fe34" }, 55 | "ap-southeast-2" : { "id" : "ami-a148d59b" }, 56 | "sa-east-1" : { "id" : "ami-9f6ec982" } 57 | } 58 | }, 59 | 60 | "Resources" : { 61 | "CustomResourcePipeline" : { 62 | "Type" : "AWS::CloudFormation::Stack", 63 | "Properties" : { 64 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 65 | } 66 | }, 67 | 68 | "EipTable" : { 69 | "Type" : "AWS::DynamoDB::Table", 70 | "Properties" : { 71 | "KeySchema" : { 72 | "HashKeyElement": { 73 | "AttributeName" : "pool", 74 | "AttributeType" : "S" 75 | }, 76 | "RangeKeyElement" : { 77 | "AttributeName" : "address", 78 | "AttributeType" : "S" 79 | } 80 | }, 81 | "ProvisionedThroughput" : { 82 | "ReadCapacityUnits" : "1", 83 | "WriteCapacityUnits" : "3" 84 | } 85 | } 86 | }, 87 | 88 | "RunnerRole" : { 89 | "Type" : "AWS::IAM::Role", 90 | "Properties" : { 91 | "AssumeRolePolicyDocument" : { 92 | "Version": "2008-10-17", 93 | "Statement": [{ 94 | "Effect": "Allow", 95 | "Principal": { 96 | "Service": [ "ec2.amazonaws.com" ] 97 | }, 98 | "Action": [ "sts:AssumeRole" ] 99 | }] 100 | }, 101 | "Path" : "/", 102 | "Policies" : [ 103 | { 104 | "PolicyName" : "CustomResourceRunner", 105 | "PolicyDocument" : { 106 | "Statement" : [ 107 | { 108 | "Effect" : "Allow", 109 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", "sqs:ReceiveMessage"], 110 | "Resource" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueARN"] } 111 | }, 112 | { 113 | "Effect" : "Allow", 114 | "Action" : ["dynamodb:PutItem"], 115 | "Resource" : { "Fn::Join" : ["", ["arn:aws:dynamodb:", {"Ref" : "AWS::Region"}, ":", {"Ref" : "AWS::AccountId"}, ":table/", { "Ref" : "EipTable" }]] } 116 | }, 117 | { 118 | "Effect" : "Allow", 119 | "Action" : ["dynamodb:Query"], 120 | "Resource" : { "Fn::Join" : ["", ["arn:aws:dynamodb:", {"Ref" : "AWS::Region"}, ":", {"Ref" : "AWS::AccountId"}, ":table/", { "Ref" : "EipTable" }]] } 121 | }, 122 | { 123 | "Effect" : "Allow", 124 | "Action" : ["dynamodb:GetItem"], 125 | "Resource" : { "Fn::Join" : ["", ["arn:aws:dynamodb:", {"Ref" : "AWS::Region"}, ":", {"Ref" : "AWS::AccountId"}, ":table/", { "Ref" : "EipTable" }]] } 126 | } 127 | ] 128 | } 129 | } 130 | ] 131 | } 132 | }, 133 | 134 | "RunnerInstanceProfile" : { 135 | "Type" : "AWS::IAM::InstanceProfile", 136 | "Properties" : { 137 | "Path" : "/", 138 | "Roles" : [ { "Ref" : "RunnerRole" } ] 139 | } 140 | }, 141 | 142 | "RunnerLaunchConfig" : { 143 | "Type" : "AWS::AutoScaling::LaunchConfiguration", 144 | "Properties" : { 145 | "IamInstanceProfile" : { "Ref" : "RunnerInstanceProfile" }, 146 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 147 | "InstanceType" : { "Ref" : "InstanceType" }, 148 | "KeyName" : { "Ref" : "KeyName" }, 149 | "SecurityGroups" : [ { "Ref" : "RunnerSecurityGroup" } ], 150 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 151 | "#!/bin/bash -x\n", 152 | "exec 
&> /home/ec2-user/userdata.log\n", 153 | "/opt/aws/bin/cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackId" }, " -r RunnerLaunchConfig -v\n", 154 | "/opt/aws/bin/cfn-signal -e $? ", { "Fn::Base64" : { "Ref" : "RunnerWaitConditionHandle" }}, "\n" 155 | ]] } } 156 | }, 157 | "Metadata" : { 158 | "AWS::CloudFormation::Init" : { 159 | "config" : { 160 | "packages" : { 161 | "rpm" : { 162 | "aws-cfn-resource-bridge" : "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-resource-bridge-0.1-4.noarch.rpm" 163 | } 164 | }, 165 | "files" : { 166 | "/etc/cfn/bridge.d/eip-lookup.conf" : { 167 | "content" : { "Fn::Join" : ["", [ 168 | "[eip-lookup]\n", 169 | "resource_type=Custom::EipLookup\n", 170 | "queue_url=", { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 171 | "timeout=60\n", 172 | "default_action=/home/ec2-user/lookup-eip.py -r ", { "Ref" : "AWS::Region" }, " -t ", { "Ref" : "EipTable" } 173 | ]]} 174 | }, 175 | "/home/ec2-user/lookup-eip.py" : { 176 | "source" : { "Ref" : "LookupEipScriptUrl" }, 177 | "mode" : "000755", 178 | "owner" : "ec2-user" 179 | } 180 | }, 181 | "services" : { 182 | "sysvinit" : { 183 | "cfn-resource-bridge" : { 184 | "enabled" : "true", 185 | "ensureRunning" : "true", 186 | "files" : ["/etc/cfn/bridge.d/eip-lookup.conf", 187 | "/home/ec2-user/lookup-eip.py"] 188 | } 189 | } 190 | } 191 | } 192 | } 193 | } 194 | }, 195 | 196 | "RunnerAutoScalingGroup" : { 197 | "Type" : "AWS::AutoScaling::AutoScalingGroup", 198 | "Properties" : { 199 | "AvailabilityZones" : { "Fn::GetAZs" : ""}, 200 | "LaunchConfigurationName" : { "Ref" : "RunnerLaunchConfig" }, 201 | "MinSize" : { "Ref" : "MinSize" }, 202 | "MaxSize" : { "Ref" : "MaxSize" } 203 | } 204 | }, 205 | 206 | "RunnerSecurityGroup" : { 207 | "Type" : "AWS::EC2::SecurityGroup", 208 | "Properties" : { 209 | "GroupDescription" : "SSH to the runner instances", 210 | "SecurityGroupIngress" : [ 211 | { 212 | "CidrIp" : { "Ref" : "SSHLocation" }, 213 | "FromPort" : "22", 214 | "ToPort" : "22", 215 | "IpProtocol" : "tcp" 216 | } 217 | ] 218 | } 219 | }, 220 | 221 | "RunnerWaitConditionHandle" : { 222 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 223 | }, 224 | 225 | "RunnerWaitCondition" : { 226 | "Type" : "AWS::CloudFormation::WaitCondition", 227 | "DependsOn" : "RunnerAutoScalingGroup", 228 | "Properties" : { 229 | "Count" : "1", 230 | "Handle" : { "Ref" : "RunnerWaitConditionHandle" }, 231 | "Timeout" : "600" 232 | } 233 | } 234 | }, 235 | "Outputs" : { 236 | "ServiceToken" : { 237 | "Value" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceTopicARN"] }, 238 | "Description" : "Service token to use in CustomResource definitions" 239 | } 240 | } 241 | } -------------------------------------------------------------------------------- /examples/eip-lookup/impl/lookup-eip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | import os 18 | import sys 19 | import logging 20 | from argparse import ArgumentParser 21 | 22 | import boto 23 | from boto.dynamodb2.table import Table 24 | 25 | 26 | handler = logging.StreamHandler(sys.stderr) 27 | handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")) 28 | logging.getLogger().addHandler(handler) 29 | 30 | log = logging.getLogger('lookup-eip') 31 | log.setLevel(logging.INFO) 32 | 33 | parser = ArgumentParser(prog='lookup-eip') 34 | parser.add_argument("-r", "--region", help="The region the audit trail will be written to in DynamoDB", dest="region") 35 | parser.add_argument("-t", "--eip-table", help="The DynamoDB table that lists available EIPs", dest="table_name") 36 | 37 | options = parser.parse_args() 38 | 39 | 40 | class FatalError(SystemExit): 41 | def __init__(self, reason): 42 | super(FatalError, self).__init__(-1) 43 | log.error('Failing resource: %s', reason) 44 | print u'{ "Reason": "%s" }' % reason 45 | 46 | # Get Region 47 | if not options.region or not options.table_name: 48 | raise FatalError(u"Service not configured to handle requests.") 49 | 50 | # Get the Request Type, Stack ID, and Logical Resource ID 51 | request_type = os.getenv('Event_RequestType') 52 | stack_id = os.getenv('Event_StackId') 53 | logical_id = os.getenv('Event_LogicalResourceId') 54 | 55 | # Get pool indicating where to get EIP from 56 | pool = os.getenv('Event_ResourceProperties_pool', 'default') 57 | 58 | 59 | def get_address(pool): 60 | """Retrieve an EIP for the given pool from DynamoDB""" 61 | #Connect to ddb 62 | conn = boto.dynamodb2.connect_to_region(options.region) 63 | ddb = Table(options.table_name, connection=conn) 64 | 65 | # Get available EIPs from pool 66 | eips = ddb.query( 67 | pool__eq=pool, 68 | consistent=True 69 | ) 70 | 71 | if not eips: 72 | raise FatalError(u"No EIPs found in pool %s" % pool) 73 | 74 | address = None 75 | for eip in eips: 76 | if not eip.get('stack_id', False): 77 | eip['stack_id'] = stack_id 78 | eip['logical_id'] = logical_id 79 | if eip.save(): 80 | address = eip['address'] 81 | break 82 | 83 | if not address: 84 | raise FatalError(u"All EIPs in pool %s are in use" % pool) 85 | 86 | return address 87 | 88 | 89 | def delete_address(pool, address): 90 | """Mark an EIP as no longer in use""" 91 | #Connect to ddb 92 | conn = boto.dynamodb2.connect_to_region(options.region) 93 | ddb = Table(options.table_name, connection=conn) 94 | 95 | eip = ddb.get_item(pool=pool, address=address) 96 | del eip['stack_id'] 97 | del eip['logical_id'] 98 | eip.save() 99 | 100 | if request_type == 'Create': 101 | physical_id = get_address(pool) 102 | 103 | elif request_type == 'Update': 104 | old_pool = os.getenv('Event_OldResourceProperties_pool', 'default') 105 | old_address = os.getenv('Event_PhysicalResourceId') 106 | 107 | # If the updated resource wants an EIP from a different pool 108 | if not pool == old_pool: 109 | # And get a new one 110 | physical_id = get_address(pool) 111 | else: 112 | physical_id 
= old_address 113 | 114 | elif request_type == 'Delete': 115 | address = os.getenv('Event_PhysicalResourceId') 116 | delete_address(pool, address) 117 | 118 | # Write out our successful response! 119 | if request_type != 'Delete': 120 | print u'{ "PhysicalResourceId" : "%s" }' % physical_id 121 | else: 122 | print u"{}" 123 | -------------------------------------------------------------------------------- /examples/mount/example.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "An example of the volume mount/dismount custom resource", 5 | 6 | "Parameters" : { 7 | "KeyName" : { 8 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the Instance", 9 | "Type" : "String" 10 | }, 11 | "InstanceType" : { 12 | "Description" : "Instance type", 13 | "Type" : "String", 14 | "Default" : "t1.micro", 15 | "AllowedValues" : [ "t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 16 | "ConstraintDescription" : "must be a valid EC2 instance type." 17 | }, 18 | "SSHLocation" : { 19 | "Description" : "The IP address range that can be used to SSH to the instance", 20 | "Type": "String", 21 | "MinLength": "9", 22 | "MaxLength": "18", 23 | "Default": "0.0.0.0/0", 24 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 25 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 26 | } 27 | }, 28 | 29 | "Mappings" : { 30 | "AwsRegionToAMI" : { 31 | "us-east-1" : { "id" : "ami-35792c5c" }, 32 | "us-west-2" : { "id" : "ami-d03ea1e0" }, 33 | "us-west-1" : { "id" : "ami-687b4f2d" }, 34 | "eu-west-1" : { "id" : "ami-149f7863" }, 35 | "ap-southeast-1" : { "id" : "ami-14f2b946" }, 36 | "ap-northeast-1" : { "id" : "ami-3561fe34" }, 37 | "ap-southeast-2" : { "id" : "ami-a148d59b" }, 38 | "sa-east-1" : { "id" : "ami-9f6ec982" } 39 | } 40 | }, 41 | 42 | "Resources" : { 43 | "CustomResourcePipeline" : { 44 | "Type" : "AWS::CloudFormation::Stack", 45 | "Properties" : { 46 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 47 | } 48 | }, 49 | 50 | "ExampleRole" : { 51 | "Type" : "AWS::IAM::Role", 52 | "Properties" : { 53 | "AssumeRolePolicyDocument" : { 54 | "Version": "2008-10-17", 55 | "Statement": [{ 56 | "Effect": "Allow", 57 | "Principal": { 58 | "Service": [ "ec2.amazonaws.com" ] 59 | }, 60 | "Action": [ "sts:AssumeRole" ] 61 | }] 62 | }, 63 | "Path" : "/", 64 | "Policies" : [ 65 | { 66 | "PolicyName" : "DismountExample", 67 | "PolicyDocument" : { 68 | "Statement" : [ 69 | { 70 | "Effect" : "Allow", 71 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", "sqs:ReceiveMessage"], 72 | "Resource" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueARN"] } 73 | } 74 | ] 75 | } 76 | } 77 | ] 78 | } 79 | }, 80 | 81 | "ExampleInstanceProfile" : { 82 | "Type" : "AWS::IAM::InstanceProfile", 83 | "Properties" : { 84 | "Path" : "/", 85 | "Roles" : [ { "Ref" : "ExampleRole" } ] 86 | } 87 | }, 88 | 89 | "ExampleInstance" : { 90 | "Type" : "AWS::EC2::Instance", 91 | "Properties" : { 92 | "IamInstanceProfile" : { "Ref" : "ExampleInstanceProfile" }, 93 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 94 | "InstanceType" : { "Ref" : "InstanceType" }, 95 | "KeyName" : { "Ref" : 
"KeyName" }, 96 | "SecurityGroups" : [ { "Ref" : "ExampleSecurityGroup" } ], 97 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 98 | "#!/bin/bash -x\n", 99 | "exec &> /home/ec2-user/userdata.log\n", 100 | "/opt/aws/bin/cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackId" }, " -r ExampleInstance -v\n", 101 | "/opt/aws/bin/cfn-signal -e $? ", { "Fn::Base64" : { "Ref" : "ExampleWaitConditionHandle" }}, "\n" 102 | ]] } } 103 | }, 104 | "Metadata" : { 105 | "AWS::CloudFormation::Init" : { 106 | "config" : { 107 | "packages" : { 108 | "rpm" : { 109 | "aws-cfn-resource-bridge" : "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-resource-bridge-0.1-4.noarch.rpm" 110 | } 111 | }, 112 | "files" : { 113 | "/etc/cfn/bridge.d/mount.conf" : { 114 | "content" : { "Fn::Join" : ["", [ 115 | "[mount]\n", 116 | "resource_type=Custom::VolumeMount\n", 117 | "queue_url=", { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 118 | "timeout=600\n", 119 | "create_action=/home/ec2-user/create.sh\n", 120 | "update_action=/home/ec2-user/update.sh\n", 121 | "delete_action=/home/ec2-user/delete.sh\n" 122 | ]]} 123 | }, 124 | "/home/ec2-user/create.sh" : { 125 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/mount/impl/create.sh", 126 | "mode" : "000755", 127 | "owner" : "ec2-user" 128 | }, 129 | "/home/ec2-user/update.sh" : { 130 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/mount/impl/update.sh", 131 | "mode" : "000755", 132 | "owner" : "ec2-user" 133 | }, 134 | "/home/ec2-user/delete.sh" : { 135 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/mount/impl/delete.sh", 136 | "mode" : "000755", 137 | "owner" : "ec2-user" 138 | } 139 | }, 140 | "services" : { 141 | "sysvinit" : { 142 | "cfn-resource-bridge" : { 143 | "enabled" : "true", 144 | "ensureRunning" : "true", 145 | "files" : ["/etc/cfn/bridge.d/mount.conf", 146 | "/home/ec2-user/create.sh", 147 | "/home/ec2-user/update.sh", 148 | "/home/ec2-user/delete.sh"] 149 | } 150 | } 151 | } 152 | } 153 | } 154 | } 155 | }, 156 | 157 | "ExampleVolume" : { 158 | "Type" : "AWS::EC2::Volume", 159 | "Properties" : { 160 | "AvailabilityZone" : { "Fn::GetAtt" : ["ExampleInstance", "AvailabilityZone"] }, 161 | "Size" : "10" 162 | } 163 | }, 164 | 165 | "ExampleVolumeAttachment" : { 166 | "Type" : "AWS::EC2::VolumeAttachment", 167 | "Properties" : { 168 | "Device" : "/dev/xvdh", 169 | "InstanceId" : { "Ref" : "ExampleInstance" }, 170 | "VolumeId" : { "Ref" : "ExampleVolume" } 171 | } 172 | }, 173 | 174 | "ExampleVolumeMount" : { 175 | "Type" : "Custom::VolumeMount", 176 | "Version" : "1.0", 177 | "DependsOn" : ["ExampleVolumeAttachment", "ExampleWaitCondition"], 178 | "Properties" : { 179 | "ServiceToken" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceTopicARN"] }, 180 | "Device" : "/dev/xvdh", 181 | "MountPoint" : "/mnt/analysis", 182 | "FsType" : "ext3", 183 | "Format" : "true" 184 | } 185 | }, 186 | 187 | "ExampleSecurityGroup" : { 188 | "Type" : "AWS::EC2::SecurityGroup", 189 | "Properties" : { 190 | "GroupDescription" : "SSH to the instance", 191 | "SecurityGroupIngress" : [ 192 | { 193 | "CidrIp" : { "Ref" : "SSHLocation" }, 194 | "FromPort" : "22", 195 | "ToPort" : "22", 196 | "IpProtocol" : "tcp" 197 | } 198 | ] 199 | } 200 | }, 201 | 202 | "ExampleWaitConditionHandle" : { 203 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 204 | }, 205 
| 206 | "ExampleWaitCondition" : { 207 | "Type" : "AWS::CloudFormation::WaitCondition", 208 | "DependsOn" : "ExampleInstance", 209 | "Properties" : { 210 | "Count" : "1", 211 | "Handle" : { "Ref" : "ExampleWaitConditionHandle" }, 212 | "Timeout" : "600" 213 | } 214 | } 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /examples/mount/impl/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | exec 3>&1 # "save" stdout to fd 3 18 | exec &>> /home/ec2-user/create.log 19 | 20 | function error_exit() { 21 | echo "{\"Reason\": \"$1\"}" >&3 3>&- # echo reason to stdout (instead of log) and then close fd 3 22 | exit $2 23 | } 24 | 25 | if [ -z "${Event_ResourceProperties_Device}" ] 26 | then 27 | error_exit "Device is required." 64 28 | fi 29 | 30 | if [ -z "${Event_ResourceProperties_MountPoint}" ] 31 | then 32 | error_exit "MountPoint is required." 64 33 | fi 34 | 35 | if [ ! -e "${Event_ResourceProperties_MountPoint}" ] 36 | then 37 | mkdir -p "${Event_ResourceProperties_MountPoint}" 38 | mkdir_ret=$? 39 | if [ $mkdir_ret -ne 0 ] 40 | then 41 | error_exit "Could not create ${Event_ResourceProperties_MountPoint}" $mkdir_ret 42 | fi 43 | fi 44 | 45 | if [ ! -z "${Event_ResourceProperties_Format}" ] && [ "true" = "${Event_ResourceProperties_Format}" ] 46 | then 47 | if [ -z "${Event_ResourceProperties_FsType}" ] 48 | then 49 | error_exit "Cannot format without fstype." 64 50 | else 51 | mkfs -t "${Event_ResourceProperties_FsType}" "${Event_ResourceProperties_Device}" 52 | mkfs_ret=$? 53 | if [ $mkfs_ret -ne 0 ] 54 | then 55 | error_exit "Formatting failed." $mkfs_ret 56 | fi 57 | fi 58 | fi 59 | 60 | if [ ! -z "${Event_ResourceProperties_FsType}" ] 61 | then 62 | mount -t "${Event_ResourceProperties_FsType}" "${Event_ResourceProperties_Device}" "${Event_ResourceProperties_MountPoint}" 63 | else 64 | mount "${Event_ResourceProperties_Device}" "${Event_ResourceProperties_MountPoint}" 65 | fi 66 | 67 | mount_ret=$? 68 | if [ $mount_ret -ne 0 ] 69 | then 70 | error_exit "Mount failed." $mount_ret 71 | else 72 | echo "{}" >&3 3>&- # echo success to stdout (instead of log) and then close fd 3 73 | exit 0 74 | fi -------------------------------------------------------------------------------- /examples/mount/impl/delete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | exec 3>&1 # "save" stdout to fd 3 18 | exec &>> /home/ec2-user/delete.log 19 | 20 | function error_exit() { 21 | echo "{\"Reason\": \"$1\"}" >&3 3>&- # echo reason to stdout (instead of log) and then close fd 3 22 | exit $2 23 | } 24 | 25 | if [ -z "${Event_ResourceProperties_MountPoint}" ] 26 | then 27 | error_exit "MountPoint is required." 64 28 | fi 29 | 30 | umount "${Event_ResourceProperties_MountPoint}" 31 | 32 | umount_ret=$? 33 | if [ $umount_ret -ne 0 ] 34 | then 35 | error_exit "Unmount failed." $umount_ret 36 | else 37 | echo "{}" >&3 3>&- # echo reason to stdout (instead of log) and then close fd 3 38 | exit 0 39 | fi -------------------------------------------------------------------------------- /examples/mount/impl/update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | exec 3>&1 # "save" stdout to fd 3 18 | exec &>> /home/ec2-user/update.log 19 | 20 | function error_exit() { 21 | echo "{\"Reason\": \"$1\"}" >&3 3>&- # echo reason to stdout (instead of log) and then close fd 3 22 | exit $2 23 | } 24 | 25 | if [ -z "${Event_ResourceProperties_Device}" ] 26 | then 27 | error_exit "Device is required." 64 28 | fi 29 | 30 | if [ -z "${Event_ResourceProperties_MountPoint}" ] 31 | then 32 | error_exit "MountPoint is required." 64 33 | fi 34 | 35 | if [ ! -e "${Event_ResourceProperties_MountPoint}" ] 36 | then 37 | mkdir -p "${Event_ResourceProperties_MountPoint}" 38 | mkdir_ret=$? 39 | if [ $mkdir_ret -ne 0 ] 40 | then 41 | error_exit "Could not create ${Event_ResourceProperties_MountPoint}" $mkdir_ret 42 | fi 43 | fi 44 | 45 | if [ ! -z "${Event_OldResourceProperties_MountPoint}" ] 46 | then 47 | grep "${Event_OldResourceProperties_MountPoint}" /etc/mtab 48 | is_mounted=$? 49 | if [ $is_mounted -eq 0 ] 50 | then 51 | umount "${Event_OldResourceProperties_MountPoint}" 52 | 53 | umount_ret=$? 54 | if [ $umount_ret -ne 0 ] 55 | then 56 | error_exit "Unmounting from ${Event_OldResourceProperties_MountPoint} failed." 
$umount_ret 57 | fi 58 | fi 59 | fi 60 | 61 | if [ ! -z "${Event_ResourceProperties_Format}" ] && [ "true" = "${Event_ResourceProperties_Format}" ] 62 | then 63 | if [ "${Event_ResourceProperties_FsType}" != "${Event_OldResourceProperties_FsType}" ] 64 | then 65 | if [ -z "${Event_ResourceProperties_FsType}" ] 66 | then 67 | error_exit "Cannot format without fstype." 64 68 | else 69 | mkfs -t "${Event_ResourceProperties_FsType}" "${Event_ResourceProperties_Device}" 70 | mkfs_ret=$? 71 | if [ $mkfs_ret -ne 0 ] 72 | then 73 | error_exit "Formatting failed." $mkfs_ret 74 | fi 75 | fi 76 | fi 77 | fi 78 | 79 | if [ ! -z "${Event_ResourceProperties_FsType}" ] 80 | then 81 | mount -t "${Event_ResourceProperties_FsType}" "${Event_ResourceProperties_Device}" "${Event_ResourceProperties_MountPoint}" 82 | else 83 | mount "${Event_ResourceProperties_Device}" "${Event_ResourceProperties_MountPoint}" 84 | fi 85 | 86 | mount_ret=$? 87 | if [ $mount_ret -ne 0 ] 88 | then 89 | error_exit "Mount failed." $mount_ret 90 | else 91 | echo "{}" >&3 3>&- # echo success to stdout (instead of log) and then close fd 3 92 | exit 0 93 | fi -------------------------------------------------------------------------------- /examples/schema/example.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "An example of the managed schema custom resource", 5 | 6 | "Parameters" : { 7 | "DBUser" : { 8 | "Description" : "The database admin account username", 9 | "Type" : "String", 10 | "Default" : "master", 11 | "MinLength" : "1", 12 | "MaxLength" : "16", 13 | "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*", 14 | "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters." 
15 | }, 16 | "DBPassword": { 17 | "NoEcho": "true", 18 | "Description" : "The database admin account password", 19 | "Type": "String", 20 | "MinLength": "1", 21 | "MaxLength": "41" 22 | }, 23 | "DBName" : { 24 | "Description" : "Name of database", 25 | "Type" : "String", 26 | "Default" : "food", 27 | "MinLength" : "1", 28 | "MaxLength" : "64", 29 | "AllowedPattern" : "[a-zA-Z0-9]*", 30 | "ConstraintDescription" : "must be 1-64 alphanumeric characters" 31 | }, 32 | "InstanceClass" : { 33 | "Description" : "The InstanceClass of the RDS Database", 34 | "Type" : "String", 35 | "Default" : "db.m1.small" 36 | }, 37 | "SchemaServiceToken" : { 38 | "Description" : "ServiceToken of Schema Custom Resource", 39 | "Type" : "String", 40 | "AllowedPattern" : "arn:aws:sns:.*", 41 | "ConstraintDescription" : "must be an SNS topic ARN" 42 | }, 43 | "CustomResourceSecurityGroup" : { 44 | "Description" : "Name of Security Group used by Custom Resource", 45 | "Type" : "String" 46 | } 47 | }, 48 | 49 | "Resources" : { 50 | "MyDB" : { 51 | "Type" : "AWS::RDS::DBInstance", 52 | "Properties" : { 53 | "AllocatedStorage" : "5", 54 | "DBInstanceClass" : { "Ref" : "InstanceClass" }, 55 | "Engine" : "MySQL", 56 | "EngineVersion" : "5.6", 57 | "MasterUsername" : { "Ref" : "DBUser" }, 58 | "MasterUserPassword" : { "Ref" : "DBPassword" }, 59 | "DBSecurityGroups" : [{ "Ref" : "MyDBSecurityGroup" }], 60 | "DBName" : { "Ref" : "DBName" } 61 | } 62 | }, 63 | 64 | "MyDBSecurityGroup" : { 65 | "Type" : "AWS::RDS::DBSecurityGroup", 66 | "Properties" : { 67 | "DBSecurityGroupIngress" : [ 68 | { "EC2SecurityGroupName" : { "Ref" : "CustomResourceSecurityGroup" } }, 69 | { "CIDRIP" : "0.0.0.0/0" } 70 | ], 71 | "GroupDescription" : "Custom Resource Access" 72 | } 73 | }, 74 | 75 | "MyDBSchema" : { 76 | "Type" : "Custom::DatabaseSchema", 77 | "Version" : "1.0", 78 | "Properties" : { 79 | "ServiceToken" : { "Ref" : "SchemaServiceToken" }, 80 | "DatabaseURL" : {"Fn::Join" : ["", ["jdbc:mysql://", { "Fn::GetAtt" : ["MyDB", "Endpoint.Address"]}, ":", { "Fn::GetAtt" : ["MyDB", "Endpoint.Port"]}, "/", { "Ref" : "DBName" }]]}, 81 | "DatabaseUsername" : { "Ref" : "DBUser" }, 82 | "DatabasePassword" : { "Ref" : "DBPassword" }, 83 | "databaseChangeLog" : [ 84 | { 85 | "changeSet" : { 86 | "id" : "1", 87 | "author" : "adamthom", 88 | "changes" : [ 89 | { 90 | "createTable" : { 91 | "tableName" : "example", 92 | "columns" : [ 93 | { 94 | "column" : { 95 | "name" : "id", 96 | "type" : "int", 97 | "autoIncrement" : true, 98 | "constraints" : { 99 | "primaryKey" : true, 100 | "nullable" : false 101 | } 102 | } 103 | }, 104 | { 105 | "column" : { 106 | "name" : "fruit", 107 | "type" : "varchar(255)", 108 | "constraints" : { 109 | "nullable" : false 110 | } 111 | } 112 | } 113 | ] 114 | } 115 | } 116 | ] 117 | } 118 | } 119 | ] 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /examples/schema/impl/custom_resource_runner.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "Runs the managed schema custom resource", 5 | 6 | "Parameters" : { 7 | "KeyName" : { 8 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 9 | "Type" : "String" 10 | }, 11 | "InstanceType" : { 12 | "Description" : "Custom resource runner instance type", 13 | "Type" : "String", 14 | "Default" : "t1.micro", 15 | "AllowedValues" : [ 
"t1.micro","m1.small","m1.medium","m1.large","m1.xlarge","m2.xlarge","m2.2xlarge","m2.4xlarge","m3.xlarge","m3.2xlarge","c1.medium","c1.xlarge","cc1.4xlarge","cc2.8xlarge","cg1.4xlarge"], 16 | "ConstraintDescription" : "must be a valid EC2 instance type." 17 | }, 18 | "MinSize" : { 19 | "Description" : "Minimum number of custom resource runners", 20 | "Type" : "Number", 21 | "MinValue" : "1", 22 | "Default" : "1", 23 | "ConstraintDescription" : "Must have at least one runner" 24 | }, 25 | "MaxSize" : { 26 | "Description" : "Maximum number of custom resource runners", 27 | "Type" : "Number", 28 | "MinValue" : "1", 29 | "Default" : "1", 30 | "ConstraintDescription" : "Must have at least one runner" 31 | }, 32 | "SSHLocation" : { 33 | "Description" : "The IP address range that can be used to SSH to the custom resource runners", 34 | "Type": "String", 35 | "MinLength": "9", 36 | "MaxLength": "18", 37 | "Default": "0.0.0.0/0", 38 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 39 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 40 | } 41 | }, 42 | 43 | "Mappings" : { 44 | "AwsRegionToAMI" : { 45 | "us-east-1" : { "id" : "ami-35792c5c" }, 46 | "us-west-2" : { "id" : "ami-d03ea1e0" }, 47 | "us-west-1" : { "id" : "ami-687b4f2d" }, 48 | "eu-west-1" : { "id" : "ami-149f7863" }, 49 | "ap-southeast-1" : { "id" : "ami-14f2b946" }, 50 | "ap-northeast-1" : { "id" : "ami-3561fe34" }, 51 | "ap-southeast-2" : { "id" : "ami-a148d59b" }, 52 | "sa-east-1" : { "id" : "ami-9f6ec982" } 53 | } 54 | }, 55 | 56 | "Resources" : { 57 | "CustomResourcePipeline" : { 58 | "Type" : "AWS::CloudFormation::Stack", 59 | "Properties" : { 60 | "TemplateURL" : "https://s3.amazonaws.com/cloudformation-examples/cr-backend-substack-template.template" 61 | } 62 | }, 63 | 64 | "RunnerRole" : { 65 | "Type" : "AWS::IAM::Role", 66 | "Properties" : { 67 | "AssumeRolePolicyDocument" : { 68 | "Version": "2008-10-17", 69 | "Statement": [{ 70 | "Effect": "Allow", 71 | "Principal": { 72 | "Service": [ "ec2.amazonaws.com" ] 73 | }, 74 | "Action": [ "sts:AssumeRole" ] 75 | }] 76 | }, 77 | "Path" : "/", 78 | "Policies" : [ 79 | { 80 | "PolicyName" : "CustomResourceRunner", 81 | "PolicyDocument" : { 82 | "Statement" : [ 83 | { 84 | "Effect" : "Allow", 85 | "Action" : ["sqs:ChangeMessageVisibility", "sqs:DeleteMessage", "sqs:ReceiveMessage"], 86 | "Resource" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueARN"] } 87 | } 88 | ] 89 | } 90 | } 91 | ] 92 | } 93 | }, 94 | 95 | "RunnerInstanceProfile" : { 96 | "Type" : "AWS::IAM::InstanceProfile", 97 | "Properties" : { 98 | "Path" : "/", 99 | "Roles" : [ { "Ref" : "RunnerRole" } ] 100 | } 101 | }, 102 | 103 | "RunnerLaunchConfig" : { 104 | "Type" : "AWS::AutoScaling::LaunchConfiguration", 105 | "Properties" : { 106 | "IamInstanceProfile" : { "Ref" : "RunnerInstanceProfile" }, 107 | "ImageId" : { "Fn::FindInMap" : ["AwsRegionToAMI", { "Ref" : "AWS::Region" }, "id"] }, 108 | "InstanceType" : { "Ref" : "InstanceType" }, 109 | "KeyName" : { "Ref" : "KeyName" }, 110 | "SecurityGroups" : [ { "Ref" : "RunnerSecurityGroup" } ], 111 | "UserData" : { "Fn::Base64" : { "Fn::Join" : ["", [ 112 | "#!/bin/bash -x\n", 113 | "exec &> /home/ec2-user/userdata.log\n", 114 | "/opt/aws/bin/cfn-init --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackId" }, " -r RunnerLaunchConfig -v\n", 115 | "/opt/aws/bin/cfn-signal -e $? 
", { "Fn::Base64" : { "Ref" : "RunnerWaitConditionHandle" }}, "\n" 116 | ]] } } 117 | }, 118 | "Metadata" : { 119 | "AWS::CloudFormation::Init" : { 120 | "config" : { 121 | "packages" : { 122 | "rpm" : { 123 | "aws-cfn-resource-bridge" : "https://s3.amazonaws.com/cloudformation-examples/aws-cfn-resource-bridge-0.1-4.noarch.rpm" 124 | }, 125 | "yum" : { 126 | "mysql-connector-java" : [] 127 | } 128 | }, 129 | "files" : { 130 | "/etc/cfn/bridge.d/schema.conf" : { 131 | "content" : { "Fn::Join" : ["", [ 132 | "[schema]\n", 133 | "resource_type=Custom::DatabaseSchema\n", 134 | "flatten=false\n", 135 | "queue_url=", { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceQueueURL"] }, "\n", 136 | "timeout=600\n", 137 | "default_action=/home/ec2-user/liquify.py\n" 138 | ]]} 139 | }, 140 | "/home/ec2-user/liquify.py" : { 141 | "source" : "https://raw.github.com/awslabs/aws-cfn-custom-resource-examples/master/examples/schema/impl/liquify.py", 142 | "mode" : "000755", 143 | "owner" : "ec2-user" 144 | }, 145 | "/home/ec2-user/liquibase/lib/mysql-connector-java-bin.jar" : { 146 | "content" : "/usr/share/java/mysql-connector-java.jar", 147 | "mode" : "120644" 148 | } 149 | }, 150 | "sources" : { 151 | "/home/ec2-user/liquibase" : "https://s3.amazonaws.com/cloudformation-examples/liquibase-3.0.5-bin.zip" 152 | }, 153 | "services" : { 154 | "sysvinit" : { 155 | "cfn-resource-bridge" : { 156 | "enabled" : "true", 157 | "ensureRunning" : "true", 158 | "files" : ["/etc/cfn/bridge.d/schema.conf", 159 | "/home/ec2-user/liquify.py"] 160 | } 161 | } 162 | } 163 | } 164 | } 165 | } 166 | }, 167 | 168 | "RunnerAutoScalingGroup" : { 169 | "Type" : "AWS::AutoScaling::AutoScalingGroup", 170 | "Properties" : { 171 | "AvailabilityZones" : { "Fn::GetAZs" : ""}, 172 | "LaunchConfigurationName" : { "Ref" : "RunnerLaunchConfig" }, 173 | "MinSize" : { "Ref" : "MinSize" }, 174 | "MaxSize" : { "Ref" : "MaxSize" } 175 | } 176 | }, 177 | 178 | "RunnerSecurityGroup" : { 179 | "Type" : "AWS::EC2::SecurityGroup", 180 | "Properties" : { 181 | "GroupDescription" : "SSH to the runner instances", 182 | "SecurityGroupIngress" : [ 183 | { 184 | "CidrIp" : { "Ref" : "SSHLocation" }, 185 | "FromPort" : "22", 186 | "ToPort" : "22", 187 | "IpProtocol" : "tcp" 188 | } 189 | ] 190 | } 191 | }, 192 | 193 | "RunnerWaitConditionHandle" : { 194 | "Type" : "AWS::CloudFormation::WaitConditionHandle" 195 | }, 196 | 197 | "RunnerWaitCondition" : { 198 | "Type" : "AWS::CloudFormation::WaitCondition", 199 | "DependsOn" : "RunnerAutoScalingGroup", 200 | "Properties" : { 201 | "Count" : "1", 202 | "Handle" : { "Ref" : "RunnerWaitConditionHandle" }, 203 | "Timeout" : "600" 204 | } 205 | } 206 | }, 207 | 208 | "Outputs" : { 209 | "SecurityGroup" : { 210 | "Description" : "Security group of the runner (to permit it to access the database)", 211 | "Value" : { "Ref" : "RunnerSecurityGroup" } 212 | }, 213 | "ServiceToken" : { 214 | "Description" : "Service token of schema custom resource", 215 | "Value" : { "Fn::GetAtt" : ["CustomResourcePipeline", "Outputs.CustomResourceTopicARN"] } 216 | } 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /examples/schema/impl/liquify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #============================================================================== 3 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | #============================================================================== 17 | import hashlib 18 | import lockfile 19 | import logging 20 | import os 21 | import subprocess 22 | import sys 23 | import tempfile 24 | import re 25 | 26 | try: 27 | import simplejson as json 28 | except ImportError: 29 | import json 30 | 31 | handler = logging.StreamHandler(sys.stderr) 32 | handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")) 33 | logging.getLogger().addHandler(handler) 34 | 35 | log = logging.getLogger('schema') 36 | log.setLevel(logging.INFO) 37 | 38 | _BOOLEANS = ["autoIncrement", "primaryKey", "nullable", "alwaysRun", "runOnChange", "failOnError", "runInTransaction", 39 | "defaultValueBoolean", "unique", "deleteCascade", "initiallyDeferred", "deferrable"] 40 | _NUMBERS = ["defaultValueNumeric", "startWith", "incrementBy"] 41 | 42 | 43 | def type_fixer(d): 44 | return_value = dict(d) 45 | for k, v in d.iteritems(): 46 | if not isinstance(v, basestring): 47 | continue 48 | if k in _BOOLEANS: 49 | return_value[k] = (True if "true" == v.lower() else False) 50 | elif k in _NUMBERS: 51 | try: 52 | return_value[k] = int(v) 53 | except ValueError: 54 | return_value[k] = float(v) 55 | return return_value 56 | 57 | 58 | class FatalError(SystemExit): 59 | def __init__(self, reason, code): 60 | super(FatalError, self).__init__(code) 61 | log.error('Failing resource: %s', reason) 62 | print json.dumps({'Reason': reason}) 63 | 64 | 65 | class Liquifier(object): 66 | def _get_property_or_fail(self, key, properties=None): 67 | if not properties: 68 | properties = self._resource_properties 69 | try: 70 | return properties[key] 71 | except KeyError: 72 | raise FatalError('Properties did not contain required field %s' % key, -1) 73 | 74 | def __init__(self, properties, old_properties, stackId, logicalId, 75 | driver='com.mysql.jdbc.Driver', liquibase_home='/home/ec2-user/liquibase'): 76 | self._stack_id = stackId 77 | self._logical_id = logicalId 78 | self._resource_properties = properties 79 | self._url = self._get_property_or_fail('DatabaseURL') 80 | self._user = self._get_property_or_fail('DatabaseUsername') 81 | self._passwd = self._get_property_or_fail('DatabasePassword') 82 | self._old_properties = old_properties 83 | # TODO: fail if old url/username/password do not match (not allowed to update) 84 | self._driver = driver 85 | self._libjars = ':'.join( 86 | [os.path.join('%s/lib/' % liquibase_home, f) for f in os.listdir(liquibase_home + '/lib') if f.endswith('.jar')] 87 | ) 88 | self._libjars += ':%s/liquibase.jar' % liquibase_home 89 | self._liquibase_home = liquibase_home 90 | 91 | def run_event(self, event_type): 92 | change_log = self._get_property_or_fail('databaseChangeLog') 93 | change_tag = hashlib.sha256(json.dumps(change_log)).hexdigest() 94 | old_change_log = self._get_property_or_fail('databaseChangeLog', self._old_properties) 95 | 96 | if event_type == 'Create': 97 
| self._update_to_tag(change_log, change_tag) 98 | elif event_type == 'Update': 99 | # to roll back to a previous changelog, liquibase needs the "latest" changelog, but the previous tag. 100 | if not self._roll_back_to_tag(old_change_log, change_tag): 101 | self._update_to_tag(change_log, change_tag) 102 | elif event_type == 'Delete': 103 | if self._resource_properties.get('DropAllOnDelete', 'false').lower() == 'true': 104 | self._drop_all() 105 | 106 | def _get_command_base(self): 107 | return ['java', 108 | '-cp', self._libjars, 109 | 'liquibase.integration.commandline.Main', 110 | '--logLevel=debug', 111 | '--classpath=%s' % self._libjars, 112 | '--driver=%s' % self._driver, 113 | '--url=%s' % self._url, 114 | '--username=%s' % self._user, 115 | '--password=%s' % self._passwd] 116 | 117 | def _run_cmd(self, cmdline, liquibase_cmd): 118 | log.info("Running command: %s", cmdline) 119 | proc = subprocess.Popen(cmdline, cwd=self._liquibase_home, stdout=subprocess.PIPE, 120 | stderr=subprocess.STDOUT) 121 | 122 | out = proc.communicate()[0] 123 | 124 | log.info('Liquibase %s output: %s', liquibase_cmd, out) 125 | 126 | return proc.returncode, out 127 | 128 | def _call_with_changelog(self, func, change_log): 129 | changelog_parent = os.path.join(tempfile.gettempdir(), 130 | re.sub('[^a-zA-Z0-9_-]', '_', self._stack_id), 131 | self._logical_id) 132 | 133 | if not os.path.isdir(changelog_parent): 134 | try: 135 | os.makedirs(changelog_parent) 136 | except OSError, e: 137 | raise FatalError(str(e), -2) 138 | 139 | lock = lockfile.FileLock(os.path.join(changelog_parent, 'changelog.json.lock')) 140 | 141 | with lock: 142 | changelog_path = os.path.join(changelog_parent, 'changelog.json') 143 | with file(changelog_path, 'w') as f: 144 | json.dump({'databaseChangeLog': change_log}, f, indent=4) 145 | 146 | f.flush() 147 | retval = func(changelog_path) 148 | 149 | os.remove(changelog_path) 150 | 151 | return retval 152 | 153 | def _update(self, changelog_file): 154 | cmd = self._get_command_base() 155 | cmd.append('--changeLogFile=%s' % changelog_file) 156 | cmd.append('update') 157 | 158 | retcode, output = self._run_cmd(cmd, 'update') 159 | 160 | if retcode: 161 | raise FatalError('Liquibase update failed with error %s' % retcode, retcode) 162 | 163 | def _update_to_tag(self, change_log, change_tag): 164 | self._call_with_changelog(lambda path: self._update(path), change_log) 165 | 166 | self._tag(change_tag) 167 | 168 | def _rollback(self, changelog_file, change_tag): 169 | cmd = self._get_command_base() 170 | cmd.append('--changeLogFile=%s' % changelog_file) 171 | cmd.append('rollback') 172 | cmd.append(change_tag) 173 | 174 | retcode, output = self._run_cmd(cmd, 'update_rollback') 175 | 176 | return False if retcode else True 177 | 178 | def _roll_back_to_tag(self, change_log, change_tag): 179 | return self._call_with_changelog(lambda path: self._rollback(path, change_tag), change_log) 180 | 181 | def _drop_all(self): 182 | cmd = self._get_command_base() 183 | cmd.append('dropAll') 184 | 185 | retcode, output = self._run_cmd(cmd, 'dropAll') 186 | 187 | if retcode: 188 | raise FatalError('Liquibase drop failed with error %s' % retcode, retcode) 189 | 190 | def _tag(self, tag): 191 | cmd = self._get_command_base() 192 | cmd.append('tag') 193 | cmd.append(tag) 194 | 195 | retcode, output = self._run_cmd(cmd, 'tag') 196 | 197 | if retcode: 198 | raise FatalError('Liquibase tag failed with error %s' % retcode, retcode) 199 | 200 | 201 | try: 202 | event_obj = 
json.loads(os.environ.get('EventProperties', ''), object_hook=type_fixer) 203 | except ValueError: 204 | raise FatalError('Could not parse properties as JSON', -1) 205 | 206 | event_type = event_obj['RequestType'] 207 | 208 | log.info('%s received event: %s', event_type, json.dumps(event_obj, indent=4)) 209 | 210 | resource_properties = event_obj.get('ResourceProperties') 211 | 212 | if not resource_properties: 213 | raise FatalError('Resource Properties not found.', -1) 214 | 215 | stack_id = event_obj['StackId'] 216 | logical_id = event_obj['LogicalResourceId'] 217 | 218 | Liquifier(resource_properties, event_obj.get('OldResourceProperties', {}), stack_id, logical_id).run_event(event_type) 219 | 220 | print json.dumps({}) --------------------------------------------------------------------------------
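
The mount scripts (examples/mount/impl/create.sh, update.sh, delete.sh) take their inputs from Event_ResourceProperties_* environment variables exported by aws-cfn-resource-bridge and report their result as a single JSON line on stdout. A minimal smoke-test sketch for create.sh, assuming the bridge's flattened Event_* variable layout and reusing the property values from examples/mount/example.template; run as root on a scratch instance with a throwaway volume attached at /dev/xvdh, since Format=true will mkfs the device:

# Illustrative only: simulate the environment aws-cfn-resource-bridge would
# provide for a Create of the Custom::VolumeMount resource.
export Event_ResourceProperties_Device=/dev/xvdh
export Event_ResourceProperties_MountPoint=/mnt/analysis
export Event_ResourceProperties_FsType=ext3
export Event_ResourceProperties_Format=true

# create.sh logs to /home/ec2-user/create.log and prints its JSON result
# ({} on success, {"Reason": "..."} on failure) to stdout.
result=$(/home/ec2-user/create.sh)
echo "create.sh exited with $? and returned: ${result}"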
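
liquify.py is configured with flatten=false in schema.conf, so it expects the whole custom resource event as JSON in a single EventProperties environment variable, which it parses and dispatches on RequestType. A minimal sketch of driving it by hand on a runner instance, with placeholder endpoint, credentials and changelog; it assumes the Liquibase install under /home/ec2-user/liquibase and a reachable MySQL database:

# Illustrative only: hand liquify.py a minimal Create event the way the
# bridge would. All values below are placeholders.
export EventProperties='{
  "RequestType": "Create",
  "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/example-stack/placeholder-id",
  "LogicalResourceId": "MyDBSchema",
  "ResourceProperties": {
    "DatabaseURL": "jdbc:mysql://example-endpoint:3306/food",
    "DatabaseUsername": "master",
    "DatabasePassword": "placeholder-password",
    "databaseChangeLog": []
  }
}'

# Prints {} on success; on failure it prints {"Reason": "..."} and exits
# non-zero, with diagnostic logging sent to stderr.
/home/ec2-user/liquify.py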