├── .gitignore
├── CONTRIBUTING.md
├── LICENSE.txt
├── MANIFEST.in
├── Makefile
├── README.md
├── cfnplus
├── __init__.py
├── action_tags.py
├── after_creation_tag.py
├── before_creation_tag.py
├── bootstrap_actions_tag.py
├── eval_cfn_expr.py
├── lambda_code_tag.py
├── s3_ops.py
├── stack_policy_tag.py
├── stack_resource.py
└── utils.py
├── example
├── bootstrap
│ └── db.sh
├── data
│ ├── a-file
│ └── more-data
│ │ └── another-file
├── deploy.py
├── lambda
│ └── api
│ │ └── api.py
├── nested_template.yml
└── template.yml
├── setup.py
├── test
├── integration
│ └── s3_ops_test.py
└── unit
│ └── eval_cfn_expr_test.py
└── tox.ini
/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg-info
2 | dist
3 | *.pyc
4 | *.pyo
5 | .pytest_cache
6 | .tox
7 | build
8 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to CloudFormation Plus
2 |
3 | ## Contributing Code
4 |
5 | Contributions are certainly welcome! Any code you contribute must be owned by
6 | you, and must be licensed under Apache License 2.0.
7 |
8 | ## Reporting Bugs, Requesting Features
9 |
10 | Please use GitHub to report bugs or request features.
11 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE.txt
3 | include CONTRIBUTING.md
4 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY : main
2 | main :
3 | @echo "Targets:"
4 | @echo " test-unit"
5 | @echo " test-integ"
6 | @echo " package"
7 |
8 | .PHONY : test-unit
9 | test-unit :
10 | tox -- test/unit/*.py
11 |
12 | .PHONY : test-integ
13 | test-integ :
14 | tox -- test/integration/*py
15 |
16 | .PHONY : package
17 | package :
18 | python2 setup.py bdist_wheel
19 | python3 setup.py sdist bdist_wheel
20 | @echo ""
21 | @echo "Packages:"
22 | 	@find dist -type f
23 |
24 | .PHONY : clean
25 | clean :
26 | rm -rf build dist *.egg-info .pytest_cache
27 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CloudFormation Plus
2 |
3 | ## Contents
4 |
5 | - [Intro](#intro)
6 | - [Example](#example)
7 | - [Installation](#installation)
8 | - [Usage](#usage)
9 | - [Signature of `process_template`](#signature-of-process_template)
10 | - [Note: Intrinsic functions](#note-intrinsic-functions)
11 | - [Features](#features)
12 | - [Atomicity](#atomicity)
13 | - [S3 operations](#s3-operations)
14 | - [Making a directory](#making-a-directory)
15 | - [Uploading a file](#uploading-a-file)
16 | - [Syncing a directory](#syncing-a-directory)
17 | - [Making and updating Lambda functions](#making-and-updating-lambda-functions)
18 | - [Bootstrapping EC2 instances](#bootstrapping-ec2-instances)
19 | - [Including nested stacks](#including-nested-stacks)
20 | - [Using YAML anchors in templates](#using-yaml-anchors-in-templates)
21 | - [Setting a stack policy](#setting-a-stack-policy)
22 |
23 | ## Intro
24 |
25 | This is a Python library that adds features to AWS CloudFormation that reduce the amount of code you must write in order to automate the deployment of non-trivial cloud-based systems. Specifically, this library adds elements to the CloudFormation template language that perform tasks that otherwise would need to be done in your deploy script.
26 |
27 | ### Example
28 |
29 | Suppose we want to use CloudFormation to make a database node and a Lambda function that implements an API endpoint. We write a shell script that configures the database node (i.e., downloads and installs the database software) and a program that implements the Lambda function's logic. We put both files in this directory structure:
30 |
31 | ```
32 | |- my-api/
33 | |- bootstrap/
34 | |- db.sh
35 | |- lambda/
36 | |- api/
37 | |- api.py
38 | ```
39 |
40 | Using CloudFormation Plus's extensions to the template language, we can make the template like this:
41 |
42 | ```
43 | AWSTemplateFormatVersion: 2010-09-09
44 |
45 | Metadata:
46 | Aruba::BeforeCreation:
47 | - S3Upload:
48 | LocalFile: bootstrap/db.sh
49 | S3Dest: s3://my-bucket/bootstrap/db.sh
50 |
51 | Resources:
52 | Database:
53 | Type: 'AWS::EC2::Instance'
54 | Properties:
55 | AvailabilityZone: us-west-2a
56 | ImageId: ami-e251209a
57 | InstanceType: m5.large
58 | Aruba::BootstrapActions:
59 | Actions:
60 | - Path: s3://my-bucket/bootstrap/db.sh
61 | LogUri: s3://my-bucket/logs
62 | Timeout: PT5M
63 | IamInstanceProfile: {Ref: InstProf}
64 |
65 | ApiLambda:
66 | Type: 'AWS::Lambda::Function'
67 | Properties:
68 | Aruba::LambdaCode:
69 | LocalPath: lambda/api
70 | S3Dest: s3://my-bucket/lambda
71 | Environment:
72 | Variables:
73 |           DB_HOST: {'Fn::Sub': '${Database.PublicDnsName}'}
74 | Handler: api.handle
75 | Runtime: python2.7
76 |
77 | DatabaseRole:
78 | Type: 'AWS::IAM::Role'
79 | Properties:
80 | AssumeRolePolicyDocument:
81 | Version: 2012-10-17
82 | Statement:
83 | - Effect: Allow
84 | Principal:
85 | Service:
86 | - ec2.amazonaws.com
87 | Action:
88 | - 'sts:AssumeRole'
89 | Policies:
90 | - PolicyName: DatabasePolicy
91 | PolicyDocument:
92 | Version: 2012-10-17
93 | Statement:
94 | - Effect: Allow
95 | Action: 's3:HeadBucket'
96 | Resource: '*'
97 | - Effect: Allow
98 | Action: 's3:ListBucket'
99 | Resource: 'arn:aws:s3:::my-bucket'
100 | - Effect: Allow
101 | Action:
102 | - 's3:GetObject'
103 | - 's3:PutObject'
104 | Resource: 'arn:aws:s3:::my-bucket/*'
105 |
106 | InstProf:
107 | Type: 'AWS::IAM::InstanceProfile'
108 | Properties:
109 | Roles:
110 | - Ref: DatabaseRole
111 | ```
112 |
113 | This template would be saved at `my-api/template.yml`.
114 |
115 | CloudFormation Plus helps in the following ways:
116 |
117 | - In the `Metadata` section near the top, the `Aruba::BeforeCreation` element uploads `bootstrap/db.sh` to the S3 bucket `my-bucket` at key `bootstrap/db.sh`.
118 | - In the `Database` resource, the `Aruba::BootstrapActions` property causes `db.sh` to be executed after the database node is made. If `db.sh` fails, the whole stack deployment will fail. `db.sh`'s output will be written to the S3 bucket `my-bucket`.
119 | - In the `ApiLambda` resource, the `Aruba::LambdaCode` property bundles the local `lambda/api` directory into a Lambda deployment package, and then uploads it to the S3 bucket `my-bucket`. It also sets the function to use that deployment package as its code (and when this template is used to update an existing stack, it ensures that the function uses the latest package).
120 |
121 |
122 | ## Installation
123 |
124 | CloudFormation Plus works with Python 2 and 3.
125 |
126 | 1. Download the source package for one of the releases.
127 | 1. Extract the source package, and `cd` into the resulting directory.
128 | 1. Run `pip install .`
129 |
130 | You can now use CloudFormation Plus in your scripts by importing `cfnplus`.
131 |
132 | ## Usage
133 |
134 | The rest of this document describes the extensions to the template language. This section describes how to process templates that use these extensions.
135 |
136 | CloudFormation Plus is a Python library, and it is intended to be used with stacks that are made/updated by Python programs. In other words, it is best if the creation/update of your stacks is automated with Python programs.
137 |
138 | Let's use an example consisting of a single template named "my_website.yml". We can write a Python program that uses boto3 to deploy and update this stack:
139 |
140 | ```
141 | import time
142 | import boto3
143 | import botocore
144 |
145 | _TEMPLATE_PATH = 'my_website.yml'
146 | _STACK_NAME = 'MyWebsite'
147 | _AWS_REGION = 'us-west-2'
148 |
149 | def make_or_update_stack(cfn, template):
150 | ...
151 |
152 | def main():
153 | # read template
154 | with open(_TEMPLATE_PATH) as f:
155 | template = f.read()
156 |
157 | # make/update stack
158 | make_or_update_stack(cfn, template)
159 |
160 | if __name__ == '__main__':
161 | main()
162 | ```
163 |
164 | (I omit the body of `make_or_update_stack` because it's quite verbose and not important for the current purpose. If you'd like to see how it could be written, see `example/deploy.py` in this repository.)
165 |
166 | If we want to use CloudFormation Plus template language extensions in `my_website.yml`, we need to change this script thus:
167 |
168 | ```
169 | import time
170 | import boto3
171 | import botocore
172 | import cfnplus
173 |
174 | _TEMPLATE_PATH = 'my_website.yml'
175 | _STACK_NAME = 'MyWebsite'
176 | _AWS_REGION = 'us-west-2'
177 |
178 | def make_or_update_stack(cfn, template):
179 | ...
180 |
181 | def main():
182 | # read template
183 | with open(_TEMPLATE_PATH) as f:
184 | template = f.read()
185 |
186 | # process language extensions
187 | with cfnplus.process_template(
188 | template,
189 | [], # template params
190 | _AWS_REGION,
191 | _TEMPLATE_PATH,
192 | _STACK_NAME,
193 | ) as cfnp_result:
194 |
195 | # do actions that must be done before stack creation/update
196 | cfnp_result.do_before_creation()
197 |
198 | # make/update stack
199 | make_or_update_stack(cfn, template)
200 |
201 | # do actions that must be done after stack creation/update
202 | cfnp_result.do_after_creation()
203 |
204 | if __name__ == '__main__':
205 | main()
206 | ```
207 |
208 | Let's go over these changes. All CloudFormation Plus features used in `my_website.yml` are processed by the call to `cfnplus.process_template`. (The parameters for this function are described below.) As you will learn when you read the sections below, some features generate template code, some features generate S3 actions that are to be done before stack creation/update, and some features generate S3 actions that are to be done after stack creation/update. The return value of `cfnplus.process_template` contains the accumulated results of each feature in `my_website.yml`. It is important to note that `cfnplus.process_template` itself does not perform any S3 actions — in fact, it has no side-effects.
209 |
210 | We do the rest of the work in the body of the `with` statement. The purpose of the `with` statement is to support atomicity; if an exception is thrown in the `do_before_creation` call or in any statement after this call and before the `do_after_creation` call, the effects of the actions done by the `do_before_creation` call will be rolled back — for example, objects added to S3 will be removed, objects removed from S3 will be restored. Similarly, if an exception is thrown by the `do_after_creation` call, the effects of any actions done by this call will be rolled back — but the effects of the `do_before_creation` call will NOT be rolled back.
211 |
212 | The most likely cause of exceptions will be problems with the original (non-transformed) template. CloudFormation supports rollback of failed stack changes; with CloudFormation Plus, you now can roll back S3 changes as well.
213 |
214 | ### Signature of `process_template`
215 |
216 | ```
217 | def process_template(template, template_params, aws_region, template_path=None, stack_name=None)
218 | ```
219 |
220 |
221 |
222 | Param | Type | Description |
223 |
224 |
225 |
226 |
227 | template |
228 | str |
229 | The CloudFormation template to process (must be in
230 | YAML) |
231 |
232 |
233 | template_params |
234 | list |
235 | A list of dicts of this form:
236 |
237 | {
238 | "ParameterKey": ...,
239 | "ParameterValue": ..., (optional)
240 | "UsePreviousValue": ... (optional)
241 | }
242 |
243 | If `UsePreviousValue` is `True` for any of them, then the `stack_name` parameter must be
244 | given and a stack with that name must exist.
245 | |
246 |
247 |
248 |
249 | aws_region |
250 | str |
251 | The name of the AWS region in which the stack will be
252 | made |
253 |
254 |
255 |
256 | template_path |
257 | str |
258 | An absolute filesystem path pointing to
259 | the template. This is needed only if the template has tags containing
260 | paths relative to the template's path. |
261 |
262 |
263 |
264 | stack_name |
265 | str |
266 | The name that the stack will have when it is
267 | made. This is needed only if the template uses the AWS::StackName
268 | variable, or if template_params contains an item with UsePreviousValue
269 | set to True , or if Aruba::StackPolicy is used. |
270 |
271 |
272 |
273 |
274 |
275 |
276 | ## Note: Intrinsic functions
277 |
278 | Certain CloudFormation intrinsic functions can be used with the features provided by this library. However, they can only be used to reference template parameters and values exported from other stacks. In particular, they cannot be used to reference resources (or properties thereof) defined in the same template.
279 |
280 | The following intrinsic functions are supported:
281 | - `fn::ImportValue`
282 | - `fn::Sub`
283 | - `Ref`
284 |
285 | ## Features
286 |
287 | ### Atomicity
288 |
289 | A great aspect of CloudFormation is that it is (usually) able to roll back resource changes when an error occurs. This is also true of S3 operations performed by CloudFormation Plus. Confer [above](#usage) to see how to use the CloudFormation Plus library to enable this.
290 |
291 | ### S3 operations
292 |
293 | You can specify S3 operations to be done before or after a stack is made from your template. For the former, add a list to your template's `Metadata` section with the label `Aruba::BeforeCreation`, and add your actions to that list. For the latter, the list should have the label `Aruba::AfterCreation`.
294 |
295 | For example:
296 |
297 | ```
298 | AWSTemplateFormatVersion: 2010-09-09
299 | Parameters:
300 | ...
301 | Metadata:
302 | Aruba::BeforeCreation:
303 | - ...
304 | - ...
305 | Aruba::AfterCreation:
306 | - ...
307 | - ...
308 | Resources:
309 | ...
310 | ```
311 |
312 | The following subsections describe how to define actions.
313 |
314 | *NOTE:* Versioning must be enabled on the bucket. This is needed because it is used to undo S3 operations when an error occurs.
315 |
316 | #### Making a directory
317 |
318 | ```
319 | S3Mkdir: S3_DEST
320 | ```
321 |
322 | This action makes a directory in an S3 bucket.
323 |
324 | If a directory already exists at the destination, this action does nothing.
325 |
326 | NOTE: This is done by adding a 0-byte object with the specified key (plus a '/' at the end if it doesn't end with one already).
327 |
328 | ##### Parameters
329 |
330 |
331 | S3_DEST
332 | - An "s3://BUCKET/KEY" URI at which a directory should be made
333 |
334 |
335 | #### Uploading a file
336 |
337 | ```
338 | S3Upload:
339 | LocalFile: LOCAL_FILE
340 | S3Dest: S3_DEST
341 | ```
342 |
343 | This action uploads a local file to an S3 bucket.
344 |
345 | If a file already exists at the destination, this action overwrites it.
346 |
347 | ##### Parameters
348 |
349 |
350 | LOCAL_FILE
351 | - A local path to the file that should be uploaded. If the path is relative,
352 | it must be relative to the template file.
353 |
354 | S3_DEST
355 | - An "s3://BUCKET/KEY" URI to which the file should be uploaded
356 |
357 |
358 | #### Syncing a directory
359 |
360 | ```
361 | S3Sync:
362 | LocalDir: LOCAL_DIR
363 | S3Dest: S3_DEST
364 | ```
365 |
366 | This action updates a directory in S3 with the contents of a local directory. Files and directories in the local directory are uploaded to the S3 directory, and any files and directories in the S3 directory that are not in the local directory are deleted.
367 |
368 | If nothing yet exists at the destination, a directory is created there.
369 |
370 | ##### Parameters
371 |
372 |
373 | LOCAL_DIR
374 | - A local path to the directory that should be synced. If the path is relative,
375 | it must be relative to the template file.
376 |
377 | S3_DEST
378 | - The "s3://BUCKET/KEY" URI of the directory that should be synced. At the end,
379 | this directory will contain (directly or indirectly) all the files in the local
380 | directory, and nothing else.
381 |
382 |
383 | ### Making and updating Lambda functions
384 |
385 | CloudFormation Plus can help you keep your Lambda functions up-to-date. Use the following property in your `AWS::Lambda::Function` resources:
386 |
387 | ```
388 | Aruba::LambdaCode:
389 | LocalPath: LOCAL_PATH
390 | S3Dest: S3_DEST
391 | ```
392 |
393 | *IMPORTANT:* This property should be used instead of the `Code` property.
394 |
395 | This property does the following:
396 | - Builds a Lambda deployment package containing the files in the directory at `LOCAL_PATH`, and uploads it to S3
397 | - If the Lambda function already exists and any code in the directory at `LOCAL_PATH` has changed, updates the Lambda function to use the new code
398 |
399 | #### Parameters
400 |
401 |
402 | LOCAL_PATH
403 | - A path to a directory containing code (and dependencies) for a Lambda function.
404 | If the path is relative, it must be relative to the template file.
405 |
406 | S3_DEST
407 | - The "s3://BUCKET/KEY" URI of the directory to which the Lambda deployment
408 | package should be uploaded
409 |
410 |
411 | #### Example
412 |
413 | ```
414 | MyFunction:
415 | Type: 'AWS::Lambda::Function'
416 | Properties:
417 | Aruba::LambdaCode:
418 | LocalPath: lambda-funcs/my-func
419 | S3Dest: s3://my-bucket/lambda-code
420 | Handler: my_func.go
421 | Runtime: python2.7
422 | Timeout: 30
423 | ```
424 |
425 | ### Bootstrapping EC2 instances
426 |
427 | CloudFormation does let you bootstrap EC2 instances, but it's a bit complicated and is missing some useful features.
428 |
429 | Instead, use the following property in your EC2 instance definitions:
430 |
431 | ```
432 | Aruba::BootstrapActions:
433 | Actions:
434 | - Path: ACTION_PATH
435 | Args: ARG_LIST
436 | - ...
437 | - ...
438 | LogUri: LOG_URI
439 | Timeout: TIMEOUT
440 | ```
441 |
442 | The property does the following:
443 | - Runs each program specified in the `Actions` list on the instance
444 | - Captures each action's output and uploads it to the S3 location given in `LogUri`
445 | - Makes the stack wait for all the actions to finish running
446 | - Makes the stack's status conditional on the status of all the actions
447 |
448 | *IMPORTANT:* The action programs must be in S3, and this property does not put them there. You can use the [S3 operations elements](#s3-operations) defined in this library to upload the action programs to S3.
449 |
450 | *IMPORTANT:* The EC2 instance must be given an instance profile with a role that has permission to read the action programs in S3, and, if `LogUri` is used, it must also have permissions to write to the specified S3 location.
451 |
452 | #### Parameters
453 |
454 |
455 | ACTION_PATH
456 | - An S3 path (starting with "s3://") pointing to the program that bootstraps the instance
457 |
458 | ARG_LIST
459 | - A list of strings to pass as arguments to the bootstrap program
460 |
461 | LOG_URI
462 | - (Optional) The "s3://BUCKET/KEY" URI of the directory in which to put the output of the
463 | bootstrap program
464 |
465 | TIMEOUT
466 | - The length of time that we should wait for the bootstrap program to run.
467 | Must be in ISO8601 duration format.
468 |
469 |
470 | #### Example
471 |
472 | ```
473 | Database:
474 | Type: 'AWS::EC2::Instance'
475 | Properties:
476 | AvailabilityZone: us-west-2a
477 | ImageId: ami-e251209a
478 | InstanceType: m5.large
479 | IamInstanceProfile: {Ref: InstProf}
480 | Aruba::BootstrapActions:
481 | Actions:
482 | - Path: s3://my-bucket/bootstrap/db.sh
483 | Timeout: PT5M
484 |
485 | DatabaseRole:
486 | Type: 'AWS::IAM::Role'
487 | Properties:
488 | AssumeRolePolicyDocument:
489 | Version: 2012-10-17
490 | Statement:
491 | - Effect: Allow
492 | Principal:
493 | Service:
494 | - ec2.amazonaws.com
495 | Action:
496 | - 'sts:AssumeRole'
497 | Policies:
498 | - PolicyName: DatabasePolicy
499 | PolicyDocument:
500 | Version: 2012-10-17
501 | Statement:
502 | - Effect: Allow
503 | Action: 's3:HeadBucket'
504 | Resource: '*'
505 | - Effect: Allow
506 | Action: 's3:ListBucket'
507 | Resource: 'arn:aws:s3:::my-bucket'
508 | - Effect: Allow
509 | Action:
510 | - 's3:GetObject'
511 | Resource: 'arn:aws:s3:::my-bucket/*'
512 |
513 | InstProf:
514 | Type: 'AWS::IAM::InstanceProfile'
515 | Properties:
516 | Roles:
517 | - Ref: DatabaseRole
518 | ```
519 |
520 | ### Including nested stacks
521 |
522 | ```
523 | Type: Aruba::Stack
524 | Properties:
525 | Template:
526 | LocalPath: LOCAL_PATH
527 | S3Dest: S3_DEST
528 | Parameters: PARAMETERS
529 | ```
530 |
531 | This resource type will upload the template file to S3. If the template file uses any of the features from this library, it will be processed accordingly.
532 |
533 | #### Parameters
534 |
535 |
536 | LOCAL_PATH
537 | - A path to the nested stack's template file. If the path is relative, it must be relative to the template file.
538 |
539 | S3_DEST
540 | - The "s3://BUCKET/KEY" URI of the directory to which the file should be uploaded
541 |
542 | PARAMETERS
543 | - Parameters to pass to the nested stack (cf. AWS CloudFormation Stack Parameters)
544 |
545 |
546 | ### Using YAML anchors in templates
547 |
548 | In a YAML document, you can include the same node in multiple places using anchors. This can be quite useful when you want to reduce the size of a YAML file.
549 |
550 | Unfortunately, CloudFormation does not support YAML anchors. However, when you process a template with this library, all the anchor references will be expanded in the modified template that you send to CloudFormation. This applies to templates that you pass directly to this library as well as templates that are referenced in an `Aruba::Stack` resource.
551 |
552 | ### Setting a stack policy
553 |
554 | You can set a stack's policy by adding the following to the template's `Metadata` section:
555 |
556 | ```
557 | Aruba::StackPolicy: POLICY
558 | ```
559 |
560 | #### Parameters
561 |
562 |
563 | POLICY
564 | - A stack policy — cf. the AWS documentation for details on how to define a policy.
565 |
566 |
--------------------------------------------------------------------------------
/cfnplus/__init__.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 | # pylint: disable=too-many-arguments
29 | # pylint: disable=too-many-ancestors
30 |
31 | import os
32 | import collections
33 | import itertools
34 | import yaml
35 | import boto3
36 | from botocore.exceptions import ClientError
37 | from .utils import InvalidTemplate, Result
38 | from .lambda_code_tag import delete_unused_lambda_code
39 | from . import (
40 | utils,
41 | lambda_code_tag,
42 | before_creation_tag,
43 | after_creation_tag,
44 | bootstrap_actions_tag,
45 | stack_policy_tag,
46 | stack_resource,
47 | )
48 |
# Maps each "Aruba::" tag name to the function that evaluates it.  Tags are
# searched for in the template's "Metadata" section and inside each
# resource's node (see _processs_tags).
_ARUBA_TAG_EVAL_FUNCS = {
    'Aruba::LambdaCode': lambda_code_tag.evaluate,
    'Aruba::BeforeCreation': before_creation_tag.evaluate,
    'Aruba::AfterCreation': after_creation_tag.evaluate,
    'Aruba::BootstrapActions': bootstrap_actions_tag.evaluate,
    'Aruba::StackPolicy': stack_policy_tag.evaluate,
}

# Maps each "Aruba::" resource type (the "Type" field of an entry in the
# "Resources" section) to the function that evaluates it (see
# _processs_resources).
_ARUBA_RESOURCE_EVAL_FUNCS = {
    'Aruba::Stack': stack_resource.evaluate,
}
60 |
def process_template(template_str, template_params, aws_region, \
        template_path=None, stack_name=None, template_is_imported=False):
    '''
    Evaluate the "Aruba::" tags in a CloudFormation template.

    Each "Aruba::" tag can produce three outputs:
        1. A transformation of the CloudFormation template
        2. An action that must be done before the stack is made/updated
        3. An action that must be done after the stack is made/updated.

    (These actions are usually S3 operations.)  This function evaluates all
    the "Aruba::" tags in the template and gathers their outputs into one
    object containing:
        1. The net result of all the template transformations
        2. A list of all the actions to be done before creation/update
        3. A list of all the actions to be done after creation/update

    This object is then returned.

    After calling this function, you must put the result in a "with"
    statement, and in the statement's body you must first call the
    do_before_creation method, then make/update the stack using the template
    in the new_template attribute, and finally call the do_after_creation
    method --- for example:

        with process_template(...) as result:
            result.do_before_creation()
            boto3.client('cloudformation').create_stack(
                TemplateBody=result.new_template,
                ...
            )
            result.do_after_creation()

    The purpose of the "with" statement is to support atomicity; if an
    exception is thrown in the do_before_creation call or in any statement
    after this call and before the do_after_creation call, the effects of the
    actions done by the do_before_creation call will be rolled back --- for
    example, objects added to S3 will be removed, objects removed from S3 will
    be restored.  Similarly, if an exception is thrown by the do_after_creation
    call, the effects of any actions done by this call will be rolled back ---
    but the effects of the do_before_creation call will NOT be rolled back.

    The most likely cause of exceptions will be problems with the original
    (non-transformed) template.  CloudFormation supports rollback of failed
    stack changes; with this library, you now can roll back S3 changes as well.

    :param template_str: The CloudFormation template to process (must be in
        YAML).
    :param template_params: A list of dicts of this form:
            {
                "ParameterKey": ...,
                "ParameterValue": ..., (optional)
                "UsePreviousValue": ... (optional)
            }

        If "UsePreviousValue" is True for any of them, then stack_name must be
        given and a stack with that name must exist.
    :param aws_region: The name of the AWS region in which the stack will be
        made.
    :param template_path: (Optional) An absolute filesystem path pointing to
        the template.  This is needed only if the template has tags containing
        paths relative to the template's path.
    :param stack_name: (Optional) The name that the stack will have when it is
        made.  This is needed only if the template uses the "AWS::StackName"
        variable, or if template_params contains an item with "UsePreviousValue"
        set to True, or if "Aruba::StackPolicy" is used.
    :param template_is_imported: Internal use only.

    :return: Cf. description of this function.

    :throw InvalidTemplate: If the template is invalid.
    :throw ValueError: If there is a problem with an argument.
    '''

    # get old stack (needed to resolve params with "UsePreviousValue")
    old_stack = None
    if stack_name is not None:
        cfn = boto3.resource('cloudformation', region_name=aws_region)
        old_stack = cfn.Stack(stack_name)
        try:
            old_stack.reload()
        except ClientError:
            # the stack does not exist yet.  NOTE: this used to be a bare
            # "except:", which also hid credential/网络-independent coding
            # errors; only the AWS client error means "no such stack".
            old_stack = None

    # make template param dict
    param_dict = {}
    for param in template_params:
        key = param['ParameterKey']
        if param.get('UsePreviousValue', False):
            if 'ParameterValue' in param:
                raise ValueError("Param value given but also told to use " \
                    "previous value")
            if old_stack is None:
                raise ValueError("Told to use prev param value but there " \
                    "is no existing stack")
            # look the value up in the existing stack's parameters
            value = next((sp['ParameterValue'] for sp in old_stack.parameters \
                if sp['ParameterKey'] == key), None)
            if value is None:
                raise ValueError("Existing stack has no param \"{}\"".\
                    format(key))
        else:
            if 'ParameterValue' not in param:
                raise ValueError("No value for param \"{}\"".format(key))
            value = param['ParameterValue']
        param_dict[key] = value

    ctx = utils.Context(param_dict, aws_region, template_path, \
        stack_name, template_is_imported, _process_template)
    return _process_template(template_str, ctx)
174 |
def _process_template(template_str, ctx):
    '''
    Evaluate all "Aruba::" nodes in one template.

    :param template_str: The template, as a YAML string.
    :param ctx: An instance of utils.Context.
    :return: Instance of Result; its new_template attribute holds the
        transformed template serialized back to YAML.
    '''

    # We process two kinds of nodes:
    #
    # 1. Tags: nodes like "{'Aruba::Code': {...}}" in which the key starts
    #    with "Aruba::"
    # 2. Resources: objects in the "Resources" section with "Type" fields
    #    beginning with "Aruba::"
    #
    # This is done in two passes.

    # Use the safe loader: templates can come from outside the caller's
    # control, and plain yaml.load can construct arbitrary Python objects.
    template = yaml.safe_load(template_str)

    # pass 1: evaluate "Aruba::" tags
    result_1 = _processs_tags(template, ctx)

    # pass 2: evaluate "Aruba::"-typed resources
    result_2 = _processs_resources(result_1.new_template, ctx)

    # merge both passes' actions and serialize the transformed template
    result_2.before_creation.extend(result_1.before_creation)
    result_2.after_creation.extend(result_1.after_creation)
    result_2.new_template = _yaml_dump(result_2.new_template)
    return result_2
197 |
def _processs_tags(template, ctx):
    '''
    Evaluate the "Aruba::" tags in a parsed template, modifying it in place.

    Tags are searched for in the template's "Metadata" section and inside
    every resource's node.

    :param template: The parsed (dict-form) template.
    :param ctx: An instance of utils.Context.
    :return: Instance of Result.
    :throw InvalidTemplate: If a tag's value is malformed.
    '''

    final_result = Result(template)

    def eval_recursive(tag_name, tag_value, parent, curr_ctx):
        if tag_name in _ARUBA_TAG_EVAL_FUNCS:
            # evaluate Aruba tag
            eval_func = _ARUBA_TAG_EVAL_FUNCS[tag_name]
            result = eval_func(tag_value, curr_ctx)

            # replace tag
            # NOTE(review): when the evaluator returns no replacement, the
            # original "Aruba::" key is left in place --- presumably it is
            # handled elsewhere; verify before changing this.
            if result.new_template is not None:
                new_tag_name, new_tag_value = result.new_template
                parent[new_tag_name] = new_tag_value
                del parent[tag_name]

            final_result.before_creation.extend(result.before_creation)
            final_result.after_creation.extend(result.after_creation)

        elif isinstance(tag_value, collections.Mapping):
            # we may modify the tag_value dict during iteration, so make a list of its items
            next_items = list(tag_value.items())
            for next_tag_name, next_tag_value in next_items:
                # recurse
                eval_recursive(next_tag_name, next_tag_value, tag_value, \
                    curr_ctx)

    try:
        # process "Metadata" section
        if 'Metadata' in template:
            eval_recursive(None, template['Metadata'], None, ctx)

        # process "Resources" section
        if 'Resources' in template:
            for rsrc_name, rsrc_node in template['Resources'].items():
                new_ctx = ctx.copy()
                new_ctx.resource_name = rsrc_name
                new_ctx.resource_node = rsrc_node
                eval_recursive(None, rsrc_node, None, new_ctx)
    except InvalidTemplate as e:
        # template_path is an optional argument; without it there is no
        # filename to prefix, and os.path.basename(None) would itself raise
        if ctx.template_path is None:
            raise
        # prefix the error with the template's filename for easier debugging
        template_fn = os.path.basename(ctx.template_path)
        raise InvalidTemplate('{}: {}'.format(template_fn, str(e)))

    return final_result
245 |
def _processs_resources(template, ctx):
    '''
    Evaluate resources whose "Type" is an "Aruba::" type, modifying the
    template in place.

    :param template: The parsed (dict-form) template.
    :param ctx: An instance of utils.Context.
    :return: Instance of Result.
    '''

    final_result = Result(template)

    if 'Resources' not in template:
        return final_result

    resources = template['Resources']
    # Iterate over a snapshot: evaluation may delete entries from
    # `resources`, and mutating a dict while iterating over it raises
    # RuntimeError in Python 3.
    for name, resource in list(resources.items()):
        # evaluate Aruba resource
        typ = resource.get('Type', '')
        try:
            eval_func = _ARUBA_RESOURCE_EVAL_FUNCS[typ]
        except KeyError:
            continue    # not an Aruba resource type
        result = eval_func(resource, ctx)

        # replace (or drop) the resource in the template
        if result.new_template is None:
            del resources[name]
        else:
            resources[name] = result.new_template

        # save actions
        final_result.before_creation.extend(result.before_creation)
        final_result.after_creation.extend(result.after_creation)

    return final_result
277 |
class _YamlDumper(yaml.dumper.SafeDumper):
    '''A SafeDumper that expands every shared node inline.'''

    def ignore_aliases(self, data):  # override
        # CloudFormation rejects YAML anchors/aliases, so never emit them.
        return True
281 |
def _yaml_dump(template):
    '''
    Serialize *template* to a YAML string with all anchors expanded inline
    (CloudFormation does not support YAML anchors).
    '''
    return yaml.dump(template, Dumper=_YamlDumper)
286 |
def delete_stack(stack_name, aws_region):
    '''
    Delete a CloudFormation stack, working around CloudFormation's trouble
    with stacks that contain security groups.

    :param stack_name: The name of the stack to delete.
    :param aws_region: The name of the AWS region containing the stack.
    '''

    cf = boto3.resource('cloudformation', region_name=aws_region)
    ec2 = boto3.resource('ec2', region_name=aws_region)

    # CloudFormation sometimes has trouble deleting security groups. This can
    # happen when an EMR cluster was deployed into a stack's VPC --- EMR makes
    # security groups for the cluster, but the stack doesn't know about them
    # and so stack deletion fails.

    # look for VPCs, and then get their security groups
    stack = cf.Stack(stack_name)
    vpc_ids = (r.physical_resource_id for r \
        in stack.resource_summaries.all() \
        if r.resource_type == 'AWS::EC2::VPC')
    # (named vpc_id, not "id", to avoid shadowing the builtin)
    vpcs = (ec2.Vpc(vpc_id) for vpc_id in vpc_ids)
    sec_groups = list(itertools.chain.from_iterable(vpc.security_groups.all() \
        for vpc in vpcs))

    # Some groups may reference each other, which
    # prevents them from being deleted. So we need to first clear
    # out the groups' rules.
    for sg in sec_groups:
        if sg.ip_permissions_egress:
            sg.revoke_egress(IpPermissions=sg.ip_permissions_egress)
        if sg.ip_permissions:
            sg.revoke_ingress(IpPermissions=sg.ip_permissions)

    # try to delete security groups (best-effort: a group may still be in
    # use by something outside the stack)
    for sg in sec_groups:
        try:
            sg.delete()
        except ClientError:
            pass

    # delete stack
    stack.delete()
328 |
--------------------------------------------------------------------------------
/cfnplus/action_tags.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import json
30 | import collections
31 | import os
32 | import io
33 | import boto3
34 | from . import utils, eval_cfn_expr, s3_ops
35 |
def _do_mkdir(arg_node, ctx):
    '''
    Build an action that creates a "directory" object in S3.

    :param arg_node: Template node that evaluates to the directory's S3 URI.
    :param ctx: An instance of utils.Context.
    :return: A function taking (undoers, committers) that performs the op.
    '''

    uri = eval_cfn_expr.eval_expr(arg_node, ctx)
    bucket_name, key = utils.parse_s3_uri(uri)
    # S3 "directories" are objects whose keys end with '/'
    if not key.endswith('/'):
        key = key + '/'

    def action(undoers, committers):
        # the bucket must already exist; we never create buckets
        if not utils.bucket_exists(bucket_name, ctx.aws_region):
            raise utils.InvalidTemplate("S3Mkdir: No such S3 bucket: {}".\
                format(bucket_name))
        s3 = boto3.resource('s3', region_name=ctx.aws_region)
        s3_ops.make_dir(s3.Bucket(bucket_name), key, undoers, committers)

    return action
54 |
def _do_sync(arg_node, ctx):
    '''
    Build an action that makes an S3 "directory" mirror a local directory.

    :param arg_node: Mapping with exactly two entries: 'LocalDir' and
        'S3Dest'.
    :param ctx: An instance of utils.Context.
    :return: A function taking (undoers, committers) that performs the sync.
    '''

    ex = utils.InvalidTemplate("Invalid argument for S3Sync: {}".\
        format(json.dumps(arg_node)))
    if not isinstance(arg_node, collections.Mapping) or len(arg_node) != 2:
        raise ex
    try:
        local_dir_node = arg_node['LocalDir']
        s3_dest_node = arg_node['S3Dest']
    except KeyError:
        raise ex

    # resolve both nodes to strings
    local_dir = eval_cfn_expr.eval_expr(local_dir_node, ctx)
    dest_uri = eval_cfn_expr.eval_expr(s3_dest_node, ctx)
    bucket_name, dir_key = utils.parse_s3_uri(dest_uri)
    if not dir_key.endswith('/'):
        dir_key = dir_key + '/'

    def action(undoers, committers):
        if not utils.bucket_exists(bucket_name, ctx.aws_region):
            raise utils.InvalidTemplate("S3Sync: No such S3 bucket: {}".\
                format(bucket_name))

        # local_dir may be relative to the template's directory
        abs_local_path = ctx.abspath(local_dir)
        if not os.path.isdir(abs_local_path):
            raise utils.InvalidTemplate("S3Sync: {} is not a directory".\
                format(abs_local_path))

        print("Syncing {} with s3://{}/{}".\
            format(abs_local_path, bucket_name, dir_key))

        bucket = boto3.resource('s3', region_name=ctx.aws_region).\
            Bucket(bucket_name)

        # keys currently in S3, relative to dir_key
        s3_files = [os.path.relpath(obj.key, start=dir_key) \
            for obj in bucket.objects.filter(Prefix=dir_key)]

        # all local files, relative to abs_local_path
        local_files = set()
        for dirpath, _, filenames in os.walk(abs_local_path):
            for fn in filenames:
                rel = os.path.relpath(os.path.join(dirpath, fn), \
                    start=abs_local_path)
                local_files.add(rel)

        # remove S3 objects that have no local counterpart
        for rel in s3_files:
            if rel not in local_files:
                s3_ops.delete_object(bucket, dir_key + rel, undoers, \
                    committers)

        # upload every local file
        for rel in local_files:
            with io.open(os.path.join(abs_local_path, rel), 'rb') as f:
                s3_ops.upload_file(f, bucket, dir_key + rel, undoers, \
                    committers)

    return action
117 |
def _do_upload(arg_node, ctx):
    '''
    Build an action that uploads one local file to S3.

    :param arg_node: Mapping with exactly two entries: 'LocalFile' and
        'S3Dest'.
    :param ctx: An instance of utils.Context.
    :return: A function taking (undoers, committers) that does the upload.
    '''

    ex = utils.InvalidTemplate("Invalid argument for S3Upload: {}".\
        format(json.dumps(arg_node)))
    if not isinstance(arg_node, collections.Mapping) or len(arg_node) != 2:
        raise ex
    try:
        local_file_node = arg_node['LocalFile']
        s3_dest_node = arg_node['S3Dest']
    except KeyError:
        raise ex

    # resolve both nodes to strings
    local_file = eval_cfn_expr.eval_expr(local_file_node, ctx)
    dest_uri = eval_cfn_expr.eval_expr(s3_dest_node, ctx)
    bucket_name, key = utils.parse_s3_uri(dest_uri)
    # the destination must name an object, not a "directory"
    if key.endswith('/'):
        raise utils.InvalidTemplate("S3Upload: Key must not end with '/'")

    def action(undoers, committers):
        # the bucket must already exist; we never create buckets
        if not utils.bucket_exists(bucket_name, ctx.aws_region):
            raise utils.InvalidTemplate("S3Upload: No such S3 bucket: {}".\
                format(bucket_name))
        s3 = boto3.resource('s3', region_name=ctx.aws_region)
        bucket = s3.Bucket(bucket_name)

        # local_file may be relative to the template's directory
        with io.open(ctx.abspath(local_file), 'rb') as f:
            s3_ops.upload_file(f, bucket, key, undoers, committers)

    return action
150 |
# Maps each action name (as written under "Aruba::BeforeCreation" /
# "Aruba::AfterCreation") to the function that builds the action.
_ACTION_HANDLERS = {
    'S3Mkdir': _do_mkdir,
    'S3Sync': _do_sync,
    'S3Upload': _do_upload,
}
156 |
def eval_beforecreation_or_aftercreation(tag_name, arg_node, ctx):
    '''
    Evaluate an "Aruba::BeforeCreation" or "Aruba::AfterCreation" tag.

    :param tag_name: The tag being evaluated (used in error messages).
    :param arg_node: The tag's value: a sequence of single-entry mappings,
        each mapping an action name to that action's argument.
    :param ctx: An instance of utils.Context.

    :return: list of functions
    '''

    # imported (nested) templates may not carry side-effecting actions
    if ctx.template_is_imported:
        raise utils.InvalidTemplate("Actions are not allowed in this template, " + \
            "but found {}".format(tag_name))

    ex = utils.InvalidTemplate("Invalid value for {}: {}".\
        format(tag_name, json.dumps(arg_node)))
    if not isinstance(arg_node, collections.Sequence):
        raise ex

    actions = []
    for action_node in arg_node:
        # each entry must be a mapping with exactly one key: the action name
        is_single_entry_map = isinstance(action_node, collections.Mapping) \
            and len(action_node) == 1
        if not is_single_entry_map:
            raise ex

        action_name, action_arg = utils.dict_only_item(action_node)
        try:
            handler = _ACTION_HANDLERS[action_name]
        except KeyError:
            raise utils.InvalidTemplate("Invalid action: {}".\
                format(action_name))
        actions.append(handler(action_arg, ctx))

    return actions
186 |
--------------------------------------------------------------------------------
/cfnplus/after_creation_tag.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | from . import action_tags, utils
30 |
def evaluate(arg_node, ctx):
    '''
    Evaluate an "Aruba::AfterCreation" tag.

    :return: Instance of utils.Result.
    '''
    actions = action_tags.eval_beforecreation_or_aftercreation(
        'Aruba::AfterCreation', arg_node, ctx)
    return utils.Result(after_creation=actions)
38 |
--------------------------------------------------------------------------------
/cfnplus/before_creation_tag.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | from . import action_tags, utils
30 |
def evaluate(arg_node, ctx):
    '''
    Evaluate an "Aruba::BeforeCreation" tag.

    :return: Instance of utils.Result.
    '''
    actions = action_tags.eval_beforecreation_or_aftercreation(
        'Aruba::BeforeCreation', arg_node, ctx)
    return utils.Result(before_creation=actions)
38 |
--------------------------------------------------------------------------------
/cfnplus/bootstrap_actions_tag.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import collections
30 | from . import utils
31 |
# Shell-script template for an EC2 instance's UserData.  {go_body} receives
# the concatenated per-action fragments (RUN_BS_SCRIPT_TEMPLATE); the doubled
# braces survive str.format as single braces, so "${{log_uri}}" reaches
# CloudFormation as "${log_uri}" for Fn::Sub to substitute, while "${{!VAR}}"
# becomes a Fn::Sub-escaped literal shell variable.
USER_DATA_SCRIPT_TEMPLATE = '''
#!/bin/bash -x

mkdir /var/log/aruba-bootstrap
exec >/var/log/aruba-bootstrap/main 2>&1

function go() {{
{go_body}
}}

# run steps
go
EXIT_CODE=$?

if [ {should_copy_log} == True ]; then
    # copy log to S3
    aws s3 cp --content-type text/plain /var/log/aruba-bootstrap/main \
        "${{log_uri}}/main"
fi

# notify CloudFormation of result
yum install -y aws-cfn-bootstrap
/opt/aws/bin/cfn-signal -e "${{!EXIT_CODE}}" --stack "${{AWS::StackName}}" \
    --resource "{rsrc_name}" --region "${{AWS::Region}}"
'''
57 |
# Shell-script fragment that runs one bootstrap action: downloads the action's
# program from S3 (via the Fn::Sub placeholder ${s3_uri_N}), runs it as
# ec2-user, optionally copies its log to S3, and returns non-zero from go()
# if the program failed.  {action_nbr} is the action's index in 'Actions'.
RUN_BS_SCRIPT_TEMPLATE = '''
LOG_LOCAL_PATH="/var/log/aruba-bootstrap/{action_nbr}"
SCRIPT_LOCAL_PATH="/tmp/aruba-bootstrap/{action_nbr}"

# run script
mkdir -p "$(dirname ${{!SCRIPT_LOCAL_PATH}})"
aws s3 cp "${{s3_uri_{action_nbr}}}" "${{!SCRIPT_LOCAL_PATH}}"
chmod +x "${{!SCRIPT_LOCAL_PATH}}"
sudo -u ec2-user "${{!SCRIPT_LOCAL_PATH}}" {args} > \
    "${{!LOG_LOCAL_PATH}}" 2>&1
EXIT_CODE=$?

if [ {should_copy_log} == True ]; then
    # copy log to S3
    aws s3 cp --content-type text/plain "${{!LOG_LOCAL_PATH}}" \
        "${{log_uri}}/{action_nbr}"
fi

if [ "${{!EXIT_CODE}}" -ne 0 ]; then
    return 1
fi

'''
81 |
def evaluate(arg_node, ctx):
    '''
    Evaluate an "Aruba::BootstrapActions" tag on an EC2 instance resource.

    Builds a UserData shell script that downloads each action's program from
    S3, runs it, optionally copies logs to S3, and signals CloudFormation
    with the result.  Also adds a CreationPolicy to the resource so that
    stack creation waits for that signal.

    :param arg_node: The tag's value: a mapping with 'Actions' and 'Timeout'
        entries and an optional 'LogUri' entry.
    :param ctx: An instance of utils.Context; its resource_name and
        resource_node must refer to the EC2 instance being bootstrapped.

    :return: Instance of Result whose new_template is the replacement
        ('UserData', node) pair.
    :throw utils.InvalidTemplate: If the tag's value is malformed.
    '''

    # This evaluation is purely text-manipulation: no variables are
    # dereferenced, and there are no side-effects.

    tag_name = 'Aruba::BootstrapActions'
    if not isinstance(arg_node, collections.Mapping):
        raise utils.InvalidTemplate("{}: must contain mapping".format(tag_name))
    try:
        actions_node = arg_node['Actions']
        log_uri_node = arg_node.get('LogUri')
        timeout_node = arg_node['Timeout']
    except KeyError as e:
        raise utils.InvalidTemplate("{}: missing '{}'".\
            format(tag_name, e.args[0]))

    # check 'Actions' argument (a string is also a Sequence but is not a
    # valid list of actions, so reject it explicitly)
    if isinstance(actions_node, utils.base_str) or \
            not isinstance(actions_node, collections.Sequence):
        raise utils.InvalidTemplate("{}: 'Actions' must contain a sequence".\
            format(tag_name))

    # make UserData script
    cfn_subs = {
        # 'unset' keeps Fn::Sub satisfied when no log URI was given; the
        # script only reads log_uri when should_copy_log is True
        'log_uri': log_uri_node if log_uri_node is not None else 'unset',
    }
    go_body = ''
    for i, action_node in enumerate(actions_node):
        # get child nodes
        try:
            path_node = action_node['Path']
        except KeyError as e:
            # bug fix: 'e' was previously unbound here (the earlier handler's
            # binding does not survive), so this raised NameError instead of
            # InvalidTemplate
            raise utils.InvalidTemplate("{}: an action is missing '{}'".\
                format(tag_name, e.args[0]))

        cfn_subs['s3_uri_{}'.format(i)] = path_node

        args_node = action_node.get('Args', [])
        args = []
        for j, n in enumerate(args_node):
            # each argument becomes an Fn::Sub placeholder of its own
            placeholder = 'arg_{}_{}'.format(i, j)
            cfn_subs[placeholder] = n
            args.append('"${' + placeholder + '}"')

        go_body += RUN_BS_SCRIPT_TEMPLATE.format(
            action_nbr=i,
            args=' '.join(args),
            should_copy_log=log_uri_node is not None,
        )

    user_data_script = USER_DATA_SCRIPT_TEMPLATE.format(
        go_body=go_body,
        rsrc_name=ctx.resource_name,
        should_copy_log=log_uri_node is not None,
    )
    user_data_node = {
        'Fn::Base64': {
            'Fn::Sub': [
                user_data_script,
                cfn_subs
            ],
        }
    }

    # add creation policy so CloudFormation waits for the cfn-signal call
    ctx.resource_node['CreationPolicy'] = {
        'ResourceSignal': {'Timeout': timeout_node},
    }

    return utils.Result(new_template=('UserData', user_data_node))
154 |
--------------------------------------------------------------------------------
/cfnplus/eval_cfn_expr.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-few-public-methods
27 | # pylint: disable=unused-argument
28 | import json
29 | import re
30 | import collections
31 | import numbers
32 | from . import utils
33 |
def eval_expr(node, ctx):
    '''
    Evaluate CloudFormation template nodes like "{Ref: SomeItem}",
    "{'Fn::Sub': AnotherItem.SomeAttr}", and "{'Fn::ImportValue': AnExportName}"
    that represent scalar values.

    :param node: A node from a parsed CloudFormation template that represents
        a scalar value.
    :param ctx: An instance of utils.Context.  It will be used to resolve
        references in the node.

    :return: The scalar value represented by the given node.
    :throw: utils.InvalidTemplate
    '''

    # literals evaluate to themselves
    if isinstance(node, (utils.base_str, numbers.Number)):
        return node

    # anything else must be a single-entry mapping naming a function
    if not (isinstance(node, collections.Mapping) and len(node) == 1):
        raise utils.InvalidTemplate("Invalid scalar expression: {}".\
            format(json.dumps(node)))

    handlers = {
        'Fn::Sub': _eval_cfn_sub,
        'Fn::ImportValue': _eval_cfn_importvalue,
        'Ref': _eval_cfn_ref,
    }

    func_name, func_arg = utils.dict_only_item(node)
    if func_name not in handlers:
        raise utils.InvalidTemplate("Unknown function: {}".format(func_name))
    return handlers[func_name](func_arg, ctx)
69 |
def _eval_cfn_ref(node, ctx):
    '''
    Resolve a 'Ref' expression.

    :param node: The argument to a 'Ref' expression (a variable name).
    :param ctx: An instance of utils.Context.

    :return: The referenced scalar value.
    :throw: utils.InvalidTemplate
    '''

    if isinstance(node, utils.base_str):
        try:
            return ctx.resolve_var(node)
        except KeyError:
            raise utils.InvalidTemplate("Cannot resolve variable \"{}\"".\
                format(node))

    # anything other than a plain variable name is malformed
    raise utils.InvalidTemplate("Invalid arg for 'Ref': {}".\
        format(json.dumps(node)))
88 |
def _eval_cfn_importvalue(node, ctx):
    '''
    Evaluate the argument of an 'Fn::ImportValue' expression.

    :param node: The argument to an 'Fn::ImportValue' expression.
    :param ctx: An instance of utils.Context.

    :return: The imported scalar value.
    :throw: utils.InvalidTemplate
    '''

    # the export's name may itself be an expression; evaluate it first
    export_name = eval_expr(node, ctx)
    return ctx.resolve_cfn_export(export_name)
100 |
def _eval_cfn_sub(node, ctx):
    '''
    Evaluate the argument of an 'Fn::Sub' expression.

    :param node: The argument to an 'Fn::Sub' expression.
    :param ctx: An instance of utils.Context.

    :return: The computed string.
    :throw: utils.InvalidTemplate
    '''

    # The argument may be a plain format string or a pair
    # [format_string, local_symbol_map]; normalize to the pair form.
    if isinstance(node, utils.base_str):
        node = [node, {}]

    bad_arg = utils.InvalidTemplate("Invalid arg for 'Fn::Sub': {}".\
        format(json.dumps(node)))
    if not isinstance(node, collections.Sequence) or len(node) != 2:
        raise bad_arg

    # get components of arg
    format_str, local_symbols = node[0], node[1]
    if not isinstance(format_str, utils.base_str) or \
        not isinstance(local_symbols, collections.Mapping):
        raise bad_arg

    # evaluate the local symbols into a child context
    child_ctx = ctx.copy()
    for name, value_node in local_symbols.items():
        child_ctx.set_var(name, eval_expr(value_node, ctx))

    # replace each ${...} reference in the format string with its value
    var_ref_re = re.compile(r'\$\{([-.:_0-9a-zA-Z]*)\}')
    pieces = []
    last_end = 0
    for match in var_ref_re.finditer(format_str):
        var_name = match.group(1)
        try:
            var_value = child_ctx.resolve_var(var_name)
        except KeyError:
            raise utils.InvalidTemplate("Cannot resolve variable \"{}\"".\
                format(var_name))

        # literal text before the reference, then the resolved value
        pieces.append(format_str[last_end:match.start()])
        pieces.append(str(var_value))
        last_end = match.end()

    pieces.append(format_str[last_end:])
    return ''.join(pieces)
158 |
--------------------------------------------------------------------------------
/cfnplus/lambda_code_tag.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import os
30 | import hashlib
31 | import zipfile
32 | import collections
33 | import json
34 | import tempfile
35 | import struct
36 | import io
37 | import yaml
38 | import boto3
39 | from . import eval_cfn_expr, utils, s3_ops
40 |
class _LambdaPkgMaker(object):
    '''
    This class makes AWS Lambda function packages --- i.e., zipfiles of code.
    It also computes a hash of such packages based (only) on their contents.
    '''

    # IMPLEMENTATION NOTE: We cannot just take the hash of the zipfile b/c
    # zipfiles contain crap like timestamps that we do not want to influence
    # the hash.
    #
    # Instead, we take the hash of a bytestring that represents the zipfile's
    # contents.  The bytestring consists of a series of records of this form:
    #
    #   <path_in_zipfile_len (8-byte big-endian)> <path_in_zipfile>
    #   <file_contents_len (8-byte big-endian)> <file_contents>
    #
    # with one record for each file in the zipfile.  Records are emitted in
    # sorted order of package path so that the hash does not depend on dict
    # iteration order or on the order in which files were discovered on
    # disk --- otherwise identical packages could hash differently across
    # runs and machines, defeating the hash-keyed deduplication in S3.

    def __init__(self):
        self._entries = {} # package path -> abs path

    def add(self, local_path, pkg_path):
        '''Schedule the file at local_path for inclusion as pkg_path.'''
        self._entries[pkg_path] = local_path

    @property
    def hash(self):
        '''
        A hex digest of the package's contents.  Deterministic for a given
        set of (pkg_path, file contents) pairs, regardless of the order in
        which they were added.
        '''
        h = hashlib.new(utils.FILE_HASH_ALG)
        # sorted() => stable hash independent of insertion/walk order
        for pkg_path, local_path in sorted(self._entries.items()):
            pkg_path_encoded = pkg_path.encode('utf-8')
            h.update(struct.pack('>Q', len(pkg_path_encoded))) # path_in_zipfile_len
            h.update(pkg_path_encoded) # path_in_zipfile
            stat = os.stat(local_path)
            h.update(struct.pack('>Q', stat.st_size)) # file_contents_len
            with io.open(local_path, 'rb') as f: # file_contents
                while True:
                    data = f.read(1024)
                    if len(data) == 0:
                        break
                    h.update(data)

        return h.hexdigest()

    def open(self):
        '''
        Build the zipfile and return it as an open, seekable file object
        positioned at offset 0.  The caller is responsible for closing it.
        '''
        f = tempfile.TemporaryFile() # will be deleted when closed
        try:
            with zipfile.ZipFile(f, 'w') as z:
                # write entries in sorted order for reproducible archives
                for pkg_path, local_path in sorted(self._entries.items()):
                    z.write(local_path, arcname=pkg_path)

            f.seek(0)
            return f
        except:
            f.close()
            raise
94 |
def evaluate(arg_node, ctx):
    '''
    Evaluate an Aruba::LambdaCode tag: package a local directory of Lambda
    source, arrange for it to be uploaded to S3, and produce the standard
    'Code' property that should replace the tag.

    :return: Instance of Result.
    '''

    # built eagerly so all invalid-shape paths raise the identical error
    bad_arg = utils.InvalidTemplate("Invalid argument for Aruba::LambdaCode: {}".\
        format(json.dumps(arg_node)))
    if not isinstance(arg_node, collections.Mapping):
        raise bad_arg
    try:
        local_path_node = arg_node['LocalPath']
        s3_dest_node = arg_node['S3Dest']
    except KeyError:
        raise bad_arg

    # eval nodes in arg
    local_path = eval_cfn_expr.eval_expr(local_path_node, ctx)
    s3_dest = eval_cfn_expr.eval_expr(s3_dest_node, ctx)
    bucket_name, dir_key = utils.parse_s3_uri(s3_dest)

    # resolve the source directory relative to the template's location
    src_dir = ctx.abspath(local_path)
    if not os.path.isdir(src_dir):
        raise utils.InvalidTemplate("{} is not a directory".\
            format(src_dir))

    # collect every file under the source directory into the package
    pkg_maker = _LambdaPkgMaker()
    for parent, _, filenames in os.walk(src_dir):
        for filename in filenames:
            file_path = os.path.join(parent, filename)
            pkg_maker.add(file_path, os.path.relpath(file_path, start=src_dir))

    # the package hash is part of the S3 key, so unchanged code maps to
    # the same S3 object and is not re-uploaded
    s3_key = '{}/{}'.format(dir_key, pkg_maker.hash)

    def action(undoers, committers):
        # check if bucket exists
        if not utils.bucket_exists(bucket_name, ctx.aws_region):
            raise utils.InvalidTemplate("No such S3 bucket: {}".\
                format(bucket_name))
        bucket = boto3.resource('s3', region_name=ctx.aws_region).\
            Bucket(bucket_name)

        with pkg_maker.open() as pkg_file:
            s3_ops.upload_file(pkg_file, bucket, s3_key, undoers, committers)

    # the tag's node is replaced by a normal Lambda 'Code' property
    new_tag_value = {
        'S3Bucket': bucket_name,
        'S3Key': s3_key,
    }

    return utils.Result(new_template=('Code', new_tag_value), \
        before_creation=[action])
151 |
def delete_unused_lambda_code(stack_names, bucket_name, s3_code_prefix, \
    aws_region):
    '''
    Delete Lambda code packages under s3_code_prefix in bucket_name that are
    no longer referenced by any of the given stacks.

    In order to support rollbacks, we need to keep Lambda functions' source
    in S3 (even though it isn't actually used when the functions run).
    Eventually function code gets replaced with new versions, so we need to
    delete old code that's no longer referenced by a stack.

    :param stack_names: Iterable of CloudFormation stack names to scan.
    :param bucket_name: Name of the S3 bucket holding the code packages.
    :param s3_code_prefix: S3 key prefix under which code packages live.
    :param aws_region: AWS region of the stacks and the bucket.
    '''

    cf = boto3.client('cloudformation', region_name=aws_region)

    # normalize so startswith() below cannot match sibling prefixes
    if not s3_code_prefix.endswith('/'):
        s3_code_prefix += '/'

    # make list of code files referenced by any stack
    refed_code = set()
    for stack_name in stack_names:
        resp = cf.get_template(StackName=stack_name, TemplateStage='Original')
        # safe_load: never instantiate arbitrary Python objects from
        # template text fetched from the service
        template = yaml.safe_load(resp['TemplateBody'])
        for _, rsrc in template['Resources'].items():
            if rsrc['Type'] != 'AWS::Lambda::Function':
                continue
            code_node = rsrc['Properties']['Code']
            curr_bucket = code_node['S3Bucket']
            curr_key = code_node['S3Key']
            if curr_bucket != bucket_name or \
                not curr_key.startswith(s3_code_prefix):
                continue
            refed_code.add(curr_key)

    # delete unreferenced code files from S3
    bucket = boto3.resource('s3', region_name=aws_region).Bucket(bucket_name)
    for obj in bucket.objects.filter(Prefix=s3_code_prefix):
        if obj.key in refed_code:
            continue
        print("Deleting unused Lambda code s3://{}/{}".\
            format(bucket_name, obj.key))
        obj.delete()
188 |
--------------------------------------------------------------------------------
/cfnplus/s3_ops.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import hashlib
30 | import base64
31 | import botocore
32 | from . import utils
33 |
def upload_file(f, bucket, key, undoers, committers):
    '''
    Idempotently upload the contents of file object f to the given S3
    bucket under the given key.  The bucket must have versioning enabled.

    Transactional behavior:
      If there's no existing object:
        Do: upload file
        Undo: delete latest version
        Commit: nop
      If existing file is different:
        Do: add new version
        Undo: delete latest version
        Commit: delete previous version
      If existing file is same:
        Do: nop
        Undo: nop
        Commit: nop

    :param f: Binary file object open for reading, positioned at offset 0.
    :param bucket: A boto3 S3 Bucket resource.
    :param key: The S3 key to upload to.
    :param undoers: List to which an undo callback may be appended.
    :param committers: List to which a commit callback may be appended.
    '''

    HASH_METADATA_KEY = '{}_sum'.format(utils.FILE_HASH_ALG)

    # get file's hash
    h = hashlib.new(utils.FILE_HASH_ALG)
    while True:
        buf = f.read(1024)
        if len(buf) == 0:
            break
        h.update(buf)
    # decode() (not str()) so the stored metadata value is plain base64
    # text on both Python 2 and 3; str(bytes) on Python 3 yields "b'...'",
    # which makes hashes written by py2 and py3 deployers incompatible
    hashvalue = base64.b64encode(h.digest()).decode('ascii')

    # check if file was already uploaded
    previous_version = None
    try:
        prev_obj = bucket.Object(key)
        previous_version = prev_obj.version_id
        existing_hash = prev_obj.metadata.get(HASH_METADATA_KEY)
    except botocore.exceptions.ClientError:
        # no existing object; proceed with a fresh upload
        pass
    else:
        if existing_hash == hashvalue:
            # object already exists with identical contents
            return

    # upload file
    print("Uploading to s3://{}/{}".format(bucket.name, key))
    f.seek(0)
    obj = bucket.put_object(
        Body=f,
        Key=key,
        Metadata={HASH_METADATA_KEY: hashvalue})
    obj.wait_until_exists()
    if obj.version_id is None:
        # without versioning the undo/commit protocol cannot work
        obj.delete()
        raise Exception("Bucket must have versioning enabled")
    new_version = obj.version_id

    # add undoer
    def undo():
        obj.delete(VersionId=new_version)
        obj.wait_until_not_exists(VersionId=new_version)
    undoers.append(undo)

    # add committer: once the new version is committed, the previous one
    # is no longer needed for rollback
    if previous_version is not None:
        def commit():
            obj.delete(VersionId=previous_version)
            obj.wait_until_not_exists(VersionId=previous_version)
        committers.append(commit)
99 |
def delete_object(bucket, key, undoers, committers):
    '''
    Delete the object at the given key in a way that can be undone until
    committed.  The bucket must have versioning enabled.

    If object exists:
      Do: insert delete marker for object
      Undo: delete the delete marker
      Commit: delete all versions
    If object does not exist:
      Do: nop
      Undo: nop
      Commit: nop
    '''

    # check if object exists
    obj = bucket.Object(key)
    try:
        obj.reload()
    except botocore.exceptions.ClientError:
        # object doesn't exist; nothing to delete
        return

    prev_version = obj.version_id
    if prev_version is None:
        raise Exception("Bucket must have versioning enabled")

    # delete object (this inserts a delete marker version)
    print("Deleting s3://{}/{}".format(bucket.name, key))
    delete_marker_version = obj.delete()['VersionId']
    obj.wait_until_not_exists()

    def undo():
        # removing the delete marker makes the object visible again
        obj.delete(VersionId=delete_marker_version)
        obj.wait_until_not_exists(VersionId=delete_marker_version)

    def commit():
        # delete all versions: the object's data and the delete marker
        obj.delete(VersionId=prev_version)
        obj.wait_until_not_exists(VersionId=prev_version)
        obj.delete(VersionId=delete_marker_version)
        obj.wait_until_not_exists(VersionId=delete_marker_version)

    undoers.append(undo)
    committers.append(commit)
143 |
def make_dir(bucket, key, undoers, committers):
    '''
    Create a "directory" marker object at the given key, unless something
    already exists under it.  The bucket must have versioning enabled.

    If dir does not already exist:
      Do: make dir
      Undo: delete dir (latest version)
      Commit: nop
    If dir already exists:
      Do: nop
      Undo: nop
      Commit: nop
    '''

    # check if dir already exists; limit(1) avoids listing every object
    # under the prefix just to learn that at least one exists
    if len(list(bucket.objects.filter(Prefix=key).limit(1))) > 0:
        # already exists
        return

    # make dir
    print("Making directory at s3://{}/{}".format(bucket.name, key))
    obj = bucket.put_object(Key=key)
    obj.wait_until_exists()
    if obj.version_id is None:
        # without versioning the undo protocol cannot work
        obj.delete()
        obj.wait_until_not_exists()
        raise Exception("Bucket must have versioning enabled")
    new_version = obj.version_id

    # add undoer (no committer needed: an empty marker is harmless to keep)
    def undo():
        obj.delete(VersionId=new_version)
        obj.wait_until_not_exists(VersionId=new_version)
    undoers.append(undo)
176 |
--------------------------------------------------------------------------------
/cfnplus/stack_policy_tag.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import collections
30 | import json
31 | import boto3
32 | from . import utils
33 |
def evaluate(arg_node, ctx):
    '''
    Evaluate an Aruba::StackPolicy tag: schedule an after-creation action
    that applies the tag's mapping as the stack's policy.

    :return: Instance of Result.
    '''

    tag_name = 'Aruba::StackPolicy'
    if not isinstance(arg_node, collections.Mapping):
        raise utils.InvalidTemplate("{}: must contain mapping".\
            format(tag_name))
    if ctx.stack_name is None:
        raise utils.InvalidTemplate("{}: stack name is unknown".\
            format(tag_name))

    def set_policy_action(undoers, committers):
        # the policy can only be applied once the stack exists, hence
        # after-creation
        cfn_client = boto3.client('cloudformation', region_name=ctx.aws_region)
        print("Setting policy for stack {}".format(ctx.stack_name))
        cfn_client.set_stack_policy(
            StackName=ctx.stack_name,
            StackPolicyBody=json.dumps(arg_node),
        )

    return utils.Result(after_creation=[set_policy_action])
55 |
--------------------------------------------------------------------------------
/cfnplus/stack_resource.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import collections
30 | import hashlib
31 | import json
32 | import io
33 | import boto3
34 | from . import utils, eval_cfn_expr, s3_ops
35 |
def evaluate(resource, ctx):
    '''
    Evaluate an Aruba::Stack resource: process the imported template's
    Aruba tags, upload the processed template text to S3, and replace the
    resource with a standard 'AWS::CloudFormation::Stack' resource that
    points at the uploaded template.

    :param resource: The parsed Aruba::Stack resource node.
    :param ctx: An instance of utils.Context for the enclosing template.

    :return: Instance of Result.
    :throw: utils.InvalidTemplate
    '''

    # 1. Compute local vars for imported template
    # 2. Evaluate Aruba tags in imported template
    # 3. Upload imported template to S3
    # 4. Make 'AWS::CloudFormation::Stack' resource

    # only one level of template import is supported
    if ctx.template_is_imported:
        raise utils.InvalidTemplate("Cannot have imported template in " + \
            "imported template")

    # check resource contents
    ex = utils.InvalidTemplate("Invalid argument for Aruba::Stack: {}".\
        format(json.dumps(resource)))
    if not isinstance(resource, collections.Mapping):
        raise ex
    try:
        props = resource['Properties']
    except KeyError:
        raise ex
    if not isinstance(props, collections.Mapping):
        raise ex
    try:
        template_node = props['Template']
    except KeyError:
        raise ex
    if not isinstance(template_node, collections.Mapping):
        raise ex
    try:
        local_path_node = template_node['LocalPath']
        s3_dest_node = template_node['S3Dest']
    except KeyError:
        raise ex
    # 'Parameters' is optional
    params_node = props.get('Parameters')

    # eval nodes
    local_path = eval_cfn_expr.eval_expr(local_path_node, ctx)
    s3_dest = eval_cfn_expr.eval_expr(s3_dest_node, ctx)
    s3_bucket, s3_dir_key = utils.parse_s3_uri(s3_dest)

    # compute local vars: the stack's parameters become variables visible
    # inside the imported template
    new_ctx = ctx.copy()
    if params_node is not None:
        for param_name, param_node in params_node.items():
            try:
                param_value = eval_cfn_expr.eval_expr(param_node, ctx)
            except utils.InvalidTemplate:
                # it's okay; not everything needs to be resolvable at this point
                continue
            new_ctx.set_var(param_name, param_value)

    # eval Aruba tags in imported template
    template_abs_path = ctx.abspath(local_path)
    with open(template_abs_path) as f:
        imported_template_str = f.read()
    # NOTE: new_ctx must be fully set up BEFORE the cache lookup, since
    # these attributes are part of the cache key
    new_ctx.template_is_imported = True
    new_ctx.template_path = template_abs_path
    new_ctx.stack_name = None
    new_template_str = ctx.proc_result_cache_get(imported_template_str, new_ctx)
    if new_template_str is None:
        result = ctx.process_template_func(imported_template_str, new_ctx)

        # add to cache
        ctx.proc_result_cache_put(imported_template_str, new_ctx, \
            result.new_template)
    else:
        # cache hit: the template text is known, and its actions were
        # already collected when it was first processed
        result = utils.Result(new_template=new_template_str)

    # make S3 key: keyed by content hash, so an unchanged template maps to
    # the same S3 object
    h = hashlib.new(utils.FILE_HASH_ALG)
    h.update(result.new_template.encode('utf-8'))
    s3_key = '{}/{}'.format(s3_dir_key, h.hexdigest())

    def upload_action(undoers, committers):
        # upload the processed template text to S3
        buf = io.BytesIO()
        buf.write(result.new_template.encode('utf-8'))
        buf.seek(0)
        bucket = boto3.resource('s3', region_name=ctx.aws_region).\
            Bucket(s3_bucket)
        s3_ops.upload_file(buf, bucket, s3_key, undoers, committers)

    # make 'AWS::CloudFormation::Stack' resource
    s3_dest_uri = 'https://s3-{region}.amazonaws.com/{bucket}/{key}'\
        .format(region=ctx.aws_region, bucket=s3_bucket, key=s3_key)
    cfn_resource = {
        'Type': 'AWS::CloudFormation::Stack',
        'Properties': {
            'TemplateURL': s3_dest_uri,
        },
    }
    if params_node is not None:
        # pass the (unevaluated) parameters through to CloudFormation
        cfn_resource['Properties']['Parameters'] = params_node

    # the template must be in S3 before the stack is created, so the
    # upload runs first among the before-creation actions
    final_result = utils.Result(
        new_template=cfn_resource,
        before_creation=[upload_action] + result.before_creation,
        after_creation=result.after_creation,
    )
    return final_result
138 |
--------------------------------------------------------------------------------
/cfnplus/utils.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 | # pylint: disable=too-many-arguments
29 | # pylint: disable=too-many-instance-attributes
30 |
31 | import os
32 | import json
33 | import io
34 | try:
35 | from urlparse import urlparse
36 | except ImportError:
37 | # Python 3
38 | from urllib.parse import urlparse
39 | import boto3
40 | import botocore
41 |
42 | try:
43 | base_str = basestring
44 | except NameError:
45 | # Python 3
46 | base_str = str
47 |
48 | try:
49 | unicode
50 | except NameError:
51 | # Python 3
52 | StringIO = io.StringIO
53 | else:
54 | # Python 2
55 | StringIO = io.BytesIO
56 |
57 | FILE_HASH_ALG = 'sha1'
58 |
class InvalidTemplate(Exception):
    '''Raised when a template (or a tag argument within it) is malformed.'''
    pass
61 |
def bucket_exists(bucket_name, aws_region):
    '''
    :return: True iff a HEAD request on the bucket succeeds (False on 404).
    '''
    s3 = boto3.client('s3', region_name=aws_region)
    try:
        s3.head_bucket(Bucket=bucket_name)
        return True
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        if int(e.response['Error']['Code']) == 404:
            return False
        raise e
75 |
def parse_s3_uri(uri):
    '''
    Split an S3 URI of the form "s3://bucket/some/key" into its parts.

    :param uri: The URI string to parse.

    :return: A pair (bucket, key).  The key has no leading '/'.
    :throw: InvalidTemplate if the URI's scheme is not 's3'.
    '''

    parsed = urlparse(uri)
    if parsed.scheme != 's3':
        # report the original URI text, not the ParseResult's repr
        raise InvalidTemplate("Invalid URI: '{}'".format(uri))
    key = parsed.path
    if key.startswith('/'):
        key = key[1:]
    return (parsed.netloc, key)
89 |
def dict_only_item(d):
    '''Return (key, value) for a mapping known to contain a single entry.'''
    try:
        key = d.iterkeys().next() # Python 2
    except AttributeError:
        key = next(iter(d)) # Python 3
    return key, d[key]
98 |
class Context(object):
    '''
    Carries everything needed to evaluate Aruba tags in a template: a
    symbol table for variable resolution, AWS/stack settings, and a shared
    cache of template-processing results.
    '''

    def __init__(self, symbols, aws_region=None, \
        template_path=None, stack_name=None, template_is_imported=False, \
        process_template_func=None, resource_name=None, resource_node=None):
        # copy, so the caller's dict is not mutated by set_var
        self._symbols = dict(**symbols)
        self.aws_region = aws_region
        self.template_path = template_path
        self.stack_name = stack_name
        self.template_is_imported = template_is_imported
        self.process_template_func = process_template_func
        self.resource_name = resource_name
        self.resource_node = resource_node
        self._proc_result_cache = {}

    def copy(self):
        '''
        Return a copy with an independent symbol table.  The
        processing-result cache is deliberately shared with the copy.
        '''
        ctx = Context(
            self._symbols,
            aws_region=self.aws_region,
            template_path=self.template_path,
            stack_name=self.stack_name,
            template_is_imported=self.template_is_imported,
            process_template_func=self.process_template_func,
            resource_name=self.resource_name,
            resource_node=self.resource_node)
        ctx._proc_result_cache = self._proc_result_cache # pylint: disable=protected-access
        return ctx

    @property
    def _built_in_vars(self):
        # CloudFormation pseudo-parameters that are implicitly defined
        var_map = {}
        if self.aws_region is not None:
            var_map['AWS::Region'] = self.aws_region
        if self.stack_name is not None:
            var_map['AWS::StackName'] = self.stack_name
        return var_map

    def resolve_var(self, symbol):
        '''
        :return: The symbol's value (explicit symbols shadow built-ins).
        :throw: KeyError if the symbol is unknown.
        '''
        try:
            return self._symbols[symbol]
        except KeyError:
            return self._built_in_vars[symbol]

    def set_var(self, symbol, value):
        '''Bind symbol to value in this context's symbol table.'''
        self._symbols[symbol] = value

    def resolve_cfn_export(self, var_name):
        '''
        Look up a CloudFormation export by name, paging through the
        list-exports API as needed.

        :throw: InvalidTemplate if no export with that name exists.
        '''
        cf = boto3.client('cloudformation', region_name=self.aws_region)
        args = {}
        while True:
            result = cf.list_exports(**args)
            for export in result['Exports']:
                if export['Name'] == var_name:
                    return export['Value']

            try:
                args['NextToken'] = result['NextToken']
            except KeyError:
                # no more pages
                raise InvalidTemplate("No such CloudFormation export: {}".\
                    format(var_name))

    def abspath(self, rel_path):
        '''Resolve rel_path relative to the template's directory.'''
        template_dir = os.path.dirname(self.template_path)
        return os.path.abspath(os.path.join(template_dir, rel_path))

    @staticmethod
    def _proc_result_cache_make_key(template_str, ctx):
        # Serialize the template together with the context attributes that
        # influence processing.  sort_keys=True makes the key canonical:
        # without it, contexts with equal symbols added in different orders
        # would serialize differently and needlessly miss the cache.
        attrs = ['_symbols', 'aws_region', 'template_path', 'stack_name', \
            'template_is_imported']
        d = {'template_str': template_str}
        for attr in attrs:
            d[attr] = getattr(ctx, attr)
        return json.dumps(d, sort_keys=True)

    def proc_result_cache_get(self, template_str, ctx):
        '''
        It is possible that a template is processed multiple times, so we'd like
        to keep a cache of results. Since template processing is a function
        of both templates and contexts, we need to include the context in
        the cache key along with the template. So we need a way to serialize
        or hash the context (or at least the parts of the context that are
        relevant to template processing).

        :return: The processed template as a string, or None.
        '''

        key = self._proc_result_cache_make_key(template_str, ctx)
        try:
            return self._proc_result_cache[key]
        except KeyError:
            return None

    def proc_result_cache_put(self, template_str, ctx, new_template_str):
        '''Record the processing result for (template_str, ctx).'''
        key = self._proc_result_cache_make_key(template_str, ctx)
        self._proc_result_cache[key] = new_template_str
194 |
class Result(object):
    '''
    An instance of this class represents the result of processing a template.
    Such a result consists of
    - the new template
    - actions that should be done before the stack is created or updated
    - actions that should be done after the stack is created or updated

    It is also a context manager: on clean exit the pending commit
    callbacks run; on exception the undo callbacks run, most recent first.
    '''

    def __init__(self, new_template=None, before_creation=None, \
        after_creation=None):
        self.new_template = new_template
        self.before_creation = [] if before_creation is None else before_creation
        self.after_creation = [] if after_creation is None else after_creation
        self._undoers = [] # undo callbacks, run LIFO on failure
        self._committers = [] # commit callbacks, run on success

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit/undo are best-effort: a failure here must not mask the
        # original exception (if any), so report it instead of raising.
        try:
            if exc_type is None:
                for action in self._committers:
                    action()
                self._committers = []
            else:
                print("Undoing CloudFormation Plus actions")
                while len(self._undoers) > 0:
                    action = self._undoers.pop()
                    action()
        except Exception as e:
            # previously swallowed silently; at least tell the operator
            print("Warning: CloudFormation Plus cleanup action failed: {}".\
                format(e))

    def do_before_creation(self):
        '''
        Perform actions that should be done before the stack is created or
        updated.  Each action may register undo/commit callbacks.
        '''

        # do before-creation actions
        for action in self.before_creation:
            action(self._undoers, self._committers)

    def do_after_creation(self):
        '''
        Perform actions that should be done after the stack is created or
        updated.  This first commits the before-creation actions, since
        reaching this point means stack creation succeeded.
        '''

        # commit the before-creation actions
        for action in self._committers:
            action()
        self._committers = []
        self._undoers = []

        # do after-creation actions
        for action in self.after_creation:
            action(self._undoers, self._committers)
254 |
--------------------------------------------------------------------------------
/example/bootstrap/db.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | exit 0
4 |
--------------------------------------------------------------------------------
/example/data/a-file:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HewlettPackard/cloudformation-plus/b75e875a49dec9923e9b9c2f325a6a2e2f41d970/example/data/a-file
--------------------------------------------------------------------------------
/example/data/more-data/another-file:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HewlettPackard/cloudformation-plus/b75e875a49dec9923e9b9c2f325a6a2e2f41d970/example/data/more-data/another-file
--------------------------------------------------------------------------------
/example/deploy.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 | # pylint: disable=too-many-arguments
29 | # pylint: disable=too-many-ancestors
30 | import time
31 | import boto3
32 | import botocore
33 | import cfnplus
34 |
# Deployment configuration for the example.
_TEMPLATE_PATH = 'template.yml'  # top-level template, relative to the CWD
_STACK_NAME = 'MyApi'            # CloudFormation stack to create/update
_AWS_REGION = 'us-west-2'        # region in which the stack is deployed
38 |
def clean_up_changesets(cfn):
    """Delete every change set currently attached to the stack.

    Listing change sets raises ClientError when the stack does not
    exist; in that case there is nothing to clean up, so return quietly.
    """
    try:
        summaries = cfn.list_change_sets(StackName=_STACK_NAME)['Summaries']
    except botocore.exceptions.ClientError:
        return
    for summary in summaries:
        cfn.delete_change_set(ChangeSetName=summary['ChangeSetId'])
46 |
def make_or_update_stack(cfn, template, params):
    """Create the stack if it does not exist, otherwise update it.

    Works by creating a CloudFormation change set, polling until the
    change set is ready, executing it, and waiting for the resulting
    stack operation to complete.

    :param cfn: boto3 CloudFormation client
    :param template: template body as a string
    :param params: list of {'ParameterKey': ..., 'ParameterValue': ...} dicts
    :raises Exception: if change-set creation fails for any reason other
        than "the template contains no changes"
    """
    # does stack exist?
    # NOTE(review): any ClientError (throttling, bad credentials, ...) is
    # treated as "stack does not exist" -- presumably fine for an example
    # script, but worth confirming for production use.
    try:
        cfn.describe_stacks(StackName=_STACK_NAME)
        stack_exists = True
    except botocore.exceptions.ClientError:
        stack_exists = False

    # make change set
    change_set = cfn.create_change_set(
        StackName=_STACK_NAME,
        TemplateBody=template,
        Parameters=params,
        Capabilities=['CAPABILITY_IAM'],
        ChangeSetName='{}-change-set'.format(_STACK_NAME),
        ChangeSetType='UPDATE' if stack_exists else 'CREATE',
    )

    # wait for change set to get made
    while True:
        resp = cfn.describe_change_set(ChangeSetName=change_set['Id'])
        status = resp['Status']
        if status == 'CREATE_COMPLETE':
            break
        elif status == 'FAILED':
            reason = resp['StatusReason']

            # CloudFormation reports FAILED when the template contains no
            # changes; that is not an error for our purposes.
            if "The submitted information didn't contain changes." in reason or \
               "No updates are to be performed." in reason:
                print("No changes")
                return

            msg = "Failed to make change set for {}: {}".\
                format(_STACK_NAME, reason)
            raise Exception(msg)

        # Still pending/in progress -- poll again shortly.
        # NOTE(review): an unexpected terminal status (e.g. DELETE_COMPLETE)
        # would make this loop spin forever.
        time.sleep(2)

    # execute change set
    cfn.execute_change_set(ChangeSetName=change_set['Id'])

    # wait for execution to finish
    if stack_exists:
        waiter = cfn.get_waiter('stack_update_complete')
    else:
        waiter = cfn.get_waiter('stack_create_complete')
    waiter.wait(StackName=_STACK_NAME)
94 |
def main():
    """Deploy the example stack, running cfnplus pre/post-creation actions."""
    cfn = boto3.client('cloudformation', region_name=_AWS_REGION)

    # Read the raw template off disk.
    with open(_TEMPLATE_PATH) as template_file:
        template_body = template_file.read()

    params = [
        {'ParameterKey': 'Bucket', 'ParameterValue': 'niara-tmp'},
        {'ParameterKey': 'BucketArn', 'ParameterValue': 'arn:aws:s3:::niara-tmp'},
    ]

    # Expand the Aruba::* language extensions into plain CloudFormation.
    with cfnplus.process_template(
        template_body,
        params,  # template params
        _AWS_REGION,
        _TEMPLATE_PATH,
        _STACK_NAME,
    ) as cfnp_result:
        # Actions that must happen before the stack is created/updated
        # (e.g. uploading referenced files to S3).
        cfnp_result.do_before_creation()

        try:
            make_or_update_stack(cfn, cfnp_result.new_template, params)
        finally:
            # Change sets are no longer needed once executed (or failed).
            clean_up_changesets(cfn)

        # Actions that must happen after the stack is created/updated.
        cfnp_result.do_after_creation()

if __name__ == '__main__':
    main()
129 |
--------------------------------------------------------------------------------
/example/lambda/api/api.py:
--------------------------------------------------------------------------------
def handle(event=None, context=None):
    """AWS Lambda entry point (referenced as ``api.handle`` in the template).

    The Lambda runtime invokes handlers as ``handle(event, context)``;
    the previous zero-argument signature would raise TypeError on real
    invocation.  Both parameters default to None so existing direct
    callers of ``handle()`` keep working.

    :param event: Lambda invocation event (unused placeholder)
    :param context: Lambda runtime context (unused placeholder)
    :return: None
    """
    return
3 |
--------------------------------------------------------------------------------
/example/nested_template.yml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: 2010-09-09
2 |
3 | Parameters:
4 | Bucket:
5 | Type: String
6 |
7 | Resources:
8 | ApiLambda:
9 | Type: 'AWS::Lambda::Function'
10 | Properties:
11 | Aruba::LambdaCode:
12 | LocalPath: lambda/api
13 | S3Dest: {'Fn::Sub': 's3://${Bucket}/lambda'}
14 | Environment:
15 | Variables:
          DB_HOST: {'Fn::Sub': 'Database.PublicDnsName'} # FIXME: no ${...} placeholder -- Fn::Sub yields this literal string; 'Database' is also not defined in this template. Confirm intended value.
17 | Handler: api.handle
18 | Runtime: python2.7
19 | Role: {'Fn::Sub': '${LambdaRole.Arn}'}
20 |
21 | LambdaRole:
22 | Type: 'AWS::IAM::Role'
23 | Properties:
24 | AssumeRolePolicyDocument:
25 | Version: 2012-10-17
26 | Statement:
27 | - Effect: Allow
28 | Principal:
29 | Service:
30 | - lambda.amazonaws.com
31 | Action:
32 | - 'sts:AssumeRole'
33 |
--------------------------------------------------------------------------------
/example/template.yml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: 2010-09-09
2 |
3 | Parameters:
4 | Bucket:
5 | Type: String
6 | BucketArn:
7 | Type: String
8 |
9 | Metadata:
10 | Aruba::BeforeCreation:
11 | - S3Upload:
12 | LocalFile: bootstrap/db.sh
13 | S3Dest: {'Fn::Sub': 's3://${Bucket}/bootstrap/db.sh'}
14 |
15 | Aruba::AfterCreation:
16 | - S3Sync:
17 | LocalDir: data
18 | S3Dest: {'Fn::Sub': 's3://${Bucket}/data'}
19 | - S3Mkdir: {'Fn::Sub': 's3://${Bucket}/my-dir'}
20 |
21 | DatabaseRolePolicy: &database-role-policy
22 | Version: 2012-10-17
23 | Statement:
24 | - Effect: Allow
25 | Action: 's3:HeadBucket'
26 | Resource: '*'
27 | - Effect: Allow
28 | Action: 's3:ListBucket'
29 | Resource: {Ref: BucketArn}
30 | - Effect: Allow
31 | Action:
32 | - 's3:GetObject'
33 | Resource: {'Fn::Sub': '${BucketArn}/*'}
34 |
35 | Aruba::StackPolicy:
36 | Statement:
37 | - Effect: Allow
38 | Action: 'Update:*'
39 | Principal: '*'
40 | Resource: '*'
41 |
42 | Resources:
43 | Database:
44 | Type: 'AWS::EC2::Instance'
45 | Properties:
46 | AvailabilityZone: us-west-2a
47 | ImageId: ami-e251209a
48 | InstanceType: m5.large
49 | IamInstanceProfile: {Ref: InstProf}
50 | Aruba::BootstrapActions:
51 | Actions:
52 | - Path: {'Fn::Sub': 's3://${Bucket}/bootstrap/db.sh'}
53 | Timeout: PT5M
54 |
55 | DatabaseRole:
56 | Type: 'AWS::IAM::Role'
57 | Properties:
58 | AssumeRolePolicyDocument:
59 | Version: 2012-10-17
60 | Statement:
61 | - Effect: Allow
62 | Principal:
63 | Service:
64 | - ec2.amazonaws.com
65 | Action:
66 | - 'sts:AssumeRole'
67 | Policies:
68 | - PolicyName: DatabasePolicy
69 | PolicyDocument: *database-role-policy
70 |
71 | InstProf:
72 | Type: 'AWS::IAM::InstanceProfile'
73 | Properties:
74 | Roles:
75 | - Ref: DatabaseRole
76 |
77 | LambdaStack:
78 | Type: 'Aruba::Stack'
79 | Properties:
80 | Template:
81 | LocalPath: nested_template.yml
82 | S3Dest: {'Fn::Sub': 's3://${Bucket}/cf-templates'}
83 | Parameters:
84 | Bucket: {Ref: Bucket}
85 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

'''
distutils/setuptools install script.
'''

from setuptools import setup, find_packages

# Read the long description up front inside a context manager so the file
# handle is closed promptly; the previous inline open('README.md').read()
# leaked the handle until garbage collection (a ResourceWarning on Python 3).
with open('README.md') as _readme:
    _long_description = _readme.read()

setup(
    name='cloudformation-plus',
    version='1.0-pr1',
    description='A library that reduces the amount of code you must write in order to deploy non-trivial applications to AWS CloudFormation',
    long_description=_long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/HewlettPackard/cloudformation-plus',
    author='Hewlett Packard Enterprise',
    author_email='charles.shearer@hpe.com',
    license='Apache License 2.0',
    packages=['cfnplus'],
    install_requires=[
        'boto3>=1.9,<2',
        'pyyaml',
    ],
    tests_require=[
        'pytest',
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
)
36 |
--------------------------------------------------------------------------------
/test/integration/s3_ops_test.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-many-locals
27 | # pylint: disable=unused-argument
28 |
29 | import unittest
30 | import io
31 | import boto3
32 | import botocore
33 | import cfnplus.s3_ops as s3_ops
34 |
35 | AWS_REGION = 'us-west-2'
36 |
class S3OpsTest(unittest.TestCase):
    """Integration tests for cfnplus.s3_ops.

    WARNING: performs real AWS S3 operations.  A versioned bucket named
    'niara-s3ops-test' is created once for the whole class and deleted
    when the class finishes; each test's objects are purged in tearDown.

    The s3_ops functions under test stage their work and append
    callables to the ``committers`` and ``undoers`` lists they are
    given.  The *_success tests invoke the committers and assert the
    operation's effect is visible; the *_failure tests invoke the
    undoers and assert the prior state was restored.
    """

    @classmethod
    def setUpClass(cls):
        # Versioning is enabled because rollback of an overwrite/delete
        # relies on restoring a previous object version.
        cls._bucket = boto3.resource('s3', region_name=AWS_REGION).\
            Bucket('niara-s3ops-test')
        cls._bucket.create(
            CreateBucketConfiguration={'LocationConstraint': AWS_REGION}
        )
        cls._bucket.wait_until_exists()
        cls._bucket.Versioning().enable()

    @classmethod
    def tearDownClass(cls):
        # The bucket must already be empty here (tearDown purges objects).
        cls._bucket.delete()
        cls._bucket.wait_until_not_exists()

    def tearDown(self):
        # Purge every object (and all of its versions) created by the test.
        for obj in self._bucket.objects.all():
            self._delete_object(obj.key)

    def _delete_object(self, key):
        """Delete *key* including all of its versions.

        Repeatedly deletes the object's current version; once nothing is
        left, one of the S3 calls raises and the bare ``except`` -- the
        loop's only exit -- returns.
        """
        while True:
            try:
                obj = self._bucket.Object(key)
                if obj.version_id is None:
                    obj.delete()
                    obj.wait_until_not_exists()
                else:
                    obj.delete(VersionId=obj.version_id)
                    obj.wait_until_not_exists(VersionId=obj.version_id)
            except:
                return

    def assertObjectExists(self, key, contents):
        """Assert that *key* exists and holds exactly *contents* (bytes)."""
        try:
            resp = self._bucket.Object(key).get()
        except:
            self.fail("No object with key \"{}\"".format(key))
            return  # NOTE(review): unreachable -- self.fail() raises
        actual_contents = resp['Body'].read()
        self.assertEqual(contents, actual_contents)

    def assertObjectDoesNotExist(self, key):
        """Assert that getting *key* fails (object absent)."""
        obj = self._bucket.Object(key)
        self.assertRaises(botocore.exceptions.ClientError, obj.get)

    def assertDirObjectExists(self, key):
        """Assert that at least one object exists under the *key* prefix."""
        files = self._bucket.objects.filter(Prefix=key)
        self.assertGreater(len(list(files)), 0)

    def assertDirObjectDoesNotExist(self, key):
        """Assert that no object exists under the *key* prefix."""
        files = self._bucket.objects.filter(Prefix=key)
        self.assertEqual(0, len(list(files)))

    def testUploadFile_noExisting_success(self):
        """upload_file to a fresh key, then commit: object must exist."""
        #
        # Set up
        #

        # make local file
        file_contents = b"Hello world"
        buf = io.BytesIO(file_contents)

        # make S3 key
        key = 'my_file'

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.upload_file(buf, self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in committers:
            f()

        #
        # Test
        #

        # check that object exists
        self.assertObjectExists(key, file_contents)

    def testUploadFile_noExisting_failure(self):
        """upload_file to a fresh key, then undo: object must be gone."""
        #
        # Set up
        #

        # make local file
        file_contents = b"Hello world"
        buf = io.BytesIO(file_contents)

        # make S3 key
        key = 'my_file'

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.upload_file(buf, self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in undoers:
            f()

        #
        # Test
        #

        # check that object doesn't exist
        self.assertObjectDoesNotExist(key)

    def testUploadFile_existing_success(self):
        """upload_file over an existing key, then commit: new contents win."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_file'

        # make existing S3 file
        s3_contents_old = b"Hello world"
        obj = self._bucket.put_object(Body=s3_contents_old, Key=key)
        obj.wait_until_exists()

        # make local file
        file_contents_new = s3_contents_old + b" again"
        buf = io.BytesIO(file_contents_new)

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.upload_file(buf, self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in committers:
            f()

        #
        # Test
        #

        # check that object exists
        self.assertObjectExists(key, file_contents_new)

    def testUploadFile_existing_failure(self):
        """upload_file over an existing key, then undo: old contents restored."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_file'

        # make existing S3 file
        s3_contents_old = b"Hello world"
        obj = self._bucket.put_object(Body=s3_contents_old, Key=key)
        obj.wait_until_exists()

        # make local file
        file_contents_new = s3_contents_old + b" again"
        buf = io.BytesIO(file_contents_new)

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.upload_file(buf, self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in undoers:
            f()

        #
        # Test
        #

        # check that object exists
        self.assertObjectExists(key, s3_contents_old)

    def testDeleteObject_success(self):
        """delete_object, then commit: object must be gone."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_file'

        # make S3 object
        s3_contents = b"haaaiii!"
        obj = self._bucket.put_object(Key=key, Body=s3_contents)
        obj.wait_until_exists()
        self.assertObjectExists(key, s3_contents)

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.delete_object(self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in committers:
            f()

        #
        # Test
        #

        # check that object doesn't exist
        self.assertObjectDoesNotExist(key)

    def testDeleteObject_failure(self):
        """delete_object, then undo: object must still exist."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_file'

        # make S3 object
        s3_contents = b"haaaiii!"
        obj = self._bucket.put_object(Key=key, Body=s3_contents)
        obj.wait_until_exists()
        self.assertObjectExists(key, s3_contents)

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.delete_object(self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in undoers:
            f()

        #
        # Test
        #

        # check that object exists
        self.assertObjectExists(key, s3_contents)

    def testMakeDir_success(self):
        """make_dir, then commit: dir marker object must exist."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_dir'

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.make_dir(self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in committers:
            f()

        #
        # Test
        #

        # check that dir exists
        self.assertDirObjectExists(key)

    def testMakeDir_failure(self):
        """make_dir, then undo: dir marker object must not exist."""
        #
        # Set up
        #

        # make S3 key
        key = 'my_dir'

        #
        # Call
        #
        committers = []
        undoers = []
        s3_ops.make_dir(self._bucket, key, committers=committers, \
            undoers=undoers)
        for f in undoers:
            f()

        #
        # Test
        #

        # check that dir exists
        self.assertDirObjectDoesNotExist(key)
328 |
def main():
    """Interactively confirm, then run the S3 integration test suite.

    Prompts until the user answers 'y' (run) or 'n' (abort); any other
    answer re-prompts.
    """
    # BUG FIX: raw_input() exists only on Python 2, but tox runs this
    # suite under both py27 and py36 -- fall back to input() on Python 3.
    try:
        read_line = raw_input  # pylint: disable=undefined-variable
    except NameError:
        read_line = input

    print("WARNING: This test performs real AWS S3 operations.")
    while True:
        resp = read_line("Continue? [y/N] ").lower()
        if resp == 'y':
            break
        elif resp == 'n':
            return

    suite = unittest.TestLoader().loadTestsFromTestCase(S3OpsTest)
    unittest.TextTestRunner(verbosity=2).run(suite)

if __name__ == '__main__':
    main()
343 |
--------------------------------------------------------------------------------
/test/unit/eval_cfn_expr_test.py:
--------------------------------------------------------------------------------
1 | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # and in the "LICENSE.txt" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 |
14 | # pylint: disable=superfluous-parens
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=global-statement
18 | # pylint: disable=broad-except
19 | # pylint: disable=bare-except
20 | # pylint: disable=too-many-branches
21 | # pylint: disable=too-many-statements
22 | # pylint: disable=too-many-return-statements
23 | # pylint: disable=import-error
24 | # pylint: disable=no-else-return
25 | # pylint: disable=len-as-condition
26 | # pylint: disable=too-few-public-methods
27 | # pylint: disable=unused-argument
28 | import unittest
29 | from cfnplus.eval_cfn_expr import eval_expr
30 | from cfnplus.utils import Context
31 |
class EvalCfnExprTest(unittest.TestCase):
    """Unit tests for cfnplus.eval_cfn_expr.eval_expr."""

    def _run_cases(self, cases):
        """Evaluate each case's expression in a fresh Context and compare
        against the expected result."""
        for case in cases:
            ctx = Context(case['symbols'])
            self.assertEqual(case['result'], eval_expr(case['exp'], ctx))

    def testSubWithString(self):
        """Fn::Sub given a plain template string."""
        self._run_cases([
            {
                'exp': {'Fn::Sub': 'my name is Woobie!'},
                'symbols': {},
                'result': 'my name is Woobie!',
            },
            {
                'exp': {'Fn::Sub': 'my name is ${name}!'},
                'symbols': {'name': 'Woobie'},
                'result': 'my name is Woobie!',
            },
            {
                'exp': {'Fn::Sub': 'my ${attr} is ${value}!'},
                'symbols': {'attr': 'name', 'value': 'Woobie'},
                'result': 'my name is Woobie!',
            },
            {
                'exp': {'Fn::Sub': 'I have ${n} cats!'},
                'symbols': {'n': 2},
                'result': 'I have 2 cats!',
            },
        ])

    def testSubWithList(self):
        """Fn::Sub given the [template, local-variable-map] list form."""
        self._run_cases([
            {
                'exp': {
                    'Fn::Sub': [
                        'my name is Woobie!',
                        {},
                    ],
                },
                'symbols': {},
                'result': 'my name is Woobie!',
            },
            {
                'exp': {
                    'Fn::Sub': [
                        'my name is ${name}!',
                        {'name': 'Woobie'},
                    ],
                },
                'symbols': {},
                'result': 'my name is Woobie!',
            },
            {
                'exp': {
                    'Fn::Sub': [
                        'my ${attr} is ${value}!',
                        {'attr': 'name', 'value': 'Woobie'},
                    ],
                },
                'symbols': {},
                'result': 'my name is Woobie!',
            },
            {
                # Local map and outer symbols are combined.
                'exp': {
                    'Fn::Sub': [
                        'my ${attr} is ${value}!',
                        {'value': 'Woobie'},
                    ],
                },
                'symbols': {'attr': 'name'},
                'result': 'my name is Woobie!',
            },
        ])

    def testImportWithSub(self):
        """Fn::ImportValue whose export name is itself computed by Fn::Sub."""
        expr = {
            'Fn::ImportValue': {'Fn::Sub': 'Tc-${DeployId}-BucketName'}
        }
        ctx = Context({'DeployId': '1'})
        ctx.resolve_cfn_export = \
            lambda k: 'woobie' if k == 'Tc-1-BucketName' else None
        self.assertEqual('woobie', eval_expr(expr, ctx))

    def testRefWithRegVar(self):
        """Ref resolves an ordinary template variable."""
        ctx = Context({'Bucket': 'Woobie'})
        self.assertEqual('Woobie', eval_expr({'Ref': 'Bucket'}, ctx))

    def testRefWithBuiltInVar(self):
        """Ref resolves the AWS::Region pseudo-parameter."""
        ctx = Context({}, aws_region='us-west-2')
        self.assertEqual('us-west-2', eval_expr({'Ref': 'AWS::Region'}, ctx))
185 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # tox (https://tox.readthedocs.io/) is a tool for running tests
2 | # in multiple virtualenvs. This configuration file will run the
3 | # test suite on all supported python versions. To use it, "pip install tox"
4 | # and then run "tox" from this directory.
5 |
6 | [tox]
7 | envlist = py27, py36
8 |
9 | [testenv]
10 | deps =
11 | pytest
12 | commands =
13 | pytest {posargs}
14 |
--------------------------------------------------------------------------------