├── .coveragerc ├── .gitignore ├── .travis.yml ├── CONTRIBUTING.rst ├── LICENSE.md ├── Logo ├── JPG │ └── respawn-logo.jpg └── PNG │ ├── respawn-logo-2.png │ └── respawn-logo.png ├── MANIFEST.in ├── README.md ├── README.rst ├── VERSION ├── documentation ├── conf.py ├── getting_started.rst ├── index.rst ├── sample_yaml.rst ├── source_code.rst └── usage.rst ├── gen.sh ├── py_reqs.txt ├── respawn ├── __init__.py ├── autoscaling.py ├── cli.py ├── cloudformation.py ├── cloudwatch.py ├── ec2.py ├── elb.py ├── errors.py ├── gen.py ├── parameters.py ├── rds.py ├── route53.py ├── sns.py ├── test │ ├── README.rst │ ├── __init__.py │ ├── test_autoscaling.py │ ├── test_cloudformation.py │ ├── test_cloudwatch.py │ ├── test_ec2.py │ ├── test_elb.py │ ├── test_parameters.py │ ├── test_rds.py │ ├── test_route53.py │ └── test_sns.py └── util.py ├── setup.cfg ├── setup.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = respawn/gen.py, respawn/cli.py, respawn/test/* 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | .DS_Store 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | bin/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | .idea/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # Installer logs 27 | pip-log.txt 28 | pip-delete-this-directory.txt 29 | 30 | # Unit test / coverage reports 31 | htmlcov/ 32 | .tox/ 33 | .coverage 34 | .cache 35 | nosetests.xml 36 | coverage.xml 37 | *.json 38 | 39 | # Translations 40 | *.mo 41 | 42 | # Mr Developer 43 | .mr.developer.cfg 44 | .project 45 | .pydevproject 46 | 47 | # Rope 48 | .ropeproject 49 | 50 | # Django stuff: 51 | *.log 52 | *.pot 53 | 54 | # Sphinx documentation 55 | documentation/_build/ 56 | documentation/.DS_Store 57 | 58 | #python virtual-env 59 | python-virtualenv/ 60 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - '2.7' 4 | before_install: 5 | - pip install cfn-pyplates 6 | - pip install boto3 7 | - pip install Jinja2 8 | - pip install pytest-cov 9 | install: 10 | - pip install coveralls 11 | script: 12 | - py.test --cov-report= --cov=respawn/ 13 | - python setup.py install 14 | after_success: 15 | - coveralls 16 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Contributing to Respawn 2 | ######################## 3 | 4 | Bug fixes, feature additions, tests, documentation and more can be contributed via `issues `_ and/or `pull requests `_. All contributions 6 | are welcome. 7 | 8 | Bug fixes, feature additions, etc. 9 | ################################### 10 | 11 | Please send a pull request to the master branch. Please include `documentation `_ and `tests `_ for new features. Tests or documentation without bug fixes or feature additions are 12 | welcome too. Feel free to ask questions `via issues `_ or irc://irc 13 | .freenode.net#respawn 14 | 15 | - Fork the respawn repository. 16 | - Create a branch from master. 
17 | - Develop bug fixes, features, tests, etc. 18 | - Run the test suite on Python 2.7. You can enable `Travis CI on your repo `_ to catch test failures prior to the pull request and `Coveralls `_ to see if the changed code is covered by tests. 19 | - Create a pull request to pull the changes from your branch to the respawn master. 20 | 21 | Guidelines 22 | ########### 23 | 24 | - Separate code commits from reformatting commits. 25 | - Provide tests for any newly added code. 26 | - Follow PEP8 standard. 27 | - When committing only documentation changes please include [ci skip] in the commit message to avoid running tests on Travis-CI. 28 | 29 | Reporting Issues 30 | ################# 31 | 32 | When reporting issues, please include code that reproduces the issue and whenever possible, an image that demonstrates the issue. The best reproductions are self-contained scripts with minimal dependencies. 33 | 34 | Provide details 35 | ################ 36 | 37 | - What did you do? 38 | - What did you expect to happen? 39 | - What actually happened? 40 | - What versions of respawn and Python are you using? 41 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | #License - ISC 2 | 3 | Copyright (c) 2015, Dow Jones & Company, Inc. 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-------------------------------------------------------------------------------- /Logo/JPG/respawn-logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dowjones/respawn/0cb5092de0cb9f6f3db6edc6c861862ff0552e37/Logo/JPG/respawn-logo.jpg -------------------------------------------------------------------------------- /Logo/PNG/respawn-logo-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dowjones/respawn/0cb5092de0cb9f6f3db6edc6c861862ff0552e37/Logo/PNG/respawn-logo-2.png -------------------------------------------------------------------------------- /Logo/PNG/respawn-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dowjones/respawn/0cb5092de0cb9f6f3db6edc6c861862ff0552e37/Logo/PNG/respawn-logo.png -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in README.rst LICENSE.md VERSION -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 7 | 8 | [![Join the chat at https://gitter.im/dowjones/respawn](https://badges.gitter.im/dowjones/respawn.svg)](https://gitter.im/dowjones/respawn?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 9 | [![Documentation Status](https://readthedocs.org/projects/respawn/badge/?version=latest)](http://respawn.readthedocs.org/en/latest/?badge=latest) 10 | [![Coverage Status](https://coveralls.io/repos/dowjones/respawn/badge.svg?branch=master&service=github)](https://coveralls.io/github/dowjones/respawn?branch=master) 11 | [![Build Status](https://travis-ci.org/dowjones/respawn.svg?branch=master)](https://travis-ci.org/dowjones/respawn) 12 | ![Python Versions](https://img.shields.io/pypi/pyversions/respawn.svg) 13 | [![PyPi](https://img.shields.io/pypi/v/respawn.svg)](https://pypi.python.org/pypi/respawn) 14 | ![License](https://img.shields.io/pypi/l/respawn.svg) 15 | 16 | Version History 17 | ================ 18 | 19 | __v1.0.3__ 20 | 21 | * Initial release 22 | 23 | Introduction 24 | ============ 25 | 26 | Infrastructure templates and utilities for building AWS CloudFormation stacks. Respawn uses [cfn-pyplates](https://cfn-pyplates.readthedocs.org/en/latest/) to generate CloudFormation templates. Respawn digests a custom, easy to read/write YAML representation of a JSON CloudFormation template and resources, with the goal of generating CloudFormation templates based on python templates (pyplates!) that reflect the CloudFormation template hierarchy. 27 | 28 | Respawn is a Python package that provides interfaces to Amazon Web Services - Cloudformation. It allows for easier and more user friendly and concise YAML keywords to create resources/parameters/userdata in CloudFormation stacks. This is used in Dow Jones [professional information business](http://www.dowjones.com) pipeline and with success and has been modified to be as generic and serve all. Currently the library supports Python 2.7. 29 | 30 | Authors 31 | ======== 32 | 33 | Respawn has been written by the following [authors](https://github.com/dowjones/respawn/graphs/contributors). 34 | The logo for respawn has been designed by [Gregor Louden](http://www.gregorlouden.com). 
35 | 36 | Documentation 37 | ============= 38 | 39 | Documentation is generated by [sphinx](http://sphinx-doc.org) and hosted on [readthedocs](http://respawn.readthedocs.org/en/latest/). 40 | 41 | Services 42 | ======== 43 | 44 | At the moment, respawn supports: 45 | 46 | - AutoScaling 47 | - AutoScalingGroup 48 | - LifecycleHook 49 | - ScalingPolicy 50 | - ScheduledAction 51 | - CloudWatch 52 | - Alarm 53 | - Elastic Compute Cloud (EC2) 54 | - Instance 55 | - NetworkInterface 56 | - NetworkInterfaceAttachment 57 | - SecurityGroup 58 | - Volume 59 | - Elastic Load Balancing (ELB) 60 | - LoadBalancer 61 | - Relational Database Service (RDS) 62 | - DBInstance 63 | - Simple Notification Service (SNS) 64 | - Topic 65 | 66 | The goal of respawn is to support the full breadth and depth of Amazon Web Services - resources. respawn is developed mainly using Python 2.7.x on Mac OSX and Ubuntu. It is known to work on Linux Distributions, Mac OS X and Windows. 67 | 68 | Installation 69 | ============ 70 | 71 | To install respawn, simply: 72 | 73 | Windows/Unix/Mac OS X 74 | --------------------- 75 | 76 | - Open command prompt and execute pip command : 77 | 78 | 79 | 80 | pip install respawn 81 | 82 | Usage - Template Generation 83 | =========================== 84 | 85 | to use respawn, in your command prompt/terminal : 86 | 87 | $ respawn pathToYAML.yaml 88 | 89 | to create & validate the JSON against AWS using [boto] and pipe output to a file: 90 | 91 | $ respawn --validate pathToYAML.yaml > pathToJSON.json 92 | 93 | to pipe the output to a file : 94 | 95 | $ respawn pathToYAML.yaml > pathToJSON.json 96 | 97 | [respawn]: Logo/JPG/respawn-logo-dj-colors.jpg 98 | [image]: http://djin-jenkins01.dowjones.net:7777/buildStatus/icon?job=respawn 99 | [cfn-pyplates]: https://github.com/seandst/cfn-pyplates/tree/master/cfn_pyplates 100 | [boto]: https://github.com/boto/boto 101 | 102 | 103 |
104 | [casts: respawn-cast, respawn-validation-cast] 108 |
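For a concrete sense of what `pathToYAML.yaml` contains, here is a minimal, hypothetical spec in the style of the fuller example under `documentation/sample_yaml.rst`; the AMI ID, security group, hostname, and tag values are placeholders:

```yaml
# Minimal illustrative respawn spec -- placeholder values throughout;
# see documentation/sample_yaml.rst for the full set of supported keywords.
stack_name: sampleStack
environment: dev

instances:
  SampleInstance:
    hostname: sample-host
    instance_type: t2.small
    ami_id: ami-xxxxxxxx        # placeholder AMI
    security_groups:
      - sg-00000001             # placeholder security group
    tags:
      - key: team
        value: sampleTeam
```

Running `respawn` against a file like this prints the generated CloudFormation JSON to stdout, which the commands above redirect into `pathToJSON.json`.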
109 | 110 | Developing and Contributing 111 | ============================ 112 | 113 | We'd love to get contributions from you! Take a look at the [CONTRIBUTING.rst](CONTRIBUTING.rst) to see how to get 114 | your changes merged in. 115 | 116 | License 117 | ========= 118 | 119 | [ISC](LICENSE.md) 120 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://github.com/dowjones/respawn/blob/master/Logo/PNG/respawn-logo-2.png 2 | :align: center 3 | 4 | **************** 5 | Version History 6 | **************** 7 | 8 | :Version: v1.0.3 9 | 10 | * Initial release 11 | 12 | ************ 13 | Introduction 14 | ************ 15 | Infrastructure templates and utilities for building AWS CloudFormation stacks. Respawn uses `cfn-pyplates `_ to 16 | generate CloudFormation templates. Respawn digests a custom, easy to read/write YAML representation of a JSON CloudFormation template and resources, with the goal of generating CloudFormation templates based on python templates (pyplates!) that reflect the CloudFormation template hierarchy. 17 | 18 | Respawn is a Python package that provides interfaces to Amazon Web Services - Cloudformation. It allows for easier and more user friendly and concise YAML keywords to create resources/parameters/userdata in CloudFormation stacks. This is used in Dow Jones professional information business pipeline and with success and has been modified to be as generic and serve all. Currently the library supports Python 2.7. 19 | 20 | ************ 21 | Authors 22 | ************ 23 | Respawn has been written by the following `authors `_. The 24 | logo for respawn has been designed by `Gregor Louden `_. 25 | 26 | ************* 27 | Documentation 28 | ************* 29 | Documentation is generated by `sphinx `_ and hosted on `readthedocs `_ 30 | 31 | ******** 32 | Services 33 | ******** 34 | 35 | At the moment, respawn supports: 36 | 37 | * AutoScaling 38 | 39 | * AutoScalingGroup 40 | * LifecycleHook 41 | * ScalingPolicy 42 | * ScheduledAction 43 | 44 | * CloudWatch 45 | 46 | * Alarm 47 | 48 | * Elastic Compute Cloud (EC2) 49 | 50 | * Instance 51 | * NetworkInterface 52 | * NetworkInterfaceAttachment 53 | * SecurityGroup 54 | * Volume 55 | 56 | * Elastic Load Balancing (ELB) 57 | 58 | * LoadBalancer 59 | 60 | * Relational Database Service (RDS) 61 | 62 | * DBInstance 63 | 64 | * Simple Notification Service (SNS) 65 | 66 | * Topic 67 | 68 | The goal of respawn is to support the full breadth and depth of Amazon Web Services - resources. respawn is developed mainly using Python 2.7.x on Mac OSX and Ubuntu. It is known to work on Linux Distributions, Mac 69 | OS X and Windows. 
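To illustrate how two of these services are declared together, the hypothetical fragment below (placeholder names, zone, and AMI ID, modelled on the sample YAML under ``documentation/sample_yaml.rst``) pairs a launch configuration with an auto scaling group; as ``respawn/autoscaling.py`` enforces, an auto scaling group needs either a launch configuration or an instance ID, plus either availability zones or a VPC zone identifier::

    launch_configurations:
      SampleLaunchConfiguration:
        instance_type: t2.small
        ami_id: ami-xxxxxxxx          # placeholder AMI
        key_pair: SampleKey           # placeholder key pair

    auto_scale_groups:
      SampleAutoScaleGroup:
        launch_configuration: SampleLaunchConfiguration
        availability_zones:
          - us-east-1a                # placeholder zone
        min_size: 1
        max_size: 3
        desired_capacity: 2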
70 | 71 | 72 | ************* 73 | Installation 74 | ************* 75 | 76 | To install respawn, simply: 77 | 78 | Windows/Unix/Mac OS X 79 | ###################### 80 | 81 | - Open command prompt and execute pip command : 82 | 83 | :: 84 | 85 | pip install respawn 86 | 87 | 88 | **************************** 89 | Usage - Template Generation 90 | **************************** 91 | 92 | to use respawn, in your command prompt/terminal : 93 | 94 | :: 95 | 96 | $ respawn pathToYAML.yaml 97 | 98 | to create & validate the JSON against AWS using `boto `_ and pipe output to a file: 99 | 100 | :: 101 | 102 | $ respawn --validate pathToYAML.yaml > pathToJSON.json 103 | 104 | to pipe the output to a file : 105 | 106 | :: 107 | 108 | $ respawn pathToYAML.yaml > pathToJSON.json 109 | 110 | 111 | 112 | **************************** 113 | Developing and Contribution 114 | **************************** 115 | We'd love to get contributions from you! Take a look at the `CONTRIBUTING.rst `_ to see how to get your changes merged in. 117 | 118 | ************ 119 | License 120 | ************ 121 | `ISC `_ 122 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | v1.0.3 2 | -------------------------------------------------------------------------------- /documentation/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # respawn documentation build configuration file, created by 4 | # sphinx-quickstart on Fri Sep 18 13:28:01 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | import shlex 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | sys.path.insert(0, os.path.abspath('..')) 23 | 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | #needs_sphinx = '1.0' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = [ 34 | 'sphinx.ext.autodoc', 35 | 'sphinx.ext.todo', 36 | 'sphinx.ext.viewcode', 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | # The suffix(es) of source filenames. 43 | # You can specify multiple suffix as a list of string: 44 | # source_suffix = ['.rst', '.md'] 45 | source_suffix = '.rst' 46 | 47 | # The encoding of source files. 48 | #source_encoding = 'utf-8-sig' 49 | 50 | # The master toctree document. 51 | master_doc = 'index' 52 | 53 | # General information about the project. 
54 | project = u'respawn' 55 | copyright = u'2015, Victor Mena, Kuber Kaul, Vishal Shah' 56 | author = u'Victor Mena, Kuber Kaul, Vishal Shah' 57 | 58 | # The version info for the project you're documenting, acts as replacement for 59 | # |version| and |release|, also used in various other places throughout the 60 | # built documents. 61 | # 62 | # The short X.Y version. 63 | version = '0.0.1' 64 | # The full version, including alpha/beta/rc tags. 65 | release = '0.0.1' 66 | 67 | # The language for content autogenerated by Sphinx. Refer to documentation 68 | # for a list of supported languages. 69 | # 70 | # This is also used if you do content translation via gettext catalogs. 71 | # Usually you set "language" from the command line for these cases. 72 | language = None 73 | 74 | # There are two options for replacing |today|: either, you set today to some 75 | # non-false value, then it is used: 76 | #today = '' 77 | # Else, today_fmt is used as the format for a strftime call. 78 | #today_fmt = '%B %d, %Y' 79 | 80 | # List of patterns, relative to source directory, that match files and 81 | # directories to ignore when looking for source files. 82 | exclude_patterns = ['_build'] 83 | 84 | # The reST default role (used for this markup: `text`) to use for all 85 | # documents. 86 | #default_role = None 87 | 88 | # If true, '()' will be appended to :func: etc. cross-reference text. 89 | #add_function_parentheses = True 90 | 91 | # If true, the current module name will be prepended to all description 92 | # unit titles (such as .. function::). 93 | #add_module_names = True 94 | 95 | # If true, sectionauthor and moduleauthor directives will be shown in the 96 | # output. They are ignored by default. 97 | #show_authors = False 98 | 99 | # The name of the Pygments (syntax highlighting) style to use. 100 | pygments_style = 'sphinx' 101 | 102 | # A list of ignored prefixes for module index sorting. 103 | #modindex_common_prefix = [] 104 | 105 | # If true, keep warnings as "system message" paragraphs in the built documents. 106 | #keep_warnings = False 107 | 108 | # If true, `todo` and `todoList` produce output, else they produce nothing. 109 | todo_include_todos = True 110 | 111 | 112 | # -- Options for HTML output ---------------------------------------------- 113 | 114 | # The theme to use for HTML and HTML Help pages. See the documentation for 115 | # a list of builtin themes. 116 | html_theme = 'sphinx_rtd_theme' 117 | 118 | # Theme options are theme-specific and customize the look and feel of a theme 119 | # further. For a list of options available for each theme, see the 120 | # documentation. 121 | #html_theme_options = {} 122 | 123 | # Add any paths that contain custom themes here, relative to this directory. 124 | #html_theme_path = [] 125 | 126 | # The name for this set of Sphinx documents. If None, it defaults to 127 | # " v documentation". 128 | html_title = 'respawn' 129 | 130 | # A shorter title for the navigation bar. Default is the same as html_title. 131 | #html_short_title = None 132 | 133 | # The name of an image file (relative to this directory) to place at the top 134 | # of the sidebar. 135 | #html_logo = None 136 | 137 | # The name of an image file (within the static path) to use as favicon of the 138 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 139 | # pixels large. 140 | #html_favicon = None 141 | 142 | # Add any paths that contain custom static files (such as style sheets) here, 143 | # relative to this directory. 
They are copied after the builtin static files, 144 | # so a file named "default.css" will overwrite the builtin "default.css". 145 | html_static_path = ['_static'] 146 | 147 | # Add any extra paths that contain custom files (such as robots.txt or 148 | # .htaccess) here, relative to this directory. These files are copied 149 | # directly to the root of the documentation. 150 | #html_extra_path = [] 151 | 152 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 153 | # using the given strftime format. 154 | html_last_updated_fmt = '%b %d, %Y' 155 | 156 | # If true, SmartyPants will be used to convert quotes and dashes to 157 | # typographically correct entities. 158 | html_use_smartypants = True 159 | 160 | # Custom sidebar templates, maps document names to template names. 161 | #html_sidebars = {} 162 | 163 | # Additional templates that should be rendered to pages, maps page names to 164 | # template names. 165 | #html_additional_pages = {} 166 | 167 | # If false, no module index is generated. 168 | #html_domain_indices = True 169 | 170 | # If false, no index is generated. 171 | #html_use_index = True 172 | 173 | # If true, the index is split into individual pages for each letter. 174 | #html_split_index = False 175 | 176 | # If true, links to the reST sources are added to the pages. 177 | #html_show_sourcelink = True 178 | 179 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 180 | #html_show_sphinx = True 181 | 182 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 183 | #html_show_copyright = True 184 | 185 | # If true, an OpenSearch description file will be output, and all pages will 186 | # contain a tag referring to it. The value of this option must be the 187 | # base URL from which the finished HTML is served. 188 | #html_use_opensearch = '' 189 | 190 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 191 | #html_file_suffix = None 192 | 193 | # Language to be used for generating the HTML full-text search index. 194 | # Sphinx supports the following languages: 195 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 196 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 197 | #html_search_language = 'en' 198 | 199 | # A dictionary with options for the search language support, empty by default. 200 | # Now only 'ja' uses this config value 201 | #html_search_options = {'type': 'default'} 202 | 203 | # The name of a javascript file (relative to the configuration directory) that 204 | # implements a search results scorer. If empty, the default will be used. 205 | #html_search_scorer = 'scorer.js' 206 | 207 | # Output file base name for HTML help builder. 208 | htmlhelp_basename = 'respawndoc' 209 | 210 | # -- Options for LaTeX output --------------------------------------------- 211 | 212 | latex_elements = { 213 | # The paper size ('letterpaper' or 'a4paper'). 214 | #'papersize': 'letterpaper', 215 | 216 | # The font size ('10pt', '11pt' or '12pt'). 217 | #'pointsize': '10pt', 218 | 219 | # Additional stuff for the LaTeX preamble. 220 | #'preamble': '', 221 | 222 | # Latex figure (float) alignment 223 | #'figure_align': 'htbp', 224 | } 225 | 226 | # Grouping the document tree into LaTeX files. List of tuples 227 | # (source start file, target name, title, 228 | # author, documentclass [howto, manual, or own class]). 
229 | latex_documents = [ 230 | (master_doc, 'respawn.tex', u'respawn Documentation', 231 | u'Victor Mena, Kuber Kaul, Vishal Shah', 'manual'), 232 | ] 233 | 234 | # The name of an image file (relative to this directory) to place at the top of 235 | # the title page. 236 | #latex_logo = None 237 | 238 | # For "manual" documents, if this is true, then toplevel headings are parts, 239 | # not chapters. 240 | #latex_use_parts = False 241 | 242 | # If true, show page references after internal links. 243 | #latex_show_pagerefs = False 244 | 245 | # If true, show URL addresses after external links. 246 | #latex_show_urls = False 247 | 248 | # Documents to append as an appendix to all manuals. 249 | #latex_appendices = [] 250 | 251 | # If false, no module index is generated. 252 | #latex_domain_indices = True 253 | 254 | 255 | # -- Options for manual page output --------------------------------------- 256 | 257 | # One entry per manual page. List of tuples 258 | # (source start file, name, description, authors, manual section). 259 | man_pages = [ 260 | (master_doc, 'respawn', u'respawn Documentation', 261 | [author], 1) 262 | ] 263 | 264 | # If true, show URL addresses after external links. 265 | #man_show_urls = False 266 | 267 | 268 | # -- Options for Texinfo output ------------------------------------------- 269 | 270 | # Grouping the document tree into Texinfo files. List of tuples 271 | # (source start file, target name, title, author, 272 | # dir menu entry, description, category) 273 | texinfo_documents = [ 274 | (master_doc, 'respawn', u'respawn Documentation', 275 | author, 'respawn', 'One line description of project.', 276 | 'Miscellaneous'), 277 | ] 278 | 279 | # Documents to append as an appendix to all manuals. 280 | #texinfo_appendices = [] 281 | 282 | # If false, no module index is generated. 283 | #texinfo_domain_indices = True 284 | 285 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 286 | #texinfo_show_urls = 'footnote' 287 | 288 | # If true, do not generate a @detailmenu in the "Top" node's menu. 289 | #texinfo_no_detailmenu = False 290 | -------------------------------------------------------------------------------- /documentation/getting_started.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | **Getting Started** 3 | ==================== 4 | 5 | This page describes how to download, install and use the basic functionality of respawn. 6 | 7 | **Installation** 8 | ################ 9 | 10 | 11 | To install respawn, simply: 12 | 13 | **Windows/Unix/Mac OS X** 14 | ========================== 15 | 16 | - Open command prompt and execute pip command : 17 | 18 | :: 19 | 20 | pip install respawn 21 | 22 | 23 | **Usage - Template Generation** 24 | ################################ 25 | 26 | to use respawn, in your command prompt/terminal : 27 | 28 | :: 29 | 30 | $ respawn pathToYAML.yaml 31 | 32 | to create & validate the JSON against AWS using `boto `_ and pipe output to a file: 33 | 34 | :: 35 | 36 | $ respawn --validate pathToYAML.yaml > pathToJSON.json 37 | 38 | to pipe the output to a file : 39 | 40 | :: 41 | 42 | $ respawn pathToYAML.yaml > pathToJSON.json 43 | 44 | where: 45 | - pathToYAML.yaml = the YAML file that needs to be processed into JSON. 46 | - pathToJSON.json = the JSON file containing AWS cloudformation. 47 | 48 | For exhaustive documentation and help with specific keywords to be used with resources , got to usage section. 
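As a small taste of what such a YAML file can declare beyond plain resources, the hypothetical fragment below (placeholder names and values, modelled on the sample YAML in this documentation) defines a CloudFormation parameter and an instance whose user data is rendered from a Jinja2 template::

    parameters:
      Environment:
        type: String
        default: dev
        description: "Deployment environment"

    instances:
      SampleInstance:
        hostname: sample-host
        instance_type: t2.small
        ami_id: ami-xxxxxxxx           # placeholder AMI
        user_data:
          file: path/to/script.sh      # Jinja2 template
          params:
            environment: dev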
49 | 50 | 51 | **Dependencies** 52 | ################# 53 | 54 | - boto==2.32.1 55 | - nose==1.3.3 56 | - cfn-pyplates==0.4.3 57 | - Jinja2==2.7.3 58 | - enum34 59 | - pytest==2.7.1 60 | 61 | **Next Steps** 62 | ############### 63 | 64 | That concludes the getting started guide for respawn. Hopefully you're excited about the possibilities of respawn and 65 | ready to begin using respawn with your applications. 66 | 67 | We've covered the basics of respawn in this guide. We recommend moving on to the usage next, which serves 68 | as a complete reference to all the features of respawn. -------------------------------------------------------------------------------- /documentation/index.rst: -------------------------------------------------------------------------------- 1 | .. respawn documentation master file, created by 2 | sphinx-quickstart on Fri Sep 18 13:28:01 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ======== 7 | Respawn 8 | ======== 9 | 10 | Contents: 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | getting_started 16 | usage 17 | source_code 18 | sample_yaml 19 | 20 | 21 | Overview 22 | ######## 23 | 24 | Infrastructure templates and utilities for building AWS CloudFormation stacks. Respawn uses `cfn-pyplates `_ to 25 | generate CloudFormation templates. A pyplate is a class-based python representation of a JSON CloudFormation template and resources, with the goal of generating CloudFormation templates based on input python templates (pyplates!) that reflect the CloudFormation template hierarchy. 26 | 27 | Respawn is a Python package that provides interfaces to Amazon Web Services - Cloudformation. It allows for easier and more user friendly and concise YAML keywords to create resources/parameters/userdata in CloudFormation stacks. This is used in Dow Jones professional information business pipeline and with success and has been modified to be as generic and serve all. Currently the library supports Python 2.7 because of its dependency on cfn-pyplates. 28 | 29 | Summary 30 | ####### 31 | 32 | Respawn is template and utility for spawning AWS CloudFormation stacks from simpler YAML specifications. Respawn will consume a YAML file with documented keywords and spit out a CloudFormation stack json specification. 33 | 34 | 35 | Key Features 36 | ################# 37 | 38 | The key features of Respawn are: 39 | 40 | - Automatic CloudFormation creation: Respawn detects your application type and builds a CloudFormation JSON for your application tailored to your use based on your YAML. It supports multiple resources/parameters/user-data that AWS supports. Please go through usage to see the list of resources respawn supports. 41 | 42 | - Validates CloudFormation: Respawn validates the JSON created against AWS resources to confirm the correctness of your CloudFormation script. It utilizes boto3 and AWS credentials stored in your environment. 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /documentation/sample_yaml.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | **Sample YAML** 3 | ============================== 4 | 5 | 6 | **Sample YAML syntax for respawn.** : Please note that this contains most of the resources that respawn supports at 7 | this moment. We will keep adding on as we keep building resource support. 8 | 9 | .. 
code-block:: yaml 10 | 11 | 12 | # Globals 13 | stack_name: sampleStack 14 | environment: sampleEnvironment 15 | team: &team sampleTeam 16 | default_windows_ami: &win_ami sampleAMI 17 | multi_az: True 18 | eap: True 19 | ebs_optimized: &ebs_optimized false 20 | periodic_chef: false 21 | service_name: &service sampleServiceName 22 | 23 | 24 | parameters: 25 | testWeb: 26 | default: String 27 | type: String 28 | description: "Creating test param" 29 | allowed_values: 30 | - "value1" 31 | - "value2" 32 | allowed_pattern: "[A-Za-z0-9]+" 33 | no_echo: true 34 | max_length: String 35 | min_length: String 36 | max_value: String 37 | min_value: String 38 | constraint_description: "Malformed input-Parameter MyParameter must only contain upper and lower case letters" 39 | 40 | 41 | # Default Security Groups 42 | SgDevsample: &dev_djin_fcm String 43 | ELBSubnet: &elb_subnet String 44 | 45 | security_groups: 46 | Web: &web_sgs 47 | 48 | load_balancers: 49 | SampleLoadBalancer: 50 | scheme: internet-facing 51 | connection_settings: 52 | idle_timeout: 40 53 | cross_zone: True 54 | security_group: 55 | - sg-xxxxxxx1 56 | - sg-xxxxxxx2 57 | instances: 58 | - ref(SampleInstance) 59 | policies: 60 | - policy_name: SamplePolicyName1 61 | attribute: 62 | - name: SampleName1 63 | value: SampleValue1 64 | - name: SampleName2 65 | value: SampleValue2 66 | instance_ports: 67 | - 2121 68 | - 2424 69 | load_balancer_ports: 70 | - 32323 71 | - 2424 72 | policy_type: SSLNegotiationPolicyType 73 | - policy_name: SamplePolicyName2 74 | attribute: 75 | - name: SampleName1 76 | value: SampleValue1 77 | instance_ports: 78 | - 1212 79 | - 4242 80 | load_balancer_ports: 81 | - 23232 82 | - 4141 83 | app_cookie_stickiness_policy: 84 | - policy_name: SamplePolicy1 85 | cookie_name: SampleCookie1 86 | - policy_name: SamplePolicy2 87 | cookie_name: SampleCookie2 88 | connection_draining_policy: 89 | enabled: True 90 | timeout: 10 91 | availability_zones: 92 | - "Fn::GetAZs": "" 93 | health_check: 94 | healthy_threshold: 2 95 | interval: 10 96 | target: /healthcheck 97 | timeout: 10 98 | unhealthy_threshold: 2 99 | lb_cookie_stickiness_policy: 100 | - policy_name: SamplePolicyName1 101 | cookie_expiration_period: 300 102 | - policy_name: SamplePolicyName2 103 | cookie_expiration_period: 600 104 | load_balancer_name: SampleLoadBalancer1 # Unique name used by AWS 105 | access_logging_policy: 106 | emit_interval: 20 107 | enabled: True 108 | s3_bucket_name: SampleS3BucketName 109 | s3_bucket_prefix: SampleS3BucketPrefix 110 | listeners: 111 | https: 112 | load_balancer_port: 83 113 | instance_port: 84 114 | instance_protocol: tcp 115 | tcp: 116 | load_balancer_port: 8443 117 | instance_port: 8443 118 | instance_protocol: http 119 | ssl_certificate_id: SampleSSLARN 120 | tags: 121 | - key: Key1 122 | value: Value1 123 | - key: Key2 124 | value: Value2 125 | 126 | instances: 127 | SampleInstance: 128 | hostname: SampleHostname 129 | instance_type: m3.xlarge 130 | ami_id: ami-xxxxxxxx 131 | ebs_optimized: true 132 | iam_role: SampleIAMRole 133 | security_groups: 134 | - sg-00000001 135 | - sg-00000002 136 | ramdisk_id: SampleRamDiskID 137 | source_dest_check: true 138 | network_interfaces: 139 | Interface1: 140 | public_ip: true 141 | delete_on_termination: true 142 | device_index: 0 143 | subnet_id: subnet-xxxxxxxx 144 | private_ips: 145 | - private_ip: 1.1.1.1 146 | primary: false 147 | - private_ip: 2.2.2.2 148 | primary: true 149 | block_devices: 150 | /dev/sda: 151 | ebs: 152 | delete_on_termination: false 153 | encrypted: false 
154 | iops: 1000 155 | size: 100 156 | type: standard 157 | /dev/sdb: 158 | ebs: 159 | snapshot_id: snap-xxxxxxxx 160 | /dev/sdc: 161 | virtual_name: ephemeral0 162 | /dev/sdd: 163 | no_device: true 164 | volumes: 165 | - device: /dev/sdd 166 | volume_id: ref(SampleVolume1) 167 | - device: /dev/sde 168 | volume_id: vol-xxxxxxx 169 | tags: 170 | - key: Key1 171 | value: Value1 172 | user_data: 173 | file: path/to/script.sh # Jinja2 Template 174 | params: 175 | param1: hello 176 | param2: world 177 | 178 | 179 | volumes: 180 | SampleVolume1: 181 | availability_zone: SampleAZ 182 | instance: ref(SampleInstance) 183 | size: 100 184 | 185 | SampleVolume2: 186 | availability_zone: SampleAZ 187 | snapshot_id: snap-xxxxxxxx 188 | size: 1000 189 | iops: 4000 190 | kms_key_id: SampleKMSKeyID 191 | volume_type: standard 192 | encrypted: true 193 | tags: 194 | - key: Key1 195 | value: Value1 196 | deletion_policy: Retain 197 | 198 | auto_scale_groups: 199 | SampleAutoScaleGroup: 200 | hostname: sampleTestName 201 | availability_zones: 202 | - AZName1 203 | - AZName2 204 | min_size: 1 205 | max_size: 10 206 | desired_capacity: 10 207 | instance_id: ami-xxxxxxxx 208 | cooldown: 10 209 | launch_configuration: LaunchConfigName 210 | load_balancer_names: 211 | - LBName 212 | - ref(Sample_LB) 213 | max_size: 2 214 | min_size: 1 215 | metrics_collection: 216 | - granularity: 1Minute 217 | - granularity: 1Minute 218 | metrics: 219 | - Metric1 220 | - Metric2 221 | notification_configs: 222 | - notification_type: 223 | - Type1 224 | - Type2 225 | topic_arn: "arn:aws:[service]:[region]:[account]:resourceType/resourcePath" 226 | - notification_type: 227 | - Type3 228 | topic_arn: "arn:aws:[service]:[region]:[account]:resourceType/resourcePath" 229 | placement_group: PlacementGroupName 230 | tags: 231 | - key: Key1 232 | value: Value1 233 | propagate_at_launch: true 234 | - key: Key2 235 | value: Value2 236 | propagate_at_launch: true 237 | termination_policies: 238 | - Policy1 239 | - Policy2 240 | vpc_zone_identifier: 241 | - ZoneIdentifier1 242 | - ZoneIdentifier2 243 | 244 | launch_configurations: 245 | SampleLaunchConfiguration: 246 | instance_type: t2.small 247 | ebs_optimized: false 248 | ami_id: ami-xxxxxxxx 249 | iam_role: SampleIAMRole 250 | key_pair: SampleKey 251 | ramdisk_id: SampleRamDiskID 252 | public_ip: true 253 | security_groups: 254 | - sg-00000001 255 | - sg-00000002 256 | block_devices: 257 | /dev/sda: 258 | ebs: 259 | delete_on_termination: false 260 | encrypted: false 261 | iops: 1000 262 | size: 100 263 | type: standard 264 | /dev/sdb: 265 | ebs: 266 | snapshot_id: id-testSnapshot 267 | /dev/sdc: 268 | virtual_name: ephemeral0 269 | /dev/sdd: 270 | no_device: true 271 | user_data: 272 | file: path/to/script.sh # Jinja2 Template 273 | params: 274 | param1: hello 275 | param2: world 276 | 277 | lifecycle_hooks: 278 | SampleLifecycleHook: 279 | asg_name: ref(SampleAutoScaleGroup) 280 | lifecycle_transition: autoscaling:EC2_INSTANCE_TERMINATING 281 | notification_target_arn: ref(SampleSNSTopic) # SNS Topic 282 | role_arn: SampleIAMRole 283 | heartbeat_timeout: 1800 284 | default_result: CONTINUE 285 | notification_metadata: SampleMetadata 286 | 287 | 288 | scheduled_actions: 289 | SampleActionDown: 290 | asg_name: SampleAutoScaleGroup 291 | desired_capacity: 0 292 | max_size: 0 293 | min_size: 0 294 | recurrence: 0 7 * * * 295 | 296 | SampleActionUp: 297 | asg_name: SampleAutoScaleGroup 298 | desired_capacity: 5 299 | max_size: 5 300 | min_size: 5 301 | recurrence: 0 9 * * * 302 | 303 | rds: 
304 | SampleRDS: 305 | allocated_storage: 100 306 | instance_class: db.m1.small 307 | engine: MySQL 308 | allow_major_version_upgrade: True 309 | allow_minor_version_upgrade: True 310 | availability_zone: SampleAZ 311 | backup_retention_period: 10 312 | character_set_name: UTF8 313 | instance_identifier: SampleRDSName # Unique name used by AWS 314 | db_name: SampleDB 315 | db_parameter_group_name: SampleDBParameterGroup 316 | db_security_groups: 317 | - SampleSecurityGroup 318 | snapshot_identifier: SampleSnapshot 319 | subnet_group_name: SampleSubnetGroup 320 | engine: MySQL 321 | engine_version: 1.0.0 322 | iops: 1000 323 | kms_key_id: SampleKMSKeyID 324 | license_model: SampleLicenseModel 325 | master_username: SampleUsername 326 | multi_az: False 327 | option_group_name: SampleOptionGroup 328 | port: 3306 329 | preferred_backup_window: Mon:03:00-Mon:11:00 330 | preferred_maintenance_window: Tue:04:00-Tue:04:30 331 | publicly_accessible: False 332 | source_db_instance_identifier: SampleSourceDBIdentifier 333 | storage_encrypted: True 334 | vpc_security_groups: 335 | - SampleVPCSecurityGroup 336 | 337 | network_interfaces: 338 | SampleNetworkInterface: 339 | description: "Sample Description" 340 | group_set: 341 | - SampleGroup1 342 | - SampleGroup2 343 | private_ip_address: 10.20.03.20 344 | private_ip_addresses: 345 | - 10.23.23.23 346 | - 12.13.3.4 347 | secondary_private_ip_address_count: 4 348 | source_dest_check: true 349 | subnet_id: 131.3.13.1 350 | tags: 351 | - key: Key1 352 | value: Value1 353 | - key: Key2 354 | value: Value2 355 | 356 | network_interface_attachments: 357 | TestNetworkIntefaceAttachment: 358 | delete_on_termination: False 359 | device_index: 1 360 | instance_id: ref(SampleInstanceName) 361 | network_interface_id: ref(SampleNetworkInterfaceName) 362 | 363 | sns_topics: 364 | SampleSNSTopic: 365 | display_name : SampleSNSTopic 366 | topic_name : SampleTopic 367 | subscription: 368 | - protocol : https 369 | endpoint : Endpoint1 370 | - protocol : http 371 | endpoint : Endpoint2 372 | 373 | cloud_watch: 374 | SampleCloudWatch: 375 | actions_enabled: true 376 | alarm_actions: 377 | - AlarmAction1 378 | - AlarmAction2 379 | alarm_name: SampleAlarm 380 | alarm_description: "Sample alarm description" 381 | comparison_operator: GreaterThanOrEqualToThreshold 382 | dimensions: 383 | - name: Dimension1 384 | value: Value1 385 | - name: Dimension2 386 | value: Value2 387 | evaluation_periods: 15 388 | insufficient_data_actions: 389 | - InsufficientDataAction1 390 | - InsufficientDataAction2 391 | metric_name : SampleName 392 | namespace : SampleNamespace 393 | ok_actions : 394 | - OkAction1 395 | - OkAction2 396 | period : 12 397 | statistic : Average 398 | threshold : 10 399 | unit : Milliseconds 400 | 401 | security_group: 402 | SampleSecurityGroup: 403 | group_description: SampleDescription 404 | security_group_egress: 405 | - from_port: 80 406 | ip_protocol: http 407 | to_port: 80 408 | security_group_ingress: 409 | - from_port: 443 410 | ip_protocol: https 411 | to_port: 443 412 | tags: 413 | - key: Key1 414 | value: Value1 415 | vpc_id: SampleVPC 416 | 417 | 418 | 419 | -------------------------------------------------------------------------------- /documentation/source_code.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | **Source Code Documentation** 3 | ============================== 4 | 5 | 6 | **Resource Index** 7 | ########################## 8 | 9 | * `EC2`_ 10 | * `Load Balancer`_ 
11 | * `Auto Scaling`_ 12 | * `CloudFormation`_ 13 | * `CloudWatch`_ 14 | * `RDS`_ 15 | * `Sns Topic`_ 16 | 17 | 18 | 19 | **EC2** 20 | ======================= 21 | 22 | .. automodule:: respawn.ec2 23 | :members: 24 | :show-inheritance: 25 | 26 | **Load Balancer** 27 | ======================= 28 | 29 | .. automodule:: respawn.elb 30 | :members: 31 | :show-inheritance: 32 | 33 | **Auto Scaling** 34 | ======================= 35 | 36 | .. automodule:: respawn.autoscaling 37 | :members: 38 | :show-inheritance: 39 | 40 | **CloudFormation** 41 | ======================= 42 | 43 | .. automodule:: respawn.cloudformation 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | 48 | **CloudWatch** 49 | ======================= 50 | 51 | .. automodule:: respawn.cloudwatch 52 | :members: 53 | :show-inheritance: 54 | 55 | 56 | **RDS** 57 | ======================= 58 | 59 | .. automodule:: respawn.rds 60 | :members: 61 | :show-inheritance: 62 | 63 | **Sns Topic** 64 | ======================= 65 | 66 | .. automodule:: respawn.sns 67 | :members: 68 | :show-inheritance: 69 | 70 | 71 | **Parameter Index** 72 | ########################## 73 | 74 | .. automodule:: respawn.parameters 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | 80 | **Setup Class Index** 81 | ########################## 82 | 83 | .. automodule:: respawn.cli 84 | :members: 85 | :undoc-members: 86 | :show-inheritance: 87 | 88 | 89 | 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Enter virtual env 4 | if [ -z ${VIRTUAL_ENV} ]; then 5 | tmpdir=python-virtualenv 6 | virtualenv ${tmpdir} 7 | source ${tmpdir}/bin/activate 8 | fi 9 | 10 | validate=0 11 | OPTIND=1 12 | while getopts ":v" arg; do 13 | case $arg in 14 | v) 15 | validate=1 16 | ;; 17 | \?) 18 | echo "Invalid option: -$OPTARG" >&2 19 | exit 1 20 | ;; 21 | :) 22 | echo "Option -$OPTARG requires an argument." >&2 23 | exit 2 24 | ;; 25 | esac 26 | done 27 | shift $((OPTIND-1)) 28 | 29 | StackRole=${1:-dev} 30 | YamlSpec=${2:-*.yaml} 31 | 32 | # Install deps 33 | pip install -r py_reqs.txt || exit -3 34 | 35 | # Build all of the required templates 36 | for opt in ${StackRole}/${YamlSpec}; do 37 | 38 | opt_name=$(basename ${opt}) 39 | opt_name=${opt_name%.yaml} 40 | cftName=${StackRole}/${opt_name}.template.json 41 | 42 | # Create the CFT 43 | echo "Generating ${cftName}..." 44 | cfn_py_generate gen.py -o ${opt} > ${cftName} 2> /dev/tty 45 | 46 | # Validate the CFT 47 | if [[ ${validate} -gt 0 ]]; then 48 | echo "Validating ${cftName}..." 49 | aws --region us-east-1 cloudformation validate-template --template-body file://${cftName} 50 | if [ $? 
!= 0 ]; then 51 | echo "Validation FAILED" 52 | exit -4 53 | fi 54 | fi 55 | echo "Success" 56 | echo 57 | 58 | done 59 | 60 | -------------------------------------------------------------------------------- /py_reqs.txt: -------------------------------------------------------------------------------- 1 | cfn-pyplates==0.4.3 2 | Jinja2==2.7.3 3 | #enum34 4 | #pytest==2.7.1 5 | #pytest-cov==2.2.0 6 | #boto==2.32.1 7 | #nose==1.3.3 8 | -------------------------------------------------------------------------------- /respawn/__init__.py: -------------------------------------------------------------------------------- 1 | from respawn import autoscaling, cloudformation, cloudwatch, ec2, elb, errors, parameters, rds, sns, route53 2 | -------------------------------------------------------------------------------- /respawn/autoscaling.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core, functions 2 | from ec2 import BlockDevice, BlockDeviceMapping 3 | from errors import RespawnResourceError 4 | 5 | 6 | class MetricsCollection(core.JSONableDict): 7 | """ 8 | Creates a Metrics Collection 9 | 10 | :param granularity: String 11 | :param kwargs: metrics - [ String, ... ] 12 | """ 13 | # ---------------------------------------------------------------------------------------------------------- 14 | # Metrics Collection 15 | # ---------------------------------------------------------------------------------------------------------- 16 | def __init__(self, 17 | granularity, 18 | **kwargs 19 | ): 20 | super(MetricsCollection, self).__init__() 21 | self['Granularity'] = granularity 22 | if 'metrics' in kwargs: 23 | self['Metrics'] = kwargs.get('metrics') 24 | 25 | 26 | class NotificationConfigurations(core.JSONableDict): 27 | """ 28 | Creates a Notification Configuration 29 | 30 | :param notification_type: [ String, ... ] 31 | :param topic_arn: String 32 | """ 33 | # ---------------------------------------------------------------------------------------------------------- 34 | # NotificationConfiguration 35 | # ---------------------------------------------------------------------------------------------------------- 36 | def __init__(self, 37 | notification_type, 38 | topic_arn 39 | ): 40 | super(NotificationConfigurations, self).__init__() 41 | self['NotificationTypes'] = notification_type 42 | self['TopicARN'] = topic_arn 43 | 44 | 45 | class Tag(core.JSONableDict): 46 | """ 47 | Create ASG Tag 48 | 49 | :param key: String 50 | :param value: String 51 | :param propagate_at_launch: Boolean 52 | """ 53 | # ---------------------------------------------------------------------------------------------------------- 54 | # Tag 55 | # ---------------------------------------------------------------------------------------------------------- 56 | def __init__(self, 57 | key, 58 | value, 59 | propagate_at_launch 60 | ): 61 | super(Tag, self).__init__() 62 | self['Key'] = key 63 | self['Value'] = value 64 | self['PropagateAtLaunch'] = propagate_at_launch 65 | 66 | 67 | class LaunchConfiguration(core.Resource): 68 | """ 69 | Creates a Launch Configuration 70 | 71 | :param name: String 72 | :param ami_id: String 73 | :param instance_type: String 74 | 75 | kwargs 76 | - public_ip: Boolean 77 | - block_devices: [ BlockDeviceMapping, ... ] 78 | - classic_link_vpc_id: String 79 | - classic_link_vpc_security_groups: [ String, ... 
], 80 | - ebs_optimized: Boolean 81 | - iam_role: String 82 | - instance_id: String 83 | - monitoring: Boolean 84 | - kernel_id: String 85 | - key_pair: String 86 | - placement_tenancy: String 87 | - ramdisk_id: String 88 | - security_groups: [ SecurityGroup, ... ] 89 | - spot_price: String 90 | - user_data_script: String 91 | - attributes: { key: value, ... } 92 | """ 93 | # ---------------------------------------------------------------------------------------------------------- 94 | # Launch Configuration 95 | # ---------------------------------------------------------------------------------------------------------- 96 | def __init__( 97 | self, 98 | name, 99 | ami_id, 100 | instance_type, 101 | **kwargs 102 | ): 103 | if "classic_link_vpc_id" in kwargs and "classic_link_vpc_security_groups" not in kwargs: 104 | raise RespawnResourceError("Classic Link VPC Sercurity Groups (classic_link_vpc_security_groups) " 105 | "required with Class Link VPC ID (classic_link_vpc_id).", 106 | "classic Link VPC Id/Classic Link Vpc Security Groups") 107 | 108 | attributes = kwargs.get("attributes") 109 | 110 | properties = { 111 | 'ImageId': ami_id, 112 | 'InstanceType': instance_type 113 | } 114 | 115 | if 'block_devices' in kwargs: 116 | devices = kwargs.get('block_devices') 117 | block_devices = [] 118 | for device, args in devices.items(): 119 | if 'ebs' in args: 120 | args['ebs'] = BlockDevice(**args['ebs']) 121 | block_devices.append(BlockDeviceMapping(device, **args)) 122 | properties['BlockDeviceMappings'] = block_devices 123 | 124 | if "public_ip" in kwargs: 125 | properties['AssociatePublicIpAddress'] = kwargs.get("public_ip") # default=False 126 | if "classic_link_vpc_id" in kwargs: 127 | properties['ClassicLinkVPCId'] = kwargs.get("classic_link_vpc_id") 128 | if "classic_link_vpc_security_groups" in kwargs: 129 | properties['ClassicLinkVPCSecurityGroups'] = kwargs.get("classic_link_vpc_security_groups") 130 | if "ebs_optimized" in kwargs: 131 | properties['EbsOptimized'] = kwargs.get("ebs_optimized") # default=False 132 | if "iam_role" in kwargs: 133 | properties['IamInstanceProfile'] = kwargs.get("iam_role") 134 | if "instance_id" in kwargs: 135 | properties['InstanceId'] = kwargs.get("instance_id") 136 | if "monitoring" in kwargs: 137 | properties['InstanceMonitoring'] = kwargs.get("monitoring") # default=True 138 | if "kernel_id" in kwargs: 139 | properties['KernelId'] = kwargs.get("kernel_id") 140 | if "key_pair" in kwargs: 141 | properties['KeyName'] = kwargs.get("key_pair") 142 | if "placement_tenancy" in kwargs: 143 | properties['PlacementTenancy'] = kwargs.get("placement_tenancy") 144 | if "private_ip" in kwargs: 145 | properties['PlacementGroupName'] = kwargs.get("private_ip") 146 | if "ramdisk_id" in kwargs: 147 | properties['RamdiskId'] = kwargs.get("ramdisk_id") 148 | if "security_groups" in kwargs: 149 | properties['SecurityGroups'] = kwargs.get("security_groups") 150 | if "spot_price" in kwargs: 151 | properties['SpotPrice'] = kwargs.get("spot_price") 152 | if "user_data_script" in kwargs: 153 | properties['UserData'] = functions.base64(kwargs.get("user_data_script")) 154 | 155 | super(LaunchConfiguration, self).__init__(name, 'AWS::AutoScaling::LaunchConfiguration', properties, attributes) 156 | 157 | 158 | class AutoScalingGroup(core.Resource): 159 | """ 160 | Creates an AutoScaling Group 161 | 162 | :param name: String 163 | :param max_size: String 164 | :param min_size: String 165 | 166 | kwargs 167 | - availability_zones: [ String, ... 
] 168 | - cooldown: String 169 | - desired_capacity: String 170 | - health_check_grace_period: Integer 171 | - health_check_type: String 172 | - instance_id: String 173 | - launch_configuration: String 174 | - load_balancer_names: [ String, ... ] 175 | - metrics_collection: [ MetricsCollection, ... ] 176 | - notification_configs: [ NotificationConfigurations, ... ] 177 | - placement_group: String 178 | - tags: [ Tag, ...] 179 | - termination_policies: [ String, ..., ] 180 | - vpc_zone_identifier: [ String, ... ] 181 | - attributes: { key: value, ... } 182 | """ 183 | # ---------------------------------------------------------------------------------------------------------- 184 | # Auto Scaling Group 185 | # ---------------------------------------------------------------------------------------------------------- 186 | def __init__( 187 | self, 188 | name, 189 | max_size, 190 | min_size, 191 | **kwargs 192 | ): 193 | if "instance_id" not in kwargs and "launch_configuration" not in kwargs: 194 | raise RespawnResourceError( 195 | "Instance ID (instance_id) or Launch Configuration Name (launch_configuration) required.", 196 | "Instance Id/ Launch Configuration") 197 | 198 | if "availability_zones" not in kwargs and "vpc_zone_identifier" not in kwargs: 199 | raise RespawnResourceError( 200 | "Availability Zones (availability_zones) or VPC Zone Identifier (vpc_zone_identifier) " 201 | "required.", "AvailabilityZones/VPCZoneIdentifier") 202 | 203 | attributes = kwargs.get("attributes", dict()) 204 | 205 | properties = { 206 | 'MaxSize': max_size, 207 | 'MinSize': min_size 208 | } 209 | 210 | if "metrics_collection" in kwargs: 211 | metrics_collection = kwargs.get('metrics_collection') 212 | metrics_collections = [] 213 | for collection in metrics_collection: 214 | metrics_collections.append(MetricsCollection(**collection)) 215 | properties['MetricsCollection'] = metrics_collections 216 | 217 | if "notification_configs" in kwargs: 218 | notification_configs = kwargs.get("notification_configs") 219 | configs = [] 220 | for config in notification_configs: 221 | configs.append(NotificationConfigurations(**config)) 222 | properties['NotificationConfigurations'] = configs 223 | 224 | if 'tags' in kwargs: 225 | t = kwargs.get('tags') 226 | tags = [] 227 | for tag in t: 228 | tags.append(Tag(**tag)) 229 | properties['Tags'] = tags 230 | 231 | if "launch_configuration" in kwargs: 232 | properties['LaunchConfigurationName'] = kwargs.get("launch_configuration") 233 | if "load_balancer_names" in kwargs: 234 | properties['LoadBalancerNames'] = kwargs.get("load_balancer_names") 235 | if "availability_zones" in kwargs: 236 | properties['AvailabilityZones'] = kwargs.get("availability_zones") 237 | if "cooldown" in kwargs: 238 | properties['Cooldown'] = kwargs.get("cooldown") 239 | if "desired_capacity" in kwargs: 240 | properties['DesiredCapacity'] = kwargs.get("desired_capacity") 241 | if "health_check_grace_period" in kwargs: 242 | properties['HealthCheckGracePeriod'] = kwargs.get("health_check_grace_period") 243 | if "health_check_type" in kwargs: 244 | properties['HealthCheckType'] = kwargs.get("health_check_type") 245 | if "instance_id" in kwargs: 246 | properties['InstanceId'] = kwargs.get("instance_id") 247 | if "placement_group" in kwargs: 248 | properties['PlacementGroup'] = kwargs.get("placement_group") 249 | if "termination_policies" in kwargs: 250 | properties['TerminationPolicies'] = kwargs.get("termination_policies") 251 | if "vpc_zone_identifier" in kwargs: 252 | properties['VPCZoneIdentifier'] = 
kwargs.get("vpc_zone_identifier") 253 | 254 | super(AutoScalingGroup, self).__init__(name, 'AWS::AutoScaling::AutoScalingGroup', properties, attributes) 255 | 256 | 257 | class ScalingPolicy(core.Resource): 258 | """ 259 | Creates a Scaling Policy 260 | 261 | :param adjustment_type: String 262 | :param asg_name: String 263 | :param scaling_adjustment: String 264 | 265 | kwargs 266 | - cooldown: String 267 | - in_adjustment_step: String 268 | """ 269 | # ---------------------------------------------------------------------------------------------------------- 270 | # Scaling Policy 271 | # ---------------------------------------------------------------------------------------------------------- 272 | def __init__( 273 | self, 274 | name, 275 | adjustment_type, 276 | asg_name, 277 | scaling_adjustment, 278 | **kwargs 279 | ): 280 | attributes = kwargs.get("attributes", dict()) 281 | 282 | properties = { 283 | 'AdjustmentType': adjustment_type, 284 | 'AutoScalingGroupName': asg_name, 285 | 'ScalingAdjustment': scaling_adjustment 286 | } 287 | 288 | if "cooldown" in kwargs: 289 | properties['Cooldown'] = kwargs.get("cooldown") 290 | if "min_adjustment_step" in kwargs: 291 | properties['MinAdjustmentStep'] = kwargs.get("min_adjustment_step") 292 | 293 | super(ScalingPolicy, self).__init__(name, 'AWS::AutoScaling::ScalingPolicy', properties, attributes) 294 | 295 | 296 | class ScheduledAction(core.Resource): 297 | """ 298 | Creates a Scheduled Action 299 | 300 | :param asg_name: String 301 | 302 | kwargs 303 | - desired_capacity: Integer 304 | - end_time: Time stamp (e.g. 2010-06-01T00:00:00Z) 305 | - max_size: Integer 306 | - min_size: Integer 307 | - recurrence: String (e.g. cron) 308 | - start_time: Time stamp (e.g. 2010-06-01T00:00:00Z) 309 | """ 310 | # ---------------------------------------------------------------------------------------------------------- 311 | # Scheduled Action 312 | # ---------------------------------------------------------------------------------------------------------- 313 | def __init__( 314 | self, 315 | name, 316 | asg_name, 317 | **kwargs 318 | ): 319 | attributes = kwargs.get("attributes", dict()) 320 | 321 | properties = { 322 | 'AutoScalingGroupName': asg_name 323 | } 324 | 325 | if "desired_capacity" in kwargs: 326 | properties['DesiredCapacity'] = kwargs.get("desired_capacity") 327 | if "end_time" in kwargs: 328 | properties['EndTime'] = kwargs.get("end_time") 329 | if "max_size" in kwargs: 330 | properties['MaxSize'] = kwargs.get("max_size") 331 | if "min_size" in kwargs: 332 | properties['MinSize'] = kwargs.get("min_size") 333 | if "recurrence" in kwargs: 334 | properties['Recurrence'] = kwargs.get("recurrence") 335 | if "start_time" in kwargs: 336 | properties['StartTime'] = kwargs.get("start_time") 337 | 338 | super(ScheduledAction, self).__init__(name, 'AWS::AutoScaling::ScheduledAction', properties, attributes) 339 | 340 | 341 | class LifecycleHook(core.Resource): 342 | """ 343 | Creates a Lifecycle Hook 344 | 345 | :param asg_name: String 346 | :param lifecycle_transition: String 347 | :param notification_target_arn: String 348 | :param role_arn: String 349 | 350 | kwargs 351 | - default_result: String 352 | - heartbeat_timeout: Integer 353 | - notification_metadata: String 354 | """ 355 | # ---------------------------------------------------------------------------------------------------------- 356 | # LifeCycle Hook 357 | # ---------------------------------------------------------------------------------------------------------- 358 | def 
__init__( 359 | self, 360 | name, 361 | asg_name, 362 | lifecycle_transition, 363 | notification_target_arn, 364 | role_arn, 365 | **kwargs 366 | ): 367 | attributes = kwargs.get("attributes", dict()) 368 | 369 | properties = { 370 | 'AutoScalingGroupName': asg_name, 371 | 'LifecycleTransition': lifecycle_transition, 372 | 'NotificationTargetARN': notification_target_arn, 373 | 'RoleARN': role_arn 374 | } 375 | 376 | if "default_result" in kwargs: 377 | properties['DefaultResult'] = kwargs.get("default_result") 378 | if "heartbeat_timeout" in kwargs: 379 | properties['HeartbeatTimeout'] = kwargs.get("heartbeat_timeout") 380 | if "notification_metadata" in kwargs: 381 | properties['NotificationMetadata'] = kwargs.get("notification_metadata") 382 | 383 | super(LifecycleHook, self).__init__(name, 'AWS::AutoScaling::LifecycleHook', properties, attributes) 384 | -------------------------------------------------------------------------------- /respawn/cli.py: -------------------------------------------------------------------------------- 1 | """ 2 | CLI Entry point for respawn 3 | """ 4 | 5 | from __future__ import print_function 6 | from docopt import docopt 7 | from schema import Schema, Use, Or, Optional 8 | from subprocess import Popen, PIPE 9 | from pkg_resources import require 10 | import respawn 11 | import os 12 | import sys 13 | import boto3 14 | 15 | def generate(): 16 | """Generate CloudFormation Template from YAML Specifications 17 | Usage: 18 | respawn [--validate] 19 | respawn --help 20 | respawn --version 21 | 22 | Options: 23 | --validate 24 | Validates template with Amazon Web Services 25 | --help 26 | This usage information 27 | --version 28 | Package version 29 | """ 30 | 31 | version = require("respawn")[0].version 32 | args = docopt(generate.__doc__, version=version) 33 | scheme = Schema({ 34 | '': Use(str), 35 | '--validate': Or(True, False), 36 | '--help': Or(True, False), 37 | '--version': Or(True, False), 38 | }) 39 | args = scheme.validate(args) 40 | 41 | # The pyplates library takes a python script that specifies options 42 | # that is not in scope. 
As a result, the file cannot be imported, so 43 | # the path of the library is used and gen.py is appended 44 | gen_location = os.path.join(os.path.dirname(respawn.__file__), "gen.py") 45 | 46 | try: 47 | p = Popen(["cfn_py_generate", gen_location, "-o", args['']], stdin=PIPE, stdout=PIPE, stderr=PIPE) 48 | output, err = p.communicate() 49 | return_code = p.returncode 50 | if return_code == 0: 51 | print(output, file=sys.stdout) 52 | try: 53 | # Validate template 54 | if args['--validate']: 55 | client = boto3.client('cloudformation') 56 | client.validate_template(TemplateBody=output) 57 | print("---------------------------------------", file=sys.stderr) 58 | print("Template Validation Successful", file=sys.stderr) 59 | print("---------------------------------------", file=sys.stderr) 60 | except Exception as e: 61 | print("Template Validation Failed:", e, file=sys.stderr) 62 | else: 63 | print(err, file=sys.stderr) 64 | return return_code 65 | except Exception as e: 66 | print(e, file=sys.stderr) 67 | return 1 68 | -------------------------------------------------------------------------------- /respawn/cloudformation.py: -------------------------------------------------------------------------------- 1 | import jinja2 2 | from cfn_pyplates import core, functions 3 | from respawn import ec2, autoscaling, rds, sns, cloudwatch, elb, parameters 4 | from time import asctime, gmtime 5 | from errors import RespawnResourceError 6 | 7 | 8 | class WaitConditionHandle(core.Resource): 9 | def __init__(self, wait_handle_name): 10 | super(WaitConditionHandle, self).__init__(wait_handle_name, 'AWS::CloudFormation::WaitConditionHandle') 11 | 12 | 13 | class WaitCondition(core.Resource): 14 | def __init__(self, wait_condition_name, wait_handle_name, timeout, depends_on, count=1): 15 | wait_condition_properties = { 16 | 'Handle': wait_handle_name, 17 | 'Timeout': timeout, 18 | 'Count': count 19 | } 20 | super(WaitCondition, self).__init__(wait_condition_name, 21 | 'AWS::CloudFormation::WaitCondition', 22 | wait_condition_properties, 23 | [core.DependsOn([depends_on])] 24 | ) 25 | 26 | 27 | class Template(core.CloudFormationTemplate): 28 | def __init__(self, **kwargs): 29 | """ 30 | Creates CloudFormation Template 31 | """ 32 | 33 | name = kwargs.get('stack_name', 'unnamed_stack') 34 | 35 | self.__region = kwargs.get('region', 'us-east-1') 36 | 37 | self.__env = kwargs.get('environment', 'dev') 38 | 39 | self.__description = kwargs.get('description', 40 | '{name} in {env} ({ts})'.format(name=name, 41 | env=self.__env, 42 | ts=asctime(gmtime()) 43 | ) 44 | ) 45 | 46 | super(Template, self).__init__(self.__description, kwargs) 47 | 48 | def create_iam_role_param(self, name, iam_role): 49 | iam_role_param = name + 'IamRole' 50 | self.parameters.add( 51 | core.Parameter(iam_role_param, 'String', { 52 | 'Default': iam_role, 53 | 'Description': 'IAM Role for ' + name 54 | })) 55 | iam_role = functions.ref(iam_role_param) 56 | return iam_role 57 | 58 | def create_availability_zone_param(self, name, availability_zone): 59 | availability_zone_param = name + "AvailabilityZone" 60 | self.parameters.add( 61 | core.Parameter(availability_zone_param, 'String', { 62 | 'Default': availability_zone, 63 | 'Description': 'Availability Zone for ' + name 64 | })) 65 | availability_zone = functions.ref(availability_zone_param) 66 | return availability_zone 67 | 68 | def create_key_pair_param(self, name, key_pair): 69 | key_pair_param = name + 'KeyPair' 70 | self.parameters.add( 71 | core.Parameter(key_pair_param, 'String', { 72 
| 'Default': key_pair, 73 | 'Description': 'Key Pair for ' + name 74 | })) 75 | key_pair = functions.ref(key_pair_param) 76 | return key_pair 77 | 78 | # ---------------------------------------------------------------------------------------------------------- 79 | # Load Balancer 80 | # ---------------------------------------------------------------------------------------------------------- 81 | def add_load_balancer(self, name, **kwargs): 82 | listener_list = [] 83 | loadbalancer_values = dict() 84 | 85 | for key, value in kwargs.iteritems(): 86 | loadbalancer_values[key] = value 87 | 88 | # Check for presence of listener 89 | if kwargs.get('listeners') is None : 90 | raise RespawnResourceError("listeners needs to be present in loadbalancer options", "Listeners") 91 | 92 | # Recursing through multiple listeners of same/different protocol. 93 | if kwargs.get('listeners') is not None: 94 | for protocol, protocol_values in (kwargs.get('listeners', dict())).items(): 95 | if protocol in ('https', 'http', 'tcp', 'ssl'): 96 | if not isinstance(protocol_values, list): 97 | listeners = (elb.ProtocolListener(protocol, protocol_values['load_balancer_port'], protocol_values[ 98 | 'instance_port'], protocol_values.get('ssl_certificate_id'), **protocol_values)) 99 | listener_list.append(listeners) 100 | else: 101 | for protocol_value in protocol_values: 102 | listeners = (elb.ProtocolListener(protocol, protocol_value['load_balancer_port'], protocol_value[ 103 | 'instance_port'], protocol_value.get('ssl_certificate_id'), **protocol_value)) 104 | listener_list.append(listeners) 105 | else: 106 | raise RespawnResourceError('protocol needs to be one of HTTPS, HTTP, TCP, SSL', "Protocol") 107 | 108 | if kwargs.get('health_check_path') is None: 109 | health_check_path = '/aptest.html' 110 | loadbalancer_values['health_check_path'] = health_check_path 111 | 112 | loadbalancer_values['listeners'] = listener_list 113 | loadbalancer_values['env'] = self.__env 114 | lb = elb.LoadBalancer(name, **loadbalancer_values) 115 | self.resources.add(lb) 116 | return lb 117 | 118 | # ---------------------------------------------------------------------------------------------------------- 119 | # Launch Config 120 | # ---------------------------------------------------------------------------------------------------------- 121 | def add_launch_config(self, name, **kwargs): 122 | lc_arguments = kwargs 123 | 124 | security_groups = lc_arguments.get('security_groups') 125 | if security_groups is not None: 126 | lc_arguments['security_groups'] = security_groups 127 | 128 | # IAM Role 129 | iam_role = self.create_iam_role_param(name, kwargs.get("iam_role")) 130 | if iam_role is not None: 131 | lc_arguments['iam_role'] = iam_role 132 | 133 | # Key Pair 134 | key_pair = kwargs.get("key_pair", None) 135 | if key_pair is not None: 136 | key_pair = self.create_key_pair_param(name, key_pair) 137 | lc_arguments['key_pair'] = key_pair 138 | 139 | # User Data 140 | if "user_data" in kwargs: 141 | user_data = kwargs.get("user_data") 142 | with open(user_data['file'], "r") as f: 143 | user_data_script = f.read() 144 | user_data_script = jinja2.Template(user_data_script) 145 | lc_arguments['user_data_script'] = user_data_script.render(**user_data.get('params', dict())) 146 | 147 | lc = autoscaling.LaunchConfiguration(name, **lc_arguments) 148 | 149 | self.resources.add(lc) 150 | return lc 151 | 152 | # ---------------------------------------------------------------------------------------------------------- 153 | # Autoscaling Group 154 
| # ---------------------------------------------------------------------------------------------------------- 155 | def add_autoscaling_group(self, name, **kwargs): 156 | asg_arguments = kwargs 157 | asg = autoscaling.AutoScalingGroup(name, **asg_arguments) 158 | self.resources.add(asg) 159 | return asg 160 | 161 | # ---------------------------------------------------------------------------------------------------------- 162 | # Scheduled Action 163 | # ---------------------------------------------------------------------------------------------------------- 164 | def add_scheduled_action(self, name, **kwargs): 165 | sa_arguments = kwargs 166 | sa = autoscaling.ScheduledAction(name, **sa_arguments) 167 | self.resources.add(sa) 168 | return sa 169 | 170 | # ---------------------------------------------------------------------------------------------------------- 171 | # Lifecycle Hook 172 | # ---------------------------------------------------------------------------------------------------------- 173 | def add_lifecycle_hook(self, name, **kwargs): 174 | lh_arguments = kwargs 175 | lh = autoscaling.LifecycleHook(name, **lh_arguments) 176 | self.resources.add(lh) 177 | return lh 178 | 179 | # --------------------------------------------------------------------------------------------------------- 180 | # Route53 Record 181 | # --------------------------------------------------------------------------------------------------------- 182 | def add_route53_record_set( 183 | self, 184 | name, 185 | **kwargs 186 | ): 187 | record_set_arguments = dict() 188 | for key, value in kwargs.iteritems(): 189 | record_set_arguments[key] = value 190 | record_set = ec2.Instance(name, **record_set_arguments) 191 | self.resources.add(record_set) 192 | return record_set 193 | 194 | # ---------------------------------------------------------------------------------------------------------- 195 | # Instance 196 | # ---------------------------------------------------------------------------------------------------------- 197 | def add_instance(self, name, **kwargs): 198 | instance_arguments = kwargs 199 | 200 | # IAM Role 201 | iam_role = instance_arguments.get("iam_role", None) 202 | if iam_role is not None: 203 | iam_role = self.create_iam_role_param(name, iam_role) 204 | instance_arguments['iam_role'] = iam_role 205 | 206 | # Key Pair 207 | key_pair = instance_arguments.get("key_pair", None) 208 | if key_pair is not None: 209 | key_pair = self.create_key_pair_param(name, key_pair) 210 | instance_arguments['key_pair'] = key_pair 211 | 212 | # Availability Zone 213 | availability_zone = instance_arguments.get("availability_zone", None) 214 | if availability_zone is not None: 215 | availability_zone = self.create_availability_zone_param(name, availability_zone) 216 | instance_arguments['availability_zone'] = availability_zone 217 | 218 | # User Data 219 | if "user_data" in instance_arguments: 220 | user_data = instance_arguments.get("user_data") 221 | with open(user_data['file'], "r") as f: 222 | user_data_script = f.read() 223 | user_data_script = jinja2.Template(user_data_script) 224 | instance_arguments['user_data_script'] = user_data_script.render(**user_data.get('params', dict())) 225 | 226 | instance = ec2.Instance(name, **instance_arguments) 227 | 228 | self.resources.add(instance) 229 | return instance 230 | 231 | # ---------------------------------------------------------------------------------------------------------- 232 | # Volume 233 | # 
---------------------------------------------------------------------------------------------------------- 234 | def add_volume(self, name, availability_zone, **kwargs): 235 | volume_arguments = kwargs 236 | volume = ec2.Volume(name, availability_zone, **volume_arguments) 237 | self.resources.add(volume) 238 | return volume 239 | 240 | # ---------------------------------------------------------------------------------------------------------- 241 | # RDS 242 | # ---------------------------------------------------------------------------------------------------------- 243 | def add_rds_instance(self, name, **kwargs): 244 | rds_args = kwargs 245 | dbinstance = rds.DBInstance(name, **rds_args) 246 | self.resources.add(dbinstance) 247 | return dbinstance 248 | 249 | # ---------------------------------------------------------------------------------------------------------- 250 | # Cloud Watch Alarm 251 | # ---------------------------------------------------------------------------------------------------------- 252 | def add_cloud_watch_alarm(self, 253 | name, 254 | evaluation_periods, 255 | comparison_operator, 256 | period, 257 | metric_name, 258 | statistic, 259 | threshold, 260 | **kwargs): 261 | cloudwatch_arguments = kwargs 262 | cloudwatch_alarm = cloudwatch.CloudWatchAlarm(name, 263 | evaluation_periods, 264 | comparison_operator, 265 | period, 266 | metric_name, 267 | statistic, 268 | threshold, 269 | **cloudwatch_arguments 270 | ) 271 | self.resources.add(cloudwatch_alarm) 272 | return cloudwatch_alarm 273 | 274 | # ---------------------------------------------------------------------------------------------------------- 275 | # Network Interface 276 | # ---------------------------------------------------------------------------------------------------------- 277 | def add_network_interface(self, name, **kwargs): 278 | ni_arguments = kwargs 279 | network_interface = ec2.NetworkInterface(name, **ni_arguments) 280 | self.resources.add(network_interface) 281 | return network_interface 282 | 283 | # ---------------------------------------------------------------------------------------------------------- 284 | # Network Interface Attachment 285 | # ---------------------------------------------------------------------------------------------------------- 286 | def add_network_interface_attachment(self, name, **kwargs): 287 | nia_arguments = kwargs 288 | nia = ec2.NetworkInterfaceAttachment(name, **nia_arguments) 289 | self.resources.add(nia) 290 | return nia 291 | 292 | # ---------------------------------------------------------------------------------------------------------- 293 | # Parameters 294 | # ---------------------------------------------------------------------------------------------------------- 295 | def add_parameter(self, name, **kwargs): 296 | parameter_arguments = kwargs 297 | parameter = parameters.CustomParameters(name, **parameter_arguments) 298 | self.parameters.add(parameter) 299 | return parameter 300 | 301 | # ---------------------------------------------------------------------------------------------------------- 302 | # SNS Topic 303 | # ---------------------------------------------------------------------------------------------------------- 304 | def add_sns_topic(self, name, **kwargs): 305 | sns_arguments = kwargs 306 | sns_topic = sns.SnsTopic(name, **sns_arguments) 307 | self.resources.add(sns_topic) 308 | return sns_topic 309 | 310 | # ---------------------------------------------------------------------------------------------------------- 311 | # 
Security Group 312 | # ---------------------------------------------------------------------------------------------------------- 313 | def add_security_group(self, name, **kwargs): 314 | security_args = kwargs 315 | security_group = ec2.SecurityGroup(name, **security_args) 316 | self.resources.add(security_group) 317 | return security_group 318 | -------------------------------------------------------------------------------- /respawn/cloudwatch.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | from respawn import util 3 | 4 | 5 | class CloudWatchProperties(util.SetNonEmptyPropertyMixin, core.JSONableDict): 6 | """ 7 | Available keyword arguments 8 | """ 9 | 10 | # ---------------------------------------------------------------------------------------------------------- 11 | # Cloudwatch Properties 12 | # ---------------------------------------------------------------------------------------------------------- 13 | def __init__(self, evaluation_periods, 14 | comparison_operator, 15 | period, 16 | metric_name, 17 | statistic, 18 | threshold, 19 | **kwargs): 20 | super(CloudWatchProperties, self).__init__(None, 'Properties') 21 | 22 | # ActionsEnabled : whether or not actions should be executed during any changes to the alarm's state. 23 | self._set_property('ActionsEnabled', kwargs.get('actions_enabled')) 24 | 25 | # AlarmActions : Actions to execute when this alarm transitions into an ALARM state from any other state. 26 | self._set_property('AlarmActions', kwargs.get('alarm_actions')) 27 | 28 | # AlarmDescription : The description for the alarm. 29 | self._set_property('AlarmDescription', kwargs.get('alarm_description')) 30 | 31 | # AlarmName : A name for the alarm. If not specify a name, AWS CloudFormation generates a unique physical ID 32 | self._set_property('AlarmName', kwargs.get('alarm_name')) 33 | 34 | # ComparisonOperator : You can specify the following values: GreaterThanOrEqualToThreshold 35 | # GreaterThanThreshold | LessThanThreshold | LessThanOrEqualToThreshold 36 | self._set_property('ComparisonOperator', comparison_operator) 37 | 38 | if kwargs.get('Dimensions') is not None: 39 | # Dimensions : The dimensions for the alarm's associated metric. 40 | self._set_property('Dimensions', transform_attribute(kwargs.get('dimensions'))) 41 | 42 | # EvaluationPeriods : The number of periods over which data is compared to the specified threshold. 43 | self._set_property('EvaluationPeriods', evaluation_periods) 44 | 45 | # InsufficientDataActions : The list of actions to execute when this alarm transitions into an 46 | # INSUFFICIENT_DATA state from any other state. 47 | self._set_property('InsufficientDataActions', kwargs.get('insufficient_data_actions')) 48 | 49 | # MetricName : The name for the alarm's associated metric. 50 | self._set_property('MetricName', metric_name) 51 | 52 | # Namespace : The namespace for the alarm's associated metric. 53 | self._set_property('Namespace', kwargs.get('namespace')) 54 | 55 | # OKActions : The list of actions to execute when this alarm transitions into an OK state from any other state. 56 | self._set_property('OKActions', kwargs.get('ok_actions')) 57 | 58 | # Period : The time over which the specified statistic is applied. 59 | # You must specify a time in seconds that is also a multiple of 60. 60 | self._set_property('Period', period) 61 | 62 | # Statistic : The statistic to apply to the alarm's associated metric. 
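# Valid values: SampleCount | Average | Sum | Minimum | Maximum.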
63 | self._set_property('Statistic', statistic) 64 | 65 | # Threshold : The value against which the specified statistic is compared. 66 | self._set_property('Threshold', threshold) 67 | 68 | # Unit : The unit for the alarm's associated metric. 69 | self._set_property('Unit', kwargs.get('unit')) 70 | 71 | 72 | class CloudWatchAlarm(core.Resource): 73 | """ 74 | Creates cloudwatch alarm. 75 | 76 | :param evaluation_period 77 | :param namespace 78 | :param period 79 | :param statistics 80 | :param threshold 81 | :param comparison_operator 82 | 83 | kwargs 84 | - actions_enabled: Boolean 85 | - alarm_actions: [ String, ... ] 86 | - alarm_description: String 87 | - alarm_name: String 88 | - dimensions: [ Metric dimension, ... ] 89 | - insufficient_data_actions: [ String, ... ], 90 | - metric_name: String, 91 | - ok_actions: [ String, ... ], 92 | - unit: String 93 | """ 94 | 95 | # ---------------------------------------------------------------------------------------------------------- 96 | # Cloudwatch Creation 97 | # ---------------------------------------------------------------------------------------------------------- 98 | def __init__(self, 99 | name, 100 | evaluation_periods, 101 | comparison_operator, 102 | period, 103 | metric_name, 104 | statistic, 105 | threshold, 106 | **kwargs 107 | ): 108 | super(CloudWatchAlarm, self).__init__(name, 'AWS::CloudWatch::Alarm') 109 | self.Properties = CloudWatchProperties(evaluation_periods, 110 | comparison_operator, 111 | period, 112 | metric_name, 113 | statistic, 114 | threshold, 115 | **kwargs) 116 | 117 | 118 | # ---------------------------------------------------------------------------------------------------------- 119 | # Transform attributes from lower case to upper case. 120 | # ---------------------------------------------------------------------------------------------------------- 121 | def transform_attribute(attribute_list): 122 | updated_attribute_list = [] 123 | for attribute_parameters in attribute_list: 124 | updated_attribute_list.append( 125 | {'Name': attribute_parameters.get('name'), 126 | 'Value': attribute_parameters.get('value')}) 127 | return updated_attribute_list 128 | -------------------------------------------------------------------------------- /respawn/ec2.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core, functions 2 | from respawn import util 3 | from errors import RespawnResourceError 4 | 5 | 6 | class BlockDeviceMapping(core.JSONableDict): 7 | """ 8 | Creates a Block Device Mapping 9 | 10 | :param device_name: String 11 | 12 | kwargs 13 | - ebs: EC2 EBS Block Device 14 | - no_device: Boolean 15 | - virtual_name: String 16 | """ 17 | 18 | def __init__( 19 | self, 20 | device_name, 21 | **kwargs 22 | ): 23 | if 'ebs' not in kwargs and 'virtual_name' not in kwargs and 'no_device' not in kwargs: 24 | raise RespawnResourceError("Ebs (ebs) or Virtual Name (virtual_name) required.", 25 | "BlockDeviceMapping", device_name) 26 | if 'ebs' in kwargs and 'virtual_name' in kwargs: 27 | raise RespawnResourceError("Only one of Ebs (ebs) and Virtual Name (virtual_name) can be specified.", 28 | "BlockDeviceMapping", device_name) 29 | 30 | super(BlockDeviceMapping, self).__init__() 31 | 32 | self['DeviceName'] = device_name 33 | if 'ebs' in kwargs: 34 | self['Ebs'] = kwargs.get('ebs') 35 | if 'no_device' in kwargs: 36 | if kwargs.get('no_device'): 37 | self['NoDevice'] = {} 38 | if 'virtual_name' in kwargs: 39 | self['VirtualName'] = 
kwargs.get('virtual_name') 40 | 41 | 42 | class BlockDevice(core.JSONableDict): 43 | """ 44 | Creates a Block Device 45 | 46 | kwargs 47 | - delete_on_termination: Boolean 48 | - iops: Integer 49 | - snapshot_id: String 50 | - size: Integer 51 | - volume_type: String 52 | """ 53 | 54 | def __init__( 55 | self, 56 | **kwargs 57 | ): 58 | if "snapshot_id" not in kwargs and "size" not in kwargs: 59 | raise RespawnResourceError("Snapshot Id (snapshot_id) or Size (size) required.", 60 | "BlockDevice") 61 | if "volume_type" in kwargs: 62 | if kwargs.get("volume_type") == "io1" and "iops" not in kwargs: 63 | raise RespawnResourceError("Iops (iops) required if Volume Type (volume_type) is io1.", 64 | "BlockDevice") 65 | 66 | super(BlockDevice, self).__init__() 67 | 68 | if 'delete_on_termination' in kwargs: 69 | self['DeleteOnTermination'] = kwargs.get('delete_on_termination') 70 | if 'iops' in kwargs: 71 | self['Iops'] = kwargs.get('iops') 72 | if 'snapshot_id' in kwargs: 73 | self['SnapshotId'] = kwargs.get('snapshot_id') 74 | if 'size' in kwargs: 75 | self['VolumeSize'] = kwargs.get('size') 76 | if 'volume_type' in kwargs: 77 | self['VolumeType'] = kwargs.get('volume_type') 78 | 79 | 80 | class EmbeddedNetworkInterface(core.JSONableDict): 81 | """ 82 | Creates a Network Interface 83 | 84 | :param device_index: String 85 | 86 | kwargs 87 | - public_ip: Boolean 88 | - delete_on_termination: Boolean 89 | - description: String 90 | - group_set: [ String, ... ] 91 | - interface_id: String 92 | - private_ip: String 93 | - private_ips: [ PrivateIpSpecification, ... ] 94 | - secondary_private_ip_count: Integer 95 | - subnet_id: String 96 | """ 97 | 98 | def __init__( 99 | self, 100 | device_index, 101 | **kwargs 102 | ): 103 | if 'subnet_id' not in kwargs and 'interface_id' not in kwargs: 104 | raise RespawnResourceError('Subnet Id (subnet_id) required if Interface Id (interface_id) not specified', 105 | 'NetworkInterface') 106 | 107 | super(EmbeddedNetworkInterface, self).__init__() 108 | 109 | self['DeviceIndex'] = device_index 110 | if 'public_ip' in kwargs: 111 | self['AssociatePublicIpAddress'] = kwargs.get('public_ip') 112 | if 'delete_on_termination' in kwargs: 113 | self['DeleteOnTermination'] = kwargs.get('delete_on_termination') 114 | if 'description' in kwargs: 115 | self['Description'] = kwargs.get('description') 116 | if 'group_set' in kwargs: 117 | self['GroupSet'] = kwargs.get('group_set') 118 | if 'interface_id' in kwargs: 119 | self['NetworkInterfaceId'] = kwargs.get('interface_id') 120 | if 'private_ip' in kwargs: 121 | self['PrivateIpAddress'] = kwargs.get('private_ip') 122 | if 'private_ips' in kwargs: 123 | self['PrivateIpAddresses'] = kwargs.get('private_ips') 124 | if 'secondary_private_ip_count' in kwargs: 125 | self['SecondaryPrivateIpAddressCount'] = kwargs.get('secondary_private_ip_count') 126 | if 'subnet_id' in kwargs: 127 | self['SubnetId'] = kwargs.get('subnet_id') 128 | 129 | 130 | class PrivateIpSpecification(core.JSONableDict): 131 | """ 132 | Creates a Private IP Specification 133 | 134 | :param private_ip: String 135 | :param primary: Boolean 136 | """ 137 | 138 | def __init__( 139 | self, 140 | private_ip, 141 | primary 142 | ): 143 | super(PrivateIpSpecification, self).__init__() 144 | 145 | self['PrivateIpAddress'] = private_ip 146 | self['Primary'] = primary 147 | 148 | 149 | class MountPoint(core.JSONableDict): 150 | """ 151 | Create Mount Point 152 | 153 | :param device: String 154 | :param volume_id: String 155 | """ 156 | 157 | def __init__( 158 | self, 159 | 
device, 160 | volume_id 161 | ): 162 | super(MountPoint, self).__init__() 163 | 164 | self['Device'] = device 165 | self['VolumeId'] = volume_id 166 | 167 | 168 | class SecurityGroupIngress(core.JSONableDict): 169 | def __init__( 170 | self, 171 | from_port, 172 | ip_protocol, 173 | to_port, 174 | **kwargs 175 | ): 176 | """ 177 | Create Security Group Ingress 178 | 179 | :param from_port: String 180 | :param ip_protocol: String 181 | :param to_port: String 182 | 183 | kwargs 184 | - cidr_ip: String 185 | - source_security_group_id: String 186 | - source_security_group_name: String 187 | - source_security_group_owner_id: String 188 | """ 189 | 190 | if "cidr_ip" in kwargs and ("source_security_group_name" in kwargs or "source_security_group_id" in kwargs): 191 | raise RespawnResourceError("Source Security Group Name (source_security_group_name) or Source Security " 192 | "Group Id (source_security_group_id) cannot be specified with " 193 | "Cidr IP (cidr_ip)", 194 | "SecurityGroupIngress") 195 | 196 | if "source_security_group_name" in kwargs and "source_security_group_id" in kwargs: 197 | raise RespawnResourceError("Source Security Group Name (source_security_group_name) or Source Security " 198 | "Group Id (source_security_group_id) cannot both be specified.", 199 | "SecurityGroupIngress") 200 | 201 | super(SecurityGroupIngress, self).__init__() 202 | 203 | self['FromPort'] = from_port 204 | self['IpProtocol'] = ip_protocol 205 | self['ToPort'] = to_port 206 | 207 | if "cidr_ip" in kwargs: 208 | self['CidrIp'] = kwargs.get("cidr_ip") 209 | if "source_security_group_name" in kwargs: 210 | self['SourceSecurityGroupName'] = kwargs.get("source_security_group_name") 211 | if "source_security_group_id" in kwargs: 212 | self['SourceSecurityGroupId'] = kwargs.get("source_security_group_id") 213 | if "source_security_group_owner_id" in kwargs: 214 | self['SourceSecurityGroupOwnerId'] = kwargs.get("source_security_group_owner_id") 215 | 216 | 217 | class SecurityGroupEgress(core.JSONableDict): 218 | def __init__( 219 | self, 220 | from_port, 221 | ip_protocol, 222 | to_port, 223 | **kwargs 224 | ): 225 | """ 226 | Create Security Group Egress 227 | 228 | :param from_port: String 229 | :param ip_protocol: String 230 | :param to_port: String 231 | 232 | kwargs 233 | - cidr_ip: String 234 | - destination_security_group_id: String 235 | """ 236 | 237 | if "cidr_ip" in kwargs and "destination_security_group_id" in kwargs: 238 | raise RespawnResourceError("Destination Security Group Id (destination_security_group_id) cannot be " 239 | "specified with Cidr IP (cidr_ip)", 240 | "SecurityGroupEgress") 241 | 242 | super(SecurityGroupEgress, self).__init__() 243 | 244 | self['FromPort'] = from_port 245 | self['IpProtocol'] = ip_protocol 246 | self['ToPort'] = to_port 247 | 248 | if "cidr_ip" in kwargs: 249 | self['CidrIp'] = kwargs.get("cidr_ip") 250 | if "destination_security_group_id" in kwargs: 251 | self['DestinationSecurityGroupId'] = kwargs.get("destination_security_group_id") 252 | 253 | 254 | class Tag(core.JSONableDict): 255 | """ 256 | Create EC2 Tag 257 | 258 | :param key: String 259 | :param value: String 260 | """ 261 | 262 | def __init__( 263 | self, 264 | key, 265 | value 266 | ): 267 | super(Tag, self).__init__() 268 | self['Key'] = key 269 | self['Value'] = value 270 | 271 | 272 | class Instance(core.Resource): 273 | """ 274 | Creates an EC2 Instance 275 | 276 | :param name: String 277 | :param ami_id: String 278 | 279 | kwargs 280 | - availability_zone: String 281 | - block_devices: [ 
BlockDeviceMapping , ... ] 282 | - disable_api_termination: Boolean 283 | - ebs_optimized: Boolean 284 | - iam_role: String 285 | - instance_shutdown_behavior: String 286 | - instance_type: String 287 | - kernel_id: String 288 | - key_pair: String 289 | - monitoring: Boolean 290 | - network_interfaces: [ EmbeddedNetworkInterface, ... ] 291 | - placement_group: String 292 | - private_ip: String 293 | - ramdisk_id: String 294 | - security_group_ids: [ String, ... ] 295 | - security_groups: [ String, ... ] 296 | - source_dest_check: Boolean 297 | - subnet: String 298 | - tags: [ Tag, ... ] 299 | - tenancy: String 300 | - user_data_script: String 301 | - volumes: [ MountPoint, ...] 302 | - attributes: { key: value, ... } 303 | """ 304 | 305 | def __init__( 306 | self, 307 | name, 308 | ami_id, 309 | **kwargs 310 | ): 311 | properties = { 312 | 'ImageId': ami_id, 313 | } 314 | 315 | if 'block_devices' in kwargs: 316 | devices = kwargs.get('block_devices') 317 | block_devices = [] 318 | for device, args in devices.items(): 319 | if 'ebs' in args: 320 | args['ebs'] = dict(BlockDevice(**args['ebs'])) 321 | block_devices.append(dict(BlockDeviceMapping(device, **args))) 322 | properties['BlockDeviceMappings'] = block_devices 323 | 324 | if 'network_interfaces' in kwargs: 325 | interfaces = kwargs.get('network_interfaces') 326 | network_interfaces_list = [] 327 | for interface, args in interfaces.items(): 328 | if 'private_ips' in args: 329 | private_ips = args['private_ips'] 330 | for i in range(len(private_ips)): 331 | private_ips[i] = dict(PrivateIpSpecification(**private_ips[i])) 332 | network_interfaces_list.append(dict(EmbeddedNetworkInterface(description=interface, **args))) 333 | properties['NetworkInterfaces'] = network_interfaces_list 334 | 335 | if 'volumes' in kwargs: 336 | volumes = kwargs.get('volumes') 337 | for i in range(len(volumes)): 338 | volumes[i] = dict(MountPoint(**volumes[i])) 339 | properties['Volumes'] = volumes 340 | 341 | if 'tags' in kwargs: 342 | t = kwargs.get('tags') 343 | tags = [] 344 | for tag in t: 345 | tags.append(dict(Tag(**tag))) 346 | properties['Tags'] = tags 347 | 348 | if "availability_zone" in kwargs: 349 | properties['AvailabilityZone'] = kwargs.get("availability_zone") 350 | if "disable_api_termination" in kwargs: 351 | properties['DisableApiTermination'] = kwargs.get("disable_api_termination") # default=False 352 | if "ebs_optimized" in kwargs: 353 | properties['EbsOptimized'] = kwargs.get("ebs_optimized") # default=False 354 | if "iam_role" in kwargs: 355 | properties['IamInstanceProfile'] = kwargs.get("iam_role") 356 | if "instance_shutdown_behavior" in kwargs: 357 | properties['InstanceInitiatedShutdownBehavior'] = kwargs.get("instance_shutdown_behavior") 358 | if "instance_type" in kwargs: 359 | properties['InstanceType'] = kwargs.get("instance_type") 360 | if "kernel_id" in kwargs: 361 | properties['KernelId'] = kwargs.get("kernel_id") 362 | if "key_pair" in kwargs: 363 | properties['KeyName'] = kwargs.get("key_pair") 364 | if "monitoring" in kwargs: 365 | properties['Monitoring'] = kwargs.get("monitoring") # default=False 366 | if "placement_group" in kwargs: 367 | properties['PlacementGroupName'] = kwargs.get("placement_group") 368 | if "private_ip" in kwargs: 369 | properties['PrivateIpAddress'] = kwargs.get("private_ip") 370 | if "ramdisk_id" in kwargs: 371 | properties['RamdiskId'] = kwargs.get("ramdisk_id") 372 | if "security_group_ids" in kwargs: 373 | properties['SecurityGroupIds'] = kwargs.get("security_group_ids") 374 | if 
"security_groups" in kwargs: 375 | properties['SecurityGroups'] = kwargs.get("security_groups") 376 | if "source_dest_check" in kwargs: 377 | properties['SourceDestCheck'] = kwargs.get("source_dest_check") # default=True 378 | if "subnet" in kwargs: 379 | properties['SubnetId'] = kwargs.get("subnet") 380 | if "tenancy" in kwargs: 381 | properties['Tenancy'] = kwargs.get("tenancy") # default="default" 382 | if "user_data_script" in kwargs: 383 | properties['UserData'] = functions.base64(kwargs.get("user_data_script")) 384 | 385 | attributes = kwargs.get("attributes") 386 | 387 | super(Instance, self).__init__(name, 'AWS::EC2::Instance', properties, attributes) 388 | 389 | 390 | class Volume(core.Resource): 391 | """ 392 | Creates an EC2 Volume 393 | 394 | :param name: String 395 | :param availability_zone: String 396 | 397 | kwargs 398 | - encrypted: Boolean 399 | - iops: Integer 400 | - kms_key_id: String 401 | - size: String 402 | - snapshot_id: String 403 | - tags: [ Tag, ...] 404 | - volume_type: String 405 | - attributes: { key: value, ... } 406 | """ 407 | 408 | def __init__( 409 | self, 410 | name, 411 | availability_zone, 412 | **kwargs 413 | ): 414 | if kwargs.get("volume_type") == "io1" and "iops" not in kwargs: 415 | raise RespawnResourceError("Iops not specified for VolumeType of io1.", 416 | "Volume", name) 417 | 418 | if "snapshot_id" not in kwargs and "size" not in kwargs: 419 | raise RespawnResourceError("Size of Volume not specified.", 420 | "Volume", name) 421 | 422 | attributes = kwargs.get("attributes", dict()) 423 | 424 | properties = { 425 | 'AvailabilityZone': availability_zone, 426 | } 427 | 428 | if 'tags' in kwargs: 429 | t = kwargs.get('tags') 430 | tags = [] 431 | for tag in t: 432 | tags.append(Tag(**tag)) 433 | properties['Tags'] = tags 434 | 435 | if "encrypted" in kwargs: 436 | properties['Encrypted'] = kwargs.get("encrypted") # default=False 437 | if "iops" in kwargs: 438 | properties['Iops'] = kwargs.get("iops") 439 | if "kms_key_id" in kwargs: 440 | properties['KmsKeyId'] = kwargs.get("kms_key_id") 441 | if "size" in kwargs: 442 | properties['Size'] = kwargs.get("size") 443 | if "snapshot_id" in kwargs: 444 | properties['SnapshotId'] = kwargs.get("snapshot_id") 445 | if "volume_type" in kwargs: 446 | properties['VolumeType'] = kwargs.get("volume_type") 447 | if "DeletionPolicy" not in attributes: 448 | attributes['DeletionPolicy'] = kwargs.get("deletion_policy", "Delete") # Delete, Retain, Snapshot 449 | 450 | super(Volume, self).__init__(name, 'AWS::EC2::Volume', properties, attributes) 451 | 452 | 453 | class NetworkInterface(core.Resource): 454 | """ 455 | Creates an EC2 Network Interface 456 | 457 | :param name: String 458 | :param subnet_id: String 459 | 460 | kwargs 461 | - description: String 462 | - group_set: [ String, ... ] 463 | - private_ip: String 464 | - private_ips: [ PrivateIpSpecification, ... ] 465 | - secondary_private_ip_count: Integer 466 | - source_dest_check: Boolean 467 | - tags: [ Tag, ...] 
468 | """ 469 | 470 | def __init__( 471 | self, 472 | name, 473 | subnet_id, 474 | **kwargs 475 | ): 476 | 477 | attributes = kwargs.get("attributes", dict()) 478 | 479 | properties = { 480 | 'SubnetId': subnet_id 481 | } 482 | 483 | if 'description' in kwargs: 484 | properties['Description'] = kwargs.get('description') 485 | if 'group_set' in kwargs: 486 | properties['GroupSet'] = kwargs.get('group_set') 487 | if 'private_ip' in kwargs: 488 | properties['PrivateIpAddress'] = kwargs.get('private_ip') 489 | if 'private_ips' in kwargs: 490 | private_ips = kwargs.get('private_ips') 491 | for i in range(len(private_ips)): 492 | private_ips[i] = dict(PrivateIpSpecification(**private_ips[i])) 493 | properties['PrivateIpAddresses'] = private_ips 494 | if 'secondary_private_ip_count' in kwargs: 495 | properties['SecondaryPrivateIpAddressCount'] = kwargs.get('secondary_private_ip_count') 496 | if 'source_dest_check' in kwargs: 497 | properties['SourceDestCheck'] = kwargs.get('source_dest_check') 498 | if 'tags' in kwargs: 499 | t = kwargs.get('tags') 500 | tags = [] 501 | for tag in t: 502 | tags.append(Tag(**tag)) 503 | properties['Tags'] = tags 504 | 505 | super(NetworkInterface, self).__init__(name, 'AWS::EC2::NetworkInterface', properties, attributes) 506 | 507 | 508 | class NetworkInterfaceAttachment(core.Resource): 509 | """ 510 | Creates an EC2 Network Interface Attachment 511 | 512 | :param name: String 513 | :param device_index: String 514 | :param instance_id: String 515 | :param network_interface_id: String 516 | 517 | kwargs 518 | - delete_on_termination: Boolean 519 | """ 520 | def __init__( 521 | self, 522 | name, 523 | device_index, 524 | instance_id, 525 | network_interface_id, 526 | **kwargs 527 | ): 528 | 529 | attributes = kwargs.get("attributes", dict()) 530 | 531 | properties = { 532 | 'DeviceIndex': device_index, 533 | 'InstanceId': instance_id, 534 | 'NetworkInterfaceId': network_interface_id 535 | } 536 | 537 | if "delete_on_termination" in kwargs: 538 | properties['DeleteOnTermination'] = kwargs.get("delete_on_termination") # default=False 539 | 540 | super(NetworkInterfaceAttachment, self).__init__(name, 'AWS::EC2::NetworkInterfaceAttachment', 541 | properties, attributes) 542 | 543 | 544 | class SecurityGroup(core.Resource): 545 | """ 546 | Creates a Security Group 547 | 548 | :param name: String 549 | :param group_description: String 550 | 551 | kwargs 552 | - security_group_egress: [ Security Group Rule, ... ] 553 | - security_group_ingress: [ Security Group Rule, ... ] 554 | - tags: [ Tag, ... 
] 555 | - vpc_id: String 556 | """ 557 | 558 | def __init__( 559 | self, 560 | name, 561 | group_description, 562 | **kwargs 563 | ): 564 | attributes = kwargs.get("attributes", dict()) 565 | 566 | properties = { 567 | 'GroupDescription': group_description 568 | } 569 | 570 | if "security_group_egress" in kwargs: 571 | security_group_egress = [] 572 | for sg in kwargs.get("security_group_egress"): 573 | security_group_egress.append(SecurityGroupEgress(**sg)) 574 | properties['SecurityGroupEgress'] = security_group_egress 575 | 576 | if "security_group_ingress" in kwargs: 577 | security_group_ingress = [] 578 | for sg in kwargs.get("security_group_ingress"): 579 | security_group_ingress.append(SecurityGroupIngress(**sg)) 580 | properties['SecurityGroupIngress'] = security_group_ingress 581 | 582 | if 'tags' in kwargs: 583 | t = kwargs.get('tags') 584 | tags = [] 585 | for tag in t: 586 | tags.append(Tag(**tag)) 587 | properties['Tags'] = tags 588 | 589 | if "vpc_id" in kwargs: 590 | properties['VpcId'] = kwargs.get("vpc_id") 591 | 592 | super(SecurityGroup, self).__init__(name, 'AWS::EC2::SecurityGroup', properties, attributes) 593 | -------------------------------------------------------------------------------- /respawn/elb.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | from respawn import util 3 | from errors import RespawnResourceError 4 | 5 | 6 | class AccessLoggingPolicy(util.SetNonEmptyPropertyMixin, core.JSONableDict): 7 | """ 8 | The AccessLoggingPolicy property captures detailed information for all requests made to your load balancer. 9 | 10 | kwargs 11 | - emit_interval: Integer 12 | - enabled: Boolean 13 | - s3_bucket_name: String 14 | - s3_bucket_prefix: String 15 | """ 16 | # ---------------------------------------------------------------------------------------------------------- 17 | # AccessLoggingPolicy 18 | # ---------------------------------------------------------------------------------------------------------- 19 | def __init__(self, **kwargs): 20 | super(AccessLoggingPolicy, self).__init__(None, 'AccessLoggingPolicy') 21 | self._set_property('EmitInterval', kwargs.get('emit_interval')) 22 | self._set_property('Enabled', kwargs.get('enabled')) 23 | self._set_property('S3BucketName', kwargs.get('s3_bucket_name')) 24 | self._set_property('S3BucketPrefix', kwargs.get('s3_bucket_prefix')) 25 | 26 | 27 | class AppCookieStickinessPolicy(util.SetNonEmptyPropertyMixin, core.JSONableDict): 28 | """ 29 | Generates one or more stickiness policies with sticky session lifetimes that follow that of an application-generated 30 | cookie. These policies can be associated only with HTTP/HTTPS listeners. 
31 | 32 | kwargs 33 | - cookie_name: String 34 | - policy_name: String 35 | 36 | """ 37 | # ---------------------------------------------------------------------------------------------------------- 38 | # AppCookieStickinessPolicy 39 | # ---------------------------------------------------------------------------------------------------------- 40 | def __init__(self, **kwargs): 41 | super(AppCookieStickinessPolicy, self).__init__(None, 'AppCookieStickinessPolicy') 42 | self._set_property('CookieName', kwargs.get('cookie_name')) 43 | self._set_property('PolicyName', kwargs.get('policy_name')) 44 | 45 | 46 | class AvailabilityZones(core.JSONableDict): 47 | pass 48 | 49 | 50 | class ConnectionDrainingPolicy(util.SetNonEmptyPropertyMixin, core.JSONableDict): 51 | """ 52 | Decides whether deregistered or unhealthy instances can complete all in-flight requests. 53 | 54 | :param enabled: Boolean 55 | 56 | kwargs 57 | - timeout : Integer 58 | """ 59 | # ---------------------------------------------------------------------------------------------------------- 60 | # ConnectionDrainingPolicy 61 | # ---------------------------------------------------------------------------------------------------------- 62 | def __init__(self, **kwargs): 63 | super(ConnectionDrainingPolicy, self).__init__(None, 'ConnectionDrainingPolicy') 64 | self._set_property('Enabled', kwargs.get('enabled')) 65 | self._set_property('Timeout', kwargs.get('timeout')) 66 | 67 | 68 | class ConnectionSettings(util.SetNonEmptyPropertyMixin, core.JSONableDict): 69 | """ 70 | Specifies how long front-end and back-end connections of your load balancer can remain idle. 71 | 72 | :param idle_timeout: Integer 73 | """ 74 | # ---------------------------------------------------------------------------------------------------------- 75 | # ConnectionSettings 76 | # ---------------------------------------------------------------------------------------------------------- 77 | def __init__(self, idle_timeout=60, **kwargs): 78 | super(ConnectionSettings, self).__init__(dict(IdleTimeout=idle_timeout)) 79 | self._set_property('IdleTimeout', kwargs.get('idle_timeout')) 80 | 81 | 82 | class HealthCheck(core.JSONableDict): 83 | """ 84 | Creates application health check for the instances. 85 | 86 | :param healthy_threshold: String 87 | :param interval: String 88 | :param target: String 89 | :param timeout: String 90 | :param unhealthy_threshold: String 91 | """ 92 | # ---------------------------------------------------------------------------------------------------------- 93 | # HealthCheck 94 | # ---------------------------------------------------------------------------------------------------------- 95 | 96 | def __init__(self, **kwargs): 97 | super(HealthCheck, self).__init__(None, 'HealthCheck') 98 | self['HealthyThreshold'] = kwargs.get('healthy_threshold', 3) 99 | self['Interval'] = kwargs.get('interval', 30) 100 | self['Timeout'] = kwargs.get('timeout', 5) 101 | self['UnhealthyThreshold'] = kwargs.get('unhealthy_threshold', 3) 102 | self['Target'] = kwargs.get('target') 103 | 104 | 105 | class Listener(util.SetNonEmptyPropertyMixin, core.JSONableDict): 106 | """ 107 | One or more listeners for this load balancer. Each listener must be registered for a specific port, 108 | and you cannot have more than one listener for a given port. 109 | 110 | :param instance_port: String 111 | :param load_balancer_port: String 112 | :param protocol: String 113 | 114 | kwargs 115 | - instance_protocol: String 116 | - policy_names: [ String, ... 
] 117 | - sSL_certificate_id: String 118 | """ 119 | # ---------------------------------------------------------------------------------------------------------- 120 | # Listener 121 | # ---------------------------------------------------------------------------------------------------------- 122 | def __init__(self, **kwargs): 123 | super(Listener, self).__init__(None, 'Listener') 124 | self._set_property("InstancePort", kwargs.get('InstancePort')) 125 | self._set_property("InstanceProtocol", kwargs.get('InstanceProtocol')) 126 | self._set_property("LoadBalancerPort", kwargs.get('LoadBalancerPort')) 127 | self._set_property("Protocol", kwargs.get('Protocol')) 128 | self._set_property("PolicyNames", kwargs.get('PolicyNames')) 129 | self._set_property("SSLCertificateId", kwargs.get('SSLCertificateId')) 130 | 131 | 132 | class LBCookieStickinessPolicy(util.SetNonEmptyPropertyMixin, core.JSONableDict): 133 | """ 134 | Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser 135 | (user-agent), or by a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners. 136 | 137 | kwargs 138 | - cookie_expiration_period: String 139 | - policy_name: String 140 | """ 141 | # ---------------------------------------------------------------------------------------------------------- 142 | # LBCookieStickinessPolicy 143 | # ---------------------------------------------------------------------------------------------------------- 144 | def __init__(self, **kwargs): 145 | super(LBCookieStickinessPolicy, self).__init__(None, 'LBCookieStickinessPolicy') 146 | self._set_property("PolicyName", kwargs.get('policy_name')) 147 | self._set_property("CookieExpirationPeriod", kwargs.get('cookie_expiration_period')) 148 | 149 | 150 | class Policies(util.SetNonEmptyPropertyMixin, core.JSONableDict): 151 | """ 152 | A list of elastic load balancing policies to apply to this elastic load balancer. 153 | 154 | :param policy_name: String 155 | :param policy_type: String 156 | 157 | kwargs 158 | - attributes : [ { "Name" : String, "Value" : String }, ... ] 159 | - instance_ports : [ String, ... ] 160 | - load_balancer_ports" : [ String, ... 
] 161 | - policy_name : String 162 | - Policy_type : String 163 | """ 164 | # ---------------------------------------------------------------------------------------------------------- 165 | # Policies 166 | # ---------------------------------------------------------------------------------------------------------- 167 | def __init__(self, **kwargs): 168 | super(Policies, self).__init__(None, 'Policies') 169 | self._set_property('PolicyName', kwargs.get('policy_name')) 170 | self._set_property('Attribute', transform_attribute(kwargs.get('attribute'))) 171 | self._set_property('InstancePorts', kwargs.get('instance_ports')) 172 | self._set_property('LoadBalancerPorts', kwargs.get('load_balancer_ports')) 173 | self._set_property('PolicyType', kwargs.get('policy_type')) 174 | 175 | 176 | class ProtocolListener(Listener): 177 | # ---------------------------------------------------------------------------------------------------------- 178 | # ProtocolListener 179 | # ---------------------------------------------------------------------------------------------------------- 180 | def __init__(self, protocol, port, egres_port, certificateId=None, **protocol_values): 181 | protocol_dict = {"http": "HTTP", "https": "HTTPS", "tcp": "TCP", "ssl": "SSL"} 182 | if protocol_values.get('instance_protocol') is not None: 183 | protocol = protocol_dict[protocol] 184 | kwargs = dict(Protocol=protocol, 185 | LoadBalancerPort=port, 186 | InstancePort=egres_port, 187 | InstanceProtocol=protocol_values.get('instance_protocol'), 188 | SSLCertificateId=certificateId, 189 | PolicyNames=protocol_values.get('policy_names') 190 | ) 191 | else: 192 | protocol = protocol_dict[protocol] 193 | kwargs = dict(Protocol=protocol, 194 | LoadBalancerPort=port, 195 | InstancePort=egres_port, 196 | InstanceProtocol=protocol_values.get('instance_protocol'), 197 | SSLCertificateId=certificateId, 198 | PolicyNames=protocol_values.get('policy_names') 199 | ) 200 | super(ProtocolListener, self).__init__(**kwargs) 201 | 202 | 203 | class Tags(util.SetNonEmptyPropertyMixin, core.JSONableDict): 204 | """ 205 | An arbitrary set of tags (key-value pairs) for this load balancer. 206 | 207 | :param key: String 208 | :param value: String 209 | """ 210 | # ---------------------------------------------------------------------------------------------------------- 211 | # Tags 212 | # ---------------------------------------------------------------------------------------------------------- 213 | def __init__(self, **kwargs): 214 | super(Tags, self).__init__(None, 'Tags') 215 | self._set_property('Key', kwargs.get('key')) 216 | self._set_property('Value', kwargs.get('value')) 217 | 218 | 219 | class LoadBalancerProperties(util.SetNonEmptyPropertyMixin, core.JSONableDict): 220 | """ 221 | Available keyword arguments. 
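Each keyword maps to the corresponding ELB property set below:

kwargs
 - access_logging_policy: { emit_interval, enabled, s3_bucket_name, s3_bucket_prefix }
 - app_cookie_stickiness_policy: [ AppCookieStickinessPolicy, ... ]
 - availability_zones: [ String, ... ] (cannot be combined with subnets)
 - connection_draining_policy: { enabled, timeout }
 - connection_settings: { idle_timeout }
 - cross_zone: Boolean
 - health_check: { target, interval, timeout, healthy_threshold, unhealthy_threshold }
 - instances: [ String, ... ]
 - lb_cookie_stickiness_policy: [ LBCookieStickinessPolicy, ... ]
 - listeners: [ Listener, ... ] (required)
 - load_balancer_name: String
 - policies: [ Policies, ... ]
 - scheme: String
 - security_groups: [ String, ... ]
 - subnets: [ String, ... ]
 - tags: [ Tags, ... ]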
222 | """ 223 | # ---------------------------------------------------------------------------------------------------------- 224 | # LoadBalancerProperties 225 | # ---------------------------------------------------------------------------------------------------------- 226 | def __init__(self, **kwargs): 227 | super(LoadBalancerProperties, self).__init__(None, 'Properties') 228 | 229 | # AccessLoggingPolicy : Default is none 230 | if kwargs.get('access_logging_policy') is not None: 231 | self._set_property('AccessLoggingPolicy', AccessLoggingPolicy(**kwargs.get('access_logging_policy'))) 232 | 233 | # AppCookieStickinessPolicy : Default is none 234 | self._set_property('AppCookieStickinessPolicy', kwargs.get('app_cookie_stickiness_policy')) 235 | 236 | # AvailabilityZones : Zones (use Subnets instead) - You can specify 237 | # the AvailabilityZones or Subnets property but not both. 238 | self._set_property('AvailabilityZones', kwargs.get('availability_zones')) 239 | 240 | # ConnectionDrainingPolicy : Default is none 241 | if kwargs.get('connection_draining_policy') is not None: 242 | self._set_property('ConnectionDrainingPolicy', ConnectionDrainingPolicy(**kwargs.get( 243 | 'connection_draining_policy'))) 244 | 245 | # ConnectionSettings : Default is 60 seconds 246 | if kwargs.get('connection_settings') is not None: 247 | self._set_property('ConnectionSettings', ConnectionSettings(**kwargs.get('connection_settings'))) 248 | 249 | # CrossZone : Default is True for Cross AZ Load balancing 250 | self._set_property('CrossZone', kwargs.get('cross_zone', False)) 251 | 252 | # HealthCheck 253 | if kwargs.get('health_check') is not None: 254 | self._set_property('HealthCheck', HealthCheck(**kwargs.get('health_check'))) 255 | 256 | # Instances 257 | self._set_property('Instances', kwargs.get('instances')) 258 | 259 | # LBCookieStickinessPolicy 260 | self._set_property('LBCookieStickinessPolicy', kwargs.get('lb_cookie_stickiness_policy')) 261 | 262 | # LoadBalancerName : If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses 263 | # that ID for the load balancer. 
264 | self._set_property('LoadBalancerName', kwargs.get('load_balancer_name')) 265 | 266 | # Listeners : 267 | if kwargs.get('listeners') is not None: 268 | self._set_property('Listeners', kwargs.get('listeners')) 269 | else: 270 | raise RespawnResourceError("listeners parameter is required for creating an ELB", 'Listeners') 271 | 272 | # Policies : Default is none 273 | self._set_property('Policies', kwargs.get('policies')) 274 | 275 | # Scheme : 276 | self._set_property('Scheme', kwargs.get('scheme')) 277 | 278 | # SecurityGroups : Default is none 279 | self._set_property('SecurityGroups', kwargs.get('security_groups')) 280 | 281 | # Subnets : 282 | self._set_property('Subnets', kwargs.get('subnets')) 283 | 284 | # Tags : 285 | self._set_property('Tags', kwargs.get('tags')) 286 | 287 | 288 | class LoadBalancer(core.Resource): 289 | # ---------------------------------------------------------------------------------------------------------- 290 | # LoadBalancer 291 | # ---------------------------------------------------------------------------------------------------------- 292 | def __init__(self, name, **kwargs): 293 | super(LoadBalancer, self).__init__(name, 'AWS::ElasticLoadBalancing::LoadBalancer') 294 | 295 | env = kwargs.get('env') 296 | if env is None: 297 | raise RespawnResourceError("env parameter is required for properly tagging an ELB", 'Environment') 298 | 299 | availibility_zone = kwargs.get("availability_zones") 300 | 301 | subnets = kwargs.get("subnets") 302 | if availibility_zone is not None and subnets is not None: 303 | raise RespawnResourceError("You can specify the AvailabilityZones or Subnets property in the load " 304 | "balancer, but not both.", 'AvailabilityZone/Subnets', subnets) 305 | elif availibility_zone is not None and subnets is None: 306 | kwargs['availibility_zones'] = recurse_kwargs_list('availibility_zones', AvailabilityZones, **kwargs) 307 | else: 308 | # handle subnets here 309 | pass 310 | 311 | kwargs['tags'] = recurse_kwargs_list('tags', Tags, **kwargs) 312 | kwargs['app_cookie_stickiness_policy'] = recurse_kwargs_list('app_cookie_stickiness_policy', 313 | AppCookieStickinessPolicy, **kwargs) 314 | kwargs['lb_cookie_stickiness_policy'] = recurse_kwargs_list('lb_cookie_stickiness_policy', 315 | LBCookieStickinessPolicy, **kwargs) 316 | kwargs['policies'] = recurse_kwargs_list('policies', Policies, **kwargs) 317 | self.Properties = LoadBalancerProperties(**kwargs) 318 | 319 | 320 | def transform_attribute(attribute_list): 321 | """ 322 | Transforms tag attributes. 323 | 324 | kwargs 325 | - name: String 326 | - value: String 327 | """ 328 | # ---------------------------------------------------------------------------------------------------------- 329 | # Transform attribute from lower case to upper case. 330 | # ---------------------------------------------------------------------------------------------------------- 331 | updated_attribute_list = [] 332 | for attribute_parameters in attribute_list: 333 | updated_attribute_list.append( 334 | {'Name': attribute_parameters.get('name'), 335 | 'Value': attribute_parameters.get('value')}) 336 | return updated_attribute_list 337 | 338 | 339 | def recurse_kwargs_list(parameter_name, class_name, **kwargs): 340 | """ 341 | Recurses through a list of kwargs. 
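For example, with parameter_name='tags' and class_name=Tags, a kwargs entry of
tags=[{'key': 'env', 'value': 'dev'}] is rebuilt as [Tags(key='env', value='dev')].
If parameter_name is not present in kwargs, the function falls through and returns None.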
342 | """ 343 | if parameter_name in kwargs: 344 | parameter_list = kwargs.get(parameter_name) 345 | param_list = [] 346 | for parameter in parameter_list: 347 | param_list.append(class_name(**parameter)) 348 | return param_list 349 | else: 350 | pass 351 | -------------------------------------------------------------------------------- /respawn/errors.py: -------------------------------------------------------------------------------- 1 | class RespawnResourceError(Exception): 2 | def __init__(self, error_string, resource_type, resource_name=""): 3 | super(RespawnResourceError, self).__init__(resource_type + ": " + resource_name + ": " + error_string) -------------------------------------------------------------------------------- /respawn/gen.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from cfn_pyplates import functions 3 | import respawn 4 | import sys 5 | 6 | 7 | def standardize_refs(d): 8 | """ 9 | Recursively transform all ref's and get_att's in dictionary to CloudFormation references. 10 | """ 11 | for k, v in d.iteritems(): 12 | if isinstance(v, dict): 13 | standardize_refs(v) 14 | elif isinstance(v, list): 15 | for i in range(len(v)): 16 | if isinstance(v[i], dict): 17 | standardize_refs(v[i]) 18 | elif isinstance(v[i], str): 19 | v[i] = transform_reference(v[i]) 20 | elif isinstance(v, str): 21 | d[k] = transform_reference(v) 22 | 23 | 24 | def transform_reference(v): 25 | """ 26 | Transform ref and ref_att in dictionary to CloudFormation ref or get_att 27 | """ 28 | if v.startswith('ref('): 29 | v = v[len('ref('):-1].strip() 30 | v = functions.ref(v) 31 | elif v.startswith('get_att('): 32 | v = [s.strip() for s in v[len('get_att('):-1].split(',')] 33 | v = functions.get_att(v[0], v[1]) 34 | return v 35 | 36 | 37 | # Initialize dictionary for resources 38 | resources = dict() 39 | 40 | # Build base template utilizing library 41 | cft = respawn.cloudformation.Template(**options) 42 | 43 | # Standardize all references 44 | standardize_refs(options) 45 | 46 | 47 | # ---------------------------------------------------------------------------------------------------------- 48 | # Load Balancers 49 | # ---------------------------------------------------------------------------------------------------------- 50 | try: 51 | if 'load_balancers' in options: 52 | for name, lb_opts in options['load_balancers'].items(): 53 | lb = cft.add_load_balancer(name, **lb_opts) 54 | resources[name] = lb 55 | except Exception as e: 56 | raise RuntimeError("Required arguments missing from Load Balancer: {0}: Exception: {1}".format(name, e)) 57 | 58 | 59 | # ---------------------------------------------------------------------------------------------------------- 60 | # Instances 61 | # ---------------------------------------------------------------------------------------------------------- 62 | try: 63 | if 'instances' in options: 64 | for name, instance_opts in options['instances'].items(): 65 | resources[name] = cft.add_instance(name, **instance_opts) 66 | except Exception as e: 67 | raise RuntimeError("Required arguments missing from Instance. 
Exception: {0}: Exception: {1}".format(name, e)) 68 | 69 | 70 | # ---------------------------------------------------------------------------------------------------------- 71 | # Volumes 72 | # ---------------------------------------------------------------------------------------------------------- 73 | try: 74 | if 'volumes' in options: 75 | volumes = options['volumes'] 76 | for name, volume_opts in volumes.items(): 77 | resources[name] = cft.add_volume(name, **volume_opts) 78 | except Exception as e: 79 | raise RuntimeError("Required arguments missing from Volume. Exception: {0}: Exception: {1}".format(name, e)) 80 | 81 | 82 | # ---------------------------------------------------------------------------------------------------------- 83 | # Auto-scaling Groups 84 | # ---------------------------------------------------------------------------------------------------------- 85 | try: 86 | if 'auto_scale_groups' in options: 87 | auto_scale_groups = options['auto_scale_groups'] 88 | for name, asg_opts in auto_scale_groups.items(): 89 | resources[name] = cft.add_autoscaling_group(name, **asg_opts) 90 | except Exception as e: 91 | raise RuntimeError("Required arguments missing from Autoscaling Group: {0}: Exception: {1}".format(name, e)) 92 | 93 | 94 | # ---------------------------------------------------------------------------------------------------------- 95 | # Launch Configurations 96 | # ---------------------------------------------------------------------------------------------------------- 97 | try: 98 | if 'launch_configurations' in options: 99 | launch_configurations = options['launch_configurations'] 100 | for name, lc_opts in launch_configurations.items(): 101 | resources[name] = cft.add_launch_config(name, **lc_opts) 102 | except Exception as e: 103 | raise RuntimeError("Required arguments missing from Launch Configuration: {0}: Exception: {1}".format(name, e)) 104 | 105 | 106 | # ---------------------------------------------------------------------------------------------------------- 107 | # Scheduled Actions 108 | # ---------------------------------------------------------------------------------------------------------- 109 | try: 110 | if 'scheduled_actions' in options: 111 | scheduled_actions = options['scheduled_actions'] 112 | for name, sa_opts in scheduled_actions.items(): 113 | resources[name] = cft.add_scheduled_action(name, **sa_opts) 114 | except Exception as e: 115 | raise RuntimeError("Required arguments missing from Scheduled Action: {0}: Exception: {1}".format(name, e)) 116 | 117 | 118 | # ---------------------------------------------------------------------------------------------------------- 119 | # Lifecycle Hooks 120 | # ---------------------------------------------------------------------------------------------------------- 121 | try: 122 | if 'lifecycle_hooks' in options: 123 | lifecycle_hooks = options['lifecycle_hooks'] 124 | for name, lh_opts in lifecycle_hooks.items(): 125 | resources[name] = cft.add_lifecycle_hook(name, **lh_opts) 126 | except Exception as e: 127 | raise RuntimeError("Required arguments missing from Lifecycle Hook: {0}: Exception: {1}".format(name, e)) 128 | 129 | 130 | # ---------------------------------------------------------------------------------------------------------- 131 | # RDS 132 | # ---------------------------------------------------------------------------------------------------------- 133 | try: 134 | if 'rds' in options: 135 | rds = options['rds'] 136 | for name, rds_opts in rds.items(): 137 | resources[name] = 
cft.add_rds_instance(name, **rds_opts) 138 | except Exception as e: 139 | raise RuntimeError("Required arguments missing from RDS: {0}: Exception: {1}".format(name, e)) 140 | 141 | 142 | # ---------------------------------------------------------------------------------------------------------- 143 | # CloudWatch 144 | # ---------------------------------------------------------------------------------------------------------- 145 | try: 146 | if 'cloud_watch' in options: 147 | for name, cloud_watch_opts in options['cloud_watch'].items(): 148 | resources[name] = cft.add_cloud_watch_alarm(name, **cloud_watch_opts) 149 | except Exception as e: 150 | raise RuntimeError("Required arguments missing from Cloud Watch: {0}: Exception: {1}".format(name, e)) 151 | 152 | 153 | # ---------------------------------------------------------------------------------------------------------- 154 | # Network Interfaces 155 | # ---------------------------------------------------------------------------------------------------------- 156 | try: 157 | if 'network_interfaces' in options: 158 | for name, network_interface_opts in options['network_interfaces'].items(): 159 | resources[name] = cft.add_network_interface(name, **network_interface_opts) 160 | except Exception as e: 161 | raise RuntimeError("Required arguments missing from Network Interface: {0}: Exception: {1}".format(name, e)) 162 | 163 | 164 | # ---------------------------------------------------------------------------------------------------------- 165 | # Network Interface Attachments 166 | # ---------------------------------------------------------------------------------------------------------- 167 | try: 168 | if 'network_interface_attachments' in options: 169 | for name, nia_opts in options['network_interface_attachments'].items(): 170 | resources[name] = cft.add_network_interface_attachment(name, **nia_opts) 171 | except Exception as e: 172 | raise RuntimeError("Required arguments missing from Network Interface Attachment: {0}: Exception: {1}".format(name, e)) 173 | 174 | # ---------------------------------------------------------------------------------------------------------- 175 | # Security Groups 176 | # ---------------------------------------------------------------------------------------------------------- 177 | try: 178 | if 'security_group' in options: 179 | for name, sg_opts in options['security_group'].items(): 180 | resources[name] = cft.add_security_group(name, **sg_opts) 181 | except Exception as e: 182 | raise RuntimeError("Required arguments missing from Security Group: {0}: Exception: {1}".format(name, e)) 183 | 184 | 185 | # ---------------------------------------------------------------------------------------------------------- 186 | # Parameters 187 | # ---------------------------------------------------------------------------------------------------------- 188 | try: 189 | if 'parameters' in options: 190 | for name, parameter_opts in options['parameters'].items(): 191 | cft.add_parameter(name, **parameter_opts) 192 | except Exception as e: 193 | raise RuntimeError("Required arguments missing from Parameters") 194 | 195 | 196 | # ---------------------------------------------------------------------------------------------------------- 197 | # SNS Topics 198 | # ---------------------------------------------------------------------------------------------------------- 199 | try: 200 | if 'sns_topics' in options: 201 | for name, sns_opts in options['sns_topics'].items(): 202 | resources[name] = cft.add_sns_topic(name, 
**sns_opts) 203 | except Exception as e: 204 | raise RuntimeError("Required arguments missing from SNS Topic: {0}: Exception: {1}".format(name, e)) 205 | 206 | 207 | # ---------------------------------------------------------------------------------------------------------- 208 | # Route53 Record 209 | # ---------------------------------------------------------------------------------------------------------- 210 | try: 211 | if 'record_set' in options: 212 | for name, values in options['record_set'].items(): 213 | resources[name] = cft.add_route53_record_set(name, **values) 214 | except Exception as e: 215 | raise RuntimeError("Required arguments missing from Route53 Record Set: {0}: Exception: {1}".format(name, e)) 216 | -------------------------------------------------------------------------------- /respawn/parameters.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | 3 | 4 | class CustomParameters(core.Parameter): 5 | def __init__( 6 | self, 7 | name, 8 | **kwargs 9 | ): 10 | parameters = {} 11 | if "default" in kwargs: 12 | parameters['Default'] = kwargs.get("default") 13 | if "type" in kwargs: 14 | parameters['Type'] = kwargs.get("type") 15 | if "allowed_values" in kwargs: 16 | parameters['AllowedValues'] = kwargs.get("allowed_values") 17 | if "description" in kwargs: 18 | parameters['Description'] = kwargs.get("description") 19 | if "no_echo" in kwargs: 20 | parameters['NoEcho'] = kwargs.get("no_echo") 21 | if "allowed_pattern" in kwargs and kwargs["type"] == "String": 22 | parameters['AllowedPattern'] = kwargs.get("allowed_pattern") 23 | if "max_length" in kwargs and kwargs["type"] == "String": 24 | parameters['MaxLength'] = kwargs.get("max_length") 25 | if "min_length" in kwargs and kwargs["type"] == "String": 26 | parameters['MinLength'] = kwargs.get("min_length") 27 | if "max_value" in kwargs and kwargs["type"] == "Number": 28 | parameters['MaxValue'] = kwargs.get("max_value") 29 | if "min_value" in kwargs and kwargs["type"] == "Number": 30 | parameters['MinValue'] = kwargs.get("min_value") 31 | if "constraint_description" in kwargs: 32 | parameters['ConstraintDescription'] = kwargs.get("constraint_description") 33 | 34 | super(CustomParameters, self).__init__(name, parameters['Type'], parameters) 35 | -------------------------------------------------------------------------------- /respawn/rds.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | from errors import RespawnResourceError 3 | 4 | 5 | class Tag(core.JSONableDict): 6 | """ 7 | Creates RDS Tag 8 | 9 | :param key: String 10 | :param value: String 11 | """ 12 | # ---------------------------------------------------------------------------------------------------------- 13 | # Tag 14 | # ---------------------------------------------------------------------------------------------------------- 15 | def __init__(self, 16 | key, 17 | value 18 | ): 19 | super(Tag, self).__init__() 20 | self['Key'] = key 21 | self['Value'] = value 22 | 23 | 24 | class DBInstance(core.Resource): 25 | """ 26 | Creates a Database Instance 27 | 28 | :param name: String 29 | :param allocated_storage: String 30 | :param instance_class: String 31 | 32 | kwargs 33 | - allow_major_version_upgrade: Boolean 34 | - auto_minor_version_upgrade: Boolean 35 | - availability_zone: String 36 | - backup_retention_period: String 37 | - character_set_name: String 38 | - instance_identifier: String 39 | - db_name: String 40 | - db_parameter_group_name:
String 41 | - db_security_groups: [ String, ... ] 42 | - snapshot_identifier: String 43 | - subnet_group_name: String 44 | - engine: String 45 | - engine_version: String 46 | - iops: Number 47 | - kms_key_id: String 48 | - license_model: String 49 | - master_username: String 50 | - master_password: String 51 | - multi_az: Boolean 52 | - option_group_name: String 53 | - port: String 54 | - preferred_backup_window: String 55 | - preferred_maintenance_window: String 56 | - publicly_accessible: Boolean 57 | - source_db_instance_identifier: String 58 | - storage_encrypted: Boolean 59 | - storage_type: String 60 | - tags: [ Tag, ... ] 61 | - vpc_security_groups: [ String, ... ] 62 | """ 63 | # ---------------------------------------------------------------------------------------------------------- 64 | # DB Instance Properties 65 | # ---------------------------------------------------------------------------------------------------------- 66 | def __init__( 67 | self, 68 | name, 69 | allocated_storage, 70 | instance_class, 71 | **kwargs 72 | ): 73 | if "snapshot_identifier" not in kwargs and "engine" not in kwargs: 74 | raise RespawnResourceError("Engine (engine) required if Snapshot Identifier (snapshot_identifier) not " 75 | "specified.", "DBSnapshotIdentifier/Engine") 76 | 77 | if kwargs.get("storage_type") == "io1" and "iops" not in kwargs: 78 | raise RespawnResourceError("Iops (iops) required for Storage Type (storage_type) io1.", "Iops") 79 | 80 | attributes = kwargs.get("attributes", dict()) 81 | 82 | properties = { 83 | 'AllocatedStorage': allocated_storage, 84 | 'DBInstanceClass': instance_class 85 | } 86 | 87 | if 'tags' in kwargs: 88 | t = kwargs.get('tags') 89 | tags = [] 90 | for tag in t: 91 | tags.append(Tag(**tag)) 92 | properties['Tags'] = tags 93 | 94 | if "allow_major_version_upgrade" in kwargs: 95 | properties['AllowMajorVersionUpgrade'] = kwargs.get("allow_major_version_upgrade") 96 | if "auto_minor_version_upgrade" in kwargs: 97 | properties['AutoMinorVersionUpgrade'] = kwargs.get("auto_minor_version_upgrade") 98 | if "availability_zone" in kwargs: 99 | properties['AvailabilityZone'] = kwargs.get("availability_zone") 100 | if "backup_retention_period" in kwargs: 101 | properties['BackupRetentionPeriod'] = kwargs.get("backup_retention_period") 102 | if "character_set_name" in kwargs: 103 | properties['CharacterSetName'] = kwargs.get("character_set_name") 104 | if "instance_identifier" in kwargs: 105 | properties['DBInstanceIdentifier'] = kwargs.get("instance_identifier") 106 | if "db_name" in kwargs: 107 | properties['DBName'] = kwargs.get("db_name") 108 | if "db_parameter_group_name" in kwargs: 109 | properties['DBParameterGroupName'] = kwargs.get("db_parameter_group_name") 110 | if "db_security_groups" in kwargs: 111 | properties['DBSecurityGroups'] = kwargs.get("db_security_groups") 112 | if "snapshot_identifier" in kwargs: 113 | properties['DBSnapshotIdentifier'] = kwargs.get("snapshot_identifier") 114 | if "subnet_group_name" in kwargs: 115 | properties['DBSubnetGroupName'] = kwargs.get("subnet_group_name") 116 | if "engine" in kwargs: 117 | properties['Engine'] = kwargs.get("engine") 118 | if "engine_version" in kwargs: 119 | properties['EngineVersion'] = kwargs.get("engine_version") 120 | if "iops" in kwargs: 121 | properties['Iops'] = kwargs.get("iops") 122 | if "kms_key_id" in kwargs: 123 | properties['KmsKeyId'] = kwargs.get("kms_key_id") 124 | if "license_model" in kwargs: 125 | properties['LicenseModel'] = kwargs.get("license_model") 126 | if "master_username" 
in kwargs: 127 | properties['MasterUsername'] = kwargs.get("master_username") 128 | if "master_user_password" in kwargs: 129 | properties['MasterUserPassword'] = kwargs.get("master_user_password") 130 | if "multi_az" in kwargs: 131 | properties['MultiAZ'] = kwargs.get("multi_az") 132 | if "option_group_name" in kwargs: 133 | properties['OptionGroupName'] = kwargs.get("option_group_name") 134 | if "port" in kwargs: 135 | properties['Port'] = kwargs.get("port") 136 | if "preferred_backup_window" in kwargs: 137 | properties['PreferredBackupWindow'] = kwargs.get("preferred_backup_window") 138 | if "preferred_maintenance_window" in kwargs: 139 | properties['PreferredMaintenanceWindow'] = kwargs.get("preferred_maintenance_window") 140 | if "publicly_accessible" in kwargs: 141 | properties['PubliclyAccessible'] = kwargs.get("publicly_accessible") 142 | if "source_db_instance_identifier" in kwargs: 143 | properties['SourceDBInstanceIdentifier'] = kwargs.get("source_db_instance_identifier") 144 | if "storage_encrypted" in kwargs: 145 | properties['StorageEncrypted'] = kwargs.get("storage_encrypted") 146 | if "storage_type" in kwargs: 147 | properties['StorageType'] = kwargs.get("storage_type") 148 | if "vpc_security_groups" in kwargs: 149 | properties['VPCSecurityGroups'] = kwargs.get("vpc_security_groups") 150 | 151 | super(DBInstance, self).__init__(name, 'AWS::RDS::DBInstance', properties, attributes) 152 | -------------------------------------------------------------------------------- /respawn/route53.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | from errors import RespawnResourceError 3 | 4 | 5 | class AliasTarget(core.JSONableDict): 6 | """ 7 | Creates an Alias Target 8 | :param dns_name: String 9 | :param hosted_zone_id: String 10 | kwargs 11 | - evaluate_target_health: Boolean 12 | """ 13 | def __init__( 14 | self, 15 | dns_name, 16 | hosted_zone_id, 17 | **kwargs 18 | ): 19 | super(AliasTarget, self).__init__() 20 | 21 | self['DNSName'] = dns_name 22 | self['HostedZoneId'] = hosted_zone_id 23 | if 'evaluate_target_health' in kwargs: 24 | self['evaluate_target_health'] = kwargs.get('evaluate_target_health') 25 | 26 | 27 | class GeoLocation(core.JSONableDict): 28 | """ 29 | Creates a GeoLocation 30 | kwargs 31 | - continent_code: String 32 | - country_code: String 33 | - subdivision_code: String 34 | """ 35 | def __init__( 36 | self, 37 | **kwargs 38 | ): 39 | super(GeoLocation, self).__init__() 40 | 41 | if 'continent_code' in kwargs: 42 | self['ContinentCode'] = kwargs.get('continent_code') 43 | if 'country_code' in kwargs: 44 | self['CountryCode'] = kwargs.get('country_code') 45 | if 'subdivision_code' in kwargs: 46 | self['SubdivisionCode'] = kwargs.get('subdivision_code') 47 | 48 | 49 | class RecordSet(core.Resource): 50 | """ 51 | Creates a Route53 RecordSet 52 | :param name: String 53 | :param domain_name: String 54 | kwargs 55 | - alias_target: AliasTarget 56 | - failover: String 57 | - geolocation: [ GeoLocation, ... ] 58 | - health_check_id: String 59 | - hosted_zone_id: String 60 | - hosted_zone_name: String 61 | - region: String, 62 | - resource_records: [ String, ...] 
63 | - set_identifier: String 64 | - ttl: String 65 | - type: String 66 | - weight: String 67 | """ 68 | 69 | def __init__( 70 | self, 71 | name, 72 | domain_name, 73 | **kwargs 74 | ): 75 | if "alias_target" in kwargs and "ttl" in kwargs: 76 | raise RespawnResourceError("TTL (ttl) cannot be specified with AliasTarget (alias_target).", name) 77 | 78 | if "hosted_zone_id" in kwargs and "hosted_zone_name" in kwargs: 79 | raise RespawnResourceError("Only one of HostedZoneId (hosted_zone_id) or HostedZoneName (hosted_zone_name) " 80 | "can be specified.", name) 81 | 82 | attributes = kwargs.get("attributes", dict()) 83 | 84 | properties = { 85 | 'Name': domain_name, 86 | } 87 | 88 | if "alias_target" in kwargs: 89 | properties['AliasTarget'] = AliasTarget(**kwargs.get("alias_target")) 90 | if "failover" in kwargs: 91 | properties['Failover'] = kwargs.get("failover") 92 | if "geolocation" in kwargs: 93 | geolocations = [GeoLocation(**geolocation) for geolocation in kwargs.get("geolocation")] 94 | properties['GeoLocation'] = geolocations 95 | if "health_check_id" in kwargs: 96 | properties['HealthCheckId'] = kwargs.get("health_check_id") 97 | if "hosted_zone_id" in kwargs: 98 | properties['HostedZoneId'] = kwargs.get("hosted_zone_id") 99 | if "hosted_zone_name" in kwargs: 100 | properties['HostedZoneName'] = kwargs.get("hosted_zone_name") 101 | if "region" in kwargs: 102 | properties['Region'] = kwargs.get("region") 103 | if "resource_records" in kwargs: 104 | properties['ResourceRecords'] = kwargs.get("resource_records") 105 | if "set_identifier" in kwargs: 106 | properties['SetIdentifier'] = kwargs.get("set_identifier") 107 | if "ttl" in kwargs: 108 | properties['TTL'] = kwargs.get("ttl") 109 | if "type" in kwargs: 110 | properties['Type'] = kwargs.get("type") # A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT 111 | if "weight" in kwargs: 112 | properties['Weight'] = kwargs.get("weight") 113 | 114 | super(RecordSet, self).__init__(name, 'AWS::Route53::RecordSet', properties, attributes) 115 | -------------------------------------------------------------------------------- /respawn/sns.py: -------------------------------------------------------------------------------- 1 | from cfn_pyplates import core 2 | from respawn import util 3 | 4 | 5 | class Subscription(util.SetNonEmptyPropertyMixin, core.JSONableDict): 6 | """ 7 | Subscription is an embedded property of the AWS::SNS::Topic resource that describes the 8 | subscription endpoints for a topic. 9 | 10 | :param endpoint: String 11 | :param protocol: String 12 | """ 13 | # ---------------------------------------------------------------------------------------------------------- 14 | # Subscription 15 | # ---------------------------------------------------------------------------------------------------------- 16 | def __init__(self, **kwargs): 17 | super(Subscription, self).__init__(None, 'Subscription') 18 | self._set_property('Endpoint', kwargs.get('endpoint')) 19 | self._set_property('Protocol', kwargs.get('protocol')) 20 | 21 | 22 | class SnsTopicProperties(util.SetNonEmptyPropertyMixin, core.JSONableDict): 23 | """ 24 | Keyword arguments available. 25 | 26 | kwargs 27 | - display_name : String, 28 | - subscription : [ SNS Subscription, ...
] 29 | - topic_name : String 30 | """ 31 | # ---------------------------------------------------------------------------------------------------------- 32 | # SNS Topic Core Properties 33 | # ---------------------------------------------------------------------------------------------------------- 34 | def __init__(self, **kwargs): 35 | super(SnsTopicProperties, self).__init__(None, 'Properties') 36 | 37 | ''' Available keyword arguments ''' 38 | 39 | # DisplayName : A developer-defined string that can be used to identify this SNS topic. 40 | self._set_property('DisplayName', kwargs.get('display_name')) 41 | 42 | # Subscription : The SNS subscriptions (endpoints) for this topic. 43 | self._set_property('Subscription', kwargs.get('subscription')) 44 | 45 | # TopicName : A name for the topic. If you don't specify a name, 46 | # AWS CloudFormation generates a unique physical ID and uses that ID for the topic name. 47 | self._set_property('TopicName', kwargs.get('topic_name')) 48 | 49 | 50 | class SnsTopic(core.Resource): 51 | """ 52 | Creates an Amazon SNS topic. 53 | """ 54 | # ---------------------------------------------------------------------------------------------------------- 55 | # SNS Topic Creation 56 | # ---------------------------------------------------------------------------------------------------------- 57 | def __init__(self, 58 | name, 59 | **kwargs 60 | ): 61 | super(SnsTopic, self).__init__(name, 'AWS::SNS::Topic') 62 | kwargs['subscription'] = recurse_kwargs_list('subscription', Subscription, **kwargs) 63 | self.Properties = SnsTopicProperties(**kwargs) 64 | 65 | 66 | def recurse_kwargs_list(parameter_name, class_name, **kwargs): 67 | """ 68 | Recurses through a list of kwargs. 69 | """ 70 | if parameter_name in kwargs: 71 | parameter_list = kwargs.get(parameter_name) 72 | param_list = [] 73 | for parameter in parameter_list: 74 | param_list.append(class_name(**parameter)) 75 | return param_list 76 | else: 77 | pass 78 | -------------------------------------------------------------------------------- /respawn/test/README.rst: -------------------------------------------------------------------------------- 1 | respawn tests 2 | ============ 3 | 4 | Test scripts are named ``test_xxx.py`` and use the `py.test `_ module. 
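A minimal test module follows the same pattern (an illustrative sketch, not a file that exists in the suite; the module name and sample values are hypothetical)::

    # respawn/test/test_example.py -- hypothetical file, shown for illustration only
    from respawn import sns

    def test_subscription():
        # Subscription maps the lower-case kwargs onto the CloudFormation property names
        assert sns.Subscription(endpoint='https://example.com', protocol='https') == {
            'Endpoint': 'https://example.com', 'Protocol': 'https'}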
5 | 6 | Dependencies 7 | ----------- 8 | 9 | Install:: 10 | 11 | pip install pytest coverage pytest-cov 12 | 13 | 14 | Execution 15 | --------- 16 | 17 | **If respawn has been built in-place** 18 | 19 | To run an individual test:: 20 | 21 | python respawn/test_image.py 22 | 23 | Run all the tests from the root of the respawn source distribution:: 24 | 25 | py.test --cov=respawn/ 26 | 27 | Or with coverage:: 28 | 29 | py.test --cov=respawn/ --cov-report term 30 | -------------------------------------------------------------------------------- /respawn/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dowjones/respawn/0cb5092de0cb9f6f3db6edc6c861862ff0552e37/respawn/test/__init__.py -------------------------------------------------------------------------------- /respawn/test/test_autoscaling.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from respawn import autoscaling 3 | 4 | 5 | def test_metrics_collection(): 6 | # Successful creation of metrics collection 7 | metrics_collection = autoscaling.MetricsCollection("sample-gran", **{"metrics": "sample-metrics"}) 8 | assert metrics_collection == {"Granularity": "sample-gran", "Metrics": "sample-metrics"} 9 | 10 | # Extra arguments 11 | with pytest.raises(TypeError): 12 | autoscaling.MetricsCollection("sample-gran", "Value1", True) 13 | 14 | 15 | def test_notification_configurations(): 16 | # Successful creation of notification configuration 17 | notification_configuration = autoscaling.NotificationConfigurations("sampleNotificationTypes", "sampleTopicArn") 18 | assert notification_configuration == ({"NotificationTypes": "sampleNotificationTypes", "TopicARN": 19 | "sampleTopicArn"}) 20 | 21 | 22 | def test_lifecycle_hooks(): 23 | # Successful creation of hooks 24 | lifecycle_hooks = autoscaling.LifecycleHook("sampleName", "sampleASG", "sampleLifeCycle", "sampleNotTarget", 25 | "sampleRoleARN", **{"default_result": "sampleDefaultResult", 26 | "heartbeat_timeout": "sampleHeartBeat", 27 | "notification_metadata": "sampleNotificationMet"}) 28 | assert lifecycle_hooks['Properties'] == { 29 | 'AutoScalingGroupName': "sampleASG", 30 | 'LifecycleTransition': "sampleLifeCycle", 31 | 'NotificationTargetARN': "sampleNotTarget", 32 | 'RoleARN': "sampleRoleARN", 33 | 'DefaultResult': "sampleDefaultResult", 34 | "HeartbeatTimeout": "sampleHeartBeat", 35 | "NotificationMetadata": "sampleNotificationMet" 36 | } 37 | 38 | 39 | def test_auto_scaling_policy(): 40 | # successful auto scaling policy creation 41 | autoscaling_policy = autoscaling.AutoScalingGroup("name", "max", "min", **{"metrics_collection": [{'granularity': 42 | '1Minute'}, 43 | {'metrics': [ 44 | 'Metric1', 45 | 'Metric2'], 46 | 'granularity': '1Minute'}], 47 | "notification_configs": [{ 48 | 'notification_type': [ 49 | 'Type1', 50 | 'Type2'], 51 | 'topic_arn': 'arn:aws:[service]:[' 52 | 'region]:[' 53 | 'account]:resourceType/resourcePath'}], 54 | "cooldown": "sampleCooldown", 55 | "health_check_grace_period": 56 | "sampleCheckGracePeriod", 57 | "health_check_type": 58 | "sampleHealthCheck", 59 | "instance_id": "sampleInstanceId", 60 | "placement_group": 61 | "samplePlacementGroup", 62 | "termination_policies": 63 | "sampleTerminationPolicy", 64 | "vpc_zone_identifier": "sampleVPC"}) 65 | assert autoscaling_policy['Properties'] == { 66 | "VPCZoneIdentifier": "sampleVPC", 67 | "PlacementGroup": "samplePlacementGroup", 68 | "NotificationConfigurations": [ 69 
| { 70 | "NotificationTypes": [ 71 | "Type1", 72 | "Type2" 73 | ], 74 | "TopicARN": "arn:aws:[service]:[region]:[account]:resourceType/resourcePath" 75 | } 76 | ], 77 | "InstanceId": "sampleInstanceId", 78 | "MinSize": "min", 79 | "MaxSize": "max", 80 | "Cooldown": "sampleCooldown", 81 | "TerminationPolicies": "sampleTerminationPolicy", 82 | "MetricsCollection": [ 83 | { 84 | "Granularity": "1Minute" 85 | }, 86 | { 87 | "Granularity": "1Minute", 88 | "Metrics": [ 89 | "Metric1", 90 | "Metric2" 91 | ] 92 | } 93 | ], 94 | "HealthCheckGracePeriod": "sampleCheckGracePeriod", 95 | "HealthCheckType": "sampleHealthCheck" 96 | } 97 | 98 | 99 | def test_scaling_policy(): 100 | # testing scaling policy 101 | scaling_policy = autoscaling.ScalingPolicy("name", "sampleAdjustT", "sampleASG", "sampleScaling", 102 | **{"cooldown": "sampleCool", "min_adjustment_step": "MinAdjStep"}) 103 | assert scaling_policy['Properties'] == { 104 | 'AdjustmentType': "sampleAdjustT", 105 | 'AutoScalingGroupName': "sampleASG", 106 | 'ScalingAdjustment': "sampleScaling", 107 | 'Cooldown': "sampleCool", 108 | 'MinAdjustmentStep': "MinAdjStep" 109 | } 110 | 111 | 112 | def test_launch_configuration(): 113 | # testing launch configuration 114 | launch_config = autoscaling.LaunchConfiguration("name", "sampleAMI", "sampleInstance", **{"classic_link_vpc_id": 115 | "sample_classic_link_vpc_id", 116 | "classic_link_vpc_security_groups": "sample_classicGroups", 117 | "instance_id": "sample_instance_id", 118 | "monitoring": 119 | "sample_monitoring", 120 | "kernel_id": 121 | "sample_kernel_id", 122 | "placement_tenancy": 123 | "sample_placementTenancy", 124 | "private_ip": "samplePIP", 125 | "ramdisk_id": "sampleRamdisk", 126 | "spot_price": 127 | "SampleSpotPrice", 128 | "user_data_script": "sample_user_data_script"}) 129 | assert launch_config['Properties'] == { 130 | 'ImageId': "sampleAMI", 131 | 'InstanceType': "sampleInstance", 132 | 'ClassicLinkVPCId': "sample_classic_link_vpc_id", 133 | 'ClassicLinkVPCSecurityGroups': "sample_classicGroups", 134 | 'InstanceId': "sample_instance_id", 135 | 'InstanceMonitoring': "sample_monitoring", 136 | 'KernelId': "sample_kernel_id", 137 | 'PlacementTenancy': "sample_placementTenancy", 138 | 'PlacementGroupName': "samplePIP", 139 | 'RamdiskId': "sampleRamdisk", 140 | 'SpotPrice': "SampleSpotPrice", 141 | 'UserData': {'Fn::Base64': 'sample_user_data_script'}} 142 | -------------------------------------------------------------------------------- /respawn/test/test_cloudwatch.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from respawn import cloudwatch 3 | 4 | 5 | class TestCloudwatch(unittest.TestCase): 6 | def test_transform_attribute(self): 7 | sample_attribute = [{'name': 'x', 'value': 'y'}, {'name': 'xx', 'value': 'yy'}] 8 | v = cloudwatch.transform_attribute(sample_attribute) 9 | assert v == [{'Name': 'x', 'Value': 'y'}, {'Name': 'xx', 'Value': 'yy'}] 10 | 11 | def test_cloudwatchProperties(self): 12 | sample_kwargs = {'alarm_actions': ['String1', 'String2'], 'ok_actions': ['String', 'String2'], 13 | 'alarm_description': 'sample description', 'namespace': 'sampleNamespace', 14 | 'alarm_name': 'test_alarm', 'actions_enabled': True, 15 | 'insufficient_data_actions': ['String', 'String2'], 'unit': 'String'} 16 | v = cloudwatch.CloudWatchProperties(10, 10, 10, "sample", 10,10, **sample_kwargs) 17 | assert v == { 18 | "ActionsEnabled": True, 19 | "AlarmActions": [ 20 | "String1", 21 | "String2" 22 | ], 23 | "AlarmDescription": 
"sample description", 24 | "AlarmName": "test_alarm", 25 | "InsufficientDataActions": [ 26 | "String", 27 | "String2" 28 | ], 29 | "Namespace": "sampleNamespace", 30 | "OKActions": [ 31 | "String", 32 | "String2" 33 | ], 34 | "Unit": "String", 35 | 'ComparisonOperator': 10, 36 | 'EvaluationPeriods': 10, 37 | 'MetricName': 'sample', 38 | 'Period': 10, 39 | 'Statistic': 10, 40 | 'Threshold': 10 41 | } 42 | -------------------------------------------------------------------------------- /respawn/test/test_ec2.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from respawn import ec2, errors 3 | 4 | 5 | def test_tag(): 6 | # Successful creation of tag 7 | tag = ec2.Tag("Key1", "Value1") 8 | assert tag == {"Key": "Key1", "Value": "Value1"} 9 | 10 | # Extra arguments 11 | with pytest.raises(TypeError): 12 | ec2.Tag("Key1", "Value1", True) 13 | 14 | 15 | def test_block_device(): 16 | # Successful creation of block device 17 | block_device = ec2.BlockDevice(snapshot_id="snap-xxxxxxxx", size=100, 18 | volume_type="io1", iops=1000, delete_on_termination=True) 19 | assert block_device == { 20 | "SnapshotId": "snap-xxxxxxxx", 21 | "VolumeSize": 100, 22 | "VolumeType": "io1", 23 | "Iops": 1000, 24 | "DeleteOnTermination": True} 25 | 26 | # No snapshot_id or size 27 | with pytest.raises(errors.RespawnResourceError): 28 | ec2.BlockDevice(volume_type="io1", iops=1000, delete_on_termination=True) 29 | 30 | # No iops with volume_type io1 31 | with pytest.raises(errors.RespawnResourceError): 32 | ec2.BlockDevice(snapshot_id="test_snapshot", size=100, 33 | volume_type="io1", delete_on_termination=True) 34 | 35 | 36 | def test_block_device_mapping(): 37 | # Ebs block device mapping 38 | block_device_mapping = ec2.BlockDeviceMapping("/dev/sda", ebs=ec2.BlockDevice(**{"snapshot_id": "snap-xxxxxxxx"})) 39 | assert block_device_mapping == {"DeviceName": "/dev/sda", "Ebs": {"SnapshotId": "snap-xxxxxxxx"}} 40 | 41 | # Virtual name block device mapping 42 | block_device_mapping = ec2.BlockDeviceMapping("/dev/sda", virtual_name="ephemeral0") 43 | assert block_device_mapping == {"DeviceName": "/dev/sda", "VirtualName": "ephemeral0"} 44 | 45 | # No device block device mapping 46 | block_device_mapping = ec2.BlockDeviceMapping("/dev/sda", no_device=True) 47 | 48 | assert block_device_mapping == {"DeviceName": "/dev/sda", "NoDevice": {}} 49 | 50 | # Both ebs and virtual_name 51 | with pytest.raises(errors.RespawnResourceError): 52 | ec2.BlockDeviceMapping("/dev/sda", 53 | ebs=dict(ec2.BlockDevice(**{"snapshot_id": "snap-xxxxxxxx"})), 54 | virtual_name="ephemeral0") 55 | 56 | # No ebs, virtual_name, or no_device 57 | with pytest.raises(errors.RespawnResourceError): 58 | ec2.BlockDeviceMapping("/dev/sda") 59 | 60 | 61 | def test_private_ip_specification(): 62 | # Successful creation of private ip specification 63 | private_ip_specification = ec2.PrivateIpSpecification("1.1.1.1", True) 64 | assert private_ip_specification == {"PrivateIpAddress": "1.1.1.1", "Primary": True} 65 | 66 | # Extra arguments 67 | with pytest.raises(TypeError): 68 | ec2.PrivateIpSpecification("1.1.1.1", True, True) 69 | 70 | 71 | def test_embedded_network_interface(): 72 | # Successful creation of embedded network interface 73 | embedded_network_interface = ec2.EmbeddedNetworkInterface("1", public_ip=True, delete_on_termination=True, 74 | description="test_description", 75 | group_set=["test_group"], 76 | interface_id="id-interface-test", 77 | private_ip="1.1.1.1", 78 | private_ips=[ 79 | 
dict(ec2.PrivateIpSpecification("2.2.2.2", True))], 80 | secondary_private_ip_count=2, subnet_id="id-subnet-test") 81 | 82 | assert embedded_network_interface == { 83 | "DeviceIndex": "1", 84 | "AssociatePublicIpAddress": True, 85 | "DeleteOnTermination": True, 86 | "Description": "test_description", 87 | "GroupSet": ["test_group"], 88 | "NetworkInterfaceId": "id-interface-test", 89 | "PrivateIpAddress": "1.1.1.1", 90 | "PrivateIpAddresses": [{"PrivateIpAddress": "2.2.2.2", "Primary": True}], 91 | "SecondaryPrivateIpAddressCount": 2, 92 | "SubnetId": "id-subnet-test" 93 | } 94 | 95 | # No subnet_id or interface_id 96 | with pytest.raises(errors.RespawnResourceError): 97 | ec2.EmbeddedNetworkInterface("1", public_ip=True, delete_on_termination=True, 98 | description="test_description", 99 | group_set=["test_group"], 100 | private_ip="1.1.1.1", 101 | private_ips=[dict(ec2.PrivateIpSpecification("2.2.2.2", True))], 102 | secondary_private_ip_count=2) 103 | 104 | 105 | def test_mount_point(): 106 | # Successful mount point 107 | mount_point = ec2.MountPoint("/dev/sda", "id-volume-test") 108 | assert mount_point == {"Device": "/dev/sda", "VolumeId": "id-volume-test"} 109 | 110 | # Extra arguments 111 | with pytest.raises(TypeError): 112 | ec2.MountPoint("/dev/sda", "id-volume-test", True) 113 | 114 | 115 | def test_security_group_egress(): 116 | # Successful security group egress with cidr_ip 117 | security_group_egress = ec2.SecurityGroupEgress(from_port="80", ip_protocol="tcp", 118 | to_port="443", cidr_ip="10.0.0.0/8") 119 | assert dict(security_group_egress) == { 120 | "FromPort": "80", 121 | "IpProtocol": "tcp", 122 | "ToPort": "443", 123 | "CidrIp": "10.0.0.0/8"} 124 | 125 | # Successful security group egress with destination_security_group_id 126 | security_group_egress = ec2.SecurityGroupEgress(from_port="80", ip_protocol="tcp", 127 | to_port="443", destination_security_group_id="id-test") 128 | assert dict(security_group_egress) == { 129 | "FromPort": "80", 130 | "IpProtocol": "tcp", 131 | "ToPort": "443", 132 | "DestinationSecurityGroupId": "id-test"} 133 | 134 | # Both cidr_ip and destination_security_group_id 135 | with pytest.raises(errors.RespawnResourceError): 136 | ec2.SecurityGroupEgress(from_port="80", ip_protocol="tcp", to_port="443", 137 | cidr_ip="10.0.0.0/8", destination_security_group_id="id-test") 138 | 139 | 140 | def test_security_group_ingress(): 141 | # Successful security group ingress with cidr_ip 142 | security_group_ingress = ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", 143 | cidr_ip="10.0.0.0/8") 144 | assert dict(security_group_ingress) == { 145 | "FromPort": "80", 146 | "IpProtocol": "tcp", 147 | "ToPort": "443", 148 | "CidrIp": "10.0.0.0/8"} 149 | 150 | # Successful security group ingress with source_security_group_name 151 | security_group_ingress = ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", 152 | source_security_group_name="test-sg", 153 | source_security_group_owner_id="sg-owner-id-test") 154 | assert dict(security_group_ingress) == { 155 | "FromPort": "80", 156 | "IpProtocol": "tcp", 157 | "ToPort": "443", 158 | "SourceSecurityGroupName": "test-sg", 159 | "SourceSecurityGroupOwnerId": "sg-owner-id-test"} 160 | 161 | # Successful security group ingress with source_security_group_id 162 | security_group_ingress = ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", 163 | source_security_group_id="sg-id-test") 164 | assert dict(security_group_ingress) == { 165 | "FromPort": 
"80", 166 | "IpProtocol": "tcp", 167 | "ToPort": "443", 168 | "SourceSecurityGroupId": "sg-id-test"} 169 | 170 | # Both cidr_ip and source_security_group_name 171 | with pytest.raises(errors.RespawnResourceError): 172 | ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", cidr_ip="10.0.0.0/8", 173 | source_security_group_name="test-sg") 174 | 175 | # Both cidr_ip and source_security_group_id 176 | with pytest.raises(errors.RespawnResourceError): 177 | ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", cidr_ip="10.0.0.0/8", 178 | source_security_group_id="sg-id-test") 179 | 180 | # Both source_security_group_id and source_security_group_name 181 | with pytest.raises(errors.RespawnResourceError): 182 | ec2.SecurityGroupIngress(from_port="80", ip_protocol="tcp", to_port="443", 183 | source_security_group_name="test-sg", 184 | source_security_group_id="sg-id-test") 185 | 186 | 187 | def test_instance(): 188 | # Successful instance 189 | instance = ec2.Instance(name="TestInstance", ami_id="ami-test", availability_zone="us-east1", 190 | block_devices={"/dev/sda": dict(ebs=dict(snapshot_id="snap-xxxxxxxx"))}, 191 | disable_api_termination=True, ebs_optimized=True, iam_role="iam-test", 192 | instance_shutdown_behavior="test_shutdown_behavior", instance_type="t2.micro", 193 | kernel_id="kernel-test", key_pair="keypair-test", monitoring=True, 194 | network_interfaces={"Interface1": dict(device_index="1", public_ip=True, 195 | delete_on_termination=True, 196 | group_set=["test_group"], 197 | interface_id="id-interface-test", 198 | private_ip="1.1.1.1", 199 | private_ips=[ 200 | dict(private_ip="2.2.2.2", primary=True)], 201 | secondary_private_ip_count=2, 202 | subnet_id="id-subnet-test")}, 203 | placement_group="placementgroup-test", private_ip="1.1.1.1", 204 | ramdisk_id="ramdisk-test", security_group_ids=["sg-test"], security_groups=["sgs-test"], 205 | source_dest_check=True, subnet="subnet-test", tags=[dict(key="Key1", value="Value1")], 206 | tenancy="tenancy-test", user_data_script="test_script", 207 | volumes=[dict(volume_id="vol-xxxxxxx", device="/dev/sde")], attributes={"a": "val1"}) 208 | 209 | assert instance == { 210 | "Type": "AWS::EC2::Instance", 211 | "Properties": { 212 | "Monitoring": True, 213 | "EbsOptimized": True, 214 | "RamdiskId": "ramdisk-test", 215 | "PrivateIpAddress": "1.1.1.1", 216 | "Tags": [ 217 | { 218 | "Key": "Key1", 219 | "Value": "Value1" 220 | } 221 | ], 222 | "PlacementGroupName": "placementgroup-test", 223 | "ImageId": "ami-test", 224 | "KeyName": "keypair-test", 225 | "SecurityGroups": [ 226 | "sgs-test" 227 | ], 228 | "SubnetId": "subnet-test", 229 | "InstanceType": "t2.micro", 230 | "NetworkInterfaces": [ 231 | { 232 | "DeviceIndex": "1", 233 | "GroupSet": [ 234 | "test_group" 235 | ], 236 | "Description": "Interface1", 237 | "NetworkInterfaceId": "id-interface-test", 238 | "PrivateIpAddresses": [ 239 | { 240 | "Primary": True, 241 | "PrivateIpAddress": "2.2.2.2" 242 | } 243 | ], 244 | "DeleteOnTermination": True, 245 | "AssociatePublicIpAddress": True, 246 | "SubnetId": "id-subnet-test", 247 | "PrivateIpAddress": "1.1.1.1", 248 | "SecondaryPrivateIpAddressCount": 2 249 | } 250 | ], 251 | "SourceDestCheck": True, 252 | "InstanceInitiatedShutdownBehavior": "test_shutdown_behavior", 253 | "SecurityGroupIds": [ 254 | "sg-test" 255 | ], 256 | "BlockDeviceMappings": [ 257 | { 258 | "DeviceName": "/dev/sda", 259 | "Ebs": { 260 | "SnapshotId": "snap-xxxxxxxx" 261 | } 262 | } 263 | ], 264 | "Volumes": [ 265 | { 266 | 
"Device": "/dev/sde", 267 | "VolumeId": "vol-xxxxxxx" 268 | } 269 | ], 270 | "KernelId": "kernel-test", 271 | "IamInstanceProfile": "iam-test", 272 | "UserData": { 273 | "Fn::Base64": "test_script" 274 | }, 275 | "AvailabilityZone": "us-east1", 276 | "Tenancy": "tenancy-test", 277 | "DisableApiTermination": True 278 | } 279 | } 280 | 281 | 282 | def test_volume(): 283 | # Successful volume 284 | volume = ec2.Volume(name="TestVolume", availability_zone="TestAZ", snapshot_id="test_snapshot", 285 | size=1000, iops=4000, kms_key_id="TestKMS", volume_type="io1", encrypted=True, 286 | tags=[dict(key="Key1", value="Value1")], deletion_policy="Retain") 287 | 288 | assert volume == { 289 | "Type": "AWS::EC2::Volume", 290 | "Properties": { 291 | "AvailabilityZone": "TestAZ", 292 | "Tags": [ 293 | { 294 | "Key": "Key1", 295 | "Value": "Value1" 296 | } 297 | ], 298 | "Encrypted": True, 299 | "VolumeType": "io1", 300 | "KmsKeyId": "TestKMS", 301 | "SnapshotId": "test_snapshot", 302 | "Iops": 4000, 303 | "Size": 1000 304 | } 305 | } 306 | 307 | # No snapshot_id or size 308 | with pytest.raises(errors.RespawnResourceError): 309 | ec2.Volume(name="TestVolume", availability_zone="TestAZ", volume_type="io1", iops=1000, 310 | delete_on_termination=True) 311 | 312 | # No iops with volume_type io1 313 | with pytest.raises(errors.RespawnResourceError): 314 | ec2.Volume(name="TestVolume", availability_zone="TestAZ", snapshot_id="test_snapshot", size=1000, 315 | volume_type="io1", delete_on_termination=True) 316 | 317 | 318 | def test_network_interface(): 319 | # Successful network interface 320 | network_interface = ec2.NetworkInterface(name="TestNetworkInterface", public_ip=True, delete_on_termination=True, 321 | description="test_description", 322 | group_set=["test_group"], 323 | private_ip="1.1.1.1", 324 | private_ips=[ 325 | dict(private_ip="2.2.2.2", primary=True)], 326 | secondary_private_ip_count=2, subnet_id="id-subnet-test") 327 | 328 | assert network_interface == { 329 | "Type": "AWS::EC2::NetworkInterface", 330 | "Properties": { 331 | "GroupSet": [ 332 | "test_group" 333 | ], 334 | "Description": "test_description", 335 | "PrivateIpAddresses": [ 336 | { 337 | "Primary": True, 338 | "PrivateIpAddress": "2.2.2.2" 339 | } 340 | ], 341 | "SecondaryPrivateIpAddressCount": 2, 342 | "SubnetId": "id-subnet-test", 343 | "PrivateIpAddress": "1.1.1.1" 344 | } 345 | } 346 | 347 | 348 | def test_network_interface_attachment(): 349 | # Successful network interface attachment 350 | network_interface_attachment = ec2.NetworkInterfaceAttachment(name="TestNetworkInterfaceAttachment", 351 | device_index="deviceindex-test", 352 | instance_id="instanceid-test", 353 | network_interface_id="networkinterfaceid-test", 354 | delete_on_termination=True) 355 | 356 | assert network_interface_attachment == { 357 | "Type": "AWS::EC2::NetworkInterfaceAttachment", 358 | "Properties": { 359 | "InstanceId": "instanceid-test", 360 | "DeviceIndex": "deviceindex-test", 361 | "NetworkInterfaceId": "networkinterfaceid-test", 362 | "" 363 | "DeleteOnTermination": True 364 | } 365 | } 366 | 367 | 368 | def test_security_group(): 369 | # Successful security group 370 | security_group = ec2.SecurityGroup(name="TestSecurityGroup", group_description="Description", 371 | security_group_ingress=[dict(from_port="80", 372 | ip_protocol="tcp", 373 | to_port="443", 374 | source_security_group_name="test-sg")], 375 | security_group_egress=[dict(from_port="80", ip_protocol="tcp", 376 | to_port="443", cidr_ip="10.0.0.0/8")], 377 | vpc_id="TestVPC", 
tags=[dict(key="Key1", value="Value1")]) 378 | 379 | assert security_group == { 380 | "Type": "AWS::EC2::SecurityGroup", 381 | "Properties": { 382 | "SecurityGroupIngress": [ 383 | { 384 | "FromPort": "80", 385 | "IpProtocol": "tcp", 386 | "ToPort": "443", 387 | "SourceSecurityGroupName": "test-sg" 388 | } 389 | ], 390 | "VpcId": "TestVPC", 391 | "Tags": [ 392 | { 393 | "Key": "Key1", 394 | "Value": "Value1" 395 | } 396 | ], 397 | "GroupDescription": "Description", 398 | "SecurityGroupEgress": [ 399 | { 400 | "FromPort": "80", 401 | "IpProtocol": "tcp", 402 | "ToPort": "443", 403 | "CidrIp": "10.0.0.0/8" 404 | } 405 | ] 406 | } 407 | } 408 | -------------------------------------------------------------------------------- /respawn/test/test_elb.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from respawn import elb, sns, errors 3 | 4 | 5 | class TestElb(unittest.TestCase): 6 | def test_accessLoggingPolicy(self): 7 | sample_kwargs = {'s3_bucket_prefix': 'sampleName', 'enabled': True, 'emit_interval': 20, 's3_bucket_name': 8 | 'sampleName'} 9 | v = elb.AccessLoggingPolicy(**sample_kwargs) 10 | assert v == { 11 | "EmitInterval": 20, 12 | "Enabled": True, 13 | "S3BucketName": "sampleName", 14 | "S3BucketPrefix": "sampleName" 15 | } 16 | 17 | def test_appCookieStickinessPolicy(self): 18 | sample_kwargs = {'cookie_name': 'cookie1', 'policy_name': 'policy1'} 19 | v = elb.AppCookieStickinessPolicy(**sample_kwargs) 20 | assert v == { 21 | "CookieName": "cookie1", 22 | "PolicyName": "policy1" 23 | } 24 | 25 | def test_availabilityZones(self): 26 | pass 27 | 28 | def test_connectionDrainingPolicy(self): 29 | sample_kwargs = {'enabled': True, 'timeout': 10} 30 | v = elb.ConnectionDrainingPolicy(**sample_kwargs) 31 | assert v == {"Enabled": True, 32 | "Timeout": 10 33 | } 34 | 35 | def test_connectionSettings(self): 36 | sample_kwargs = {'idle_timeout': 40} 37 | v = elb.ConnectionSettings(**sample_kwargs) 38 | assert v == {'IdleTimeout': 40} 39 | 40 | def test_healthCheck(self): 41 | sample_kwargs = {'healthy_threshold': 111111, 'interval': 222222, 'target': 333333, 'timeout': 444444, 42 | 'unhealthy_threshold': 555555} 43 | v = elb.HealthCheck(**sample_kwargs) 44 | assert v == { 45 | "HealthyThreshold": 111111, 46 | "Interval": 222222, 47 | "Timeout": 444444, 48 | "UnhealthyThreshold": 555555, 49 | "Target": 333333 50 | } 51 | 52 | def test_listener(self): 53 | pass 54 | 55 | def test_httpListener(self): 56 | pass 57 | 58 | def test_LBCookieStickinessPolicy(self): 59 | sample_kwargs = {'policy_name': 'sampleName', 'cookie_expiration_period': 2222} 60 | v = elb.LBCookieStickinessPolicy(**sample_kwargs) 61 | assert v == { 62 | "PolicyName": "sampleName", 63 | "CookieExpirationPeriod": 2222 64 | } 65 | 66 | def test_policies(self): 67 | pass 68 | 69 | def test_protocolListener(self): 70 | pass 71 | 72 | def test_tcpListener(self): 73 | pass 74 | 75 | def test_Tags(self): 76 | pass 77 | 78 | def test_loadBalancerProperties(self): 79 | sample_kwargs = {'health_check_port': 8443, 80 | 'other_security_groups': 'sampleName', 'health_check_path': '/iisstart.htm', 81 | 'instances': ['10.23.23.23', '13.12.13.14'], 'cross_zone': True, 'port': 443, 82 | 'security_groups': ['sg-111111'], 83 | 'access_logging_policy': {'s3_bucket_prefix': 'sampleName', 'enabled': True, 'emit_interval': 84 | 20, 85 | 's3_bucket_name': 'sampleName'}, 'realm': 'protected', 86 | 'scheme': 'internet-facing', 'connection_draining_policy': {'enabled': True, 'timeout': 10}, 87 | 
'subnets': ['subnet-111111', 'subnet-222222', 'subnet-3333333'], 88 | 'health_check': {'healthy_threshold': 3434, 'interval': 343434, 'target': 343434, 89 | 'timeout': 434, 'unhealthy_threshold': 343434}, 'env': 'int', 90 | 'connection_settings': {'idle_timeout': 40}, 91 | 'listeners': {'instance_port': 84, 'instance_protocol': 'tcp', 'load_balancer_port': 83, 92 | 'protocol': "HTTPS"}} 93 | v = elb.LoadBalancerProperties(**sample_kwargs) 94 | assert v == { 95 | "AccessLoggingPolicy": { 96 | "EmitInterval": 20, 97 | "Enabled": True, 98 | "S3BucketName": "sampleName", 99 | "S3BucketPrefix": "sampleName" 100 | }, 101 | "ConnectionDrainingPolicy": { 102 | "Enabled": True, 103 | "Timeout": 10 104 | }, 105 | "ConnectionSettings": { 106 | "IdleTimeout": 40 107 | }, 108 | "CrossZone": True, 109 | "HealthCheck": { 110 | "HealthyThreshold": 3434, 111 | "Interval": 343434, 112 | "Timeout": 434, 113 | "UnhealthyThreshold": 343434, 114 | "Target": 343434 115 | }, 116 | "Instances": [ 117 | "10.23.23.23", 118 | "13.12.13.14" 119 | ], 120 | "Listeners": { 121 | "instance_port": 84, 122 | "instance_protocol": "tcp", 123 | "load_balancer_port": 83, 124 | "protocol": "HTTPS" 125 | }, 126 | "Scheme": "internet-facing", 127 | "SecurityGroups": [ 128 | "sg-111111" 129 | ], 130 | "Subnets": [ 131 | "subnet-111111", 132 | "subnet-222222", 133 | "subnet-3333333" 134 | ] 135 | } 136 | 137 | def test_loadBalancer(self): 138 | sample_kwargs = {'health_check_port': 8443, 139 | 'other_security_groups': 'sg-111111', 'health_check_path': '/iisstart.htm', 140 | 'listeners': {'instance_port': 84, 'instance_protocol': 'tcp', 'load_balancer_port': 83, 141 | 'protocol': "HTTPS"}, 'instances': ['10.23.23.23', '13.12.13.14'], 142 | 'cross_zone': True, 'port': 443, 'security_groups': ['sg-111111'], 143 | 'access_logging_policy': {'s3_bucket_prefix': 'sampleName', 'enabled': True, 'emit_interval': 144 | 20, 145 | 's3_bucket_name': 'sampleName'}, 'realm': 'protected', 146 | 'app_cookie_stickiness_policy': [{'cookie_name': 'sampleName', 'policy_name': 147 | 'samplePolicyName'}, 148 | {'cookie_name': 'sampleName', 'policy_name': 149 | 'samplePolicyName'}], 150 | 'scheme': 'internet-facing', 'connection_draining_policy': {'enabled': True, 'timeout': 10}, 151 | 'health_check': {'healthy_threshold': 3434, 'interval': 343434, 'target': 343434, 152 | 'timeout': 434, 'unhealthy_threshold': 343434}, 'env': 'int', 153 | 'tags': [{'key': 'sampleKey', 'value': 'sampleValue'}, {'key': 'sampleKey1', 154 | 'value': 'sampleValue1'}], 155 | 'connection_settings': {'idle_timeout': 40}, 156 | 'lb_cookie_stickiness_policy': [{'cookie_expiration_period': 2131, 'policy_name': 157 | 'samplePolicyName'}, 158 | {'cookie_expiration_period': 1313, 'policy_name': 159 | 'samplePolicyName2'}], 160 | 'policies': [ 161 | {'attribute': [{'name': 'name1', 'value': 'value2'}, {'name': 'name2', 'value': 'value2'}], 162 | 'instance_ports': ['2121', '2424'], 'load_balancer_ports': ['32323', '2424'], 163 | 'policy_type': 'SSLNegotiationPolicyType', 'policy_name': 'sds'}, 164 | {'attribute': [{'name': 'value1', 'value': 'value2'}], 'instance_ports': ['2121', '2424'], 165 | 'load_balancer_ports': ['32323', '2424'], 'policy_type': 'SSLNegotiationPolicyType', 166 | 'policy_name': 'samplePolicyName'}]} 167 | 168 | v = elb.LoadBalancer("name", **sample_kwargs) 169 | assert v['Properties'] == { 170 | "AccessLoggingPolicy": { 171 | "EmitInterval": 20, 172 | "Enabled": True, 173 | "S3BucketName": "sampleName", 174 | "S3BucketPrefix": "sampleName" 175 | }, 176 | 
"AppCookieStickinessPolicy": [ 177 | { 178 | "CookieName": "sampleName", 179 | "PolicyName": "samplePolicyName" 180 | }, 181 | { 182 | "CookieName": "sampleName", 183 | "PolicyName": "samplePolicyName" 184 | } 185 | ], 186 | "ConnectionDrainingPolicy": { 187 | "Enabled": True, 188 | "Timeout": 10 189 | }, 190 | "ConnectionSettings": { 191 | "IdleTimeout": 40 192 | }, 193 | "CrossZone": True, 194 | "HealthCheck": { 195 | "HealthyThreshold": 3434, 196 | "Interval": 343434, 197 | "Timeout": 434, 198 | "UnhealthyThreshold": 343434, 199 | "Target": 343434 200 | }, 201 | "Instances": [ 202 | "10.23.23.23", 203 | "13.12.13.14" 204 | ], 205 | "LBCookieStickinessPolicy": [ 206 | { 207 | "PolicyName": "samplePolicyName", 208 | "CookieExpirationPeriod": 2131 209 | }, 210 | { 211 | "PolicyName": "samplePolicyName2", 212 | "CookieExpirationPeriod": 1313 213 | } 214 | ], 215 | "Listeners": { 216 | "instance_port": 84, 217 | "instance_protocol": "tcp", 218 | "load_balancer_port": 83, 219 | "protocol": "HTTPS" 220 | }, 221 | "Policies": [ 222 | { 223 | "PolicyName": "sds", 224 | "Attribute": [ 225 | { 226 | "Name": "name1", 227 | "Value": "value2" 228 | }, 229 | { 230 | "Name": "name2", 231 | "Value": "value2" 232 | } 233 | ], 234 | "InstancePorts": [ 235 | "2121", 236 | "2424" 237 | ], 238 | "LoadBalancerPorts": [ 239 | "32323", 240 | "2424" 241 | ], 242 | "PolicyType": "SSLNegotiationPolicyType" 243 | }, 244 | { 245 | "PolicyName": "samplePolicyName", 246 | "Attribute": [ 247 | { 248 | "Name": "value1", 249 | "Value": "value2" 250 | } 251 | ], 252 | "InstancePorts": [ 253 | "2121", 254 | "2424" 255 | ], 256 | "LoadBalancerPorts": [ 257 | "32323", 258 | "2424" 259 | ], 260 | "PolicyType": "SSLNegotiationPolicyType" 261 | } 262 | ], 263 | "Scheme": "internet-facing", 264 | "SecurityGroups": [ 265 | "sg-111111" 266 | ], 267 | "Tags": [ 268 | { 269 | "Key": "sampleKey", 270 | "Value": "sampleValue" 271 | }, 272 | { 273 | "Key": "sampleKey1", 274 | "Value": "sampleValue1" 275 | } 276 | ] 277 | } 278 | 279 | def test_make_web(self): 280 | pass 281 | 282 | def test_make_internal(self): 283 | pass 284 | 285 | def test_transform_attribute(self): 286 | sample_attribute = [{'name': 'x', 'value': 'y'}, {'name': 'xx', 'value': 'yy'}] 287 | v = elb.transform_attribute(sample_attribute) 288 | assert v == [{'Name': 'x', 'Value': 'y'}, {'Name': 'xx', 'Value': 'yy'}] 289 | 290 | def test_recurse_kwargs_list(self): 291 | sample_kwargs = {'topic_name': 'SampleTopic', 'display_name': 'MySnSTopic', 292 | 'subscription': [{'endpoint': {'ref': 'OpsGenieEndpoint'}, 'protocol': 'https'}, 293 | {'endpoint': 'htps://sampleSite.com', 'protocol': 'http'}]} 294 | sample_kwargs_no_suscription = {'topic_name': 'SampleTopic', 'display_name': 'MySnSTopic'} 295 | 296 | v = sns.recurse_kwargs_list('subscription', sns.Subscription, **sample_kwargs) 297 | assert str( 298 | v) == "[Subscription([('Endpoint', {'ref': 'OpsGenieEndpoint'}), ('Protocol', 'https')]), Subscription([('Endpoint', 'htps://sampleSite.com'), ('Protocol', 'http')])]" 299 | 300 | v = sns.recurse_kwargs_list('subscription', sns.Subscription, **sample_kwargs_no_suscription) 301 | assert v is None 302 | -------------------------------------------------------------------------------- /respawn/test/test_parameters.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from respawn import parameters 3 | 4 | 5 | class TestParameters(unittest.TestCase): 6 | def test_CustomParameters(self): 7 | sample_kwargs = { 8 | 
'constraint_description': 'Malformed input-Parameter MyParameter must only contain upper and lower case letters', 9 | 'description': 'sample description', 'default': '10.201.22.33', 'max_value': 34, 'min_value': 12, 10 | 'allowed_values': ['sampleValue1', 'sampleValue2'], 'type': 'String', 'no_echo': True} 11 | v = parameters.CustomParameters("name", **sample_kwargs) 12 | assert v == {'Description': 'sample description', 'Default': '10.201.22.33', 'Type': 'String', 13 | 'AllowedValues': ['sampleValue1', 'sampleValue2'], 'NoEcho': True, 14 | 'ConstraintDescription': 'Malformed input-Parameter MyParameter must only contain upper and lower case letters'} 15 | -------------------------------------------------------------------------------- /respawn/test/test_rds.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from respawn import rds 3 | 4 | 5 | class TestRds(unittest.TestCase): 6 | def test_Tag(self): 7 | sample_kwargs = {'value': 'djin/metadata/test/rds', 'key': 'service_name'} 8 | v = rds.Tag(**sample_kwargs) 9 | assert v == { 10 | "Key": "service_name", 11 | "Value": "djin/metadata/test/rds" 12 | } 13 | 14 | 15 | def test_DBInstance(self): 16 | sample_name = 'myTestRDS' 17 | sample_kwargs = {'engine': 'MySQL', 'realm': 'private', 'tags': [{'value': 'djin/metadata/test/rds', 'key': 'service_name'}], 'service_name': 'djin/metadata/test/rds', 'subnet_group_name': 'djin-int-pri', 'allocated_storage': 100, 'instance_class': 'db.m1.small', 'instance_identifier': 'myTestRDSdb-int'} 18 | v = rds.DBInstance(sample_name, **sample_kwargs) 19 | assert v == { 20 | "Type": "AWS::RDS::DBInstance", 21 | "Properties": { 22 | "Engine": "MySQL", 23 | "Tags": [ 24 | { 25 | "Key": "service_name", 26 | "Value": "djin/metadata/test/rds" 27 | } 28 | ], 29 | "AllocatedStorage": 100, 30 | "DBInstanceClass": "db.m1.small", 31 | "DBSubnetGroupName": "djin-int-pri", 32 | "DBInstanceIdentifier": "myTestRDSdb-int" 33 | } 34 | } 35 | def test_DBProperties(self): 36 | sample_name = 'myTestRDS' 37 | sample_kwargs = {'backup_retention_period': 'string', 'source_db_instance_identifier': 'string', 'availability_zone': 'string', 'service_name': 'djin/metadata/test/rds', 'subnet_group_name': 'string', 'kms_key_id': 'string', 'iops': 1000, 'db_security_groups': 'string', 'master_username': 'string', 'snapshot_identifier': 'string', 'allow_minor_version_upgrade': True, 'vpc_security_groups': ['string'], 'realm': 'private', 'port': 'string', 'preferred_backup_window': 'string', 'engine': 'string', 'db_parameter_group_name': 'string', 'tags': [{'value': 'djin/metadata/test/rds', 'key': 'service_name'}], 'allow_major_version_upgrade': True, 'db_name': 'string', 'license_model': 'string', 'storage_encrypted': True, 'character_set_name': 'string', 'engine_version': 'string', 'option_group_name': 'string', 'multi_az': False, 'instance_identifier': 'string', 'publicly_accessible': False, 'preferred_maintenance_window': 'string', 38 | 'instance_class': 'db.m1.small', 'allocated_storage': '100'} 39 | v = rds.DBInstance(sample_name, **sample_kwargs) 40 | assert v['Properties'] == { 41 | "DBParameterGroupName": "string", 42 | "AllowMajorVersionUpgrade": True, 43 | "MasterUsername": "string", 44 | "LicenseModel": "string", 45 | "VPCSecurityGroups": [ 46 | "string" 47 | ], 48 | "Engine": "string", 49 | "MultiAZ": False, 50 | "DBSecurityGroups": "string", 51 | "PubliclyAccessible": False, 52 | "Tags": [ 53 | { 54 | "Key": "service_name", 55 | "Value": "djin/metadata/test/rds" 56 | } 
57 | ], 58 | "PreferredBackupWindow": "string", 59 | "DBSnapshotIdentifier": "string", 60 | "AllocatedStorage": '100', 61 | "DBSubnetGroupName": "string", 62 | "DBName": "string", 63 | "PreferredMaintenanceWindow": "string", 64 | "EngineVersion": "string", 65 | "SourceDBInstanceIdentifier": "string", 66 | "BackupRetentionPeriod": "string", 67 | "OptionGroupName": "string", 68 | "CharacterSetName": "string", 69 | "AvailabilityZone": "string", 70 | "Iops": 1000, 71 | "StorageEncrypted": True, 72 | "KmsKeyId": "string", 73 | "DBInstanceClass": "db.m1.small", 74 | "Port": "string", 75 | "DBInstanceIdentifier": "string" 76 | } -------------------------------------------------------------------------------- /respawn/test/test_route53.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from respawn import route53 3 | 4 | 5 | def test_record_set(): 6 | # Successful instance 7 | record_set = route53.RecordSet(name="TestRecordSet", domain_name="test.dowjones.net", 8 | failover="Primary", geolocation=[dict(continent_code="NA"), 9 | dict(country_code="US", subdivision_code="NJ")], 10 | health_check_id="1", hosted_zone_id="2", resource_records= ["aaa.dowjones.net", 11 | "bbb.dowjones.net"], 12 | ttl="20", type="CNAME") 13 | assert record_set == { 14 | "Type": "AWS::Route53::RecordSet", 15 | "Properties": { 16 | "Name": "test.dowjones.net", 17 | "Failover": "Primary", 18 | "GeoLocation": [ 19 | { 20 | "ContinentCode": "NA", 21 | }, 22 | { 23 | "CountryCode": "US", 24 | "SubdivisionCode": "NJ" 25 | 26 | } 27 | ], 28 | "HealthCheckId": "1", 29 | "HostedZoneId": "2", 30 | "ResourceRecords": [ 31 | "aaa.dowjones.net", 32 | "bbb.dowjones.net" 33 | ], 34 | "TTL": "20", 35 | "Type": "CNAME" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /respawn/test/test_sns.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from respawn import sns 3 | 4 | 5 | class TestSns(unittest.TestCase): 6 | def test_subscription(self): 7 | sample_attribute = {'endpoint': 'x', 'protocol': 'y'} 8 | v = sns.Subscription(**sample_attribute) 9 | assert v == {'Endpoint': 'x', 'Protocol': 'y'} 10 | 11 | def test_SnsTopicProperties(self): 12 | sample_kwargs = {'topic_name': 'SampleTopic', 'display_name': 'MySnSTopic'} 13 | v = sns.SnsTopicProperties(**sample_kwargs) 14 | 15 | assert v == { 16 | "DisplayName": "MySnSTopic", 17 | "TopicName": "SampleTopic" 18 | } 19 | 20 | def test_SnsTopic(self): 21 | pass 22 | 23 | def test_recurse_kwargs_list(self): 24 | sample_kwargs = {'topic_name': 'SampleTopic', 'display_name': 'MySnSTopic', 25 | 'subscription': [{'endpoint': {'ref': 'OpsGenieEndpoint'}, 'protocol': 'https'}, 26 | {'endpoint': 'https://sampleSite.com', 'protocol': 'http'}]} 27 | sample_kwargs_no_suscription = {'topic_name': 'SampleTopic', 'display_name': 'MySnSTopic'} 28 | 29 | v = sns.recurse_kwargs_list('subscription', sns.Subscription, **sample_kwargs) 30 | assert str( 31 | v) == "[Subscription([('Endpoint', {'ref': 'OpsGenieEndpoint'}), ('Protocol', 'https')]), Subscription([('Endpoint', 'https://sampleSite.com'), ('Protocol', 'http')])]" 32 | 33 | v = sns.recurse_kwargs_list('subscription', sns.Subscription, **sample_kwargs_no_suscription) 34 | assert v is None 35 | -------------------------------------------------------------------------------- /respawn/util.py: -------------------------------------------------------------------------------- 1 | class 
SetNonEmptyPropertyMixin(object): 2 | def _set_property(self, k, v): 3 | if k and v: 4 | self[k] = v 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ Setup script. Used by easy_install and pip. """ 4 | 5 | import os 6 | from setuptools import setup, find_packages 7 | from setuptools.command.test import test 8 | import sys 9 | 10 | 11 | class PyTest(test): 12 | user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] 13 | 14 | def initialize_options(self): 15 | test.initialize_options(self) 16 | self.pytest_args = ['respawn'] 17 | 18 | def run_tests(self): 19 | import pytest 20 | errno = pytest.main(self.pytest_args) 21 | sys.exit(errno) 22 | 23 | 24 | def get_version(): 25 | try: 26 | # get version from './VERSION' 27 | src_root = os.path.dirname(__file__) 28 | if not src_root: 29 | src_root = '.' 30 | 31 | with open(src_root + '/VERSION', 'r') as f: 32 | version = f.readline ().strip() 33 | 34 | return version 35 | 36 | except Exception as e: 37 | raise RuntimeError('Could not extract version: %s' % e) 38 | 39 | # Check Python version. Required > 2.7, <3.x 40 | if sys.hexversion < 0x02070000 or sys.hexversion >= 0x03000000: 41 | raise RuntimeError("respawn requires Python 2.x (2.7 or higher)") 42 | 43 | setup_args = { 44 | 'name': 'respawn', 45 | 'version': get_version(), 46 | 'description': 'AWS CloudFormation Template generator from Yaml specifications.', 47 | 'url': 'https://github.com/dowjones/respawn/', 48 | 'license': 'ISC', 49 | 'keywords': 'aws cloudformation yaml', 50 | 'classifiers': [ 51 | 'Development Status :: 4 - Beta', 52 | 'Intended Audience :: Developers', 53 | 'Environment :: Console', 54 | 'License :: OSI Approved :: ISC License (ISCL)', 55 | 'Programming Language :: Python', 56 | 'Programming Language :: Python :: 2', 57 | 'Programming Language :: Python :: 2.7', 58 | 'Topic :: Utilities', 59 | 'Topic :: System :: Systems Administration', 60 | 'Operating System :: OS Independent' 61 | ], 62 | 'packages': find_packages(), 63 | 'package_data': {'respawn': ['VERSION']}, 64 | 'entry_points': { 65 | 'console_scripts': [ 66 | 'respawn=respawn.cli:generate', 67 | ], 68 | }, 69 | 'install_requires': [ 70 | 'cfn-pyplates', 71 | 'Jinja2', 72 | 'boto3', 73 | 'botocore' 74 | ], 75 | 'tests_require': ['pytest'], 76 | 'cmdclass': {'test': PyTest}, 77 | 'zip_safe': False, 78 | } 79 | 80 | setup(**setup_args) 81 | 82 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # content of: tox.ini , put in same dir as setup.py 2 | [tox] 3 | envlist=py27 4 | skip_missing_interpreters=true 5 | 6 | [testenv] 7 | deps= 8 | pytest 9 | coverage 10 | pytest-cov 11 | cfn-pyplates 12 | Jinja2 13 | setenv= 14 | PYTHONWARNINGS=all 15 | commands=py.test 16 | 17 | [pytest] 18 | python_functions=test_ 19 | norecursedirs=.tox .git 20 | 21 | [testenv:py27verbose] 22 | basepython=python 23 | commands= 24 | py.test --cov=respawn/ --cov-report term 25 | --------------------------------------------------------------------------------
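
A note on respawn/util.py above: SetNonEmptyPropertyMixin._set_property stores a key only when both the key and the value are truthy, which is presumably how optional properties that were never supplied in the YAML spec stay out of the generated CloudFormation template. Below is a minimal sketch of that behaviour; the SampleProperties dict subclass is hypothetical and used only for illustration, standing in for respawn's real resource classes.

from respawn.util import SetNonEmptyPropertyMixin


class SampleProperties(dict, SetNonEmptyPropertyMixin):
    """Hypothetical stand-in for a respawn resource; exists only to exercise the mixin."""


props = SampleProperties()
props._set_property('DBName', 'metadata')   # truthy key and value -> stored
props._set_property('KmsKeyId', None)       # falsy value -> silently skipped
props._set_property('Iops', '')             # empty string -> silently skipped
assert props == {'DBName': 'metadata'}

Note that the "if k and v" guard also drops values that are falsy but meaningful, such as 0, False, or an empty list, so a caller that ever needs one of those would have to set it directly rather than through _set_property.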