├── .github └── workflows │ └── ci.yaml ├── .gitignore ├── LICENSE ├── README.md ├── docs ├── Makefile └── source │ ├── conf.py │ ├── fundamentals │ ├── configuration.rst │ ├── index.rst │ └── templates.rst │ ├── guides │ └── step_by_step_walkthrough.rst │ ├── index.rst │ ├── quickstarts │ └── getting_started.rst │ └── requirements-docs.txt ├── examples ├── buckets.yaml ├── config.yaml ├── coreos.yaml └── infra.yaml ├── requirements.txt ├── setup.cfg ├── setup.py ├── stacks ├── __about__.py ├── __init__.py ├── __main__.py ├── aws.py ├── cf.py ├── cli.py ├── config.py ├── helpers.py ├── main.py └── states.py ├── tests ├── __init__.py ├── fixtures │ ├── aws_config │ ├── aws_credentials │ ├── config.d │ │ ├── 10-config.yaml │ │ └── 20-config.yaml │ ├── config_flat.yaml │ ├── config_with_envs.yaml │ ├── create_stack_template.yaml │ ├── invalid_template.yaml │ ├── invalid_template_with_null_value.yaml │ ├── load_yaml.yaml │ ├── no_metadata_template.yaml │ └── valid_template.yaml ├── test_cf.py └── test_config.py └── tox.ini /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, workflow_dispatch] 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: ["3.8", "3.9", "3.10", "3.11"] 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Setup Python 14 | uses: actions/setup-python@v2 15 | with: 16 | python-version: ${{ matrix.python-version }} 17 | - name: Install flake8 and Tox 18 | run: pip install flake8 tox 19 | - name: Lint with flake8 20 | run: | 21 | flake8 stacks --count --select=E9,F63,F7,F82 --show-source --statistics 22 | flake8 stacks --count --exit-zero --max-complexity=10 23 | - name: Run Tox 24 | run: tox -e py 25 | publish: 26 | needs: test 27 | if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v2 31 | - name: Set up Python 32 | uses: actions/setup-python@v2 33 | with: 34 | python-version: '3.x' 35 | - name: Install dependencies 36 | run: | 37 | python -m pip install --upgrade pip 38 | pip install setuptools wheel 39 | - name: Build 40 | run: python setup.py sdist bdist_wheel 41 | - name: Publish 42 | uses: pypa/gh-action-pypi-publish@master 43 | with: 44 | user: __token__ 45 | password: ${{ secrets.PYPI_PASSWORD }} 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | venv/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | *.pex 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *,cover 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | 56 | # Sphinx documentation 57 | docs/_build/ 58 | docs/build/ 59 | 60 | # PyBuilder 61 | target/ 62 | 63 | # Rope stuff 64 | .ropeproject/ 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Open Source Projects from State 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # stacks 2 | ![Build Status](https://github.com/cfstacks/stacks/workflows/CI/badge.svg?branch=master) 3 | [![Documentation Status](https://readthedocs.org/projects/stacks/badge/?version=latest)](https://stacks.tools/en/stable/?badge=latest) 4 | 5 | Stacks is a CloudFormation management tool that allows you to define AWS 6 | resource configuration in YAML with Jinja2 templating. 7 | 8 | Using stacks, you create flexible and declarative templates that deploy various 9 | AWS resources. 10 | 11 | Use stacks to define and configure the infrastructure for your services, to 12 | create repeatable deployments that you can reuse, and to create different 13 | deployment environments. For example, you can use stacks to deploy 14 | production, staging, and development environments for the same service, and 15 | take advantage of stacks's templating syntax so you can selectively roll out or 16 | test new features. 17 | 18 | Stacks does not make any assumptions about how you should manage your infrastructure. 19 | If you use CloudFormation JSON templates already, you can simply convert them to 20 | YAML, start using stacks and grow from there. 
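To give a feel for the format, here is a small template along the lines of `examples/buckets.yaml` from this repository: the first YAML document is stack metadata, the second is a plain CloudFormation template, and both can use Jinja2. `env` and `region` are provided by stacks itself, while `buckets_count` and `bucket_name_prefix` are properties you pass in yourself.

```yaml
---
name: s3-buckets

---
AWSTemplateFormatVersion: '2010-09-09'
Description: S3 buckets
Resources:
{% for n in range(buckets_count|int) %}
  S3Bucket{{ loop.index0 }}:
    Type: AWS::S3::Bucket
    Properties:
      BucketName: {{ env }}-{{ bucket_name_prefix }}{{ loop.index0 }}-{{ region }}
{% endfor -%}
```

Templates are rendered locally before anything is sent to CloudFormation, so Jinja2 loops and conditionals can be used anywhere in either document.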
21 | 22 | 23 | ## Features 24 | 25 | * Templates written in YAML 26 | * For loops, conditionals and more with the power of Jinja2 27 | * Cross-stack resource referencing 28 | * Reusable snippets support 29 | * Multiple environments 30 | * Flexible configuration 31 | * Stack events streaming 32 | 33 | 34 | ## [Documentation](https://stacks.readthedocs.io/en/latest/) 35 | 36 | #### Quickstarts 37 | 38 | * [Getting Started](https://stacks.readthedocs.io/en/latest/quickstarts/getting_started.html) 39 | 40 | #### Guides 41 | 42 | * [Step-by-Step Walkthrough](https://stacks.readthedocs.io/en/latest/guides/step_by_step_walkthrough.html) 43 | 44 | 45 | ## Contribution 46 | 47 | Please get involved in helping us improve stacks! It's very easy to get started. 48 | 49 | Before you make a change it's best to file a bug report or feature request as a [GitHub issue](https://github.com/cfstacks/stacks/issues) 50 | so that no one else ends up working on a similar change. 51 | 52 | You can use pip to install stacks in editable mode. This means changes you make to the 53 | checkout will affect the global `stacks` command. 54 | 55 | ``` 56 | $ git clone https://github.com/cfstacks/stacks.git && cd stacks 57 | $ python3 -m venv venv 58 | $ source ./venv/bin/activate 59 | $ pip install -r requirements.txt 60 | $ pip install -e . 61 | ``` 62 | 63 | Once you've got a change, please open a pull request against master and we'll review and merge it! 64 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 
16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/stacks.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/stacks.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/stacks" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/stacks" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 
173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import stacks 5 | import sphinx_bootstrap_theme 6 | 7 | # If extensions (or modules to document with autodoc) are in another directory, 8 | # add these directories to sys.path here. If the directory is relative to the 9 | # documentation root, use os.path.abspath to make it absolute, like shown here. 10 | #sys.path.insert(0, os.path.abspath('.')) 11 | 12 | # -- General configuration ------------------------------------------------ 13 | 14 | # If your documentation needs a minimal Sphinx version, state it here. 15 | #needs_sphinx = '1.0' 16 | 17 | # Add any Sphinx extension module names here, as strings. They can be 18 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 19 | # ones. 20 | extensions = [] 21 | 22 | # Add any paths that contain templates here, relative to this directory. 23 | templates_path = ['_templates'] 24 | 25 | # The suffix(es) of source filenames. 26 | # You can specify multiple suffix as a list of string: 27 | # source_suffix = ['.rst', '.md'] 28 | source_suffix = '.rst' 29 | 30 | # The encoding of source files. 31 | #source_encoding = 'utf-8-sig' 32 | 33 | # The master toctree document. 34 | master_doc = 'index' 35 | 36 | # General information about the project. 37 | project = 'stacks' 38 | copyright = '2016, Vaidas Jablonskis' 39 | author = 'Vaidas Jablonskis' 40 | 41 | # The version info for the project you're documenting, acts as replacement for 42 | # |version| and |release|, also used in various other places throughout the 43 | # built documents. 44 | # 45 | # The short X.Y version. 46 | version = stacks.__version__ 47 | # The full version, including alpha/beta/rc tags. 48 | release = version 49 | 50 | # The language for content autogenerated by Sphinx. Refer to documentation 51 | # for a list of supported languages. 52 | # 53 | # This is also used if you do content translation via gettext catalogs. 54 | # Usually you set "language" from the command line for these cases. 55 | language = None 56 | 57 | # There are two options for replacing |today|: either, you set today to some 58 | # non-false value, then it is used: 59 | #today = '' 60 | # Else, today_fmt is used as the format for a strftime call. 61 | #today_fmt = '%B %d, %Y' 62 | 63 | # List of patterns, relative to source directory, that match files and 64 | # directories to ignore when looking for source files. 65 | exclude_patterns = [] 66 | 67 | # The reST default role (used for this markup: `text`) to use for all 68 | # documents. 
69 | #default_role = None 70 | 71 | # If true, '()' will be appended to :func: etc. cross-reference text. 72 | #add_function_parentheses = True 73 | 74 | # If true, the current module name will be prepended to all description 75 | # unit titles (such as .. function::). 76 | #add_module_names = True 77 | 78 | # If true, sectionauthor and moduleauthor directives will be shown in the 79 | # output. They are ignored by default. 80 | #show_authors = False 81 | 82 | # The name of the Pygments (syntax highlighting) style to use. 83 | pygments_style = 'sphinx' 84 | 85 | # A list of ignored prefixes for module index sorting. 86 | #modindex_common_prefix = [] 87 | 88 | # If true, keep warnings as "system message" paragraphs in the built documents. 89 | #keep_warnings = False 90 | 91 | # If true, `todo` and `todoList` produce output, else they produce nothing. 92 | todo_include_todos = False 93 | 94 | 95 | # -- Options for HTML output ---------------------------------------------- 96 | 97 | # The theme to use for HTML and HTML Help pages. See the documentation for 98 | # a list of builtin themes. 99 | html_theme = 'bootstrap' 100 | 101 | # Theme options are theme-specific and customize the look and feel of a theme 102 | # further. For a list of options available for each theme, see the 103 | # documentation. 104 | html_theme_options = { 105 | 'bootswatch_theme': 'flatly', 106 | 'navbar_sidebarrel': False, 107 | 'navbar_pagenav': False, 108 | 'source_link_position': False, 109 | 'navbar_links': [ 110 | ('Contribute', 'https://github.com/cfstacks/stacks', True), 111 | ('Issues', 'https://github.com/cfstacks/stacks/issues', True), 112 | ], 113 | } 114 | 115 | # Add any paths that contain custom themes here, relative to this directory. 116 | html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() 117 | 118 | # The name for this set of Sphinx documents. If None, it defaults to 119 | # " v documentation". 120 | html_title = 'Manage CloudFormation with Templates Written in YAML' 121 | 122 | # A shorter title for the navigation bar. Default is the same as html_title. 123 | html_short_title = 'stacks' 124 | 125 | # The name of an image file (relative to this directory) to place at the top 126 | # of the sidebar. 127 | # html_logo = None 128 | 129 | # The name of an image file (within the static path) to use as favicon of the 130 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 131 | # pixels large. 132 | #html_favicon = None 133 | 134 | # Add any paths that contain custom static files (such as style sheets) here, 135 | # relative to this directory. They are copied after the builtin static files, 136 | # so a file named "default.css" will overwrite the builtin "default.css". 137 | html_static_path = ['_static'] 138 | 139 | # Add any extra paths that contain custom files (such as robots.txt or 140 | # .htaccess) here, relative to this directory. These files are copied 141 | # directly to the root of the documentation. 142 | #html_extra_path = [] 143 | 144 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 145 | # using the given strftime format. 146 | #html_last_updated_fmt = '%b %d, %Y' 147 | 148 | # If true, SmartyPants will be used to convert quotes and dashes to 149 | # typographically correct entities. 150 | html_use_smartypants = False 151 | 152 | # Custom sidebar templates, maps document names to template names. 
153 | #html_sidebars = {} 154 | html_sidebars = { 155 | '**': ['localtoc.html'], 156 | } 157 | 158 | # Additional templates that should be rendered to pages, maps page names to 159 | # template names. 160 | #html_additional_pages = {} 161 | 162 | # If false, no module index is generated. 163 | #html_domain_indices = True 164 | 165 | # If false, no index is generated. 166 | #html_use_index = True 167 | 168 | # If true, the index is split into individual pages for each letter. 169 | #html_split_index = False 170 | 171 | # If true, links to the reST sources are added to the pages. 172 | html_show_sourcelink = True 173 | 174 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 175 | #html_show_sphinx = True 176 | 177 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 178 | #html_show_copyright = True 179 | 180 | # If true, an OpenSearch description file will be output, and all pages will 181 | # contain a tag referring to it. The value of this option must be the 182 | # base URL from which the finished HTML is served. 183 | #html_use_opensearch = '' 184 | 185 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 186 | #html_file_suffix = None 187 | 188 | # Language to be used for generating the HTML full-text search index. 189 | # Sphinx supports the following languages: 190 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 191 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' 192 | #html_search_language = 'en' 193 | 194 | # A dictionary with options for the search language support, empty by default. 195 | # Now only 'ja' uses this config value 196 | #html_search_options = {'type': 'default'} 197 | 198 | # The name of a javascript file (relative to the configuration directory) that 199 | # implements a search results scorer. If empty, the default will be used. 200 | #html_search_scorer = 'scorer.js' 201 | 202 | # Output file base name for HTML help builder. 203 | htmlhelp_basename = 'stacksdoc' 204 | 205 | -------------------------------------------------------------------------------- /docs/source/fundamentals/configuration.rst: -------------------------------------------------------------------------------- 1 | Configuration 2 | ============= 3 | 4 | Profiles 5 | -------- 6 | 7 | Environments 8 | ------------ 9 | -------------------------------------------------------------------------------- /docs/source/fundamentals/index.rst: -------------------------------------------------------------------------------- 1 | Fundamentals 2 | ============ 3 | 4 | -------------------------------------------------------------------------------- /docs/source/fundamentals/templates.rst: -------------------------------------------------------------------------------- 1 | Templates 2 | ========= 3 | 4 | Metadata document 5 | ----------------- 6 | 7 | Resources document 8 | ------------------ 9 | 10 | Snippets 11 | -------- 12 | -------------------------------------------------------------------------------- /docs/source/guides/step_by_step_walkthrough.rst: -------------------------------------------------------------------------------- 1 | Step-by-Step Walkthrough 2 | ======================== 3 | 4 | Use this step-by-step guide as a comprehensive walkthrough of stacks. The goal 5 | of this guide is to build a simple deployment and then gradually create a more 6 | complex deployment using features of stacks that you will probably reuse in 7 | your own deployments. 
8 | 9 | This guide assumes you are familiar with 10 | YAML syntax and are comfortable running commands in a Linux 11 | terminal. 12 | 13 | .. note:: If this is your first time using stacks, try the :doc:`/quickstarts/getting_started` first. 14 | 15 | Installation and Setup 16 | ---------------------- 17 | Follow the setup steps to prepare your local environment to use stacks. 18 | 19 | Install stacks 20 | ^^^^^^^^^^^^^^ 21 | 22 | 23 | 24 | .. parsed-literal:: 25 | 26 | $ pip install cfstacks 27 | 28 | 29 | Configure Authentication 30 | ^^^^^^^^^^^^^^^^^^^^^^^^ 31 | Stacks supports standard AWS SDK configuration, which means you can interact 32 | with multiple AWS accounts via profiles. 33 | 34 | Be sure to replace the access keys below with your own. 35 | 36 | AWS credentials file 37 | """""""""""""""""""" 38 | .. code-block:: shell 39 | 40 | $ install -d -m 0700 ~/.aws 41 | $ vim ~/.aws/credentials 42 | 43 | .. code-block:: ini 44 | 45 | [project1] 46 | aws_access_key_id = AKIAIqH2jHP2BGMKPikN 47 | aws_secret_access_key = HFq+nQfq59yStF+qLl6/rW0AHFq+nQfq59yStF+q 48 | output = json 49 | region = us-east-1 50 | 51 | [project2] 52 | aws_access_key_id = AKIAIj2UPlaLJgMmNejS 53 | aws_secret_access_key = UZSYq2dzZqLzc1s+sXENnzEmUZSYq2dzZqLzc1s+ 54 | output = json 55 | 56 | As you can see, the ``project2`` profile does not have a region specified. In this 57 | case, the region has to be provided either via the ``AWS_DEFAULT_REGION`` 58 | environment variable or via the command line argument ``stacks --region us-west-1``. 59 | 60 | 61 | Environment variables 62 | """"""""""""""""""""" 63 | .. code-block:: shell 64 | 65 | export AWS_ACCESS_KEY_ID="AKIAIqH2jHP2BGMKPikN" 66 | export AWS_SECRET_ACCESS_KEY="HFq+nQfq59yStF+qLl6/rW0AHFq+nQfq59yStF+q" 67 | export AWS_DEFAULT_REGION="us-east-1" 68 | 69 | Now that you have set up your environment, you can begin constructing your 70 | deployment. 71 | 72 | 73 | Create a Deployment 74 | ------------------- 75 | 76 | For this guide, let's define a set of templates that will create a new multi-AZ 77 | VPC and an autoscaling group to launch an instance in the new VPC. 78 | 79 | If you were working with native CloudFormation JSON templates, you would have 80 | to define all your AWS resources for this deployment in a single JSON template, 81 | because CloudFormation JSON templates are static - there is no way to reference 82 | resources created by different CloudFormation templates. 83 | 84 | Stacks aims to solve this problem by allowing you to logically separate out 85 | AWS resources. As your infrastructure grows, it makes sense to have your VPC 86 | defined in a separate template from S3 buckets or RDS instances. 87 | 88 | First of all, we need to create a local directory structure; it will help keep 89 | all the files you create in this guide organized: 90 | 91 | .. code-block:: shell 92 | 93 | $ mkdir -p step-by-step/{templates/snippets,config.d} 94 | $ cd step-by-step 95 | 96 | 97 | .. _create-properties-file: 98 | 99 | Create a properties file 100 | ^^^^^^^^^^^^^^^^^^^^^^^^ 101 | If you remember from the :doc:`/quickstarts/getting_started` guide, we used command 102 | line arguments to specify template properties. In this guide we are going to 103 | create a properties configuration file, ``config.yaml``. A properties file allows 104 | us to set key/value pairs and reference values from within templates. 105 | 106 | .. code-block:: shell 107 | 108 | $ vim config.yaml 109 | 110 | .. 
code-block:: yaml 111 | 112 | --- 113 | vpc_name: MyVPC 114 | vpc_cidr: 10.10.0.0/16 115 | vpc_subnets: 116 | - 10.10.0.0/24 117 | - 10.10.1.0/24 118 | - 10.10.2.0/24 119 | 120 | ami_name: CoreOS-beta-877.1.0-hvm 121 | instance_type: t3.micro 122 | 123 | 124 | Define a VPC template 125 | ^^^^^^^^^^^^^^^^^^^^^ 126 | From this point, we assume that you are familiar with CloudFormation templates. 127 | Let's jump right in and start by creating a VPC template. 128 | 129 | .. code-block:: shell 130 | 131 | $ vim templates/vpc.yaml 132 | 133 | .. code-block:: jinja 134 | 135 | --- 136 | name: {{ env }}-vpc 137 | disable_rollback: true 138 | tags: 139 | - key: Project 140 | value: step-by-step 141 | 142 | --- 143 | AWSTemplateFormatVersion: '2010-09-09' 144 | Description: VPC stack in {{ env }} environment 145 | Resources: 146 | VPC: 147 | Type: AWS::EC2::VPC 148 | Properties: 149 | CidrBlock: {{ vpc_cidr }} 150 | EnableDnsSupport: true 151 | EnableDnsHostnames: true 152 | Tags: 153 | - Key: Name 154 | Value: {{ vpc_name }} 155 | - Key: Env 156 | Value: {{ env }} 157 | 158 | DefaultSG: 159 | Type: AWS::EC2::SecurityGroup 160 | Properties: 161 | VpcId: 162 | Ref: VPC 163 | GroupDescription: {{ env }}-vpc Default SG 164 | SecurityGroupEgress: 165 | - IpProtocol: -1 166 | CidrIp: 0.0.0.0/0 167 | FromPort: -1 168 | ToPort: -1 169 | Tags: 170 | - Key: Name 171 | Value: {{ env }}-default 172 | - Key: Env 173 | Value: {{ env }} 174 | 175 | # Allow traffic within subnets 176 | AllTrafficSGIn: 177 | Type: AWS::EC2::SecurityGroupIngress 178 | Properties: 179 | GroupId: 180 | Ref: DefaultSG 181 | IpProtocol: -1 182 | SourceSecurityGroupId: 183 | Ref: DefaultSG 184 | FromPort: -1 185 | ToPort: -1 186 | 187 | # Create a subnet in each AZ 188 | {% for n in vpc_subnets %} 189 | Subnet{{ loop.index0 }}: 190 | Type: AWS::EC2::Subnet 191 | Properties: 192 | AvailabilityZone: 193 | Fn::Select: 194 | - '{{ loop.index0 }}' 195 | - Fn::GetAZs: '' 196 | VpcId: 197 | Ref: VPC 198 | CidrBlock: {{ n }} 199 | Tags: 200 | - Key: Name 201 | Value: {{ env }}-az{{ loop.index0 }} 202 | - Key: Env 203 | Value: {{ env }} 204 | 205 | SubnetRouteTableAssociation{{ loop.index0 }}: 206 | Type: AWS::EC2::SubnetRouteTableAssociation 207 | Properties: 208 | SubnetId: 209 | Ref: Subnet{{ loop.index0 }} 210 | RouteTableId: 211 | Ref: RouteTable 212 | {%- endfor %} 213 | 214 | InternetGateway: 215 | Type: AWS::EC2::InternetGateway 216 | Properties: 217 | Tags: 218 | - Key: Name 219 | Value: {{ env }}-igw 220 | - Key: Env 221 | Value: {{ env }} 222 | 223 | AttachGateway: 224 | Type: AWS::EC2::VPCGatewayAttachment 225 | Properties: 226 | VpcId: 227 | Ref: VPC 228 | InternetGatewayId: 229 | Ref: InternetGateway 230 | 231 | RouteTable: 232 | Type: AWS::EC2::RouteTable 233 | Properties: 234 | VpcId: 235 | Ref: VPC 236 | Tags: 237 | - Key: Name 238 | Value: {{ env }}-default-routetable 239 | - Key: Env 240 | Value: {{ env }} 241 | 242 | Route: 243 | Type: AWS::EC2::Route 244 | DependsOn: AttachGateway 245 | Properties: 246 | RouteTableId: 247 | Ref: RouteTable 248 | DestinationCidrBlock: 0.0.0.0/0 249 | GatewayId: 250 | Ref: InternetGateway 251 | 252 | There are two YAML documents in the above template. First one is a stack metadata 253 | document and the second one is where we define AWS resources. Both documents 254 | can be templated using Jinja2 syntax. In fact, the metadata document says that 255 | a stack name starts with an environment name, which makes it easy to reuse this 256 | template for different environments. 
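For instance, assuming the environment is called ``learning`` (as it will be later in this guide), the metadata document above would render to roughly the following (an illustrative sketch, not output copied from a real run):

.. code-block:: yaml

    ---
    name: learning-vpc
    disable_rollback: true
    tags:
      - key: Project
        value: step-by-step

The same template can therefore produce ``staging-vpc``, ``prod-vpc`` and so on simply by switching the environment.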
257 | 258 | We defined the following AWS resources in the VPC template: 259 | 260 | * **VPC** 261 | 262 | ``vpc_cidr`` and ``vpc_name`` properties are being referenced from the 263 | properties file. Yes, we could hard-code them into the template, but by not 264 | doing so, we made our template more flexible. 265 | 266 | * **DefaultSG** 267 | 268 | A default security group with a single rule allowing all egress traffic. 269 | 270 | * **AllTrafficSGIn** 271 | 272 | Adds an ingress rule to ``DefaultSG`` to allow all traffic between instances 273 | with the same security group. 274 | 275 | * **SubnetN** 276 | 277 | We iterate over ``vpc_subnets`` and create a subnet in each availability 278 | zone. 279 | 280 | The rest should be self-explanatory. 281 | 282 | Now that we have our VPC template defined, it's time to create another template 283 | to define our auto scaling group. 284 | 285 | 286 | Define an AutoScaling Group Template 287 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 288 | 289 | .. code-block:: shell 290 | 291 | $ vim templates/coreos-asg.yaml 292 | 293 | .. code-block:: jinja 294 | 295 | --- 296 | name: {{ env }}-coreos-asg 297 | tags: 298 | - key: Project 299 | value: step-by-step 300 | 301 | --- 302 | AWSTemplateFormatVersion: '2010-09-09' 303 | Description: CoreOS ASG in {{ env }} environment 304 | Resources: 305 | ASG: 306 | Type: AWS::AutoScaling::AutoScalingGroup 307 | Properties: 308 | AvailabilityZones: 309 | {% for n in range(vpc_subnets|count) %} 310 | - Fn::Select: 311 | - '{{ n }}' 312 | - Fn::GetAZs: '' 313 | {% endfor -%} 314 | LaunchConfigurationName: 315 | Ref: LaunchConfiguration 316 | VPCZoneIdentifier: 317 | {% for n in range(vpc_subnets|count) %} 318 | - {{ get_stack_resource(cf_conn, env + '-vpc', 'Subnet' + n|string) }} 319 | {% endfor -%} 320 | TerminationPolicies: 321 | - 'OldestInstance' 322 | - 'Default' 323 | MaxSize: 1 324 | MinSize: 1 325 | Tags: 326 | - Key: Name 327 | Value: {{ env }}-coreos 328 | PropagateAtLaunch: true 329 | - Key: Env 330 | Value: {{ env }} 331 | PropagateAtLaunch: true 332 | UpdatePolicy: 333 | AutoScalingRollingUpdate: 334 | PauseTime: PT1S 335 | 336 | LaunchConfiguration: 337 | Type: AWS::AutoScaling::LaunchConfiguration 338 | Properties: 339 | AssociatePublicIpAddress: true 340 | ImageId: {{ get_ami_id(ec2_conn, ami_name) }} 341 | InstanceMonitoring: false 342 | InstanceType: {{ instance_type }} 343 | SecurityGroups: 344 | - {{ get_stack_resource(cf_conn, env + '-vpc', 'DefaultSG') }} 345 | 346 | 347 | This template demonstrates the power of cross-stack referencing. 348 | 349 | In our VPC stack we created 3 subnets, one in each availability zone. An 350 | autoscaling group can dynamically spread instances across availability zones, 351 | but for that to work, it needs to be given a list of availability zones and a 352 | list of VPC subnets corresponding to each particular availability zone. Using 353 | stacks we can simply iterate over a number of ``vpc_subnets`` and get each 354 | subnet's physical id using ``get_stack_resource()`` helper function. 355 | 356 | If you look at ``LaunchConfiguration`` resource definition, you notice that 357 | instead of specifying an AMI id for the CoreOS instances, we used 358 | ``get_ami_id()`` function to get the correct AMI id, by providing just 359 | ``ami_name``, which we set in our properties file, see 360 | :ref:`create-properties-file`. 
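To make the mechanics concrete, here is an illustrative sketch of what the helper calls used above resolve to at render time. The IDs shown in the comments are invented examples, not real output:

.. code-block:: jinja

    # illustrative only - the resolved IDs are made up
    SecurityGroups:
      - {{ get_stack_resource(cf_conn, env + '-vpc', 'DefaultSG') }}  # renders to something like sg-0a1b2c3d
    ImageId: {{ get_ami_id(ec2_conn, ami_name) }}                     # renders to something like ami-0f00ba42

Because this resolution happens locally while the template is rendered, the template that is actually sent to CloudFormation contains only the literal IDs.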
361 | 362 | This allows us to reuse this template in different regions; otherwise you 363 | would have to maintain some sort of AMI-to-region mapping, because AMIs are 364 | region specific. 365 | 366 | Now that we have both templates defined, let's deploy them. 367 | 368 | 369 | Deploy templates 370 | ---------------- 371 | We are going to be deploying stacks using the ``project1`` profile. If you prefer 372 | using environment variables for configuring AWS authentication, then you 373 | don't have to specify the profile. 374 | 375 | The sample templates require you to provide an environment name; for this guide, let's call 376 | it ``learning``. The properties file is environment-aware and can have a common set 377 | of properties as well as per-environment ones. See :doc:`/fundamentals/configuration`. 378 | 379 | There are two ways to set an environment: 380 | 381 | * ``STACKS_ENV`` environment variable 382 | * ``--env`` or ``-e`` command line argument to the subcommands 383 | 384 | Create the VPC stack: 385 | 386 | .. code-block:: shell 387 | 388 | $ stacks -p project1 create -e learning -t templates/vpc.yaml -f 389 | 390 | 391 | Wait for the VPC stack to finish creating, because we know that the coreos-asg 392 | stack depends on the VPC and other infrastructure resources being there. 393 | 394 | .. code-block:: shell 395 | 396 | $ stacks -p project1 create -e learning -t templates/coreos-asg.yaml -f 397 | 398 | 399 | Check the status: 400 | 401 | .. code-block:: shell 402 | 403 | $ stacks -p project1 list 404 | learning-coreos-asg CREATE_COMPLETE 405 | learning-vpc CREATE_COMPLETE 406 | 407 | 408 | Deployment update 409 | ----------------- 410 | After you have successfully launched your stacks, you might want to update or change the deployment 411 | as your application evolves. Stacks allows you to change a stack by: 412 | 413 | * Adding resources to or removing resources from the deployment 414 | * Updating some properties of existing resources in your stack 415 | 416 | Remember that stacks uses CloudFormation, which uses the underlying APIs of AWS 417 | services to manage resources. If an API doesn't support a certain action, 418 | stacks cannot perform that action either. For example, CloudFormation can 419 | only update an existing resource if there is an update method in the 420 | corresponding API. Some resources have to be replaced instead. 421 | 422 | Let's change the CoreOS version in ``config.yaml``, so that it now looks like 423 | this: 424 | 425 | .. code-block:: yaml 426 | :emphasize-lines: 9 427 | 428 | --- 429 | vpc_name: MyVPC 430 | vpc_cidr: 10.10.0.0/16 431 | vpc_subnets: 432 | - 10.10.0.0/24 433 | - 10.10.1.0/24 434 | - 10.10.2.0/24 435 | 436 | ami_name: CoreOS-alpha-899.1.0-hvm 437 | instance_type: t3.micro 438 | 439 | Since ``ami_name`` is not used anywhere in the VPC stack, we only need to update 440 | the autoscaling group stack: 441 | 442 | .. code-block:: shell 443 | 444 | $ stacks -p project1 update -e learning -t templates/coreos-asg.yaml 445 | 446 | This time we didn't set the ``-f`` command line flag to follow events, which means 447 | that stacks fires an API call and exits. But luckily, there is a way to get 448 | events from an existing stack: 449 | 450 | .. code-block:: shell 451 | :emphasize-lines: 2 452 | 453 | $ stacks -p project1 list 454 | learning-coreos-asg UPDATE_IN_PROGRESS 455 | learning-vpc CREATE_COMPLETE 456 | 457 | $ stacks -p project1 events -f learning-coreos-asg 458 | 459 | Wait until the update has finished, and check the status: 460 | 461 | .. 
code-block:: shell 462 | 463 | $ stacks --profile project1 list 464 | learning-coreos-asg UPDATE_COMPLETE 465 | learning-vpc CREATE_COMPLETE 466 | 467 | 468 | Clean up 469 | -------- 470 | AWS resources incur charges, so you should delete this deployment. 471 | Deleting this deployment takes care of deleting all the resources 472 | created by both templates. 473 | 474 | To delete this deployment: 475 | 476 | .. code-block:: shell 477 | 478 | $ stacks -p project1 delete learning-coreos-asg -f --yes 479 | $ stacks -p project1 delete learning-vpc -f --yes 480 | 481 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | stacks 2 | ====== 3 | Stacks is a CloudFormation management tool that allows you to define AWS 4 | resource configuration in YAML with `Jinja2 `_ 5 | templating. 6 | 7 | Using stacks, you create flexible and declarative templates that deploy various 8 | AWS resources. 9 | 10 | Use stacks to define and configure the infrastructure for your services, to 11 | create repeatable deployments that you can reuse, and to create different 12 | deployment environments. For example, you can use stacks to deploy 13 | production, staging, and development environments for the same service, and 14 | take advantage of stacks's templating syntax so you can selectively roll out or 15 | test new features. 16 | 17 | Stacks does not make any assumptions about how you should manage your infrastructure. 18 | If you use CloudFormation JSON templates already, you can simply convert them to 19 | YAML, start using stacks and grow from there. 20 | 21 | Features 22 | -------- 23 | 24 | * Templates written in YAML 25 | * For loops, conditionals and more with the power of Jinja2 26 | * Cross-stack resource referencing 27 | * Reusable snippets support 28 | * Multiple environments 29 | * Flexible configuration 30 | * Stack events streaming 31 | 32 | 33 | Documentation 34 | ------------- 35 | 36 | Quickstarts 37 | ^^^^^^^^^^^ 38 | 39 | .. toctree:: 40 | :maxdepth: 2 41 | 42 | quickstarts/getting_started 43 | 44 | 45 | Guides 46 | ^^^^^^ 47 | 48 | .. toctree:: 49 | :maxdepth: 2 50 | 51 | guides/step_by_step_walkthrough 52 | 53 | 54 | Fundamentals 55 | ^^^^^^^^^^^^ 56 | 57 | .. toctree:: 58 | :maxdepth: 2 59 | 60 | fundamentals/configuration 61 | fundamentals/templates 62 | 63 | -------------------------------------------------------------------------------- /docs/source/quickstarts/getting_started.rst: -------------------------------------------------------------------------------- 1 | Getting Started Guide 2 | ===================== 3 | Use this Getting Started Guide to get you up and running as quickly as possible 4 | with your first deployment. 5 | 6 | The goal of this guide is to create a given number of S3 buckets. The 7 | quickstart assumes you're familiar with YAML syntax, comfortable with the Linux 8 | command line and have a working Python 3.x installation. 9 | 10 | Create an AWS account 11 | ---------------------- 12 | .. warning:: 13 | We are not responsible for any charges that you may incur. However, this guide 14 | does not create AWS resources which are outside of the free tier. 15 | 16 | If you don't already have an AWS account, you can sign up for a free trial: 17 | 18 | 1. Go to https://aws.amazon.com/free 19 | 2. Follow the instructions to sign up for the free trial. 20 | 3. Go to IAM and create a user with the ``AdministratorAccess`` policy 21 | attached and generate access keys. 
22 | 23 | 24 | Install stacks 25 | -------------- 26 | .. parsed-literal:: 27 | 28 | $ pip install cfstacks 29 | 30 | Export your AWS access keys and default region to the shell environment, replacing 31 | ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` with your own keys. 32 | 33 | .. code-block:: shell 34 | 35 | export AWS_ACCESS_KEY_ID=AKIAIERE3KJY2EJB8U2Q 36 | export AWS_SECRET_ACCESS_KEY=BAA3Vpu3X2X493WG63sVRDovoXB8lxgKi3qU9YYl 37 | export AWS_DEFAULT_REGION=us-east-1 38 | 39 | 40 | Define a template 41 | ----------------- 42 | A template describes all the resources for a single CloudFormation stack. A 43 | template is written in YAML syntax and is made of two documents: a stack metadata 44 | document, followed by a resources document. 45 | 46 | * The metadata document defines stack-specific properties, like stack name, 47 | tags, rollback policy, etc. 48 | * The resources document defines AWS resources and is a standard CloudFormation 49 | template, only in YAML. 50 | 51 | Create a template file named ``buckets.yaml``: 52 | 53 | .. code-block:: jinja 54 | 55 | --- 56 | name: s3-buckets 57 | 58 | --- 59 | AWSTemplateFormatVersion: '2010-09-09' 60 | Description: S3 buckets 61 | Resources: 62 | {% for n in range(buckets_count|int) %} 63 | S3Bucket{{ loop.index0 }}: 64 | Type: AWS::S3::Bucket 65 | Properties: 66 | BucketName: {{ env }}-{{ bucket_name_prefix }}{{ loop.index0 }}-{{ region }} 67 | {% endfor -%} 68 | 69 | In summary, the template describes that the ``s3-buckets`` stack contains 70 | a number of ``AWS::S3::Bucket`` type resources. The number of buckets is 71 | defined via the ``buckets_count`` property. 72 | 73 | 74 | Deploy your template 75 | -------------------- 76 | 77 | Let's create our first stack: 78 | 79 | .. code-block:: shell 80 | 81 | $ stacks create --template buckets.yaml --env dev 82 | Required properties not set: buckets_count,bucket_name_prefix 83 | 84 | We get an error about missing properties. To fix that, we must specify the 85 | missing properties. Add the ``--follow`` flag to follow stack events: 86 | 87 | .. code-block:: shell 88 | 89 | $ stacks create --template buckets.yaml --env dev --follow \ 90 | --property bucket_name_prefix=my-awesome-bucket \ 91 | --property buckets_count=3 92 | 2018-09-10 18:30:08.428000+01:00 CREATE_IN_PROGRESS AWS::CloudFormation::Stack s3-buckets User Initiated 93 | 2018-09-10 18:30:10.883000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket0 94 | 2018-09-10 18:30:10.963000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket2 95 | 2018-09-10 18:30:10.966000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket1 96 | 2018-09-10 18:30:11.732000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket2 Resource creation Initiated 97 | 2018-09-10 18:30:11.837000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket0 Resource creation Initiated 98 | 2018-09-10 18:30:11.923000+01:00 CREATE_IN_PROGRESS AWS::S3::Bucket S3Bucket1 Resource creation Initiated 99 | 2018-09-10 18:30:32.608000+01:00 CREATE_COMPLETE AWS::S3::Bucket S3Bucket2 100 | 2018-09-10 18:30:32.615000+01:00 CREATE_COMPLETE AWS::S3::Bucket S3Bucket0 101 | 2018-09-10 18:30:32.782000+01:00 CREATE_COMPLETE AWS::S3::Bucket S3Bucket1 102 | 2018-09-10 18:30:34.229000+01:00 CREATE_COMPLETE AWS::CloudFormation::Stack s3-buckets 103 | 104 | Use of ``--env`` is optional; however, it's good practice to separate resources by environment. 105 | 106 | See your new deployment 107 | ----------------------- 108 | 109 | See the status of your new stack by running: 110 | 111 | .. 
code-block:: shell 112 | 113 | $ stacks list 114 | s3-buckets CREATE_COMPLETE 115 | 116 | If you want to see what resources have been created by the stack, you can do that by running: 117 | 118 | .. code-block:: shell 119 | 120 | $ stacks resources s3-buckets 121 | S3Bucket0 dev-my-awesome-bucket0-us-east-1 AWS::S3::Bucket CREATE_COMPLETE 122 | S3Bucket1 dev-my-awesome-bucket1-us-east-1 AWS::S3::Bucket CREATE_COMPLETE 123 | S3Bucket2 dev-my-awesome-bucket2-us-east-1 AWS::S3::Bucket CREATE_COMPLETE 124 | 125 | 126 | Clean up 127 | -------- 128 | Once you are done with your deployment, make sure to delete it to avoid being 129 | charged for resources by AWS. 130 | 131 | To delete the deployment, run: 132 | 133 | .. code-block:: shell 134 | 135 | $ stacks delete s3-buckets -y 136 | 137 | 138 | Next steps 139 | ---------- 140 | Now that you have an idea of how stacks enhances CloudFormation, we recommend 141 | going through :doc:`/guides/step_by_step_walkthrough` for more comprehensive 142 | walkthrough. 143 | -------------------------------------------------------------------------------- /docs/source/requirements-docs.txt: -------------------------------------------------------------------------------- 1 | sphinx_bootstrap_theme 2 | -------------------------------------------------------------------------------- /examples/buckets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: s3-buckets 3 | 4 | --- 5 | AWSTemplateFormatVersion: '2010-09-09' 6 | Description: S3 buckets 7 | Resources: 8 | {% for n in range(buckets_count|int) %} 9 | S3Bucket{{ loop.index0 }}: 10 | Type: AWS::S3::Bucket 11 | Properties: 12 | BucketName: {{ env }}-{{ bucket_name_prefix }}{{ loop.index0 }}-{{ region }} 13 | {% endfor -%} 14 | -------------------------------------------------------------------------------- /examples/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | common: 3 | ssh_key_name: vaijab 4 | coreos_ami_name: CoreOS-beta-695.0.0-hvm 5 | coreos_min_instances: 3 6 | 7 | prod: 8 | vpc_name: prod-vpc 9 | infra_stack_name: prod-infra 10 | dev: 11 | vpc_name: dev-vpc 12 | infra_stack_name: dev-infra 13 | etcd_discovery_url: 14 | -------------------------------------------------------------------------------- /examples/coreos.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | metadata: 3 | name: {{ env }}-coreos 4 | tags: 5 | - key: Env 6 | value: {{ env }} 7 | - key: Foo 8 | value: {{ bar }} 9 | --- 10 | AWSTemplateFormatVersion: '2010-09-09' 11 | Description: CoreOS Cluster Stack in {{ env }} environment 12 | Resources: 13 | InstanceProfile: 14 | Type: AWS::IAM::InstanceProfile 15 | Properties: 16 | Roles: 17 | - {{ get_stack_output(cf_conn, infra_stack_name, 'CoreOSRole') }} 18 | Path: / 19 | 20 | RolePolicies: 21 | Type: AWS::IAM::Policy 22 | Properties: 23 | PolicyName: {{ env }}-coreos 24 | Roles: 25 | - {{ get_stack_output(cf_conn, infra_stack_name, 'CoreOSRole') }} 26 | PolicyDocument: 27 | Statement: 28 | - Resource: '*' 29 | Effect: Allow 30 | Action: 31 | - ec2:DescribeInstances 32 | - ec2:DescribeTags 33 | - elasticloadbalancing:DescribeLoadBalancers 34 | - elasticloadbalancing:RegisterInstancesWithLoadBalancer 35 | - elasticloadbalancing:DeregisterInstancesFromLoadBalancer 36 | - Resource: 'arn:aws:s3:::example-bucket/*' 37 | Effect: Allow 38 | Action: 39 | - 's3:Put*' 40 | - 's3:Get*' 41 | - Resource: 'arn:aws:s3:::example-bucket' 42 | 
Effect: Allow 43 | Action: 44 | - 's3:*' 45 | 46 | CoreOSScalingGroup: 47 | Type: AWS::AutoScaling::AutoScalingGroup 48 | Properties: 49 | AvailabilityZones: 50 | - Fn::Select: 51 | - '0' 52 | - Fn::GetAZs: '' 53 | - Fn::Select: 54 | - '1' 55 | - Fn::GetAZs: '' 56 | - Fn::Select: 57 | - '2' 58 | - Fn::GetAZs: '' 59 | LaunchConfigurationName: {Ref: CoreOSLaunchConfig} 60 | VPCZoneIdentifier: 61 | - {{ get_stack_output(cf_conn, infra_stack_name, 'SubnetAZ0') }} 62 | - {{ get_stack_output(cf_conn, infra_stack_name, 'SubnetAZ1') }} 63 | - {{ get_stack_output(cf_conn, infra_stack_name, 'SubnetAZ2') }} 64 | TerminationPolicies: 65 | - 'OldestInstance' 66 | - 'Default' 67 | MaxSize: 100 68 | MinSize: {{ coreos_min_instances }} 69 | Tags: 70 | - Key: Name 71 | Value: {{ env }}-coreos 72 | PropagateAtLaunch: true 73 | - Key: Role 74 | Value: coreos 75 | PropagateAtLaunch: true 76 | - Key: Env 77 | Value: {{ env }} 78 | PropagateAtLaunch: true 79 | UpdatePolicy: 80 | AutoScalingRollingUpdate: 81 | MinInstancesInService: {{ coreos_min_instances }} 82 | PauseTime: PT10M 83 | 84 | CoreOSLaunchConfig: 85 | Type: AWS::AutoScaling::LaunchConfiguration 86 | Properties: 87 | AssociatePublicIpAddress: true 88 | IamInstanceProfile: {Ref: InstanceProfile} 89 | ImageId: {{ get_ami_id(ec2_conn, coreos_ami_name) }} 90 | InstanceMonitoring: false 91 | InstanceType: {% if env == 'prod' %}m4.large{% else %}t2.small{% endif %} 92 | KeyName: {{ ssh_key_name }} 93 | SecurityGroups: 94 | - {{ get_stack_output(cf_conn, infra_stack_name, 'DefaultSG') }} 95 | BlockDeviceMappings: 96 | - DeviceName: "/dev/xvda" 97 | Ebs: 98 | VolumeSize: "80" 99 | DeleteOnTermination: true 100 | VolumeType: "gp2" 101 | UserData: 102 | 'Fn::Base64': | 103 | #cloud-config 104 | 105 | coreos: 106 | etcd: 107 | discovery: {{ etcd_discovery_url }} 108 | addr: $private_ipv4:4001 109 | peer-addr: $private_ipv4:7001 110 | fleet: 111 | metadata: region={{ region }},public_ip=$public_ipv4,env={{ env }} 112 | update: 113 | reboot-strategy: 'best-effort' 114 | units: 115 | - name: etcd.service 116 | command: start 117 | - name: fleet.service 118 | command: start 119 | -------------------------------------------------------------------------------- /examples/infra.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | metadata: 3 | name: {{ env }}-infra 4 | tags: 5 | - key: Env 6 | value: {{ env }} 7 | - key: Foo 8 | value: {{ bar }} 9 | --- 10 | AWSTemplateFormatVersion: '2010-09-09' 11 | Description: Infrastructure stack 12 | Parameters: 13 | SubnetAZ0: 14 | Type: String 15 | Default: 10.50.0.0/24 16 | Description: First AZ subnet 17 | ConstraintDescription: must be a valid subnet string. 18 | SubnetAZ1: 19 | Type: String 20 | Default: 10.50.1.0/24 21 | Description: Second AZ subnet 22 | ConstraintDescription: must be a valid subnet string. 23 | SubnetAZ2: 24 | Type: String 25 | Default: 10.50.2.0/24 26 | Description: Third AZ subnet 27 | ConstraintDescription: must be a valid subnet string. 
28 | Resources: 29 | VPC: 30 | Type: AWS::EC2::VPC 31 | Properties: 32 | CidrBlock: 10.50.0.0/16 33 | EnableDnsSupport: true 34 | EnableDnsHostnames: true 35 | Tags: 36 | - Key: Name 37 | Value: {{ vpc_name }} 38 | - Key: Env 39 | Value: {{ env }} 40 | CoreOSRole: 41 | Type: AWS::IAM::Role 42 | Properties: 43 | AssumeRolePolicyDocument: 44 | Statement: 45 | - Principal: 46 | Service: 47 | - ec2.amazonaws.com 48 | Effect: Allow 49 | Action: 50 | - sts:AssumeRole 51 | Path: / 52 | DefaultSG: 53 | Type: AWS::EC2::SecurityGroup 54 | Properties: 55 | VpcId: {Ref: VPC} 56 | GroupDescription: {{ env }}-vpc Default SG 57 | SecurityGroupEgress: 58 | - IpProtocol: -1 59 | CidrIp: 0.0.0.0/0 60 | FromPort: -1 61 | ToPort: -1 62 | SecurityGroupIngress: 63 | - IpProtocol: tcp 64 | CidrIp: 0.0.0.0/0 65 | FromPort: 22 66 | ToPort: 22 67 | Tags: 68 | - Key: Name 69 | Value: {{ env }}-default 70 | - Key: Env 71 | Value: {{ env }} 72 | AllTrafficSGIn: 73 | Type: AWS::EC2::SecurityGroupIngress 74 | Properties: 75 | GroupId: {Ref: DefaultSG} 76 | IpProtocol: -1 77 | SourceSecurityGroupId: {Ref: DefaultSG} 78 | FromPort: -1 79 | ToPort: -1 80 | {% for n in [0, 1, 2] %} 81 | SubnetAZ{{ n }}Resource: 82 | Type: AWS::EC2::Subnet 83 | Properties: 84 | AvailabilityZone: 85 | Fn::Select: 86 | - '{{ n }}' 87 | - Fn::GetAZs: '' 88 | VpcId: 89 | Ref: VPC 90 | CidrBlock: {Ref: SubnetAZ{{ n }}} 91 | Tags: 92 | - Key: Name 93 | Value: {{ env }}-primary 94 | - Key: Env 95 | Value: {{ env }} 96 | SubnetRouteTableAssociationAZ{{ n }}: 97 | Type: AWS::EC2::SubnetRouteTableAssociation 98 | Properties: 99 | SubnetId: 100 | Ref: SubnetAZ{{ n }}Resource 101 | RouteTableId: 102 | Ref: RouteTable 103 | {%- endfor %} 104 | InternetGateway: 105 | Type: AWS::EC2::InternetGateway 106 | Properties: 107 | Tags: 108 | - Key: Name 109 | Value: {{ env }}-igw 110 | - Key: Env 111 | Value: {{ env }} 112 | AttachGateway: 113 | Type: AWS::EC2::VPCGatewayAttachment 114 | Properties: 115 | VpcId: 116 | Ref: VPC 117 | InternetGatewayId: 118 | Ref: InternetGateway 119 | RouteTable: 120 | Type: AWS::EC2::RouteTable 121 | Properties: 122 | VpcId: 123 | Ref: VPC 124 | Tags: 125 | - Key: Name 126 | Value: default 127 | - Key: Env 128 | Value: {{ env }} 129 | Route: 130 | Type: AWS::EC2::Route 131 | DependsOn: AttachGateway 132 | Properties: 133 | RouteTableId: 134 | Ref: RouteTable 135 | DestinationCidrBlock: 0.0.0.0/0 136 | GatewayId: 137 | Ref: InternetGateway 138 | Outputs: 139 | VpcId: 140 | Value: {Ref: VPC} 141 | CoreOSRole: 142 | Value: {Ref: CoreOSRole} 143 | DefaultSG: 144 | Value: {Ref: DefaultSG} 145 | SubnetAZ0: 146 | Value: {Ref: SubnetAZ0Resource} 147 | SubnetAZ1: 148 | Value: {Ref: SubnetAZ1Resource} 149 | SubnetAZ2: 150 | Value: {Ref: SubnetAZ2Resource} 151 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | configargparse>=0.9.3 2 | PyYAML>=5.3.1 3 | Jinja2>=2.7.3 4 | boto>=2.40.0 5 | docker==6.1.3 6 | tabulate>=0.7.5 7 | setuptools==70.0.0 8 | moto[all]==4.1.11 9 | pytz 10 | tzlocal 11 | pytest 12 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | pep8ignore = E501 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 
| from setuptools import setup, find_packages 4 | 5 | base_dir = os.path.dirname(__file__) 6 | 7 | about = {} 8 | with open(os.path.join(base_dir, 'stacks', '__about__.py')) as f: 9 | exec(f.read(), about) 10 | 11 | install_requires = [ 12 | 'configargparse>=0.9.3', 13 | 'PyYAML>=4.2b1', 14 | 'Jinja2>=2.7.3', 15 | 'boto>=2.40.0', 16 | 'docker==6.1.3', 17 | 'openapi-spec-validator==0.5.7', 18 | 'tabulate>=0.7.5', 19 | 'setuptools', 20 | 'pytz', 21 | 'tzlocal', 22 | ] 23 | 24 | tests_require = [ 25 | 'moto', 26 | ] 27 | 28 | config = { 29 | 'name': 'cfstacks', 30 | 'description': 'Manage CloudFormation sanely with templates written in YAML', 31 | 'url': about['__url__'], 32 | 'download_url': about['__url__'], 33 | 'version': about['__version__'], 34 | 'maintainer': about['__maintainer__'], 35 | 'maintainer_email': about['__maintainer_email__'], 36 | 'packages': find_packages(), 37 | 'install_requires': install_requires, 38 | 'tests_require': tests_require, 39 | 'entry_points': { 40 | 'console_scripts': [ 41 | 'stacks = stacks.__main__:main', 42 | ], 43 | }, 44 | 'python_requires': '>=3', 45 | } 46 | 47 | setup(**config) 48 | -------------------------------------------------------------------------------- /stacks/__about__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.4.7' 2 | __licence__ = 'MIT' 3 | __url__ = 'https://github.com/cfstacks/stacks' 4 | __maintainer__ = 'Vaidas Jablonskis' 5 | __maintainer_email__ = 'jablonskis@gmail.com' 6 | -------------------------------------------------------------------------------- /stacks/__init__.py: -------------------------------------------------------------------------------- 1 | """stacks - Manage Cloud Formation sanely with templates written in YAML""" 2 | from stacks import __about__ 3 | 4 | __version__ = __about__.__version__ 5 | __licence__ = __about__.__licence__ 6 | -------------------------------------------------------------------------------- /stacks/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from .main import main 4 | 5 | if __name__ == '__main__': 6 | main() 7 | -------------------------------------------------------------------------------- /stacks/aws.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from boto.exception import BotoServerError 4 | 5 | 6 | def throttling_retry(func): 7 | """Retry when AWS is throttling API calls""" 8 | 9 | def retry_call(*args, **kwargs): 10 | retries = 0 11 | while True: 12 | try: 13 | retval = func(*args, **kwargs) 14 | return retval 15 | except BotoServerError as err: 16 | if (err.code == 'Throttling' or err.code == 'RequestLimitExceeded') and retries <= 3: 17 | sleep = 3 * (2 ** retries) 18 | print('Being throttled. 
Retrying after {} seconds..'.format(sleep)) 19 | time.sleep(sleep) 20 | retries += 1 21 | else: 22 | raise err 23 | 24 | return retry_call 25 | 26 | 27 | @throttling_retry 28 | def get_ami_id(conn, name): 29 | """Return the first AMI ID given its name""" 30 | images = conn.get_all_images(filters={'name': name}) 31 | conn.close() 32 | if len(images) != 0: 33 | return images[0].id 34 | else: 35 | raise RuntimeError('{} AMI not found'.format(name)) 36 | 37 | 38 | @throttling_retry 39 | def get_zone_id(conn, name): 40 | """Return the first Route53 zone ID given its name""" 41 | zone = conn.get_zone(name) 42 | conn.close() 43 | if zone: 44 | return zone.id 45 | else: 46 | raise RuntimeError('{} zone not found'.format(name)) 47 | 48 | 49 | @throttling_retry 50 | def get_vpc_id(conn, name): 51 | """Return the first VPC ID given its name and region""" 52 | vpcs = conn.get_all_vpcs(filters={'tag:Name': name}) 53 | conn.close() 54 | if len(vpcs) == 1: 55 | return vpcs[0].id 56 | else: 57 | raise RuntimeError('{} VPC not found'.format(name)) 58 | 59 | 60 | @throttling_retry 61 | def get_stack_output(conn, name, key): 62 | """Return stack output key value""" 63 | result = conn.describe_stacks(name) 64 | if len(result) != 1: 65 | raise RuntimeError('{} stack not found'.format(name)) 66 | outputs = [s.outputs for s in result][0] 67 | for output in outputs: 68 | if output.key == key: 69 | return output.value 70 | raise RuntimeError('{} output not found'.format(key)) 71 | 72 | 73 | @throttling_retry 74 | def get_stack_tag(conn, name, tag): 75 | """Return stack tag""" 76 | result = conn.describe_stacks(name) 77 | if len(result) != 1: 78 | raise RuntimeError('{} stack not found'.format(name)) 79 | tags = [s.tags for s in result][0] 80 | return tags.get(tag, '') 81 | 82 | 83 | @throttling_retry 84 | def get_stack_resource(conn, stack_name, logical_id): 85 | """Return a physical_resource_id given its logical_id""" 86 | resources = conn.describe_stack_resources(stack_name_or_id=stack_name) 87 | for r in resources: 88 | # TODO: would be nice to check for resource_status 89 | if r.logical_resource_id == logical_id: 90 | return r.physical_resource_id 91 | return None 92 | 93 | 94 | @throttling_retry 95 | def get_stack_template(conn, stack_name): 96 | """Return a template body of live stack""" 97 | try: 98 | template = conn.get_template(stack_name) 99 | return template['GetTemplateResponse']['GetTemplateResult']['TemplateBody'], [] 100 | except BotoServerError as e: 101 | return None, [e.message] 102 | -------------------------------------------------------------------------------- /stacks/cf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cloudformation related functions 3 | """ 4 | import builtins 5 | import difflib 6 | import hashlib 7 | import json 8 | import sys 9 | import time 10 | from datetime import datetime 11 | from fnmatch import fnmatch 12 | from operator import attrgetter 13 | from os import path 14 | from typing import Mapping, Sequence, Set 15 | 16 | import boto 17 | import jinja2 18 | import pytz 19 | import tzlocal 20 | import yaml 21 | from boto.exception import BotoServerError 22 | from jinja2 import meta 23 | from tabulate import tabulate 24 | 25 | from stacks.aws import get_stack_tag, get_stack_template, throttling_retry 26 | from stacks.helpers import intrinsics_multi_constructor 27 | from stacks.states import (COMPLETE_STACK_STATES, FAILED_STACK_STATES, 28 | IN_PROGRESS_STACK_STATES, ROLLBACK_STACK_STATES) 29 | 30 | YES = ['y', 'Y', 'yes', 
'YES', 'Yes'] 31 | 32 | 33 | def gen_template(tpl_file, config): 34 | """Return a tuple of json string template and options dict""" 35 | tpl_path, tpl_fname = path.split(tpl_file.name) 36 | env = _new_jinja_env(tpl_path) 37 | 38 | _check_missing_vars(env, tpl_file, config) 39 | 40 | tpl = env.get_template(tpl_fname) 41 | rendered = tpl.render(config) 42 | try: 43 | yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor) 44 | docs = list(yaml.safe_load_all(rendered)) 45 | except yaml.parser.ParserError as err: 46 | print(err) 47 | sys.exit(1) 48 | 49 | if len(docs) == 2: 50 | tpl, metadata = docs[1], docs[0] 51 | else: 52 | tpl, metadata = docs[0], None 53 | 54 | errors = validate_template(tpl) 55 | return json.dumps(tpl, indent=2, sort_keys=True), metadata, errors 56 | 57 | 58 | def _check_missing_vars(env, tpl_file, config): 59 | """Check for missing variables in a template string""" 60 | tpl_str = tpl_file.read() 61 | ast = env.parse(tpl_str) 62 | required_properties = meta.find_undeclared_variables(ast) 63 | missing_properties = required_properties - config.keys() - set(dir(builtins)) 64 | 65 | if len(missing_properties) > 0: 66 | print('Required properties not set: {}'.format(','.join(missing_properties))) 67 | sys.exit(1) 68 | 69 | 70 | def _new_jinja_env(tpl_path): 71 | loader = jinja2.loaders.FileSystemLoader(tpl_path) 72 | env = jinja2.Environment(loader=loader) 73 | return env 74 | 75 | 76 | # TODO(vaijab): fix 'S3ResponseError: 301 Moved Permanently', this happens when 77 | # a connection to S3 is being made from a different region than the one a bucket 78 | # was created in. 79 | def upload_template(config, tpl, stack_name): 80 | """Upload a template to S3 bucket and returns S3 key url""" 81 | bn = config.get('templates_bucket_name', '{}-stacks-{}'.format(config['env'], config['region'])) 82 | 83 | try: 84 | b = config['s3_conn'].get_bucket(bn) 85 | except boto.exception.S3ResponseError as err: 86 | if err.code == 'NoSuchBucket': 87 | print('Bucket {} does not exist.'.format(bn)) 88 | else: 89 | print(err) 90 | sys.exit(1) 91 | 92 | h = _calc_md5(tpl) 93 | k = boto.s3.key.Key(b) 94 | k.key = '{}/{}/{}'.format(config['env'], stack_name, h) 95 | k.set_contents_from_string(tpl) 96 | url = k.generate_url(expires_in=30) 97 | return url 98 | 99 | 100 | def stack_resources(conn, stack_name, logical_resource_id=None): 101 | """List stack resources""" 102 | try: 103 | result = conn.describe_stack_resources(stack_name_or_id=stack_name, 104 | logical_resource_id=logical_resource_id) 105 | except BotoServerError as err: 106 | print(err.message) 107 | sys.exit(1) 108 | resources = [] 109 | if logical_resource_id: 110 | resources.append([r.physical_resource_id for r in result]) 111 | else: 112 | for r in result: 113 | columns = [ 114 | r.logical_resource_id, 115 | r.physical_resource_id, 116 | r.resource_type, 117 | r.resource_status, 118 | ] 119 | resources.append(columns) 120 | 121 | if len(result) >= 1: 122 | return tabulate(resources, tablefmt='plain') 123 | return None 124 | 125 | 126 | def stack_outputs(conn, stack_name, output_name): 127 | """List stacks outputs""" 128 | try: 129 | result = conn.describe_stacks(stack_name) 130 | except BotoServerError as err: 131 | print(err.message) 132 | sys.exit(1) 133 | 134 | outputs = [] 135 | outs = [s.outputs for s in result][0] 136 | for o in outs: 137 | if not output_name: 138 | columns = [o.key, o.value] 139 | outputs.append(columns) 140 | elif output_name and o.key == output_name: 141 | outputs.append([o.value]) 142 | 143 | 
if len(result) >= 1: 144 | return tabulate(outputs, tablefmt='plain') 145 | return None 146 | 147 | 148 | def list_stacks(conn, name_filter='*', verbose=False): 149 | """List active stacks""" 150 | states = FAILED_STACK_STATES + COMPLETE_STACK_STATES + IN_PROGRESS_STACK_STATES + ROLLBACK_STACK_STATES 151 | s = conn.list_stacks(states) 152 | 153 | stacks = [] 154 | for n in s: 155 | if name_filter and fnmatch(n.stack_name, name_filter): 156 | columns = [n.stack_name, n.stack_status] 157 | if verbose: 158 | env = get_stack_tag(conn, n.stack_name, 'Env') 159 | columns.append(env) 160 | columns.append(n.template_description) 161 | stacks.append(columns) 162 | 163 | if len(stacks) >= 1: 164 | return tabulate(stacks, tablefmt='plain') 165 | return None 166 | 167 | 168 | def create_stack(conn, stack_name, tpl_file, config, update=False, dry=False, create_on_update=False): 169 | """Create or update CloudFormation stack from a jinja2 template""" 170 | tpl, metadata, errors = gen_template(tpl_file, config) 171 | 172 | # Set default tags which cannot be overwritten 173 | default_tags = { 174 | 'Env': config['env'], 175 | 'MD5Sum': _calc_md5(tpl) 176 | } 177 | 178 | if metadata: 179 | tags = _extract_tags(metadata) 180 | tags.update(default_tags) 181 | name_from_metadata = metadata.get('name', None) 182 | disable_rollback = metadata.get('disable_rollback', None) 183 | else: 184 | name_from_metadata = None 185 | tags = default_tags 186 | disable_rollback = None 187 | 188 | stack_name = stack_name or name_from_metadata 189 | if not stack_name: 190 | print('Stack name must be specified via command line argument or stack metadata.') 191 | sys.exit(1) 192 | if errors: 193 | for err in errors: 194 | print('ERROR: ' + err) 195 | if not dry: 196 | sys.exit(1) 197 | 198 | tpl_size = len(tpl) 199 | 200 | if dry: 201 | print(tpl, flush=True) 202 | print('Name: {}'.format(stack_name), file=sys.stderr, flush=True) 203 | print('Tags: ' + ', '.join(['{}={}'.format(k, v) for (k, v) in tags.items()]), file=sys.stderr, flush=True) 204 | print('Template size:', tpl_size, file=sys.stderr, flush=True) 205 | return True 206 | 207 | if tpl_size > 51200: 208 | tpl_url = upload_template(config, tpl, stack_name) 209 | tpl_body = None 210 | else: 211 | tpl_url = None 212 | tpl_body = tpl 213 | 214 | try: 215 | if update and create_on_update and not stack_exists(conn, stack_name): 216 | conn.create_stack(stack_name, template_url=tpl_url, template_body=tpl_body, 217 | tags=tags, capabilities=['CAPABILITY_IAM'], 218 | disable_rollback=disable_rollback) 219 | elif update: 220 | conn.update_stack(stack_name, template_url=tpl_url, template_body=tpl_body, 221 | tags=tags, capabilities=['CAPABILITY_IAM'], 222 | disable_rollback=disable_rollback) 223 | else: 224 | conn.create_stack(stack_name, template_url=tpl_url, template_body=tpl_body, 225 | tags=tags, capabilities=['CAPABILITY_IAM'], 226 | disable_rollback=disable_rollback) 227 | except BotoServerError as err: 228 | # Do not exit with 1 when one of the below messages are returned 229 | non_error_messages = [ 230 | 'No updates are to be performed', 231 | 'already exists', 232 | ] 233 | if any(s in err.message for s in non_error_messages): 234 | print(err.message) 235 | sys.exit(0) 236 | print(err.message) 237 | sys.exit(1) 238 | return stack_name 239 | 240 | 241 | def _extract_tags(metadata): 242 | """Return tags from a metadata""" 243 | tags = {} 244 | 245 | for tag in metadata.get('tags', []): 246 | tags[tag['key']] = tag['value'] 247 | return tags 248 | 249 | 250 | def 
_calc_md5(j): 251 | """Calculate an MD5 hash of a string""" 252 | return hashlib.md5(j.encode()).hexdigest() 253 | 254 | 255 | def delete_stack(conn, stack_name, region, profile, confirm): 256 | """Deletes stack given its name""" 257 | msg = ('You are about to delete the following stack:\n' 258 | 'Name: {}\n' 259 | 'Region: {}\n' 260 | 'Profile: {}\n').format(stack_name, region, profile) 261 | if not confirm: 262 | print(msg) 263 | response = input('Are you sure? [y/N] ') 264 | else: 265 | response = 'yes' 266 | 267 | if response in YES: 268 | try: 269 | conn.delete_stack(stack_name) 270 | except BotoServerError as err: 271 | if 'does not exist' in err.message: 272 | print(err.message) 273 | sys.exit(0) 274 | else: 275 | print(err.message) 276 | sys.exit(1) 277 | else: 278 | sys.exit(0) 279 | 280 | 281 | def get_events(conn, stack_name, next_token): 282 | """Get stack events""" 283 | try: 284 | events = conn.describe_stack_events(stack_name, next_token) 285 | next_token = events.next_token 286 | return sorted_events(events), next_token 287 | except BotoServerError as err: 288 | if 'does not exist' in err.message: 289 | print(err.message) 290 | sys.exit(0) 291 | else: 292 | print(err.message) 293 | sys.exit(1) 294 | 295 | 296 | def sorted_events(events): 297 | """Sort stack events by timestamp""" 298 | return sorted(events, key=attrgetter('timestamp')) 299 | 300 | 301 | def print_events(conn, stack_name, follow, lines=100, from_dt=datetime.fromtimestamp(0, tz=pytz.UTC)): 302 | """Prints tabulated list of events""" 303 | events_display = [] 304 | seen_ids = set() 305 | next_token = None 306 | 307 | while True: 308 | events, next_token = get_events(conn, stack_name, next_token) 309 | status = get_stack_status(conn, stack_name) 310 | normalize_events_timestamps(events) 311 | if follow: 312 | events_display = [(ev.timestamp.astimezone(tzlocal.get_localzone()), ev.resource_status, ev.resource_type, 313 | ev.logical_resource_id, ev.resource_status_reason) for ev in events 314 | if ev.event_id not in seen_ids and ev.timestamp >= from_dt] 315 | if len(events_display) > 0: 316 | print(tabulate(events_display, tablefmt='plain'), flush=True) 317 | seen_ids |= set([event.event_id for event in events]) 318 | if status not in IN_PROGRESS_STACK_STATES and next_token is None: 319 | break 320 | if next_token is None: 321 | time.sleep(5) 322 | else: 323 | events_display.extend([(event.timestamp.astimezone(tzlocal.get_localzone()), event.resource_status, 324 | event.resource_type, event.logical_resource_id, event.resource_status_reason) 325 | for event in events]) 326 | if len(events_display) >= lines or next_token is None: 327 | break 328 | 329 | if not follow: 330 | print(tabulate(events_display[:lines], tablefmt='plain'), flush=True) 331 | 332 | return status 333 | 334 | 335 | @throttling_retry 336 | def get_stack_status(conn, stack_name): 337 | """Check stack status""" 338 | stacks = [] 339 | resp = conn.list_stacks() 340 | stacks.extend(resp) 341 | while resp.next_token: 342 | resp = conn.list_stacks(next_token=resp.next_token) 343 | stacks.extend(resp) 344 | for s in stacks: 345 | if s.stack_name == stack_name and s.stack_status != 'DELETE_COMPLETE': 346 | return s.stack_status 347 | return None 348 | 349 | 350 | def stack_exists(conn, stack_name): 351 | """Check whether stack_name exists 352 | 353 | CF keeps deleted duplicate stack names with DELETE_COMPLETE status, which is 354 | treated as non existing stack. 
355 | """ 356 | status = get_stack_status(conn, stack_name) 357 | if status == 'DELETE_COMPLETE' or status is None: 358 | return False 359 | return True 360 | 361 | 362 | def normalize_events_timestamps(events): 363 | for ev in events: 364 | ev.timestamp = ev.timestamp.replace(tzinfo=pytz.UTC) 365 | 366 | 367 | def traverse_template(obj, obj_path=(), memo=None): 368 | def iteritems(mapping): 369 | return getattr(mapping, 'iteritems', mapping.items)() 370 | 371 | if memo is None: 372 | memo = set() 373 | iterator = None 374 | if isinstance(obj, Mapping): 375 | iterator = iteritems 376 | elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, (str, bytes)): 377 | iterator = enumerate 378 | if iterator: 379 | if id(obj) not in memo: 380 | memo.add(id(obj)) 381 | for path_component, value in iterator(obj): 382 | for result in traverse_template(value, obj_path + (path_component,), memo): 383 | yield result 384 | memo.remove(id(obj)) 385 | else: 386 | yield obj_path, obj 387 | 388 | 389 | def validate_template(tpl): 390 | errors = [] 391 | for k, v in traverse_template(tpl): 392 | if v is None: 393 | errors.append( 394 | "/{} 'null' values are not allowed in templates".format('/'.join(map(str, k))) 395 | ) 396 | return errors 397 | 398 | 399 | def print_stack_diff(conn, stack_name, tpl_file, config): 400 | local_template, metadata, errors = gen_template(tpl_file, config) 401 | 402 | if metadata: 403 | name_from_metadata = metadata.get('name', None) 404 | else: 405 | name_from_metadata = None 406 | 407 | stack_name = stack_name or name_from_metadata 408 | if not stack_name: 409 | print('Stack name must be specified via command line argument or stack metadata.') 410 | sys.exit(1) 411 | if errors: 412 | for err in errors: 413 | print('ERROR: ' + err) 414 | 415 | live_template, errors = get_stack_template(conn, stack_name) 416 | if errors: 417 | for err in errors: 418 | print('ERROR: ' + err) 419 | sys.exit(1) 420 | 421 | if local_template == live_template: 422 | return 423 | for line in difflib.ndiff(live_template.split('\n'), local_template.split('\n')): 424 | print(line) 425 | -------------------------------------------------------------------------------- /stacks/cli.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import configargparse 4 | 5 | from stacks import __about__ 6 | 7 | 8 | def parse_options(): 9 | """Handle command-line options 10 | 11 | Return parser object and list of arguments 12 | """ 13 | parser = configargparse.ArgumentParser() 14 | parser.add_argument('-p', '--profile', required=False) 15 | parser.add_argument('-r', '--region', required=False) 16 | parser.add_argument('--version', action='version', version=__about__.__version__) 17 | subparsers = parser.add_subparsers(title='available subcommands', dest='subcommand') 18 | 19 | # resources subparser 20 | parser_resources = subparsers.add_parser('resources', help='List stack resources') 21 | parser_resources.add_argument('name', help='Stack name') 22 | parser_resources.add_argument('logical_id', nargs='?', default=None, 23 | help='Logical resource id. Returns physical_resource_id.') 24 | 25 | # outputs subparser 26 | parser_outputs = subparsers.add_parser('outputs', help='List stack outputs') 27 | parser_outputs.add_argument('name', help='Stack name') 28 | parser_outputs.add_argument('output_name', nargs='?', default=None, 29 | help='Output name. 
Returns output value.') 30 | 31 | # config subparser 32 | parser_config = subparsers.add_parser('config', help='Print config properties') 33 | # noinspection PyArgumentList 34 | parser_config.add_argument('-e', '--env', env_var='STACKS_ENV', required=False, default=None) 35 | parser_config.add_argument('-o', '--output', default='text', choices=['text', 'yaml', 'json'], 36 | dest='output_format', help='Output format') 37 | # noinspection PyArgumentList 38 | parser_config.add_argument('-c', '--config', default='config.yaml', 39 | env_var='STACKS_CONFIG', required=False, 40 | type=_is_file) 41 | # noinspection PyArgumentList 42 | parser_config.add_argument('--config-dir', default='config.d', 43 | env_var='STACKS_CONFIG_DIR', required=False, 44 | type=_is_dir) 45 | parser_config.add_argument('property_name', nargs='?', default=None) 46 | 47 | # list subparser 48 | parser_list = subparsers.add_parser('list', help='List stacks') 49 | parser_list.add_argument('-v', '--verbose', action='store_true') 50 | parser_list.add_argument('name', default='*', nargs='?', 51 | help='Stack name or unix shell-style pattern') 52 | 53 | # create subparser 54 | parser_create = subparsers.add_parser('create', help='Create a new stack') 55 | parser_create.add_argument('-t', '--template', required=True, type=configargparse.FileType()) 56 | # noinspection PyArgumentList 57 | parser_create.add_argument('-c', '--config', default='config.yaml', 58 | env_var='STACKS_CONFIG', required=False, 59 | type=_is_file) 60 | # noinspection PyArgumentList 61 | parser_create.add_argument('--config-dir', default='config.d', 62 | env_var='STACKS_CONFIG_DIR', required=False, 63 | type=_is_dir) 64 | parser_create.add_argument('name', nargs='?', default=None) 65 | # noinspection PyArgumentList 66 | parser_create.add_argument('-e', '--env', env_var='STACKS_ENV', required=False, default=None) 67 | parser_create.add_argument('-P', '--property', required=False, action='append') 68 | parser_create.add_argument('-d', '--dry-run', action='store_true') 69 | parser_create.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') 70 | 71 | # update subparser 72 | parser_update = subparsers.add_parser('update', help='Update an existing stack') 73 | parser_update.add_argument('-t', '--template', required=True, type=configargparse.FileType()) 74 | # noinspection PyArgumentList 75 | parser_update.add_argument('-c', '--config', env_var='STACKS_CONFIG', 76 | default='config.yaml', required=False, 77 | type=_is_file) 78 | # noinspection PyArgumentList 79 | parser_update.add_argument('--config-dir', default='config.d', 80 | env_var='STACKS_CONFIG_DIR', required=False, 81 | type=_is_dir) 82 | parser_update.add_argument('name', nargs='?', default=None) 83 | # noinspection PyArgumentList 84 | parser_update.add_argument('-e', '--env', env_var='STACKS_ENV', required=False, default=None) 85 | parser_update.add_argument('-P', '--property', required=False, action='append') 86 | parser_update.add_argument('-d', '--dry-run', action='store_true') 87 | parser_update.add_argument('--create', dest='create_on_update', 88 | help='Create if stack does not exist.', 89 | action='store_true') 90 | parser_update.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', action='store_true') 91 | 92 | # delete subparser 93 | parser_delete = subparsers.add_parser('delete', help='Delete an existing stack') 94 | parser_delete.add_argument('-f', '--follow', dest='events_follow', help='Follow stack events', 
action='store_true') 95 | parser_delete.add_argument('-y', '--yes', help='Confirm stack deletion.', action='store_true') 96 | parser_delete.add_argument('name') 97 | 98 | # events subparser 99 | parser_events = subparsers.add_parser('events', help='List events from a stack') 100 | parser_events.add_argument('name') 101 | parser_events.add_argument('-f', '--follow', dest='events_follow', action='store_true', 102 | help='Poll for new events until stopped (overrides -n)') 103 | parser_events.add_argument('-n', '--lines', default=100, type=int) 104 | 105 | # diff subparser 106 | parser_create = subparsers.add_parser('diff', help='Print diff of current vs compiled template') 107 | parser_create.add_argument('-t', '--template', required=True, type=configargparse.FileType()) 108 | # noinspection PyArgumentList 109 | parser_create.add_argument('-c', '--config', default='config.yaml', 110 | env_var='STACKS_CONFIG', required=False, 111 | type=_is_file) 112 | # noinspection PyArgumentList 113 | parser_create.add_argument('--config-dir', default='config.d', 114 | env_var='STACKS_CONFIG_DIR', required=False, 115 | type=_is_dir) 116 | parser_create.add_argument('name', nargs='?', default=None) 117 | # noinspection PyArgumentList 118 | parser_create.add_argument('-e', '--env', env_var='STACKS_ENV', required=False, default=None) 119 | parser_create.add_argument('-P', '--property', required=False, action='append') 120 | 121 | return parser, parser.parse_args() 122 | 123 | 124 | def _is_file(fname): 125 | """Check whether fname is a file 126 | 127 | To be used as a type argument in add_argument() 128 | """ 129 | return fname if os.path.isfile(fname) else None 130 | 131 | 132 | def _is_dir(dirname): 133 | """Check whether dirname is a dir 134 | 135 | To be used as a type argument in add_argument() 136 | """ 137 | return dirname if os.path.isdir(dirname) else None 138 | -------------------------------------------------------------------------------- /stacks/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import sys 4 | 5 | import boto 6 | import yaml 7 | 8 | AWS_CONFIG_FILE = os.environ.get('HOME', '') + '/.aws/config' 9 | AWS_CREDENTIALS_FILE = os.environ.get('HOME', '') + '/.aws/credentials' 10 | RESERVED_PROPERTIES = ['region', 'profile', 'env'] 11 | 12 | 13 | def config_load(env, config_file=None, config_dir=None): 14 | """Load stack configuration files""" 15 | config = {} 16 | conf_files = list_files(config_dir) 17 | if config_file: 18 | conf_files.insert(0, config_file) 19 | for f in conf_files: 20 | config.update(config_merge(env, f)) 21 | config['env'] = env 22 | return config 23 | 24 | 25 | def config_merge(env, config_file=None): 26 | """Merge stacks configuration file environments""" 27 | c = _load_yaml(config_file) 28 | config = {} 29 | if c: 30 | merged = _merge(c, env) 31 | config.update(merged) 32 | else: 33 | config.update({}) 34 | 35 | return config 36 | 37 | 38 | def list_files(dirname): 39 | """Return a sorted list of files from dirname""" 40 | lf = [] 41 | if not dirname: 42 | return lf 43 | for f in os.listdir(dirname): 44 | joined = os.path.join(dirname, f) 45 | if os.path.isfile(joined) and joined.endswith('.yaml'): 46 | lf.append(joined) 47 | return sorted(lf, reverse=True) 48 | 49 | 50 | def _merge(config, env): 51 | """ 52 | Takes `config` loaded from a config file and the environment name `env`. 53 | 54 | If "common" and `env` are keys in `config`, return 55 | config['common'].update(config[env]) (i.e. 
the common config updated with 56 | environmet specific config) 57 | 58 | If one of env or common exists, return that config. 59 | 60 | Otherwise just return the whole of `config` unmodified. 61 | """ 62 | if 'common' in config and env in config: 63 | c = config['common'].copy() 64 | c.update(config[env]) 65 | elif env in config.keys(): 66 | c = config[env] 67 | elif 'common' in config.keys(): 68 | c = config['common'] 69 | else: 70 | c = config 71 | return c 72 | 73 | 74 | def _load_yaml(fname): 75 | try: 76 | with open(fname) as f: 77 | return yaml.full_load(f) 78 | except (FileNotFoundError, PermissionError, yaml.YAMLError): 79 | return None 80 | 81 | 82 | def get_region_name(profile): 83 | """Get region name from AWS_CREDENTIALS_FILE 84 | 85 | Return region name 86 | """ 87 | if os.path.isfile(AWS_CREDENTIALS_FILE): 88 | boto.config.load_credential_file(AWS_CREDENTIALS_FILE) 89 | 90 | if boto.config.get(profile, 'region'): 91 | return boto.config.get(profile, 'region') 92 | else: 93 | return None 94 | return None 95 | 96 | 97 | def get_default_region_name(): 98 | """Get default region name from AWS_CONFIG_FILE 99 | 100 | Return region name 101 | """ 102 | if os.path.isfile(AWS_CONFIG_FILE): 103 | boto.config.load_credential_file(AWS_CONFIG_FILE) 104 | 105 | if boto.config.get('default', 'region'): 106 | return boto.config.get('default', 'region') 107 | else: 108 | return None 109 | return None 110 | 111 | 112 | def profile_exists(profile): 113 | """Return True if profile exists in AWS_CREDENTIALS_FILE""" 114 | if os.path.isfile(AWS_CREDENTIALS_FILE): 115 | boto.config.load_credential_file(AWS_CREDENTIALS_FILE) 116 | if boto.config.get(profile, 'region'): 117 | return True 118 | else: 119 | return False 120 | return False 121 | 122 | 123 | def validate_properties(props_arg): 124 | properties = dict(p.split('=') for p in props_arg) 125 | reserved = [i for i in RESERVED_PROPERTIES if i in properties.keys()] 126 | if len(reserved): 127 | print('Unable to override reserved properties: {}'.format(','.join(reserved))) 128 | sys.exit(1) 129 | return properties 130 | 131 | 132 | def print_config(config, property_name=None, output_format=None): 133 | if property_name is not None: 134 | if config.get(property_name): 135 | if output_format == 'json': 136 | print(json.dumps(config[property_name], indent=2)) 137 | elif output_format == 'yaml': 138 | print(yaml.dump(config[property_name])) 139 | else: 140 | print(config[property_name]) 141 | return 142 | 143 | elif output_format == 'yaml': 144 | print(yaml.dump(config)) 145 | elif output_format == 'json': 146 | print(json.dumps(config, indent=2)) 147 | else: 148 | for k, v in config.items(): 149 | print('{}={}'.format(k, v)) 150 | return 151 | -------------------------------------------------------------------------------- /stacks/helpers.py: -------------------------------------------------------------------------------- 1 | # noinspection PyProtectedMember 2 | from yaml.resolver import ScalarNode, SequenceNode 3 | 4 | 5 | # noinspection PyUnusedLocal 6 | def intrinsics_multi_constructor(loader, tag_prefix, node): 7 | """ 8 | YAML constructor to parse CloudFormation intrinsics. 
9 | This will return a dictionary with key being the instrinsic name 10 | """ 11 | 12 | # Get the actual tag name excluding the first exclamation 13 | tag = node.tag[1:] 14 | 15 | # Some intrinsic functions doesn't support prefix "Fn::" 16 | prefix = "Fn::" 17 | if tag in ["Ref", "Condition"]: 18 | prefix = "" 19 | 20 | cfntag = prefix + tag 21 | 22 | if tag == "GetAtt" and isinstance(node.value, str): 23 | # ShortHand notation for !GetAtt accepts Resource.Attribute format 24 | # while the standard notation is to use an array 25 | # [Resource, Attribute]. Convert shorthand to standard format 26 | value = node.value.split(".", 1) 27 | 28 | elif isinstance(node, ScalarNode): 29 | # Value of this node is scalar 30 | value = loader.construct_scalar(node) 31 | 32 | elif isinstance(node, SequenceNode): 33 | # Value of this node is an array (Ex: [1,2]) 34 | value = loader.construct_sequence(node) 35 | 36 | else: 37 | # Value of this node is an mapping (ex: {foo: bar}) 38 | value = loader.construct_mapping(node) 39 | 40 | return {cfntag: value} 41 | -------------------------------------------------------------------------------- /stacks/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | import sys 4 | from datetime import datetime 5 | 6 | import boto.cloudformation 7 | import boto.ec2 8 | import boto.route53 9 | import boto.s3 10 | import boto.vpc 11 | import pytz 12 | 13 | from stacks import aws, cf, cli 14 | from stacks.config import (config_load, get_default_region_name, 15 | get_region_name, print_config, profile_exists, 16 | validate_properties) 17 | from stacks.states import FAILED_STACK_STATES, ROLLBACK_STACK_STATES 18 | 19 | 20 | def main(): 21 | for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]: 22 | signal.signal(sig, handler) 23 | 24 | parser, args = cli.parse_options() 25 | 26 | if not args.subcommand: 27 | parser.print_help() 28 | sys.exit(0) 29 | 30 | config_file = vars(args).get('config', None) 31 | config_dir = vars(args).get('config_dir', None) 32 | env = vars(args).get('env', None) 33 | config = config_load(env, config_file, config_dir) 34 | now = datetime.now(tz=pytz.UTC) 35 | 36 | if args.subcommand == 'config': 37 | print_config(config, args.property_name, output_format=args.output_format) 38 | sys.exit(0) 39 | 40 | config['get_ami_id'] = aws.get_ami_id 41 | config['get_vpc_id'] = aws.get_vpc_id 42 | config['get_zone_id'] = aws.get_zone_id 43 | config['get_stack_output'] = aws.get_stack_output 44 | config['get_stack_resource'] = aws.get_stack_resource 45 | 46 | # Figure out profile value in the following order 47 | # - cli arg 48 | # - env variable 49 | # - default profile if exists 50 | if args.profile: 51 | profile = args.profile 52 | elif os.environ.get('AWS_DEFAULT_PROFILE'): 53 | profile = os.environ.get('AWS_DEFAULT_PROFILE') 54 | elif profile_exists('default'): 55 | profile = 'default' 56 | elif args.profile and not profile_exists(args.profile): 57 | print('Profile "{}" does not exist.'.format(args.profile)) 58 | sys.exit(1) 59 | else: 60 | profile = None 61 | 62 | # Figure out region value in the following order 63 | # - cli arg 64 | # - env variable 65 | # - region from config 66 | if args.region: 67 | region = args.region 68 | elif os.environ.get('AWS_DEFAULT_REGION'): 69 | region = os.environ.get('AWS_DEFAULT_REGION') 70 | elif get_region_name(profile): 71 | region = get_region_name(profile) 72 | else: 73 | region = get_default_region_name() 74 | 75 | if not region: 76 
| print('Region is not specified.') 77 | sys.exit(1) 78 | 79 | config['region'] = region 80 | 81 | # Not great, but try to catch everything. Above should be refactored in a 82 | # function which handles setting up connections to different aws services 83 | try: 84 | ec2_conn = boto.ec2.connect_to_region(region, profile_name=profile) 85 | vpc_conn = boto.vpc.connect_to_region(region, profile_name=profile) 86 | cf_conn = boto.cloudformation.connect_to_region(region, profile_name=profile) 87 | r53_conn = boto.route53.connect_to_region(region, profile_name=profile) 88 | s3_conn = boto.s3.connect_to_region(region, profile_name=profile) 89 | config['ec2_conn'] = ec2_conn 90 | config['vpc_conn'] = vpc_conn 91 | config['cf_conn'] = cf_conn 92 | config['r53_conn'] = r53_conn 93 | config['s3_conn'] = s3_conn 94 | # TODO(alekna): Fix too broad exception 95 | except: 96 | print(sys.exc_info()[1]) 97 | sys.exit(1) 98 | 99 | if args.subcommand == 'resources': 100 | output = cf.stack_resources(cf_conn, args.name, args.logical_id) 101 | if output: 102 | print(output) 103 | cf_conn.close() 104 | 105 | if args.subcommand == 'outputs': 106 | output = cf.stack_outputs(cf_conn, args.name, args.output_name) 107 | if output: 108 | print(output) 109 | cf_conn.close() 110 | 111 | if args.subcommand == 'list': 112 | output = cf.list_stacks(cf_conn, args.name, args.verbose) 113 | if output: 114 | print(output) 115 | cf_conn.close() 116 | 117 | if args.subcommand == 'create' or args.subcommand == 'update': 118 | if args.property: 119 | properties = validate_properties(args.property) 120 | config.update(properties) 121 | 122 | if args.subcommand == 'create': 123 | stack_name = cf.create_stack(cf_conn, args.name, args.template, config, dry=args.dry_run) 124 | if args.events_follow and not args.dry_run: 125 | stack_status = cf.print_events(cf_conn, stack_name, args.events_follow) 126 | if stack_status in FAILED_STACK_STATES + ROLLBACK_STACK_STATES: 127 | sys.exit(1) 128 | else: 129 | stack_name = cf.create_stack(cf_conn, args.name, args.template, config, update=True, dry=args.dry_run, 130 | create_on_update=args.create_on_update) 131 | if args.events_follow and not args.dry_run: 132 | stack_status = cf.print_events(cf_conn, stack_name, args.events_follow, from_dt=now) 133 | if stack_status in FAILED_STACK_STATES + ROLLBACK_STACK_STATES: 134 | sys.exit(1) 135 | 136 | if args.subcommand == 'delete': 137 | cf.delete_stack(cf_conn, args.name, region, profile, args.yes) 138 | if args.events_follow: 139 | stack_status = cf.print_events(cf_conn, args.name, args.events_follow, from_dt=now) 140 | if stack_status in FAILED_STACK_STATES: 141 | sys.exit(1) 142 | 143 | if args.subcommand == 'events': 144 | cf.print_events(cf_conn, args.name, args.events_follow, args.lines) 145 | 146 | if args.subcommand == 'diff': 147 | if args.property: 148 | properties = validate_properties(args.property) 149 | config.update(properties) 150 | cf.print_stack_diff(cf_conn, args.name, args.template, config) 151 | 152 | 153 | def handler(signum, _): 154 | print('Signal {} received. 
Stopping.'.format(signum)) 155 | sys.exit(0) 156 | -------------------------------------------------------------------------------- /stacks/states.py: -------------------------------------------------------------------------------- 1 | FAILED_STACK_STATES = [ 2 | 'CREATE_FAILED', 3 | 'ROLLBACK_FAILED', 4 | 'DELETE_FAILED', 5 | 'UPDATE_ROLLBACK_FAILED' 6 | ] 7 | COMPLETE_STACK_STATES = [ 8 | 'CREATE_COMPLETE', 9 | 'UPDATE_COMPLETE', 10 | ] 11 | ROLLBACK_STACK_STATES = [ 12 | 'ROLLBACK_COMPLETE', 13 | 'UPDATE_ROLLBACK_COMPLETE', 14 | ] 15 | IN_PROGRESS_STACK_STATES = [ 16 | 'CREATE_IN_PROGRESS', 17 | 'ROLLBACK_IN_PROGRESS', 18 | 'DELETE_IN_PROGRESS', 19 | 'UPDATE_IN_PROGRESS', 20 | 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 21 | 'UPDATE_ROLLBACK_IN_PROGRESS', 22 | 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 23 | ] 24 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cfstacks/stacks/0f23c78d278299628edf362fb8f6926aad814025/tests/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/aws_config: -------------------------------------------------------------------------------- 1 | [default] 2 | region = us-east-1 3 | -------------------------------------------------------------------------------- /tests/fixtures/aws_credentials: -------------------------------------------------------------------------------- 1 | [default] 2 | region = us-east-1 3 | 4 | [bar] 5 | region = eu-west-1 6 | -------------------------------------------------------------------------------- /tests/fixtures/config.d/10-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | common: 3 | foo: baz 4 | -------------------------------------------------------------------------------- /tests/fixtures/config.d/20-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | comes_from: 20-config 3 | -------------------------------------------------------------------------------- /tests/fixtures/config_flat.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | foo: bar 3 | false_boolean: false 4 | zero: 0 5 | -------------------------------------------------------------------------------- /tests/fixtures/config_with_envs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | common: 3 | foo: bar 4 | vpc_name: myvpc 5 | 6 | myenv: 7 | foo: 'foo-value-in-myenv' 8 | -------------------------------------------------------------------------------- /tests/fixtures/create_stack_template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: {{ env }}-infra 3 | disable_rollback: true 4 | 5 | tags: 6 | - key: Test 7 | value: {{ custom_tag }} 8 | 9 | --- 10 | # MD5Sum of json object is b08c2e9d7003f62ba8ffe5c985c50a63 11 | AWSTemplateFormatVersion: '2010-09-09' 12 | Description: Infrastructure stack 13 | Resources: 14 | VPC: 15 | Type: AWS::EC2::VPC 16 | Properties: 17 | CidrBlock: 10.50.0.0/16 18 | EnableDnsSupport: true 19 | EnableDnsHostnames: true 20 | Tags: 21 | - Key: Name 22 | Value: {{ env }}-vpc 23 | - Key: Env 24 | Value: {{ env }} 25 | -------------------------------------------------------------------------------- /tests/fixtures/invalid_template.yaml: 
-------------------------------------------------------------------------------- 1 | - key: Test 2 | Resources: 3 | VPC: 4 | Type: AWS::EC2::VPC 5 | Properties: 6 | CidrBlock: 10.50.0.0/16 7 | EnableDnsSupport: true 8 | EnableDnsHostnames: true 9 | Tags: 10 | - Key: Name 11 | Value: {{ env }}-test-vpc 12 | - Key: Env 13 | Value: {{ env }} 14 | 15 | -------------------------------------------------------------------------------- /tests/fixtures/invalid_template_with_null_value.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | metadata: 3 | name: {{ env }}-test-stack 4 | tags: 5 | - key: Test 6 | value: {{ test_tag }} 7 | --- 8 | AWSTemplateFormatVersion: '2010-09-09' 9 | Description: Test Stack 10 | Parameters: 11 | Foo: 12 | Type: String 13 | Default: Bar 14 | Resources: 15 | VPC: 16 | Type: AWS::EC2::VPC 17 | Properties: 18 | CidrBlock: 10.50.0.0/16 19 | EnableDnsSupport: true 20 | EnableDnsHostnames: true 21 | Tags: 22 | - Key: Name 23 | Value: {{ env }}-test-vpc 24 | - Key: Env 25 | Value: null 26 | Outputs: 27 | Foo: 28 | Description: 'Just to test reference and substitution' 29 | Value: !Ref Foo 30 | Export: 31 | Name: !Sub ${AWS::StackName}-Foo 32 | VPCCidrBlock: 33 | Description: 'Just to test attribute selection' 34 | Value: !GetAtt VPC.CidrBlock 35 | Export: 36 | Name: !Sub ${AWS::StackName}-VPCCidrBlock 37 | 38 | -------------------------------------------------------------------------------- /tests/fixtures/load_yaml.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | foo: bar 3 | -------------------------------------------------------------------------------- /tests/fixtures/no_metadata_template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # MD5Sum of json object is b08c2e9d7003f62ba8ffe5c985c50a63 3 | AWSTemplateFormatVersion: '2010-09-09' 4 | Description: Infrastructure stack 5 | Resources: 6 | VPC: 7 | Type: AWS::EC2::VPC 8 | Properties: 9 | CidrBlock: 10.50.0.0/16 10 | EnableDnsSupport: true 11 | EnableDnsHostnames: true 12 | Tags: 13 | - Key: Name 14 | Value: {{ env }}-vpc 15 | - Key: Env 16 | Value: {{ env }} 17 | -------------------------------------------------------------------------------- /tests/fixtures/valid_template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | metadata: 3 | name: {{ env }}-test-stack 4 | tags: 5 | - key: Test 6 | value: {{ test_tag }} 7 | --- 8 | AWSTemplateFormatVersion: '2010-09-09' 9 | Description: Test Stack 10 | Parameters: 11 | Foo: 12 | Type: String 13 | Default: Bar 14 | Resources: 15 | VPC: 16 | Type: AWS::EC2::VPC 17 | Properties: 18 | CidrBlock: 10.50.0.0/16 19 | EnableDnsSupport: true 20 | EnableDnsHostnames: true 21 | Tags: 22 | - Key: Name 23 | Value: {{ env }}-test-vpc 24 | - Key: Env 25 | Value: {{ env }} 26 | Outputs: 27 | Foo: 28 | Description: 'Just to test reference and substitution' 29 | Value: !Ref Foo 30 | Export: 31 | Name: !Sub ${AWS::StackName}-Foo 32 | VPCCidrBlock: 33 | Description: 'Just to test attribute selection' 34 | Value: !GetAtt VPC.CidrBlock 35 | Export: 36 | Name: !Sub ${AWS::StackName}-VPCCidrBlock 37 | 38 | -------------------------------------------------------------------------------- /tests/test_cf.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from boto import cloudformation, s3 4 | from moto import mock_cloudformation 5 | 6 | from stacks 
import cf 7 | 8 | 9 | class TestTemplate(unittest.TestCase): 10 | 11 | def test_gen_valid_template(self): 12 | config = {'env': 'dev', 'test_tag': 'testing'} 13 | tpl_file = open('tests/fixtures/valid_template.yaml') 14 | tpl, metadata, errors = cf.gen_template(tpl_file, config) 15 | self.assertIsInstance(tpl, str) 16 | self.assertIsInstance(metadata, dict) 17 | self.assertEqual(len(errors), 0) 18 | 19 | def test_gen_invalid_template(self): 20 | config = {'env': 'dev', 'test_tag': 'testing'} 21 | tpl_file = open('tests/fixtures/invalid_template.yaml') 22 | 23 | with self.assertRaises(SystemExit) as err: 24 | cf.gen_template(tpl_file, config) 25 | self.assertEqual(err.exception.code, 1) 26 | 27 | def test_gen_template_missing_properties(self): 28 | config = {'env': 'unittest'} 29 | tpl_file = open('tests/fixtures/valid_template.yaml') 30 | 31 | with self.assertRaises(SystemExit) as err: 32 | cf.gen_template(tpl_file, config) 33 | self.assertEqual(err.exception.code, 1) 34 | 35 | def test_gen_invalid_template_with_null_value(self): 36 | config = {'env': 'dev', 'test_tag': 'testing'} 37 | tpl_file = open('tests/fixtures/invalid_template_with_null_value.yaml') 38 | tpl, metadata, errors = cf.gen_template(tpl_file, config) 39 | self.assertIsInstance(tpl, str) 40 | self.assertIsInstance(metadata, dict) 41 | self.assertEqual(len(errors), 1) 42 | 43 | 44 | @mock_cloudformation 45 | class TestStackActions(unittest.TestCase): 46 | 47 | def setUp(self): 48 | self.config = { 49 | 'env': 'unittest', 50 | 'custom_tag': 'custom-tag-value', 51 | 'region': 'us-east-1', 52 | } 53 | self.config['cf_conn'] = cloudformation.connect_to_region(self.config['region']) 54 | self.config['s3_conn'] = s3.connect_to_region(self.config['region']) 55 | 56 | def test_create_stack(self): 57 | stack_name = None 58 | with open('tests/fixtures/create_stack_template.yaml') as tpl_file: 59 | cf.create_stack(self.config['cf_conn'], stack_name, tpl_file, self.config) 60 | 61 | stack = self.config['cf_conn'].describe_stacks('unittest-infra')[0] 62 | self.assertEqual('unittest-infra', stack.stack_name) 63 | self.assertEqual(self.config['env'], stack.tags['Env']) 64 | self.assertEqual(self.config['custom_tag'], stack.tags['Test']) 65 | self.assertEqual('b08c2e9d7003f62ba8ffe5c985c50a63', stack.tags['MD5Sum']) 66 | 67 | def test_update_stack(self): 68 | stack_name = None 69 | with open('tests/fixtures/create_stack_template.yaml') as tpl_file: 70 | cf.create_stack(self.config['cf_conn'], stack_name, tpl_file, 71 | self.config, update=True) 72 | stack = self.config['cf_conn'].describe_stacks('unittest-infra')[0] 73 | self.assertEqual('b08c2e9d7003f62ba8ffe5c985c50a63', stack.tags['MD5Sum']) 74 | 75 | def test_create_on_update(self): 76 | stack_name = 'create-on-update-stack' 77 | with open('tests/fixtures/create_stack_template.yaml') as tpl_file: 78 | cf.create_stack(self.config['cf_conn'], stack_name, tpl_file, 79 | self.config, update=True, create_on_update=True) 80 | stack = self.config['cf_conn'].describe_stacks(stack_name)[0] 81 | self.assertEqual('b08c2e9d7003f62ba8ffe5c985c50a63', stack.tags['MD5Sum']) 82 | 83 | def test_create_stack_no_stack_name(self): 84 | stack_name = None 85 | with open('tests/fixtures/no_metadata_template.yaml') as tpl_file: 86 | with self.assertRaises(SystemExit) as err: 87 | cf.create_stack(self.config['cf_conn'], stack_name, tpl_file, self.config) 88 | self.assertEqual(err.exception.code, 1) 89 | 90 | def test_create_stack_no_metadata(self): 91 | stack_name = 'my-stack' 92 | with 
open('tests/fixtures/no_metadata_template.yaml') as tpl_file: 93 | cf.create_stack(self.config['cf_conn'], stack_name, tpl_file, self.config) 94 | stack = self.config['cf_conn'].describe_stacks('my-stack')[0] 95 | self.assertEqual('my-stack', stack.stack_name) 96 | self.assertEqual(self.config['env'], stack.tags['Env']) 97 | self.assertEqual('b08c2e9d7003f62ba8ffe5c985c50a63', stack.tags['MD5Sum']) 98 | 99 | 100 | if __name__ == '__main__': 101 | unittest.main() 102 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import uuid 3 | 4 | from stacks import config 5 | 6 | 7 | class TestConfig(unittest.TestCase): 8 | def test_load_yaml_valid_file(self): 9 | y = config._load_yaml('tests/fixtures/load_yaml.yaml') 10 | print(y) 11 | self.assertIsInstance(y, dict) 12 | 13 | def test_load_yaml_non_existing_file(self): 14 | y = config._load_yaml(str(uuid.uuid1())) 15 | self.assertIsNone(y) 16 | 17 | def test_get_region_name_no_file(self): 18 | config.AWS_CREDENTIALS_FILE = str(uuid.uuid1()) 19 | region = config.get_region_name('bar') 20 | self.assertIsNone(region) 21 | 22 | def test_get_region_name_file_exists(self): 23 | config.AWS_CREDENTIALS_FILE = 'tests/fixtures/aws_credentials' 24 | region = config.get_region_name('bar') 25 | self.assertEqual(region, 'eu-west-1') 26 | 27 | def test_get_region_name_default_profile(self): 28 | config.AWS_CREDENTIALS_FILE = 'tests/fixtures/aws_credentials' 29 | region = config.get_region_name('default') 30 | self.assertEqual(region, 'us-east-1') 31 | 32 | def test_get_default_region_name(self): 33 | config.AWS_CONFIG_FILE = 'tests/fixtures/aws_config' 34 | region = config.get_default_region_name() 35 | self.assertEqual(region, 'us-east-1') 36 | 37 | def test_get_default_region_name_no_file(self): 38 | config.AWS_CONFIG_FILE = 'tests/fixtures/aws_nonexistingconfig' 39 | region = config.get_default_region_name() 40 | self.assertIsNone(region) 41 | 42 | def test_config_load_no_file(self): 43 | cfg = config.config_load('myenv') 44 | self.assertIsInstance(cfg, dict) 45 | self.assertEqual(cfg['env'], 'myenv') 46 | 47 | def test_config_load_with_envs(self): 48 | config_file = 'tests/fixtures/config_with_envs.yaml' 49 | cfg = config.config_load('myenv', config_file) 50 | self.assertIsInstance(cfg, dict) 51 | self.assertEqual(cfg['env'], 'myenv') 52 | self.assertEqual(cfg['foo'], 'foo-value-in-myenv') 53 | 54 | def test_config_load_flat(self): 55 | config_file = 'tests/fixtures/config_flat.yaml' 56 | cfg = config.config_load('myenv', config_file) 57 | self.assertIsInstance(cfg, dict) 58 | self.assertEqual(cfg['env'], 'myenv') 59 | self.assertEqual(cfg['foo'], 'bar') 60 | 61 | def test_list_files_order(self): 62 | config_dir = 'tests/fixtures/config.d' 63 | correct_order = [ 64 | 'tests/fixtures/config.d/20-config.yaml', 65 | 'tests/fixtures/config.d/10-config.yaml', 66 | ] 67 | ls = config.list_files(config_dir) 68 | self.assertListEqual(ls, correct_order) 69 | 70 | def test_config_dir_override(self): 71 | config_file = 'tests/fixtures/config_flat.yaml' 72 | config_dir = 'tests/fixtures/config.d' 73 | cfg = config.config_load('myenv', config_file, config_dir) 74 | self.assertIsInstance(cfg, dict) 75 | self.assertEqual(cfg['env'], 'myenv') 76 | self.assertEqual(cfg['foo'], 'baz') 77 | self.assertEqual(cfg['comes_from'], '20-config') 78 | 79 | 80 | class TestPrintConfig(unittest.TestCase): 81 | def 
test_print_config(self): 82 | config_file = 'tests/fixtures/config_flat.yaml' 83 | cfg = config.config_load('myenv', config_file) 84 | self.assertIsInstance(cfg, dict) 85 | self.assertEqual(cfg['env'], 'myenv') 86 | self.assertEqual(cfg['false_boolean'], False) 87 | self.assertEqual(cfg['zero'], 0) 88 | 89 | 90 | if __name__ == '__main__': 91 | unittest.main() 92 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | 6 | [tox] 7 | envlist = py36, py36-linux-package, docs 8 | 9 | [testenv] 10 | setenv = 11 | # Used by moto 12 | AWS_ACCESS_KEY_ID = 'AUIAIRUAE7WSA2EFDAS' 13 | AWS_SECRET_ACCESS_KEY = 'iE/dTc/oXJH9Nrvm+Jj7r7m0iE/dTc/oXJ' 14 | 15 | deps = 16 | pytest 17 | pytest-pep8 18 | pytest-pylint 19 | pex 20 | wheel 21 | moto 22 | httpretty>=0.8.14 23 | commands = py.test 24 | 25 | [testenv:py36-linux-package] 26 | basepython = python3.6 27 | 28 | deps = 29 | pex 30 | wheel 31 | commands = 32 | /bin/bash -c 'pex . -r <(pip freeze) --disable-cache --python-shebang="/usr/bin/env python3" -o stacks_py36_linux.pex -c stacks' 33 | 34 | 35 | [testenv:docs] 36 | basepython = python3.6 37 | 38 | deps = 39 | Sphinx>=1.3.3 40 | sphinx_bootstrap_theme 41 | 42 | whitelist_externals=make 43 | changedir=docs 44 | commands = 45 | make html 46 | 47 | --------------------------------------------------------------------------------
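A minimal sketch of how the pieces above fit together, assuming it is run with this repository checked out as the working directory (the fixture path and the 'demo' property value are only illustrative): config_load() from stacks/config.py builds the merged property dictionary for an environment, and gen_template() from stacks/cf.py renders a two-document Jinja2/YAML template against it, returning the JSON template body, the parsed metadata document and a list of validation errors.

from stacks.cf import gen_template
from stacks.config import config_load

# With no config file and no config.d directory this simply returns {'env': 'dev'}.
config = config_load('dev')
# The fixture template references {{ custom_tag }}; the value here is illustrative.
config['custom_tag'] = 'demo'

# Render the two-document template: first document is stack metadata,
# second is the CloudFormation template itself.
with open('tests/fixtures/create_stack_template.yaml') as tpl_file:
    tpl_json, metadata, errors = gen_template(tpl_file, config)

print(metadata['name'])   # 'dev-infra', taken from the metadata document
print(errors)             # [] -- the fixture contains no null values
print(tpl_json)           # rendered CloudFormation template as indented JSON

These are the same two calls that the create, update and diff subcommands in stacks/main.py and the tests in tests/test_cf.py ultimately drive.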