├── .checkignore ├── .codeclimate.yml ├── .editorconfig ├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── TODO ├── docs ├── Makefile ├── conf.py ├── history.rst └── index.rst ├── example ├── README.md ├── files │ ├── global.yaml │ ├── service │ │ └── blog.yaml │ └── standalone │ │ └── redis.yaml ├── manifest.yaml └── units │ ├── service │ └── blog.service │ └── shared │ ├── consul.service │ ├── datadog.service │ ├── fluentd.service │ ├── memcached.service │ ├── mysql.service │ ├── newrelic-sysmond.service │ └── nginx.service ├── houston ├── __init__.py ├── cli.py ├── controller.py ├── files.py └── utils.py ├── requirements.txt ├── setup.cfg ├── setup.py ├── test-requirements.txt └── tests └── utils_tests.py /.checkignore: -------------------------------------------------------------------------------- 1 | tests 2 | example 3 | docs 4 | setup.py 5 | -------------------------------------------------------------------------------- /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | languages: 2 | Python: true 3 | exclude_paths: 4 | - docs/* 5 | - tests/* 6 | - example/* 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | trim_trailing_whitespace = true 7 | 8 | [*.py] 9 | indent_style = space 10 | indent_size = 4 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | bin/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | 
*.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | 37 | # Translations 38 | *.mo 39 | 40 | # Mr Developer 41 | .mr.developer.cfg 42 | .project 43 | .pydevproject 44 | 45 | # Rope 46 | .ropeproject 47 | 48 | # Django stuff: 49 | *.log 50 | *.pot 51 | 52 | # Sphinx documentation 53 | docs/_build/ 54 | 55 | .idea 56 | .vagrant 57 | 58 | config 59 | env 60 | .coverage 61 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: python 3 | python: 4 | - 2.7 5 | - pypy 6 | - 3.3 7 | - 3.4 8 | - 3.5 9 | install: 10 | - pip install -r requirements.txt --download-cache $HOME/.pip-cache 11 | - pip install -r test-requirements.txt --download-cache $HOME/.pip-cache 12 | script: nosetests --with-coverage --cover-package=houston -v 13 | after_success: 14 | - codecov 15 | deploy: 16 | distributions: sdist bdist_wheel 17 | provider: pypi 18 | user: crad 19 | on: 20 | python: 2.7 21 | tags: true 22 | all_branches: true 23 | password: 24 | secure: "SQreo6LUi7jLPWqDzfv4+IY+wYJstu4OKa5uJTmk9cXt5MteMJwvvYnjWGXNxK/GrkzQEh9zRm6cdoy9C7YUwy2JRToduVnmzA+qTE8BaMbgssGqImoq3SsDEtqlmkCMNHwzbuOehcPvjfAXBBWKaNcURxjX1grgijMxaIWBrMf4XxJcgEFD2D9vccRVoeJCC5QLZ0JSfgy7qwbbj3v/9B+tFyHaXR8bg+asESRvlrJ/De9lptxL85bJu0w+oO9xfNI+kt+1/Xt+EGXzePYuoyEChTP/XHMTR+7nJzwoNjFqEtBBg6bi1h4BI7BXy6oKN9pyMGReCIlc+JRWK+2vDlifOMFD6mVXPrvIdikjo3bajgUrsPisud6PVlKQ6ZABgRf3K2Be5EMRItKgFOHaptE6wzE7onztwXvk8g4v0ge3gGb1abyJPi051XLWX+z7Giltwe0n47jJrKdEWtl81BCYTemA0CS8YT135IwU4OKQlGTd/nUJkcWVfoff7CuBAs8FYJ1ErsBwFnORCK80WV/2mCZYcaQl4ShlJFPW/AV+7oXtJLbKr77EPcWVUiyeUB5lFUVKkTs+X5jufz5XOxnCWoox0ZFvVC1QOkInYMV5kdr6+xyMVlPadqYvd8ssLiopJ1LuGm7sfd+P4vf9rm2zWvxQXDnhrbU1YQ+PaTo=" 25 | cache: 
26 | directories: 27 | - $HOME/.pip-cache/ 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 AWeber Communications 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | * Neither the name of the Houston project nor the names of its 13 | contributors may be used to endorse or promote products derived from this 14 | software without specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 | IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 20 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 24 | OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 25 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.rst LICENSE -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Houston 2 | ======= 3 | 4 | :boom:**Important**:boom: This project is deprecated and no longer maintained. 5 | 6 | Easy docker stack deployment to `CoreOS `_ clusters using 7 | `Fleet `_ and `Consul `_. 8 | 9 | Houston installs as a command-line application and is meant to be used for automated 10 | deployment of Dockerized application stacks. 11 | 12 | Houston deployments allow for files to be placed onto the host OS, the deployment 13 | of dependency containers, confirmed startup of a container using Consul, and 14 | teardown of previous container versions in a single run. 15 | 16 | 17 | |Version| |Downloads| |Status| |Coverage| |License| 18 | 19 | Installation 20 | ------------ 21 | 22 | Houston may be installed via the `Python package index `_ 23 | with the tool of your choice: 24 | 25 | .. code:: bash 26 | 27 | pip install houston 28 | 29 | Documentation 30 | ------------- 31 | 32 | Documentation is available on `ReadTheDocs `_. 33 | 34 | There is also an `example configuration directory `_. 35 | 36 | Deployment Types 37 | ---------------- 38 | Houston has 3 deployment types: global, standalone stacks, and services. All three types allow for file archive deployments [1]_ using a `cloud-init style `_ ``write_files`` section. 39 | 40 | - Global deployments place a single list of units intended to be shared across all or a majority of CoreOS instances. 41 | - Standalone deployments are like the global deployment but allows for more targeted deployments with file archives deployed first. 
42 | - Service deployments allow for the deployment of a single unit and the shared units that it is dependent upon 43 | 44 | Usage Example 45 | ------------- 46 | Example of deploying a full stack application: 47 | 48 | .. code:: 49 | 50 | $ houston -c config -e test-us-east-1 example 7b7d061b 51 | INFO Deploying example-file-deploy@11bede3c.service 52 | INFO Deploying example-memcached@1.4.24.service 53 | INFO Deploying example-nginx@35f9e1f3.service 54 | INFO Deploying example-consul-template-nginx@d3bac01d.service 55 | INFO Deploying example-pgbouncer@f20fb494.service 56 | INFO Deploying example-consul-template-pgbouncer@d3bac01d.service 57 | INFO Deploying example-datadog@ff444e66.service 58 | INFO Deploying example@7b7d061b.service 59 | INFO example@7b7d061b.service has started 60 | INFO Validated service is running with Consul 61 | INFO Destroying example@b67b4317.service 62 | INFO Deployment of example 7b7d061b and its dependencies successful. 63 | INFO Eagle, looking great. You're Go. 64 | 65 | When executed, houston creates a tarball of files from the `service's file manifest `_ 66 | and uploads it to Consul's KV database. It then deploys a dynamically created systemd unit to fleet, 67 | which pulls the tarball from Consul and extracts the files to the CoreOS filesystem. 68 | 69 | In the next step, it iterates through the dependency containers specified in the 70 | `manifest `_, submitting and starting each unit, waiting 71 | until a unit is listed as ``active`` in systemd for all nodes, and then 72 | moves on to the next. 73 | 74 | One the dependency containers have started, it starts the example service, 75 | waiting for systemd to report it as active. It then queries Consul for the version 76 | of the service that has started, ensuring that it is running on all the expected 77 | nodes that fleet says it has deployed it to. 
78 | 79 | Once a deployment has been confirmed, it looks at all units submitted to fleet, 80 | checking to see if there are other versions of containers running than what it deployed. 81 | If so, it will destroy those other containers with fleet. 82 | 83 | Finally it will check to see if any other file archive versions exist in Consul's for the 84 | service, removing them if so. 85 | 86 | One of the more interesting parts for managing stack deployment is the namespacing 87 | of the shared stack elements in fleet, so that updating one stack does not impact 88 | another. For example, in the configuration, a service may be referred to as only 89 | ``pgbouncer:f20fb494``, but when deployed it will be prefixed and versioned 90 | appropriately as ``example-pgbouncer@f20fb494`` if the service name is ``example``. 91 | 92 | Version History 93 | --------------- 94 | 95 | Available at https://houston.readthedocs.org/en/latest/history.html 96 | 97 | .. |Version| image:: https://img.shields.io/pypi/v/houston.svg? 98 | :target: https://pypi.python.org/pypi/houston 99 | 100 | .. |Status| image:: https://img.shields.io/travis/aweber/houston.svg? 101 | :target: https://travis-ci.org/aweber/houston 102 | 103 | .. |Coverage| image:: https://img.shields.io/codecov/c/github/aweber/houston.svg? 104 | :target: https://codecov.io/github/aweber/houston?branch=master 105 | 106 | .. |Downloads| image:: https://img.shields.io/pypi/dm/houston.svg? 107 | :target: https://pypi.python.org/pypi/houston 108 | 109 | .. |License| image:: https://img.shields.io/pypi/l/houston.svg? 110 | :target: https://houston.readthedocs.org 111 | 112 | .. [1] Global file deployments happen after the unit files are deployed so that Consul can be up and running prior to the placement of the global files. 
113 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | Todo 2 | ==== 3 | - Restart a deployment stack in order when the file archive or a shared image changes 4 | - Write docs 5 | - Add meaningful test coverage 6 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Houston.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Houston.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Houston" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Houston" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. 
The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Houston documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Jul 21 21:45:12 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 
14 | 15 | import sys 16 | import os 17 | import shlex 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | #sys.path.insert(0, os.path.abspath('.')) 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | #needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | 'sphinx.ext.intersphinx', 35 | 'sphinx.ext.todo', 36 | 'sphinx.ext.viewcode', 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | # The suffix(es) of source filenames. 43 | # You can specify multiple suffix as a list of string: 44 | # source_suffix = ['.rst', '.md'] 45 | source_suffix = '.rst' 46 | 47 | # The encoding of source files. 48 | #source_encoding = 'utf-8-sig' 49 | 50 | # The master toctree document. 51 | master_doc = 'index' 52 | 53 | # General information about the project. 54 | project = u'Houston' 55 | copyright = u'2015, AWeber Communications' 56 | author = u'AWeber Communications' 57 | 58 | # The version info for the project you're documenting, acts as replacement for 59 | # |version| and |release|, also used in various other places throughout the 60 | # built documents. 61 | # 62 | 63 | import houston 64 | release = houston.__version__ 65 | version = '.'.join(release.split('.')[0:1]) 66 | 67 | # The language for content autogenerated by Sphinx. Refer to documentation 68 | # for a list of supported languages. 69 | # 70 | # This is also used if you do content translation via gettext catalogs. 
71 | # Usually you set "language" from the command line for these cases. 72 | language = None 73 | 74 | # There are two options for replacing |today|: either, you set today to some 75 | # non-false value, then it is used: 76 | #today = '' 77 | # Else, today_fmt is used as the format for a strftime call. 78 | #today_fmt = '%B %d, %Y' 79 | 80 | # List of patterns, relative to source directory, that match files and 81 | # directories to ignore when looking for source files. 82 | exclude_patterns = ['_build'] 83 | 84 | # The reST default role (used for this markup: `text`) to use for all 85 | # documents. 86 | #default_role = None 87 | 88 | # If true, '()' will be appended to :func: etc. cross-reference text. 89 | #add_function_parentheses = True 90 | 91 | # If true, the current module name will be prepended to all description 92 | # unit titles (such as .. function::). 93 | #add_module_names = True 94 | 95 | # If true, sectionauthor and moduleauthor directives will be shown in the 96 | # output. They are ignored by default. 97 | #show_authors = False 98 | 99 | # The name of the Pygments (syntax highlighting) style to use. 100 | pygments_style = 'sphinx' 101 | 102 | # A list of ignored prefixes for module index sorting. 103 | #modindex_common_prefix = [] 104 | 105 | # If true, keep warnings as "system message" paragraphs in the built documents. 106 | #keep_warnings = False 107 | 108 | # If true, `todo` and `todoList` produce output, else they produce nothing. 109 | todo_include_todos = True 110 | 111 | 112 | # -- Options for HTML output ---------------------------------------------- 113 | 114 | # The theme to use for HTML and HTML Help pages. See the documentation for 115 | # a list of builtin themes. 116 | html_theme = 'alabaster' 117 | 118 | # Theme options are theme-specific and customize the look and feel of a theme 119 | # further. For a list of options available for each theme, see the 120 | # documentation. 
121 | #html_theme_options = {} 122 | 123 | # Add any paths that contain custom themes here, relative to this directory. 124 | #html_theme_path = [] 125 | 126 | # The name for this set of Sphinx documents. If None, it defaults to 127 | # " v documentation". 128 | #html_title = None 129 | 130 | # A shorter title for the navigation bar. Default is the same as html_title. 131 | #html_short_title = None 132 | 133 | # The name of an image file (relative to this directory) to place at the top 134 | # of the sidebar. 135 | #html_logo = None 136 | 137 | # The name of an image file (within the static path) to use as favicon of the 138 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 139 | # pixels large. 140 | #html_favicon = None 141 | 142 | # Add any paths that contain custom static files (such as style sheets) here, 143 | # relative to this directory. They are copied after the builtin static files, 144 | # so a file named "default.css" will overwrite the builtin "default.css". 145 | html_static_path = ['_static'] 146 | 147 | # Add any extra paths that contain custom files (such as robots.txt or 148 | # .htaccess) here, relative to this directory. These files are copied 149 | # directly to the root of the documentation. 150 | #html_extra_path = [] 151 | 152 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 153 | # using the given strftime format. 154 | #html_last_updated_fmt = '%b %d, %Y' 155 | 156 | # If true, SmartyPants will be used to convert quotes and dashes to 157 | # typographically correct entities. 158 | #html_use_smartypants = True 159 | 160 | # Custom sidebar templates, maps document names to template names. 161 | #html_sidebars = {} 162 | 163 | # Additional templates that should be rendered to pages, maps page names to 164 | # template names. 165 | #html_additional_pages = {} 166 | 167 | # If false, no module index is generated. 
168 | #html_domain_indices = True 169 | 170 | # If false, no index is generated. 171 | #html_use_index = True 172 | 173 | # If true, the index is split into individual pages for each letter. 174 | #html_split_index = False 175 | 176 | # If true, links to the reST sources are added to the pages. 177 | #html_show_sourcelink = True 178 | 179 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 180 | #html_show_sphinx = True 181 | 182 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 183 | #html_show_copyright = True 184 | 185 | # If true, an OpenSearch description file will be output, and all pages will 186 | # contain a tag referring to it. The value of this option must be the 187 | # base URL from which the finished HTML is served. 188 | #html_use_opensearch = '' 189 | 190 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 191 | #html_file_suffix = None 192 | 193 | # Language to be used for generating the HTML full-text search index. 194 | # Sphinx supports the following languages: 195 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 196 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 197 | #html_search_language = 'en' 198 | 199 | # A dictionary with options for the search language support, empty by default. 200 | # Now only 'ja' uses this config value 201 | #html_search_options = {'type': 'default'} 202 | 203 | # The name of a javascript file (relative to the configuration directory) that 204 | # implements a search results scorer. If empty, the default will be used. 205 | #html_search_scorer = 'scorer.js' 206 | 207 | # Output file base name for HTML help builder. 208 | htmlhelp_basename = 'Houstondoc' 209 | 210 | # -- Options for LaTeX output --------------------------------------------- 211 | 212 | latex_elements = { 213 | # The paper size ('letterpaper' or 'a4paper'). 214 | #'papersize': 'letterpaper', 215 | 216 | # The font size ('10pt', '11pt' or '12pt'). 
217 | #'pointsize': '10pt', 218 | 219 | # Additional stuff for the LaTeX preamble. 220 | #'preamble': '', 221 | 222 | # Latex figure (float) alignment 223 | #'figure_align': 'htbp', 224 | } 225 | 226 | # Grouping the document tree into LaTeX files. List of tuples 227 | # (source start file, target name, title, 228 | # author, documentclass [howto, manual, or own class]). 229 | latex_documents = [ 230 | (master_doc, 'Houston.tex', u'Houston Documentation', 231 | u'AWeber Communications', 'manual'), 232 | ] 233 | 234 | # The name of an image file (relative to this directory) to place at the top of 235 | # the title page. 236 | #latex_logo = None 237 | 238 | # For "manual" documents, if this is true, then toplevel headings are parts, 239 | # not chapters. 240 | #latex_use_parts = False 241 | 242 | # If true, show page references after internal links. 243 | #latex_show_pagerefs = False 244 | 245 | # If true, show URL addresses after external links. 246 | #latex_show_urls = False 247 | 248 | # Documents to append as an appendix to all manuals. 249 | #latex_appendices = [] 250 | 251 | # If false, no module index is generated. 252 | #latex_domain_indices = True 253 | 254 | 255 | # -- Options for manual page output --------------------------------------- 256 | 257 | # One entry per manual page. List of tuples 258 | # (source start file, name, description, authors, manual section). 259 | man_pages = [ 260 | (master_doc, 'houston', u'Houston Documentation', 261 | [author], 1) 262 | ] 263 | 264 | # If true, show URL addresses after external links. 265 | #man_show_urls = False 266 | 267 | 268 | # -- Options for Texinfo output ------------------------------------------- 269 | 270 | # Grouping the document tree into Texinfo files. 
List of tuples 271 | # (source start file, target name, title, author, 272 | # dir menu entry, description, category) 273 | texinfo_documents = [ 274 | (master_doc, 'Houston', u'Houston Documentation', 275 | author, 'Houston', 'One line description of project.', 276 | 'Miscellaneous'), 277 | ] 278 | 279 | # Documents to append as an appendix to all manuals. 280 | #texinfo_appendices = [] 281 | 282 | # If false, no module index is generated. 283 | #texinfo_domain_indices = True 284 | 285 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 286 | #texinfo_show_urls = 'footnote' 287 | 288 | # If true, do not generate a @detailmenu in the "Top" node's menu. 289 | #texinfo_no_detailmenu = False 290 | 291 | 292 | # Example configuration for intersphinx: refer to the Python standard library. 293 | intersphinx_mapping = {'https://docs.python.org/': None} 294 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | Version History 2 | =============== 3 | - 0.1.3 - released *2015-07-23* 4 | - Added shared stacks 5 | - Added file archive deployments for globals and shared stacks 6 | - 0.1.2 - released *2015-07-22* - Shared-unit dependency chain construction 7 | - 0.1.0 - released *2015-07-22* - Initial Version (alpha) 8 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Houston 2 | ======= 3 | Easy docker stack deployment to `CoreOS `_ clusters using 4 | `Fleet `_ and `Consul `_. 5 | 6 | Contents: 7 | 8 | .. 
toctree:: 9 | :maxdepth: 2 10 | 11 | Issues 12 | ------ 13 | Please report any issues to the Github repo at `https://github.com/aweber/houston/issues `_ 14 | 15 | Source 16 | ------ 17 | Source code is available on Github at `https://github.com/aweber/houston `_ 18 | 19 | Version History 20 | --------------- 21 | See :doc:`history` 22 | 23 | Indices and tables 24 | ================== 25 | 26 | * :ref:`genindex` 27 | * :ref:`modindex` 28 | * :ref:`search` 29 | 30 | -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | Example Houston Configuration 2 | ============================= 3 | The files in this repository are for the automated deployment of application 4 | stacks to [CoreOS](https://coreos.com) servers running [fleet](https://github.com/coreos/fleet), 5 | using [Houston](https://github.com/aweber/houston). 6 | 7 | This is a contrived example and is not expected to work out of the box, but instead is intended to demonstrate a starting point. 
8 | 9 | **Files** 10 | 11 | - ``manifest.yaml``: The core houston manifest with environment and service dependency information 12 | 13 | **Directories** 14 | 15 | - ``files``: Cloud-Init style file manifests for deploying of files as part of service deployment 16 | - ``units/service``: Application/service units 17 | - ``units/shared``: Non-service units 18 | -------------------------------------------------------------------------------- /example/files/global.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | - path: /opt/bin/consul-register 4 | permissions: 0755 5 | owner: core 6 | content: | 7 | #!/usr/bin/bash 8 | source /etc/environment 9 | PORT=`docker inspect -f '{{range $p, $conf := .NetworkSettings.Ports}}{{ if eq $p "8000/tcp" }}{{(index $conf 0).HostPort}}{{end}}{{end}}' $1-$2` 10 | cat << EOF > /tmp/body.json 11 | { 12 | "ID": "$1-$2", 13 | "Name": "$1", 14 | "Tags": ["http", "$2"], 15 | "Address": "${COREOS_PRIVATE_IPV4}", 16 | "Port": $PORT, 17 | "Check": { 18 | "HTTP": "http://${COREOS_PRIVATE_IPV4}:${PORT}/status", 19 | "Interval": "10s" 20 | } 21 | } 22 | EOF 23 | /usr/bin/curl -s -H "Content-type: application/json" -X POST -d "@/tmp/body.json" http://${COREOS_PRIVATE_IPV4}:8500/v1/agent/service/register 24 | rm /tmp/body.json 25 | -------------------------------------------------------------------------------- /example/files/service/blog.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | - path: /etc/profile.d/memcached.env 4 | permissions: 0644 5 | owner: core 6 | content: | 7 | MEMCACHED_ARGS="-m 512" 8 | 9 | - path: /etc/profile.d/datadog.env 10 | permissions: 0644 11 | owner: core 12 | content: | 13 | DATADOG_ARGS="--link=memcached:memcached --link=nginx:nginx" 14 | 15 | - path: /etc/dd-agent/conf.d/mcache.yaml 16 | permissions: 0755 17 | owner: core 18 | content: | 19 | init_config: 20 | 21 | instances: 22 | - url: 
memcached 23 | port: 11211 24 | tags: 25 | - service:{service} 26 | - environment:{environment} 27 | 28 | - path: /etc/dd-agent/conf.d/nginx.yaml 29 | permissions: 0755 30 | owner: core 31 | content: | 32 | init_config: 33 | 34 | instances: 35 | - nginx_status_url: http://nginx/nginx_status/ 36 | tags: 37 | - service:{service} 38 | - environment:{environment} 39 | -------------------------------------------------------------------------------- /example/files/standalone/redis.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gmr/houston/c9f79ae86d395fb875adcf6a76748984f20158ae/example/files/standalone/redis.yaml -------------------------------------------------------------------------------- /example/manifest.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | environments: 4 | test-us-east-1: 5 | consul: https://consul.test-us-east-1.com 6 | fleet: https://fleet.test-us-east-1.com 7 | 8 | stage-us-east-1: 9 | consul: https://consul.stage-us-east-1.com 10 | fleet: https://fleet.stage-us-east-1.com 11 | 12 | prod-us-east-1: 13 | consul: https://consul.prod-us-east-1.com 14 | fleet: https://fleet.prod-us-east-1.com 15 | 16 | variables: 17 | test-us-east-1: 18 | domain: test-us-east-1.com 19 | 20 | stage-us-east-1: 21 | domain: stage-us-east-1.com 22 | 23 | prod-us-east-1: 24 | domain: prod-us-east-1.com 25 | 26 | global-units: 27 | - fluentd:707464a0 28 | - consul:35e75d05 29 | 30 | shared-stacks: 31 | redis: 32 | - redis:2.8 33 | - datadog:ff444e66 34 | 35 | shared-units: 36 | blog: 37 | - memcached:1.4.24 38 | - nginx:1.9.3 39 | - mysql:5.5 40 | - datadog:5.2.0 41 | - newrelic-sysmond:2.1.0.124[environment:prod-us-east-1] 42 | -------------------------------------------------------------------------------- /example/units/service/blog.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 
Description=Blog Service 3 | Requires=consul.service 4 | Requires=memcached.service 5 | Requires=mysql.service 6 | After=consul.service 7 | After=memcached.service 8 | After=mysql.service 9 | 10 | [Service] 11 | Type=oneshot 12 | RemainAfterExit=true 13 | ExecStartPre=-/usr/bin/docker kill %p-%i 14 | ExecStartPre=-/usr/bin/docker rm %p-%i 15 | ExecStartPre=/usr/bin/docker pull wordpress:%i 16 | ExecStart=/usr/bin/docker run -d --name %p-%i -P --link=memcached:memcached --link=mysql:mysql wordpress:%i 17 | ExecStop=/usr/bin/docker stop %p-%i 18 | 19 | [X-Fleet] 20 | Global=true 21 | MachineMetadata=service=%p 22 | -------------------------------------------------------------------------------- /example/units/shared/consul.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Consul Server 3 | Requires=docker.service 4 | After=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | EnvironmentFile=/etc/environment 10 | ExecStartPre=-/usr/bin/docker stop consul 11 | ExecStartPre=-/usr/bin/docker rm consul 12 | ExecStartPre=/usr/bin/docker pull progrium/consul:latest 13 | ExecStart=/usr/bin/docker run -d \ 14 | --name consul \ 15 | -v /var/lib/consul:/var/lib/consul \ 16 | -p 8300:8300 \ 17 | -p 8301:8301 \ 18 | -p 8301:8301/udp \ 19 | -p 8500:8500 \ 20 | -p 53:8600 \ 21 | -p 53:8600/udp \ 22 | progrium/consul:latest \ 23 | -advertise=${COREOS_PRIVATE_IPV4} \ 24 | -dc={environment} \ 25 | -recursor=${RESOLVER} \ 26 | -node=%H 27 | ExecStop=/usr/bin/docker stop consul 28 | 29 | [X-Fleet] 30 | Global=true 31 | -------------------------------------------------------------------------------- /example/units/shared/datadog.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Datadog Agent 3 | Requires=docker.service 4 | After=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | 
ExecStartPre=-/usr/bin/docker kill datadog 10 | ExecStartPre=-/usr/bin/docker rm datadog 11 | ExecStartPre=/usr/bin/docker pull datadog/docker-dd-agent:%i 12 | ExecStart=| 13 | /usr/bin/docker run -d \ 14 | --name=datadog \ 15 | -h %H \ 16 | -p 8125:8125/udp \ 17 | -v /etc/dd-agent/conf.d:/etc/dd-agent/conf.d \ 18 | -v /var/run/docker.sock:/var/run/docker.sock \ 19 | -v /proc/mounts:/host/proc/mounts:ro \ 20 | -v /sys/fs/cgroup/:/host/sys/fs/cgroup:ro \ 21 | datadog/docker-dd-agent:%i 22 | ExecStop=/usr/bin/docker stop datadog 23 | 24 | [X-Fleet] 25 | Global=true 26 | MachineMetadata=datadog=true 27 | MachineMetadata=service={service} 28 | -------------------------------------------------------------------------------- /example/units/shared/fluentd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=FluentD Log Forwarder 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | EnvironmentFile=/etc/environment 10 | ExecStartPre=-/usr/bin/docker kill fluentd 11 | ExecStartPre=-/usr/bin/docker rm fluentd 12 | ExecStartPre=/usr/bin/docker pull fluent/fluentd:latest 13 | ExecStart=/usr/bin/docker run -d \ 14 | --name fluentd \ 15 | -v /var/lib/docker/containers:/var/lib/docker/containers \ 16 | fluent/fluentd:latest 17 | ExecStop=/usr/bin/docker stop fluentd 18 | 19 | [X-Fleet] 20 | Global=true 21 | -------------------------------------------------------------------------------- /example/units/shared/memcached.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Memcached 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | EnvironmentFile=-/etc/profile.d/memcached.env 10 | ExecStartPre=-/usr/bin/docker kill memcached 11 | ExecStartPre=-/usr/bin/docker rm memcached 12 | ExecStartPre=/usr/bin/docker pull memcached:%i 13 | 
ExecStart=/bin/bash -c "/usr/bin/docker run -d --name memcached -p 11211:11211 memcached:%i memcached ${MEMCACHED_ARGS:--m 64}" 14 | ExecStop=/usr/bin/docker stop memcached 15 | 16 | [X-Fleet] 17 | Global=true 18 | MachineMetadata=memcached=true 19 | MachineMetadata=service={service} 20 | -------------------------------------------------------------------------------- /example/units/shared/mysql.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=MySQL 3 | Requires=docker.service 4 | After=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | ExecStartPre=-/usr/bin/docker kill mysql 10 | ExecStartPre=-/usr/bin/docker rm mysql 11 | ExecStartPre=/usr/bin/docker pull mysql:%i 12 | ExecStart=/usr/bin/docker run -d --name mysql mysql:%i 13 | ExecStop=/usr/bin/docker stop mysql 14 | 15 | [X-Fleet] 16 | Global=true 17 | MachineMetadata=mysql=true 18 | MachineMetadata=service={service} 19 | -------------------------------------------------------------------------------- /example/units/shared/newrelic-sysmond.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=New Relic System Monitor (nrsysmond) 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | ExecStartPre=-/usr/bin/docker kill nrsysmond 10 | ExecStartPre=-/usr/bin/docker rm nrsysmond 11 | ExecStartPre=/usr/bin/docker newrelic/nrsysmond:%i 12 | ExecStart=/usr/bin/docker run -d \ 13 | --privileged=true \ 14 | --name nrsysmond \ 15 | --pid=host \ 16 | --net=host \ 17 | -v /sys:/sys \ 18 | -v /dev:/dev \ 19 | -v /var/run/docker.sock:/var/run/docker.sock \ 20 | -v /var/log:/var/log:rw \ 21 | -e NRSYSMOND_license_key= \ 22 | -e NRSYSMOND_logfile=/var/log/nrsysmond.log \ 23 | ${DOCKER_SERVER}/newrelic/nrsysmond:%i 24 | ExecStop=/usr/bin/docker stop -t 30 nrsysmond 25 | 26 | [X-Fleet] 27 | Global=true 28 | 
MachineMetadata=environment=prod-us-east-1 29 | MachineMetadata=service={service} 30 | -------------------------------------------------------------------------------- /example/units/shared/nginx.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nginx 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | RemainAfterExit=true 9 | EnvironmentFile=/etc/profile.d/aweber.env 10 | ExecStartPre=-/usr/bin/docker kill nginx 11 | ExecStartPre=-/usr/bin/docker rm nginx 12 | ExecStartPre=/usr/bin/docker pull ${DOCKER_SERVER}/aweber/nginx 13 | ExecStart=/usr/bin/docker run -d --name nginx -p 80:80 ${DOCKER_SERVER}/aweber/nginx 14 | ExecStop=/usr/bin/docker stop nginx 15 | 16 | [X-Fleet] 17 | Global=true 18 | MachineMetadata=nginx=true 19 | MachineMetadata=service={service} 20 | -------------------------------------------------------------------------------- /houston/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Houston 3 | ======= 4 | 5 | Easy docker stack deployment to CoreOS clusters using Fleet and Consul 6 | 7 | """ 8 | __version__ = '0.3.0' 9 | 10 | 11 | DEBUG_CONFIG = { 12 | 'version': 1, 13 | 'disable_existing_loggers': True, 14 | 'incremental': False, 15 | 'formatters': { 16 | 'console': { 17 | 'format': ( 18 | '%(levelname)-8s %(name) -30s %(message)s' 19 | ) 20 | }, 21 | }, 22 | 'handlers': { 23 | 'console': { 24 | 'class': 'logging.StreamHandler', 25 | 'formatter': 'console', 26 | }, 27 | }, 28 | 'loggers': { 29 | 'consulate': { 30 | 'handlers': ['console'], 31 | 'level': 'INFO', 32 | }, 33 | 'fleetpy': { 34 | 'handlers': ['console'], 35 | 'level': 'INFO', 36 | }, 37 | 'houston': { 38 | 'handlers': ['console'], 39 | 'level': 'DEBUG', 40 | } 41 | } 42 | } 43 | 44 | 45 | LOG_CONFIG = { 46 | 'version': 1, 47 | 'disable_existing_loggers': True, 48 | 'incremental': False, 49 | 'formatters': { 50 | 'console': { 
51 | 'format': ( 52 | '%(levelname)-8s %(message)s' 53 | ) 54 | }, 55 | }, 56 | 'handlers': { 57 | 'console': { 58 | 'class': 'logging.StreamHandler', 59 | 'formatter': 'console', 60 | }, 61 | }, 62 | 'loggers': { 63 | 'consulate': { 64 | 'handlers': ['console'], 65 | 'level': 'WARNING', 66 | }, 67 | 'fleetpy': { 68 | 'handlers': ['console'], 69 | 'level': 'WARNING', 70 | }, 71 | 'houston': { 72 | 'handlers': ['console'], 73 | 'level': 'INFO', 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /houston/cli.py: -------------------------------------------------------------------------------- 1 | """ 2 | Houston Command Line Interface 3 | 4 | """ 5 | import argparse 6 | import logging.config 7 | import logging 8 | from os import path 9 | import sys 10 | 11 | from houston import controller 12 | 13 | from houston import DEBUG_CONFIG 14 | from houston import LOG_CONFIG 15 | 16 | LOGGER = logging.getLogger(__name__) 17 | 18 | DESC = 'Easy docker stack deployment to CoreOS clusters using Fleet and Consul' 19 | 20 | 21 | class CLI(object): 22 | 23 | def __init__(self): 24 | self._parser = self._create_parser() 25 | 26 | def run(self): 27 | args = self._parser.parse_args() 28 | 29 | if args.verbose: 30 | logging.config.dictConfig(DEBUG_CONFIG) 31 | else: 32 | logging.config.dictConfig(LOG_CONFIG) 33 | 34 | if not args.command: 35 | sys.stderr.write('ERROR: You must specify a command\n\n') 36 | self._parser.print_help() 37 | sys.exit(1) 38 | 39 | args_dict = vars(args) 40 | for key in ['name', 'version']: 41 | if key in args_dict: 42 | args_dict[key] = args_dict[key][0] 43 | 44 | obj = controller.Controller(args.config_dir, args.environment, 45 | args.command, 46 | args_dict.get('name'), 47 | args_dict.get('group'), 48 | args_dict.get('version'), 49 | args.delay, args.max_tries, 50 | args.no_dependencies, 51 | args.no_removal, 52 | args.skip_consul, 53 | args.remove) 54 | if obj.run(): 55 | LOGGER.info('Eagle, looking 
great. You\'re Go.') 56 | else: 57 | LOGGER.info('Deployment failed.') 58 | sys.exit(2) 59 | 60 | @staticmethod 61 | def _create_parser(): 62 | parser = argparse.ArgumentParser(description=DESC) 63 | parser.add_argument('-c', '--config-dir', 64 | default=path.abspath('.'), 65 | help='Specify the path to the configuration ' 66 | 'directory. Default: .') 67 | 68 | parser.add_argument('-e', '--environment', required=True, 69 | help='The environment name') 70 | 71 | parser.add_argument('-d', '--delay', action='store', type=int, 72 | help='How long to pause between service ' 73 | 'activation checks', default=5) 74 | 75 | parser.add_argument('-g', '--group', action='store', 76 | help='Optional deployment group') 77 | 78 | parser.add_argument('-m', '--max-tries', action='store', type=int, 79 | help='How many times should Houston try and' 80 | 'validate that a service has started', 81 | default=15) 82 | 83 | parser.add_argument('--no-dependencies', action='store_true', 84 | help='Do not perform dependency injection in units') 85 | 86 | parser.add_argument('-n', '--no-removal', action='store_true', 87 | help='Do not remove units from fleet upon failure') 88 | 89 | parser.add_argument('--remove', action='store_true', 90 | help='Remove any deployed units') 91 | 92 | parser.add_argument('-s', '--skip-consul', action='store_true', 93 | help='Skip consul check on stack deployment') 94 | 95 | parser.add_argument('-v', '--verbose', action='store_true') 96 | 97 | sparser = parser.add_subparsers(title='Commands', dest='command') 98 | 99 | sparser.add_parser('global', help='Deploy the global stack') 100 | 101 | s_parser = sparser.add_parser('service', 102 | help='Deploy a service stack') 103 | 104 | s_parser.add_argument('name', nargs=1, 105 | help='Name of the service to deploy') 106 | s_parser.add_argument('version', nargs=1, 107 | help='The version of the service to deploy') 108 | 109 | sa_parser = sparser.add_parser('standalone', 110 | help='Deploy a standalone stack') 111 | 
sa_parser.add_argument('name', nargs=1, 112 | help='Name of the standalone stack to deploy') 113 | return parser 114 | 115 | 116 | def run(): 117 | CLI().run() 118 | -------------------------------------------------------------------------------- /houston/controller.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core Houston Application 3 | 4 | """ 5 | import hashlib 6 | import logging 7 | from os import path 8 | import re 9 | import time 10 | 11 | import consulate 12 | import fleetpy 13 | import yaml 14 | 15 | from houston import files 16 | from houston import utils 17 | 18 | LOGGER = logging.getLogger(__name__) 19 | 20 | CONFIG_FILE = 'manifest.yaml' 21 | UNIT_PATTERN = re.compile(r'(?P[\w\-]+):?(?P[\w\-\.]+)?' 22 | r'(\[(?P[\w\-:]+)\])?') 23 | 24 | 25 | class Controller(object): 26 | 27 | def __init__(self, config_path, environment, command, name, group, version, 28 | delay, max_tries, no_dependencies, no_removal, skip_consul, 29 | remove_units): 30 | self._config_path = self._normalize_path(config_path) 31 | self._environment = environment 32 | self._command = command 33 | self._name = name or ('' if command != 'global' else 'global') 34 | self._group = group 35 | self._version = version 36 | self._deployed_units = [] 37 | self._delay = delay 38 | self._max_tries = max_tries 39 | self._no_dependencies = no_dependencies 40 | self._no_removal = no_removal 41 | self._skip_consul = skip_consul 42 | self._remove_unit_files = remove_units 43 | self._config = self._load_config(CONFIG_FILE) 44 | if environment not in self._config.get('environments', {}): 45 | raise ValueError('environment not found') 46 | 47 | kwargs = utils.parse_endpoint(self.env_config['consul']) 48 | self._consul = consulate.Consul(**kwargs) 49 | self._file_deployment = None 50 | self._fleet = fleetpy.Client(self.env_config.get('fleet')) 51 | 52 | @property 53 | def env_config(self): 54 | return self._config.get('environments', 
    def run(self):
        """Execute the deployment (or removal) requested on the command line.

        Dispatches on the parsed command: ``--remove`` tears units down,
        ``global`` deploys the global stack, ``standalone`` deploys shared
        units only, and anything else is treated as a service deployment.

        :rtype: bool

        """
        if self._remove_unit_files:
            return self._remove_units()

        # Build a human-readable label for the deployment log line
        name = self._name
        if self._version:
            name = '{} {}'.format(name, self._version)
        if self._group:
            name = '{} to {} deployment group'.format(name, self._group)
        LOGGER.info('Deploying %s', name)

        if self._global:
            self._name = 'global'
            if not self._deploy_globals():
                return False
            if not self._deploy_files():
                return False
            # Tear down units left over from earlier versions of this stack
            self._shutdown_other_versions()
            if self._file_deployment:
                self._file_deployment.remove_other_archive_versions()
            return True

        # Non-global deploys chain off the last global unit for dependency
        # injection into the first standalone/service unit
        self._add_last_deployed_global()

        if not self._deploy_files():
            LOGGER.info('Aborting run due to file deployment error')
            return False

        if self._standalone:
            if not self._deploy_shared_units():
                return False
            self._shutdown_other_versions()
            if self._file_deployment:
                self._file_deployment.remove_other_archive_versions()
            return True

        return self._deploy_service()
97 | 98 | """ 99 | service = self._config.get('global', [])[-1] 100 | version = None 101 | if ':' in service: 102 | service, version = service.split(':') 103 | self._deployed_units.append(utils.unit_name(service, 'global', 104 | self._group, version)) 105 | 106 | def _apply_variables(self, value): 107 | value = value.replace('{service}', self._name) 108 | if self._group: 109 | value = value.replace('{group}', self._group) 110 | variables = self._config.get('variables', {}) 111 | if self._environment in variables: 112 | variables = variables[self._environment] 113 | for name in variables: 114 | key = '{{{0}}}'.format(name) 115 | if key in value: 116 | value = value.replace(key, variables[name]) 117 | 118 | for unit_name in self._deployed_units: 119 | _parent, service, _group, _ver = utils.parse_unit_name(unit_name) 120 | base_service = '{}.service'.format(service) 121 | if base_service in value: 122 | LOGGER.debug('Replacing %s with %s in value', 123 | base_service, unit_name) 124 | value = value.replace(base_service, unit_name) 125 | return value 126 | 127 | def _check_consul_for_service(self): 128 | """Return true if the service expected to be running, is reported up 129 | in Consul by checking for all ip addresses to be present. 
    def _deploy_files(self):
        """Deploy the file-archive unit for the current stack when it has a
        file manifest.

        Skips the deployment if a unit for the current manifest hash is
        already active. Returns ``True`` when there is nothing to deploy or
        the archive unit became active.

        :rtype: bool

        """
        if self._file_manifest():
            if self._global:
                manifest = 'global.yaml'
            else:
                manifest = '{0}/{1}.yaml'.format(self._command, self._name)

            # The manifest hash acts as the unit "version", so changed file
            # content yields a new unit name
            unit_name = utils.unit_name('file-deploy', self._name, self._group,
                                        self._file_manifest_hash())

            self._file_deployment = files.FileDeployment(unit_name,
                                                         self.env_config,
                                                         self._config_path,
                                                         manifest,
                                                         self._name,
                                                         self._group,
                                                         self._environment)

            if self._unit_is_active(unit_name):
                self._deployed_units.append(unit_name)
                return True

            if self._file_deployment.build_archive():
                LOGGER.info('Uploading archive file to consul')
                self._file_deployment.upload_archive()

            unit = self._fleet.unit(unit_name)
            unit.read_string(self._file_deployment.unit_file())
            # Chain after the most recently deployed unit, if any
            if self._deployed_units:
                self._maybe_add_last_unit(unit, self._deployed_units[-1])

            LOGGER.info('Deploying archive file service: %s', unit_name)
            unit.submit()
            unit.start()

            if not self._wait_for_unit_to_become_active(unit_name):
                LOGGER.error('Failed to deploy files')
                return False

            self._deployed_units.append(unit_name)
        return True
247 | 248 | def _deploy_shared_units(self): 249 | # Ensure the file archive is there 250 | last_unit = self._deployed_units[-1] 251 | shared_unit_prefix = path.join(self._config_path, 'units', 'shared') 252 | for name in self._get_units(): 253 | version = None 254 | if ':' in name: 255 | name, version = name.split(':') 256 | unit_file = path.join(shared_unit_prefix, name) 257 | unit_name = utils.unit_name(name, self._name, 258 | self._group, 259 | version) 260 | if not self._deploy_unit(unit_name, unit_file, last_unit): 261 | LOGGER.error('Aborting, failed to deploy %s', unit_name) 262 | return False 263 | last_unit = unit_name 264 | return True 265 | 266 | def _deploy_unit(self, unit_name, unit_file, last_unit=None): 267 | unit = self._fleet.unit(unit_name) 268 | unit.read_string(self._apply_variables(self._unit_file(unit_file))) 269 | 270 | if not self._group: 271 | for index, option in enumerate(unit.options()): 272 | if option['name'] == 'MachineMetadata' and \ 273 | option['value'].startswith('group='): 274 | LOGGER.debug('Removing group metadata for non-group deploy') 275 | unit._options.pop(index) 276 | 277 | self._maybe_add_last_unit(unit, last_unit) 278 | 279 | if self._unit_is_active(unit_name): 280 | self._deployed_units.append(unit_name) 281 | LOGGER.debug('Skipping %s: already active', unit_name) 282 | return True 283 | 284 | LOGGER.debug('Deploying %s', unit_name) 285 | if unit.submit(): 286 | LOGGER.debug('Starting %s', unit_name) 287 | if unit.start(): 288 | if not self._wait_for_unit_to_become_active(unit_name): 289 | LOGGER.error('Failed to deploy %s', unit_name) 290 | if not self._no_removal: 291 | LOGGER.debug('Removing unit from fleet: %s', unit_name) 292 | unit.destroy() 293 | return False 294 | else: 295 | LOGGER.error('Failed to start %s', unit_name) 296 | if not self._no_removal: 297 | LOGGER.debug('Removing unit from fleet: %s', unit_name) 298 | unit.destroy() 299 | return False 300 | else: 301 | LOGGER.error('Failed to submit %s', 
unit_name) 302 | return False 303 | LOGGER.info("%s has started", unit_name) 304 | self._deployed_units.append(unit_name) 305 | return True 306 | 307 | def _file_manifest(self): 308 | if self._global: 309 | file_path = path.join(self._config_path, 'files', 'global.yaml') 310 | else: 311 | file_path = path.join(self._config_path, 'files', self._command, 312 | '{0}.yaml'.format(self._name)) 313 | if not path.exists(file_path): 314 | return None 315 | with open(file_path) as handle: 316 | return handle.read() 317 | 318 | def _file_manifest_hash(self): 319 | value = self._file_manifest() 320 | hash_value = hashlib.md5(value.encode('utf-8')) 321 | return hash_value.hexdigest()[:8] 322 | 323 | def _get_units(self): 324 | units = [] 325 | for unit in self._config.get(self._command).get(self._name, []): 326 | match = UNIT_PATTERN.match(unit) 327 | if match.group('exp'): 328 | key, value = match.group('exp').split(':') 329 | if key == 'environment': 330 | LOGGER.debug('Evaluating %s for %s[%s]', 331 | key, value, self._environment) 332 | if value != self._environment: 333 | continue 334 | if match.group('version'): 335 | units.append('{0}:{1}'.format(match.group('image'), 336 | match.group('version'))) 337 | else: 338 | units.append(match.group('image')) 339 | return units 340 | 341 | @property 342 | def _global(self): 343 | return self._command == 'global' 344 | 345 | def _load_config(self, filename): 346 | file = path.join(self._config_path, filename) 347 | if not path.exists(file): 348 | raise ValueError('Config file {0} not found'.format(file)) 349 | with open(file, 'r') as handle: 350 | return yaml.load(handle) 351 | 352 | @staticmethod 353 | def _machine_label(ntuple): 354 | return '{0}.../{1}'.format(ntuple.id[0:7], ntuple.ipaddr) 355 | 356 | def _maybe_add_last_unit(self, unit, last_unit): 357 | if not last_unit or self._no_dependencies: 358 | return 359 | 360 | for option in unit.options(): 361 | if (option['section'] == 'Unit' and 362 | option['name'] in ['After', 
'Requires'] and 363 | option['value'] == last_unit): 364 | LOGGER.debug('Bypassing addition of last unit dependency') 365 | return 366 | 367 | options = unit._options 368 | unit._options = [] 369 | for option in options: 370 | unit.add_option(option.section, option.name, option.value) 371 | if option.section == 'Unit' and option.name == 'Description': 372 | LOGGER.debug('Adding dependency on %s', last_unit) 373 | unit.add_option('Unit', 'Requires', last_unit) 374 | unit.add_option('Unit', 'After', last_unit) 375 | 376 | @staticmethod 377 | def _normalize_path(value): # pragma: no cover 378 | """Normalize the specified path value returning the absolute 379 | path for it. 380 | 381 | :param str value: The path value to normalize 382 | :rtype: str 383 | 384 | """ 385 | return path.abspath(path.normpath(value)) 386 | 387 | def _remove_files(self): 388 | if self._file_manifest(): 389 | unit_name = utils.unit_name('file-deploy', self._name, self._group, 390 | self._file_manifest_hash()) 391 | self._remove_unit(unit_name) 392 | if self._global: 393 | manifest_file = 'global.yaml' 394 | else: 395 | manifest_file = '{0}/{1}.yaml'.format(self._command, self._name) 396 | 397 | file_deployment = files.FileDeployment(unit_name, 398 | self.env_config, 399 | self._config_path, 400 | manifest_file, 401 | self._name, 402 | self._environment) 403 | file_deployment.remove_archive() 404 | file_deployment.remove_other_archive_versions() 405 | else: 406 | LOGGER.debug('No manifest found') 407 | 408 | def _remove_globals(self): 409 | for name in self._config.get('global', []): 410 | version = None 411 | if ':' in name: 412 | name, version = name.split(':') 413 | self._remove_unit(utils.unit_name(name, 'global', None, version)) 414 | 415 | def _remove_shared_units(self): 416 | for service in self._get_units(): 417 | version = None 418 | if ':' in service: 419 | service, version = service.split(':') 420 | self._remove_unit(utils.unit_name(service, self._name, self._group, 421 | version)) 
422 | 423 | def _remove_unit(self, unit_name): 424 | LOGGER.info('Removing %s', unit_name) 425 | unit = self._fleet.unit(unit_name) 426 | unit.destroy() 427 | 428 | def _remove_units(self): 429 | if self._global: 430 | self._remove_files() 431 | self._remove_globals() 432 | return True 433 | 434 | self._remove_unit(utils.unit_name(self._name, None, self._group, 435 | self._version)) 436 | self._remove_shared_units() 437 | self._remove_files() 438 | return True 439 | 440 | @property 441 | def _service(self): 442 | return self._command == 'service' 443 | 444 | def _shutdown_other_versions(self): 445 | LOGGER.debug('Shutting down running units for other image versions') 446 | units = [utils.parse_unit_name(u.name) for u in self._fleet.units()] 447 | destroy = set() 448 | for deployed_unit in self._deployed_units: 449 | parent, service, group, version = \ 450 | utils.parse_unit_name(deployed_unit) 451 | for _parent, _service, _group, _version in units: 452 | if _parent == parent and _service == service and \ 453 | _group == group and _version != version: 454 | destroy.add((_parent, _service, _group, _version)) 455 | 456 | for name, version in destroy: 457 | LOGGER.info('Destroying %s@%s.service', name, version) 458 | unit = self._fleet.unit(name, version) 459 | if not unit.destroy(): 460 | LOGGER.error('Error destroying %s@%s.service', name, version) 461 | 462 | @property 463 | def _standalone(self): 464 | return self._command == 'standalone' 465 | 466 | def _unit_is_active(self, unit_name, state=None): 467 | state = self._fleet.state(True, unit_name) if state is None else state 468 | return state and all([s.state == 'active' for s in state]) 469 | 470 | def _unit_file(self, name): 471 | for extension in ['service', 'yaml']: 472 | file_path = '{0}.{1}'.format(name, extension) 473 | if path.exists(file_path): 474 | with open(file_path) as handle: 475 | if extension == 'service': 476 | return handle.read() 477 | data = yaml.load(handle) 478 | if self._global and 'global' 
in data: 479 | return data['global'] 480 | if self._name in data: 481 | return data[self._name] 482 | raise ValueError('No unit found for {0}'.format(self._name)) 483 | raise ValueError('No unit file: '.format(name)) 484 | 485 | def _wait_for_unit_to_become_active(self, unit_name): 486 | for attempt in range(0, self._max_tries): 487 | state = self._fleet.state(True, unit_name) 488 | if self._unit_is_active(unit_name, state): 489 | LOGGER.debug('All %s units active', unit_name) 490 | return True 491 | 492 | if state and all([s.state == 'failed' for s in state]): 493 | LOGGER.warn('All %s units failed', unit_name) 494 | LOGGER.debug('State: %r', state) 495 | return False 496 | 497 | for s in [s for s in state 498 | if s.loaded and s.state == 'activating']: 499 | LOGGER.debug('Unit %s is activating on %s', unit_name, 500 | self._machine_label(s)) 501 | 502 | for s in [s for s in state if s.loaded and s.state == 'inactive']: 503 | LOGGER.debug('Unit %s is inactive on %s', unit_name, 504 | self._machine_label(s)) 505 | 506 | LOGGER.debug('Sleeping %i seconds before checking again', 507 | self._delay) 508 | time.sleep(self._delay) 509 | 510 | LOGGER.warn('Failed to validate unit state after %i attempts', 511 | self._max_tries) 512 | return False 513 | -------------------------------------------------------------------------------- /houston/files.py: -------------------------------------------------------------------------------- 1 | """ 2 | Build a deployable unit file that writes files to the CoreOS filesystem 3 | 4 | """ 5 | import base64 6 | import errno 7 | import logging 8 | import os 9 | from os import path 10 | import shutil 11 | import tarfile 12 | import tempfile 13 | import uuid 14 | import yaml 15 | 16 | import consulate 17 | 18 | from houston import utils 19 | 20 | LOGGER = logging.getLogger(__name__) 21 | 22 | DEFAULT_UNIT_TEMPLATE = """\ 23 | [Unit] 24 | Description=Houston File Archive Deployment 25 | 26 | [Service] 27 | Type=oneshot 28 | 
RemainAfterExit=true 29 | ExecStartPre=/usr/bin/sleep 30 30 | ExecStart=/usr/bin/bash -c 'curl -s "http://localhost:8500/v1/kv/{archive_key}?raw" | base64 -d | tar -C / -xvf -' 31 | 32 | [X-Fleet] 33 | Global=true 34 | """ 35 | 36 | GROUP_TEMPLATE = "MachineMetadata=group={group}\n" 37 | SERVICE_TEMPLATE = "MachineMetadata=service={service}\n" 38 | 39 | 40 | class FileDeployment(object): 41 | 42 | CONFIG_PREFIX = 'files' 43 | CONSUL_PREFIX = 'houston' 44 | 45 | def __init__(self, name, config, config_path, manifest_file, service, 46 | group=None, environment=None, prefix=None): 47 | self._archive = None 48 | self._config = config 49 | self._config_path = config_path 50 | self._consul_prefix = prefix or self.CONSUL_PREFIX 51 | self._environment = environment 52 | self._manifest_file = manifest_file 53 | self._service = service 54 | self._group = group 55 | self._unit_name = name 56 | 57 | self._unit_template = DEFAULT_UNIT_TEMPLATE 58 | unit_template_file = path.join(config_path, 'file-unit.template') 59 | if path.exists(unit_template_file): 60 | with open(unit_template_file, 'r') as handle: 61 | self._unit_template = handle.read() 62 | 63 | kwargs = utils.parse_endpoint(self._config['consul']) 64 | self._consul = consulate.Consul(**kwargs) 65 | 66 | self._temp_dir = tempfile.mkdtemp() 67 | try: 68 | self._file_list = self._get_file_list() 69 | except ValueError as error: 70 | LOGGER.info(error) 71 | self._file_list = [] 72 | 73 | self._archive_key = '{0}/{1}'.format(self._consul_prefix, name) 74 | 75 | @property 76 | def archive_key(self): 77 | return self._archive_key 78 | 79 | def build_archive(self): 80 | if not self._file_list: 81 | LOGGER.debug('No files to build archive for') 82 | return False 83 | LOGGER.debug('Building archive file') 84 | self._archive = self._create_archive() 85 | return True 86 | 87 | def remove_archive(self): 88 | LOGGER.debug('Removing archive from Consul as %s', self._archive_key) 89 | return self._consul.kv.delete(self.archive_key) 90 
| 91 | def remove_other_archive_versions(self): 92 | name, parent, group, version = utils.parse_unit_name(self._unit_name) 93 | if parent: 94 | name = '{0}.{1}'.format(name, parent) 95 | if group: 96 | name = '{0}:{1}'.format(name, group) 97 | keys = self._consul.kv.find('{0}/{1}@'.format(self._consul_prefix, 98 | name)) 99 | for key in keys: 100 | if key != self._archive_key: 101 | LOGGER.debug('Removing previous archive version: %s', key) 102 | self._consul.kv.delete(key) 103 | 104 | def unit_file(self): 105 | output = self._unit_template 106 | if self._service != 'global': 107 | output += SERVICE_TEMPLATE 108 | if self._group: 109 | output += GROUP_TEMPLATE 110 | output = output.replace('{group}', self._group) 111 | output = output.replace('{archive_key}', self._archive_key) 112 | return output.replace('{service}', self._service) 113 | 114 | def upload_archive(self): 115 | LOGGER.debug('Uploading archive to Consul as %s', self._archive_key) 116 | return self._consul.kv.set(self.archive_key, self._archive) 117 | 118 | def _get_file_list(self): 119 | file_path = path.join(self._config_path, self.CONFIG_PREFIX, 120 | self._manifest_file) 121 | if not path.exists(file_path): 122 | raise ValueError('File config not found for {0}'.format(file_path)) 123 | with open(file_path) as handle: 124 | return yaml.load(handle) 125 | 126 | def _create_archive(self): 127 | cwd = os.getcwd() 128 | os.chdir(self._temp_dir) 129 | archive_file = path.join(tempfile.gettempdir(), str(uuid.uuid4())) 130 | tar = tarfile.open(archive_file, 'w') 131 | for entry in self._file_list: 132 | if entry.get('environment'): 133 | if entry['environment'] != self._environment: 134 | LOGGER.debug('Bypassing file for %s [%s]', 135 | entry['environment'], self._environment) 136 | continue 137 | with tempfile.TemporaryFile() as handle: 138 | content = self._replace_variables(entry.get('content', '')) 139 | handle.write(self._maybe_encode(content)) 140 | handle.seek(0) 141 | 142 | info = 
tar.gettarinfo(arcname=entry['path'], fileobj=handle) 143 | if 'owner' in entry: 144 | info.uname = entry['owner'] 145 | if 'group' in entry: 146 | info.gname = entry['group'] 147 | if 'permissions' in entry: 148 | info.mode = entry['permissions'] 149 | 150 | handle.seek(0) 151 | tar.addfile(info, handle) 152 | 153 | tar.close() 154 | os.chdir(cwd) 155 | 156 | with open(archive_file, 'r') as handle: 157 | tarball = handle.read() 158 | if utils.PYTHON3: 159 | tarball = bytes(tarball, encoding='utf-8') 160 | archive = base64.b64encode(tarball) 161 | os.unlink(archive_file) 162 | return archive 163 | 164 | @staticmethod 165 | def _maybe_encode(value): 166 | """If the value passed in is a str, encode it as UTF-8 bytes for 167 | Python 3 168 | 169 | :param str|bytes value: The value to maybe encode 170 | :rtype: bytes 171 | 172 | """ 173 | try: 174 | return value.encode('utf-8') 175 | except AttributeError: 176 | return value 177 | 178 | def _replace_variables(self, content): 179 | if '{service}' in content: 180 | content = content.replace('{service}', self._service) 181 | if '{group}' in content: 182 | content = content.replace('{group}', self._group) 183 | if '{environment}' in content: 184 | content = content.replace('{environment}', self._environment) 185 | return content 186 | -------------------------------------------------------------------------------- /houston/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common Utility Methods 3 | 4 | """ 5 | import re 6 | import sys 7 | 8 | PYTHON3 = True if sys.version_info > (3, 0, 0) else False 9 | 10 | SERVICE_PATTERN = re.compile(r'^(((?P\w+)\.|)(?P[\w-]+)' 11 | r'(:(?P\w+)|)(@(?P[\w\.]+)|)' 12 | r'\.service$)') 13 | 14 | URI = re.compile(r'(?P\w+)\://(?P[\/\w\d\.]+)(\:(?P\d+)|)') 15 | DEFAULT_PORTS = {'http': 80, 'https': 443} 16 | 17 | 18 | def parse_endpoint(endpoint): 19 | """Parse a endpoint in the form of ``scheme://host[:port]`` and return 20 | the values 
def unit_name(service, parent=None, group=None, version='latest'):
    """Return the houston standard service name that can be reverse parsed by
    the parse

    :param str service: The service name
    :param str|None parent: An optional parent for the service
    :param str|None group: An optional deployment group
    :param str|None version: An optional service version
    :rtype: str

    """
    name = service
    # A parent equal to the service itself is redundant and omitted
    if parent is not None and parent != service:
        name = '{0}.{1}'.format(parent, name)
    if group is not None:
        name = '{0}:{1}'.format(name, group)
    return '{0}@{1}.service'.format(name, version or 'latest')
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | import sys 3 | 4 | tests_require = ['nose', 'mock'] 5 | if sys.version_info < (2, 7, 0): 6 | tests_require.append('unittest2') 7 | 8 | desc = 'Application deployment on CoreOS clusters using fleetd and Consul' 9 | 10 | classifiers = ['Development Status :: 3 - Alpha', 11 | 'Environment :: Console', 12 | 'Intended Audience :: Developers', 13 | 'License :: OSI Approved :: BSD License', 14 | 'Operating System :: OS Independent', 15 | 'Programming Language :: Python :: 2', 16 | 'Programming Language :: Python :: 2.6', 17 | 'Programming Language :: Python :: 2.7', 18 | 'Programming Language :: Python :: 3', 19 | 'Programming Language :: Python :: 3.2', 20 | 'Programming Language :: Python :: 3.3', 21 | 'Programming Language :: Python :: 3.4', 22 | 'Programming Language :: Python :: 3.5', 23 | 'Programming Language :: Python :: Implementation :: CPython', 24 | 'Programming Language :: Python :: Implementation :: PyPy', 25 | 'Topic :: Communications', 26 | 'Topic :: Internet', 27 | 'Topic :: System :: Boot :: Init', 28 | 'Topic :: System :: Clustering', 29 | 'Topic :: System :: Operating System', 30 | 'Topic :: System :: Software Distribution'] 31 | 32 | setuptools.setup(name='houston', 33 | version='0.4.0', 34 | description=desc, 35 | long_description=open('README.rst').read(), 36 | author='Gavin M. 
Roy', 37 | author_email='gavinr@aweber.com', 38 | url='http://houston.readthedocs.org', 39 | packages=['houston'], 40 | package_data={'': ['LICENSE', 'README.rst']}, 41 | include_package_data=True, 42 | install_requires=['consulate', 'fleetpy', 'pyyaml'], 43 | tests_require=tests_require, 44 | license='BSD', 45 | classifiers=classifiers, 46 | entry_points={'console_scripts': ['houston=houston.cli:run']}, 47 | zip_safe=True) 48 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | nose 2 | coverage 3 | codecov 4 | pep8 5 | pylint 6 | -------------------------------------------------------------------------------- /tests/utils_tests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for houston.utils 3 | 4 | """ 5 | try: 6 | import unittest2 as unittest 7 | except ImportError: 8 | import unittest 9 | 10 | from houston import utils 11 | 12 | 13 | class ParseEndpointTests(unittest.TestCase): 14 | 15 | def test_http_with_default_port(self): 16 | value = 'http://consul.ec2.local' 17 | expectation = {'scheme': 'http', 18 | 'host': 'consul.ec2.local', 19 | 'port': 80} 20 | self.assertDictEqual(utils.parse_endpoint(value), expectation) 21 | 22 | def test_https_with_default_port(self): 23 | value = 'https://consul.ec2.local' 24 | expectation = {'scheme': 'https', 25 | 'host': 'consul.ec2.local', 26 | 'port': 443} 27 | self.assertDictEqual(utils.parse_endpoint(value), expectation) 28 | 29 | def test_http_with_specified_port(self): 30 | value = 'http://consul.ec2.local:8500' 31 | expectation = {'scheme': 'http', 32 | 'host': 'consul.ec2.local', 33 | 'port': 8500} 34 | self.assertDictEqual(utils.parse_endpoint(value), expectation) 35 | 36 | def test_https_with_specified_port(self): 37 | value = 'https://consul.ec2.local:8500' 38 | expectation = {'scheme': 'https', 39 | 'host': 
'consul.ec2.local', 40 | 'port': 8500} 41 | self.assertDictEqual(utils.parse_endpoint(value), expectation) 42 | 43 | def test_unix_socket_endpoint(self): 44 | value = 'unix:///var/run/consul.sock' 45 | expectation = {'scheme': 'unix', 46 | 'host': '/var/run/consul.sock', 47 | 'port': None} 48 | self.assertDictEqual(utils.parse_endpoint(value), expectation) 49 | --------------------------------------------------------------------------------