├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── _static └── epub.css ├── ansible-v2 ├── .gitignore ├── .nojekyll ├── Makefile ├── README.md ├── _static │ ├── ansible-local.css │ ├── basic.css │ ├── bootstrap-dropdown.js │ ├── bootstrap-scrollspy.js │ ├── bootstrap-sphinx.css │ ├── bootstrap.css │ ├── default.css │ ├── doctools.js │ ├── favicon.ico │ ├── file.png │ ├── jquery.js │ ├── minus.png │ ├── plus.png │ ├── pygments.css │ ├── searchtools.js │ ├── sidebar.js │ ├── solar.css │ ├── solarized-dark.css │ ├── subtle_dots.png │ └── underscore.js ├── _themes │ └── srtd │ │ ├── __init__.py │ │ ├── breadcrumbs.html │ │ ├── footer.html │ │ ├── layout.html │ │ ├── layout_old.html │ │ ├── search.html │ │ ├── searchbox.html │ │ ├── static │ │ ├── css │ │ │ ├── badge_only.css │ │ │ └── theme.css │ │ ├── font │ │ │ ├── fontawesome_webfont.eot │ │ │ ├── fontawesome_webfont.svg │ │ │ ├── fontawesome_webfont.ttf │ │ │ └── fontawesome_webfont.woff │ │ ├── images │ │ │ ├── banner_ad_1.png │ │ │ ├── banner_ad_2.png │ │ │ └── logo_invert.png │ │ └── js │ │ │ └── theme.js │ │ ├── theme.conf │ │ └── versions.html ├── build-site.py ├── conf.py ├── favicon.ico ├── js │ └── ansible │ │ └── application.js ├── man │ ├── ansible-playbook.1.html │ └── ansible.1.html ├── modules.js ├── rst │ ├── YAMLSyntax.rst │ ├── become.rst │ ├── common_return_values.rst │ ├── community.rst │ ├── developing.rst │ ├── developing_api.rst │ ├── developing_inventory.rst │ ├── developing_modules.rst │ ├── developing_plugins.rst │ ├── developing_test_pr.rst │ ├── faq.rst │ ├── galaxy.rst │ ├── glossary.rst │ ├── guide_aws.rst │ ├── guide_gce.rst │ ├── guide_rax.rst │ ├── guide_rolling_upgrade.rst │ ├── guide_vagrant.rst │ ├── guides.rst │ ├── index.rst │ ├── intro.rst │ ├── intro_adhoc.rst │ ├── intro_configuration.rst │ ├── intro_dynamic_inventory.rst │ ├── intro_getting_started.rst │ ├── intro_installation.rst │ ├── intro_inventory.rst │ ├── intro_patterns.rst │ ├── intro_windows.rst │ ├── modules.rst │ ├── modules │ │ └── .gitdir │ ├── modules_core.rst │ ├── modules_extra.rst │ ├── modules_intro.rst │ ├── playbooks.rst │ ├── playbooks_acceleration.rst │ ├── playbooks_async.rst │ ├── playbooks_best_practices.rst │ ├── playbooks_blocks.rst │ ├── playbooks_checkmode.rst │ ├── playbooks_conditionals.rst │ ├── playbooks_delegation.rst │ ├── playbooks_environment.rst │ ├── playbooks_error_handling.rst │ ├── playbooks_filters.rst │ ├── playbooks_filters_ipaddr.rst │ ├── playbooks_intro.rst │ ├── playbooks_lookups.rst │ ├── playbooks_loops.rst │ ├── playbooks_prompts.rst │ ├── playbooks_roles.rst │ ├── playbooks_special_topics.rst │ ├── playbooks_startnstep.rst │ ├── playbooks_strategies.rst │ ├── playbooks_tags.rst │ ├── playbooks_variables.rst │ ├── playbooks_vault.rst │ ├── quickstart.rst │ ├── test_strategies.rst │ └── tower.rst └── variables.dot ├── ansible.cfg_sample ├── ansible ├── .gitignore ├── CDnRU.rst ├── Makefile ├── a.j2 ├── a.yml ├── ansible-doc ├── ansible-module-doc.rst ├── ansible-quickref.rst ├── ansible_cfg.rst ├── async.yml ├── conf.py ├── docker.rst ├── dynamic_inventory.rst ├── ec2.rst ├── facts.rst ├── filter.yml ├── hosts ├── index.rst ├── install.rst ├── inventory_intro.rst ├── keystone.yml ├── list ├── loops.yml ├── make.bat ├── playbooks_best_practise.rst ├── playbooks_conditionals.rst ├── playbooks_filters.rst ├── playbooks_intro.rst ├── playbooks_loops.rst ├── playbooks_roles.rst ├── playbooks_special_topics.rst ├── playbooks_variables.rst ├── t.yml ├── test.yml ├── test_strategies.rst ├── tips.rst 
└── yaml_syntax.rst ├── conf.py ├── consul ├── .gitignore ├── Makefile ├── conf.py ├── consul_addndel.rst ├── consul_agent.rst ├── consul_bootstrap.rst ├── index.rst └── make.bat ├── docker ├── Makefile ├── conf.py ├── docker_command.rst ├── docker_compose.rst ├── docker_dockerfile.rst ├── docker_in_action.rst ├── docker_intro.rst ├── docker_swarm.rst ├── dockerfile_best_practices_take.rst └── index.rst ├── index.rst ├── iptables ├── Makefile ├── conf.py ├── index.rst ├── iptables.jpg ├── iptables.rst └── make.bat ├── jenkins-ci ├── .gitignore ├── Makefile ├── conf.py ├── index.rst ├── jenkins_global_settings.rst └── make.bat ├── jinja2 ├── Makefile ├── conf.py ├── index.rst ├── jinja2_design.rst ├── jinja2_intro.rst └── make.bat ├── k8s ├── .gitignore ├── Makefile ├── conf.py ├── index.rst ├── k8s_network.rst ├── k8s_scratch.rst ├── make.bat └── ovs-networking.png ├── logstash ├── .gitignore ├── Makefile ├── conf.py ├── index.rst ├── lekstack │ ├── elasticsearch.rst │ ├── index.rst │ ├── kibana.rst │ ├── lekstack.png │ ├── logstash.rst │ └── redis.rst ├── make.bat ├── playbooks.rst └── roles │ ├── cinder-logging.rst │ ├── common-env.rst │ ├── elasticsearch.rst │ ├── index.rst │ ├── kibana.rst │ ├── logstash.rst │ ├── neutron-logging.rst │ ├── nova-logging.rst │ ├── redis.rst │ └── sun-jdk.rst ├── make.bat └── screen ├── Makefile ├── conf.py ├── index.rst └── make.bat /.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # notes 2 | RTFM 3 | -------------------------------------------------------------------------------- /ansible-v2/.gitignore: -------------------------------------------------------------------------------- 1 | # Old compiled python stuff 2 | *.py[co] 3 | # package building stuff 4 | build 5 | # Emacs backup files... 
6 | *~ 7 | .\#* 8 | .doctrees 9 | # Generated docs stuff 10 | ansible*.xml 11 | .buildinfo 12 | objects.inv 13 | .doctrees 14 | rst/modules/*.rst 15 | *.min.css 16 | htmlout 17 | -------------------------------------------------------------------------------- /ansible-v2/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/.nojekyll -------------------------------------------------------------------------------- /ansible-v2/Makefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make 2 | SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") 3 | FORMATTER=../hacking/module_formatter.py 4 | 5 | all: clean docs 6 | 7 | docs: clean modules staticmin 8 | ./build-site.py 9 | -(cp *.ico htmlout/) 10 | -(cp *.jpg htmlout/) 11 | -(cp *.png htmlout/) 12 | 13 | variables: 14 | (mkdir -p htmlout/) 15 | dot variables.dot -Tpng -o htmlout/variables.png 16 | 17 | viewdocs: clean staticmin 18 | ./build-site.py view 19 | 20 | htmldocs: staticmin 21 | ./build-site.py rst 22 | 23 | clean: 24 | -rm -rf htmlout 25 | -rm -f .buildinfo 26 | -rm -f *.inv 27 | -rm -rf *.doctrees 28 | @echo "Cleaning up minified css files" 29 | find . -type f -name "*.min.css" -delete 30 | @echo "Cleaning up byte compiled python stuff" 31 | find . -regex ".*\.py[co]$$" -delete 32 | @echo "Cleaning up editor backup files" 33 | find . -type f \( -name "*~" -or -name "#*" \) -delete 34 | find . -type f \( -name "*.swp" \) -delete 35 | @echo "Cleaning up generated rst" 36 | -rm rst/list_of_*.rst 37 | -rm rst/*_by_category.rst 38 | -rm rst/*_module.rst 39 | 40 | .PHONEY: docs clean 41 | 42 | modules: $(FORMATTER) ../hacking/templates/rst.j2 43 | PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ 44 | 45 | staticmin: 46 | cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css 47 | -------------------------------------------------------------------------------- /ansible-v2/README.md: -------------------------------------------------------------------------------- 1 | Homepage and documentation source for Ansible 2 | ============================================= 3 | 4 | This project hosts the source behind [docs.ansible.com](http://docs.ansible.com/) 5 | 6 | Contributions to the documentation are welcome. To make changes, submit a pull request 7 | that changes the reStructuredText files in the "rst/" directory only, and Michael can 8 | do a docs build and push the static files. 9 | 10 | If you wish to verify output from the markup 11 | such as link references, you may install sphinx and build the documentation by running 12 | `make viewdocs` from the `ansible/docsite` directory. 13 | 14 | To include module documentation you'll need to run `make webdocs` at the top level of the repository. The generated 15 | html files are in docsite/htmlout/. 16 | 17 | If you do not want to learn the reStructuredText format, you can also [file issues] about 18 | documentation problems on the Ansible GitHub project. 
19 | 20 | Note that module documentation can actually be [generated from a DOCUMENTATION docstring][module-docs] 21 | in the modules directory, so corrections to modules written as such need to be made 22 | in the module source, rather than in docsite source. 23 | 24 | To install sphinx and the required theme, install pip and then "pip install sphinx sphinx_rtd_theme" 25 | 26 | [file issues]: https://github.com/ansible/ansible/issues 27 | [module-docs]: http://docs.ansible.com/developing_modules.html#documenting-your-module 28 | 29 | 30 | -------------------------------------------------------------------------------- /ansible-v2/_static/ansible-local.css: -------------------------------------------------------------------------------- 1 | /* Local CSS tweaks for ansible */ 2 | .dropdown-menu { 3 | overflow-y: auto; 4 | } 5 | 6 | h2 { 7 | padding-top: 40px; 8 | } -------------------------------------------------------------------------------- /ansible-v2/_static/bootstrap-dropdown.js: -------------------------------------------------------------------------------- 1 | /* ============================================================ 2 | * bootstrap-dropdown.js v1.4.0 3 | * http://twitter.github.com/bootstrap/javascript.html#dropdown 4 | * ============================================================ 5 | * Copyright 2011 Twitter, Inc. 6 | * 7 | * Licensed under the Apache License, Version 2.0 (the "License"); 8 | * you may not use this file except in compliance with the License. 9 | * You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | * ============================================================ */ 19 | 20 | 21 | !function( $ ){ 22 | 23 | "use strict" 24 | 25 | /* DROPDOWN PLUGIN DEFINITION 26 | * ========================== */ 27 | 28 | $.fn.dropdown = function ( selector ) { 29 | return this.each(function () { 30 | $(this).delegate(selector || d, 'click', function (e) { 31 | var li = $(this).parent('li') 32 | , isActive = li.hasClass('open') 33 | 34 | clearMenus() 35 | !isActive && li.toggleClass('open') 36 | return false 37 | }) 38 | }) 39 | } 40 | 41 | /* APPLY TO STANDARD DROPDOWN ELEMENTS 42 | * =================================== */ 43 | 44 | var d = 'a.menu, .dropdown-toggle' 45 | 46 | function clearMenus() { 47 | $(d).parent('li').removeClass('open') 48 | } 49 | 50 | $(function () { 51 | $('html').bind("click", clearMenus) 52 | $('body').dropdown( '[data-dropdown] a.menu, [data-dropdown] .dropdown-toggle' ) 53 | }) 54 | 55 | }( window.jQuery || window.ender ); 56 | -------------------------------------------------------------------------------- /ansible-v2/_static/bootstrap-scrollspy.js: -------------------------------------------------------------------------------- 1 | /* ============================================================= 2 | * bootstrap-scrollspy.js v1.4.0 3 | * http://twitter.github.com/bootstrap/javascript.html#scrollspy 4 | * ============================================================= 5 | * Copyright 2011 Twitter, Inc. 6 | * 7 | * Licensed under the Apache License, Version 2.0 (the "License"); 8 | * you may not use this file except in compliance with the License. 
9 | * You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | * ============================================================== */ 19 | 20 | 21 | !function ( $ ) { 22 | 23 | "use strict" 24 | 25 | var $window = $(window) 26 | 27 | function ScrollSpy( topbar, selector ) { 28 | var processScroll = $.proxy(this.processScroll, this) 29 | this.$topbar = $(topbar) 30 | this.selector = selector || 'li > a' 31 | this.refresh() 32 | this.$topbar.delegate(this.selector, 'click', processScroll) 33 | $window.scroll(processScroll) 34 | this.processScroll() 35 | } 36 | 37 | ScrollSpy.prototype = { 38 | 39 | refresh: function () { 40 | this.targets = this.$topbar.find(this.selector).map(function () { 41 | var href = $(this).attr('href') 42 | return /^#\w/.test(href) && $(href).length ? href : null 43 | }) 44 | 45 | this.offsets = $.map(this.targets, function (id) { 46 | return $(id).offset().top 47 | }) 48 | } 49 | 50 | , processScroll: function () { 51 | var scrollTop = $window.scrollTop() + 10 52 | , offsets = this.offsets 53 | , targets = this.targets 54 | , activeTarget = this.activeTarget 55 | , i 56 | 57 | for (i = offsets.length; i--;) { 58 | activeTarget != targets[i] 59 | && scrollTop >= offsets[i] 60 | && (!offsets[i + 1] || scrollTop <= offsets[i + 1]) 61 | && this.activateButton( targets[i] ) 62 | } 63 | } 64 | 65 | , activateButton: function (target) { 66 | this.activeTarget = target 67 | 68 | this.$topbar 69 | .find(this.selector).parent('.active') 70 | .removeClass('active') 71 | 72 | this.$topbar 73 | .find(this.selector + '[href="' + target + '"]') 74 | .parent('li') 75 | .addClass('active') 76 | } 77 | 78 | } 79 | 80 | /* SCROLLSPY PLUGIN DEFINITION 81 | * =========================== */ 82 | 83 | $.fn.scrollSpy = function( options ) { 84 | var scrollspy = this.data('scrollspy') 85 | 86 | if (!scrollspy) { 87 | return this.each(function () { 88 | $(this).data('scrollspy', new ScrollSpy( this, options )) 89 | }) 90 | } 91 | 92 | if ( options === true ) { 93 | return scrollspy 94 | } 95 | 96 | if ( typeof options == 'string' ) { 97 | scrollspy[options]() 98 | } 99 | 100 | return this 101 | } 102 | 103 | $(document).ready(function () { 104 | $('body').scrollSpy('[data-scrollspy] li > a') 105 | }) 106 | 107 | }( window.jQuery || window.ender ); -------------------------------------------------------------------------------- /ansible-v2/_static/bootstrap-sphinx.css: -------------------------------------------------------------------------------- 1 | /* 2 | * bootstrap-sphinx.css 3 | * ~~~~~~~~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- Twitter Bootstrap theme. 
6 | */ 7 | 8 | body { 9 | padding-top: 42px; 10 | } 11 | 12 | div.documentwrapper { 13 | float: left; 14 | width: 100%; 15 | } -------------------------------------------------------------------------------- /ansible-v2/_static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_static/favicon.ico -------------------------------------------------------------------------------- /ansible-v2/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_static/file.png -------------------------------------------------------------------------------- /ansible-v2/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_static/minus.png -------------------------------------------------------------------------------- /ansible-v2/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_static/plus.png -------------------------------------------------------------------------------- /ansible-v2/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 8 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 9 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 10 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 11 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 12 | .highlight .ge { font-style: italic } /* Generic.Emph */ 13 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 14 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 15 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 16 | .highlight .go { color: #303030 } /* Generic.Output */ 17 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 18 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 19 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 20 | .highlight .gt { color: #0040D0 } /* Generic.Traceback */ 21 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 22 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 23 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 24 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 25 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 26 | .highlight .kt { color: #902000 } /* Keyword.Type */ 27 | .highlight .m { color: #208050 } /* Literal.Number */ 28 | .highlight .s { color: #4070a0 } /* Literal.String */ 29 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 30 | .highlight .nb { color: 
#007020 } /* Name.Builtin */ 31 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 32 | .highlight .no { color: #60add5 } /* Name.Constant */ 33 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 34 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 35 | .highlight .ne { color: #007020 } /* Name.Exception */ 36 | .highlight .nf { color: #06287e } /* Name.Function */ 37 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 38 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 39 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 40 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 41 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 42 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 43 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 44 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 45 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 46 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 47 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 48 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 49 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 50 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 51 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 52 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 53 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 54 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 55 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 56 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 57 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 58 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 59 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 60 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 61 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 62 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /ansible-v2/_static/sidebar.js: -------------------------------------------------------------------------------- 1 | /* 2 | * sidebar.js 3 | * ~~~~~~~~~~ 4 | * 5 | * This script makes the Sphinx sidebar collapsible. 6 | * 7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds 8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton 9 | * used to collapse and expand the sidebar. 10 | * 11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden 12 | * and the width of the sidebar and the margin-left of the document 13 | * are decreased. When the sidebar is expanded the opposite happens. 14 | * This script saves a per-browser/per-session cookie used to 15 | * remember the position of the sidebar among the pages. 16 | * Once the browser is closed the cookie is deleted and the position 17 | * reset to the default (expanded). 18 | * 19 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 20 | * :license: BSD, see LICENSE for details. 21 | * 22 | */ 23 | 24 | $(function() { 25 | // global elements used by the functions. 
26 | // the 'sidebarbutton' element is defined as global after its 27 | // creation, in the add_sidebar_button function 28 | var bodywrapper = $('.bodywrapper'); 29 | var sidebar = $('.sphinxsidebar'); 30 | var sidebarwrapper = $('.sphinxsidebarwrapper'); 31 | 32 | // original margin-left of the bodywrapper and width of the sidebar 33 | // with the sidebar expanded 34 | var bw_margin_expanded = bodywrapper.css('margin-left'); 35 | var ssb_width_expanded = sidebar.width(); 36 | 37 | // margin-left of the bodywrapper and width of the sidebar 38 | // with the sidebar collapsed 39 | var bw_margin_collapsed = '.8em'; 40 | var ssb_width_collapsed = '.8em'; 41 | 42 | // colors used by the current theme 43 | var dark_color = $('.related').css('background-color'); 44 | var light_color = $('.document').css('background-color'); 45 | 46 | function sidebar_is_collapsed() { 47 | return sidebarwrapper.is(':not(:visible)'); 48 | } 49 | 50 | function toggle_sidebar() { 51 | if (sidebar_is_collapsed()) 52 | expand_sidebar(); 53 | else 54 | collapse_sidebar(); 55 | } 56 | 57 | function collapse_sidebar() { 58 | sidebarwrapper.hide(); 59 | sidebar.css('width', ssb_width_collapsed); 60 | bodywrapper.css('margin-left', bw_margin_collapsed); 61 | sidebarbutton.css({ 62 | 'margin-left': '0', 63 | 'height': bodywrapper.height() 64 | }); 65 | sidebarbutton.find('span').text('»'); 66 | sidebarbutton.attr('title', _('Expand sidebar')); 67 | document.cookie = 'sidebar=collapsed'; 68 | } 69 | 70 | function expand_sidebar() { 71 | bodywrapper.css('margin-left', bw_margin_expanded); 72 | sidebar.css('width', ssb_width_expanded); 73 | sidebarwrapper.show(); 74 | sidebarbutton.css({ 75 | 'margin-left': ssb_width_expanded-12, 76 | 'height': bodywrapper.height() 77 | }); 78 | sidebarbutton.find('span').text('«'); 79 | sidebarbutton.attr('title', _('Collapse sidebar')); 80 | document.cookie = 'sidebar=expanded'; 81 | } 82 | 83 | function add_sidebar_button() { 84 | sidebarwrapper.css({ 85 | 'float': 'left', 86 | 'margin-right': '0', 87 | 'width': ssb_width_expanded - 28 88 | }); 89 | // create the button 90 | sidebar.append( 91 | '
<div id="sidebarbutton"><span>&laquo;</span></div>
' 92 | ); 93 | var sidebarbutton = $('#sidebarbutton'); 94 | light_color = sidebarbutton.css('background-color'); 95 | // find the height of the viewport to center the '<<' in the page 96 | var viewport_height; 97 | if (window.innerHeight) 98 | viewport_height = window.innerHeight; 99 | else 100 | viewport_height = $(window).height(); 101 | sidebarbutton.find('span').css({ 102 | 'display': 'block', 103 | 'margin-top': (viewport_height - sidebar.position().top - 20) / 2 104 | }); 105 | 106 | sidebarbutton.click(toggle_sidebar); 107 | sidebarbutton.attr('title', _('Collapse sidebar')); 108 | sidebarbutton.css({ 109 | 'color': '#FFFFFF', 110 | 'border-left': '1px solid ' + dark_color, 111 | 'font-size': '1.2em', 112 | 'cursor': 'pointer', 113 | 'height': bodywrapper.height(), 114 | 'padding-top': '1px', 115 | 'margin-left': ssb_width_expanded - 12 116 | }); 117 | 118 | sidebarbutton.hover( 119 | function () { 120 | $(this).css('background-color', dark_color); 121 | }, 122 | function () { 123 | $(this).css('background-color', light_color); 124 | } 125 | ); 126 | } 127 | 128 | function set_position_from_cookie() { 129 | if (!document.cookie) 130 | return; 131 | var items = document.cookie.split(';'); 132 | for(var k=0; k 2 |
  • Docs »
  • 3 |
  • {{ title }}
  • 4 | {% if not pagename.endswith('_module') and (not 'list_of' in pagename) and (not 'category' in pagename) %} 5 |
  • 6 | Edit on GitHub 7 |
  • 8 | {% endif %} 9 | 10 |
    11 | 12 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/footer.html: -------------------------------------------------------------------------------- 1 |
    2 | {% if next or prev %} 3 | 11 | {% endif %} 12 | 13 |
    14 | 15 |

    16 | © Copyright 2015 Ansible, Inc.. 17 | 18 | {%- if last_updated %} 19 | {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} 20 | {%- endif %} 21 |

    22 | 23 | Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %} 24 | 25 |
    26 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/search.html: -------------------------------------------------------------------------------- 1 | {# 2 | basic/search.html 3 | ~~~~~~~~~~~~~~~~~ 4 | 5 | Template for the search page. 6 | 7 | :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. 8 | :license: BSD, see LICENSE for details. 9 | #} 10 | {%- extends "layout.html" %} 11 | {% set title = _('Search') %} 12 | {% set script_files = script_files + ['_static/searchtools.js'] %} 13 | {% block extrahead %} 14 | 17 | {# this is used when loading the search index using $.ajax fails, 18 | such as on Chrome for documents on localhost #} 19 | 20 | {{ super() }} 21 | {% endblock %} 22 | {% block body %} 23 | 31 | 32 | {% if search_performed %} 33 |

    {{ _('Search Results') }}

    34 | {% if not search_results %} 35 |

    {{ _('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.') }}

    36 | {% endif %} 37 | {% endif %} 38 |
    39 | {% if search_results %} 40 |
      41 | {% for href, caption, context in search_results %} 42 |
    • 43 | {{ caption }} 44 |

      {{ context|e }}

      45 |
    • 46 | {% endfor %} 47 |
    48 | {% endif %} 49 |
    50 | {% endblock %} 51 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/searchbox.html: -------------------------------------------------------------------------------- 1 | 6 | 7 | 19 | 20 |
    21 | 22 | 23 | 24 |
    25 | 26 | 27 | 28 | 62 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .font-smooth,.icon:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:fontawesome-webfont;font-weight:normal;font-style:normal;src:url("../font/fontawesome_webfont.eot");src:url("../font/fontawesome_webfont.eot?#iefix") format("embedded-opentype"),url("../font/fontawesome_webfont.woff") format("woff"),url("../font/fontawesome_webfont.ttf") format("truetype"),url("../font/fontawesome_webfont.svg#fontawesome-webfont") format("svg")}.icon:before{display:inline-block;font-family:fontawesome-webfont;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .icon{display:inline-block;text-decoration:inherit}li .icon{display:inline-block}li .icon-large:before,li .icon-large:before{width:1.875em}ul.icons{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.icons li .icon{width:0.8em}ul.icons li .icon-large:before,ul.icons li .icon-large:before{vertical-align:baseline}.icon-book:before{content:"\f02d"}.icon-caret-down:before{content:"\f0d7"}.icon-caret-up:before{content:"\f0d8"}.icon-caret-left:before{content:"\f0d9"}.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}img{width:100%;height:auto}} 2 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/font/fontawesome_webfont.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/font/fontawesome_webfont.eot -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/font/fontawesome_webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/font/fontawesome_webfont.ttf -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/font/fontawesome_webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/font/fontawesome_webfont.woff -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/images/banner_ad_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/images/banner_ad_1.png -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/images/banner_ad_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/images/banner_ad_2.png -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/images/logo_invert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/_themes/srtd/static/images/logo_invert.png -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/static/js/theme.js: -------------------------------------------------------------------------------- 1 | $( document ).ready(function() { 2 | // Shift nav in mobile when clicking the menu. 3 | $("[data-toggle='wy-nav-top']").click(function() { 4 | $("[data-toggle='wy-nav-shift']").toggleClass("shift"); 5 | $("[data-toggle='rst-versions']").toggleClass("shift"); 6 | }); 7 | // Close menu when you click a link. 8 | $(".wy-menu-vertical .current ul li a").click(function() { 9 | $("[data-toggle='wy-nav-shift']").removeClass("shift"); 10 | $("[data-toggle='rst-versions']").toggleClass("shift"); 11 | }); 12 | $("[data-toggle='rst-current-version']").click(function() { 13 | $("[data-toggle='rst-versions']").toggleClass("shift-up"); 14 | }); 15 | $("table.docutils:not(.field-list").wrap("
    "); 16 | }); 17 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = css/theme.min.css 4 | 5 | [options] 6 | typekit_id = hiw1hhg 7 | analytics_id = 8 | -------------------------------------------------------------------------------- /ansible-v2/_themes/srtd/versions.html: -------------------------------------------------------------------------------- 1 | {% if READTHEDOCS %} 2 | {# Add rst-badge after rst-versions for small badge style. #} 3 |
    4 | 5 | Read the Docs 6 | v: {{ current_version }} 7 | 8 | 9 |
    10 |
    11 |
    Versions
    12 | {% for slug, url in versions %} 13 |
    {{ slug }}
    14 | {% endfor %} 15 |
    16 |
    17 |
    Downloads
    18 | {% for type, url in downloads %} 19 |
    {{ type }}
    20 | {% endfor %} 21 |
    22 |
    23 |
    On Read the Docs
    24 |
    25 | Project Home 26 |
    27 |
    28 | Builds 29 |
    30 |
    31 |
    32 | Free document hosting provided by Read the Docs. 33 | 34 |
    35 |
    36 | {% endif %} 37 | 38 | -------------------------------------------------------------------------------- /ansible-v2/build-site.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # (c) 2012, Michael DeHaan 3 | # 4 | # This file is part of the Ansible Documentation 5 | # 6 | # Ansible is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # Ansible is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with Ansible. If not, see . 18 | 19 | __docformat__ = 'restructuredtext' 20 | 21 | import os 22 | import sys 23 | import traceback 24 | try: 25 | from sphinx.application import Sphinx 26 | except ImportError: 27 | print "#################################" 28 | print "Dependency missing: Python Sphinx" 29 | print "#################################" 30 | sys.exit(1) 31 | import os 32 | 33 | 34 | class SphinxBuilder(object): 35 | """ 36 | Creates HTML documentation using Sphinx. 37 | """ 38 | 39 | def __init__(self): 40 | """ 41 | Run the DocCommand. 42 | """ 43 | print "Creating html documentation ..." 44 | 45 | try: 46 | buildername = 'html' 47 | 48 | outdir = os.path.abspath(os.path.join(os.getcwd(), "htmlout")) 49 | # Create the output directory if it doesn't exist 50 | if not os.access(outdir, os.F_OK): 51 | os.mkdir(outdir) 52 | 53 | doctreedir = os.path.join('./', '.doctrees') 54 | 55 | confdir = os.path.abspath('./') 56 | srcdir = os.path.abspath('rst') 57 | freshenv = True 58 | 59 | # Create the builder 60 | app = Sphinx(srcdir, 61 | confdir, 62 | outdir, 63 | doctreedir, 64 | buildername, 65 | {}, 66 | sys.stdout, 67 | sys.stderr, 68 | freshenv) 69 | 70 | app.builder.build_all() 71 | 72 | except ImportError, ie: 73 | traceback.print_exc() 74 | except Exception, ex: 75 | print >> sys.stderr, "FAIL! exiting ... (%s)" % ex 76 | 77 | def build_docs(self): 78 | self.app.builder.build_all() 79 | 80 | 81 | def build_rst_docs(): 82 | docgen = SphinxBuilder() 83 | 84 | if __name__ == '__main__': 85 | if '-h' in sys.argv or '--help' in sys.argv: 86 | print "This script builds the html documentation from rst/asciidoc sources.\n" 87 | print " Run 'make docs' to build everything." 88 | print " Run 'make viewdocs' to build and then preview in a web browser." 89 | sys.exit(0) 90 | 91 | build_rst_docs() 92 | 93 | if "view" in sys.argv: 94 | import webbrowser 95 | if not webbrowser.open('htmlout/index.html'): 96 | print >> sys.stderr, "Could not open on your webbrowser." 
97 | -------------------------------------------------------------------------------- /ansible-v2/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/favicon.ico -------------------------------------------------------------------------------- /ansible-v2/js/ansible/application.js: -------------------------------------------------------------------------------- 1 | angular.module('ansibleApp', []).filter('moduleVersion', function() { 2 | return function(modules, version) { 3 | 4 | var parseVersionString = function (str) { 5 | if (typeof(str) != 'string') { return false; } 6 | var x = str.split('.'); 7 | // parse from string or default to 0 if can't parse 8 | var maj = parseInt(x[0]) || 0; 9 | var min = parseInt(x[1]) || 0; 10 | var pat = parseInt(x[2]) || 0; 11 | return { 12 | major: maj, 13 | minor: min, 14 | patch: pat 15 | } 16 | } 17 | 18 | var vMinMet = function(vmin, vcurrent) { 19 | minimum = parseVersionString(vmin); 20 | running = parseVersionString(vcurrent); 21 | if (running.major != minimum.major) 22 | return (running.major > minimum.major); 23 | else { 24 | if (running.minor != minimum.minor) 25 | return (running.minor > minimum.minor); 26 | else { 27 | if (running.patch != minimum.patch) 28 | return (running.patch > minimum.patch); 29 | else 30 | return true; 31 | } 32 | } 33 | }; 34 | 35 | var result = []; 36 | if (!version) { 37 | return modules; 38 | } 39 | for (var i = 0; i < modules.length; i++) { 40 | if (vMinMet(modules[i].version_added, version)) { 41 | result[result.length] = modules[i]; 42 | } 43 | } 44 | 45 | return result; 46 | }; 47 | }).filter('uniqueVersion', function() { 48 | return function(modules) { 49 | var result = []; 50 | var inArray = function (needle, haystack) { 51 | var length = haystack.length; 52 | for(var i = 0; i < length; i++) { 53 | if(haystack[i] == needle) return true; 54 | } 55 | return false; 56 | } 57 | 58 | var parseVersionString = function (str) { 59 | if (typeof(str) != 'string') { return false; } 60 | var x = str.split('.'); 61 | // parse from string or default to 0 if can't parse 62 | var maj = parseInt(x[0]) || 0; 63 | var min = parseInt(x[1]) || 0; 64 | var pat = parseInt(x[2]) || 0; 65 | return { 66 | major: maj, 67 | minor: min, 68 | patch: pat 69 | } 70 | } 71 | 72 | for (var i = 0; i < modules.length; i++) { 73 | if (!inArray(modules[i].version_added, result)) { 74 | // Some module do not define version 75 | if (modules[i].version_added) { 76 | result[result.length] = "" + modules[i].version_added; 77 | } 78 | } 79 | } 80 | 81 | result.sort( 82 | function (a, b) { 83 | ao = parseVersionString(a); 84 | bo = parseVersionString(b); 85 | if (ao.major == bo.major) { 86 | if (ao.minor == bo.minor) { 87 | if (ao.patch == bo.patch) { 88 | return 0; 89 | } 90 | else { 91 | return (ao.patch > bo.patch) ? 1 : -1; 92 | } 93 | } 94 | else { 95 | return (ao.minor > bo.minor) ? 1 : -1; 96 | } 97 | } 98 | else { 99 | return (ao.major > bo.major) ? 1 : -1; 100 | } 101 | }); 102 | 103 | return result; 104 | }; 105 | }); 106 | 107 | -------------------------------------------------------------------------------- /ansible-v2/man/ansible-playbook.1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | ansible-playbook

NAME

    ansible-playbook — run an ansible playbook

SYNOPSIS

    ansible-playbook <filename.yml> … [options]

    DESCRIPTION

    Ansible playbooks are a configuration and multinode deployment 4 | system. Ansible-playbook is the tool used to run them. See the 5 | project home page (link below) for more information.
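As a purely illustrative sketch (not part of the original page), a minimal playbook file of the kind this command runs might look like the following; the "webservers" group and the package task are hypothetical:

    ---
    # Hypothetical example: install ntp on hosts in the "webservers" group
    - hosts: webservers
      tasks:
        - name: ensure ntp is installed
          yum: name=ntp state=present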

    ARGUMENTS

    6 | filename.yml 7 |
    8 | The names of one or more YAML format files to run as ansible playbooks. 9 |

    OPTIONS

    10 | -i PATH, --inventory=PATH 11 |
    12 | The PATH to the inventory hosts file, which defaults to /etc/ansible/hosts. 13 |
    14 | -M DIRECTORY, --module-path=DIRECTORY 15 |
    16 | The DIRECTORY to load modules from. The default is /usr/share/ansible. 17 |
    18 | -f NUM, --forks=NUM 19 |
20 | Level of parallelism. NUM is specified as an integer; the default is 5. 21 |
    22 | -k, --ask-pass 23 |
    24 | Prompt for the SSH password instead of assuming key-based authentication with ssh-agent. 25 |
    26 | -T SECONDS, --timeout=SECONDS 27 |
    28 | Connection timeout to use when trying to talk to hosts, in SECONDS. 29 |

    ENVIRONMENT

The following environment variables may be specified.

    ANSIBLE_HOSTS  — Override the default ansible hosts file

    ANSIBLE_LIBRARY — Override the default ansible module library path

    AUTHOR

    Ansible was originally written by Michael DeHaan. See the AUTHORS file 30 | for a complete list of contributors.

    COPYRIGHT

    Copyright © 2012, Michael DeHaan

    Ansible is released under the terms of the GPLv3 License.

    SEE ALSO

    ansible(1)

    Extensive documentation as well as IRC and mailing list info 31 | is available on the ansible home page: https://ansible.github.com/

    32 | -------------------------------------------------------------------------------- /ansible-v2/modules.js: -------------------------------------------------------------------------------- 1 | function AnsibleModules($scope) { 2 | $scope.modules = []; 3 | 4 | $scope.orderProp = "module"; 5 | } -------------------------------------------------------------------------------- /ansible-v2/rst/YAMLSyntax.rst: -------------------------------------------------------------------------------- 1 | YAML Syntax 2 | =========== 3 | 4 | This page provides a basic overview of correct YAML syntax, which is how Ansible 5 | playbooks (our configuration management language) are expressed. 6 | 7 | We use YAML because it is easier for humans to read and write than other common 8 | data formats like XML or JSON. Further, there are libraries available in most 9 | programming languages for working with YAML. 10 | 11 | You may also wish to read :doc:`playbooks` at the same time to see how this 12 | is used in practice. 13 | 14 | 15 | YAML Basics 16 | ----------- 17 | 18 | For Ansible, nearly every YAML file starts with a list. 19 | Each item in the list is a list of key/value pairs, commonly 20 | called a "hash" or a "dictionary". So, we need to know how 21 | to write lists and dictionaries in YAML. 22 | 23 | There's another small quirk to YAML. All YAML files (regardless of their association with 24 | Ansible or not) should begin with ``---``. This is part of the YAML 25 | format and indicates the start of a document. 26 | 27 | All members of a list are lines beginning at the same indentation level starting 28 | with a ``"- "`` (a dash and a space):: 29 | 30 | --- 31 | # A list of tasty fruits 32 | - Apple 33 | - Orange 34 | - Strawberry 35 | - Mango 36 | 37 | A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space):: 38 | 39 | --- 40 | # An employee record 41 | name: Example Developer 42 | job: Developer 43 | skill: Elite 44 | 45 | Dictionaries can also be represented in an abbreviated form if you really want to:: 46 | 47 | --- 48 | # An employee record 49 | {name: Example Developer, job: Developer, skill: Elite} 50 | 51 | .. _truthiness: 52 | 53 | Ansible doesn't really use these too much, but you can also specify a 54 | boolean value (true/false) in several forms:: 55 | 56 | --- 57 | create_key: yes 58 | needs_agent: no 59 | knows_oop: True 60 | likes_emacs: TRUE 61 | uses_cvs: false 62 | 63 | Let's combine what we learned so far in an arbitrary YAML example. This really 64 | has nothing to do with Ansible, but will give you a feel for the format:: 65 | 66 | --- 67 | # An employee record 68 | name: Example Developer 69 | job: Developer 70 | skill: Elite 71 | employed: True 72 | foods: 73 | - Apple 74 | - Orange 75 | - Strawberry 76 | - Mango 77 | languages: 78 | ruby: Elite 79 | python: Elite 80 | dotnet: Lame 81 | 82 | That's all you really need to know about YAML to start writing 83 | `Ansible` playbooks. 84 | 85 | Gotchas 86 | ------- 87 | 88 | While YAML is generally friendly, the following is going to result in a YAML syntax error:: 89 | 90 | foo: somebody said I should put a colon here: so I did 91 | 92 | You will want to quote any hash values using colons, like so:: 93 | 94 | foo: "somebody said I should put a colon here: so I did" 95 | 96 | And then the colon will be preserved. 97 | 98 | Further, Ansible uses "{{ var }}" for variables. 
If a value after a colon starts 99 | with a "{", YAML will think it is a dictionary, so you must quote it, like so:: 100 | 101 | foo: "{{ variable }}" 102 | 103 | 104 | .. seealso:: 105 | 106 | :doc:`playbooks` 107 | Learn what playbooks can do and how to write/run them. 108 | `YAMLLint `_ 109 | YAML Lint (online) helps you debug YAML syntax if you are having problems 110 | `Github examples directory `_ 111 | Complete playbook files from the github project source 112 | `Mailing List `_ 113 | Questions? Help? Ideas? Stop by the list on Google Groups 114 | `irc.freenode.net `_ 115 | #ansible IRC chat channel 116 | 117 | -------------------------------------------------------------------------------- /ansible-v2/rst/become.rst: -------------------------------------------------------------------------------- 1 | Ansible Privilege Escalation 2 | ++++++++++++++++++++++++++++ 3 | 4 | Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. 5 | 6 | .. contents:: Topics 7 | 8 | Become 9 | `````` 10 | Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user 11 | and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still 12 | being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker), 13 | pfexec and others. 14 | 15 | 16 | New directives 17 | -------------- 18 | 19 | become 20 | equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation 21 | 22 | become_user 23 | equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges 24 | 25 | become_method 26 | at play or task level overrides the default method set in ansible.cfg, set to 'sudo'/'su'/'pbrun'/'pfexec' 27 | 28 | 29 | New ansible\_ variables 30 | ----------------------- 31 | Each allows you to set an option per group and/or host 32 | 33 | ansible_become 34 | equivalent to ansible_sudo or ansible_su, allows to force privilege escalation 35 | 36 | ansible_become_method 37 | allows to set privilege escalation method 38 | 39 | ansible_become_user 40 | equivalent to ansible_sudo_user or ansible_su_user, allows to set the user you become through privilege escalation 41 | 42 | ansible_become_pass 43 | equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password 44 | 45 | 46 | New command line options 47 | ------------------------ 48 | 49 | --ask-become-pass 50 | ask for privilege escalation password 51 | 52 | --become,-b 53 | run operations with become (no password implied) 54 | 55 | --become-method=BECOME_METHOD 56 | privilege escalation method to use (default=sudo), 57 | valid choices: [ sudo | su | pbrun | pfexec ] 58 | 59 | --become-user=BECOME_USER 60 | run operations as this user (default=root) 61 | 62 | 63 | sudo and su still work! 64 | ----------------------- 65 | 66 | Old playbooks will not need to be changed, even though they are deprecated, sudo and su directives will continue to work though it 67 | is recommended to move to become as they may be retired at one point. You cannot mix directives on the same object though, Ansible 68 | will complain if you try to. 69 | 70 | Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the 71 | new ones. 72 | 73 | 74 | 75 | .. 
note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc). 76 | 77 | .. note:: Methods cannot be chained, you cannot use 'sudo /bin/su -' to become a user, you need to have privileges to run the command as that user in sudo or be able to su directly to it (the same for pbrun, pfexec or other supported methods). 78 | 79 | .. note:: Privilege escalation permissions have to be general, Ansible does not always use a specific command to do something but runs modules (code) from a temporary file name which changes every time. So if you have '/sbin/sevice' or '/bin/chmod' as the allowed commands this will fail with ansible. 80 | 81 | .. seealso:: 82 | 83 | `Mailing List `_ 84 | Questions? Help? Ideas? Stop by the list on Google Groups 85 | `irc.freenode.net `_ 86 | #ansible IRC chat channel 87 | 88 | -------------------------------------------------------------------------------- /ansible-v2/rst/common_return_values.rst: -------------------------------------------------------------------------------- 1 | Common Return Values 2 | ==================== 3 | 4 | .. contents:: Topics 5 | 6 | Ansible modules normally return a data structure that can be registered into a variable, or seen directly when using 7 | the `ansible` program as output. Here we document the values common to all modules, each module can optionally document 8 | its own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com. 9 | 10 | .. _facts: 11 | 12 | Facts 13 | ````` 14 | 15 | Some modules return 'facts' to ansible (i.e setup), this is done through a 'ansible_facts' key and anything inside 16 | will automatically be available for the current host directly as a variable and there is no need to 17 | register this data. 18 | 19 | 20 | .. _status: 21 | 22 | Status 23 | `````` 24 | 25 | Every module must return a status, saying if the module was successful, if anything changed or not. Ansible itself 26 | will return a status if it skips the module due to a user condition (when: ) or running in check mode when the module 27 | does not support it. 28 | 29 | 30 | .. _other: 31 | 32 | Other common returns 33 | ```````````````````` 34 | 35 | It is common on failure or success to return a 'msg' that either explains the failure or makes a note about the execution. 36 | Some modules, specifically those that execute shell or commands directly, will return stdout and stderr, if ansible sees 37 | a stdout in the results it will append a stdout_lines which is just a list or the lines in stdout. 38 | 39 | .. seealso:: 40 | 41 | :doc:`modules` 42 | Learn about available modules 43 | `GitHub Core modules directory `_ 44 | Browse source of core modules 45 | `Github Extras modules directory `_ 46 | Browse source of extras modules. 47 | `Mailing List `_ 48 | Development mailing list 49 | `irc.freenode.net `_ 50 | #ansible IRC chat channel 51 | -------------------------------------------------------------------------------- /ansible-v2/rst/developing.rst: -------------------------------------------------------------------------------- 1 | Developer Information 2 | ````````````````````` 3 | 4 | Learn how to build modules of your own in any language, and also how to extend Ansible through several kinds of plugins. Explore Ansible's Python API and write Python plugins to integrate with other solutions in your environment. 5 | 6 | .. 
toctree:: 7 | :maxdepth: 1 8 | 9 | developing_api 10 | developing_inventory 11 | developing_modules 12 | developing_plugins 13 | developing_test_pr 14 | 15 | Developers will also likely be interested in the fully-discoverable in :doc:`tower`. It's great for embedding Ansible in all manner of applications. 16 | 17 | -------------------------------------------------------------------------------- /ansible-v2/rst/developing_api.rst: -------------------------------------------------------------------------------- 1 | Python API 2 | ========== 3 | 4 | .. contents:: Topics 5 | 6 | There are several interesting ways to use Ansible from an API perspective. You can use 7 | the Ansible python API to control nodes, you can extend Ansible to respond to various python events, you can 8 | write various plugins, and you can plug in inventory data from external data sources. This document 9 | covers the Runner and Playbook API at a basic level. 10 | 11 | If you are looking to use Ansible programmatically from something other than Python, trigger events asynchronously, 12 | or have access control and logging demands, take a look at :doc:`tower` 13 | as it has a very nice REST API that provides all of these things at a higher level. 14 | 15 | Ansible is written in its own API so you have a considerable amount of power across the board. 16 | This chapter discusses the Python API. 17 | 18 | .. _python_api: 19 | 20 | Python API 21 | ---------- 22 | 23 | The Python API is very powerful, and is how the ansible CLI and ansible-playbook 24 | are implemented. 25 | 26 | It's pretty simple:: 27 | 28 | import ansible.runner 29 | 30 | runner = ansible.runner.Runner( 31 | module_name='ping', 32 | module_args='', 33 | pattern='web*', 34 | forks=10 35 | ) 36 | datastructure = runner.run() 37 | 38 | The run method returns results per host, grouped by whether they 39 | could be contacted or not. Return types are module specific, as 40 | expressed in the :doc:`modules` documentation.:: 41 | 42 | { 43 | "dark" : { 44 | "web1.example.com" : "failure message" 45 | }, 46 | "contacted" : { 47 | "web2.example.com" : 1 48 | } 49 | } 50 | 51 | A module can return any type of JSON data it wants, so Ansible can 52 | be used as a framework to rapidly build powerful applications and scripts. 53 | 54 | .. _detailed_api_example: 55 | 56 | Detailed API Example 57 | ```````````````````` 58 | 59 | The following script prints out the uptime information for all hosts:: 60 | 61 | #!/usr/bin/python 62 | 63 | import ansible.runner 64 | import sys 65 | 66 | # construct the ansible runner and execute on all hosts 67 | results = ansible.runner.Runner( 68 | pattern='*', forks=10, 69 | module_name='command', module_args='/usr/bin/uptime', 70 | ).run() 71 | 72 | if results is None: 73 | print "No hosts found" 74 | sys.exit(1) 75 | 76 | print "UP ***********" 77 | for (hostname, result) in results['contacted'].items(): 78 | if not 'failed' in result: 79 | print "%s >>> %s" % (hostname, result['stdout']) 80 | 81 | print "FAILED *******" 82 | for (hostname, result) in results['contacted'].items(): 83 | if 'failed' in result: 84 | print "%s >>> %s" % (hostname, result['msg']) 85 | 86 | print "DOWN *********" 87 | for (hostname, result) in results['dark'].items(): 88 | print "%s >>> %s" % (hostname, result) 89 | 90 | Advanced programmers may also wish to read the source to ansible itself, for 91 | it uses the Runner() API (with all available options) to implement the 92 | command line tools ``ansible`` and ``ansible-playbook``. 93 | 94 | .. 
seealso:: 95 | 96 | :doc:`developing_inventory` 97 | Developing dynamic inventory integrations 98 | :doc:`developing_modules` 99 | How to develop modules 100 | :doc:`developing_plugins` 101 | How to develop plugins 102 | `Development Mailing List `_ 103 | Mailing list for development topics 104 | `irc.freenode.net `_ 105 | #ansible IRC chat channel 106 | 107 | -------------------------------------------------------------------------------- /ansible-v2/rst/developing_inventory.rst: -------------------------------------------------------------------------------- 1 | Developing Dynamic Inventory Sources 2 | ==================================== 3 | 4 | .. contents:: Topics 5 | :local: 6 | 7 | As described in :doc:`intro_dynamic_inventory`, ansible can pull inventory information from dynamic sources, including cloud sources. 8 | 9 | How do we write a new one? 10 | 11 | Simple! We just create a script or program that can return JSON in the right format when fed the proper arguments. 12 | You can do this in any language. 13 | 14 | .. _inventory_script_conventions: 15 | 16 | Script Conventions 17 | `````````````````` 18 | 19 | When the external node script is called with the single argument ``--list``, the script must return a JSON hash/dictionary of all the groups to be managed. Each group's value should be either a hash/dictionary containing a list of each host/IP, potential child groups, and potential group variables, or simply a list of host/IP addresses, like so:: 20 | 21 | { 22 | "databases" : { 23 | "hosts" : [ "host1.example.com", "host2.example.com" ], 24 | "vars" : { 25 | "a" : true 26 | } 27 | }, 28 | "webservers" : [ "host2.example.com", "host3.example.com" ], 29 | "atlanta" : { 30 | "hosts" : [ "host1.example.com", "host4.example.com", "host5.example.com" ], 31 | "vars" : { 32 | "b" : false 33 | }, 34 | "children": [ "marietta", "5points" ] 35 | }, 36 | "marietta" : [ "host6.example.com" ], 37 | "5points" : [ "host7.example.com" ] 38 | } 39 | 40 | .. versionadded:: 1.0 41 | 42 | Before version 1.0, each group could only have a list of hostnames/IP addresses, like the webservers, marietta, and 5points groups above. 43 | 44 | When called with the arguments ``--host `` (where is a host from above), the script must return either an empty JSON 45 | hash/dictionary, or a hash/dictionary of variables to make available to templates and playbooks. Returning variables is optional, 46 | if the script does not wish to do this, returning an empty hash/dictionary is the way to go:: 47 | 48 | { 49 | "favcolor" : "red", 50 | "ntpserver" : "wolf.example.com", 51 | "monitoring" : "pack.example.com" 52 | } 53 | 54 | .. _inventory_script_tuning: 55 | 56 | Tuning the External Inventory Script 57 | ```````````````````````````````````` 58 | 59 | .. versionadded:: 1.3 60 | 61 | The stock inventory script system detailed above works for all versions of Ansible, but calling 62 | ``--host`` for every host can be rather expensive, especially if it involves expensive API calls to 63 | a remote subsystem. In Ansible 64 | 1.3 or later, if the inventory script returns a top level element called "_meta", it is possible 65 | to return all of the host variables in one inventory script call. When this meta element contains 66 | a value for "hostvars", the inventory script will not be invoked with ``--host`` for each host. This 67 | results in a significant performance increase for large numbers of hosts, and also makes client 68 | side caching easier to implement for the inventory script. 
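Putting the conventions above together, here is a minimal sketch of what such a script could look like; it is only an illustration, and the file name, group names, hosts and variables are invented rather than anything Ansible requires::

    #!/usr/bin/env python
    # Hypothetical minimal dynamic inventory script; group/host/variable names are examples only.
    import json
    import sys

    INVENTORY = {
        "webservers": {
            "hosts": ["web1.example.com", "web2.example.com"],
            "vars": {"http_port": 80},
        },
        "_meta": {
            "hostvars": {
                "web1.example.com": {"rack": "a1"},
                "web2.example.com": {"rack": "b2"},
            },
        },
    }

    if len(sys.argv) > 1 and sys.argv[1] == '--list':
        print(json.dumps(INVENTORY))
    else:
        # Because _meta/hostvars is included above, Ansible will not normally
        # call --host; returning an empty hash covers that case anyway.
        print(json.dumps({}))

It answers ``--list`` with an inline ``_meta`` section so that Ansible does not have to call it once per host, and answers anything else (including ``--host``) with an empty hash. Make the script executable and pass it to ansible or ansible-playbook with ``-i`` to try it out; the exact layout of the ``_meta`` element is shown below.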
69 | 70 | The data to be added to the top level JSON dictionary looks like this:: 71 | 72 | { 73 | 74 | # results of inventory script as above go here 75 | # ... 76 | 77 | "_meta" : { 78 | "hostvars" : { 79 | "moocow.example.com" : { "asdf" : 1234 }, 80 | "llama.example.com" : { "asdf" : 5678 }, 81 | } 82 | } 83 | 84 | } 85 | 86 | .. seealso:: 87 | 88 | :doc:`developing_api` 89 | Python API to Playbooks and Ad Hoc Task Execution 90 | :doc:`developing_modules` 91 | How to develop modules 92 | :doc:`developing_plugins` 93 | How to develop plugins 94 | `Ansible Tower `_ 95 | REST API endpoint and GUI for Ansible, syncs with dynamic inventory 96 | `Development Mailing List `_ 97 | Mailing list for development topics 98 | `irc.freenode.net `_ 99 | #ansible IRC chat channel 100 | -------------------------------------------------------------------------------- /ansible-v2/rst/galaxy.rst: -------------------------------------------------------------------------------- 1 | Ansible Galaxy 2 | ++++++++++++++ 3 | 4 | "Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool that helps work with roles. 5 | 6 | .. contents:: Topics 7 | 8 | The Website 9 | ``````````` 10 | 11 | The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. 12 | 13 | You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. 14 | 15 | Read the "About" page on the Galaxy site for more information. 16 | 17 | The ansible-galaxy command line tool 18 | ```````````````````````````````````` 19 | 20 | The command line ansible-galaxy has many different subcommands. 21 | 22 | Installing Roles 23 | ---------------- 24 | 25 | The most obvious is downloading roles from the Ansible Galaxy website:: 26 | 27 | ansible-galaxy install username.rolename 28 | 29 | Building out Role Scaffolding 30 | ----------------------------- 31 | 32 | It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: 33 | 34 | ansible-galaxy init rolename 35 | 36 | Installing Multiple Roles From A File 37 | ------------------------------------- 38 | 39 | To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: 40 | 41 | ansible-galaxy install -r requirements.txt 42 | 43 | Where the requirements.txt looks like:: 44 | 45 | username1.foo_role 46 | username2.bar_role 47 | 48 | To request specific versions (tags) of a role, use this syntax in the roles file:: 49 | 50 | username1.foo_role,version 51 | username2.bar_role,version 52 | 53 | Available versions will be listed on the Ansible Galaxy webpage for that role. 54 | 55 | Advanced Control over Role Requirements Files 56 | --------------------------------------------- 57 | 58 | For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: 59 | 60 | ansible-galaxy install -r requirements.yml 61 | 62 | The extension is important. If the .yml extension is left off, the ansible-galaxy CLI will assume the file is in the "basic" format and will be confused. 
63 | 64 | And here's an example showing some specific version downloads from multiple sources. In one of the examples we also override the name of the role and download it as something different:: 65 | 66 | # from galaxy 67 | - src: yatesr.timezone 68 | 69 | # from github 70 | - src: https://github.com/bennojoy/nginx 71 | 72 | # from github installing to a relative path 73 | - src: https://github.com/bennojoy/nginx 74 | path: vagrant/roles/ 75 | 76 | # from github, overriding the name and specifying a specific tag 77 | - src: https://github.com/bennojoy/nginx 78 | version: master 79 | name: nginx_role 80 | 81 | # from a webserver, where the role is packaged in a tar.gz 82 | - src: https://some.webserver.example.com/files/master.tar.gz 83 | name: http-role 84 | 85 | # from bitbucket, if bitbucket happens to be operational right now :) 86 | - src: git+http://bitbucket.org/willthames/git-ansible-galaxy 87 | version: v1.4 88 | 89 | # from bitbucket, alternative syntax and caveats 90 | - src: http://bitbucket.org/willthames/hg-ansible-galaxy 91 | scm: hg 92 | 93 | As you can see in the above, there are a large amount of controls available 94 | to customize where roles can be pulled from, and what to save roles as. 95 | 96 | Roles pulled from galaxy work as with other SCM sourced roles above. To download a role with dependencies, and automatically install those dependencies, the role must be uploaded to the Ansible Galaxy website. 97 | 98 | .. seealso:: 99 | 100 | :doc:`playbooks_roles` 101 | All about ansible roles 102 | `Mailing List `_ 103 | Questions? Help? Ideas? Stop by the list on Google Groups 104 | `irc.freenode.net `_ 105 | #ansible IRC chat channel 106 | 107 | -------------------------------------------------------------------------------- /ansible-v2/rst/guide_vagrant.rst: -------------------------------------------------------------------------------- 1 | Using Vagrant and Ansible 2 | ========================= 3 | 4 | .. _vagrant_intro: 5 | 6 | Introduction 7 | ```````````` 8 | 9 | Vagrant is a tool to manage virtual machine environments, and allows you to 10 | configure and use reproducible work environments on top of various 11 | virtualization and cloud platforms. It also has integration with Ansible as a 12 | provisioner for these virtual machines, and the two tools work together well. 13 | 14 | This guide will describe how to use Vagrant and Ansible together. 15 | 16 | If you're not familiar with Vagrant, you should visit `the documentation 17 | `_. 18 | 19 | This guide assumes that you already have Ansible installed and working. 20 | Running from a Git checkout is fine. Follow the :doc:`intro_installation` 21 | guide for more information. 22 | 23 | .. _vagrant_setup: 24 | 25 | Vagrant Setup 26 | ````````````` 27 | 28 | The first step once you've installed Vagrant is to create a ``Vagrantfile`` 29 | and customize it to suit your needs. This is covered in detail in the Vagrant 30 | documentation, but here is a quick example: 31 | 32 | .. code-block:: bash 33 | 34 | $ mkdir vagrant-test 35 | $ cd vagrant-test 36 | $ vagrant init precise32 http://files.vagrantup.com/precise32.box 37 | 38 | This will create a file called Vagrantfile that you can edit to suit your 39 | needs. The default Vagrantfile has a lot of comments. Here is a simplified 40 | example that includes a section to use the Ansible provisioner: 41 | 42 | .. code-block:: ruby 43 | 44 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
45 | VAGRANTFILE_API_VERSION = "2" 46 | 47 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 48 | config.vm.box = "precise32" 49 | config.vm.box_url = "http://files.vagrantup.com/precise32.box" 50 | 51 | config.vm.network :public_network 52 | 53 | config.vm.provision "ansible" do |ansible| 54 | ansible.playbook = "playbook.yml" 55 | end 56 | end 57 | 58 | The Vagrantfile has a lot of options, but these are the most important ones. 59 | Notice the ``config.vm.provision`` section that refers to an Ansible playbook 60 | called ``playbook.yml`` in the same directory as the Vagrantfile. Vagrant runs 61 | the provisioner once the virtual machine has booted and is ready for SSH 62 | access. 63 | 64 | .. code-block:: bash 65 | 66 | $ vagrant up 67 | 68 | This will start the VM and run the provisioning playbook. 69 | 70 | There are a lot of Ansible options you can configure in your Vagrantfile. Some 71 | particularly useful options are ``ansible.extra_vars``, ``ansible.sudo`` and 72 | ``ansible.sudo_user``, and ``ansible.host_key_checking`` which you can disable 73 | to avoid SSH connection problems to new virtual machines. 74 | 75 | Visit the `Ansible Provisioner documentation 76 | `_ for more 77 | information. 78 | 79 | To re-run a playbook on an existing VM, just run: 80 | 81 | .. code-block:: bash 82 | 83 | $ vagrant provision 84 | 85 | This will re-run the playbook. 86 | 87 | .. _running_ansible: 88 | 89 | Running Ansible Manually 90 | ```````````````````````` 91 | 92 | Sometimes you may want to run Ansible manually against the machines. This is 93 | pretty easy to do. 94 | 95 | Vagrant automatically creates an inventory file for each Vagrant machine in 96 | the same directory located under ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``. 97 | It configures the inventory file according to the SSH tunnel that Vagrant 98 | automatically creates, and executes ``ansible-playbook`` with the correct 99 | username and SSH key options to allow access. A typical automatically-created 100 | inventory file may look something like this: 101 | 102 | .. code-block:: none 103 | 104 | # Generated by Vagrant 105 | 106 | machine ansible_ssh_host=127.0.0.1 ansible_ssh_port=2222 107 | 108 | If you want to run Ansible manually, you will want to make sure to pass 109 | ``ansible`` or ``ansible-playbook`` commands the correct arguments for the 110 | username (usually ``vagrant``) and the SSH key (since Vagrant 1.7.0, this will be something like 111 | ``.vagrant/machines/[machine name]/[provider]/private_key``), and the autogenerated inventory file. 112 | 113 | Here is an example: 114 | 115 | .. code-block:: bash 116 | 117 | $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant playbook.yml 118 | 119 | Note: Vagrant versions prior to 1.7.0 will use the private key located at ``~/.vagrant.d/insecure_private_key.`` 120 | 121 | .. 
seealso:: 122 | 123 | `Vagrant Home `_ 124 | The Vagrant homepage with downloads 125 | `Vagrant Documentation `_ 126 | Vagrant Documentation 127 | `Ansible Provisioner `_ 128 | The Vagrant documentation for the Ansible provisioner 129 | :doc:`playbooks` 130 | An introduction to playbooks 131 | 132 | -------------------------------------------------------------------------------- /ansible-v2/rst/guides.rst: -------------------------------------------------------------------------------- 1 | Detailed Guides 2 | ``````````````` 3 | 4 | This section is new and evolving. The idea here is explore particular use cases in greater depth and provide a more "top down" explanation of some basic features. 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | 9 | guide_aws 10 | guide_rax 11 | guide_gce 12 | guide_vagrant 13 | guide_rolling_upgrade 14 | 15 | Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/DigitalOcean, Continuous Deployment, and more. 16 | 17 | -------------------------------------------------------------------------------- /ansible-v2/rst/index.rst: -------------------------------------------------------------------------------- 1 | Ansible V2 Documentation 2 | ======================== 3 | 4 | About Ansible 5 | ````````````` 6 | 7 | Welcome to the Ansible documentation! 8 | 9 | Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks 10 | such as continuous deployments or zero downtime rolling updates. 11 | 12 | Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. 13 | 14 | We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances. 15 | 16 | Ansible manages machines in an agent-less manner. There is never a question of how to 17 | upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. 18 | 19 | This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, we note in each section the version of Ansible where the feature was added. 20 | 21 | Ansible, Inc. releases a new major release of Ansible approximately every two months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. However, the community around new modules and plugins being developed and contributed moves very quickly, typically adding 20 or so new modules in each release. 22 | 23 | .. _an_introduction: 24 | 25 | .. 
toctree:: 26 | :maxdepth: 1 27 | 28 | intro 29 | quickstart 30 | playbooks 31 | playbooks_special_topics 32 | modules 33 | guides 34 | developing 35 | tower 36 | community 37 | galaxy 38 | test_strategies 39 | faq 40 | glossary 41 | YAMLSyntax 42 | 43 | -------------------------------------------------------------------------------- /ansible-v2/rst/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | Before we dive into the really fun parts -- playbooks, configuration management, deployment, and orchestration, we'll learn how to get Ansible installed and cover some basic concepts. We'll also go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible. Additionally, we'll see what sort of modules are available in Ansible's core (though you can also write your own, which is also covered later). 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | 9 | intro_installation 10 | intro_getting_started 11 | intro_inventory 12 | intro_dynamic_inventory 13 | intro_patterns 14 | intro_adhoc 15 | intro_configuration 16 | intro_windows 17 | 18 | -------------------------------------------------------------------------------- /ansible-v2/rst/intro_patterns.rst: -------------------------------------------------------------------------------- 1 | Patterns 2 | ++++++++ 3 | 4 | .. contents:: Topics 5 | 6 | Patterns in Ansible are how we decide which hosts to manage. This can mean what hosts to communicate with, but in terms 7 | of :doc:`playbooks` it actually means what hosts to apply a particular configuration or IT process to. 8 | 9 | We'll go over how to use the command line in :doc:`intro_adhoc` section, however, basically it looks like this:: 10 | 11 | ansible -m -a 12 | 13 | Such as:: 14 | 15 | ansible webservers -m service -a "name=httpd state=restarted" 16 | 17 | A pattern usually refers to a set of groups (which are sets of hosts) -- in the above case, machines in the "webservers" group. 18 | 19 | Anyway, to use Ansible, you'll first need to know how to tell Ansible which hosts in your inventory to talk to. 20 | This is done by designating particular host names or groups of hosts. 21 | 22 | The following patterns are equivalent and target all hosts in the inventory:: 23 | 24 | all 25 | * 26 | 27 | It is also possible to address a specific host or set of hosts by name:: 28 | 29 | one.example.com 30 | one.example.com:two.example.com 31 | 192.168.1.50 32 | 192.168.1.* 33 | 34 | The following patterns address one or more groups. Groups separated by a colon indicate an "OR" configuration. 35 | This means the host may be in either one group or the other:: 36 | 37 | webservers 38 | webservers:dbservers 39 | 40 | You can exclude groups as well, for instance, all machines must be in the group webservers but not in the group phoenix:: 41 | 42 | webservers:!phoenix 43 | 44 | You can also specify the intersection of two groups. This would mean the hosts must be in the group webservers and 45 | the host must also be in the group staging:: 46 | 47 | webservers:&staging 48 | 49 | You can do combinations:: 50 | 51 | webservers:dbservers:&staging:!phoenix 52 | 53 | The above configuration means "all machines in the groups 'webservers' and 'dbservers' are to be managed if they are in 54 | the group 'staging' also, but the machines are not to be managed if they are in the group 'phoenix' ... whew! 
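To make that combined pattern a bit more concrete, here is a small, hypothetical inventory (the host and group names are invented for illustration)::

    [webservers]
    web1.example.com
    web2.example.com

    [dbservers]
    db1.example.com

    [staging]
    web1.example.com
    db1.example.com

    [phoenix]
    web1.example.com

Running an ad-hoc command with the combined pattern against this inventory::

    ansible 'webservers:dbservers:&staging:!phoenix' -m ping

would target only db1.example.com: the union of webservers and dbservers is web1, web2 and db1; intersecting with staging keeps web1 and db1; and excluding phoenix drops web1. Quoting the pattern keeps the shell from interpreting characters such as '!' and '&'.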
55 | 56 | You can also use variables if you want to pass some group specifiers via the "-e" argument to ansible-playbook, but this 57 | is uncommonly used:: 58 | 59 | webservers:!{{excluded}}:&{{required}} 60 | 61 | You also don't have to manage by strictly defined groups. Individual host names, IPs and groups, can also be referenced using 62 | wildcards:: 63 | 64 | *.example.com 65 | *.com 66 | 67 | It's also ok to mix wildcard patterns and groups at the same time:: 68 | 69 | one*.com:dbservers 70 | 71 | As an advanced usage, you can also select the numbered server in a group:: 72 | 73 | webservers[0] 74 | 75 | Or a portion of servers in a group:: 76 | 77 | webservers[0-25] 78 | 79 | Most people don't specify patterns as regular expressions, but you can. Just start the pattern with a '~':: 80 | 81 | ~(web|db).*\.example\.com 82 | 83 | While we're jumping a bit ahead, additionally, you can add an exclusion criteria just by supplying the ``--limit`` flag to /usr/bin/ansible or /usr/bin/ansible-playbook:: 84 | 85 | ansible-playbook site.yml --limit datacenter2 86 | 87 | And if you want to read the list of hosts from a file, prefix the file name with '@'. Since Ansible 1.2:: 88 | 89 | ansible-playbook site.yml --limit @retry_hosts.txt 90 | 91 | Easy enough. See :doc:`intro_adhoc` and then :doc:`playbooks` for how to apply this knowledge. 92 | 93 | .. seealso:: 94 | 95 | :doc:`intro_adhoc` 96 | Examples of basic commands 97 | :doc:`playbooks` 98 | Learning ansible's configuration management language 99 | `Mailing List `_ 100 | Questions? Help? Ideas? Stop by the list on Google Groups 101 | `irc.freenode.net `_ 102 | #ansible IRC chat channel 103 | 104 | -------------------------------------------------------------------------------- /ansible-v2/rst/modules.rst: -------------------------------------------------------------------------------- 1 | About Modules 2 | ============= 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | modules_intro 8 | modules_core 9 | modules_extra 10 | common_return_values 11 | 12 | 13 | Ansible ships with a number of modules (called the 'module library') 14 | that can be executed directly on remote hosts or through :doc:`Playbooks `. 15 | 16 | Users can also write their own modules. These modules can control system resources, 17 | like services, packages, or files (anything really), or handle executing system commands. 18 | 19 | 20 | .. seealso:: 21 | 22 | :doc:`intro_adhoc` 23 | Examples of using modules in /usr/bin/ansible 24 | :doc:`playbooks` 25 | Examples of using modules with /usr/bin/ansible-playbook 26 | :doc:`developing_modules` 27 | How to write your own modules 28 | :doc:`developing_api` 29 | Examples of using modules with the Python API 30 | `Mailing List `_ 31 | Questions? Help? Ideas? Stop by the list on Google Groups 32 | `irc.freenode.net `_ 33 | #ansible IRC chat channel 34 | -------------------------------------------------------------------------------- /ansible-v2/rst/modules/.gitdir: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/ansible-v2/rst/modules/.gitdir -------------------------------------------------------------------------------- /ansible-v2/rst/modules_core.rst: -------------------------------------------------------------------------------- 1 | Core Modules 2 | ------------ 3 | 4 | These are modules that the core ansible team maintains and will always ship with ansible itself. 
5 | They will also receive slightly higher priority for all requests than those in the "extras" repos. 6 | 7 | The source of these modules is hosted on GitHub in the `ansible-modules-core `_ repo. 8 | 9 | If you believe you have found a bug in a core module and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. 10 | 11 | Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development-oriented topics should instead use the similar `ansible-devel google group `_. 12 | 13 | Documentation updates for these modules can also be edited directly in the module itself by submitting a pull request to the module source code; just look for the "DOCUMENTATION" block in the source tree. 14 | -------------------------------------------------------------------------------- /ansible-v2/rst/modules_extra.rst: -------------------------------------------------------------------------------- 1 | Extras Modules 2 | -------------- 3 | 4 | These modules are currently shipped with Ansible, but might be shipped separately in the future. They are also mostly maintained by the community. 5 | Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. 6 | 7 | Popular "extras" modules may be promoted to core modules over time. 8 | 9 | The source for these modules is hosted on GitHub in the `ansible-modules-extras `_ repo. 10 | 11 | If you believe you have found a bug in an extras module and are already running the latest stable or development version of Ansible, 12 | first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ 13 | to see if a bug has already been filed. If not, we would be grateful if you would file one. 14 | 15 | Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ 16 | or on Ansible's "#ansible" channel, located on irc.freenode.net. 17 | Development-oriented topics should instead use the similar `ansible-devel google group `_. 18 | 19 | Documentation updates for these modules can also be edited directly in the module itself by submitting a pull request to the module source code; just look for the "DOCUMENTATION" block in the source tree. 20 | 21 | For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. 22 | 23 | -------------------------------------------------------------------------------- /ansible-v2/rst/modules_intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | Modules (also referred to as "task plugins" or "library plugins") are the ones that do 5 | the actual work in Ansible; they are what gets executed in each playbook task. 6 | But you can also run a single one using the 'ansible' command. 7 | 8 | Let's review how we execute three different modules from the command line:: 9 | 10 | ansible webservers -m service -a "name=httpd state=started" 11 | ansible webservers -m ping 12 | ansible webservers -m command -a "/sbin/reboot -t now" 13 | 14 | Each module supports taking arguments. Nearly all modules take ``key=value`` 15 | arguments, space delimited.
Some modules take no arguments, and the command/shell modules simply 16 | take the string of the command you want to run. 17 | 18 | From playbooks, Ansible modules are executed in a very similar way:: 19 | 20 | - name: reboot the servers 21 | action: command /sbin/reboot -t now 22 | 23 | Which can be abbreviated to:: 24 | 25 | - name: reboot the servers 26 | command: /sbin/reboot -t now 27 | 28 | Another way to pass arguments to a module is using yaml syntax also called 'complex args' :: 29 | 30 | - name: restart webserver 31 | service: 32 | name: httpd 33 | state: restarted 34 | 35 | All modules technically return JSON format data, though if you are using the command line or playbooks, you don't really need to know much about 36 | that. If you're writing your own module, you care, and this means you do not have to write modules in any particular language -- you get to choose. 37 | 38 | Modules strive to be `idempotent`, meaning they will seek to avoid changes to the system unless a change needs to be made. When using Ansible 39 | playbooks, these modules can trigger 'change events' in the form of notifying 'handlers' to run additional tasks. 40 | 41 | Documentation for each module can be accessed from the command line with the ansible-doc tool:: 42 | 43 | ansible-doc yum 44 | 45 | A list of all installed modules is also available:: 46 | 47 | ansible-doc -l 48 | 49 | 50 | .. seealso:: 51 | 52 | :doc:`intro_adhoc` 53 | Examples of using modules in /usr/bin/ansible 54 | :doc:`playbooks` 55 | Examples of using modules with /usr/bin/ansible-playbook 56 | :doc:`developing_modules` 57 | How to write your own modules 58 | :doc:`developing_api` 59 | Examples of using modules with the Python API 60 | `Mailing List `_ 61 | Questions? Help? Ideas? Stop by the list on Google Groups 62 | `irc.freenode.net `_ 63 | #ansible IRC chat channel 64 | 65 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks.rst: -------------------------------------------------------------------------------- 1 | Playbooks 2 | ````````` 3 | 4 | Playbooks are Ansible's configuration, deployment, and orchestration language. They can describe a policy you want your remote systems to enforce, or a set of steps in a general IT process. 5 | 6 | If Ansible modules are the tools in your workshop, playbooks are your design plans. 7 | 8 | At a basic level, playbooks can be used to manage configurations of and deployments to remote machines. At a more advanced level, they can sequence multi-tier rollouts involving rolling updates, and can delegate actions to other hosts, interacting with monitoring servers and load balancers along the way. 9 | 10 | While there's a lot of information here, there's no need to learn everything at once. You can start small and pick up more features 11 | over time as you need them. 12 | 13 | Playbooks are designed to be human-readable and are developed in a basic text language. There are multiple 14 | ways to organize playbooks and the files they include, and we'll offer up some suggestions on that and making the most out of Ansible. 15 | 16 | It is recommended to look at `Example Playbooks `_ while reading along with the playbook documentation. These illustrate best practices as well as how to put many of the various concepts together. 17 | 18 | .. 
toctree:: 19 | :maxdepth: 1 20 | 21 | playbooks_intro 22 | playbooks_roles 23 | playbooks_variables 24 | playbooks_filters 25 | playbooks_filters_ipaddr 26 | playbooks_conditionals 27 | playbooks_loops 28 | playbooks_blocks 29 | playbooks_strategies 30 | playbooks_best_practices 31 | 32 | 33 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_acceleration.rst: -------------------------------------------------------------------------------- 1 | Accelerated Mode 2 | ================ 3 | 4 | .. versionadded:: 1.3 5 | 6 | You Might Not Need This! 7 | ```````````````````````` 8 | 9 | Are you running Ansible 1.5 or later? If so, you may not need accelerated mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation. 10 | 11 | For users on 1.5 and later, accelerated mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host 12 | and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs. 13 | 14 | If you can use pipelining, Ansible will reduce the amount of files transferred over the wire, 15 | making everything much more efficient, and performance will be on par with accelerated mode in nearly all cases, possibly excluding very large file transfer. Because less moving parts are involved, pipelining is better than accelerated mode for nearly all use cases. 16 | 17 | Accelerated moded remains around in support of EL6 18 | control machines and other constrained environments. 19 | 20 | Accelerated Mode Details 21 | ```````````````````````` 22 | 23 | While OpenSSH using the ControlPersist feature is quite fast and scalable, there is a certain small amount of overhead involved in 24 | using SSH connections. While many people will not encounter a need, if you are running on a platform that doesn't have ControlPersist support (such as an EL6 control machine), you'll probably be even more interested in tuning options. 25 | 26 | Accelerated mode is there to help connections work faster, but still uses SSH for initial secure key exchange. There is no 27 | additional public key infrastructure to manage, and this does not require things like NTP or even DNS. 28 | 29 | Accelerated mode can be anywhere from 2-6x faster than SSH with ControlPersist enabled, and 10x faster than paramiko. 30 | 31 | Accelerated mode works by launching a temporary daemon over SSH. Once the daemon is running, Ansible will connect directly 32 | to it via a socket connection. Ansible secures this communication by using a temporary AES key that is exchanged during 33 | the SSH connection (this key is different for every host, and is also regenerated periodically). 34 | 35 | By default, Ansible will use port 5099 for the accelerated connection, though this is configurable. Once running, the daemon will accept connections for 30 minutes, after which time it will terminate itself and need to be restarted over SSH. 36 | 37 | Accelerated mode offers several improvements over the (deprecated) original fireball mode from which it was based: 38 | 39 | * No bootstrapping is required, only a single line needs to be added to each play you wish to run in accelerated mode. 40 | * Support for sudo commands (see below for more details and caveats) is available. 41 | * There are fewer requirements. ZeroMQ is no longer required, nor are there any special packages beyond python-keyczar 42 | * python 2.5 or higher is required. 
43 | 44 | In order to use accelerated mode, simply add `accelerate: true` to your play:: 45 | 46 | --- 47 | 48 | - hosts: all 49 | accelerate: true 50 | 51 | tasks: 52 | 53 | - name: some task 54 | command: echo {{ item }} 55 | with_items: 56 | - foo 57 | - bar 58 | - baz 59 | 60 | If you wish to change the port Ansible will use for the accelerated connection, just add the `accelerated_port` option:: 61 | 62 | --- 63 | 64 | - hosts: all 65 | accelerate: true 66 | # default port is 5099 67 | accelerate_port: 10000 68 | 69 | The `accelerate_port` option can also be specified in the environment variable ACCELERATE_PORT, or in your `ansible.cfg` configuration:: 70 | 71 | [accelerate] 72 | accelerate_port = 5099 73 | 74 | As noted above, accelerated mode also supports running tasks via sudo, however there are two important caveats: 75 | 76 | * You must remove requiretty from your sudoers options. 77 | * Prompting for the sudo password is not yet supported, so the NOPASSWD option is required for sudo'ed commands. 78 | 79 | As of Ansible version `1.6`, you can also allow the use of multiple keys for connections from multiple Ansible management nodes. To do so, add the following option 80 | to your `ansible.cfg` configuration:: 81 | 82 | accelerate_multi_key = yes 83 | 84 | When enabled, the daemon will open a UNIX socket file (by default `$ANSIBLE_REMOTE_TEMP/.ansible-accelerate/.local.socket`). New connections over SSH can 85 | use this socket file to upload new keys to the daemon. 86 | 87 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_async.rst: -------------------------------------------------------------------------------- 1 | Asynchronous Actions and Polling 2 | ================================ 3 | 4 | By default tasks in playbooks block, meaning the connections stay open 5 | until the task is done on each node. This may not always be desirable, or you may 6 | be running operations that take longer than the SSH timeout. 7 | 8 | The easiest way to do this is 9 | to kick them off all at once and then poll until they are done. 10 | 11 | You will also want to use asynchronous mode on very long running 12 | operations that might be subject to timeout. 13 | 14 | To launch a task asynchronously, specify its maximum runtime 15 | and how frequently you would like to poll for status. The default 16 | poll value is 10 seconds if you do not specify a value for `poll`:: 17 | 18 | --- 19 | 20 | - hosts: all 21 | remote_user: root 22 | 23 | tasks: 24 | 25 | - name: simulate long running op (15 sec), wait for up to 45 sec, poll every 5 sec 26 | command: /bin/sleep 15 27 | async: 45 28 | poll: 5 29 | 30 | .. note:: 31 | There is no default for the async time limit. If you leave off the 32 | 'async' keyword, the task runs synchronously, which is Ansible's 33 | default. 34 | 35 | Alternatively, if you do not need to wait on the task to complete, you may 36 | "fire and forget" by specifying a poll value of 0:: 37 | 38 | --- 39 | 40 | - hosts: all 41 | remote_user: root 42 | 43 | tasks: 44 | 45 | - name: simulate long running op, allow to run for 45 sec, fire and forget 46 | command: /bin/sleep 15 47 | async: 45 48 | poll: 0 49 | 50 | .. note:: 51 | You shouldn't "fire and forget" with operations that require 52 | exclusive locks, such as yum transactions, if you expect to run other 53 | commands later in the playbook against those same resources. 54 | 55 | .. 
note:: 56 | Using a higher value for ``--forks`` will result in kicking off asynchronous 57 | tasks even faster. This also increases the efficiency of polling. 58 | 59 | If you would like to perform a variation of the "fire and forget" where you 60 | "fire and forget, check on it later" you can perform a task similar to the 61 | following:: 62 | 63 | --- 64 | # Requires ansible 1.8+ 65 | - name: 'YUM - fire and forget task' 66 | yum: name=docker-io state=installed 67 | async: 1000 68 | poll: 0 69 | register: yum_sleeper 70 | 71 | - name: 'YUM - check on fire and forget task' 72 | async_status: jid={{ yum_sleeper.ansible_job_id }} 73 | register: job_result 74 | until: job_result.finished 75 | retries: 30 76 | 77 | .. note:: 78 | If the value of ``async:`` is not high enough, this will cause the 79 | "check on it later" task to fail because the temporary status file that 80 | the ``async_status:`` is looking for will not have been written or no longer exist 81 | 82 | .. seealso:: 83 | 84 | :doc:`playbooks` 85 | An introduction to playbooks 86 | `User Mailing List `_ 87 | Have a question? Stop by the google group! 88 | `irc.freenode.net `_ 89 | #ansible IRC chat channel 90 | 91 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_blocks.rst: -------------------------------------------------------------------------------- 1 | Blocks 2 | ====== 3 | 4 | In 2.0 we added a block feature to allow for logical grouping of tasks and even 5 | in play error handling. Most of what you can apply to a single task can be applied 6 | at the block level, which also makes it much easier to set data or directives common 7 | to the tasks. 8 | 9 | 10 | .. code-block:: YAML 11 | :emphasize-lines: 2 12 | :caption: Block example 13 | 14 | tasks: 15 | - block: 16 | - yum: name={{ item }} state=installed 17 | with_items: 18 | - httpd 19 | - memcached 20 | 21 | - template: src=templates/src.j2 dest=/etc/foo.conf 22 | 23 | - service: name=bar state=started enabled=True 24 | 25 | when: ansible_distribution == 'CentOS' 26 | become: true 27 | become_user: root 28 | 29 | 30 | In the example above the 3 tasks will be executed only when the block's when condition is met and enables 31 | privilege escalation for all the enclosed tasks. 32 | 33 | 34 | .. _block_error_handling: 35 | 36 | Error Handling 37 | `````````````` 38 | 39 | About Blocks 40 | Blocks also introduce the ability to handle errors in a way similar to exceptions in most programming languages. 41 | 42 | .. code-block:: YAML 43 | :emphasize-lines: 2,6,10 44 | :caption: Block error handling example 45 | 46 | tasks: 47 | - block: 48 | - debug: msg='i execute normally' 49 | - command: /bin/false 50 | - debug: msg='i never execute, cause ERROR!' 51 | rescue: 52 | - debug: msg='I caught an error' 53 | - command: /bin/false 54 | - debug: msg='I also never execute :-(' 55 | always: 56 | - debug: msg="this always executes" 57 | 58 | 59 | The tasks in the ``block`` would execute normally, if there is any error the ``rescue`` section would get executed 60 | with whatever you need to do to recover from the previous error. The ``always`` section runs no matter what previous 61 | error did or did not occur in the ``block`` and ``rescue`` sections. 62 | 63 | 64 | 65 | .. seealso:: 66 | 67 | :doc:`playbooks` 68 | An introduction to playbooks 69 | :doc:`playbooks_roles` 70 | Playbook organization by roles 71 | `User Mailing List `_ 72 | Have a question? Stop by the google group! 
73 | `irc.freenode.net `_ 74 | #ansible IRC chat channel 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_checkmode.rst: -------------------------------------------------------------------------------- 1 | Check Mode ("Dry Run") 2 | ====================== 3 | 4 | .. versionadded:: 1.1 5 | 6 | .. contents:: Topics 7 | 8 | When ansible-playbook is executed with ``--check`` it will not make any changes on remote systems. Instead, any module 9 | instrumented to support 'check mode' (which contains most of the primary core modules, but it is not required that all modules do 10 | this) will report what changes they would have made rather than making them. Other modules that do not support check mode will also take no action, but just will not report what changes they might have made. 11 | 12 | Check mode is just a simulation, and if you have steps that use conditionals that depend on the results of prior commands, 13 | it may be less useful for you. However it is great for one-node-at-time basic configuration management use cases. 14 | 15 | Example:: 16 | 17 | ansible-playbook foo.yml --check 18 | 19 | .. _forcing_to_run_in_check_mode: 20 | 21 | Running a task in check mode 22 | ```````````````````````````` 23 | 24 | .. versionadded:: 1.3 25 | 26 | Sometimes you may want to have a task to be executed even in check 27 | mode. To achieve this, use the `always_run` clause on the task. Its 28 | value is a Jinja2 expression, just like the `when` clause. In simple 29 | cases a boolean YAML value would be sufficient as a value. 30 | 31 | Example:: 32 | 33 | tasks: 34 | 35 | - name: this task is run even in check mode 36 | command: /something/to/run --even-in-check-mode 37 | always_run: yes 38 | 39 | As a reminder, a task with a `when` clause evaluated to false, will 40 | still be skipped even if it has a `always_run` clause evaluated to 41 | true. 42 | 43 | .. _diff_mode: 44 | 45 | Showing Differences with ``--diff`` 46 | ``````````````````````````````````` 47 | 48 | .. versionadded:: 1.1 49 | 50 | The ``--diff`` option to ansible-playbook works great with ``--check`` (detailed above) but can also be used by itself. When this flag is supplied, if any templated files on the remote system are changed, and the ansible-playbook CLI will report back 51 | the textual changes made to the file (or, if used with ``--check``, the changes that would have been made). Since the diff 52 | feature produces a large amount of output, it is best used when checking a single host at a time, like so:: 53 | 54 | ansible-playbook foo.yml --check --diff --limit foo.example.com 55 | 56 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_environment.rst: -------------------------------------------------------------------------------- 1 | Setting the Environment (and Working With Proxies) 2 | ================================================== 3 | 4 | .. versionadded:: 1.1 5 | 6 | It is quite possible that you may need to get package updates through a proxy, or even get some package 7 | updates through a proxy and access other packages not through a proxy. Or maybe a script you might wish to 8 | call may also need certain environment variables set to run properly. 9 | 10 | Ansible makes it easy for you to configure your environment by using the 'environment' keyword. 
Here is an example:: 11 | 12 | - hosts: all 13 | remote_user: root 14 | 15 | tasks: 16 | 17 | - apt: name=cobbler state=installed 18 | environment: 19 | http_proxy: http://proxy.example.com:8080 20 | 21 | The environment can also be stored in a variable, and accessed like so:: 22 | 23 | - hosts: all 24 | remote_user: root 25 | 26 | # here we make a variable named "proxy_env" that is a dictionary 27 | vars: 28 | proxy_env: 29 | http_proxy: http://proxy.example.com:8080 30 | 31 | tasks: 32 | 33 | - apt: name=cobbler state=installed 34 | environment: proxy_env 35 | 36 | While just proxy settings were shown above, any number of settings can be supplied. The most logical place 37 | to define an environment hash might be a group_vars file, like so:: 38 | 39 | --- 40 | # file: group_vars/boston 41 | 42 | ntp_server: ntp.bos.example.com 43 | backup: bak.bos.example.com 44 | proxy_env: 45 | http_proxy: http://proxy.bos.example.com:8080 46 | https_proxy: http://proxy.bos.example.com:8080 47 | 48 | .. seealso:: 49 | 50 | :doc:`playbooks` 51 | An introduction to playbooks 52 | `User Mailing List `_ 53 | Have a question? Stop by the google group! 54 | `irc.freenode.net `_ 55 | #ansible IRC chat channel 56 | 57 | 58 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_error_handling.rst: -------------------------------------------------------------------------------- 1 | Error Handling In Playbooks 2 | =========================== 3 | 4 | .. contents:: Topics 5 | 6 | Ansible normally has defaults that make sure to check the return codes of commands and modules and 7 | it fails fast -- forcing an error to be dealt with unless you decide otherwise. 8 | 9 | Sometimes a command that returns 0 isn't an error. Sometimes a command might not always 10 | need to report that it 'changed' the remote system. This section describes how to change 11 | the default behavior of Ansible for certain tasks so output and error handling behavior is 12 | as desired. 13 | 14 | .. _ignoring_failed_commands: 15 | 16 | Ignoring Failed Commands 17 | ```````````````````````` 18 | 19 | .. versionadded:: 0.6 20 | 21 | Generally playbooks will stop executing any more steps on a host that 22 | has a failure. Sometimes, though, you want to continue on. To do so, 23 | write a task that looks like this:: 24 | 25 | - name: this will not be counted as a failure 26 | command: /bin/false 27 | ignore_errors: yes 28 | 29 | Note that the above system only governs the failure of the particular task, so if you have an undefined 30 | variable used, it will still raise an error that users will need to address. 31 | 32 | .. _handlers_and_failure: 33 | 34 | Handlers and Failure 35 | ```````````````````` 36 | 37 | .. versionadded:: 1.9.1 38 | 39 | When a task fails on a host, handlers which were previously notified 40 | will *not* be run on that host. This can lead to cases where an unrelated failure 41 | can leave a host in an unexpected state. For example, a task could update 42 | a configuration file and notify a handler to restart some service. If a 43 | task later on in the same play fails, the service will not be restarted despite 44 | the configuration change. 45 | 46 | You can change this behavior with the ``--force-handlers`` command-line option, 47 | or by including ``force_handlers: True`` in a play, or ``force_handlers = True`` 48 | in ansible.cfg. When handlers are forced, they will run when notified even 49 | if a task fails on that host. 
(Note that certain errors could still prevent 50 | the handler from running, such as a host becoming unreachable.) 51 | 52 | .. _controlling_what_defines_failure: 53 | 54 | Controlling What Defines Failure 55 | ```````````````````````````````` 56 | 57 | .. versionadded:: 1.4 58 | 59 | Suppose the error code of a command is meaningless and to tell if there 60 | is a failure what really matters is the output of the command, for instance 61 | if the string "FAILED" is in the output. 62 | 63 | Ansible in 1.4 and later provides a way to specify this behavior as follows:: 64 | 65 | - name: this command prints FAILED when it fails 66 | command: /usr/bin/example-command -x -y -z 67 | register: command_result 68 | failed_when: "'FAILED' in command_result.stderr" 69 | 70 | In previous version of Ansible, this can be still be accomplished as follows:: 71 | 72 | - name: this command prints FAILED when it fails 73 | command: /usr/bin/example-command -x -y -z 74 | register: command_result 75 | ignore_errors: True 76 | 77 | - name: fail the play if the previous command did not succeed 78 | fail: msg="the command failed" 79 | when: "'FAILED' in command_result.stderr" 80 | 81 | .. _override_the_changed_result: 82 | 83 | Overriding The Changed Result 84 | ````````````````````````````` 85 | 86 | .. versionadded:: 1.3 87 | 88 | When a shell/command or other module runs it will typically report 89 | "changed" status based on whether it thinks it affected machine state. 90 | 91 | Sometimes you will know, based on the return code 92 | or output that it did not make any changes, and wish to override 93 | the "changed" result such that it does not appear in report output or 94 | does not cause handlers to fire:: 95 | 96 | tasks: 97 | 98 | - shell: /usr/bin/billybass --mode="take me to the river" 99 | register: bass_result 100 | changed_when: "bass_result.rc != 2" 101 | 102 | # this will never report 'changed' status 103 | - shell: wall 'beep' 104 | changed_when: False 105 | 106 | 107 | .. seealso:: 108 | 109 | :doc:`playbooks` 110 | An introduction to playbooks 111 | :doc:`playbooks_best_practices` 112 | Best practices in playbooks 113 | :doc:`playbooks_conditionals` 114 | Conditional statements in playbooks 115 | :doc:`playbooks_variables` 116 | All about variables 117 | `User Mailing List `_ 118 | Have a question? Stop by the google group! 119 | `irc.freenode.net `_ 120 | #ansible IRC chat channel 121 | 122 | 123 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_prompts.rst: -------------------------------------------------------------------------------- 1 | Prompts 2 | ======= 3 | 4 | When running a playbook, you may wish to prompt the user for certain input, and can 5 | do so with the 'vars_prompt' section. 6 | 7 | A common use for this might be for asking for sensitive data that you do not want to record. 8 | 9 | This has uses beyond security, for instance, you may use the same playbook for all 10 | software releases and would prompt for a particular release version 11 | in a push-script. 12 | 13 | Here is a most basic example:: 14 | 15 | --- 16 | - hosts: all 17 | remote_user: root 18 | 19 | vars: 20 | from: "camelot" 21 | 22 | vars_prompt: 23 | - name: "name" 24 | prompt: "what is your name?" 25 | - name: "quest" 26 | prompt: "what is your quest?" 27 | - name: "favcolor" 28 | prompt: "what is your favorite color?" 29 | 30 | If you have a variable that changes infrequently, it might make sense to 31 | provide a default value that can be overridden. 
This can be accomplished using 32 | the default argument:: 33 | 34 | vars_prompt: 35 | 36 | - name: "release_version" 37 | prompt: "Product release version" 38 | default: "1.0" 39 | 40 | An alternative form of vars_prompt allows for hiding input from the user, and may later support 41 | some other options, but otherwise works equivalently:: 42 | 43 | vars_prompt: 44 | 45 | - name: "some_password" 46 | prompt: "Enter password" 47 | private: yes 48 | 49 | - name: "release_version" 50 | prompt: "Product release version" 51 | private: no 52 | 53 | If `Passlib `_ is installed, vars_prompt can also crypt the 54 | entered value so you can use it, for instance, with the user module to define a password:: 55 | 56 | vars_prompt: 57 | 58 | - name: "my_password2" 59 | prompt: "Enter password2" 60 | private: yes 61 | encrypt: "sha512_crypt" 62 | confirm: yes 63 | salt_size: 7 64 | 65 | You can use any crypt scheme supported by 'Passlib': 66 | 67 | - *des_crypt* - DES Crypt 68 | - *bsdi_crypt* - BSDi Crypt 69 | - *bigcrypt* - BigCrypt 70 | - *crypt16* - Crypt16 71 | - *md5_crypt* - MD5 Crypt 72 | - *bcrypt* - BCrypt 73 | - *sha1_crypt* - SHA-1 Crypt 74 | - *sun_md5_crypt* - Sun MD5 Crypt 75 | - *sha256_crypt* - SHA-256 Crypt 76 | - *sha512_crypt* - SHA-512 Crypt 77 | - *apr_md5_crypt* - Apache’s MD5-Crypt variant 78 | - *phpass* - PHPass’ Portable Hash 79 | - *pbkdf2_digest* - Generic PBKDF2 Hashes 80 | - *cta_pbkdf2_sha1* - Cryptacular’s PBKDF2 hash 81 | - *dlitz_pbkdf2_sha1* - Dwayne Litzenberger’s PBKDF2 hash 82 | - *scram* - SCRAM Hash 83 | - *bsd_nthash* - FreeBSD’s MCF-compatible nthash encoding 84 | 85 | However, the only parameters accepted are 'salt' or 'salt_size'. You can use your own salt using 86 | 'salt', or have one generated automatically using 'salt_size'. If nothing is specified, a salt 87 | of size 8 will be generated. 88 | 89 | .. seealso:: 90 | 91 | :doc:`playbooks` 92 | An introduction to playbooks 93 | :doc:`playbooks_conditionals` 94 | Conditional statements in playbooks 95 | :doc:`playbooks_variables` 96 | All about variables 97 | `User Mailing List `_ 98 | Have a question? Stop by the google group! 99 | `irc.freenode.net `_ 100 | #ansible IRC chat channel 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_special_topics.rst: -------------------------------------------------------------------------------- 1 | Playbooks: Special Topics 2 | ````````````````````````` 3 | Here are some playbook features that not everyone may need to learn, but can be quite useful for particular applications. 4 | Browsing these topics is recommended as you may find some useful tips here, but feel free to learn the basics of Ansible first 5 | and adopt these only if they seem relevant or useful to your environment. 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | become 11 | playbooks_acceleration 12 | playbooks_async 13 | playbooks_checkmode 14 | playbooks_delegation 15 | playbooks_environment 16 | playbooks_error_handling 17 | playbooks_lookups 18 | playbooks_prompts 19 | playbooks_tags 20 | playbooks_vault 21 | playbooks_startnstep 22 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_startnstep.rst: -------------------------------------------------------------------------------- 1 | Start and Step 2 | ====================== 3 | 4 | This shows a few alternative ways to run playbooks. These modes are very useful for testing new plays or debugging. 5 | 6 | 7 | .. 
_start_at_task: 8 | 9 | Start-at-task 10 | ````````````` 11 | If you want to start executing your playbook at a particular task, you can do so with the ``--start-at-task`` option:: 12 | 13 | ansible-playbook playbook.yml --start-at-task="install packages" 14 | 15 | The above will start executing your playbook at a task named "install packages". 16 | 17 | 18 | .. _step: 19 | 20 | Step 21 | ```` 22 | 23 | Playbooks can also be executed interactively with ``--step``:: 24 | 25 | ansible-playbook playbook.yml --step 26 | 27 | This will cause ansible to stop on each task, and ask if it should execute that task. 28 | Say you had a task called "configure ssh", the playbook run will stop and ask:: 29 | 30 | Perform task: configure ssh (y/n/c): 31 | 32 | Answering "y" will execute the task, answering "n" will skip the task, and answering "c" 33 | will continue executing all the remaining tasks without asking. 34 | 35 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_strategies.rst: -------------------------------------------------------------------------------- 1 | Strategies 2 | =========== 3 | 4 | In 2.0 we added a new way to control play execution, ``strategy``, by default plays will 5 | still run as they used to, with what we call the ``linear`` strategy. All hosts will run each 6 | task before any host starts the next task, using the number of forks (default 5) to parallelize. 7 | 8 | The ``serial`` directive can 'batch' this behaviour to a subset of the hosts, which then run to 9 | completion of the play before the next 'batch' starts. 10 | 11 | A second ``strategy`` ships with ansible ``free``, which allows each host to run until the end of 12 | the play as fast as it can.:: 13 | 14 | - hosts: all 15 | strategy: free 16 | tasks: 17 | ... 18 | 19 | 20 | .. _strategy_plugins: 21 | 22 | Strategy Plugins 23 | ````````````````` 24 | 25 | The strategies are implelented via a new type of plugin, this means that in the future new 26 | execution types can be added, either locally by users or to Ansible itself by 27 | a code contribution. 28 | 29 | .. seealso:: 30 | 31 | :doc:`playbooks` 32 | An introduction to playbooks 33 | :doc:`playbooks_roles` 34 | Playbook organization by roles 35 | `User Mailing List `_ 36 | Have a question? Stop by the google group! 37 | `irc.freenode.net `_ 38 | #ansible IRC chat channel 39 | 40 | -------------------------------------------------------------------------------- /ansible-v2/rst/playbooks_tags.rst: -------------------------------------------------------------------------------- 1 | Tags 2 | ==== 3 | 4 | If you have a large playbook it may become useful to be able to run a 5 | specific part of the configuration without running the whole playbook. 6 | 7 | Both plays and tasks support a "tags:" attribute for this reason. 
8 | 9 | Example:: 10 | 11 | tasks: 12 | 13 | - yum: name={{ item }} state=installed 14 | with_items: 15 | - httpd 16 | - memcached 17 | tags: 18 | - packages 19 | 20 | - template: src=templates/src.j2 dest=/etc/foo.conf 21 | tags: 22 | - configuration 23 | 24 | If you wanted to just run the "configuration" and "packages" part of a very long playbook, you could do this:: 25 | 26 | ansible-playbook example.yml --tags "configuration,packages" 27 | 28 | On the other hand, if you want to run a playbook *without* certain tasks, you could do this:: 29 | 30 | ansible-playbook example.yml --skip-tags "notification" 31 | 32 | You may also apply tags to roles:: 33 | 34 | roles: 35 | - { role: webserver, port: 5000, tags: [ 'web', 'foo' ] } 36 | 37 | And you may also tag basic include statements:: 38 | 39 | - include: foo.yml tags=web,foo 40 | 41 | Both of these apply the specified tags to every task inside the included 42 | file or role, so that these tasks can be selectively run when the playbook 43 | is invoked with the corresponding tags. 44 | 45 | Special Tags 46 | ```````````` 47 | 48 | There is a special 'always' tag that will always run a task, unless specifically skipped (--skip-tags always) 49 | 50 | Example:: 51 | 52 | tasks: 53 | 54 | - debug: msg="Always runs" 55 | tags: 56 | - always 57 | 58 | - debug: msg="runs when you use tag1" 59 | tags: 60 | - tag1 61 | 62 | There are another 3 special keywords for tags, 'tagged', 'untagged' and 'all', which run only tagged, only untagged 63 | and all tasks respectively. 64 | 65 | By default ansible runs as if '--tags all' had been specified. 66 | 67 | 68 | .. seealso:: 69 | 70 | :doc:`playbooks` 71 | An introduction to playbooks 72 | :doc:`playbooks_roles` 73 | Playbook organization by roles 74 | `User Mailing List `_ 75 | Have a question? Stop by the google group! 76 | `irc.freenode.net `_ 77 | #ansible IRC chat channel 78 | 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /ansible-v2/rst/quickstart.rst: -------------------------------------------------------------------------------- 1 | Quickstart Video 2 | ```````````````` 3 | 4 | We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation. 5 | 6 | The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your 7 | first steps with Ansible. 8 | 9 | Enjoy, and be sure to visit the rest of the documentation to learn more. 10 | -------------------------------------------------------------------------------- /ansible-v2/rst/tower.rst: -------------------------------------------------------------------------------- 1 | Ansible Tower 2 | ````````````` 3 | 4 | `Ansible Tower `_ (formerly 'AWX') is a web-based solution that makes Ansible even more easy to use for IT teams of all kinds. It's designed to be the hub for all of your automation tasks. 5 | 6 | Tower allows you to control access to who can access what, even allowing sharing of SSH credentials without someone being able to transfer those credentials. Inventory can be graphically managed or synced with a wide variety of cloud sources. It logs all of your jobs, integrates well with LDAP, and has an amazing browsable REST API. Command line tools are available for easy integration with Jenkins as well. Provisioning callbacks provide great support for autoscaling topologies. 7 | 8 | Find out more about Tower features and how to download it on the `Ansible Tower webpage `_. 
Tower 9 | is free for usage for up to 10 nodes, and comes bundled with amazing support from Ansible, Inc. As you would expect, Tower is 10 | installed using Ansible playbooks! 11 | -------------------------------------------------------------------------------- /ansible-v2/variables.dot: -------------------------------------------------------------------------------- 1 | digraph G { 2 | 3 | subgraph cluster_0 { 4 | "command line variables" -> "--extra-args" 5 | } 6 | 7 | subgraph cluster_1 { 8 | "role variables" -> "roles/rolename/vars.yml" -> "parameters passed to role" -> "parameters from dependent roles" 9 | } 10 | 11 | subgraph cluster_2 { 12 | "top-level playbook variables" -> "vars: directives" -> "vars_files: directives"; 13 | } 14 | 15 | subgraph cluster_3 { 16 | "inventory variables" -> "group_vars/all" -> "group_vars/grandparent1" -> "group_vars/parent1" -> "host_vars/myhostname"; 17 | "group_vars/all" -> "group_vars/grandparent2"; 18 | "group_vars/grandparent1" -> "group_vars/parent2" 19 | "group_vars/grandparent2" -> "host_vars/myhostname"; 20 | "group_vars/parent2" -> "host_vars/myhostname" 21 | } 22 | 23 | subgraph cluster_4 { 24 | "facts" -> "gathered host facts" 25 | "facts" -> "host facts from /etc/ansible/facts.d" 26 | "facts" -> "set_fact" 27 | "facts" -> "include_vars" 28 | } 29 | 30 | subgraph cluster_5 { 31 | "role defaults" -> "roles/rolename/defaults.yml" 32 | } 33 | 34 | "command line variables" -> "role variables" -> "top-level playbook variables" -> "inventory variables" -> "role defaults" -> "facts" 35 | 36 | 37 | 38 | } 39 | -------------------------------------------------------------------------------- /ansible/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /ansible/a.j2: -------------------------------------------------------------------------------- 1 | {%for host in groups['web01']%} 2 | {{host}} 3 | {{hostvars[inventory_hostname]['ansible_eth0']['ipv4']['address']}} 4 | {%endfor%} 5 | -------------------------------------------------------------------------------- /ansible/a.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | name: install httpd 4 | gather_facts: true 5 | 6 | tasks: 7 | - name: install httpd 8 | yum: name={{ item }} state=installed 9 | with_items: 10 | - httpd 11 | - httpd-devel 12 | when: ansible_os_family == "Redhat" 13 | notify: 14 | - restart apache2 15 | 16 | - name: install httpd 17 | apt: name={{ item }} state=installed 18 | with_items: 19 | - apache2 20 | - apache2-dev 21 | when: ansible_os_family == "Debian" 22 | notify: 23 | - restart apache2 24 | 25 | handlers: 26 | - name: restart httpd 27 | service: name=httpd state=restarted 28 | when: ansible_os_family == "Debian" 29 | 30 | - name: restart apache2 31 | service: name=apache2 state=restarted 32 | when: ansible_os_family == "Debian" 33 | -------------------------------------------------------------------------------- /ansible/async.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | tasks: 5 | - name: run a command that costs a very long time 6 | #command: /bin/sleep 7 7 | apt: name=docker.io state=installed 8 | async: 4000 9 | poll: 0 10 | register: result 11 | 12 | - name: result 13 | debug: msg={{result.ansible_job_id}} 14 | 15 | - name: jobid 16 | async_status: 
jid={{result.ansible_job_id}} 17 | register: jobresult 18 | until: jobresult.finished 19 | retries: 30 20 | delay: 2 21 | 22 | - name: async mesg 23 | debug: msg={{jobresult.finished}} 24 | -------------------------------------------------------------------------------- /ansible/dynamic_inventory.rst: -------------------------------------------------------------------------------- 1 | 2 | ==================== 3 | Dynamic Inventory 4 | ==================== 5 | 6 | .. contents:: Topics 7 | 8 | Ansible provides a basic text-based system as described in :ref:`inventory_intro`, and it also supports pulling inventory from dynamic or cloud sources via an external inventory script. 9 | 10 | Example: AWS EC2 External Inventory Script 11 | ============================================ 12 | 13 | You can use this `script`_ in one of two ways. The easiest is to use Ansible's ``-i`` command line option and specify the path to the script after marking it executable:: 14 | 15 | ansible -i ec2.py -u ubuntu us-east-1d -m ping 16 | 17 | The second option is to copy the script to ``/etc/ansible/hosts`` and ``chmod +x`` it. You will also need to copy the `ec2.ini`_ file to ``/etc/ansible/ec2.ini``. 18 | 19 | .. _`script`: https://raw.github.com/ansible/ansible/devel/plugins/inventory/ec2.py 20 | .. _`ec2.ini`: https://raw.githubusercontent.com/ansible/ansible/devel/plugins/inventory/ec2.ini 21 | 22 | Using Multiple Inventory Sources 23 | ================================== 24 | 25 | If the location given to ``-i`` in Ansible is a directory, Ansible can use multiple inventory sources at the same time. 26 | 27 | Static Groups of Dynamic Groups 28 | ================================= 29 | 30 | When defining a static group of dynamic child groups, define the dynamic groups as empty in the static inventory file:: 31 | 32 | [foo] 33 | 34 | [bar] 35 | 36 | [cluster:children] 37 | foo 38 | bar 39 | 40 | 41 | ======================================== 42 | Developing Dynamic Inventory Sources 43 | ======================================== 44 | 45 | You just need to create a script or program that can return JSON in the right format when fed the proper arguments. 46 | 47 | Script Conventions 48 | ==================== 49 | 50 | When the external node script is called with the single argument ``--list``, the script must return a JSON dictionary of all the groups to be managed. 51 | 52 | :: 53 | 54 | { 55 | "databases": { 56 | "hosts": ["host01", "host02"], 57 | "vars": { 58 | "foo": "bar", 59 | "flag": true 60 | } 61 | }, 62 | "webservers": ["web01", "web02"], 63 | "apps": { 64 | "hosts": ["backend01", "backend02"], 65 | "vars": { 66 | "local": false 67 | }, 68 | "children": ["databases", "webservers"] 69 | } 70 | } 71 | 72 | When called with the argument ``--host <hostname>``, the script must return either an empty JSON dictionary, or a dictionary of variables to make available to templates and playbooks.
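For example, calling the script with ``--host host01`` might return something like the following (a minimal sketch; the variable names are borrowed from the static inventory examples elsewhere in these notes and are not part of any real script's output)::

    {
        "app_port": 3876,
        "proxy": "proxy.local.com"
    }

Returning an empty dictionary (``{}``) is also acceptable when a host has no host-specific variables.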
73 | -------------------------------------------------------------------------------- /ansible/filter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | gather_facts: false 5 | vars: 6 | l: ['a','b','c'] 7 | n: [37,91,3,52,7,9,7,23,9] 8 | m: [91,52,7,23,9,2,4] 9 | digit: 2 10 | secret: 'banana' 11 | addr: '9.9.0.1/24' 12 | word: 'A1122ED' 13 | content: 'multimedia' 14 | tasks: 15 | - name: random filter in list l 16 | debug: msg="{{l|random}}" 17 | - name: random filter from 30 to 80 with steps of 3 18 | debug: msg="{{80|random(start=30,step=3)}}" 19 | - name: set theory filter 20 | debug: msg="{{n|unique}}" 21 | - name: list filter 22 | debug: msg="{{n|max}}" 23 | - name: set theory filter 24 | debug: msg="{{n|difference(m)}}" 25 | - name: shuffle a list 26 | debug: msg="{{l|shuffle}}" 27 | - name: shuffle a list again 28 | debug: msg="{{l|shuffle}}" 29 | - name: if it is a number 30 | debug: msg="{{digit}} is a number {{digit|isnan}}" 31 | - name: logarithm 32 | debug: msg="{{digit|log}}" 33 | - name: power 34 | debug: msg="{{digit|pow(3)}}" 35 | - name: root 36 | debug: msg="{{digit|root(2)}}" 37 | - name: ipaddr 38 | debug: msg="{{addr|ipaddr}}" 39 | - name: ipaddr 40 | debug: msg="{{addr|ipaddr('address')}}" 41 | - name: hash sha1 string 42 | debug: msg="{{secret|hash('sha1')}}" 43 | - name: hash md5 string 44 | debug: msg="{{secret|hash('md5')}}" 45 | - name: hash checksum string 46 | debug: msg="{{secret|checksum}}" 47 | - name: To use one value on true and another on false 48 | debug: msg="{{ (digit==2)|ternary('two','not two')}}" 49 | - name: a string seprated with - 50 | debug: msg="{{l|join('-')}}" 51 | - name: basename 52 | debug: msg="{{'/usr/bin/python'|basename}}" 53 | - name: dirname 54 | debug: msg="{{'/usr/bin/python'|dirname }}" 55 | - name: user expand 56 | debug: msg="{{'~/Download'|expanduser}}" 57 | - name: encode base64 58 | debug: msg="{{'HelloWorld!'|b64encode}}" 59 | - name: decode base64 60 | debug: msg="{{'SGVsbG9Xb3JsZCE='|b64decode}}" 61 | - name: generate uuid 62 | debug: msg="{{'hostname'|to_uuid}}" 63 | - name: cast values 64 | debug: msg="{{ 'yes'|bool}}" 65 | - name: match filter 66 | debug: msg="{{ 'A1122ED'|match('^[A-Z]\d{4}[A-Z]{2}')}}" 67 | when: word|match('^[A-Z]\d{4}[A-Z]{2}') 68 | - name: search filter 69 | debug: msg="{{ 'A1122ED'|match('^[A-Z]\d{4}[A-Z]{2}')}}" 70 | when: word|match('^[A-Z]') 71 | - name: regex replace -- result should be media 72 | debug: msg="{{content|regex_replace('^m.*?i(.*)$','\\1')}}" 73 | - name: regex replace -- result should be multiple 74 | debug: msg="{{content|regex_replace('^(m.*?i)(.*)$','\\1ple')}}" 75 | -------------------------------------------------------------------------------- /ansible/hosts: -------------------------------------------------------------------------------- 1 | [keystone] 2 | 127.0.0.1 3 | -------------------------------------------------------------------------------- /ansible/index.rst: -------------------------------------------------------------------------------- 1 | .. PAnsible documentation master file, created by 2 | sphinx-quickstart on Mon May 25 19:36:04 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | Welcome to PAnsible's documentation! 6 | ==================================== 7 | 8 | Ansible 9 | ========== 10 | 11 | .. 
toctree:: 12 | :maxdepth: 3 13 | 14 | playbooks_intro 15 | install 16 | playbooks_roles 17 | playbooks_best_practise 18 | inventory_intro 19 | dynamic_inventory 20 | playbooks_variables 21 | playbooks_conditionals 22 | playbooks_loops 23 | playbooks_special_topics 24 | tips 25 | CDnRU 26 | test_strategies 27 | playbooks_filters 28 | ansible-quickref 29 | ec2 30 | facts 31 | docker 32 | ansible_cfg 33 | yaml_syntax 34 | ansible-module-doc 35 | 36 | .. Indices and tables 37 | ================== 38 | 39 | * :ref:`genindex` 40 | * :ref:`modindex` 41 | * :ref:`search` 42 | 43 | -------------------------------------------------------------------------------- /ansible/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | ====================== 4 | Ansible quick start 5 | ====================== 6 | 7 | install 8 | ========== 9 | 10 | :: 11 | 12 | yum install ansible # RHEL/CentOS/Fedora 13 | 14 | apt-get install ansible # Debian/Ubuntu 15 | 16 | emerge -avt ansible # Gentoo/Funtoo 17 | 18 | pip install ansible # will also install paramiko PyYAML jinja2 19 | 20 | Inventory 21 | =========== 22 | 23 | Inventory文件用来定义需要管理的主机,默认位置在 ``/etc/ansible/hosts`` , 使用 ``-i`` 选项指定非默认位置。 24 | 25 | 被管理的机器通过IP或域名指定,未分组的机器保留在hosts文件的顶部,分组使用 ``[]`` 指定,同时支持嵌套:: 26 | 27 | [miss] 28 | dn0[1:3] 29 | 30 | [lvs:children] 31 | web 32 | db 33 | 34 | 在配置中可以为主机设定端口、默认用户、私钥等参数:: 35 | 36 | [db] 37 | orcl01.example.com ansible_ssh_user=oracle 38 | 39 | Try it out 40 | ============= 41 | 42 | ``ansible -i hosts all -m ping -u root -f 3`` 43 | 44 | :: 45 | 46 | *. -i 指定了inventory文件,当前目录下的hosts文件,不指定则使用默认位置。 47 | 48 | *. all 针对hosts定义的所有主机执行,可以指定组名或者模式 49 | 50 | *. -m 指定所使用的模块,不指定则使用默认模块 command 51 | 52 | *. -u 指定远端机器的用户 53 | 54 | *. -a 指定传给模块的参数,Ansible以键值对(Key-Value)的方式接受参数,并以JSON格式返回结果。 55 | 56 | *. 
-f 同一时间只在指定数量的机器上执行 57 | 58 | Ad-Hoc 简单任务 59 | ================== 60 | 61 | 执行命令 62 | ------------ 63 | 64 | ``ansible -m raw -a 'yum install -y python-simplejson'`` 65 | 66 | 可以在对方机器上python版本为2.4或者其他没有装python的设备上使用raw模块执行命令。类似于直接在远端执行 shell命令。 67 | 68 | ``ansible-doc raw`` 69 | 70 | 传输文件 71 | ----------- 72 | 73 | ``ansible all -m copy -a "src=/etc/hosts dest=/tmp/hosts"`` 74 | 75 | 76 | 模块 77 | ======= 78 | 79 | setup 80 | ---------- 81 | 82 | ``ansible -i hosts -m setup -a 'filter="ansible_processor_count"'`` 83 | 可以获取主机的系统信息,并将其保存在内置变量中方便其他模块进行引用。 84 | 85 | user 86 | ----- 87 | 88 | ``ansible -i hosts -m user -a 'name="test" shell="/bin/false"'`` 89 | 90 | file 91 | ------ 92 | 93 | ``ansible -i hosts -m file -a 'path=/etc/fstab'`` 94 | 95 | ``ansible -i hosts -m file -a 'path=/tmp/ksops state=directory mode=0755 owner=nobody'`` 96 | 97 | ``ansible -i hosts -m file -a 'path=/tmp/file state=absent'`` 98 | 99 | copy 100 | ------ 101 | 102 | ``ansible -i hosts -m copy -a 'src=vimrc dest=/root/.vimrc mode=644 owner=root'`` 103 | 104 | command 105 | --------- 106 | 107 | ``ansible -i hosts -m command -a 'rm -rfv /tmp/test removes=/tmp/test'`` 108 | 109 | ``ansible -i hosts -m command -a 'touch /tmp/test creates=/tmp/test'`` 110 | 111 | 使用command模块无法通过返回值判断命令是否执行成功,如果定义了creates属性,当文件存在时,不执行创建文件的命令,如果定义了removes属性,当文件不存在时,不执行删除文件的命令。 112 | 113 | shell 114 | ------- 115 | 116 | ``ansible -i hosts -m shell -a '/home/monitor.sh > /home/applog/test.log creates=/home/applog/test.log'`` 117 | 118 | shell模块提供了对重定向、管道、后台任务等特性的支持。 119 | 120 | Playbook 复杂任务 121 | =================== 122 | 123 | 创建新用户:: 124 | 125 | cat user.yml 126 | 127 | --- 128 | - name:create user #Playbook名称 129 | hosts:db #起作用的主机组 130 | user:root #以指定用户执行 131 | gather_facts:false #收集远端机器的相关信息 132 | 133 | vars: #定义变量 user 134 | - user: "toy" 135 | tasks: #指定要执行的任务 136 | - name:create {{user}} on miss 137 | user:name="{{user}}" 138 | -------------------------------------------------------------------------------- /ansible/inventory_intro.rst: -------------------------------------------------------------------------------- 1 | .. _inventory_intro: 2 | 3 | ============== 4 | Inventory 5 | ============== 6 | 7 | .. contents:: Topics 8 | 9 | Multiple inventory files can be used at the same time and also pull inventory from ``dynamic`` or ``cloud sources`` as described in :ref:`dynamic_inventory` . 10 | 11 | 12 | Hosts and Groups 13 | ================== 14 | 15 | ``/etc/ansible/hosts`` is an INI-like format hostfile.To make things explicit, you could describe hosts like this:: 16 | 17 | [webserver] 18 | ws01.bj.example.com 19 | ws02.bj.example.com:3322 20 | [dbserver] 21 | ws01.bj.example.com 22 | 23 | [ops] 24 | jumper ansible_ssh_port=6222 ansible_ssh_host=192.168.1.80 25 | 26 | Patterns like ``[1-5] [a-z]`` can be included to define large range of hosts. 
27 | 28 | :: 29 | 30 | [lbserver] 31 | lvs[a-c][00:21].example.com 32 | 33 | Host Variables 34 | ================ 35 | 36 | It is easy to assign variables to hosts that will be used later in playbooks:: 37 | 38 | [vast] 39 | server01 app_port=3876 Maxrequests=300 40 | server03 app_port=3876 Maxrequests=500 41 | 42 | Group Variables 43 | ================= 44 | 45 | However, variables can also be applied to an entire group at once:: 46 | 47 | [vast] 48 | server01 49 | server02 50 | server03 51 | 52 | [vast:vars] 53 | proxy=proxy.local.com 54 | nextJump=next.local.com 55 | 56 | Groups of Groups, and Group Variables 57 | ========================================= 58 | 59 | It is possible to make groups of groups and assign variables to groups. These variables can be used by ``ansible-playbook`` , but not ``ansible`` . 60 | 61 | :: 62 | 63 | [region01] 64 | server01 65 | server02 66 | 67 | [region02] 68 | serverA 69 | serverB 70 | 71 | [region03] 72 | serverX 73 | serverY 74 | 75 | [large:children] 76 | region01 77 | region02 78 | 79 | [large:vars] 80 | hostDistance=2 81 | allowFailure=False 82 | 83 | Splitting Out Host and Group Specific Data 84 | ============================================ 85 | 86 | The preferred practice in Ansible is actually not to store variables in the main inventory file. Host and group variables can be stored in individual files relative to the inventory file. 87 | 88 | Assuming the inventory file path is:: 89 | 90 | /etc/ansible/hosts 91 | 92 | You can assign varialbes in ``/etc/ansible/group_vars/region02`` ``/etc/ansible/host_vars/server01`` 93 | 94 | List of Behavioral Inventory Parameters 95 | ========================================= 96 | 97 | :: 98 | 99 | 100 | ansible_ssh_host 101 | The name of the host to connect to, if different from the alias you wish to give to it. 102 | ansible_ssh_port 103 | The ssh port number, if not 22 104 | ansible_ssh_user 105 | The default ssh user name to use. 106 | ansible_ssh_pass 107 | The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys) 108 | ansible_sudo 109 | The boolean to decide if sudo should be used for this host. Defaults to false. 110 | ansible_sudo_pass 111 | The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass) 112 | ansible_sudo_exe (new in version 1.8) 113 | The sudo command path. 114 | ansible_connection 115 | Connection type of the host. Candidates are local, ssh or paramiko. The default is paramiko before Ansible 1.2, and 'smart' afterwards which detects whether usage of 'ssh' would be feasible based on whether ControlPersist is supported. 116 | ansible_ssh_private_key_file 117 | Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent. 118 | ansible_shell_type 119 | The shell type of the target system. Commands are formatted using 'sh'-style syntax by default. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. 120 | ansible_python_interpreter 121 | The target host python path. This is useful for systems with more 122 | than one Python or not located at "/usr/bin/python" such as \*BSD, or where /usr/bin/python 123 | is not a 2.X series Python. We do not use the "/usr/bin/env" mechanism as that requires the remote user's 124 | path to be set right and also assumes the "python" executable is named python, where the executable might 125 | be named something like "python26". 
126 | ansible\_\*\_interpreter 127 | Works for anything such as ruby or perl and works just like ansible_python_interpreter. 128 | This replaces shebang of modules which will run on that host. 129 | -------------------------------------------------------------------------------- /ansible/keystone.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: keystone 3 | gather_facts: True 4 | remote_user: root 5 | 6 | tasks: 7 | - name: install keystone 8 | apt: pkg=keystone state=present 9 | when: ansible_os_family == "Debian" 10 | 11 | - name: install keystone 12 | yum: name=keystone state=present 13 | when: ansible_os_family == "RedHat" 14 | -------------------------------------------------------------------------------- /ansible/list: -------------------------------------------------------------------------------- 1 | acl 2 | add_host 3 | airbrake_deployment 4 | apt 5 | apt_key 6 | apt_repository 7 | arista_interface 8 | arista_l2interface 9 | arista_lag 10 | arista_vlan 11 | assemble 12 | assert 13 | at 14 | authorized_key 15 | bigip_monitor_http 16 | bigip_monitor_tcp 17 | bigip_node 18 | bigip_pool 19 | bigip_pool_member 20 | boundary_meter 21 | bzr 22 | campfire 23 | cloudformation 24 | command 25 | copy 26 | cron 27 | datadog_event 28 | debug 29 | digital_ocean 30 | django_manage 31 | dnsmadeeasy 32 | docker 33 | docker_image 34 | easy_install 35 | ec2 36 | ec2_ami 37 | ec2_eip 38 | ec2_elb 39 | ec2_elb_lb 40 | ec2_facts 41 | ec2_group 42 | ec2_key 43 | ec2_snapshot 44 | ec2_tag 45 | ec2_vol 46 | ec2_vpc 47 | ejabberd_user 48 | elasticache 49 | facter 50 | fail 51 | fetch 52 | file 53 | filesystem 54 | fireball 55 | firewalld 56 | flowdock 57 | gc_storage 58 | gce 59 | gce_lb 60 | gce_net 61 | gce_pd 62 | gem 63 | get_url 64 | git 65 | github_hooks 66 | glance_image 67 | group 68 | group_by 69 | grove 70 | hg 71 | hipchat 72 | homebrew 73 | hostname 74 | htpasswd 75 | include_vars 76 | ini_file 77 | irc 78 | jabber 79 | jboss 80 | kernel_blacklist 81 | keystone_user 82 | lineinfile 83 | linode 84 | lvg 85 | lvol 86 | macports 87 | mail 88 | miss 89 | modprobe 90 | mongodb_user 91 | monit 92 | mount 93 | mqtt 94 | mysql_db 95 | mysql_replication 96 | mysql_user 97 | mysql_variables 98 | nagios 99 | netscaler 100 | newrelic_deployment 101 | nova_compute 102 | nova_keypair 103 | npm 104 | ohai 105 | open_iscsi 106 | openbsd_pkg 107 | openvswitch_bridge 108 | openvswitch_port 109 | opkg 110 | osx_say 111 | ovirt 112 | pacman 113 | pagerduty 114 | pause 115 | ping 116 | pingdom 117 | pip 118 | pkgin 119 | pkgng 120 | pkgutil 121 | portinstall 122 | postgresql_db 123 | postgresql_privs 124 | postgresql_user 125 | quantum_floating_ip 126 | quantum_floating_ip_a 127 | quantum_network 128 | quantum_router 129 | quantum_router_gatewa 130 | quantum_router_interf 131 | quantum_subnet 132 | rabbitmq_parameter 133 | rabbitmq_plugin 134 | rabbitmq_policy 135 | rabbitmq_user 136 | rabbitmq_vhost 137 | raw 138 | rax 139 | rax_clb 140 | rax_clb_nodes 141 | rax_dns 142 | rax_dns_record 143 | rax_facts 144 | rax_files 145 | rax_files_objects 146 | rax_keypair 147 | rax_network 148 | rax_queue 149 | rds 150 | redhat_subscription 151 | redis 152 | rhn_channel 153 | rhn_register 154 | riak 155 | route53 156 | rpm_key 157 | s3 158 | script 159 | seboolean 160 | selinux 161 | service 162 | set_fact 163 | setup 164 | shell 165 | slurp 166 | stat 167 | subversion 168 | supervisorctl 169 | svr4pkg 170 | swdepot 171 | synchronize 172 | sysctl 173 | template 174 | 
unarchive 175 | uri 176 | urpmi 177 | user 178 | virt 179 | wait_for 180 | xattr 181 | yum 182 | zfs 183 | zypper 184 | zypper_repository 185 | -------------------------------------------------------------------------------- /ansible/loops.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | gather_facts: true 5 | vars: 6 | users: 7 | alice: 8 | name: Alice Brown 9 | tele: 12121212121 10 | bob: 11 | name: Bob Brown 12 | tele: 9494949949 13 | 14 | tasks: 15 | # - name: loop with hashed 16 | # debug: msg="User {{item.key}} is {{item.value.name}} {{item.value.tele}}" 17 | # with_dict: users 18 | # 19 | # - name: nested loops 20 | # debug: msg="{{item[0]}} {{item[1]}}" 21 | # with_nested: 22 | # - ["outerloop1", "outerloop2"] 23 | # - ["innerloop1", "innerloop2", "innerloop3"] 24 | # 25 | # - name: file globs 26 | # copy: src={{item}} dest=/tmp/ 27 | # with_fileglob: 28 | # - /etc/default/a* 29 | # - name: sequence loops 30 | # file: dest=/tmp/loops.{{item}} state=touch 31 | # with_sequence: start=4 end=23 stride=3 32 | # register: loop 33 | # 34 | # - name: save loop register 35 | # debug: msg="{{loop}}" 36 | - name: file globs 37 | debug: msg="{{item}}" 38 | with_fileglob: 39 | - /home/susu/.ssh/* 40 | -------------------------------------------------------------------------------- /ansible/playbooks_loops.rst: -------------------------------------------------------------------------------- 1 | .. _playbooks_loops: 2 | 3 | ======= 4 | Loops 5 | ======= 6 | 7 | .. contents:: Topics 8 | 9 | Standard Loops 10 | ================ 11 | 12 | :: 13 | 14 | - name: common packages 15 | apt: name={{item.name}} state={{item.state}} 16 | with_items: 17 | - { name: "vim-enhanced", state: "present" } 18 | - { name: "build-essentials", state: "present" } 19 | - { name: "firefox", state: "absent" } 20 | 21 | To save some typing of repeated sections, we use *loop* statements. They work fine with a YAML list in a variables file or in the vars section. 22 | 23 | Nested Loops 24 | ============== 25 | 26 | :: 27 | 28 | - name: make three copies of original file 29 | copy: src={{item[0]}} dest={{item[1]}} 30 | with_nested: 31 | - ["foo.conf", "bar.conf"] 32 | - ["/etc/app.d/", "/usr/share/app.d/", "/usr/local/app.d"] 33 | 34 | Looping over Hashes 35 | ===================== 36 | 37 | Suppose you have the following variable: 38 | 39 | :: 40 | 41 | --- 42 | users: 43 | alice: 44 | name: Alice Brown 45 | tele: 32323232 46 | bob: 47 | name: Bob Green 48 | tele: 1010100101 49 | 50 | You can loop through the elements of a hash using ``with_dict`` like this:: 51 | 52 | tasks: 53 | - name: print info 54 | debug: msg="User {{item.key}} is {{item.value.name}} {{item.value.tele}}" 55 | with_dict: "{{users}}" 56 | 57 | Looping with Fileglobs 58 | ======================= 59 | 60 | *with_fileglob* matches all files in a single directory, non-recursively, that match a pattern:: 61 | 62 | --- 63 | - hosts: apps 64 | tasks: 65 | - file: dest=/etc/apps state=directory 66 | - copy: src={{item}} dest=/etc/apps owner=root mode=600 67 | with_fileglob: 68 | - /playbooks/files/apps/*.conf 69 | 70 | .. 
note:: 71 | 72 | When using a relative path with *with_fileglob* in a role, Ansible resolves the path relative to the roles/<rolename>/files directory 73 | 74 | Looping over Integer Sequences 75 | ================================ 76 | 77 | :: 78 | 79 | tasks: 80 | - name: sequence loops 81 | file: dest=/tmp/loops.{{item}} state=touch 82 | with_sequence: start=4 end=23 stride=3 83 | 84 | 85 | Random Choices 86 | ================= 87 | 88 | Random choice is not a good load-balancing approach. It can be used to add chaos and excitement to otherwise predictable automation environments:: 89 | 90 | tasks: 91 | - name: delete files by random choice 92 | file: src={{item}} state=absent 93 | with_random_choice: 94 | - "/root/.bashrc" 95 | - "/etc/default/grub" 96 | - "/var/log/message" 97 | 98 | Do-Until Loops 99 | ================ 100 | 101 | :: 102 | 103 | tasks: 104 | - action: shell /usr/bin/foo 105 | register: result 106 | until: result.stdout.find("all systems go") != -1 107 | retries: 5 108 | delay: 10 109 | 110 | Do-Until loops retry a task until a certain condition is met. 111 | -------------------------------------------------------------------------------- /ansible/playbooks_roles.rst: -------------------------------------------------------------------------------- 1 | .. _playbook_roles: 2 | 3 | =============== 4 | Playbook Roles 5 | =============== 6 | 7 | .. contents:: Topics 8 | 9 | Task Include Files And Encouraging Reuse 10 | ============================================== 11 | 12 | The goal of a play in a playbook is to map a group of systems into multiple roles:: 13 | 14 | --- 15 | # possibly saved as tasks/foo.yml 16 | - name: foo 17 | command: /bin/foo 18 | - name: bar 19 | command: /bin/bar 20 | 21 | --- 22 | tasks: 23 | - include: tasks/foo.yml 24 | 25 | You can also pass variables into includes:: 26 | 27 | tasks: 28 | - include: wordpress.yml wp_user=timmy 29 | - include: wordpress.yml wp_user=alice 30 | - include: wordpress.yml wp_user=bob 31 | 32 | Includes can also be used in the 'handlers' section:: 33 | 34 | --- 35 | # handlers/handlers.yml 36 | - name: restart apache 37 | service: name=apache state=restarted 38 | 39 | --- 40 | # in your main playbook file 41 | handlers: 42 | - include: handlers/handlers.yml 43 | 44 | Roles 45 | ======= 46 | 47 | Roles are ways of automatically loading certain vars_files, tasks, and handlers based on a known file structure. Grouping content by roles also allows easy sharing of roles with other users. 48 | 49 | Example project structure:: 50 | 51 | site.yml 52 | webservers.yml 53 | fooservers.yml 54 | roles/ 55 | common/ 56 | files/ 57 | templates/ 58 | tasks/ 59 | handlers/ 60 | vars/ 61 | defaults/ 62 | meta/ 63 | webservers/ 64 | files/ 65 | templates/ 66 | tasks/ 67 | handlers/ 68 | vars/ 69 | defaults/ 70 | meta/ 71 | 72 | In a playbook, it would look like this:: 73 | 74 | --- 75 | - hosts: webservers 76 | roles: 77 | - common 78 | - webservers 79 | - { role: foo_app_instance, dir: '/opt/a', port: 5000 } 80 | - { role: some_role, when: "ansible_os_family == 'RedHat'" } 81 | - { role: foo, tags: ["bar", "baz"] } 82 | 83 | main.yml in tasks/ handlers/ vars/ will all be added to the play, and main.yml in meta/ will add any role dependencies to the list of roles. 84 | 85 | copy tasks and script tasks can reference files in roles/xxx/files/ without having to path them relatively or absolutely. 86 | 87 | template tasks can reference files in roles/xxx/templates/ without having to path them relatively or absolutely.
88 | 89 | include tasks can reference files in roles/x/tasks/ without having to path them relatively or absolutely. 90 | 91 | Role Default Variables 92 | ======================= 93 | 94 | Role default variables allow you to set default variables for included or dependent roles (see below). To create defaults, simply add a defaults/main.yml file in your role directory. These variables will have the lowest priority of any variables available, and can be easily overridden by any other variable, including inventory variables. 95 | 96 | Role Dependencies 97 | =================== 98 | 99 | Role dependencies are stored in the meta/main.yml file contained within the role directory:: 100 | 101 | --- 102 | dependencies: 103 | - { role: common, some_parameter: 3 } 104 | - { role: apache, port: 80 } 105 | - { role: postgres, dbname: blarg, other_parameter: 12 } 106 | - { role: '/path/to/common/roles/foo', x: 1 } 107 | 108 | Roles dependencies are always executed before the role that includes them, and are recursive. 109 | 110 | By default, roles can also only be added as a dependency once - if another role also lists it as a dependency it will not be run again. This behavior can be overridden by adding allow_duplicates: yes to the meta/main.yml file. 111 | 112 | Embedding Modules In Roles 113 | ============================= 114 | 115 | You may distribute a custom module as part of a role. You could add a directory named 'library' which include the module directly inside of it. And the module will be usable in the role itselft, as well as any roles that are called after this role. 116 | -------------------------------------------------------------------------------- /ansible/t.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: susu 4 | gather_facts: true 5 | tasks: 6 | - name: failed with return code 7 | command: touch /var/log/e.log 8 | register: r 9 | ignore_errors: true 10 | #failed_when: "'susu' in r.stdout" 11 | changed_when: "r.rc==1" 12 | - name: loop over hosts in a group 13 | template: src=a.j2 dest=~/file.f 14 | -------------------------------------------------------------------------------- /ansible/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | gather_facts: true 5 | # vars: 6 | # mysql_port: "3306" 7 | 8 | tasks: 9 | - name: wait for port mysql_port 10 | wait_for: port="{{mysql_port|default(3306)}}" state=present timeout=3 11 | register: wait 12 | ignore_errors: true 13 | 14 | - fail: msg="Port Start Failed" 15 | when: wait|failed 16 | 17 | # - debug: msg="{{wait}}" 18 | 19 | - name: file state 20 | stat: path=/etc/ansible/ansible.cfg 21 | register: f 22 | ignore_errors: true 23 | 24 | - assert: 25 | that: 26 | - f.stat.exists 27 | - debug: msg="FILE CHANGED" 28 | when: f|changed 29 | -------------------------------------------------------------------------------- /ansible/tips.rst: -------------------------------------------------------------------------------- 1 | .. _tips: 2 | 3 | ====================== 4 | Tips and Tricks 5 | ====================== 6 | 7 | .. 
contents:: Topics 8 | 9 | Reboot a server and wait for it to come back 10 | ================================================ 11 | 12 | :: 13 | 14 | - name: restart machine 15 | command: shutdown -r now "Ansible updates triggered" 16 | async: 0 17 | poll: 0 18 | ignore_errors: true 19 | 20 | This task uses the command module to send the shutdown command to the host. By using async=0 and poll=0 , we background the process. The ``ignore_errors`` is there just to ensure that the task runs even though it is running a command that could prematurely terminate the process. 21 | 22 | :: 23 | 24 | - name: waiting for server to come back 25 | local_action: wait_for host={{inventory_hostname}} 26 | state=started 27 | delay=30 28 | timeout=600 29 | connect_timeout=30 30 | sudo:false 31 | 32 | The second task waits for the server to come back online. This is a local action that was delegated to run on the Ansible control node. 33 | 34 | :: 35 | 36 | handlers: 37 | - name: restart server 38 | command: shutdown -r now 'Reboot triggered by Ansible' 39 | async: 0 40 | poll: 0 41 | ignore_errors: true 42 | 43 | - name: wait for server to restart 44 | local_action: 45 | module: wait_for 46 | host={{inventory_hostname}} 47 | delay=3 48 | timeout=600 49 | state=started 50 | sudo: false 51 | 52 | 53 | tasks: 54 | - name: Set hostname 55 | hostname: name=host01 56 | notify: 57 | - restart server 58 | - wait for server to restart 59 | 60 | How do I split an action into a multi-line format 61 | ================================================== 62 | 63 | To split a long task line into multiple lines, such as "action: copy src=httpd.conf dest=/etc/httpd/httpd.conf", you could format it as follows (note indentations.):: 64 | 65 | - name: Update the Apache config 66 | copy: 67 | src: httpd.conf 68 | dest: /etc/httpd/httpd.conf 69 | 70 | Or, conversely, using the old 'action' syntax:: 71 | 72 | - name: Update the Apache config 73 | action: 74 | module: copy 75 | src: httpd.conf 76 | dest: /etc/httpd/httpd.conf 77 | 78 | Use YAML line continuations:: 79 | 80 | - name: Update the Apache config 81 | copy:> 82 | src=httpd.conf 83 | dest=/etc/httpd/httpd.conf 84 | 85 | Or:: 86 | 87 | - name: Update the Apache config 88 | action: copy > 89 | src=httpd.conf 90 | dest=/etc/httpd/httpd.conf 91 | -------------------------------------------------------------------------------- /ansible/yaml_syntax.rst: -------------------------------------------------------------------------------- 1 | .. _yaml_syntax: 2 | 3 | ============== 4 | YAML Syntax 5 | ============== 6 | 7 | .. contents:: Topics 8 | 9 | Basics 10 | ========== 11 | 12 | All YAML files should begin with ``---`` . 
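A YAML document may also optionally end with ``...``, which marks the end of the document. This is plain YAML rather than anything Ansible requires; a small sketch::

    ---
    # A document that explicitly marks its end
    - item one
    - item two
    ...
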
13 | 14 | All members of a list are lines beginning at the same indentation level starting with a ``-`` (a dash and a space):: 15 | 16 | --- 17 | # A list of tasty fruits 18 | - Apple 19 | - Orange 20 | - Strawberry 21 | - Mango 22 | 23 | Dictionaries can also be represented in an abbreviated form if you really want to:: 24 | 25 | --- 26 | { name: MEM, job: Dev, skill: delta } 27 | 28 | Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms:: 29 | 30 | --- 31 | create_key: yes 32 | needs_agent: no 33 | knows_oop: True 34 | likes_emacs: TRUE 35 | uses_cvs: false 36 | 37 | Gotchas 38 | ======== 39 | 40 | While YAML is generally friendly, the following is going to result in a YAML syntax error:: 41 | 42 | foo: somebody said I should put a colon here: so I did 43 | 44 | You will want to quote any hash values that contain colons:: 45 | 46 | foo: "someone said i should put a colon: so i did" 47 | 48 | Further, Ansible uses "{{var}}" for variables. If a value after a colon starts with a "{", YAML will think it is a dictionary, so you must quote it:: 49 | 50 | foo: "{{ variable }}" 51 | -------------------------------------------------------------------------------- /consul/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /consul/consul_addndel.rst: -------------------------------------------------------------------------------- 1 | .. _add_del_server: 2 | 3 | For changes to be processed, a minimum quorum of servers, (N/2)+1, must be available. That means if there are 3 server nodes, at least 2 must be available. 4 | 5 | In general, if you are ever adding and removing nodes simultaneously, it is better to first add the new nodes and then remove the old nodes. 6 | 7 | Add Servers 8 | ============ 9 | 10 | Adding new servers is generally straightforward. Simply start the new agent with the -server flag. At this point, the server will not be a member of any cluster. 11 | 12 | We can now add this node to the existing cluster using ``join``. From the new server, we can join any member of the existing cluster:: 13 | 14 | consul join <address> 15 | 16 | It is important to note that any node, including a non-server, may be specified for the join. The gossip protocol is used to properly discover all the nodes in the cluster. 17 | 18 | Check the ``last_log_index`` on the leader by running ``consul info``; this is the latest log entry on the leader. Then run the same ``consul info`` on the new node to see how far behind it is. 19 | 20 | .. note:: 21 | 22 | It is best to add servers one at a time, allowing them to catch up. This avoids the possibility of data loss in case the existing servers fail while bringing the new servers up-to-date. 23 | 24 | Remove Servers 25 | =============== 26 | 27 | For a cluster of N servers, at least (N/2)+1 must be available for the cluster to function. 28 | 29 | To avoid dropping below quorum, it may be necessary to first add new servers to the cluster, increasing the failure tolerance of the cluster, and then to remove old servers. 30 | 31 | Once you have verified the existing servers are healthy, and that the cluster can handle a node leaving, the actual process is simple. You simply issue a ``leave`` command to the server. 32 | 33 | Forced Removal 34 | ================ 35 | 36 | If the server can be recovered, it is best to bring it back online and then gracefully leave the cluster.
However, if this is not a possibility, then the ``force-leave`` command can be used to force removal of a server. 37 | -------------------------------------------------------------------------------- /consul/consul_agent.rst: -------------------------------------------------------------------------------- 1 | .. _consul_agent: 2 | 3 | Consul Agent 4 | ============== 5 | 6 | The Consul agent is the core process of Consul. The agent maintains *membership information* , *registers services* , *runs checks* , *responds to queries* , and *more* . The agent must run on every node that is part of a Consul cluster. 7 | 8 | Any agent may run in one of two modes: client or server. 9 | 10 | + A server node takes on the additional responsibility of being part of the consensus quorum. These nodes take part in Raft and provide strong consistency and availability in the case of failure. 11 | 12 | + Client nodes make up the majority of the cluster, and they are very lightweight as they interface with the server nodes for most operations and maintain very little state of their own. 13 | 14 | Running An Agent 15 | ================= 16 | 17 | While running ``consul agent`` , you should see output similar to this 18 | 19 | :: 20 | 21 | Node name: 'sclg124' 22 | Datacenter: 'local' 23 | Server: true (bootstrap: false) 24 | Client Addr: 127.0.0.1 (HTTP: 8500, HTTPS: -1, DNS: 8600, RPC: 8400) 25 | Cluster Addr: 10.13.182.124 (LAN: 8301, WAN: 8302) 26 | Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false 27 | Atlas: 28 | 29 | * Node name: 30 | 31 | This is a unique name for the agent. By default, this is the hostname of the machine. You also could customize it using the ``-node`` flag. 32 | 33 | * Datacenter: 34 | 35 | This is the datacenter in which the agent is configured to run. Consul has first-class support for multiple datacenters. The ``-dc`` flag can be used to set the datacenter. For single-DC configurations, the agent will default to "dc1". 36 | 37 | * Server: 38 | 39 | This indicate whether the agent is running in server or client mode. Additionally, a server may be in "bootstrap" mode. Multiple servers cannot be in bootstrap mode as that would put the cluster in an inconsistent state. 40 | 41 | * Client Addr: 42 | 43 | This is the address used for client interfaces to the agent. This includes the ports for the HTTP, DNS, and RPC interfaces. 44 | 45 | * Cluster Addr: 46 | 47 | This is the address and set of ports used for communication between Consul agents in a cluster. 48 | 49 | Stopping An Agent 50 | ================== 51 | 52 | An agent can be stopped in two ways: gracefully or forcefully. 53 | 54 | * To gracefully halt an agent, send the process an interrupt signal. When gracefully exiting, the agent first notifies the cluster it intends to leave the cluster. This way, other cluster members notify the cluster that the node has left. 55 | 56 | * Alternatively, you can force kill the agent by sending it a kill signal and the agent ends immediately. The rest of the cluster will detect that the node has died and notify the cluster that the node has failed. 57 | 58 | It is especially important that a server node be allowed to leave gracefully so that there will be a minimal impact on availability as the server leaves the consensus quorum. 59 | 60 | Lifecycle 61 | ========== 62 | 63 | When an agent is first started, it does not know about any other node in the cluster. To discover its peers, it must **join** the cluster. 
This is done with the ``join`` command or by providing the proper configuration to auto-join on start. 64 | 65 | Once a node joins, this information is gossiped to the entire cluster, meaning all nodes will eventually be aware of each other. If the agent is a server, existing servers will begin replicating to the new node. 66 | 67 | In the case of a network failure, some nodes may be unreachable by other nodes. In this case, unreachable nodes are marked as failed. It is impossible to distinguish between a network failure and an agent crash, so both cases are handled the same. Once a node is marked as failed, this information is updated in the service catalog. 68 | 69 | .. note:: 70 | 71 | There is some nuance here since this update is only possible if the servers can still form a quorum. Once the network recovers or a crashed agent restarts the cluster will repair itself and unmark a node as failed. The health check in the catalog will also be updated to reflect this. 72 | 73 | When a node *leaves* , it specifies its intent to do so, and the cluster marks that node as having *left* . Unlike the failed case, all of the services provided by a node are immediately deregistered. If the agent was a server, replication to it will stop. To prevent an accumulation of dead nodes, Consul will automatically reap failed nodes out of the catalog as well. This is currently done on a non-configurable interval of 72 hours. Reaping is similar to leaving, causing all associated services to be deregistered. 74 | -------------------------------------------------------------------------------- /consul/consul_bootstrap.rst: -------------------------------------------------------------------------------- 1 | .. _consul_bootstrap: 2 | 3 | Bootstrap 4 | =========== 5 | 6 | An agent can run in both client and server mode. Server nodes are responsible for running the consensus protocol and storing the cluster state. The client nodes are mostly stateless and rely heavily on the server nodes. 7 | 8 | The recommended way to bootstrap is to use the ``-bootstrap-expect`` configuration option. This option informs Consul of the expected number of server nodes and automatically bootstraps when that many servers are available. 9 | 10 | All servers should either specify the same value for -bootstrap-expect or specify no value at all. Only servers that specify a value will attempt to bootstrap the cluster. 11 | 12 | You can bootstrap each node using command ``consul agent -server -data-dir /tmp/default.consul -bootstrap-expect 3 -dc dev`` . These nodes will expect 3 peers but none of them are known to each other now. So, the servers will not elect themselves leader. 13 | 14 | Creating a Cluster 15 | ==================== 16 | 17 | To trigger leader election, we must join these machines together and create a cluster. There are two options for joining; you can use `Atlas by HashiCorp `_ to auto-join or you can access the machine and manually run the join. 18 | 19 | To manually create a cluster, access one of the machines and run the following:: 20 | 21 | consul join 22 | 23 | Since a join operation is symmetric, it does not matter which node initiates it.As a sanity check, the ``consul info`` command is a useful tool. It can be used to verify ``raft.num_peers`` , and you can view the latest log index under ``raft.last_log_index`` . When running ``consul info`` on the followers, you should see ``raft.last_log_index`` converge to the same value once the leader begins replication. 
That value represents the last log entry that has been stored on disk. 24 | 25 | Now that the servers are all started and replicating to each other, all the remaining clients can be joined. Clients are much easier as they can join against any existing node. All nodes participate in a gossip protocol to perform basic discovery, so once joined to any member of the cluster, new clients will automatically find the servers and register themselves. 26 | -------------------------------------------------------------------------------- /consul/index.rst: -------------------------------------------------------------------------------- 1 | .. consul documentation master file, created by 2 | sphinx-quickstart on Mon Aug 24 10:47:24 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Consul 7 | ======= 8 | .. toctree:: 9 | :maxdepth: 2 10 | 11 | consul_bootstrap 12 | consul_addndel 13 | -------------------------------------------------------------------------------- /docker/docker_command.rst: -------------------------------------------------------------------------------- 1 | .. _docker_command: 2 | 3 | Docker入门 -- 命令 4 | ================== 5 | 6 | 7 | .. _docker_info: 8 | 9 | docker info 10 | ------------- 11 | 12 | 通过运行 ``docker info`` 检测docker安装是否正确 13 | 14 | :: 15 | 16 | # docker info 17 | Containers: 16 18 | Images: 3 19 | Storage Driver: aufs 20 | Root Dir: /var/lib/docker/aufs 21 | Dirs: 35 22 | Execution Driver: native-0.2 23 | Kernel Version: 3.13.0-36-generic 24 | Operating System: Ubuntu 14.04.1 LTS 25 | CPUs: 4 26 | Total Memory: 3.729 GiB 27 | Name: nn 28 | ID: NZE2:4R7O:GGXU:WYP3:PC4W:3G23:CSM4:XIRH:RAIB:GRV2:HKS2:Y47U 29 | WARNING: No swap limit support 30 | 31 | .. _docker_pull: 32 | 33 | docker pull 34 | -------------- 35 | 36 | ``docker pull busybox`` 从Docker Hub上拉取busybox镜像 37 | 38 | .. _docker_run: 39 | 40 | docker run 41 | ----------- 42 | 43 | ``docker run busybox /bin/echo Hello Docker`` 运行容器 44 | 45 | ``docker run -d busybox /bin/sh -c "while true; do echo Docker; sleep 1; done"`` 以后台进程的方式运行容器 46 | 47 | .. _docker_logs: 48 | 49 | docker logs 50 | ------------ 51 | 52 | :: 53 | 54 | # docker run -d busybox /bin/sh -c "while true; do echo Docker; sleep 1; done" 55 | adc090ccbcf7feb6c9cee7228cc32f2739eddfa78ad2b10a192d8e12b2a2c594 56 | 57 | 在后台运行容器,会返回一个容器ID,之后使用 ``docker logs adc090ccbcf7feb6c9cee7228cc32f2739eddfa78ad2b10a192d8e12b2a2c594`` 可以查看输出的结果。容器ID长度很长,实际使用中可以只取前八位来使用。 58 | 59 | .. _docker_stop: 60 | 61 | docker start & stop & restart 62 | -------------------------------- 63 | 64 | ``docker stop docker_id`` 停止容器, ``docker restart docker_id`` 重启该容器。如果要完全移除容器,需要先停止容器,然后才能移除:: 65 | 66 | docker stop acd090ccbc 67 | docker rm acd090ccbc 68 | 69 | .. _docker_commit: 70 | 71 | docker commit 72 | --------------- 73 | 74 | ``docker commit acd090ccbc sample01`` 将容器的状态保存为镜像,镜像名称为sample01,镜像名称只能取字符[a-z]和数字[0-9] 75 | 76 | .. _docker_images: 77 | 78 | docker images 79 | ------------- 80 | 81 | ``docker images`` 查看所有镜像的列表 82 | 83 | .. _docker_search: 84 | 85 | docker search 86 | -------------- 87 | 88 | ``docker search (image_name)`` 在Docker registry搜索镜像 89 | 90 | .. _docker_history: 91 | 92 | docker history 93 | --------------- 94 | 95 | ``docker history (image_name)`` 查看镜像的历史版本 96 | 97 | .. _docker_push: 98 | 99 | docker push 100 | ------------ 101 | 102 | ``docker push (image_name)`` 将镜像推送到registry 103 | 104 | .. 
_docker_build: 105 | 106 | docker build 107 | --------------- 108 | 109 | ``docker build [options] PATH | URL`` 使用Dockerfile构建镜像 110 | 111 | ``--rm=true`` -- 构建成功后,移除所有中间容器 112 | 113 | ``--no-cache=false`` -- 构建过程中不使用缓存 114 | 115 | .. _docker_attach: 116 | 117 | docker attach 118 | --------------- 119 | 120 | ``docker attach container`` 附加到正在运行的容器上 121 | 122 | .. _docker_diff: 123 | 124 | docker diff 125 | ------------ 126 | 127 | ``docker diff container`` 列出容器内发生变化的文件和目录 128 | 129 | .. _docker_events: 130 | 131 | docker events 132 | -------------- 133 | 134 | 打印指定时间内容器的实时系统事件 135 | 136 | .. _docker_import: 137 | 138 | docker import 139 | --------------- 140 | 141 | 导入远程文件、本地文件和目录:: 142 | 143 | docker import http://example.com/example.tar 144 | tar -C image.tar | docker import - image_app 145 | 146 | .. _docker_export: 147 | 148 | docker export 149 | --------------- 150 | 151 | 导出容器的系统文件打包成tarball:: 152 | 153 | docker export container > imags.tar 154 | 155 | .. _docker_cp: 156 | 157 | docker cp 158 | ---------- 159 | 160 | 从容器内复制文件到指定路径上:: 161 | 162 | docker cp container:path hostpath 163 | 164 | .. _docker_login: 165 | 166 | docker login 167 | -------------- 168 | 169 | 用来登陆Docker Registry服务器:: 170 | 171 | docker login [options] [server] 172 | docker login localhost:8080 173 | 174 | .. _docker_inspect: 175 | 176 | docker inspect 177 | --------------- 178 | 179 | 收集关于容器和镜像的底层信息,包括:: 180 | 181 | * 容器实例的IP地址 182 | 183 | * 端口绑定列表 184 | 185 | * 特定端口映射的搜索 186 | 187 | * 收集配置的详细信息 188 | 189 | ``docker inspect [ --format= ] container/image`` 190 | 191 | ``docker inispact --format='{{.State}}' container/image`` 192 | 193 | .. _docker_kill: 194 | 195 | docker kill 196 | ------------ 197 | 198 | 发送 ``SIGKILL`` 信号来停止容器的主进程:: 199 | 200 | docker kill [options] container 201 | 202 | .. _docker_rmi: 203 | 204 | docker rmi 205 | ----------- 206 | 207 | 移除一个或多个镜像:: 208 | 209 | docker rmi image 210 | 211 | .. _docker_wait: 212 | 213 | docker wait 214 | ------------- 215 | 216 | 阻塞对指定容器的其他调用方法,直到容器停止后退出阻塞。 217 | 218 | .. _docker_load: 219 | 220 | docker load 221 | ------------ 222 | 223 | 从tarball中载入镜像到STDIN:: 224 | 225 | docker load -i app_box.tar 226 | 227 | .. _docker_save: 228 | 229 | docker save 230 | ------------ 231 | 232 | 将镜像保存为tarball并发送到STDOUT:: 233 | 234 | docker save image > app_box.tar 235 | -------------------------------------------------------------------------------- /docker/docker_dockerfile.rst: -------------------------------------------------------------------------------- 1 | .. 
_docker_dockerfile: 2 | 3 | Dockerfile 4 | =========== 5 | 6 | Dockerfile包含创建镜像所需要的全部指令。基于在Dockerfile中的指令,我们可以使用Docker build命令来创建镜像。通过减少镜像和容器的创建过程来简化部署。 7 | 8 | Dockerfile支持的语法命令如下:: 9 | 10 | INSTRUCTION argument 11 | 12 | 指令不区分大小写,但是命名约定为全部大写。 13 | 14 | FROM 15 | ------ 16 | 17 | 所有Dockerfile必须以 ``FROM`` 命令开始。 ``FROM`` 命令指定镜像基于哪个基础镜像创建。接下来的命令也会基于这个基础镜像。可以使用多次FROM命令,表示会创建多个镜像。 18 | 19 | :: 20 | 21 | FROM 22 | FROM : 23 | FROM @ 24 | 25 | 例如 ``FROM ubuntu`` 这样的指令告诉我们,新的镜像将基于Ubuntu的景象来构建。 26 | 27 | MAINTAINER 28 | ----------- 29 | 30 | 设置该镜像的作者:: 31 | 32 | MAINTAINER 33 | 34 | RUN 35 | ---- 36 | 37 | 在shell或exec环境下执行的命令。会在新创建的镜像上添加新的层面,接下来提交的镜像用在Dockerfile的下一条指令中。 38 | 39 | :: 40 | 41 | RUN shell form 42 | RUN ["executable", "param1", "param2"] exec form 43 | 44 | exec形式没有变量扩展,如 ``RUN ["echo", "$HOME"]`` 不会展开 $HOME,需要改成 ``RUN ["sh", "-c", "echo", "$HOME"]`` 45 | 46 | ADD 47 | ---- 48 | 49 | 复制文件指令,将URL或者启动配置上下文中的一个文件复制到容器内的位置:: 50 | 51 | ADD 52 | 53 | CMD 54 | ---- 55 | 56 | 提供了容器默认的执行命令。Dockerfile只允许使用一次CMD指令,使用多个CMD会抵消之前所有的指令,只有最后一个指令失效:: 57 | 58 | CMD ["executable", "param1", "param2"] 59 | 60 | CMD ["param1", "param2"] 61 | 62 | CMD command param1 param2 63 | 64 | .. note:: 65 | 66 | CMD ["executable", "param1", "param2"] 是推荐使用的格式。exec form被解析成JSON数组,因此不能使用单引号,要使用双引号。 67 | 68 | EXPOSE 69 | ------- 70 | 71 | 指定容器在运行时监听的端口 72 | 73 | :: 74 | 75 | EXPOSE 76 | 77 | ENTRYPOINT 78 | ----------- 79 | 80 | 配置给容器一个可执行的命令,这意味着每次使用镜像创建容器时一个特定的应用程序可以被设置为默认程序。类似与CMD,Docker只允许一个ENTRYPOINT,多个ENTRYPOINT会抵消之前所有的指令。 81 | 82 | :: 83 | 84 | ENTRYPOINT ["executable", "param1", "param2"] 85 | 86 | ENTRYPOINT command param1 param2 87 | 88 | WORKDIR 89 | -------- 90 | 91 | 指定RUN、CMD和ENTRYPOINT命令的工作目录。 92 | 93 | :: 94 | 95 | WORKDIR /path/to/workdir 96 | 97 | ENV 98 | ---- 99 | 100 | 设置环境变量,使用键值对,增加运行程序的灵活性。 101 | 102 | :: 103 | 104 | ENV 105 | 106 | USER 107 | ----- 108 | 109 | 镜像正在运行时设置一个UID。 110 | 111 | :: 112 | 113 | USER 114 | 115 | VOLUME 116 | ------- 117 | 118 | 授权容器访问主机上的目录。 119 | 120 | :: 121 | 122 | VOLUME ["/data"] 123 | 124 | 125 | example 126 | -------- 127 | 128 | :: 129 | 130 | # Nginx 131 | # 132 | # VERSION 0.0.1 133 | 134 | FROM ubuntu 135 | MAINTAINER Victor Vieux 136 | 137 | LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" 138 | RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server 139 | 140 | # Firefox over VNC 141 | # 142 | # VERSION 0.3 143 | 144 | FROM ubuntu 145 | 146 | # Install vnc, xvfb in order to create a 'fake' display and firefox 147 | RUN apt-get update && apt-get install -y x11vnc xvfb firefox 148 | RUN mkdir ~/.vnc 149 | # Setup a password 150 | RUN x11vnc -storepasswd 1234 ~/.vnc/passwd 151 | # Autostart firefox (might not be the best way, but it does the trick) 152 | RUN bash -c 'echo "firefox" >> /.bashrc' 153 | 154 | EXPOSE 5900 155 | CMD ["x11vnc", "-forever", "-usepw", "-create"] 156 | 157 | # Multiple images example 158 | # 159 | # VERSION 0.1 160 | 161 | FROM ubuntu 162 | RUN echo foo > bar 163 | # Will output something like ===> 907ad6c2736f 164 | 165 | FROM ubuntu 166 | RUN echo moo > oink 167 | # Will output something like ===> 695d7793cbe4 168 | 169 | # You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with 170 | # /oink. 171 | 172 | 173 | .. seealso:: 174 | 175 | #. 
:ref:`Dockerfile best practices ` 176 | -------------------------------------------------------------------------------- /docker/docker_in_action.rst: -------------------------------------------------------------------------------- 1 | .. _docker_in_action: 2 | 3 | Docker 案例 -- 搭建工作流 4 | ========================== 5 | 6 | 流程 7 | ------ 8 | 9 | #. 在本地功能分支上完成应用代码 10 | 11 | #. 在Github上发起一个到master分支的pull request 12 | 13 | #. 在Docker容器上运行自动测试 14 | 15 | #. 如果测试通过,手动将pull request merge进master分支 16 | 17 | #. 一旦merge成功,再次运行自动测试 18 | 19 | #. 如果第二次测试也通过,就在Docker Hub上对应用进行构建 20 | 21 | #. 一旦构建完成,自动化的部署到生产环境 22 | 23 | 概念 24 | ------ 25 | 26 | * `Dockerfile `_ 中包含了一系列语句,用于对镜像的行为进行描述。 27 | * `镜像 `_ 是一个模板,用来保存环境状态并创建容器。 28 | * `容器 `_ 可以理解为实例化的镜像,并会在其中运行一系列进程。 29 | 30 | Why ? 31 | ------ 32 | 33 | 使用Docker意味着能够在开发机上完美地模拟生产环境,而不用再为任何由两者环境、配置差异所造成的问题而担心,此外,docker带给我们的还有: 34 | 35 | * 良好的版本控制 36 | 37 | * 随时便捷地发布或重建整个开发环境 38 | 39 | * 一次构建,随处运行 40 | 41 | 配置Docker 42 | ------------ 43 | 44 | 45 | * 由于windows NT、Darwin内核缺少运行Docker容器的一些Linux内核功能,需要借助boot2docker,一个用于运行Docker的轻量级Linux发行版。 46 | 47 | * Linux内核的操作系统可以直接运行docker守护进程。 48 | 49 | * ``docker version`` 50 | 51 | Compose UP 52 | ------------ 53 | 54 | Docker compose 是官方提供的容器业务流程框架,只需通过简单的YAML配置文件就能完成多个容器服务的构建和运行。 55 | 56 | ``pip install docker-compose`` 安装docker compose 57 | 58 | 开始搭建Flask+Redis应用 59 | 60 | 在项目根目录下新建docker-compose.yml文件:: 61 | 62 | web: 63 | build: web 64 | volumes: 65 | - web: /code 66 | ports: 67 | - "80:5000" 68 | links: 69 | - redis 70 | command: python app.py 71 | redis: 72 | image: redis:2.8.19 73 | ports: 74 | - "6379:6379" 75 | 76 | 77 | 上面我们对项目所含的两个服务进行了操作: 78 | 79 | * web: 我们将在web目录下进行容器的构建,并且将其作为Volume挂载到容器的/code目录中,然后通过python app.py来启动Flask应用。最后将容器的5000端口暴露出来,映射到80端口上。 80 | 81 | * redis: 直接使用Docker Hub上的官方镜像来提供所需的Redis服务支持,将6379端口暴露并映射到主机上。 82 | 83 | 之后在web目录下创建Dockerfile文件用于指导如何构建应用镜像。 84 | 85 | 构建并运行 86 | ----------- 87 | 88 | ``docker-compose up`` 这会根据dockerfile构建Flask应用的镜像,从官方仓库拉取Redis镜像,然后将一切运行起来。docker compose会并行地期都过全部容器,每个容器会被分配各自的名字。 89 | 90 | ``docker-compose ps`` 可以查看应用进程的运行状态。两个进程运行在不同的容器中,而Docker Compose将它们组织在一起。 91 | 92 | 我们建立了本地环境,通过Dockerfile详尽描述了如何构建镜像,并基于该镜像启动了相应容器。我们使用Docker Compose来将这一切整合起来,包括构建和容器之间的关联、通信(在Flask和Redis进程之间)。 93 | 94 | 95 | -------------------------------------------------------------------------------- /docker/docker_intro.rst: -------------------------------------------------------------------------------- 1 | .. _docker_intro: 2 | 3 | Docker入门 -- 介绍 4 | ==================== 5 | 6 | :: 7 | 8 | Build once, configure once and run anywhere. 9 | 10 | Docker特征 11 | ----------- 12 | 13 | * 速度飞快以及优雅的隔离框架 14 | 15 | * 物美价廉 16 | 17 | * CPU/内存的低消耗 18 | 19 | * 快速开/关机 20 | 21 | * 跨云计算基础构架 22 | 23 | Docker组件和元素 24 | ----------------- 25 | 26 | 三个组件 27 | 28 | * Docker Client - 用户界面 29 | 30 | * Docker Daemon - 运行于主机上,处理服务请求 31 | 32 | * Docker Index - 中央registry,支持拥有公有与私有访问权限的Docker容器镜像的备份 33 | 34 | 35 | 三个基本要素 36 | 37 | + Docker Containers - 负责应用程序的运行,包括操作系统,用户添加的文件以及元数据 38 | 39 | + Docker Images - 只读模板,用来运行Docker容器 40 | 41 | + DockerFile - 文件指令集,用来说明如何自动创建Docker镜像 42 | 43 | Docker使用一下操作系统的功能来提高容器效率 44 | 45 | * NameSpace - 充当隔离的第一级,确保一个容器中运行一个进程而且不能看到或影响容器外的其他进程 46 | 47 | * Control Groups - LXC的重要组成部分,具有资源核算与限制的关键功能 48 | 49 | * UnionFS - 作为容器的构建块,支持Docker的轻量级以及速度快的特性,带有用户层的文件系统。 50 | 51 | 如何把它们放在一起 52 | ------------------- 53 | 54 | 运行任何应用程序,都需要有两个基本步骤: 55 | 56 | #. 构建一个镜像 57 | 58 | Docker image 是一个构建容器的只读模板,包含了容器启动所需要的所有信息,包括运行程序和配置数据。每个镜像都源于一个基本的镜像,随后根据Dockerfile中的指令创建模板。对于每个指令,在镜像上创建一个新的层面。 59 | 60 | #. 
运行容器 61 | 62 | 运行容器源于第一步创建的镜像。当容器被启动后,一个读写层会被添加到镜像的顶层。当分配到合适的网络和IP地址后,需要的应用程序就可以在容器中运行了。 63 | 64 | 65 | -------------------------------------------------------------------------------- /docker/docker_swarm.rst: -------------------------------------------------------------------------------- 1 | .. _docker_swarm: 2 | 3 | Swarm 4 | ======= 5 | 6 | Discovery 7 | ---------- 8 | 9 | node discovery 10 | ````````````````` 11 | 12 | :: 13 | 14 | docker run -d -p 2376:2375 swarm manage --discovery dockerhost01:2375,docker02:2375:docker03:2375 \ 15 | -H=0.0.0.0:2375 16 | 17 | file discovery 18 | ```````````````` 19 | 20 | :: 21 | 22 | docker run -d -p 2376:2375 -v /etc/swarm/cluster_config:/cluster \ 23 | swarm manage file:///cluster 24 | 25 | # cat /etc/swarm/cluster_config 26 | dockerhost01:2375 27 | dockerhost02:2375 28 | dockerhost03:2375 29 | 30 | docker hub token 31 | `````````````````` 32 | 33 | :: 34 | 35 | #docker run --rm swarm create 36 | e9a6015cd02bfddb03ef95848b490450 37 | 38 | docker run --rm swarm --addr=10.13.181.85:2375 token://e9a6015cd02bfddb03ef95848b490450 39 | 40 | docker run -d -p 2376:2375 swarm manage token://e9a6015cd02bfddb03ef95848b490450 41 | 42 | Schedule 43 | ---------- 44 | 45 | spread 46 | ```````` 47 | 48 | :: 49 | 50 | docker run -d -p 2376:2375 -v /etc/swarm/cluster_config:/cluster \ 51 | swarm manage --strategy=spread file:///cluster 52 | 53 | binpack 54 | ```````` 55 | 56 | :: 57 | 58 | docker run -d -p 2376:2375 -v /etc/swarm/cluster_config:/cluster \ 59 | swarm manage --strategy=binpack file:///cluster 60 | 61 | random 62 | ```````` 63 | :: 64 | 65 | docker run -d -p 2376:2375 -v /etc/swarm/cluster_config:/cluster \ 66 | swarm manage --strategy=random file:///cluster 67 | 68 | Filter 69 | -------- 70 | 71 | TLS support 72 | ------------- 73 | 74 | `Create TLS`_ Certificates for Docker and Docker Swarm 75 | 76 | .. _`Create TLS`: http://technolo-g.com/generate-ssl-for-docker-swarm/ 77 | 78 | Docker:: 79 | 80 | docker -d \ 81 | --tlsverify \ 82 | --tlscacert=/etc/pki/tls/certs/ca.pem \ 83 | --tlscert=/etc/pki/tls/certs/dockerhost01-cert.pem \ 84 | --tlskey-/etc/pki/tls/private/dockerhost01-key.pem \ 85 | -H tcp://0.0.0.0:2376 86 | 87 | Swarm master:: 88 | 89 | swarm manage \ 90 | --tlsverify \ 91 | --tlscacert=/etc/pki/tls/certs/ca.pem \ 92 | --tlscert=/etc/pki/tls/certs/swarm-cert.pem \ 93 | --tlskey=/etc/pki/tls/private/swarm-key.pem \ 94 | --discovery file://etc/swarm_config \ 95 | -H tcp://0.0.0.0:2376 96 | 97 | Settings on client:: 98 | 99 | 100 | export DOCKER_HOST=tcp://dockerswarm01:2376 101 | export DOCKER_CERT_PATH="`pwd`" 102 | export DOCKER_TLS_VERIFY=1 103 | -------------------------------------------------------------------------------- /docker/index.rst: -------------------------------------------------------------------------------- 1 | .. docker documentation master file, created by 2 | sphinx-quickstart on Thu Apr 23 16:11:08 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Docker 7 | ================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | docker_intro 13 | docker_command 14 | docker_dockerfile 15 | dockerfile_best_practices_take 16 | docker_in_action 17 | docker_compose 18 | docker_swarm 19 | -------------------------------------------------------------------------------- /index.rst: -------------------------------------------------------------------------------- 1 | .. 
Dhub documentation master file, created by 2 | sphinx-quickstart on Wed Jun 10 15:20:25 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Dhub's documentation! 7 | ================================ 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | ansible/index 15 | ansible-v2/rst/index 16 | logstash/index 17 | docker/index 18 | jenkins-ci/index 19 | k8s/index 20 | jinja2/index 21 | iptables/index 22 | screen/index 23 | 24 | Indices and tables 25 | ================== 26 | 27 | * :ref:`genindex` 28 | * :ref:`modindex` 29 | * :ref:`search` 30 | 31 | -------------------------------------------------------------------------------- /iptables/index.rst: -------------------------------------------------------------------------------- 1 | .. iptables documentation master file, created by 2 | sphinx-quickstart on Fri Apr 24 08:58:43 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | iptables 7 | ========== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | iptables 13 | -------------------------------------------------------------------------------- /iptables/iptables.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/iptables/iptables.jpg -------------------------------------------------------------------------------- /jenkins-ci/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /jenkins-ci/index.rst: -------------------------------------------------------------------------------- 1 | .. Jenkins-CI documentation master file, created by 2 | sphinx-quickstart on Fri Jul 24 11:03:35 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Jenkins-CI 7 | ====================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | jenkins_global_settings 13 | -------------------------------------------------------------------------------- /jenkins-ci/jenkins_global_settings.rst: -------------------------------------------------------------------------------- 1 | .. _jenkins_global_settings: 2 | 3 | Global Settings 4 | ================= 5 | 6 | .. note:: 7 | 8 | HOME: /var/jenkins_home 9 | 10 | Workspace Root Directory: ${JENKINS_HOME}/workspace/${ITEM_FULLNAME} 11 | 12 | Build Record Root Directory: ${ITEM_ROOTDIR}/builds 13 | 14 | 15 | #. Docker Builder 16 | 17 | Docker URL: docker server REST API URL, http://172.17.42.1:2375 if jenkins is running in a docker container. 18 | 19 | 20 | #. Gerrit Trigger 21 | 22 | Hostname, Frontend URL, SSH Port, Username, SSH Keyfile(/var/jenkins_home/.ssh/id_rsa), SSH Keyfile Password 23 | 24 | Advanced:: 25 | 26 | REST API - HTTP Username && HTTP Password 27 | 28 | Enable code review 29 | 30 | Enable Verified 31 | 32 | 33 | Integrate With Gerrit 34 | ====================== 35 | 36 | Concepts:: 37 | 38 | refs/for/ 39 | refs/changes/nn//m 40 | git hook commit-msg Change-Id 41 | 42 | command line: ssh -P -p 29418 jenkins@127.0.0.1 gerrit 43 | get git hook: scp -P 29418 jenkins@127.0.0.1:/hooks/commit-msg ./ 44 | 45 | 46 | Gerrit settings 47 | ---------------- 48 | 49 | * User settings 50 | 51 | #. 
Add user jenkins on Gerrit Code review 52 | 53 | Email Address == git config user.email 54 | 55 | #. Add SSH Public Keys 56 | 57 | #. Generate HTTP Password for HTTP REST API 58 | 59 | #. Add jenkins to groups: Anonymous Users, Event Streaming Users, Non-Interactive Users 60 | 61 | * Project settings 62 | 63 | #. Create a new project. Rights inherit from **All-Projects** 64 | 65 | #. Require Change-Id in commit message **TRUE** or **FALSE** if necessary 66 | 67 | * Project Access settings 68 | 69 | #. Global Capabilities 70 | 71 | Stream Events: ALLOW - Event Streaming Users 72 | 73 | Stream Events: ALLOW - Non-Interactive Users 74 | 75 | #. Reference: refs/* 76 | 77 | Read: ALLOW - Non-Interactive Users 78 | 79 | Label Verified: +1/-1 - Non-Interactive Users 80 | 81 | #. Reference: refs/heads/* 82 | 83 | Push: ALLOW - Non-Interactive Users 84 | 85 | Label Code-Review: +1/-1 - Non-Interactive Users 86 | 87 | Label Verified: +1/-1 - Non-Interactive Users 88 | 89 | * Add ``Label Verified`` to All-Project 90 | 91 | :: 92 | 93 | git init project 94 | git config user.name 'admin' 95 | git config user.email 'admin@example.com' 96 | git remote add origin ssh://admin@10.13.182.124:29418/All-Project 97 | git pull origin refs/meta/config 98 | cat << EOF >> project.config 99 | [label "Verified"] 100 | function = MaxWithBlock 101 | value = -1 Fails 102 | value = 0 No score 103 | value = +1 Verified 104 | EOF 105 | git add project.config 106 | git commit -a -m 'add Label Verified to All-Project' 107 | git push origin HEAD:refs/meta/config 108 | 109 | 110 | Jenkins Job 111 | ------------- 112 | 113 | Auto Verify 114 | ````````````` 115 | 116 | #. Source Code Management 117 | 118 | git repository: ssh://jenkins@10.13.182.124:29418/esTookit 119 | 120 | Credentials: passwords/ssh key 121 | 122 | Name: gerrit 123 | 124 | Refspec: $GERRIT_REFSPEC 125 | 126 | Branches to build: $GERRIT_BRANCH 127 | 128 | Additional Behaviours 129 | 130 | * Strategy for choosing what to build - Gerrit Trigger 131 | * Clean before checkout 132 | 133 | #. Build Triggers - Gerrit event 134 | 135 | Choose a server 136 | Trigger on 137 | 138 | * Patchset Created 139 | * Draft Published 140 | 141 | Gerrit Project 142 | 143 | * Type - Plain 144 | * Pattern - esTookit 145 | * Branches 146 | 147 | + Type - Path 148 | + Pattern - ** 149 | 150 | #. Build 151 | 152 | Use ``Execute shell`` to load extern scripts. 153 | 154 | Perform build & test 155 | 156 | #. Project will be automately checked with ``Label Verified`` if build succeed. So, no Post-build actions to perform. 157 | 158 | Auto Publish 159 | ````````````` 160 | 161 | #. Gerrit Trigger 162 | 163 | Trigger on - Change Merged 164 | 165 | #. Publish to Cloud Foundry 166 | 167 | Post-build Actions 168 | Target, Credentials, Space, Allow self-signed certificate 169 | Reset app if already exists 170 | Read configuration from a manifest file / Enter configuration in Jenkins 171 | 172 | Tips && Suggestions 173 | ---------------------- 174 | 175 | #. Parameterize Jenkins jobs 176 | 177 | #. Destruct complex Jenkins jobs 178 | -------------------------------------------------------------------------------- /jinja2/index.rst: -------------------------------------------------------------------------------- 1 | .. jinja2-docs documentation master file, created by 2 | sphinx-quickstart on Tue Jun 23 11:30:06 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Jinja2 7 | ======= 8 | 9 | .. 
toctree:: 10 | :maxdepth: 3 11 | 12 | jinja2_intro 13 | jinja2_design 14 | -------------------------------------------------------------------------------- /jinja2/jinja2_intro.rst: -------------------------------------------------------------------------------- 1 | .. _jinja2_intro: 2 | 3 | ============= 4 | Jinja2 Intro 5 | ============= 6 | 7 | .. contents:: Topics 8 | 9 | Installation 10 | =============== 11 | 12 | As a Python egg:: 13 | 14 | easy_install Jinja2 15 | pip install Jinja2 16 | 17 | From the tarball release:: 18 | 19 | wget https://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.7.3.tar.gz 20 | tar xzvf Jinja2-2.7.3.tar.gz 21 | cd Jinja2-2.7.3 && python setup.py install 22 | # root privilege required 23 | 24 | Installing the development version:: 25 | 26 | git clone git://github.com/mitsuhiko/jinja2.git 27 | cd jinja2 28 | ln -s jinja2 /usr/lib/python2.X/site-packages 29 | # package git required 30 | 31 | As of version 2.7, Jinja2 depends on the *MarkupSafe* module. 32 | 33 | Basic API Usage 34 | ================== 35 | 36 | The most basic way to create a template and render it is through ``Template``. This, however, is not the recommended way to work with it if your templates are not loaded from strings but from the file system or another data source:: 37 | 38 | >>> from jinja2 import Template 39 | >>> template = Template('Hello {{ name }}!') 40 | >>> template.render(name='World') 41 | u'Hello World!' 42 | 43 | 44 | -------------------------------------------------------------------------------- /k8s/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /k8s/index.rst: -------------------------------------------------------------------------------- 1 | .. k8s documentation master file, created by 2 | sphinx-quickstart on Wed Aug 5 10:33:44 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Kubernetes 7 | ================ 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | k8s_scratch 13 | k8s_network 14 | -------------------------------------------------------------------------------- /k8s/k8s_network.rst: -------------------------------------------------------------------------------- 1 | .. _k8s_network: 2 | 3 | K8S - Network 4 | ============== 5 | 6 | #. Highly-coupled container-to-container communications. 7 | 8 | #. Pod-to-Pod communications. 9 | 10 | #. Pod-to-Service communications. 11 | 12 | #. External-to-Service communications. 13 | 14 | Kubernetes model 15 | ------------------ 16 | 17 | Kubernetes imposes some fundamental requirements on any networking implementation:: 18 | 19 | * All containers can communicate with all other containers without NAT. 20 | 21 | * All nodes can communicate with all containers without NAT. 22 | 23 | * The IP that a container sees itself as is the same IP that others see it as. 24 | 25 | What this means in practice is that you cannot just take two computers running Docker and expect Kubernetes to work. 26 | 27 | You must ensure that the fundamental requirements are met. 28 | 29 | Kubernetes applies IP addresses at the ``pod`` scope: containers within a ``pod`` share their network namespace, including their IP address. This means that containers within a ``pod`` can all reach each other's ports on ``localhost`` .
This does imply that containers within a ``pod`` must coordinate port usage, but this is no different than processes in a VM. This is what we call the **IP-per-pod** model. 30 | 31 | 32 | .. note:: 33 | 34 | This is implemented in Docker as a "pod container" which holds the network namespace open while the "app container" joins that namespace with Docker's ``--net=container:`` function. 35 | 36 | As with Docker, it is possible to request host ports, but this is reduced to a very niche operation. In this case a port will be allocated on the host *Node* and traffic will be forwarded to the *Pod* . The *Pod* itself is blind to the existence or non-existence of host ports. 37 | 38 | How to achieve this 39 | ---------------------- 40 | 41 | #. Google Compute Engine (GCE) 42 | 43 | #. L2 networks and linux bridging 44 | 45 | `Four ways to connect a docker container to a local network `_ 46 | 47 | 48 | #. Flannel 49 | 50 | `Flannel `_ is a very simple overlay network that satisfies the Kubernetes requirements. 51 | 52 | 53 | #. OpenVSwitch 54 | 55 | `OpenVSwitch `_ is a somewhat more mature but also complicated way to build an overlay network. :ref:`OpenVSwitch networking` 56 | 57 | 58 | #. Weave 59 | 60 | `Weave `_ is yet another way to build an overlay network, primarily aiming at Docker integration. 61 | 62 | 63 | #. Calico 64 | 65 | `Calico `_ uses BGP to enable real container IPs. 66 | 67 | 68 | .. _OpenVSwitch networking: 69 | 70 | Kubernetes OpenVSwitch GRE/VxLAN networking 71 | ---------------------------------------------- 72 | 73 | This document describes how OpenVSwitch is used to set up networking between pods across nodes. The tunnel type could be GRE or VxLAN. VxLAN is preferable when large-scale isolation needs to be performed within the network. 74 | 75 | .. figure:: ovs-networking.png 76 | 77 | The Vagrant setup in Kubernetes does the following: 78 | 79 | The docker bridge is replaced with a brctl-generated Linux bridge (kbr0) with a 256-address subnet. Basically, a node gets a 10.244.x.0/24 subnet and docker is configured to use that bridge instead of the default docker0 bridge. 80 | 81 | Also, an OVS bridge is created (obr0) and added as a port to the kbr0 bridge. All OVS bridges across all nodes are linked with GRE tunnels. So, each node has an outgoing GRE tunnel to all other nodes. It does not really need to be a complete mesh; the meshier the better. STP (spanning tree) mode is enabled in the bridges to prevent loops. 82 | 83 | Routing rules enable any 10.244.0.0/16 target to become reachable via the OVS bridge connected with the tunnels. 84 | 85 | -------------------------------------------------------------------------------- /k8s/ovs-networking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/k8s/ovs-networking.png -------------------------------------------------------------------------------- /logstash/.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | -------------------------------------------------------------------------------- /logstash/index.rst: -------------------------------------------------------------------------------- 1 | .. L-E-K documentation master file, created by 2 | sphinx-quickstart on Mon Jun 29 09:54:26 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive.
5 | 6 | Deploy Logstash-Elasticsearch-Kibana using ansible 7 | ================================================== 8 | 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | 13 | roles/index 14 | playbooks 15 | lekstack/index 16 | 17 | .. seealso:: 18 | 19 | #. `Grok Debugger`_ 20 | #. Rsyslog `RFC5424`_ 21 | 22 | .. _Grok Debugger: http://www.grokdebug.herokuapp.com 23 | .. _RFC5424: http://tools.ietf.org/html/rfc5423 24 | -------------------------------------------------------------------------------- /logstash/lekstack/elasticsearch.rst: -------------------------------------------------------------------------------- 1 | .. _elasticsearch: 2 | 3 | Elasticsearch 4 | ================ 5 | 6 | Cluster 7 | --------- 8 | 9 | An elasticsearch cluster is a collection of one or more nodes that together holds your entire data and provides federated indexing and search capabilities across all nodes. 10 | 11 | A cluster is identified by a unique name which by default is "elasticsearch". This name is important because a node can only be part of a cluster if the node is set up to join the cluster by its name. 12 | 13 | -------------------------------------------------------------------------------- /logstash/lekstack/index.rst: -------------------------------------------------------------------------------- 1 | LEK stack 2 | ================ 3 | 4 | .. contents:: Topics 5 | 6 | Overview 7 | --------------------------------- 8 | 9 | .. figure:: lekstack.png 10 | 11 | .. #.. toctree:: 12 | :maxdepth: 2 13 | logstash 14 | elasticsearch 15 | kibana 16 | -------------------------------------------------------------------------------- /logstash/lekstack/kibana.rst: -------------------------------------------------------------------------------- 1 | .. _kibana: 2 | 3 | Kibana 4 | ======= 5 | -------------------------------------------------------------------------------- /logstash/lekstack/lekstack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tuxknight/notes/e20fbe9edaef304769849cb501fff720ca60657f/logstash/lekstack/lekstack.png -------------------------------------------------------------------------------- /logstash/lekstack/logstash.rst: -------------------------------------------------------------------------------- 1 | .. _logstash: 2 | 3 | Logstash 4 | ============== 5 | 6 | The logstash event processing pipeline has three stages: input -> filters -> output. 7 | 8 | * input genenrate events 9 | 10 | * filters modify events 11 | 12 | * output ship events elsewhere 13 | 14 | Inputs and outputs support codecs that enable you to encode or decode the data as it enters or exits the pipeline without having to use a separate filter. 15 | 16 | To configure logstash, you create a config that specifies which plugins you want to use and settings for each plugin. 
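Before the Ansible-templated configuration in the next section, here is a minimal sketch of such a config file showing the three stages end to end; the file name ``simple.conf`` and the ``stdin``/``stdout``/``mutate`` plugins are illustrative choices for local testing, not part of the deployed roles::

    # simple.conf -- one plugin per stage: input -> filter -> output
    input {
      stdin { }                                             # read raw lines typed into the terminal
    }
    filter {
      mutate {
        add_field => [ "received_at", "%{@timestamp}" ]     # annotate each event with its arrival time
      }
    }
    output {
      stdout { codec => rubydebug }                         # pretty-print the processed event
    }

Running ``bin/logstash -f simple.conf`` from the unpacked tarball lets you type a line and watch the resulting event pass through all three stages.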
17 | 18 | Configuration 19 | ------------------- 20 | 21 | :: 22 | 23 | # {{ansible_managed}} 24 | # 25 | 26 | # input section 27 | input { 28 | 29 | {% if logstash_whoami == "shipper" %} 30 | # settings for plugin: file as a shipper 31 | #============================================ 32 | file { 33 | path => {{default_syslog}} 34 | type => "syslog" 35 | } 36 | file { 37 | path => {{default_authlog}} 38 | type => "authlog" 39 | } 40 | #============================================= 41 | {% elif logstash_whoami == "indexer" %} 42 | # settings for plugin: redis as an indexer 43 | #============================================ 44 | redis { 45 | data_type => "pattern_channel" 46 | key => "{{redis_key}}" 47 | host => "{{read_from_redis_addr}}" 48 | port => "{{read_from_redis_port}}" 49 | } 50 | #============================================= 51 | {% endif %} 52 | } 53 | 54 | # filter section 55 | filter { 56 | {% if logstash_whoami == "shipper" %} 57 | # apply filter rules on shipper end 58 | #============================================= 59 | if [type] == "syslog" or [type] == "authlog" { 60 | grok { 61 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } 62 | add_field => [ "received_at", "%{@timestamp}" ] 63 | add_field => [ "received_from", "%{host}" ] 64 | } 65 | syslog_pri {} 66 | date { 67 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 68 | } 69 | } 70 | #============================================= 71 | {% endif %} 72 | } 73 | 74 | # output section 75 | output { 76 | {% if logstash_whoami == "shipper" %} 77 | # settings for plugin: redis as a shipper 78 | #============================================ 79 | redis { 80 | data_type => "channel" 81 | key => "{{redis_key}}" 82 | host => "{{write_to_redis_addr}}" 83 | port => {{write_to_redis_port}} 84 | } 85 | #============================================ 86 | {% elif logstash_whoami == "indexer" %} 87 | # settings for plugin: elasticsearch as an indexer 88 | #============================================ 89 | elasticsearch { 90 | host => "{{els_addr}}" 91 | protocol => "http" 92 | } 93 | #============================================ 94 | {% endif %} 95 | 96 | } 97 | -------------------------------------------------------------------------------- /logstash/lekstack/redis.rst: -------------------------------------------------------------------------------- 1 | .. _redis: 2 | 3 | Redis On Docker 4 | ================== 5 | 6 | Pull official image from docker hub:: 7 | 8 | docker pull redis`` 9 | 10 | Usage 11 | ------ 12 | 13 | #. start a redis instance 14 | docker run --name broker -d -p 6379:6379 redis 15 | 16 | #. connect to it from an application 17 | docker run --rm --name shipper --link broker:redis -p 10514:10514 -v /root/shipper/shipper.conf:/shipper.conf logstash logstash -f /shipper.conf 18 | -------------------------------------------------------------------------------- /logstash/playbooks.rst: -------------------------------------------------------------------------------- 1 | .. _lek_playbooks: 2 | 3 | Playbooks 4 | ============== 5 | 6 | .. contents:: Topics 7 | 8 | 9 | Linear Execution 10 | ------------------- 11 | 12 | #. Common settings 13 | 14 | #. Setup services 15 | 16 | #. Setup applications 17 | 18 | Privilege Problems 19 | -------------------- 20 | 21 | Controlling machine have the permission to connect remote nodes as normal users. 
[ elc, ats, susu ] 22 | 23 | But some tasks require root privileges. If you connect to nodes as root, applications you installed will running with root privileges. It is better that using sudo & sudo_user in each task which needs root privileges. 24 | 25 | :: 26 | 27 | tasks: 28 | - name: root privilege required 29 | apt: pkg="redis-server" state=present 30 | sudo: yes 31 | sudo_user: root 32 | 33 | *ansible-playbook -i hosts site.yml --ask-sudo-pass -u elc* This command will prompt for a password of elc on every hosts. When you connect to remote nodes with various users and passwords, using group_vars and hosts_vars is a convenient approach to manage each node's username and passwords. 34 | 35 | :: 36 | 37 | --- 38 | ansible_ssh_user: susu 39 | ansible_sudo_pass: ******** 40 | 41 | *ansible-vault* allows keeping sensitive data such as passwords or keys in encrypted files. 42 | 43 | :: 44 | 45 | ansible-vault create host_vars/10.32.131.107.yml 46 | 47 | ansible-vault edit host_vars/10.32.131.107.yml --ask-vault-pass 48 | 49 | ansible-vault view host_vars/10.32.131.107.yml --vault-password-file /some/safe/place/pass.txt 50 | 51 | ansible-vault rekey host_vars/10.32.131.107.yml 52 | 53 | Inventory 54 | -------------- 55 | 56 | Inventory file:: 57 | 58 | [lek:children] 59 | logstash 60 | els 61 | 62 | [logstash:children] 63 | shipper 64 | indexer 65 | 66 | [els] 67 | 10.13.181.85 68 | 69 | [shipper] 70 | 10.32.105.214 71 | 10.32.131.107 72 | 73 | [indexer] 74 | 10.32.105.213 75 | 76 | [redis] 77 | 10.32.105.213 78 | 79 | 80 | Playbook site.xml :: 81 | 82 | --- 83 | - name: common settings 84 | hosts: lek 85 | gather_facts: true 86 | roles: 87 | - {role: common-env} 88 | - {role: sun-jdk} 89 | 90 | - name: setup redis 91 | hosts: redis 92 | gather_facts: true 93 | roles: 94 | - {role: redis} 95 | 96 | - name: setup kibana and elasticsearch 97 | hosts: els 98 | gather_facts: true 99 | roles: 100 | - {role: elasticsearch} 101 | - {role: kibana} 102 | 103 | # when redis and elasticsearch are ready 104 | - name: setup logstash 105 | hosts: logstash 106 | gather_facts: true 107 | roles: 108 | - {role: logstash} 109 | 110 | -------------------------------------------------------------------------------- /logstash/roles/cinder-logging.rst: -------------------------------------------------------------------------------- 1 | Cinder-logging 2 | ================== 3 | 4 | .. contents:: Topics 5 | 6 | Sending logging information to syslog 7 | 8 | Requirements 9 | ------------ 10 | 11 | Ansible-core-modules: ``lineinfile`` 12 | 13 | Role Variables 14 | -------------- 15 | 16 | Default vars:: 17 | 18 | cinder_conf: "/etc/cinder/cinder.conf" 19 | cinder_facility: 20 | LOG_LOCAL1: local1 21 | port: "10514" 22 | host: "127.0.0.1" 23 | log_server: "{{host}}:{{port}}" 24 | protocal: "tcp" 25 | lines: 26 | - {regx: "^verbose=", line: "verbose=False" } 27 | - {regx: "^debug=", line: "debug=False" } 28 | - {regx: "^use_syslog=", line: "use_syslog=True" } 29 | - {regx: "^syslog_log_facility=", line: "syslog_log_facility=LOG_LOCAL1" } 30 | 31 | Dependencies 32 | ------------ 33 | 34 | None 35 | 36 | Example Playbook 37 | ---------------- 38 | 39 | :: 40 | 41 | - hosts: servers 42 | roles: 43 | - { role: cinder-logging, port: "11514" } 44 | 45 | License 46 | ------- 47 | 48 | BSD 49 | -------------------------------------------------------------------------------- /logstash/roles/common-env.rst: -------------------------------------------------------------------------------- 1 | .. 
_common-env: 2 | 3 | 4 | common-env 5 | ============ 6 | 7 | .. contents:: Topics 8 | 9 | * Disable ``requiretty`` in /etc/sudoers 10 | 11 | Enabling pipelining could reduce the number of SSH operations requried to execute a module on the remote server. When using "sudo:" with *pipelining* option enabled, ``requiretty`` in /etc/sudoers should be disabled. 12 | 13 | * Install build essential packages 14 | 15 | 16 | Requirements 17 | ------------ 18 | 19 | Ansible-core-module: ``lineinfile`` , ``yum`` or ``apt`` . 20 | 21 | Role Variables 22 | -------------- 23 | 24 | Default vars:: 25 | 26 | sudoers(string)- path to sudoers file. 27 | packages(list)- build essential package names 28 | pkgmgr(dict) 29 | pkgmgr.pkg - package management tool [yum,apt] 30 | pkgmgr.name - module option [name,pkg] 31 | pkgmgr.state - module option [state] 32 | 33 | Dependencies 34 | ------------ 35 | 36 | None 37 | 38 | Example Playbook 39 | ---------------- 40 | 41 | :: 42 | 43 | - name: common settings 44 | hosts: servers 45 | gather_facts: true 46 | roles: 47 | - { role: commom-env} 48 | -------------------------------------------------------------------------------- /logstash/roles/elasticsearch.rst: -------------------------------------------------------------------------------- 1 | .. _elasticsearch_role: 2 | 3 | elasticsearch 4 | ================ 5 | 6 | .. contents:: Topics 7 | 8 | Setup a single node elasticsearch service on remote server. 9 | 10 | Requirements 11 | ------------ 12 | 13 | Ansible-core-modules: ``template`` , ``file`` , ``unarchive`` , ``stat`` , ``lineinfile`` . 14 | 15 | Role Variables 16 | -------------- 17 | 18 | Default vars:: 19 | 20 | base_home: "{{ansible_env.HOME}}/lek" 21 | es_home: "{{base_home}}/elasticsearch" 22 | es_tarball: "elasticsearch-1.6.0.tar.gz" 23 | es_config: "elasticsearch.yml" 24 | es_logging_config: "logging.yml" 25 | es_srvname: "elasticsearch" 26 | env_profile: "{{ansible_env.HOME}}/.profile" 27 | operation: "install" 28 | 29 | Dependencies 30 | ------------ 31 | 32 | JDK 33 | 34 | .. note:: 35 | 36 | JDK 1.8 or later required. 37 | Make sure executable java binary installed in /bin:/usr/bin:/sbin:/usr/sbin, or make a symbolic link in /bin:/usr/bin:/sbin:/usr/sbin . 38 | 39 | Example Playbook 40 | ---------------- 41 | 42 | :: 43 | 44 | - hosts: servers 45 | gather_facts: true 46 | roles: 47 | - { role: elasticsearch, operation: "install" } 48 | -------------------------------------------------------------------------------- /logstash/roles/index.rst: -------------------------------------------------------------------------------- 1 | Ansible Roles 2 | ================ 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | common-env 8 | sun-jdk 9 | logstash 10 | redis 11 | elasticsearch 12 | kibana 13 | nova-logging 14 | cinder-logging 15 | neutron-logging 16 | -------------------------------------------------------------------------------- /logstash/roles/kibana.rst: -------------------------------------------------------------------------------- 1 | .. _kibana_role: 2 | 3 | kibana 4 | ========= 5 | 6 | .. contents:: Topics 7 | 8 | Setup Kibana on remote server. 9 | 10 | Requirements 11 | ------------ 12 | 13 | Ansible-core-module: ``file`` , ``unarchive`` , ``lineinfile`` , ``template`` , ``stat`` . 
14 | 15 | Role Variables 16 | -------------- 17 | 18 | Default vars:: 19 | 20 | base_home: "{{ansible_env.HOME}}/lek" 21 | kibana_home: "{{base_home}}/kibana" 22 | kibana_tarball: "kibana-4.1.0-linux-x64.tar.gz" 23 | env_profile: "{{ansible_env.HOME}}/.profile" 24 | kibana_config: "kibana.yml" 25 | elasticsearch_url: "http://127.0.0.1:9200" 26 | kibana_port: 5601 27 | operation: "install" 28 | 29 | Dependencies 30 | ------------ 31 | 32 | None 33 | 34 | Example Playbook 35 | ---------------- 36 | 37 | :: 38 | 39 | - hosts: servers 40 | gather_facts: true 41 | roles: 42 | - { role: kibana, elasticsearch_url:"http://10.13.181.85:9200", \ 43 | operation: "install" } 44 | -------------------------------------------------------------------------------- /logstash/roles/logstash.rst: -------------------------------------------------------------------------------- 1 | .. _logstash_role: 2 | 3 | logstash 4 | ========= 5 | 6 | .. contents:: Topics 7 | 8 | Setup logstash with a tarball and running it as a service. 9 | 10 | Requirements 11 | ------------ 12 | 13 | Ansible-core-modules: ``template`` , ``file`` , ``stat`` , ``unarchive`` , ``lineinfile`` 14 | 15 | Role Variables 16 | -------------- 17 | 18 | Default vars:: 19 | 20 | logstash_srvname: "logstash" 21 | base_home: "{{ansible_env.HOME}}/lek" 22 | logstash_tarball: "logstash-1.5.0.tar.gz" 23 | logstash_home: "{{base_home}}/logstash" 24 | env_profile: "{{ansible_env.HOME}}/.profile" 25 | logstash_whoami: "shipper" 26 | read_from_redis_addr: "127.0.0.1" 27 | read_from_redis_port: 6379 28 | write_to_redis_addr: "127.0.0.1" 29 | write_to_redis_port: 6379 30 | redis_key: "logstash-*" 31 | els_addr: "127.0.0.1" 32 | operation: "install" 33 | 34 | OS-specified vars:: 35 | 36 | RedHat-based 37 | default_syslog: ["/var/log/messages"] 38 | default_authlog: ["/var/log/secure"] 39 | 40 | Debian-based 41 | default_syslog: [ "/var/log/syslog", "/var/log/kern.log" ] 42 | default_authlog: [ "/var/log/authlog" ] 43 | 44 | Dependencies 45 | ------------ 46 | 47 | None 48 | 49 | Example Playbook 50 | ---------------- 51 | 52 | :: 53 | 54 | - hosts: servers 55 | gather_facts: true 56 | roles: 57 | - { role: logstash, logstash_whoami: "shipper", \ 58 | write_to_redis_addr: "127.0.0.1", operation: "install" } 59 | -------------------------------------------------------------------------------- /logstash/roles/neutron-logging.rst: -------------------------------------------------------------------------------- 1 | Neutron-logging 2 | ===================== 3 | 4 | .. 
contents:: Topics 5 | 6 | Send logging information to syslog 7 | 8 | Requirements 9 | ------------ 10 | 11 | Ansible-core-module: ``lineinfile`` 12 | 13 | Role Variables 14 | -------------- 15 | 16 | :: 17 | 18 | Default vars: 19 | neutron_conf: "/etc/neutron/neutron.conf" 20 | neutron_facility: 21 | LOG_LOCAL2: local2 22 | port: "10514" 23 | host: "127.0.0.1" 24 | log_server: "{{host}}:{{port}}" 25 | protocal: "tcp" 26 | lines: 27 | - {regx: "^verbose=", line: "verbose=False" } 28 | - {regx: "^debug=", line: "debug=False" } 29 | - {regx: "^use_syslog=", line: "use_syslog=True" } 30 | - {regx: "^syslog_log_facility=", line: "syslog_log_facility=LOG_LOCAL2" } 31 | 32 | Dependencies 33 | ------------ 34 | 35 | None 36 | 37 | Example Playbook 38 | ---------------- 39 | 40 | :: 41 | 42 | - hosts: servers 43 | roles: 44 | - { role: neutron-logging, host: "10.32.10.153" } 45 | 46 | License 47 | ------- 48 | 49 | BSD 50 | 51 | -------------------------------------------------------------------------------- /logstash/roles/nova-logging.rst: -------------------------------------------------------------------------------- 1 | Nova-logging 2 | =============== 3 | 4 | .. contents:: Topics 5 | 6 | Send logging information to syslog 7 | 8 | Requirements 9 | ------------ 10 | 11 | Ansible-core-modules: ``lineinfile`` 12 | 13 | Role Variables 14 | -------------- 15 | 16 | Default vars:: 17 | 18 | nova_conf: "/etc/nova/nova.conf" 19 | nova_facility: 20 | LOG_LOCAL0: local0 21 | host: "127.0.0.1" 22 | port: "10514" 23 | log_server: "{{host}}:{{port}}" 24 | protocol: "tcp" 25 | lines: 26 | - {regx: "^verbose=", line: "verbose=False" } 27 | - {regx: "^debug=", line: "debug=False" } 28 | - {regx: "^use_syslog=", line: "use_syslog=True" } 29 | - {regx: "^syslog_log_facility=", line: "syslog_log_facility=LOG_LOCAL0" } 30 | 31 | 32 | Dependencies 33 | ------------ 34 | 35 | None 36 | 37 | Example Playbook 38 | ---------------- 39 | 40 | :: 41 | 42 | - hosts: servers 43 | roles: 44 | - { role: nova-logging, log_server: "10.32.105.153:10514"} 45 | 46 | License 47 | ------- 48 | 49 | BSD 50 | -------------------------------------------------------------------------------- /logstash/roles/redis.rst: -------------------------------------------------------------------------------- 1 | .. _redis_roles: 2 | 3 | redis 4 | ========= 5 | 6 | .. contents:: Topics 7 | 8 | Setup redis on remote servers with redis-3.0.2 source code. 9 | 10 | Requirements 11 | ------------ 12 | 13 | Ansible-core-modules: ``unarchive`` , ``stat`` , ``shell`` , ``template`` , ``lineinfile`` , ``file`` . 14 | 15 | Role Variables 16 | -------------- 17 | 18 | Default vars:: 19 | 20 | base_home: "{{ansible_env.HOME}}/lek" 21 | redis_home: "{{base_home}}/redis" 22 | redis_tarball: "redis-3.0.2.tar.gz" 23 | redis_config: "redis_{{redis_port}}.conf" 24 | redis_init: "redis_{{redis_port}}_init" 25 | redis_port: 6379 26 | redis_srvname: "redis_{{redis_port}}" 27 | 28 | Dependencies 29 | ------------ 30 | 31 | None 32 | 33 | Example Playbook 34 | ---------------- 35 | 36 | :: 37 | 38 | - hosts: servers 39 | gather_facts: true 40 | roles: 41 | - { role: redis, redis_port: 6379 } 42 | - { role: redis, redis_port: 6380 } 43 | -------------------------------------------------------------------------------- /logstash/roles/sun-jdk.rst: -------------------------------------------------------------------------------- 1 | .. _sun-jdk: 2 | 3 | sun-jdk 4 | ========= 5 | 6 | .. 
contents:: Topics 7 | 8 | Install sun-jdk-8u45 9 | 10 | Requirements 11 | ------------ 12 | 13 | Ansible-core-module: ``file`` , ``unarchive`` , ``stat`` , ``lineinfile`` . 14 | 15 | Role Variables 16 | -------------- 17 | 18 | Default vars:: 19 | 20 | base_home: "{{ansible_env.HOME}}/lek" 21 | java_home: "{{base_home}}/jdk1.8.0_45" 22 | jdk_tarball: "jdk-8u45-linux-x64.tar.gz" 23 | 24 | Dependencies 25 | ------------ 26 | 27 | None 28 | 29 | Example Playbook 30 | ---------------- 31 | 32 | :: 33 | 34 | - hosts: servers 35 | gather_facts: true 36 | roles: 37 | - { role: sun-jdk } 38 | -------------------------------------------------------------------------------- /screen/index.rst: -------------------------------------------------------------------------------- 1 | .. screen documentation master file, created by 2 | sphinx-quickstart on Sat Feb 7 15:02:39 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | 10 | Screen Usage 11 | ==================== 12 | 13 | *你是不是经常需要远程登录到Linux服务器?你是不是经常为一些长时间运行的任务头疼?还在用 nohup 吗?那么来看看 screen 吧,它会给你一个惊喜!* 14 | 15 | *Screen是一个可以在多个进程之间多路复用一个物理终端的窗口管理器。Screen中有会话的概念,用户可以在一个screen会话中创建多个screen窗口,在每一个screen窗口中就像操作一个真实的telnet/SSH连接窗口那样。* 16 | 17 | --------------- 18 | 一、screen功能 19 | --------------- 20 | 21 | - 会话恢复 22 | - 会话共享 23 | - 多窗口 24 | 25 | --------------- 26 | 二、screen参数 27 | --------------- 28 | 29 | + screen -ls *列出正在运行的screen* 30 | + screen -S name *启动screen的时候以name作为名称* 31 | + -d *将指定的screen作业离线(Detach)* 32 | + screen -r name或pid *进入之前断开的screen* 33 | + screen -d -r name *强抢一个已经存在的screen* 34 | + screen -x name *进入没有断开的screen,这样可以让一个人操作,另外一个人可以看到他的全部操作* 35 | 36 | ------------------------- 37 | 三、screen 多窗口管理 38 | ------------------------- 39 | 40 | 每个screen的session中,所有命令都是ctrl+a开头 41 | 42 | - C-a c ==> 在当前screen中创建一个新的shell窗口 43 | - C-a n ==> 切换到下一个window 44 | - C-a p ==> 切换到上一个window 45 | - C-a 0...9 ==> 切换到第0...9个window 46 | - C-a [space] ==> 由第0个window循环切换到第9个window 47 | - C-a C-a ==> 在两个最近使用的window之间切换 48 | - C-a x ==> 锁住当前window,需要密码解锁 49 | - C-a d ==> dettach,离开当前session 50 | - C-a w ==> 显示所有窗口列表 51 | - C-a t ==> 显示当前时间以及系统load 52 | - C-a k ==> 强制关闭当前window 53 | 54 | 55 | - C-a S ==> 水平分屏 56 | - C-a [TAB] ==> 下一屏,分屏后需要C-a c 新建窗口后方可使用 57 | 58 | ------------------- 59 | Indices and tables 60 | ------------------- 61 | 62 | * :ref:`genindex` 63 | * :ref:`modindex` 64 | * :ref:`search` 65 | 66 | --------------------------------------------------------------------------------