├── .github └── workflows │ └── main.yml ├── .gitignore ├── README.md ├── jboss-standalone ├── LICENSE.md ├── README.md ├── demo-aws-launch.yml ├── deploy-application.yml ├── group_vars │ └── all ├── hosts ├── roles │ ├── java-app │ │ ├── files │ │ │ ├── jboss-helloworld.war │ │ │ └── ticket-monster.war │ │ └── tasks │ │ │ └── main.yml │ └── jboss-standalone │ │ ├── files │ │ └── jboss-as-standalone.sh │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ ├── iptables-save │ │ └── standalone.xml └── site.yml ├── lamp_haproxy ├── LICENSE.md ├── README.md ├── aws │ ├── LICENSE.md │ ├── README.md │ ├── demo-aws-launch.yml │ ├── group_vars │ │ ├── all │ │ ├── tag_ansible_group_dbservers │ │ ├── tag_ansible_group_lbservers │ │ └── tag_ansible_group_webservers │ ├── roles │ │ ├── base-apache │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── common │ │ │ ├── files │ │ │ │ ├── RPM-GPG-KEY-EPEL-6 │ │ │ │ └── epel.repo │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── iptables.j2 │ │ │ │ └── ntp.conf.j2 │ │ ├── db │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── my.cnf.j2 │ │ ├── haproxy │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── haproxy.cfg.j2 │ │ ├── nagios │ │ │ ├── files │ │ │ │ ├── ansible-managed-services.cfg │ │ │ │ ├── localhost.cfg │ │ │ │ └── nagios.cfg │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── dbservers.cfg.j2 │ │ │ │ ├── lbservers.cfg.j2 │ │ │ │ └── webservers.cfg.j2 │ │ └── web │ │ │ └── tasks │ │ │ └── main.yml │ ├── rolling_update.yml │ └── site.yml ├── group_vars │ ├── all │ ├── dbservers │ ├── lbservers │ └── webservers ├── hosts ├── provision.yml ├── roles │ ├── base-apache │ │ └── tasks │ │ │ └── main.yml │ ├── common │ │ ├── files │ │ │ ├── RPM-GPG-KEY-EPEL-6 │ │ │ └── epel.repo │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── iptables.j2 │ │ │ └── ntp.conf.j2 │ ├── db │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── my.cnf.j2 │ ├── haproxy │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── haproxy.cfg.j2 │ ├── nagios │ │ ├── files │ │ │ ├── ansible-managed-services.cfg │ │ │ ├── localhost.cfg │ │ │ └── nagios.cfg │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── dbservers.cfg.j2 │ │ │ ├── lbservers.cfg.j2 │ │ │ └── webservers.cfg.j2 │ └── web │ │ └── tasks │ │ └── main.yml ├── rolling_update.yml └── site.yml ├── lamp_simple ├── LICENSE.md ├── README.md ├── group_vars │ ├── all │ └── dbservers ├── hosts ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── ntp.conf.j2 │ ├── db │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── my.cnf.j2 │ └── web │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ ├── copy_code.yml │ │ ├── install_httpd.yml │ │ └── main.yml │ │ └── templates │ │ └── index.php.j2 └── site.yml ├── lamp_simple_rhel7 ├── LICENSE.md ├── README.md ├── group_vars │ ├── all │ └── dbservers ├── hosts ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── ntp.conf.j2 │ ├── db │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── my.cnf.j2 │ └── web │ │ ├── tasks │ │ ├── copy_code.yml │ │ ├── install_httpd.yml │ │ └── main.yml │ │ └── templates │ │ └── index.php.j2 └── site.yml ├── language_features ├── ansible_pull.yml ├── batch_size_control.yml ├── cloudformation.yaml ├── complex_args.yml ├── conditionals_part1.yml ├── conditionals_part2.yml ├── custom_filters.yml ├── delegation.yml ├── environment.yml ├── eucalyptus-ec2.yml ├── file_secontext.yml ├── files │ └── cloudformation-example.json ├── filter_plugins │ └── custom_plugins.py ├── get_url.yml ├── group_by.yml ├── group_commands.yml ├── handlers │ └── handlers.yml ├── intermediate_example.yml ├── intro_example.yml ├── loop_nested.yml ├── loop_plugins.yml ├── loop_with_items.yml ├── mysql.yml ├── nested_playbooks.yml ├── netscaler.yml ├── postgresql.yml ├── prompts.yml ├── rabbitmq.yml ├── register_logic.yml ├── roles │ └── foo │ │ ├── files │ │ └── foo.txt │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ └── foo.j2 │ │ └── vars │ │ └── main.yml ├── roletest.yml ├── roletest2.yml ├── selective_file_sources.yml ├── tags.yml ├── tasks │ └── base.yml ├── templates │ ├── custom-filters.j2 │ ├── etc_cron.d_ansible-pull.j2 │ ├── etc_logrotate.d_ansible-pull.j2 │ ├── foo.j2 │ └── hostvars.j2 ├── upgraded_vars.yml ├── user_commands.yml ├── vars │ ├── CentOS.yml │ ├── defaults.yml │ └── external_vars.yml └── zfs.yml ├── mongodb ├── LICENSE.md ├── README.md ├── group_vars │ └── all ├── hosts ├── images │ ├── check.png │ ├── nosql_primer.png │ ├── replica_set.png │ ├── scale.png │ ├── sharding.png │ └── site.png ├── playbooks │ └── testsharding.yml ├── roles │ ├── common │ │ ├── files │ │ │ ├── 10gen.repo.j2 │ │ │ ├── RPM-GPG-KEY-EPEL-6 │ │ │ └── epel.repo.j2 │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── hosts.j2 │ │ │ └── iptables.j2 │ ├── mongoc │ │ ├── files │ │ │ └── secret │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── adduser.j2 │ │ │ ├── mongoc.conf.j2 │ │ │ └── mongoc.j2 │ ├── mongod │ │ ├── files │ │ │ └── secret │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── shards.yml │ │ └── templates │ │ │ ├── mongod.conf.j2 │ │ │ ├── mongod.j2 │ │ │ ├── repset_init.j2 │ │ │ └── shard_init.j2 │ └── mongos │ │ ├── files │ │ └── secret │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ ├── enablesharding.j2 │ │ ├── mongos.conf.j2 │ │ ├── mongos.j2 │ │ └── testsharding.j2 └── site.yml ├── phillips_hue ├── README.md ├── ansible.cfg ├── ansible_colors.yml ├── effect.yml ├── hosts ├── hue.gif ├── on_off.yml ├── register.yml └── username_info.yml ├── rust-module-hello-world ├── Makefile ├── library │ └── .gitignore ├── module-src │ ├── Cargo.lock │ ├── Cargo.toml │ ├── src │ │ └── main.rs │ └── target │ │ └── .gitignore └── rust.yml ├── tomcat-memcached-failover ├── LICENSE.md ├── README.md ├── group_vars │ └── all ├── hosts ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── iptables.j2 │ ├── lb-nginx │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── default.conf.j2 │ │ │ └── nginx.conf.j2 │ ├── memcached │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── init.sh.j2 │ │ │ └── memcached.conf.j2 │ └── tomcat │ │ ├── files │ │ └── msm-sample-webapp-1.0-SNAPSHOT.war │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ ├── context.xml.j2 │ │ ├── default.j2 │ │ └── server.xml.j2 └── site.yml ├── tomcat-standalone ├── LICENSE.md ├── README.md ├── group_vars │ └── tomcat-servers ├── hosts ├── roles │ ├── selinux │ │ └── tasks │ │ │ └── main.yml │ └── tomcat │ │ ├── files │ │ └── tomcat-initscript.sh │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ ├── iptables-save │ │ ├── server.xml │ │ └── tomcat-users.xml └── site.yml ├── windows ├── create-user.yml ├── deploy-site.yml ├── enable-iis.yml ├── files │ └── helloworld.ps1 ├── install-msi.yml ├── ping.yml ├── run-powershell.yml ├── test.yml └── wamp_haproxy │ ├── demo-aws-wamp-launch.yml │ ├── group_vars │ ├── all │ ├── windows_dbservers │ └── windows_webservers │ ├── roles │ ├── elb │ │ └── tasks │ │ │ └── main.yml │ ├── iis │ │ └── tasks │ │ │ └── main.yml │ ├── mssql │ │ ├── files │ │ │ └── create-db.ps1 │ │ └── tasks │ │ │ └── main.yml │ └── web │ │ └── tasks │ │ └── main.yml │ ├── rolling_update.yml │ └── site.yml ├── wordpress-nginx ├── LICENSE.md ├── README.md ├── group_vars │ └── all ├── hosts.example ├── roles │ ├── common │ │ ├── files │ │ │ ├── RPM-GPG-KEY-EPEL-6 │ │ │ ├── epel.repo │ │ │ └── iptables-save │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── mysql │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── my.cnf.j2 │ ├── nginx │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── default.conf │ ├── php-fpm │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── wordpress.conf │ └── wordpress │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ └── wp-config.php └── site.yml └── wordpress-nginx_rhel7 ├── LICENSE.md ├── README.md ├── group_vars └── all ├── hosts.example ├── roles ├── common │ ├── files │ │ ├── RPM-GPG-KEY-EPEL-7 │ │ ├── RPM-GPG-KEY-NGINX │ │ ├── RPM-GPG-KEY-remi │ │ ├── epel.repo │ │ ├── nginx.repo │ │ └── remi.repo │ └── tasks │ │ └── main.yml ├── mariadb │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── my.cnf.j2 ├── nginx │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── default.conf ├── php-fpm │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── wordpress.conf └── wordpress │ ├── tasks │ └── main.yml │ └── templates │ └── wp-config.php └── site.yml /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Ansible Lint 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | # Important: This sets up your GITHUB_WORKSPACE environment variable 12 | - uses: actions/checkout@v2 13 | 14 | - name: Lint Ansible Playbook 15 | # replace "master" with any valid ref 16 | uses: ansible/ansible-lint-action@master 17 | with: 18 | # [required] 19 | # Paths to ansible files (i.e., playbooks, tasks, handlers etc..) 20 | # or valid Ansible directories according to the Ansible role 21 | # directory structure. 22 | # If you want to lint multiple ansible files, use the following syntax 23 | # targets: | 24 | # playbook_1.yml 25 | # playbook_2.yml 26 | targets: wordpress-nginx/site.yml 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | wordpress-nginx/hosts 2 | .DS_Store -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | Ansible Examples 3 | ---------------- 4 | 5 | This repository contains examples and best practices for building Ansible Playbooks. 6 | 7 | -------------------------------------------------------------------------------- /jboss-standalone/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /jboss-standalone/README.md: -------------------------------------------------------------------------------- 1 | ## Standalone JBoss Deployment 2 | 3 | - Requires Ansible 1.2 or newer 4 | - Expects CentOS/RHEL 6 or 7 hosts 5 | 6 | These playbooks deploy a very basic implementation of JBoss Application Server, 7 | version 7. To use them, first edit the `hosts` inventory file to contain the 8 | hostnames of the machines on which you want JBoss deployed, and edit the 9 | group_vars/all file to set any JBoss configuration parameters you need. 10 | 11 | Then run the playbook, like this: 12 | 13 | ansible-playbook -i hosts site.yml 14 | 15 | When the playbook run completes, you should be able to see the JBoss 16 | Application Server running on the ports you chose, on the target machines. 17 | 18 | This is a very simple playbook and could serve as a starting point for more 19 | complex JBoss-based projects. 20 | 21 | ## Application deployment 22 | 23 | The playbook deploy-application.yml may be used to deploy the HelloWorld and Ticket Monster demo applications to JBoss hosts that have been deployed using site.yml, as above. 24 | 25 | Run the playbook using: 26 | 27 | ansible-playbook -i hosts deploy-application.yml 28 | 29 | The HelloWorld application will be available at `http://:/helloworld` 30 | 31 | The Ticket Monster application will be available at `http://:/ticket-monster` 32 | 33 | ## Provisioning for Amazon Web Services 34 | 35 | A simple playbook is provided, as an example, to provision hosts in preparation for running this JBoss deployment example. 36 | 37 | ansible-playbook -i hosts demo-aws-launch.yml 38 | 39 | ### Ideas for Improvement 40 | 41 | Here are some ideas for ways that these playbooks could be extended: 42 | 43 | - Write a playbook or an Ansible module to configure JBoss users. 44 | - Extend this configuration to multiple application servers fronted by a load 45 | balancer or other web server frontend. 46 | 47 | We would love to see contributions and improvements, so please fork this 48 | repository on GitHub and send us your changes via pull requests. 49 | -------------------------------------------------------------------------------- /jboss-standalone/demo-aws-launch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Provision instances 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | 7 | # load AWS variables from this group vars file 8 | vars_files: 9 | - group_vars/all 10 | 11 | tasks: 12 | - name: Launch instances 13 | ec2: 14 | access_key: "{{ ec2_access_key }}" 15 | secret_key: "{{ ec2_secret_key }}" 16 | keypair: "{{ ec2_keypair }}" 17 | group: "{{ ec2_security_group }}" 18 | type: "{{ ec2_instance_type }}" 19 | image: "{{ ec2_image }}" 20 | region: "{{ ec2_region }}" 21 | instance_tags: "{'ansible_group':'jboss', 'type':'{{ ec2_instance_type }}', 'group':'{{ ec2_security_group }}', 'Name':'demo_''{{ tower_user_name }}'}" 22 | count: "{{ ec2_instance_count }}" 23 | wait: true 24 | register: ec2 25 | 26 | - name: Wait for SSH to come up 27 | wait_for: 28 | host: "{{ item.public_dns_name }}" 29 | port: 22 30 | delay: 60 31 | timeout: 320 32 | state: started 33 | with_items: "{{ ec2.instances }}" 34 | -------------------------------------------------------------------------------- /jboss-standalone/deploy-application.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys two simple applications to JBoss server. 3 | 4 | - hosts: all 5 | 6 | roles: 7 | # Optionally, (re)deploy JBoss here. 8 | # - jboss-standalone 9 | - java-app 10 | -------------------------------------------------------------------------------- /jboss-standalone/group_vars/all: -------------------------------------------------------------------------------- 1 | # Here are variables related to the standalone JBoss installation 2 | 3 | http_port: 8080 4 | https_port: 8443 5 | 6 | # AWS specific variables 7 | ec2_access_key: 8 | ec2_secret_key: 9 | ec2_region: us-east-1 10 | ec2_zone: 11 | ec2_image: ami-6c1e8f04 12 | ec2_instance_type: m1.small 13 | ec2_keypair: djohnson 14 | ec2_security_group: default 15 | ec2_instance_count: 3 16 | ec2_tag: demo 17 | ec2_tag_name_prefix: dj 18 | ec2_hosts: all 19 | wait_for_port: 22 20 | 21 | # This user name will be set by Tower, when run through Tower 22 | tower_user_name: admin 23 | -------------------------------------------------------------------------------- /jboss-standalone/hosts: -------------------------------------------------------------------------------- 1 | appserver1 2 | -------------------------------------------------------------------------------- /jboss-standalone/roles/java-app/files/jboss-helloworld.war: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/jboss-standalone/roles/java-app/files/jboss-helloworld.war -------------------------------------------------------------------------------- /jboss-standalone/roles/java-app/files/ticket-monster.war: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/jboss-standalone/roles/java-app/files/ticket-monster.war -------------------------------------------------------------------------------- /jboss-standalone/roles/java-app/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy application WAR file to host 3 | copy: 4 | src: jboss-helloworld.war 5 | dest: /tmp 6 | 7 | - name: Deploy HelloWorld to JBoss 8 | jboss: 9 | deploy_path: /usr/share/jboss-as/standalone/deployments/ 10 | src: /tmp/jboss-helloworld.war 11 | deployment: helloworld.war 12 | state: present 13 | 14 | - name: Copy application WAR file to host 15 | copy: 16 | src: ticket-monster.war 17 | dest: /tmp 18 | 19 | - name: Deploy Ticket Monster to JBoss 20 | jboss: 21 | deploy_path: /usr/share/jboss-as/standalone/deployments/ 22 | src: /tmp/ticket-monster.war 23 | deployment: ticket-monster.war 24 | state: present 25 | -------------------------------------------------------------------------------- /jboss-standalone/roles/jboss-standalone/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart jboss 3 | service: 4 | name: jboss 5 | state: restarted 6 | 7 | - name: restart iptables 8 | service: 9 | name: iptables 10 | state: restarted 11 | -------------------------------------------------------------------------------- /jboss-standalone/roles/jboss-standalone/templates/iptables-save: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | *filter 3 | :INPUT ACCEPT [0:0] 4 | :FORWARD ACCEPT [0:0] 5 | :OUTPUT ACCEPT [4:512] 6 | -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT 7 | -A INPUT -p icmp -j ACCEPT 8 | -A INPUT -i lo -j ACCEPT 9 | -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT 10 | -A INPUT -p tcp -m state --state NEW -m tcp --dport {{ http_port }} -j ACCEPT 11 | -A INPUT -p tcp -m state --state NEW -m tcp --dport {{ https_port }} -j ACCEPT 12 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 13 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 14 | COMMIT 15 | -------------------------------------------------------------------------------- /jboss-standalone/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys a simple standalone JBoss server. 3 | 4 | - hosts: all 5 | 6 | roles: 7 | - jboss-standalone 8 | -------------------------------------------------------------------------------- /lamp_haproxy/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | 7 | # AWS specific variables 8 | ec2_access_key: 9 | ec2_secret_key: 10 | ec2_region: us-east-1 11 | ec2_zone: 12 | ec2_image: ami-bc8131d4 13 | ec2_instance_type: m1.small 14 | ec2_keypair: djohnson 15 | ec2_security_group: default 16 | ec2_instance_count: 3 17 | ec2_tag: demo 18 | ec2_tag_name_prefix: dj 19 | ec2_hosts: all 20 | wait_for_port: 22 21 | 22 | # This user name will be set by Tower, when run through Tower 23 | tower_user_name: admin 24 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/group_vars/tag_ansible_group_dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | mysqlservice: mysqld 6 | mysql_port: 3306 7 | dbuser: root 8 | dbname: foodb 9 | upassword: abc 10 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/group_vars/tag_ansible_group_lbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for the HAproxy configuration 3 | 4 | # HAProxy supports "http" and "tcp". For SSL, SMTP, etc, use "tcp". 5 | mode: http 6 | 7 | # Port on which HAProxy should listen 8 | listenport: 8888 9 | 10 | # A name for the proxy daemon, this wil be the suffix in the logs. 11 | daemonname: myapplb 12 | 13 | # Balancing Algorithm. Available options: 14 | # roundrobin, source, leastconn, source, uri 15 | # (if persistance is required use, "source") 16 | balance: roundrobin 17 | 18 | # Ethernet interface on which the load balancer should listen 19 | # Defaults to the first interface. Change this to: 20 | # 21 | # iface: eth1 22 | # 23 | # ...to override. 24 | # 25 | iface: '{{ ansible_default_ipv4.interface }}' 26 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/group_vars/tag_ansible_group_webservers: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for the web server configuration 3 | 4 | # Ethernet interface on which the web server should listen. 5 | # Defaults to the first interface. Change this to: 6 | # 7 | # iface: eth1 8 | # 9 | # ...to override. 10 | # 11 | iface: '{{ ansible_default_ipv4.interface }}' 12 | 13 | # this is the repository that holds our sample webapp 14 | repository: https://github.com/bennojoy/mywebapp.git 15 | 16 | # this is the sha1sum of V5 of the test webapp. 17 | webapp_version: 351e47276cc66b018f4890a04709d4cc3d3edb0d 18 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/base-apache/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role installs httpd 3 | 4 | - name: Install http 5 | yum: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: 9 | - httpd 10 | - php 11 | - php-mysql 12 | - git 13 | 14 | - name: Configure SELinux to allow httpd to connect to remote database 15 | seboolean: 16 | name: httpd_can_network_connect_db 17 | state: true 18 | persistent: yes 19 | when: sestatus.rc != 0 20 | 21 | - name: http service state 22 | service: 23 | name: httpd 24 | state: started 25 | enabled: yes 26 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/files/RPM-GPG-KEY-EPEL-6: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 5 | JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B 6 | M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn 7 | XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 8 | pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV 9 | QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp 10 | Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq 11 | 3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu 12 | vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar 13 | 1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g 14 | YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB 15 | tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS 16 | KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 17 | qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT 18 | 9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP 19 | Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS 20 | WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft 21 | HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF 22 | p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP 23 | x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 24 | wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J 25 | l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG 26 | iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR 27 | XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== 28 | =V/6I 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/files/epel.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 6 - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 9 | 10 | [epel-debuginfo] 11 | name=Extra Packages for Enterprise Linux 6 - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 17 | gpgcheck=1 18 | 19 | [epel-source] 20 | name=Extra Packages for Enterprise Linux 6 - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 26 | gpgcheck=1 27 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handlers for common notifications 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart iptables 8 | service: name=iptables state=restarted 9 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role contains common plays that will run on all nodes. 3 | 4 | - name: Install python bindings for SE Linux 5 | yum: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: 9 | - libselinux-python 10 | - libsemanage-python 11 | 12 | - name: Create the repository for EPEL 13 | copy: 14 | src: epel.repo 15 | dest: /etc/yum.repos.d/epel.repo 16 | 17 | - name: Create the GPG key for EPEL 18 | copy: 19 | src: RPM-GPG-KEY-EPEL-6 20 | dest: /etc/pki/rpm-gpg 21 | 22 | - name: install some useful nagios plugins 23 | yum: 24 | name: "{{ item }}" 25 | state: present 26 | with_items: 27 | - nagios-nrpe 28 | - nagios-plugins-swap 29 | - nagios-plugins-users 30 | - nagios-plugins-procs 31 | - nagios-plugins-load 32 | - nagios-plugins-disk 33 | 34 | - name: Install ntp 35 | yum: 36 | name: ntp 37 | state: present 38 | tags: ntp 39 | 40 | - name: Configure ntp file 41 | template: 42 | src: ntp.conf.j2 43 | dest: /etc/ntp.conf 44 | tags: ntp 45 | notify: restart ntp 46 | 47 | - name: Start the ntp service 48 | service: 49 | name: ntpd 50 | state: started 51 | enabled: yes 52 | tags: ntp 53 | 54 | # work around RHEL 7, for now 55 | - name: insert iptables template 56 | template: 57 | src: iptables.j2 58 | dest: /etc/sysconfig/iptables 59 | when: ansible_distribution_major_version != '7' 60 | notify: restart iptables 61 | 62 | - name: test to see if selinux is running 63 | command: getenforce 64 | register: sestatus 65 | changed_when: false 66 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/templates/iptables.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Manual customization of this file is not recommended. 3 | *filter 4 | :INPUT ACCEPT [0:0] 5 | :FORWARD ACCEPT [0:0] 6 | :OUTPUT ACCEPT [0:0] 7 | 8 | {% if (inventory_hostname in groups.tag_ansible_group_webservers) or (inventory_hostname in groups.tag_ansible_group_monitoring) %} 9 | -A INPUT -p tcp --dport 80 -j ACCEPT 10 | {% endif %} 11 | 12 | {% if (inventory_hostname in groups.tag_ansible_group_dbservers) %} 13 | -A INPUT -p tcp --dport 3306 -j ACCEPT 14 | {% endif %} 15 | 16 | {% if (inventory_hostname in groups.tag_ansible_group_lbservers) %} 17 | -A INPUT -p tcp --dport {{ listenport }} -j ACCEPT 18 | {% endif %} 19 | 20 | {% for host in groups.tag_ansible_group_monitoring %} 21 | -A INPUT -p tcp -s {{ hostvars[host].ansible_default_ipv4.address }} --dport 5666 -j ACCEPT 22 | {% endfor %} 23 | 24 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 25 | -A INPUT -p icmp -j ACCEPT 26 | -A INPUT -i lo -j ACCEPT 27 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 28 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 29 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 30 | COMMIT 31 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/db/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mysql 5 | service: name=mysqld state=restarted 6 | 7 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role will install MySQL and create db user and give permissions. 3 | 4 | - name: Install Mysql package 5 | yum: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: 9 | - mysql-server 10 | - MySQL-python 11 | 12 | - name: Configure SELinux to start mysql on any port 13 | seboolean: 14 | name: mysql_connect_any 15 | state: true 16 | persistent: yes 17 | when: sestatus.rc != 0 18 | 19 | - name: Create Mysql configuration file 20 | template: 21 | src: my.cnf.j2 22 | dest: /etc/my.cnf 23 | notify: 24 | - restart mysql 25 | 26 | - name: Start Mysql Service 27 | service: 28 | name: mysqld 29 | state: started 30 | enabled: yes 31 | 32 | - name: Create Application Database 33 | mysql_db: 34 | name: "{{ dbname }}" 35 | state: present 36 | 37 | - name: Create Application DB User 38 | mysql_user: 39 | name: "{{ dbuser }}" 40 | password: "{{ upassword }}" 41 | priv: "*.*:ALL" 42 | host: '%' 43 | state: present 44 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/db/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handlers for HAproxy 3 | 4 | - name: restart haproxy 5 | service: name=haproxy state=restarted 6 | 7 | - name: reload haproxy 8 | service: name=haproxy state=reloaded 9 | 10 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role installs HAProxy and configures it. 3 | 4 | - name: Download and install haproxy 5 | yum: 6 | name: haproxy 7 | state: present 8 | 9 | - name: Configure the haproxy cnf file with hosts 10 | template: 11 | src: haproxy.cfg.j2 12 | dest: /etc/haproxy/haproxy.cfg 13 | notify: restart haproxy 14 | 15 | - name: Start the haproxy service 16 | service: 17 | name: haproxy 18 | state: started 19 | enabled: yes 20 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local2 3 | 4 | chroot /var/lib/haproxy 5 | pidfile /var/run/haproxy.pid 6 | maxconn 4000 7 | user root 8 | group root 9 | daemon 10 | 11 | # turn on stats unix socket 12 | stats socket /var/lib/haproxy/stats level admin 13 | 14 | defaults 15 | mode {{ mode }} 16 | log global 17 | option httplog 18 | option dontlognull 19 | option http-server-close 20 | option forwardfor except 127.0.0.0/8 21 | option redispatch 22 | retries 3 23 | timeout http-request 10s 24 | timeout queue 1m 25 | timeout connect 10s 26 | timeout client 1m 27 | timeout server 1m 28 | timeout http-keep-alive 10s 29 | timeout check 10s 30 | maxconn 3000 31 | 32 | backend app 33 | {% for host in groups.tag_ansible_group_lbservers %} 34 | listen {{ daemonname }} 0.0.0.0:{{ listenport }} 35 | {% endfor %} 36 | balance {{ balance }} 37 | {% for host in groups.tag_ansible_group_webservers %} 38 | server {{ host }} {{ hostvars[host]['ansible_' + iface].ipv4.address }}:{{ httpd_port }} 39 | {% endfor %} 40 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/files/ansible-managed-services.cfg: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | # service checks to be applied to all hosts 4 | 5 | define service { 6 | use local-service 7 | host_name localhost 8 | service_description Root Partition 9 | check_command check_local_disk!20%!10%!/ 10 | } 11 | 12 | define service { 13 | use local-service 14 | host_name * 15 | service_description Current Users 16 | check_command check_local_users!20!50 17 | } 18 | 19 | 20 | define service { 21 | use local-service 22 | host_name * 23 | service_description Total Processes 24 | check_command check_local_procs!250!400!RSZDT 25 | } 26 | 27 | define service { 28 | use local-service 29 | host_name * 30 | service_description Current Load 31 | check_command check_local_load!5.0,4.0,3.0!10.0,6.0,4.0 32 | } 33 | 34 | define service { 35 | use local-service 36 | host_name * 37 | service_description Swap Usage 38 | check_command check_local_swap!20!10 39 | } 40 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers for nagios 3 | - name: restart httpd 4 | service: name=httpd state=restarted 5 | 6 | - name: restart nagios 7 | service: name=nagios state=restarted 8 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This will install nagios 3 | 4 | - name: install nagios 5 | yum: 6 | pkg: "{{ item }}" 7 | state: present 8 | with_items: 9 | - nagios 10 | - nagios-plugins 11 | - nagios-plugins-nrpe 12 | - nagios-plugins-ping 13 | - nagios-plugins-ssh 14 | - nagios-plugins-http 15 | - nagios-plugins-mysql 16 | - nagios-devel 17 | notify: restart httpd 18 | 19 | - name: create nagios config dir 20 | file: 21 | path: /etc/nagios/ansible-managed 22 | state: directory 23 | 24 | - name: configure nagios 25 | copy: 26 | src: nagios.cfg 27 | dest: /etc/nagios/nagios.cfg 28 | notify: restart nagios 29 | 30 | - name: configure localhost monitoring 31 | copy: 32 | src: localhost.cfg 33 | dest: /etc/nagios/objects/localhost.cfg 34 | notify: restart nagios 35 | 36 | - name: configure nagios services 37 | copy: 38 | src: ansible-managed-services.cfg 39 | dest: /etc/nagios/ 40 | 41 | - name: create the nagios object files 42 | template: 43 | src: "{{ item + '.j2' }}" 44 | dest: "/etc/nagios/ansible-managed/{{ item }}" 45 | with_items: 46 | - webservers.cfg 47 | - dbservers.cfg 48 | - lbservers.cfg 49 | notify: restart nagios 50 | 51 | - name: start nagios 52 | service: name=nagios state=started enabled=yes 53 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/templates/dbservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name dbservers 5 | alias Database Servers 6 | } 7 | 8 | {% for host in groups.tag_ansible_group_dbservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups dbservers 15 | } 16 | {% endfor %} 17 | 18 | #define service { 19 | # use local-service 20 | # hostgroup_name dbservers 21 | # service_description MySQL Database Server 22 | # check_command check_mysql 23 | # notifications_enabled 0 24 | #} 25 | 26 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/templates/lbservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name loadbalancers 5 | alias Load Balancers 6 | } 7 | 8 | {% for host in groups.tag_ansible_group_lbservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups loadbalancers 15 | } 16 | define service { 17 | use local-service 18 | host_name {{ host }} 19 | service_description HAProxy Load Balancer 20 | check_command check_http!-p{{ hostvars[host].listenport }} 21 | } 22 | {% endfor %} 23 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/nagios/templates/webservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name webservers 5 | alias Web Servers 6 | } 7 | 8 | {% for host in groups.tag_ansible_group_webservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups webservers 15 | } 16 | {% endfor %} 17 | 18 | # service checks to be applied to the web server 19 | define service { 20 | use local-service 21 | hostgroup_name webservers 22 | service_description webserver 23 | check_command check_http 24 | notifications_enabled 0 25 | } 26 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy the code from repository 3 | git: 4 | repo: "{{ repository }}" 5 | version: "{{ webapp_version }}" 6 | dest: /var/www/html/ 7 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/rolling_update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook does a rolling update for all webservers serially (one at a time). 3 | # Change the value of serial: to adjust the number of server to be updated. 4 | # 5 | # The three roles that apply to the webserver hosts will be applied: common, 6 | # base-apache, and web. So any changes to configuration, package updates, etc, 7 | # will be applied as part of the rolling update process. 8 | # 9 | 10 | # gather facts from monitoring nodes for iptables rules 11 | - hosts: tag_ansible_group_monitoring 12 | tasks: [] 13 | 14 | - hosts: tag_ansible_group_webservers 15 | serial: 1 16 | 17 | # These are the tasks to run before applying updates: 18 | pre_tasks: 19 | - name: disable nagios alerts for this host webserver service 20 | nagios: 'action=disable_alerts host={{ inventory_hostname }} services=webserver' 21 | delegate_to: "{{ item }}" 22 | with_items: "{{ groups.tag_ansible_group_monitoring }}" 23 | 24 | - name: disable the server in haproxy 25 | haproxy: 'state=disabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats' 26 | delegate_to: "{{ item }}" 27 | with_items: "{{ groups.tag_ansible_group_lbservers }}" 28 | 29 | roles: 30 | - web 31 | ## Optionally, re-run the common and base-apache roles 32 | #- common 33 | #- base-apache 34 | 35 | # These tasks run after the roles: 36 | post_tasks: 37 | - name: wait for webserver to come up 38 | wait_for: 'host={{ inventory_hostname }} port=80 state=started timeout=80' 39 | 40 | - name: enable the server in haproxy 41 | haproxy: 'state=enabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats' 42 | delegate_to: "{{ item }}" 43 | with_items: "{{ groups.tag_ansible_group_lbservers }}" 44 | 45 | - name: re-enable nagios alerts 46 | nagios: 'action=enable_alerts host={{ inventory_hostname }} services=webserver' 47 | delegate_to: "{{ item }}" 48 | with_items: "{{ groups.tag_ansible_group_monitoring }}" 49 | -------------------------------------------------------------------------------- /lamp_haproxy/aws/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## This playbook deploys the whole application stack in this site. 3 | 4 | # Apply common configuration to all hosts 5 | - hosts: all 6 | 7 | roles: 8 | - common 9 | 10 | # Configure and deploy database servers. 11 | - hosts: tag_ansible_group_dbservers 12 | 13 | roles: 14 | - db 15 | 16 | tags: 17 | - db 18 | 19 | # Configure and deploy the web servers. Note that we include two roles here, 20 | # the 'base-apache' role which simply sets up Apache, and 'web' which includes 21 | # our example web application. 22 | - hosts: tag_ansible_group_webservers 23 | 24 | roles: 25 | - base-apache 26 | - web 27 | 28 | tags: 29 | - web 30 | 31 | # Configure and deploy the load balancer(s). 32 | - hosts: tag_ansible_group_lbservers 33 | 34 | roles: 35 | - haproxy 36 | 37 | tags: 38 | - lb 39 | 40 | # Configure and deploy the Nagios monitoring node(s). 41 | - hosts: tag_ansible_group_monitoring 42 | 43 | roles: 44 | - base-apache 45 | - nagios 46 | 47 | tags: 48 | - monitoring 49 | -------------------------------------------------------------------------------- /lamp_haproxy/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | -------------------------------------------------------------------------------- /lamp_haproxy/group_vars/dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | mysqlservice: mysqld 6 | mysql_port: 3306 7 | dbuser: root 8 | dbname: foodb 9 | upassword: abc 10 | -------------------------------------------------------------------------------- /lamp_haproxy/group_vars/lbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for the HAproxy configuration 3 | 4 | # HAProxy supports "http" and "tcp". For SSL, SMTP, etc, use "tcp". 5 | mode: http 6 | 7 | # Port on which HAProxy should listen 8 | listenport: 8888 9 | 10 | # A name for the proxy daemon, this wil be the suffix in the logs. 11 | daemonname: myapplb 12 | 13 | # Balancing Algorithm. Available options: 14 | # roundrobin, source, leastconn, source, uri 15 | # (if persistance is required use, "source") 16 | balance: roundrobin 17 | 18 | # Ethernet interface on which the load balancer should listen 19 | # Defaults to the first interface. Change this to: 20 | # 21 | # iface: eth1 22 | # 23 | # ...to override. 24 | # 25 | iface: '{{ ansible_default_ipv4.interface }}' 26 | -------------------------------------------------------------------------------- /lamp_haproxy/group_vars/webservers: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for the web server configuration 3 | 4 | # Ethernet interface on which the web server should listen. 5 | # Defaults to the first interface. Change this to: 6 | # 7 | # iface: eth1 8 | # 9 | # ...to override. 10 | # 11 | iface: '{{ ansible_default_ipv4.interface }}' 12 | 13 | # this is the repository that holds our sample webapp 14 | repository: https://github.com/bennojoy/mywebapp.git 15 | 16 | # this is the sha1sum of V5 of the test webapp. 17 | webapp_version: 351e47276cc66b018f4890a04709d4cc3d3edb0d 18 | -------------------------------------------------------------------------------- /lamp_haproxy/hosts: -------------------------------------------------------------------------------- 1 | [webservers] 2 | web1 3 | web2 4 | 5 | [dbservers] 6 | db1 7 | 8 | [lbservers] 9 | lb1 10 | 11 | [monitoring] 12 | nagios 13 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/base-apache/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role installs httpd 3 | 4 | - name: Install http 5 | yum: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: 9 | - httpd 10 | - php 11 | - php-mysql 12 | - git 13 | 14 | - name: Configure SELinux to allow httpd to connect to remote database 15 | seboolean: 16 | name: httpd_can_network_connect_db 17 | state: true 18 | persistent: yes 19 | when: sestatus.rc != 0 20 | 21 | - name: http service state 22 | service: 23 | name: httpd 24 | state: started 25 | enabled: yes 26 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/files/RPM-GPG-KEY-EPEL-6: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 5 | JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B 6 | M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn 7 | XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 8 | pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV 9 | QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp 10 | Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq 11 | 3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu 12 | vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar 13 | 1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g 14 | YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB 15 | tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS 16 | KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 17 | qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT 18 | 9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP 19 | Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS 20 | WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft 21 | HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF 22 | p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP 23 | x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 24 | wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J 25 | l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG 26 | iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR 27 | XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== 28 | =V/6I 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/files/epel.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 6 - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 9 | 10 | [epel-debuginfo] 11 | name=Extra Packages for Enterprise Linux 6 - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 17 | gpgcheck=1 18 | 19 | [epel-source] 20 | name=Extra Packages for Enterprise Linux 6 - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 26 | gpgcheck=1 27 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handlers for common notifications 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart iptables 8 | service: name=iptables state=restarted 9 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role contains common plays that will run on all nodes. 3 | 4 | - name: Install python bindings for SE Linux 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - libselinux-python 8 | - libsemanage-python 9 | 10 | - name: Create the repository for EPEL 11 | copy: src=epel.repo dest=/etc/yum.repos.d/epel.repo 12 | 13 | - name: Create the GPG key for EPEL 14 | copy: src=RPM-GPG-KEY-EPEL-6 dest=/etc/pki/rpm-gpg 15 | 16 | - name: install some useful nagios plugins 17 | yum: name={{ item }} state=present 18 | with_items: 19 | - nagios-nrpe 20 | - nagios-plugins-swap 21 | - nagios-plugins-users 22 | - nagios-plugins-procs 23 | - nagios-plugins-load 24 | - nagios-plugins-disk 25 | 26 | - name: Install ntp 27 | yum: name=ntp state=present 28 | tags: ntp 29 | 30 | - name: Configure ntp file 31 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 32 | tags: ntp 33 | notify: restart ntp 34 | 35 | - name: Start the ntp service 36 | service: name=ntpd state=started enabled=yes 37 | tags: ntp 38 | 39 | # work around RHEL 7, for now 40 | - name: insert iptables template 41 | template: src=iptables.j2 dest=/etc/sysconfig/iptables 42 | when: ansible_distribution_major_version != '7' 43 | notify: restart iptables 44 | 45 | - name: test to see if selinux is running 46 | command: getenforce 47 | register: sestatus 48 | changed_when: false 49 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/templates/iptables.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Manual customization of this file is not recommended. 3 | *filter 4 | :INPUT ACCEPT [0:0] 5 | :FORWARD ACCEPT [0:0] 6 | :OUTPUT ACCEPT [0:0] 7 | 8 | {% if (inventory_hostname in groups.webservers) or (inventory_hostname in groups.monitoring) %} 9 | -A INPUT -p tcp --dport 80 -j ACCEPT 10 | {% endif %} 11 | 12 | {% if (inventory_hostname in groups.dbservers) %} 13 | -A INPUT -p tcp --dport 3306 -j ACCEPT 14 | {% endif %} 15 | 16 | {% if (inventory_hostname in groups.lbservers) %} 17 | -A INPUT -p tcp --dport {{ listenport }} -j ACCEPT 18 | {% endif %} 19 | 20 | {% for host in groups.monitoring %} 21 | -A INPUT -p tcp -s {{ hostvars[host].ansible_default_ipv4.address }} --dport 5666 -j ACCEPT 22 | {% endfor %} 23 | 24 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 25 | -A INPUT -p icmp -j ACCEPT 26 | -A INPUT -i lo -j ACCEPT 27 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 28 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 29 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 30 | COMMIT 31 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/db/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mysql 5 | service: name=mysqld state=restarted 6 | 7 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role will install MySQL and create db user and give permissions. 3 | 4 | - name: Install Mysql package 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - mysql-server 8 | - MySQL-python 9 | 10 | - name: Configure SELinux to start mysql on any port 11 | seboolean: name=mysql_connect_any state=true persistent=yes 12 | when: sestatus.rc != 0 13 | 14 | - name: Create Mysql configuration file 15 | template: src=my.cnf.j2 dest=/etc/my.cnf 16 | notify: 17 | - restart mysql 18 | 19 | - name: Start Mysql Service 20 | service: name=mysqld state=started enabled=yes 21 | 22 | - name: Create Application Database 23 | mysql_db: name={{ dbname }} state=present 24 | 25 | - name: Create Application DB User 26 | mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present 27 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/db/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handlers for HAproxy 3 | 4 | - name: restart haproxy 5 | service: name=haproxy state=restarted 6 | 7 | - name: reload haproxy 8 | service: name=haproxy state=reloaded 9 | 10 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role installs HAProxy and configures it. 3 | 4 | - name: Download and install haproxy 5 | yum: name=haproxy state=present 6 | 7 | - name: Configure the haproxy cnf file with hosts 8 | template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg 9 | notify: restart haproxy 10 | 11 | - name: Start the haproxy service 12 | service: name=haproxy state=started enabled=yes 13 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local2 3 | 4 | chroot /var/lib/haproxy 5 | pidfile /var/run/haproxy.pid 6 | maxconn 4000 7 | user root 8 | group root 9 | daemon 10 | 11 | # turn on stats unix socket 12 | stats socket /var/lib/haproxy/stats level admin 13 | 14 | defaults 15 | mode {{ mode }} 16 | log global 17 | option httplog 18 | option dontlognull 19 | option http-server-close 20 | option forwardfor except 127.0.0.0/8 21 | option redispatch 22 | retries 3 23 | timeout http-request 10s 24 | timeout queue 1m 25 | timeout connect 10s 26 | timeout client 1m 27 | timeout server 1m 28 | timeout http-keep-alive 10s 29 | timeout check 10s 30 | maxconn 3000 31 | 32 | backend app 33 | {% for host in groups.lbservers %} 34 | listen {{ daemonname }} 0.0.0.0:{{ listenport }} 35 | {% endfor %} 36 | balance {{ balance }} 37 | {% for host in groups.webservers %} 38 | server {{ host }} {{ hostvars[host]['ansible_' + iface].ipv4.address }}:{{ httpd_port }} 39 | {% endfor %} 40 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/files/ansible-managed-services.cfg: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | # service checks to be applied to all hosts 4 | 5 | define service { 6 | use local-service 7 | host_name localhost 8 | service_description Root Partition 9 | check_command check_local_disk!20%!10%!/ 10 | } 11 | 12 | define service { 13 | use local-service 14 | host_name * 15 | service_description Current Users 16 | check_command check_local_users!20!50 17 | } 18 | 19 | 20 | define service { 21 | use local-service 22 | host_name * 23 | service_description Total Processes 24 | check_command check_local_procs!250!400!RSZDT 25 | } 26 | 27 | define service { 28 | use local-service 29 | host_name * 30 | service_description Current Load 31 | check_command check_local_load!5.0,4.0,3.0!10.0,6.0,4.0 32 | } 33 | 34 | define service { 35 | use local-service 36 | host_name * 37 | service_description Swap Usage 38 | check_command check_local_swap!20!10 39 | } 40 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers for nagios 3 | - name: restart httpd 4 | service: name=httpd state=restarted 5 | 6 | - name: restart nagios 7 | service: name=nagios state=restarted 8 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This will install nagios 3 | 4 | - name: install nagios 5 | yum: pkg={{ item }} state=present 6 | with_items: 7 | - nagios 8 | - nagios-plugins 9 | - nagios-plugins-nrpe 10 | - nagios-plugins-ping 11 | - nagios-plugins-ssh 12 | - nagios-plugins-http 13 | - nagios-plugins-mysql 14 | - nagios-devel 15 | notify: restart httpd 16 | 17 | - name: create nagios config dir 18 | file: path=/etc/nagios/ansible-managed state=directory 19 | 20 | - name: configure nagios 21 | copy: src=nagios.cfg dest=/etc/nagios/nagios.cfg 22 | notify: restart nagios 23 | 24 | - name: configure localhost monitoring 25 | copy: src=localhost.cfg dest=/etc/nagios/objects/localhost.cfg 26 | notify: restart nagios 27 | 28 | - name: configure nagios services 29 | copy: src=ansible-managed-services.cfg dest=/etc/nagios/ 30 | 31 | - name: create the nagios object files 32 | template: src={{ item + ".j2" }} 33 | dest=/etc/nagios/ansible-managed/{{ item }} 34 | with_items: 35 | - webservers.cfg 36 | - dbservers.cfg 37 | - lbservers.cfg 38 | notify: restart nagios 39 | 40 | - name: start nagios 41 | service: name=nagios state=started enabled=yes 42 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/templates/dbservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name dbservers 5 | alias Database Servers 6 | } 7 | 8 | {% for host in groups.dbservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups dbservers 15 | } 16 | {% endfor %} 17 | 18 | #define service { 19 | # use local-service 20 | # hostgroup_name dbservers 21 | # service_description MySQL Database Server 22 | # check_command check_mysql 23 | # notifications_enabled 0 24 | #} 25 | 26 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/templates/lbservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name loadbalancers 5 | alias Load Balancers 6 | } 7 | 8 | {% for host in groups.lbservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups loadbalancers 15 | } 16 | define service { 17 | use local-service 18 | host_name {{ host }} 19 | service_description HAProxy Load Balancer 20 | check_command check_http!-p{{ hostvars[host].listenport }} 21 | } 22 | {% endfor %} 23 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/nagios/templates/webservers.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | define hostgroup { 4 | hostgroup_name webservers 5 | alias Web Servers 6 | } 7 | 8 | {% for host in groups.webservers %} 9 | define host { 10 | use linux-server 11 | host_name {{ host }} 12 | alias {{ host }} 13 | address {{ hostvars[host].ansible_default_ipv4.address }} 14 | hostgroups webservers 15 | } 16 | {% endfor %} 17 | 18 | # service checks to be applied to the web server 19 | define service { 20 | use local-service 21 | hostgroup_name webservers 22 | service_description webserver 23 | check_command check_http 24 | notifications_enabled 0 25 | } 26 | -------------------------------------------------------------------------------- /lamp_haproxy/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy the code from repository 3 | git: repo={{ repository }} version={{ webapp_version }} dest=/var/www/html/ 4 | -------------------------------------------------------------------------------- /lamp_haproxy/rolling_update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook does a rolling update for all webservers serially (one at a time). 3 | # Change the value of serial: to adjust the number of server to be updated. 4 | # 5 | # The three roles that apply to the webserver hosts will be applied: common, 6 | # base-apache, and web. So any changes to configuration, package updates, etc, 7 | # will be applied as part of the rolling update process. 8 | # 9 | 10 | # gather facts from monitoring nodes for iptables rules 11 | - hosts: monitoring 12 | tasks: [] 13 | 14 | - hosts: webservers 15 | serial: 1 16 | 17 | # These are the tasks to run before applying updates: 18 | pre_tasks: 19 | - name: disable nagios alerts for this host webserver service 20 | nagios: 'action=disable_alerts host={{ inventory_hostname }} services=webserver' 21 | delegate_to: "{{ item }}" 22 | with_items: "{{ groups.monitoring }}" 23 | 24 | - name: disable the server in haproxy 25 | haproxy: 'state=disabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats' 26 | delegate_to: "{{ item }}" 27 | with_items: "{{ groups.lbservers }}" 28 | 29 | roles: 30 | - common 31 | - base-apache 32 | - web 33 | 34 | # These tasks run after the roles: 35 | post_tasks: 36 | - name: wait for webserver to come up 37 | wait_for: 'host={{ inventory_hostname }} port=80 state=started timeout=80' 38 | 39 | - name: enable the server in haproxy 40 | haproxy: 'state=enabled backend=myapplb host={{ inventory_hostname }} socket=/var/lib/haproxy/stats' 41 | delegate_to: "{{ item }}" 42 | with_items: "{{ groups.lbservers }}" 43 | 44 | - name: re-enable nagios alerts 45 | nagios: 'action=enable_alerts host={{ inventory_hostname }} services=webserver' 46 | delegate_to: "{{ item }}" 47 | with_items: "{{ groups.monitoring }}" 48 | -------------------------------------------------------------------------------- /lamp_haproxy/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## This playbook deploys the whole application stack in this site. 3 | 4 | # Apply common configuration to all hosts 5 | - hosts: all 6 | 7 | roles: 8 | - common 9 | 10 | # Configure and deploy database servers. 11 | - hosts: dbservers 12 | 13 | roles: 14 | - db 15 | 16 | tags: 17 | - db 18 | 19 | # Configure and deploy the web servers. Note that we include two roles here, 20 | # the 'base-apache' role which simply sets up Apache, and 'web' which includes 21 | # our example web application. 22 | - hosts: webservers 23 | 24 | roles: 25 | - base-apache 26 | - web 27 | 28 | tags: 29 | - web 30 | 31 | # Configure and deploy the load balancer(s). 32 | - hosts: lbservers 33 | 34 | roles: 35 | - haproxy 36 | 37 | tags: 38 | - lb 39 | 40 | # Configure and deploy the Nagios monitoring node(s). 41 | - hosts: monitoring 42 | 43 | roles: 44 | - base-apache 45 | - nagios 46 | 47 | tags: 48 | - monitoring 49 | -------------------------------------------------------------------------------- /lamp_simple/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /lamp_simple/README.md: -------------------------------------------------------------------------------- 1 | Building a simple LAMP stack and deploying Application using Ansible Playbooks. 2 | ------------------------------------------- 3 | 4 | These playbooks require Ansible 1.2. 5 | 6 | These playbooks are meant to be a reference and starter's guide to building 7 | Ansible Playbooks. These playbooks were tested on CentOS 6.x so we recommend 8 | that you use CentOS or RHEL to test these modules. 9 | 10 | This LAMP stack can be on a single node or multiple nodes. The inventory file 11 | 'hosts' defines the nodes in which the stacks should be configured. 12 | 13 | [webservers] 14 | localhost 15 | 16 | [dbservers] 17 | bensible 18 | 19 | Here the webserver would be configured on the local host and the dbserver on a 20 | server called `bensible`. The stack can be deployed using the following 21 | command: 22 | 23 | ansible-playbook -i hosts site.yml 24 | 25 | Once done, you can check the results by browsing to http://localhost/index.php. 26 | You should see a simple test page and a list of databases retrieved from the 27 | database server. 28 | -------------------------------------------------------------------------------- /lamp_simple/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | repository: https://github.com/bennojoy/mywebapp.git 7 | -------------------------------------------------------------------------------- /lamp_simple/group_vars/dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | mysqlservice: mysqld 6 | mysql_port: 3306 7 | dbuser: foouser 8 | dbname: foodb 9 | upassword: abc 10 | -------------------------------------------------------------------------------- /lamp_simple/hosts: -------------------------------------------------------------------------------- 1 | [webservers] 2 | web3 3 | 4 | [dbservers] 5 | web2 6 | 7 | 8 | -------------------------------------------------------------------------------- /lamp_simple/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart ntp 6 | service: 7 | name: ntpd 8 | state: restarted 9 | -------------------------------------------------------------------------------- /lamp_simple/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 3 | 4 | - name: Install ntp 5 | yum: 6 | name: ntp 7 | state: present 8 | tags: ntp 9 | 10 | - name: Configure ntp file 11 | template: 12 | src: ntp.conf.j2 13 | dest: /etc/ntp.conf 14 | tags: ntp 15 | notify: restart ntp 16 | 17 | - name: Start the ntp service 18 | service: 19 | name: ntpd 20 | state: started 21 | enabled: yes 22 | tags: ntp 23 | 24 | - name: test to see if selinux is running 25 | command: getenforce 26 | register: sestatus 27 | changed_when: false 28 | -------------------------------------------------------------------------------- /lamp_simple/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /lamp_simple/roles/db/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mysql 5 | service: 6 | name: mysqld 7 | state: restarted 8 | 9 | - name: restart iptables 10 | service: 11 | name: iptables 12 | state: restarted 13 | -------------------------------------------------------------------------------- /lamp_simple/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will install mysql and create db user and give permissions. 3 | 4 | - name: Install Mysql package 5 | yum: 6 | name: "{{ item }}" 7 | state: installed 8 | with_items: 9 | - mysql-server 10 | - MySQL-python 11 | - libselinux-python 12 | - libsemanage-python 13 | 14 | - name: Configure SELinux to start mysql on any port 15 | seboolean: 16 | name: mysql_connect_any 17 | state: true 18 | persistent: yes 19 | when: sestatus.rc != 0 20 | 21 | - name: Create Mysql configuration file 22 | template: 23 | src: my.cnf.j2 24 | dest: /etc/my.cnf 25 | notify: 26 | - restart mysql 27 | 28 | - name: Start Mysql Service 29 | service: 30 | name: mysqld 31 | state: started 32 | enabled: yes 33 | 34 | - name: insert iptables rule 35 | lineinfile: 36 | dest: /etc/sysconfig/iptables 37 | state: present 38 | regexp: "{{ mysql_port }}" 39 | insertafter: "^:OUTPUT " 40 | line: "-A INPUT -p tcp --dport {{ mysql_port }} -j ACCEPT" 41 | notify: restart iptables 42 | 43 | - name: Create Application Database 44 | mysql_db: 45 | name: "{{ dbname }}" 46 | state: present 47 | 48 | - name: Create Application DB User 49 | mysql_user: 50 | name: "{{ dbuser }}" 51 | password: "{{ upassword }}" 52 | priv: "*.*:ALL" 53 | host: '%' 54 | state: present 55 | -------------------------------------------------------------------------------- /lamp_simple/roles/db/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /lamp_simple/roles/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler for the webtier: handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart iptables 6 | service: 7 | name: iptables 8 | state: restarted 9 | -------------------------------------------------------------------------------- /lamp_simple/roles/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks are responsible for copying the latest dev/production code from 3 | # the version control system. 4 | 5 | - name: Copy the code from repository 6 | git: 7 | repo: "{{ repository }}" 8 | dest: /var/www/html/ 9 | 10 | - name: Creates the index.php file 11 | template: 12 | src: index.php.j2 13 | dest: /var/www/html/index.php 14 | -------------------------------------------------------------------------------- /lamp_simple/roles/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 3 | 4 | - name: Install http and php etc 5 | yum: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: 9 | - httpd 10 | - php 11 | - php-mysql 12 | - git 13 | - libsemanage-python 14 | - libselinux-python 15 | 16 | - name: insert iptables rule for httpd 17 | lineinfile: 18 | dest: /etc/sysconfig/iptables 19 | create: yes 20 | state: present 21 | regexp: "{{ httpd_port }}" 22 | insertafter: "^:OUTPUT " 23 | line: "-A INPUT -p tcp --dport {{ httpd_port }} -j ACCEPT" 24 | notify: restart iptables 25 | 26 | - name: http service state 27 | service: 28 | name: httpd 29 | state: started 30 | enabled: yes 31 | 32 | - name: Configure SELinux to allow httpd to connect to remote database 33 | seboolean: 34 | name: httpd_can_network_connect_db 35 | state: true 36 | persistent: yes 37 | when: sestatus.rc != 0 38 | -------------------------------------------------------------------------------- /lamp_simple/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | - include: copy_code.yml 4 | -------------------------------------------------------------------------------- /lamp_simple/roles/web/templates/index.php.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | "; 13 | echo "List of Databases:
"; 14 | {% for host in groups['dbservers'] %} 15 | $link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error($link)); 16 | {% endfor %} 17 | $res = mysqli_query($link, "SHOW DATABASES;"); 18 | while ($row = mysqli_fetch_assoc($res)) { 19 | echo $row['Database'] . "\n"; 20 | } 21 | ?> 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /lamp_simple/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the whole application stack in this site. 3 | 4 | - name: apply common configuration to all nodes 5 | hosts: all 6 | remote_user: root 7 | 8 | roles: 9 | - common 10 | 11 | - name: configure and deploy the webservers and application code 12 | hosts: webservers 13 | remote_user: root 14 | 15 | roles: 16 | - web 17 | 18 | - name: deploy MySQL and configure the databases 19 | hosts: dbservers 20 | remote_user: root 21 | 22 | roles: 23 | - db 24 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2015 Eugene Varnavsky (varnavruz@gmail.com) 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/README.md: -------------------------------------------------------------------------------- 1 | Building a simple LAMP stack and deploying Application using Ansible Playbooks. 2 | ------------------------------------------- 3 | 4 | These playbooks require Ansible 1.2. 5 | 6 | These playbooks are meant to be a reference and starter's guide to building 7 | Ansible Playbooks. These playbooks were tested on CentOS 7.x so we recommend 8 | that you use CentOS or RHEL to test these modules. 9 | 10 | RHEL7 version reflects changes in Red Hat Enterprise Linux and CentOS 7: 11 | 1. Network device naming scheme has changed 12 | 2. iptables is replaced with firewalld 13 | 3. MySQL is replaced with MariaDB 14 | 15 | This LAMP stack can be on a single node or multiple nodes. The inventory file 16 | 'hosts' defines the nodes in which the stacks should be configured. 17 | 18 | [webservers] 19 | localhost 20 | 21 | [dbservers] 22 | bensible 23 | 24 | Here the webserver would be configured on the local host and the dbserver on a 25 | server called `bensible`. The stack can be deployed using the following 26 | command: 27 | 28 | ansible-playbook -i hosts site.yml 29 | 30 | Once done, you can check the results by browsing to http://localhost/index.php. 31 | You should see a simple test page and a list of databases retrieved from the 32 | database server. 33 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | repository: https://github.com/bennojoy/mywebapp.git 7 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/group_vars/dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | mysqlservice: mysqld 6 | mysql_port: 3306 7 | dbuser: foouser 8 | dbname: foodb 9 | upassword: abc 10 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/hosts: -------------------------------------------------------------------------------- 1 | [webservers] 2 | webserver.local 3 | 4 | [dbservers] 5 | dbserver.local 6 | 7 | 8 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart ntp 6 | service: name=ntpd state=restarted 7 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 3 | 4 | - name: Install ntp 5 | yum: name=ntp state=present 6 | tags: ntp 7 | 8 | - name: Install common dependencies 9 | yum: name={{ item }} state=installed 10 | with_items: 11 | - libselinux-python 12 | - libsemanage-python 13 | - firewalld 14 | 15 | - name: Configure ntp file 16 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 17 | tags: ntp 18 | notify: restart ntp 19 | 20 | - name: Start the ntp service 21 | service: name=ntpd state=started enabled=yes 22 | tags: ntp 23 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mariadb 5 | service: name=mariadb state=restarted 6 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will install MariaDB and create db user and give permissions. 3 | 4 | - name: Install MariaDB package 5 | yum: name={{ item }} state=installed 6 | with_items: 7 | - mariadb-server 8 | - MySQL-python 9 | 10 | - name: Configure SELinux to start mysql on any port 11 | seboolean: name=mysql_connect_any state=true persistent=yes 12 | 13 | - name: Create Mysql configuration file 14 | template: src=my.cnf.j2 dest=/etc/my.cnf 15 | notify: 16 | - restart mariadb 17 | 18 | - name: Create MariaDB log file 19 | file: path=/var/log/mysqld.log state=touch owner=mysql group=mysql mode=0775 20 | 21 | - name: Create MariaDB PID directory 22 | file: path=/var/run/mysqld state=directory owner=mysql group=mysql mode=0775 23 | 24 | - name: Start MariaDB Service 25 | service: name=mariadb state=started enabled=yes 26 | 27 | - name: Start firewalld 28 | service: name=firewalld state=started enabled=yes 29 | 30 | - name: insert firewalld rule 31 | firewalld: port={{ mysql_port }}/tcp permanent=true state=enabled immediate=yes 32 | 33 | - name: Create Application Database 34 | mysql_db: name={{ dbname }} state=present 35 | 36 | - name: Create Application DB User 37 | mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present 38 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks are responsible for copying the latest dev/production code from 3 | # the version control system. 4 | 5 | - name: Copy the code from repository 6 | git: repo={{ repository }} dest=/var/www/html/ 7 | 8 | - name: Creates the index.php file 9 | template: src=index.php.j2 dest=/var/www/html/index.php 10 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 3 | 4 | - name: Install httpd and php 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - httpd 8 | - php 9 | - php-mysql 10 | 11 | - name: Install web role specific dependencies 12 | yum: name={{ item }} state=installed 13 | with_items: 14 | - git 15 | 16 | - name: Start firewalld 17 | service: name=firewalld state=started enabled=yes 18 | 19 | - name: insert firewalld rule for httpd 20 | firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 21 | 22 | - name: http service state 23 | service: name=httpd state=started enabled=yes 24 | 25 | - name: Configure SELinux to allow httpd to connect to remote database 26 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 27 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | - include: copy_code.yml 4 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/templates/index.php.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | "; 13 | echo "List of Databases:
"; 14 | {% for host in groups['dbservers'] %} 15 | $link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error($link)); 16 | {% endfor %} 17 | $res = mysqli_query($link, "SHOW DATABASES;"); 18 | while ($row = mysqli_fetch_assoc($res)) { 19 | echo $row['Database'] . "\n"; 20 | } 21 | ?> 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the whole application stack in this site. 3 | 4 | - name: apply common configuration to all nodes 5 | hosts: all 6 | remote_user: root 7 | 8 | roles: 9 | - common 10 | 11 | - name: configure and deploy the webservers and application code 12 | hosts: webservers 13 | remote_user: root 14 | 15 | roles: 16 | - web 17 | 18 | - name: deploy MySQL and configure the databases 19 | hosts: dbservers 20 | remote_user: root 21 | 22 | roles: 23 | - db 24 | -------------------------------------------------------------------------------- /language_features/ansible_pull.yml: -------------------------------------------------------------------------------- 1 | # ansible-pull setup 2 | # 3 | # on remote hosts, set up ansible to run periodically using the latest code 4 | # from a particular checkout, in pull based fashion, inverting Ansible's 5 | # usual push-based operating mode. 6 | # 7 | # This particular pull based mode is ideal for: 8 | # 9 | # (A) massive scale out 10 | # (B) continual system remediation 11 | # 12 | # DO NOT RUN THIS AGAINST YOUR HOSTS WITHOUT CHANGING THE repo_url 13 | # TO SOMETHING YOU HAVE PERSONALLY VERIFIED 14 | # 15 | # 16 | --- 17 | 18 | - hosts: pull_mode_hosts 19 | remote_user: root 20 | 21 | vars: 22 | 23 | # schedule is fed directly to cron 24 | schedule: '*/15 * * * *' 25 | 26 | # User to run ansible-pull as from cron 27 | cron_user: root 28 | 29 | # File that ansible will use for logs 30 | logfile: /var/log/ansible-pull.log 31 | 32 | # Directory to where repository will be cloned 33 | workdir: /var/lib/ansible/local 34 | 35 | # Repository to check out -- YOU MUST CHANGE THIS 36 | # repo must contain a local.yml file at top level 37 | #repo_url: git://github.com/sfromm/ansible-playbooks.git 38 | repo_url: SUPPLY_YOUR_OWN_GIT_URL_HERE 39 | 40 | tasks: 41 | 42 | - name: Install ansible 43 | yum: pkg=ansible state=installed 44 | 45 | - name: Create local directory to work from 46 | file: path={{workdir}} state=directory owner=root group=root mode=0751 47 | 48 | - name: Copy ansible inventory file to client 49 | copy: src=/etc/ansible/hosts dest=/etc/ansible/hosts 50 | owner=root group=root mode=0644 51 | 52 | - name: Create crontab entry to clone/pull git repository 53 | template: src=templates/etc_cron.d_ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644 54 | 55 | - name: Create logrotate entry for ansible-pull.log 56 | template: src=templates/etc_logrotate.d_ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644 57 | -------------------------------------------------------------------------------- /language_features/batch_size_control.yml: -------------------------------------------------------------------------------- 1 | # ordinarily, without the 'serial' keyword set, ansible will control all of your machines in a play at once, in parallel. 2 | # if you want to perform a rolling update, so that each play completes all the way through on a certain number of hosts 3 | # before moving on to the remaining hosts, use the 'serial' keyword like so: 4 | 5 | --- 6 | - hosts: all 7 | serial: 3 8 | 9 | # now each of the tasks below will complete on 3 hosts before moving on to the next 3, regardless of how many 10 | # hosts are selected by the "hosts:" line 11 | 12 | tasks: 13 | 14 | - name: ping 15 | ping: 16 | - name: ping2 17 | ping: 18 | 19 | 20 | -------------------------------------------------------------------------------- /language_features/cloudformation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack. 3 | # 4 | # This module requires that the boto python library is installed, and that you have your AWS credentials 5 | # in $HOME/.boto 6 | 7 | #The thought here is to bring up a bare infrastructure with CloudFormation, but use ansible to configure it. 8 | #I generally do this in 2 different playbook runs as to allow the ec2.py inventory to be updated. 9 | 10 | #This module also uses "complex arguments" which were introduced in ansible 1.1 allowing you to specify the 11 | #Cloudformation template parameters 12 | 13 | #This example launches a 3 node AutoScale group, with a security group, and an InstanceProfile with root permissions. 14 | 15 | #If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated. 16 | #If the parameters are different, the stack will also be updated. 17 | 18 | #CloudFormation stacks can take awhile to provision, if you are curious about its status, use the AWS 19 | #web console or one of the CloudFormation CLI's. 20 | 21 | #Example update -- try first launching the stack with 3 as the ClusterSize. After it is launched, change it to 4 22 | #and run the playbook again. 23 | 24 | - name: provision stack 25 | hosts: localhost 26 | connection: local 27 | gather_facts: false 28 | 29 | # Launch the cloudformation-example.json template. Register the output. 30 | 31 | tasks: 32 | - name: launch ansible cloudformation example 33 | cloudformation: > 34 | stack_name="ansible-cloudformation" state=present 35 | region=us-east-1 disable_rollback=true 36 | template=files/cloudformation-example.json 37 | args: 38 | template_parameters: 39 | KeyName: jmartin 40 | DiskType: ephemeral 41 | InstanceType: m1.small 42 | ClusterSize: 3 43 | register: stack 44 | - name: show stack outputs 45 | debug: msg="My stack outputs are {{stack.stack_outputs}}" 46 | -------------------------------------------------------------------------------- /language_features/complex_args.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # this is a bit of an advanced topic. 4 | # 5 | # generally Ansible likes to pass simple key=value arguments to modules. It 6 | # occasionally comes up though that you might want to write a module that takes 7 | # COMPLEX arguments, like lists and dictionaries. 8 | # 9 | # In order for this to happen, at least right now, it should be a Python 10 | # module, so it can leverage some common code in Ansible that makes this easy. 11 | # If you write a non-Python module, you can still pass data across, but only 12 | # hashes that do not contain lists or other hashes. If you write the Python 13 | # module, you can do anything. 14 | # 15 | # note that if you were to use BOTH the key=value form and the 'args' form for 16 | # passing data in, the behaviour is currently undefined. Ansible is working to 17 | # standardize on returning a duplicate parameter failure in this case but 18 | # modules which don't use the common module framework may do something 19 | # different. 20 | 21 | - hosts: localhost 22 | gather_facts: no 23 | 24 | vars: 25 | complex: 26 | ghostbusters: [ 'egon', 'ray', 'peter', 'winston' ] 27 | mice: [ 'pinky', 'brain', 'larry' ] 28 | 29 | tasks: 30 | 31 | - name: this is the basic way data passing works for any module 32 | action: ping data='Hi Mom' 33 | 34 | - name: of course this can also be written like so, which is shorter 35 | ping: data='Hi Mom' 36 | 37 | - name: but what if you have a complex module that needs complicated data? 38 | ping: 39 | data: 40 | moo: cow 41 | asdf: [1,2,3,4] 42 | 43 | - name: can we make that cleaner? sure! 44 | ping: 45 | data: "{{ complex }}" 46 | -------------------------------------------------------------------------------- /language_features/conditionals_part1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is a demo of conditional imports. This is a powerful concept 3 | # and can be used to use the same recipe for different types of hosts, 4 | # based on variables that bubble up from the hosts from tools such 5 | # as ohai or facter. 6 | # 7 | # Here's an example use case: 8 | # 9 | # what to do if the service for apache is named 'httpd' on CentOS 10 | # but is named 'apache' on Debian? 11 | 12 | 13 | # there is only one play in this playbook, it runs on all hosts 14 | # as root 15 | 16 | - hosts: all 17 | remote_user: root 18 | 19 | # we have a common list of variables stored in /vars/external_vars.yml 20 | # that we will always import 21 | 22 | # next, we want to import files that are different per operating system 23 | # and if no per operating system file is found, load a defaults file. 24 | # for instance, if the OS was "CentOS", we'd try to load vars/CentOS.yml. 25 | # if that was found, we would immediately stop. However if that wasn't 26 | # present, we'd try to load vars/defaults.yml. If that in turn was not 27 | # found, we would fail immediately, because we had gotten to the end of 28 | # the list without importing anything. 29 | 30 | vars_files: 31 | 32 | - "vars/external_vars.yml" 33 | 34 | - [ "vars/{{ facter_operatingsystem }}.yml", "vars/defaults.yml" ] 35 | 36 | # and this is just a regular task line from a playbook, as we're used to. 37 | # but with variables in it that come from above. Note that the variables 38 | # from above are *also* available in templates 39 | 40 | tasks: 41 | 42 | - name: ensure apache is latest 43 | action: "{{ packager }} pkg={{ apache }} state=latest" 44 | 45 | - name: ensure apache is running 46 | service: name={{ apache }} state=running 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /language_features/conditionals_part2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is a demo of conditional executions using 'when' statements, which can skip 3 | # certain tasks on machines/platforms/etc where they do not apply. 4 | 5 | - hosts: all 6 | remote_user: root 7 | 8 | vars: 9 | favcolor: "red" 10 | dog: "fido" 11 | cat: "whiskers" 12 | ssn: 8675309 13 | 14 | tasks: 15 | 16 | - name: "do this if my favcolor is blue, and my dog is named fido" 17 | shell: /bin/false 18 | when: favcolor == 'blue' and dog == 'fido' 19 | 20 | - name: "do this if my favcolor is not blue, and my dog is named fido" 21 | shell: /bin/true 22 | when: favcolor != 'blue' and dog == 'fido' 23 | 24 | - name: "do this if my SSN is over 9000" 25 | shell: /bin/true 26 | when: ssn > 9000 27 | 28 | - name: "do this if I have one of these SSNs" 29 | shell: /bin/true 30 | when: ssn in [ 8675309, 8675310, 8675311 ] 31 | 32 | - name: "do this if a variable named hippo is NOT defined" 33 | shell: /bin/true 34 | when: hippo is not defined 35 | 36 | - name: "do this if a variable named hippo is defined" 37 | shell: /bin/true 38 | when: hippo is defined 39 | 40 | 41 | -------------------------------------------------------------------------------- /language_features/custom_filters.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Demonstrate custom jinja2 filters 4 | hosts: all 5 | tasks: 6 | - template: src=templates/custom-filters.j2 dest=/tmp/custom-filters.txt 7 | -------------------------------------------------------------------------------- /language_features/delegation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # this is an example of how we can perform actions on a given host on behalf of all the hosts 4 | # in a play. 5 | # 6 | # The two main uses of this would be signalling an outage window for hosts that 7 | # we are going to start upgrading, or to take a machine out of rotation by talking to a load 8 | # balancer. 9 | # 10 | # This example cheats by replacing the load balancer script with the 'echo' command, 11 | # leaving actual communication with the load balancer as an exercise to the reader. In reality, 12 | # you could call anything you want, the main thing is that it should do something with 13 | # {{inventory_hostname}} 14 | 15 | # NOTE: see batch_size_control.yml for an example of the 'serial' keyword, which you almost certainly 16 | # want to use in this kind of example. Here we have a mocked up example that does something to 17 | # 5 hosts at a time 18 | 19 | - hosts: all 20 | serial: 5 21 | 22 | tasks: 23 | 24 | - name: take the machine out of rotation 25 | command: echo taking out of rotation {{inventory_hostname}} 26 | delegate_to: 127.0.0.1 27 | 28 | # here's an alternate notation if you are delegating to 127.0.0.1, you can use 'local_action' 29 | # instead of 'action' and leave off the 'delegate_to' part. 30 | # 31 | # - local_action: command echo taking out of rotation {{inventory_hostname}} 32 | 33 | - name: do several things on the actual host 34 | command: echo hi mom {{inventory_hostname}} 35 | 36 | - name: put machine back into rotation 37 | command: echo inserting into rotation {{inventory_hostname}} 38 | delegate_to: 127.0.0.1 39 | 40 | -------------------------------------------------------------------------------- /language_features/environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # it is often useful to be able to set the environment for one command and have that environment be totally 4 | # different for another. An example is you might use a HTTP proxy for some packages but not for others. 5 | # 6 | # in Ansible 1.1 and later, you can pass the environment to any module using either a dictionary variable 7 | # or a dictionary itself. 8 | 9 | 10 | - hosts: all 11 | remote_user: root 12 | 13 | # here we make a variable named "env" that is a dictionary 14 | vars: 15 | env: 16 | HI: test2 17 | http_proxy: http://proxy.example.com:8080 18 | 19 | tasks: 20 | 21 | # here we just define the dictionary directly and use it 22 | # (here $HI is the shell variable as nothing in Ansible will replace it) 23 | 24 | - shell: echo $HI 25 | environment: 26 | HI: test1 27 | 28 | # here we are using the "env" map variable above 29 | 30 | - shell: echo $HI 31 | environment: "{{ env }}" 32 | -------------------------------------------------------------------------------- /language_features/file_secontext.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is a demo of how to manage the selinux context using the file module 3 | - hosts: test 4 | remote_user: root 5 | tasks: 6 | - name: Change setype of /etc/exports to non-default value 7 | file: path=/etc/exports setype=etc_t 8 | - name: Change seuser of /etc/exports to non-default value 9 | file: path=/etc/exports seuser=unconfined_u 10 | - name: Set selinux context back to default value 11 | file: path=/etc/exports context=default 12 | - name: Create empty file 13 | command: /bin/touch /tmp/foo 14 | - name: Change setype of /tmp/foo 15 | file: path=/tmp/foo setype=default_t 16 | - name: Try to set secontext to default, but this will fail 17 | because of the lack of a default in the policy 18 | file: path=/tmp/foo context=default 19 | -------------------------------------------------------------------------------- /language_features/filter_plugins/custom_plugins.py: -------------------------------------------------------------------------------- 1 | # (c) 2012, Jeroen Hoekx 2 | # 3 | # This file is part of Ansible 4 | # 5 | # Ansible is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # Ansible is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with Ansible. If not, see . 17 | 18 | class FilterModule(object): 19 | ''' Custom filters are loaded by FilterModule objects ''' 20 | 21 | def filters(self): 22 | ''' FilterModule objects return a dict mapping filter names to 23 | filter functions. ''' 24 | return { 25 | 'generate_answer': self.generate_answer, 26 | } 27 | 28 | def generate_answer(self, value): 29 | return '42' 30 | -------------------------------------------------------------------------------- /language_features/get_url.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: webservers 3 | vars: 4 | - jquery_directory: /var/www/html/javascript 5 | - person: 'Susie%20Smith' 6 | tasks: 7 | - name: Create directory for jQuery 8 | file: dest={{jquery_directory}} state=directory mode=0755 9 | - name: Grab a bunch of jQuery stuff 10 | get_url: url=http://code.jquery.com/{{item}} dest={{jquery_directory}} mode=0444 11 | with_items: 12 | - jquery.min.js 13 | - mobile/latest/jquery.mobile.min.js 14 | - ui/jquery-ui-git.css 15 | #- name: Pass urlencoded name to CGI 16 | # get_url: url=http://example.com/name.cgi?name='{{person}}' dest=/tmp/test 17 | -------------------------------------------------------------------------------- /language_features/group_by.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example playbook to demonstrate the group_by action plugin. 3 | # 4 | # as we know, the setup module will automatically run in each play, and sets up various 5 | # facts. We can then create temporary (in memory only) groups based on those facts, which 6 | # are useful ways of selecting similar sets of hosts. 7 | # 8 | # Additionally, we can use the 'register' keyword in Ansible to set similar variables 9 | # and use those for grouping. This is not shown in this example. 10 | 11 | - hosts: all 12 | 13 | tasks: 14 | 15 | - name: Create a group of all hosts by operating system 16 | group_by: key={{ansible_distribution}}-{{ansible_distribution_version}} 17 | 18 | # the following host group does not exist in inventory and was created by the group_by 19 | # module. 20 | 21 | - hosts: CentOS-6.2 22 | 23 | tasks: 24 | 25 | - name: ping all CentOS 6.2 hosts 26 | ping: 27 | 28 | - hosts: CentOS-6.3 29 | 30 | tasks: 31 | 32 | - name: ping all CentOS 6.3 hosts 33 | ping: 34 | 35 | 36 | -------------------------------------------------------------------------------- /language_features/group_commands.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is a demo of how the group command works. 3 | 4 | - hosts: all 5 | remote_user: root 6 | become: yes 7 | become_method: sudo 8 | 9 | tasks: 10 | 11 | # Walk through group creation, modification, and deletion 12 | - name: create a group 13 | group: name=tset 14 | 15 | # You can only modify the group's gid 16 | - group: name=tset gid=7777 17 | 18 | # And finally remove the group 19 | - group: name=tset state=absent 20 | -------------------------------------------------------------------------------- /language_features/handlers/handlers.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # this is an example to show that handlers can be included from yaml files, 4 | # to promote reuse between different plays or even playbooks. They work 5 | # just like normal handlers. 6 | 7 | - name: restart apache 8 | service: name=httpd state=restarted 9 | - name: restart memcached 10 | service: name=memcached state=restarted 11 | -------------------------------------------------------------------------------- /language_features/loop_nested.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is a trivial example of how to do a nested loop. 3 | 4 | - hosts: all 5 | tasks: 6 | - shell: echo "nested test a={{ item[0] }} b={{ item[1] }} c={{ item[2] }}" 7 | with_nested: 8 | - [ 'red', 'blue', 'green' ] 9 | - [ 1, 2, 3 ] 10 | - [ 'up', 'down', 'strange'] 11 | 12 | # you can reference a raw variable name without putting it in {{ brackets }} 13 | 14 | - hosts: all 15 | vars: 16 | listvar1: 17 | - 'a' 18 | - 'b' 19 | - 'c' 20 | tasks: 21 | - shell: echo "nested test a={{ item[0] }} b={{ item[1] }}" 22 | with_nested: 23 | - listvar1 24 | - [ 1, 2, 3 ] 25 | -------------------------------------------------------------------------------- /language_features/loop_plugins.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # in addition to loop_with_items, the loop that works over a variable, ansible can do more sophisticated looping. 4 | 5 | # developer types: these are powered by 'lookup_plugins' should you ever decide to write your own 6 | # see lib/ansible/runner/lookup_plugins/fileglob.py -- they can do basically anything! 7 | 8 | - hosts: all 9 | gather_facts: no 10 | 11 | tasks: 12 | 13 | # this will copy a bunch of config files over -- dir must be created first 14 | 15 | - file: dest=/etc/fooapp state=directory 16 | 17 | - copy: src={{ item }} dest=/etc/fooapp/ owner=root mode=600 18 | with_fileglob: /playbooks/files/fooapp/* 19 | 20 | 21 | -------------------------------------------------------------------------------- /language_features/loop_with_items.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is an example of how to run repeated task elements over lists 3 | # of items, for example, installing multiple packages or configuring 4 | # multiple users 5 | 6 | - hosts: all 7 | remote_user: root 8 | 9 | tasks: 10 | 11 | - name: install packages 12 | yum: name={{ item }} state=installed 13 | with_items: 14 | - cobbler 15 | - httpd 16 | 17 | - name: configure users 18 | user: name={{ item }} state=present groups=wheel 19 | with_items: 20 | - testuser1 21 | - testuser2 22 | 23 | - name: remove users 24 | user: name={{ item }} state=absent 25 | with_items: 26 | - testuser1 27 | - testuser2 28 | 29 | - name: copy templates 30 | template: src={{ item.src }} dest={{ item.dest }} 31 | with_items: 32 | - src: templates/testsource1 33 | dest: /example/dest1/test.conf 34 | - src: templates/testsource2 35 | dest: /example/dest2/test.conf 36 | -------------------------------------------------------------------------------- /language_features/mysql.yml: -------------------------------------------------------------------------------- 1 | ## 2 | # Example Ansible playbook that uses the MySQL module. 3 | # 4 | 5 | --- 6 | - hosts: all 7 | remote_user: root 8 | 9 | tasks: 10 | 11 | - name: Create database user 12 | mysql_user: user=bob password=12345 priv=*.*:ALL state=present 13 | 14 | - name: Create database 15 | mysql_db: db=bobdata state=present 16 | 17 | - name: Ensure no user named 'sally' exists and delete if found. 18 | mysql_user: user=sally state=absent 19 | -------------------------------------------------------------------------------- /language_features/nested_playbooks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # it is possible to have top level playbook files import other playbook 3 | # files. For example, a playbook called could include three 4 | # different playbooks, such as webservers, workers, dbservers, etc. 5 | # 6 | # Running the site playbook would run all playbooks, while individual 7 | # playbooks could still be run directly. This is somewhat like 8 | # the tag feature and can be used in conjunction for very fine grained 9 | # control over what you want to target when running ansible. 10 | 11 | - name: this is a play at the top level of a file 12 | hosts: all 13 | remote_user: root 14 | tasks: 15 | - name: say hi 16 | tags: foo 17 | shell: echo "hi..." 18 | 19 | # and this is how we include another playbook, be careful and 20 | # don't recurse infinitely or anything. Note you can't use 21 | # any variables in the include path here. 22 | 23 | - include: intro_example.yml 24 | 25 | # and if we wanted, we can continue with more includes here, 26 | # or more plays inline in this file 27 | -------------------------------------------------------------------------------- /language_features/netscaler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # NetScaler module example 4 | # 5 | 6 | - hosts: web-pool 7 | serial: 3 8 | vars: 9 | nsc_host: nsc.example.com 10 | nsc_user: admin 11 | nsc_pass: nimda 12 | # type of the netscaler object you want to manipulate 13 | type: service 14 | # netscaler object name 15 | name: "{{facter_fqdn}}:8080" 16 | 17 | tasks: 18 | - name: disable service in the lb 19 | netscaler: nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=disable 20 | 21 | - name: deploy new code 22 | shell: yum upgrade -y 23 | 24 | - name: enable in the lb 25 | netscaler: nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=enable 26 | -------------------------------------------------------------------------------- /language_features/postgresql.yml: -------------------------------------------------------------------------------- 1 | ## 2 | # Example Ansible playbook that uses the PostgreSQL module. 3 | # 4 | # This installs PostgreSQL on an Ubuntu system, creates a database called 5 | # "myapp" and a user called "django" with password "mysupersecretpassword" 6 | # with access to the "myapp" database. 7 | # 8 | --- 9 | - hosts: webservers 10 | become: yes 11 | gather_facts: no 12 | 13 | tasks: 14 | - name: ensure apt cache is up to date 15 | apt: update_cache=yes 16 | - name: ensure packages are installed 17 | apt: name={{item}} 18 | with_items: 19 | - postgresql 20 | - libpq-dev 21 | - python-psycopg2 22 | 23 | - hosts: webservers 24 | become: yes 25 | become_user: postgres 26 | gather_facts: no 27 | 28 | vars: 29 | dbname: myapp 30 | dbuser: django 31 | dbpassword: mysupersecretpassword 32 | 33 | tasks: 34 | - name: ensure database is created 35 | postgresql_db: name={{dbname}} 36 | 37 | - name: ensure user has access to database 38 | postgresql_user: db={{dbname}} name={{dbuser}} password={{dbpassword}} priv=ALL 39 | 40 | - name: ensure user does not have unnecessary privilege 41 | postgresql_user: name={{dbuser}} role_attr_flags=NOSUPERUSER,NOCREATEDB 42 | 43 | - name: ensure no other user can access the database 44 | postgresql_privs: db={{dbname}} role=PUBLIC type=database priv=ALL state=absent 45 | 46 | -------------------------------------------------------------------------------- /language_features/prompts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # it is possible to ask for variables from the user at the start 4 | # of a playbook run, for example, as part of a release script. 5 | 6 | - hosts: all 7 | remote_user: root 8 | 9 | # regular variables are a dictionary of keys and values 10 | 11 | vars: 12 | this_is_a_regular_var: 'moo' 13 | so_is_this: 'quack' 14 | 15 | # alternatively, they can ALSO be passed in from the outside: 16 | # ansible-playbook foo.yml --extra-vars="foo=100 bar=101" 17 | # or through external inventory scripts (see online API docs) 18 | 19 | # here's basic mode prompting. Specify a hash of variable names and a prompt for 20 | # each. 21 | # 22 | # vars_prompt: 23 | # release_version: "product release version" 24 | 25 | # prompts can also be specified like this, allowing for hiding the prompt as 26 | # entered. In the future, this may also be used to support crypted variables 27 | 28 | vars_prompt: 29 | - name: "some_password" 30 | prompt: "Enter password" 31 | private: yes 32 | 33 | - name: "release_version" 34 | prompt: "Product release version" 35 | default: "my_default_version" 36 | private: no 37 | 38 | - name: "my_password2" 39 | prompt: "Enter password2" 40 | private: yes 41 | encrypt: "md5_crypt" 42 | confirm: yes 43 | salt_size: 7 44 | salt: "foo" 45 | 46 | # this is just a simple example to show that vars_prompt works, but 47 | # you might ask for a tag to use with the git module or perhaps 48 | # a package version to use with the yum module. 49 | 50 | tasks: 51 | 52 | - name: imagine this did something interesting with {{release_version}} 53 | shell: echo foo >> /tmp/{{release_version}}-alpha 54 | 55 | - name: look we crypted a password 56 | shell: echo my password is {{my_password2}} 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /language_features/rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: rabbitmq 3 | become: true 4 | become_method: sudo 5 | vars: 6 | rabbitmq_version: 3.0.2-1 7 | 8 | tasks: 9 | - name: ensure python-software-properties is installed 10 | apt: pkg=python-software-properties state=installed 11 | 12 | - name: add rabbitmq official apt repository 13 | apt_repository: repo='deb http://www.rabbitmq.com/debian/ testing main' state=present 14 | 15 | - name: add trusted key 16 | apt_key: url=https://www.rabbitmq.com/rabbitmq-signing-key-public.asc state=present 17 | 18 | - name: install package 19 | apt: name={{ item }} update_cache=yes state=installed 20 | with_items: 21 | - rabbitmq-server 22 | 23 | - name: enable rabbitmq plugins 24 | rabbitmq_plugin: names=rabbitmq_management,rabbitmq_tracing,rabbitmq_federation state=enabled 25 | notify: 26 | - restart rabbitmq 27 | 28 | - name: add users 29 | rabbitmq_user: user={{item.username}} password={{item.password}} tags=administrator,{{item.username}} vhost=/ configure_priv=.* write_priv=.* read_priv=.* state=present 30 | with_items: 31 | - { username: user1, password: changeme } 32 | - { username: user2, password: changeme } 33 | 34 | - name: remove default guest user 35 | rabbitmq_user: user=guest state=absent 36 | 37 | - name: ensure vhost /test is present 38 | rabbitmq_vhost: name=/test state=present 39 | 40 | handlers: 41 | - name: restart rabbitmq 42 | service: name=rabbitmq-server state=restarted 43 | -------------------------------------------------------------------------------- /language_features/register_logic.yml: -------------------------------------------------------------------------------- 1 | # here's a cool advanced topic about how to perform conditional logic in ansible without resorting 2 | # to writing your own module that defines facts. You can do that too, and it's easy to do, but 3 | # often you just want to run a command and then decide whether to run some steps or not. That's 4 | # easy to do, and here we'll show you how. 5 | 6 | - name: test playbook 7 | remote_user: root 8 | hosts: all 9 | 10 | tasks: 11 | 12 | # it is possible to save the result of any command in a named register. This variable will be made 13 | # available to tasks and templates made further down in the execution flow. 14 | 15 | - shell: grep hi /etc/motd 16 | ignore_errors: yes 17 | register: motd_result 18 | 19 | # and here we access the register. Note that variable is structured data because 20 | # it is a return from the command module. The shell module makes available variables such as 21 | # as 'stdout', 'stderr', and 'rc'. 22 | 23 | # here we run the next action only if the previous grep returned true 24 | 25 | - shell: echo "motd contains the word hi" 26 | when: motd_result.rc == 0 27 | 28 | # alternatively: 29 | 30 | - shell: echo "motd contains the word hi" 31 | when: motd_result.stdout.find('hi') != -1 32 | 33 | # or also: 34 | 35 | - shell: echo "motd contains word hi" 36 | when: "'hi' in motd_result.stdout" 37 | 38 | # you can use 'stdout_lines' to loop over the registered output lines 39 | - name: motd lines matching 'hi' 40 | shell: echo "{{ item }}" 41 | with_items: motd_result.stdout_lines 42 | 43 | # you can also split 'stdout' yourself 44 | - name: motd lines matching 'hi' 45 | shell: echo "{{ item }}" 46 | with_items: motd_result.stdout.split('\n') 47 | 48 | 49 | -------------------------------------------------------------------------------- /language_features/roles/foo/files/foo.txt: -------------------------------------------------------------------------------- 1 | This is a file 2 | 3 | -------------------------------------------------------------------------------- /language_features/roles/foo/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: blippy 4 | shell: echo notifier called, and the value of x is '{{ x }}' 5 | 6 | # within a role, it's possible to include other task files as well. By default, we 7 | # can reference files in the same directory without doing anything special: 8 | 9 | # - include: other.yml 10 | 11 | -------------------------------------------------------------------------------- /language_features/roles/foo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: copy operation 4 | copy: src=foo.txt dest=/tmp/roles_test1.txt 5 | 6 | - name: template operation 7 | template: src=foo.j2 dest=/tmp/roles_test2.txt 8 | notify: 9 | - blippy 10 | 11 | - name: demo that parameterized roles work 12 | shell: echo just FYI, param1={{ param1 }}, param2 ={{ param2 }} 13 | 14 | 15 | -------------------------------------------------------------------------------- /language_features/roles/foo/templates/foo.j2: -------------------------------------------------------------------------------- 1 | I am a {{ ansible_os_family }} distribution. 2 | -------------------------------------------------------------------------------- /language_features/roles/foo/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | x: '{{ ansible_machine }}' 3 | 4 | -------------------------------------------------------------------------------- /language_features/roletest2.yml: -------------------------------------------------------------------------------- 1 | # in Ansible 1.2 and later, roles allow easy best-practices organization of content 2 | # and maximize shareability of ansible building blocks. 3 | # 4 | # suppose a playbook applied to a group of hosts includes two roles, foo and bar. 5 | # 6 | # what do roles do in this case? 7 | # 8 | # listing the roles as foo and bar will auto include the following: 9 | # 10 | # tasks from ./roles/foo/tasks/main.yml, then ./roles/bar/tasks/main.yml 11 | # handlers from ./roles/foo/handlers/main.yml, then ./roles/bar/handlers/main.yml 12 | # vars from ./roles/foo/vars/main.yml, then ./roles/bar/vars/main.yml 13 | # 14 | # should any of these files not exist, that is ok, and they will simply not be loaded. 15 | # 16 | # should the task file in foo/tasks/main.yml want to include subtasks in other files, that 17 | # is also permitted. 18 | # 19 | # templates and copy operations also get smarter about where to look for content when using 20 | # roles. 21 | # 22 | # as an example, a task in foo/tasks/main.yml could copy or template a file by 23 | # referencing a "src=foo.j2" rather than having to explicitly path src=roles/foo/templates/foo.j2. 24 | 25 | --- 26 | 27 | - hosts: all 28 | roles: 29 | 30 | # a role can be listed flat like this: 31 | # 32 | # - common 33 | # - webservers 34 | 35 | # but you can also pass variables to them, so they can be parameterized. You can call 36 | # a role more than once with different parameters too. It might look like this: 37 | 38 | - role: foo 39 | param1: '{{ foo }}' 40 | param2: '{{ some_var1 + "/" + some_var2 }}' 41 | when: ansible_os_family == 'RedHat' 42 | 43 | # add as many roles as you like, roles takes a list of roles names 44 | # these paths can be qualified, but if bare, it will look from them in 45 | # roles/{{rolename}} relative to the playbook 46 | 47 | # explicit tasks and handlers can be used, but are not required. 48 | # they will run after the roles if present. 49 | 50 | tasks: 51 | 52 | # you can still have loose tasks/handlers and they will execute after roles 53 | 54 | - shell: echo 'this is a loose task' 55 | 56 | 57 | -------------------------------------------------------------------------------- /language_features/selective_file_sources.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is an example of how to template a file over using some variables derived 3 | # from the system. For instance, if you wanted to have different configuration 4 | # templates by OS version, this is a neat way to do it. Any Ansible facts, facter facts, 5 | # or ohai facts could be used to do this. 6 | 7 | - hosts: all 8 | 9 | tasks: 10 | 11 | - name: template a config file 12 | template: dest=/etc/imaginary_file.conf 13 | first_available_file: 14 | 15 | # first see if we have a file for this specific host 16 | - /srv/whatever/{{ansible_hostname}}.conf 17 | 18 | # next try to load something like CentOS6.2.conf 19 | - /srv/whatever/{{ansible_distribution}}{{ansible_distribution_version}}.conf 20 | 21 | # next see if there's a CentOS.conf 22 | - /srv/whatever/{{ansible_distribution}}.conf 23 | 24 | # finally give up and just use something generic 25 | - /srv/whatever/default 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /language_features/tags.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tags allow us to run all of a playbook or part of it. 3 | # 4 | # assume: ansible-playbook tags.yml --tags foo 5 | # 6 | # try this with: 7 | # --tags foo 8 | # --tags bar 9 | # --tags extra 10 | # 11 | # the value of a 'tags:' element can be a string or list 12 | # of tag names. Variables are not usable in tag names. 13 | 14 | - name: example play one 15 | hosts: all 16 | remote_user: root 17 | 18 | # any tags applied to the play are shorthand to applying 19 | # the tag to all tasks in it. Here, each task is given 20 | # the tag extra 21 | 22 | tags: 23 | - extra 24 | 25 | tasks: 26 | 27 | # this task will run if you don't specify any tags, 28 | # if you specify 'foo' or if you specify 'extra' 29 | 30 | - name: hi 31 | tags: ['foo'] 32 | shell: echo "first task ran" 33 | 34 | - name: example play two 35 | hosts: all 36 | remote_user: root 37 | tasks: 38 | - name: hi 39 | tags: 40 | - bar 41 | shell: echo "second task ran" 42 | - include: tasks/base.yml 43 | tags: 44 | - base 45 | 46 | -------------------------------------------------------------------------------- /language_features/tasks/base.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # this is the example of an included tasks file. It contains a flat list of tasks 4 | # they can notify other tasks, and have full access to variables from 'vars' 5 | # or 'vars_files' directives. Further, if ohai or facter were installed on 6 | # the remote machines, variables from those tools can be accessed on the 'action' 7 | # line or in templates. Just prefix with 'facter_' or 'ohai_' before the particular 8 | # variable. 9 | 10 | # possible uses for a included yaml file might be to represent a 'class' of a system 11 | # like defining what makes up a webserver, or you might have a common 'base.yml' 12 | # (like this) that might be applied to all your systems as well. 13 | 14 | - name: no selinux 15 | command: /usr/sbin/setenforce 0 16 | 17 | - name: no iptables 18 | service: name=iptables state=stopped 19 | 20 | - name: made up task just to show variables work here 21 | command: /bin/echo release is $release 22 | -------------------------------------------------------------------------------- /language_features/templates/custom-filters.j2: -------------------------------------------------------------------------------- 1 | 1 + 1 = {{ '1+1' | generate_answer }} 2 | -------------------------------------------------------------------------------- /language_features/templates/etc_cron.d_ansible-pull.j2: -------------------------------------------------------------------------------- 1 | # Cron job to git clone/pull a repo and then run locally 2 | {{ schedule }} {{ cron_user }} ansible-pull -d {{ workdir }} -U {{ repo_url }} >>{{ logfile }} 2>&1 3 | -------------------------------------------------------------------------------- /language_features/templates/etc_logrotate.d_ansible-pull.j2: -------------------------------------------------------------------------------- 1 | {{ logfile }} { 2 | rotate 7 3 | daily 4 | compress 5 | missingok 6 | notifempty 7 | } 8 | -------------------------------------------------------------------------------- /language_features/templates/foo.j2: -------------------------------------------------------------------------------- 1 | # This is a very simple Jinja2 template representing an imaginary configuration file 2 | # for an imaginary app. 3 | 4 | # this is an example of loading a fact from the setup module 5 | system={{ ansible_system }} 6 | 7 | # here is a variable that could be set in a playbook or inventory file 8 | http_port={{ http_port }} 9 | 10 | 11 | -------------------------------------------------------------------------------- /language_features/templates/hostvars.j2: -------------------------------------------------------------------------------- 1 | # example of how to get the ipaddress of every machine in the webservers group 2 | # for use in a template 3 | 4 | {% for host in groups['webservers'] %} 5 | HOST: {{ host }} IP: {{ hostvars[host]['ansible_all_ipv4_addresses'][0] }} 6 | {% endfor %} 7 | 8 | -------------------------------------------------------------------------------- /language_features/upgraded_vars.yml: -------------------------------------------------------------------------------- 1 | # this just shows some tricks possible with variables in Ansible 1.2 and later. 2 | 3 | --- 4 | 5 | - hosts: all 6 | 7 | vars: 8 | a_list: 9 | - a 10 | - b 11 | - c 12 | 13 | tasks: 14 | - debug: msg="hello {{ ansible_hostname.upper() }}" 15 | 16 | - shell: echo match 17 | when: 2 == 2 18 | 19 | - shell: echo no match 20 | when: 2 == 2 + 1 21 | 22 | - debug: msg="{{ ansible_os_family }}" 23 | 24 | - shell: echo {{ item }} 25 | with_items: a_list 26 | 27 | - shell: echo 'RedHat' 28 | when: ansible_os_family == 'RedHat' 29 | 30 | 31 | -------------------------------------------------------------------------------- /language_features/user_commands.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this is a demo of how the user commands work and how to reference salted passwords 3 | # in vars sections. You could also use vars_files if you like (see other examples) 4 | 5 | - hosts: all 6 | remote_user: root 7 | vars: 8 | # created with: 9 | # python -c 'import crypt; print crypt.crypt("This is my Password", "$1$SomeSalt$")' 10 | password: $1$SomeSalt$UqddPX3r4kH3UL5jq5/ZI. 11 | 12 | tasks: 13 | 14 | # Walk through account creation, modification, and deletion 15 | - name: test basic user account creation 16 | user: name=tset comment=TsetUser group=users shell=/sbin/nologin createhome=no 17 | 18 | # the following is just a simple example of how you don't have to include 19 | # the 'name' element for each task 20 | 21 | - user: name=tset comment=NyetUser 22 | - user: name=tset password={{password}} 23 | 24 | # The following will add the user to supplementary groups. 25 | 26 | # Add the user to the groups dialout and uucp. 27 | - user: name=tset groups=dialout,uucp 28 | 29 | # Add the user to the groups dialout and wheel, 30 | # This will remove tset from the group uucp. 31 | - user: name=tset groups=dialout,wheel 32 | 33 | # Add the user to the group uucp. Because append=yes, the user 34 | # will not be removed from the groups dialout and wheel. 35 | - user: name=tset groups=uucp append=yes 36 | 37 | # Finally, remove the user. 38 | - user: name=tset state=absent 39 | -------------------------------------------------------------------------------- /language_features/vars/CentOS.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apache: httpd 3 | packager: yum 4 | -------------------------------------------------------------------------------- /language_features/vars/defaults.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packager: apt 3 | apache: apache 4 | -------------------------------------------------------------------------------- /language_features/vars/external_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | alpha: one 3 | beta: two 4 | -------------------------------------------------------------------------------- /language_features/zfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## 3 | # Example Ansible playbook that uses the Zfs module. 4 | # 5 | 6 | - hosts: webservers 7 | gather_facts: no 8 | become: yes 9 | become_method: sudo 10 | 11 | vars: 12 | pool: rpool 13 | 14 | tasks: 15 | 16 | - name: Create a zfs file system 17 | zfs: name={{pool}}/var/log/httpd state=present 18 | 19 | - name: Create a zfs file system with quota of 10GiB and visible snapdir 20 | zfs: name={{pool}}/ansible quota='10G' snapdir=visible state=present 21 | 22 | - name: Create zfs snapshot of the above file system 23 | zfs: name={{pool}}/ansible@mysnapshot state=present 24 | 25 | - name: Create zfs volume named smallvol with a size of 10MiB 26 | zfs: name={{pool}}/smallvol volsize=10M state=present 27 | 28 | - name: Removes snapshot of rpool/oldfs 29 | zfs: name={{pool}}/oldfs@oldsnapshot state=absent 30 | 31 | - name: Removes file system rpool/oldfs 32 | zfs: name={{pool}}/oldfs state=absent 33 | 34 | 35 | -------------------------------------------------------------------------------- /mongodb/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /mongodb/group_vars/all: -------------------------------------------------------------------------------- 1 | # The global variable file mongodb installation 2 | 3 | # The chunksize for shards in MB 4 | mongos_chunk_size: 1 5 | 6 | # The port in which mongos server should listen on 7 | mongos_port: 8888 8 | 9 | # The port for mongo config server 10 | mongoc_port: 7777 11 | 12 | # The directory prefix where the database files would be stored 13 | mongodb_datadir_prefix: /data/ 14 | 15 | # The interface where the mongodb process should listen on. 16 | # Defaults to the first interface. Change this to: 17 | # 18 | # iface: eth1 19 | # 20 | # ...to override. 21 | # 22 | iface: '{{ ansible_default_ipv4.interface }}' 23 | 24 | # The password for admin user 25 | mongo_admin_pass: 123456 26 | -------------------------------------------------------------------------------- /mongodb/hosts: -------------------------------------------------------------------------------- 1 | #The site wide list of mongodb servers 2 | 3 | # the mongo servers need a mongod_port variable set, and they must not conflict. 4 | [mongo_servers] 5 | mongo1 mongod_port=2700 6 | mongo2 mongod_port=2701 7 | mongo3 mongod_port=2702 8 | mongo4 mongod_port=2703 9 | 10 | #The list of servers where replication should happen, by default include all servers 11 | [replication_servers] 12 | mongo4 13 | mongo3 14 | mongo1 15 | mongo2 16 | 17 | #The list of mongodb configuration servers, make sure it is 1 or 3 18 | [mongoc_servers] 19 | mongo1 20 | mongo2 21 | mongo3 22 | 23 | 24 | #The list of servers where mongos servers would run. 25 | [mongos_servers] 26 | mongo1 27 | mongo2 28 | 29 | 30 | -------------------------------------------------------------------------------- /mongodb/images/check.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/check.png -------------------------------------------------------------------------------- /mongodb/images/nosql_primer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/nosql_primer.png -------------------------------------------------------------------------------- /mongodb/images/replica_set.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/replica_set.png -------------------------------------------------------------------------------- /mongodb/images/scale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/scale.png -------------------------------------------------------------------------------- /mongodb/images/sharding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/sharding.png -------------------------------------------------------------------------------- /mongodb/images/site.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/mongodb/images/site.png -------------------------------------------------------------------------------- /mongodb/playbooks/testsharding.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # The playbook creates a new database test and populates data in the database to test the sharding. 3 | 4 | - hosts: $servername 5 | remote_user: root 6 | tasks: 7 | - name: Create a new database and user 8 | mongodb_user: login_user=admin login_password=${mongo_admin_pass} login_port=${mongos_port} database=test user=admin password=${mongo_admin_pass} state=present 9 | 10 | - name: Pause for the user to get created and replicated 11 | pause: minutes=3 12 | 13 | - name: Execute the collection creation script 14 | command: /usr/bin/mongo localhost:${mongos_port}/test -u admin -p ${mongo_admin_pass} /tmp/testsharding.js 15 | 16 | - name: Enable sharding on the database and collection 17 | command: /usr/bin/mongo localhost:${mongos_port}/admin -u admin -p ${mongo_admin_pass} /tmp/enablesharding.js 18 | -------------------------------------------------------------------------------- /mongodb/roles/common/files/10gen.repo.j2: -------------------------------------------------------------------------------- 1 | [10gen] 2 | name=10gen Repository 3 | baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64 4 | gpgcheck=0 5 | enabled=1 6 | 7 | -------------------------------------------------------------------------------- /mongodb/roles/common/files/RPM-GPG-KEY-EPEL-6: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 5 | JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B 6 | M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn 7 | XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 8 | pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV 9 | QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp 10 | Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq 11 | 3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu 12 | vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar 13 | 1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g 14 | YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB 15 | tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS 16 | KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 17 | qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT 18 | 9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP 19 | Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS 20 | WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft 21 | HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF 22 | p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP 23 | x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 24 | wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J 25 | l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG 26 | iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR 27 | XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== 28 | =V/6I 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /mongodb/roles/common/files/epel.repo.j2: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 6 - $basearch 3 | baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch 4 | #mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 9 | 10 | [epel-debuginfo] 11 | name=Extra Packages for Enterprise Linux 6 - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 17 | gpgcheck=1 18 | 19 | [epel-source] 20 | name=Extra Packages for Enterprise Linux 6 - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 26 | gpgcheck=1 27 | -------------------------------------------------------------------------------- /mongodb/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler for mongod 3 | 4 | - name: restart iptables 5 | service: name=iptables state=restarted 6 | -------------------------------------------------------------------------------- /mongodb/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This Playbook runs all the common plays in the deployment 3 | 4 | - name: Create the hosts file for all machines 5 | template: src=hosts.j2 dest=/etc/hosts 6 | 7 | - name: Create the repository for 10Gen 8 | copy: src=10gen.repo.j2 dest=/etc/yum.repos.d/10gen.repo 9 | 10 | - name: Create the EPEL Repository. 11 | copy: src=epel.repo.j2 dest=/etc/yum.repos.d/epel.repo 12 | 13 | - name: Create the GPG key for EPEL 14 | copy: src=RPM-GPG-KEY-EPEL-6 dest=/etc/pki/rpm-gpg 15 | 16 | - name: Create the mongod user 17 | user: name=mongod comment="MongoD" 18 | 19 | - name: Create the data directory for the namenode metadata 20 | file: path={{ mongodb_datadir_prefix }} owner=mongod group=mongod state=directory 21 | 22 | - name: Install the mongodb package 23 | yum: name={{ item }} state=installed 24 | with_items: 25 | - libselinux-python 26 | - mongo-10gen 27 | - mongo-10gen-server 28 | - bc 29 | - python-pip 30 | 31 | - name: Install the latest pymongo package 32 | pip: name=pymongo state=latest use_mirrors=no 33 | 34 | - name: Create the iptables file 35 | template: src=iptables.j2 dest=/etc/sysconfig/iptables 36 | notify: restart iptables 37 | -------------------------------------------------------------------------------- /mongodb/roles/common/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost 2 | {% for host in groups['all'] %} 3 | {{ hostvars[host]['ansible_' + iface].ipv4.address }} {{ host }} 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /mongodb/roles/common/templates/iptables.j2: -------------------------------------------------------------------------------- 1 | # Firewall configuration written by system-config-firewall 2 | # Manual customization of this file is not recommended. 3 | *filter 4 | :INPUT ACCEPT [0:0] 5 | :FORWARD ACCEPT [0:0] 6 | :OUTPUT ACCEPT [0:0] 7 | {% if 'mongoc_servers' in group_names %} 8 | -A INPUT -p tcp --dport 7777 -j ACCEPT 9 | {% endif %} 10 | {% if 'mongos_servers' in group_names %} 11 | -A INPUT -p tcp --dport 8888 -j ACCEPT 12 | {% endif %} 13 | {% if 'mongo_servers' in group_names %} 14 | {% for host in groups['mongo_servers'] %} 15 | -A INPUT -p tcp --dport {{ hostvars[host]['mongod_port'] }} -j ACCEPT 16 | {% endfor %} 17 | {% endif %} 18 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 19 | -A INPUT -p icmp -j ACCEPT 20 | -A INPUT -i lo -j ACCEPT 21 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 22 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 23 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 24 | COMMIT 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /mongodb/roles/mongoc/files/secret: -------------------------------------------------------------------------------- 1 | qGO6OYb64Uth9p9Tm8s9kqarydmAg1AUdgVz+ecjinaLZ1SlWxXMY1ug8AO7C/Vu 2 | D8kA3+rE37Gv1GuZyPYi87NSfDhKXo4nJWxI00BxTBppmv2PTzbi7xLCx1+8A1uQ 3 | 4XU0HA 4 | -------------------------------------------------------------------------------- /mongodb/roles/mongoc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the mongodb configurationdb servers 3 | 4 | - name: Create data directory for mongoc configuration server 5 | file: path={{ mongodb_datadir_prefix }}/configdb state=directory owner=mongod group=mongod 6 | 7 | - name: Create the mongo configuration server startup file 8 | template: src=mongoc.j2 dest=/etc/init.d/mongoc mode=0655 9 | 10 | 11 | - name: Create the mongo configuration server file 12 | template: src=mongoc.conf.j2 dest=/etc/mongoc.conf 13 | 14 | 15 | - name: Copy the keyfile for authentication 16 | copy: src=roles/mongod/files/secret dest={{ mongodb_datadir_prefix }}/secret owner=mongod group=mongod mode=0400 17 | 18 | - name: Start the mongo configuration server service 19 | command: creates=/var/lock/subsys/mongoc /etc/init.d/mongoc start 20 | 21 | - name: pause 22 | pause: seconds=20 23 | 24 | - name: add the admin user 25 | mongodb_user: database=admin name=admin password={{ mongo_admin_pass }} login_port={{ mongoc_port }} state=present 26 | ignore_errors: yes 27 | -------------------------------------------------------------------------------- /mongodb/roles/mongoc/templates/adduser.j2: -------------------------------------------------------------------------------- 1 | db.addUser('admin','{{ mongo_admin_pass }}') 2 | -------------------------------------------------------------------------------- /mongodb/roles/mongoc/templates/mongoc.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | #where to log 3 | logpath=/var/log/mongo/mongod-config.log 4 | 5 | logappend=true 6 | 7 | # fork and run in background 8 | fork = true 9 | 10 | port = {{ mongoc_port }} 11 | 12 | dbpath={{ mongodb_datadir_prefix }}configdb 13 | keyFile={{ mongodb_datadir_prefix }}secret 14 | # location of pidfile 15 | pidfilepath = /var/run/mongo/mongoc.pid 16 | 17 | configsvr=true 18 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/files/secret: -------------------------------------------------------------------------------- 1 | qGO6OYb64Uth9p9Tm8s9kqarydmAg1AUdgVz+ecjinaLZ1SlWxXMY1ug8AO7C/Vu 2 | D8kA3+rE37Gv1GuZyPYi87NSfDhKXo4nJWxI00BxTBppmv2PTzbi7xLCx1+8A1uQ 3 | 4XU0HA 4 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role deploys the mongod processes and sets up the replication set. 3 | 4 | - name: create data directory for mongodb 5 | file: path={{ mongodb_datadir_prefix }}/mongo-{{ inventory_hostname }} state=directory owner=mongod group=mongod 6 | delegate_to: '{{ item }}' 7 | with_items: groups.replication_servers 8 | 9 | - name: create log directory for mongodb 10 | file: path=/var/log/mongo state=directory owner=mongod group=mongod 11 | 12 | - name: create run directory for mongodb 13 | file: path=/var/run/mongo state=directory owner=mongod group=mongod 14 | 15 | - name: Create the mongodb startup file 16 | template: src=mongod.j2 dest=/etc/init.d/mongod-{{ inventory_hostname }} mode=0655 17 | delegate_to: '{{ item }}' 18 | with_items: groups.replication_servers 19 | 20 | 21 | - name: Create the mongodb configuration file 22 | template: src=mongod.conf.j2 dest=/etc/mongod-{{ inventory_hostname }}.conf 23 | delegate_to: '{{ item }}' 24 | with_items: groups.replication_servers 25 | 26 | - name: Copy the keyfile for authentication 27 | copy: src=secret dest={{ mongodb_datadir_prefix }}/secret owner=mongod group=mongod mode=0400 28 | 29 | 30 | - name: Start the mongodb service 31 | command: creates=/var/lock/subsys/mongod-{{ inventory_hostname }} /etc/init.d/mongod-{{ inventory_hostname }} start 32 | delegate_to: '{{ item }}' 33 | with_items: groups.replication_servers 34 | 35 | - name: Create the file to initialize the mongod replica set 36 | template: src=repset_init.j2 dest=/tmp/repset_init.js 37 | 38 | - name: Pause for a while 39 | pause: seconds=20 40 | 41 | - name: Initialize the replication set 42 | shell: /usr/bin/mongo --port "{{ mongod_port }}" /tmp/repset_init.js 43 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/tasks/shards.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #This Playbooks adds shards to the mongos servers once everythig is added 3 | 4 | 5 | - name: Create the file to initialize the mongod Shard 6 | template: src=shard_init.j2 dest=/tmp/shard_init_{{ inventory_hostname }}.js 7 | delegate_to: '{{ item }}' 8 | with_items: groups.mongos_servers 9 | 10 | - name: Add the shard to the mongos 11 | shell: /usr/bin/mongo localhost:{{ mongos_port }}/admin -u admin -p {{ mongo_admin_pass }} /tmp/shard_init_{{ inventory_hostname }}.js 12 | delegate_to: '{{ item }}' 13 | with_items: groups.mongos_servers 14 | 15 | 16 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/templates/mongod.conf.j2: -------------------------------------------------------------------------------- 1 | # mongo.conf 2 | smallfiles=true 3 | 4 | #where to log 5 | logpath=/var/log/mongo/mongod-{{ inventory_hostname }}.log 6 | 7 | logappend=true 8 | 9 | # fork and run in background 10 | fork = true 11 | 12 | port = {{ mongod_port }} 13 | 14 | dbpath={{ mongodb_datadir_prefix }}mongo-{{ inventory_hostname }} 15 | keyFile={{ mongodb_datadir_prefix }}/secret 16 | 17 | # location of pidfile 18 | pidfilepath = /var/run/mongo/mongod-{{ inventory_hostname }}.pid 19 | 20 | 21 | # Ping interval for Mongo monitoring server. 22 | #mms-interval = 23 | 24 | # Replication Options 25 | replSet={{ inventory_hostname }} 26 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/templates/repset_init.j2: -------------------------------------------------------------------------------- 1 | rs.initiate() 2 | sleep(13000) 3 | {% for host in groups['replication_servers'] %} 4 | rs.add("{{ host }}:{{ mongod_port }}") 5 | sleep(8000) 6 | {% endfor %} 7 | printjson(rs.status()) 8 | -------------------------------------------------------------------------------- /mongodb/roles/mongod/templates/shard_init.j2: -------------------------------------------------------------------------------- 1 | sh.addShard("{{ inventory_hostname}}/{{ inventory_hostname }}:{{ mongod_port }}") 2 | printjson(rs.status()) 3 | -------------------------------------------------------------------------------- /mongodb/roles/mongos/files/secret: -------------------------------------------------------------------------------- 1 | qGO6OYb64Uth9p9Tm8s9kqarydmAg1AUdgVz+ecjinaLZ1SlWxXMY1ug8AO7C/Vu 2 | D8kA3+rE37Gv1GuZyPYi87NSfDhKXo4nJWxI00BxTBppmv2PTzbi7xLCx1+8A1uQ 3 | 4XU0HA 4 | -------------------------------------------------------------------------------- /mongodb/roles/mongos/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #This Playbook configures the mongos service of mongodb 3 | 4 | - name: Create the mongos startup file 5 | template: src=mongos.j2 dest=/etc/init.d/mongos mode=0655 6 | 7 | 8 | - name: Create the mongos configuration file 9 | template: src=mongos.conf.j2 dest=/etc/mongos.conf 10 | 11 | - name: Copy the keyfile for authentication 12 | copy: src=roles/mongod/files/secret dest={{ mongodb_datadir_prefix }}/secret owner=mongod group=mongod mode=0400 13 | 14 | - name: Start the mongos service 15 | command: creates=/var/lock/subsys/mongos /etc/init.d/mongos start 16 | - name: pause 17 | pause: seconds=20 18 | 19 | - name: copy the file for shard test 20 | template: src=testsharding.j2 dest=/tmp/testsharding.js 21 | 22 | - name: copy the file enable sharding 23 | template: src=enablesharding.j2 dest=/tmp/enablesharding.js 24 | -------------------------------------------------------------------------------- /mongodb/roles/mongos/templates/enablesharding.j2: -------------------------------------------------------------------------------- 1 | db.runCommand( { enableSharding : "test" } ) 2 | db.runCommand( { shardCollection : "test.test_collection", key : {"number":1} }) 3 | 4 | -------------------------------------------------------------------------------- /mongodb/roles/mongos/templates/mongos.conf.j2: -------------------------------------------------------------------------------- 1 | #where to log 2 | logpath=/var/log/mongo/mongos.log 3 | 4 | logappend=true 5 | 6 | # fork and run in background 7 | fork = true 8 | 9 | port = {{ mongos_port }} 10 | {% set hosts = '' %} 11 | {% for host in groups['mongoc_servers'] %} 12 | {% if loop.last %} 13 | {% set hosts = hosts + host + ':' ~ mongoc_port %} 14 | configdb = {{ hosts }} 15 | {% else %} 16 | {% set hosts = hosts + host + ':' ~ mongoc_port + ',' %} 17 | {% endif %} 18 | {% endfor %} 19 | 20 | # location of pidfile 21 | pidfilepath = /var/run/mongodb/mongos.pid 22 | keyFile={{ mongodb_datadir_prefix }}/secret 23 | chunkSize={{ mongos_chunk_size }} 24 | -------------------------------------------------------------------------------- /mongodb/roles/mongos/templates/testsharding.j2: -------------------------------------------------------------------------------- 1 | people = ["Marc", "Bill", "George", "Eliot", "Matt", "Trey", "Tracy", "Greg", "Steve", "Kristina", "Katie", "Jeff"]; 2 | 3 | for(var i=0; i<100000; i++){ 4 | name = people[Math.floor(Math.random()*people.length)]; 5 | user_id = i; 6 | boolean = [true, false][Math.floor(Math.random()*2)]; 7 | added_at = new Date(); 8 | number = Math.floor(Math.random()*10001); 9 | db.test_collection.save({"name":name, "user_id":user_id, "boolean": boolean, "added_at":added_at, "number":number }); 10 | } 11 | db.test_collection.ensureIndex({number:1}) 12 | 13 | -------------------------------------------------------------------------------- /mongodb/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This Playbook would deploy the whole mongodb cluster with replication and sharding. 3 | 4 | - hosts: all 5 | roles: 6 | - role: common 7 | 8 | - hosts: mongo_servers 9 | roles: 10 | - role: mongod 11 | 12 | - hosts: mongoc_servers 13 | roles: 14 | - role: mongoc 15 | 16 | - hosts: mongos_servers 17 | roles: 18 | - role: mongos 19 | 20 | - hosts: mongo_servers 21 | tasks: 22 | - include: roles/mongod/tasks/shards.yml 23 | -------------------------------------------------------------------------------- /phillips_hue/ansible.cfg: -------------------------------------------------------------------------------- 1 | # config file for ansible -- http://ansible.com/ 2 | # ============================================== 3 | 4 | # nearly all parameters can be overridden in ansible-playbook 5 | # or with command line flags. ansible will read ANSIBLE_CONFIG, 6 | # ansible.cfg in the current working directory, .ansible.cfg in 7 | # the home directory or /etc/ansible/ansible.cfg, whichever it 8 | # finds first 9 | 10 | [defaults] 11 | 12 | # some basic default values... 13 | 14 | inventory = hosts 15 | forks = 50 16 | host_key_checking = False 17 | retry_files_enabled = False 18 | no_target_syslog = False 19 | callback_whitelist = time 20 | 21 | [ssh_connection] 22 | scp_if_ssh = True 23 | -------------------------------------------------------------------------------- /phillips_hue/ansible_colors.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: no 3 | connection: local 4 | vars: 5 | ansible_mango: 6 | "on": true 7 | "bri": 254 8 | "xy": [0.5701, 0.313] 9 | ansible_pool: 10 | "on": true 11 | "bri": 254 12 | "xy": [0.1593, 0.2522] 13 | tasks: 14 | - name: INCLUDE UNIQUE USERNAME FROM REGISTER.YML 15 | include_vars: 16 | file: username_info.yml 17 | 18 | - name: GRAB HUE LIGHT INFORMATION 19 | uri: 20 | url: "http://{{ip_address}}/api/{{username}}" 21 | method: GET 22 | body: '{{body_info|to_json}}' 23 | register: light_info 24 | 25 | - name: TURN LIGHTS TO MANGO 26 | uri: 27 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 28 | method: PUT 29 | body: '{{ansible_mango|to_json}}' 30 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 31 | 32 | - name: TURN LIGHTS TO POOL 33 | uri: 34 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 35 | method: PUT 36 | body: '{{ansible_pool|to_json}}' 37 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 38 | 39 | - name: TURN LIGHTS TO MANGO 40 | uri: 41 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 42 | method: PUT 43 | body: '{{ansible_mango|to_json}}' 44 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 45 | 46 | - name: TURN LIGHTS TO POOL 47 | uri: 48 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 49 | method: PUT 50 | body: '{{ansible_pool|to_json}}' 51 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 52 | -------------------------------------------------------------------------------- /phillips_hue/effect.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: no 3 | connection: local 4 | vars: 5 | ansible_effect: 6 | "on": true 7 | "effect": "colorloop" 8 | ansible_none: 9 | "on": true 10 | "effect": "none" 11 | tasks: 12 | - name: INCLUDE UNIQUE USERNAME FROM REGISTER.YML 13 | include_vars: 14 | file: username_info.yml 15 | 16 | - name: GRAB HUE LIGHT INFORMATION 17 | uri: 18 | url: "http://{{ip_address}}/api/{{username}}" 19 | method: GET 20 | body: '{{body_info|to_json}}' 21 | register: light_info 22 | 23 | - name: TURN LIGHTS INTO COLORLOOP EFFECT 24 | uri: 25 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 26 | method: PUT 27 | body: '{{ansible_effect|to_json}}' 28 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 29 | 30 | # Pause for 10 seconds 31 | - pause: 32 | seconds: 5 33 | 34 | - name: TURN LIGHTS INTO COLORLOOP EFFECT 35 | uri: 36 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 37 | method: PUT 38 | body: '{{ansible_none|to_json}}' 39 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 40 | -------------------------------------------------------------------------------- /phillips_hue/hosts: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /phillips_hue/hue.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/phillips_hue/hue.gif -------------------------------------------------------------------------------- /phillips_hue/on_off.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: no 3 | connection: local 4 | 5 | vars: 6 | off_state: 7 | "on": false 8 | on_state: 9 | "on": true 10 | 11 | tasks: 12 | - name: INCLUDE UNIQUE USERNAME FROM REGISTER.YML 13 | include_vars: 14 | file: username_info.yml 15 | 16 | - name: GRAB HUE LIGHT INFORMATION 17 | uri: 18 | url: "http://{{ip_address}}/api/{{username}}" 19 | method: GET 20 | body: '{{body_info|to_json}}' 21 | register: light_info 22 | 23 | - name: PRINT DATA TO TERMINAL WINDOW 24 | debug: 25 | var: light_info.json.lights 26 | 27 | - name: PRINT AMOUNT OF LIGHTS TO TERMINAL WINDOW 28 | debug: 29 | msg: "THERE ARE {{light_info.json.lights | length}} HUE LIGHTS PRESENT" 30 | 31 | # - name: PRINT OUT LOOP VARS 32 | # debug: 33 | # msg: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 34 | # loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 35 | 36 | - name: TURN LIGHTS OFF 37 | uri: 38 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 39 | method: PUT 40 | body: '{{off_state|to_json}}' 41 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 42 | 43 | - name: PROMPT USER TO TURN BACK ON 44 | pause: 45 | prompt: "Turn them back on?" 46 | 47 | - name: TURN LIGHTS ON 48 | uri: 49 | url: "http://{{ip_address}}/api/{{username}}/lights/{{item}}/state" 50 | method: PUT 51 | body: '{{on_state|to_json}}' 52 | loop: "{{ range(1, light_info.json.lights | length + 1)|list }}" 53 | -------------------------------------------------------------------------------- /phillips_hue/register.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: no 3 | connection: local 4 | 5 | tasks: 6 | 7 | - name: PROMPT USER TO PRESS PHYSICAL BUTTON HUE HUB 8 | pause: 9 | prompt: "Press the button on the hub now..." 10 | 11 | - name: INCLUDE IP ADDRESS FROM username_info.yml 12 | include_vars: 13 | file: username_info.yml 14 | 15 | - name: GRAB UNIQUE USERNAME 16 | uri: 17 | url: "http://{{ip_address}}/api" 18 | method: POST 19 | body: '{{body_info|to_json}}' 20 | register: username_info 21 | 22 | - name: PRINT DATA TO TERMINAL WINDOW 23 | debug: 24 | var: username_info.json 25 | - lineinfile: 26 | path: "./username_info.yml" 27 | regexp: '^username' 28 | insertafter: EOF 29 | line: 'username: {{username_info.json[0]["success"]["username"]}}' 30 | -------------------------------------------------------------------------------- /phillips_hue/username_info.yml: -------------------------------------------------------------------------------- 1 | --- 2 | username: elY1xx9p5twUBYDjELgMUuQT99kLaVqGT1p0eDrl 3 | ip_address: "192.168.86.30" 4 | body_info: 5 | devicetype: "Ansible!" 6 | -------------------------------------------------------------------------------- /rust-module-hello-world/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean rust 2 | 3 | all: rust 4 | 5 | clean: 6 | rm -f library/rust_helloworld 7 | cd module-src && \ 8 | cargo clean 9 | 10 | rust: 11 | cd module-src && \ 12 | cargo build && \ 13 | cp -v target/debug/helloworld ../library/rust_helloworld 14 | -------------------------------------------------------------------------------- /rust-module-hello-world/library/.gitignore: -------------------------------------------------------------------------------- 1 | !/.gitignore 2 | /.* 3 | /* 4 | -------------------------------------------------------------------------------- /rust-module-hello-world/module-src/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "helloworld" 3 | version = "0.1.0" 4 | authors = ["Sviatoslav Sydorenko "] 5 | 6 | [dependencies] 7 | serde = "1.0.66" 8 | serde_derive = "1.0.66" 9 | serde_json = "1.0.20" 10 | -------------------------------------------------------------------------------- /rust-module-hello-world/module-src/target/.gitignore: -------------------------------------------------------------------------------- 1 | !/.gitignore 2 | /.* 3 | /* 4 | -------------------------------------------------------------------------------- /rust-module-hello-world/rust.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | tasks: 4 | - debug: 5 | msg: Testing a binary module written in Rust 6 | 7 | - debug: 8 | var: ansible_system 9 | 10 | - name: ping 11 | ping: 12 | 13 | - name: Hello, World! 14 | rust_helloworld: 15 | register: hello_world 16 | 17 | - assert: 18 | that: 19 | - > 20 | hello_world.msg == "Hello, World!" 21 | 22 | - name: Hello, Ansible! 23 | rust_helloworld: 24 | name: Ansible 25 | register: hello_ansible 26 | 27 | - assert: 28 | that: 29 | - > 30 | hello_ansible.msg == "Hello, Ansible!" 31 | 32 | - name: Async Hello, World! 33 | rust_helloworld: 34 | async: 10 35 | poll: 1 36 | register: async_hello_world 37 | 38 | - assert: 39 | that: 40 | - > 41 | async_hello_world.msg == "Hello, World!" 42 | 43 | - name: Async Hello, Ansible! 44 | rust_helloworld: 45 | name: Ansible 46 | async: 10 47 | poll: 1 48 | register: async_hello_ansible 49 | 50 | - assert: 51 | that: 52 | - > 53 | async_hello_ansible.msg == "Hello, Ansible!" 54 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 Cuong Nguyen 2 | 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 9 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/README.md: -------------------------------------------------------------------------------- 1 | ## Tomcat failover with Memcached + Memcached Session Manager + Nginx (load balancer) 2 | 3 | - Tested on Ansible 1.9.3 for Debian 4 | - Expects hosts: CentOS 6.x 5 | 6 | This playbook deploys a failover solution for clustered Tomcat using Nginx as load balancer and Memcached + MSM as session manager. 7 | 8 | - Nginx: balances the requests by round robin. 9 | - Memcached: stores `sessionid` of tomcat. 10 | - MSM: manages tomcat session. 11 | 12 | For more detail about session management, see https://github.com/magro/memcached-session-manager 13 | 14 | This playbook also deploys a [demo web app](https://github.com/magro/msm-sample-webapp) to test the session management. 15 | 16 | 17 | ## Initial setup of inventory file 18 | 19 | ``` 20 | [lb_servers] 21 | lbserver 22 | 23 | [backend_servers] 24 | tomcat_server_1 25 | tomcat_server_2 26 | 27 | [memcached_servers] 28 | cached_server1 29 | cached_server2 30 | ``` 31 | 32 | Edit inventory file `hosts` to suit your requirements and run playbook: 33 | 34 | ``` 35 | $ ansible-playbook -i hosts site.yml 36 | ``` 37 | 38 | When finished, open web browser and access to http://nginx_ip/ to start testing. 39 | 40 | ## Ideas and improvements 41 | 42 | - Setup SSL for load balancer. 43 | - HA load balancer. 44 | - Hardening iptables rules. 45 | 46 | Pull requests are welcome. 47 | 48 | ## License 49 | 50 | This work is licensed under MIT license. 51 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/group_vars/all: -------------------------------------------------------------------------------- 1 | # Java variables 2 | 3 | # Nginx variables 4 | nginx_http_port: 80 5 | # nginx_https_port: 443 6 | 7 | # Tomcat variables 8 | tomcat_http_port: 8080 9 | tomcat_https_port: 8443 10 | 11 | # Memcached variables 12 | memcached_port: 11211 13 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/hosts: -------------------------------------------------------------------------------- 1 | [lb_servers] 2 | lbserver 3 | 4 | [backend_servers] 5 | tomcat_server_1 6 | tomcat_server_2 7 | 8 | [memcached_servers] 9 | cached_server1 10 | cached_server2 11 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart iptables 3 | service: name=iptables state=restarted 4 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install libselinux-python 3 | yum: name=libselinux-python state=present 4 | 5 | - name: Install GPG key for EPEL 6 | get_url: url=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6 dest=/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 7 | 8 | - name: Install EPEL repository 9 | yum: name=https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm state=present 10 | 11 | - name: Setup Iptables rules 12 | template: src=iptables.j2 dest=/etc/sysconfig/iptables 13 | notify: restart iptables 14 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/common/templates/iptables.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Manual customization of this file is not recommended. 3 | *filter 4 | :INPUT ACCEPT [0:0] 5 | :FORWARD ACCEPT [0:0] 6 | :OUTPUT ACCEPT [0:0] 7 | 8 | {% if (inventory_hostname in groups['lb_servers']) %} 9 | -A INPUT -p tcp --dport {{ nginx_http_port }} -j ACCEPT 10 | {% endif %} 11 | 12 | {% if inventory_hostname in groups['backend_servers'] %} 13 | -A INPUT -p tcp --dport {{ tomcat_http_port }} -j ACCEPT 14 | -A INPUT -p tcp --dport {{ tomcat_https_port }} -j ACCEPT 15 | {% endif %} 16 | 17 | {% if inventory_hostname in groups['memcached_servers'] %} 18 | -A INPUT -p tcp --dport {{ memcached_port }} -j ACCEPT 19 | {% endif %} 20 | 21 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 22 | -A INPUT -p icmp -j ACCEPT 23 | -A INPUT -i lo -j ACCEPT 24 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 25 | COMMIT 26 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/lb-nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name=nginx state=restarted 4 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/lb-nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install nginx 3 | yum: name=nginx state=present 4 | 5 | - name: Deliver main configuration file 6 | template: src=nginx.conf.j2 dest=/etc/nginx/nginx.conf 7 | notify: restart nginx 8 | 9 | - name: Copy configuration file to nginx/sites-avaiable 10 | template: src=default.conf.j2 dest=/etc/nginx/conf.d/default.conf 11 | notify: restart nginx 12 | 13 | - name: Make sure nginx start with boot 14 | service: name=nginx state=started enabled=yes 15 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/lb-nginx/templates/default.conf.j2: -------------------------------------------------------------------------------- 1 | upstream tomcat { 2 | {% for host in groups['backend_servers'] %} 3 | server {{ host }}:{{ tomcat_http_port }}; 4 | {% endfor %} 5 | } 6 | 7 | server { 8 | listen 80 default_server; 9 | server_name {{ inventory_hostname }}; 10 | include /etc/nginx/default.d/*.conf; 11 | 12 | location / { 13 | proxy_pass http://tomcat; 14 | } 15 | 16 | } 17 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/lb-nginx/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | # For more information on configuration, see: 2 | # * Official English Documentation: http://nginx.org/en/docs/ 3 | # * Official Russian Documentation: http://nginx.org/ru/docs/ 4 | 5 | user nginx; 6 | worker_processes 1; 7 | 8 | error_log /var/log/nginx/error.log; 9 | #error_log /var/log/nginx/error.log notice; 10 | #error_log /var/log/nginx/error.log info; 11 | 12 | pid /var/run/nginx.pid; 13 | 14 | 15 | events { 16 | worker_connections 1024; 17 | } 18 | 19 | 20 | http { 21 | include /etc/nginx/mime.types; 22 | default_type application/octet-stream; 23 | 24 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 25 | '$status $body_bytes_sent "$http_referer" ' 26 | '"$http_user_agent" "$http_x_forwarded_for"'; 27 | 28 | access_log /var/log/nginx/access.log main; 29 | 30 | sendfile on; 31 | #tcp_nopush on; 32 | 33 | #keepalive_timeout 0; 34 | keepalive_timeout 65; 35 | 36 | #gzip on; 37 | 38 | # Load config files from the /etc/nginx/conf.d directory 39 | # The default server is in conf.d/default.conf 40 | include /etc/nginx/conf.d/*.conf; 41 | 42 | } 43 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/memcached/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart memcached 3 | service: name=memcached state=restarted 4 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/memcached/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install memcached 3 | yum: name=memcached state=present 4 | 5 | - name: Deliver configuration file 6 | template: src=memcached.conf.j2 dest=/etc/sysconfig/memcached backup=yes 7 | notify: restart memcached 8 | 9 | - name: Deliver init script 10 | template: src=init.sh.j2 dest=/etc/init.d/memcached mode=0755 11 | notify: restart memcached 12 | 13 | - name: Start memcached service 14 | service: name=memcached state=started enabled=yes 15 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/memcached/templates/init.sh.j2: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # chkconfig: - 55 45 4 | # description: The memcached daemon is a network memory cache service. 5 | # processname: memcached 6 | # config: /etc/sysconfig/memcached 7 | # pidfile: /var/run/memcached/memcached.pid 8 | 9 | # Standard LSB functions 10 | #. /lib/lsb/init-functions 11 | 12 | # Source function library. 13 | . /etc/init.d/functions 14 | 15 | PORT=11211 16 | USER=memcached 17 | MAXCONN=1024 18 | CACHESIZE=64 19 | OPTIONS="" 20 | 21 | if [ -f /etc/sysconfig/memcached ];then 22 | . /etc/sysconfig/memcached 23 | fi 24 | 25 | # Check that networking is up. 26 | . /etc/sysconfig/network 27 | 28 | if [ "$NETWORKING" = "no" ] 29 | then 30 | exit 0 31 | fi 32 | 33 | RETVAL=0 34 | prog="memcached" 35 | pidfile=${PIDFILE-/var/run/memcached/memcached.pid} 36 | lockfile=${LOCKFILE-/var/lock/subsys/memcached} 37 | 38 | start () { 39 | echo -n $"Starting $prog: " 40 | # Ensure that /var/run/memcached has proper permissions 41 | if [ "`stat -c %U /var/run/memcached`" != "$USER" ]; then 42 | chown $USER /var/run/memcached 43 | fi 44 | 45 | # daemon --pidfile ${pidfile} memcached -d -p $PORT -u $USER -m $CACHESIZE -c $MAXCONN -P ${pidfile} $OPTIONS 46 | daemon --pidfile ${pidfile} memcached -d -p $PORT -u $USER -m $CACHESIZE -c $MAXCONN -P ${pidfile} $OPTIONS -vv > $LOGFILE 2>&1 47 | RETVAL=$? 48 | echo 49 | [ $RETVAL -eq 0 ] && touch ${lockfile} 50 | } 51 | stop () { 52 | echo -n $"Stopping $prog: " 53 | killproc -p ${pidfile} /usr/bin/memcached 54 | RETVAL=$? 55 | echo 56 | if [ $RETVAL -eq 0 ] ; then 57 | rm -f ${lockfile} ${pidfile} 58 | fi 59 | } 60 | 61 | restart () { 62 | stop 63 | start 64 | } 65 | 66 | 67 | # See how we were called. 68 | case "$1" in 69 | start) 70 | start 71 | ;; 72 | stop) 73 | stop 74 | ;; 75 | status) 76 | status -p ${pidfile} memcached 77 | RETVAL=$? 78 | ;; 79 | restart|reload|force-reload) 80 | restart 81 | ;; 82 | condrestart|try-restart) 83 | [ -f ${lockfile} ] && restart || : 84 | ;; 85 | *) 86 | echo $"Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart|try-restart}" 87 | RETVAL=2 88 | ;; 89 | esac 90 | 91 | exit $RETVAL 92 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/memcached/templates/memcached.conf.j2: -------------------------------------------------------------------------------- 1 | # Running on Port 11211 2 | PORT="{{ memcached_port }}" 3 | 4 | # Start as memcached daemon 5 | USER="memcached" 6 | 7 | # Set max simultaneous connections to 1024 8 | MAXCONN="1024" 9 | 10 | # Set log file 11 | LOGFILE="/var/log/memcached.log" 12 | 13 | # Set Memory size to half of all memory 14 | CACHESIZE="{{ ansible_memtotal_mb / 2 }}" 15 | 16 | #Set server IP address 17 | OPTIONS="-l {{ ansible_default_ipv4['address'] }}" 18 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/tomcat/files/msm-sample-webapp-1.0-SNAPSHOT.war: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible/ansible-examples/b50586543c6c4be907fdc88f9f78a2b35d2a895f/tomcat-memcached-failover/roles/tomcat/files/msm-sample-webapp-1.0-SNAPSHOT.war -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/tomcat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart tomcat 3 | service: name=tomcat state=restarted 4 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/tomcat/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install OpenJDK 3 | yum: name=java-1.7.0-openjdk state=present 4 | 5 | - name: Install Tomcat 6 | yum: name=tomcat state=present 7 | 8 | - name: Deliver configuration files for tomcat 9 | template: src={{ item.src }} dest={{ item.dest }} backup=yes 10 | with_items: 11 | - { src: 'default.j2', dest: '/etc/tomcat/default' } 12 | - { src: 'server.xml.j2', dest: '/etc/tomcat/server.xml' } 13 | - { src: 'context.xml.j2', dest: '/etc/tomcat/context.xml' } 14 | notify: restart tomcat 15 | 16 | - name: Deliver libraries support memcached 17 | get_url: url="{{ item }}" dest=/usr/share/tomcat/lib/ 18 | with_items: 19 | - http://repo1.maven.org/maven2/de/javakaffee/msm/memcached-session-manager/1.8.0/memcached-session-manager-1.8.0.jar 20 | - http://repo1.maven.org/maven2/de/javakaffee/msm/memcached-session-manager-tc7/1.8.0/memcached-session-manager-tc7-1.8.0.jar 21 | - https://spymemcached.googlecode.com/files/spymemcached-2.10.2.jar 22 | 23 | - name: Deploy sample app 24 | copy: src=msm-sample-webapp-1.0-SNAPSHOT.war dest=/var/lib/tomcat/webapps/ROOT.war owner=tomcat group=tomcat 25 | 26 | - name: Start tomcat service 27 | service: name=tomcat state=started enabled=yes 28 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/tomcat/templates/context.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | WEB-INF/web.xml 23 | 24 | 25 | 28 | 35 | 37 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/roles/tomcat/templates/default.j2: -------------------------------------------------------------------------------- 1 | # Service-specific configuration file for tomcat. This will be sourced by 2 | # the SysV init script after the global configuration file 3 | # /etc/tomcat/tomcat.conf, thus allowing values to be overridden in 4 | # a per-service manner. 5 | # 6 | # NEVER change the init script itself. To change values for all services make 7 | # your changes in /etc/tomcat/tomcat.conf 8 | # 9 | # To change values for a specific service make your edits here. 10 | # To create a new service create a link from /etc/init.d/ to 11 | # /etc/init.d/tomcat (do not copy the init script) and make a copy of the 12 | # /etc/sysconfig/tomcat file to /etc/sysconfig/ and change 13 | # the property values so the two services won't conflict. Register the new 14 | # service in the system as usual (see chkconfig and similars). 15 | # 16 | 17 | # Where your java installation lives 18 | #JAVA_HOME="/usr/lib/jvm/java" 19 | 20 | # Where your tomcat installation lives 21 | #CATALINA_BASE="/usr/share/tomcat" 22 | #CATALINA_HOME="/usr/share/tomcat" 23 | #JASPER_HOME="/usr/share/tomcat" 24 | #CATALINA_TMPDIR="/var/cache/tomcat/temp" 25 | 26 | # You can pass some parameters to java here if you wish to 27 | #JAVA_OPTS="-Xminf0.1 -Xmaxf0.3" 28 | 29 | # Use JAVA_OPTS to set java.library.path for libtcnative.so 30 | #JAVA_OPTS="-Djava.library.path=/usr/lib" 31 | 32 | # What user should run tomcat 33 | #TOMCAT_USER="tomcat" 34 | 35 | # You can change your tomcat locale here 36 | #LANG="en_US" 37 | 38 | # Run tomcat under the Java Security Manager 39 | #SECURITY_MANAGER="false" 40 | 41 | # Time to wait in seconds, before killing process 42 | #SHUTDOWN_WAIT="30" 43 | 44 | # Whether to annoy the user with "attempting to shut down" messages or not 45 | #SHUTDOWN_VERBOSE="false" 46 | 47 | # Set the TOMCAT_PID location 48 | #CATALINA_PID="/var/run/tomcat.pid" 49 | 50 | # Connector port is 8080 for this tomcat instance 51 | #CONNECTOR_PORT="8080" 52 | 53 | # If you wish to further customize your tomcat environment, 54 | # put your own definitions here 55 | # (i.e. LD_LIBRARY_PATH for some jdbc drivers) 56 | -------------------------------------------------------------------------------- /tomcat-memcached-failover/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: root 4 | roles: 5 | - common 6 | 7 | - hosts: lb_servers 8 | remote_user: root 9 | roles: 10 | - lb-nginx 11 | 12 | - hosts: backend_servers 13 | remote_user: root 14 | roles: 15 | - tomcat 16 | 17 | - hosts: memcached_servers 18 | remote_user: root 19 | roles: 20 | - memcached 21 | -------------------------------------------------------------------------------- /tomcat-standalone/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /tomcat-standalone/README.md: -------------------------------------------------------------------------------- 1 | ## Standalone Tomcat Deployment 2 | 3 | - Requires Ansible 1.2 or newer 4 | - Expects CentOS/RHEL 6.x hosts 5 | 6 | These playbooks deploy a very basic implementation of Tomcat Application Server, 7 | version 7. To use them, first edit the `hosts` inventory file to contain the 8 | hostnames of the machines on which you want Tomcat deployed, and edit the 9 | group_vars/tomcat-servers file to set any Tomcat configuration parameters you need. 10 | 11 | Then run the playbook, like this: 12 | 13 | ansible-playbook -i hosts site.yml 14 | 15 | When the playbook run completes, you should be able to see the Tomcat 16 | Application Server running on the ports you chose, on the target machines. 17 | 18 | This is a very simple playbook and could serve as a starting point for more 19 | complex Tomcat-based projects. 20 | 21 | ### Ideas for Improvement 22 | 23 | Here are some ideas for ways that these playbooks could be extended: 24 | 25 | - Write a playbook to deploy an actual application into the server. 26 | - Deploy Tomcat clustered with a load balancer in front. 27 | 28 | We would love to see contributions and improvements, so please fork this 29 | repository on GitHub and send us your changes via pull requests. 30 | -------------------------------------------------------------------------------- /tomcat-standalone/group_vars/tomcat-servers: -------------------------------------------------------------------------------- 1 | # Here are variables related to the Tomcat installation 2 | 3 | http_port: 8080 4 | https_port: 8443 5 | 6 | # This will configure a default manager-gui user: 7 | 8 | admin_username: admin 9 | admin_password: adminsecret 10 | -------------------------------------------------------------------------------- /tomcat-standalone/hosts: -------------------------------------------------------------------------------- 1 | [tomcat-servers] 2 | webserver1 3 | -------------------------------------------------------------------------------- /tomcat-standalone/roles/selinux/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Download and install EPEL for Centos/RHEL version 6 3 | - name: Download EPEL Repo - Centos/RHEL 6 4 | get_url: url=http://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm dest=/tmp/epel-release-latest-6.noarch.rpm 5 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '6'" 6 | 7 | - name: Install EPEL Repo - Centos/RHEL 6 8 | command: rpm -ivh /tmp/epel-release-latest-6.noarch.rpm creates=/etc/yum.repos.d/epel.repo 9 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '6'" 10 | 11 | # Download and install EPEL for Centos/RHEL version 7 12 | - name: Download EPEL Repo - Centos/RHEL 7 13 | get_url: url=http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm dest=/tmp/epel-release-latest-7.noarch.rpm 14 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'" 15 | 16 | - name: Install EPEL Repo - Centos/RHEL 7 17 | command: rpm -ivh /tmp/epel-release-latest-7.noarch.rpm creates=/etc/yum.repos.d/epel.repo 18 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'" 19 | 20 | - name: Install libselinux-python 21 | yum: name=libselinux-python 22 | -------------------------------------------------------------------------------- /tomcat-standalone/roles/tomcat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart tomcat 3 | service: name=tomcat state=restarted 4 | 5 | - name: restart iptables 6 | service: name=iptables state=restarted 7 | -------------------------------------------------------------------------------- /tomcat-standalone/roles/tomcat/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Java 1.7 3 | yum: name=java-1.7.0-openjdk state=present 4 | 5 | - name: add group "tomcat" 6 | group: name=tomcat 7 | 8 | - name: add user "tomcat" 9 | user: name=tomcat group=tomcat home=/usr/share/tomcat createhome=no 10 | become: True 11 | become_method: sudo 12 | 13 | - name: Download Tomcat 14 | get_url: url=http://archive.apache.org/dist/tomcat/tomcat-7/v7.0.61/bin/apache-tomcat-7.0.61.tar.gz dest=/opt/apache-tomcat-7.0.61.tar.gz 15 | 16 | - name: Extract archive 17 | command: chdir=/usr/share /bin/tar xvf /opt/apache-tomcat-7.0.61.tar.gz -C /opt/ creates=/opt/apache-tomcat-7.0.61 18 | 19 | - name: Symlink install directory 20 | file: src=/opt/apache-tomcat-7.0.61 path=/usr/share/tomcat state=link 21 | 22 | - name: Change ownership of Tomcat installation 23 | file: path=/usr/share/tomcat/ owner=tomcat group=tomcat state=directory recurse=yes 24 | 25 | - name: Configure Tomcat server 26 | template: src=server.xml dest=/usr/share/tomcat/conf/ 27 | notify: restart tomcat 28 | 29 | - name: Configure Tomcat users 30 | template: src=tomcat-users.xml dest=/usr/share/tomcat/conf/ 31 | notify: restart tomcat 32 | 33 | - name: Install Tomcat init script 34 | copy: src=tomcat-initscript.sh dest=/etc/init.d/tomcat mode=0755 35 | 36 | - name: Start Tomcat 37 | service: name=tomcat state=started enabled=yes 38 | 39 | - name: deploy iptables rules 40 | template: src=iptables-save dest=/etc/sysconfig/iptables 41 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '6'" 42 | notify: restart iptables 43 | 44 | - name: insert firewalld rule for tomcat http port 45 | firewalld: port={{ http_port }}/tcp permanent=true state=enabled immediate=yes 46 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'" 47 | 48 | - name: insert firewalld rule for tomcat https port 49 | firewalld: port={{ https_port }}/tcp permanent=true state=enabled immediate=yes 50 | when: "ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'" 51 | 52 | - name: wait for tomcat to start 53 | wait_for: port={{http_port}} 54 | -------------------------------------------------------------------------------- /tomcat-standalone/roles/tomcat/templates/iptables-save: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | *filter 3 | :INPUT ACCEPT [0:0] 4 | :FORWARD ACCEPT [0:0] 5 | :OUTPUT ACCEPT [4:512] 6 | -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT 7 | -A INPUT -p icmp -j ACCEPT 8 | -A INPUT -i lo -j ACCEPT 9 | -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT 10 | -A INPUT -p tcp -m state --state NEW -m tcp --dport {{ http_port }} -j ACCEPT 11 | -A INPUT -p tcp -m state --state NEW -m tcp --dport {{ https_port }} -j ACCEPT 12 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 13 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 14 | COMMIT 15 | -------------------------------------------------------------------------------- /tomcat-standalone/roles/tomcat/templates/tomcat-users.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 21 | 22 | 27 | 32 | 33 | 34 | 35 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /tomcat-standalone/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys a simple standalone Tomcat 7 server. 3 | 4 | - hosts: tomcat-servers 5 | remote_user: root 6 | become: yes 7 | become_method: sudo 8 | 9 | roles: 10 | - selinux 11 | - tomcat 12 | -------------------------------------------------------------------------------- /windows/create-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add a user 3 | hosts: all 4 | gather_facts: false 5 | tasks: 6 | - name: Add User 7 | win_user: 8 | name: ansible 9 | password: "@ns1bl3" 10 | state: present 11 | -------------------------------------------------------------------------------- /windows/deploy-site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uses the win_get_url module to download a simple HTML file for IIS 3 | - name: Download simple web site 4 | hosts: all 5 | 6 | gather_facts: false 7 | tasks: 8 | - name: Download simple web site to 'C:\inetpub\wwwroot\ansible.html' 9 | win_get_url: 10 | url: 'https://raw.githubusercontent.com/thisdavejohnson/mywebapp/master/index.html' 11 | dest: 'C:\inetpub\wwwroot\ansible.html' 12 | -------------------------------------------------------------------------------- /windows/enable-iis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook installs and enables IIS on Windows hosts 3 | 4 | - name: Install IIS 5 | hosts: all 6 | gather_facts: false 7 | tasks: 8 | - name: Install IIS 9 | win_feature: 10 | name: "Web-Server" 11 | state: present 12 | restart: yes 13 | include_sub_features: yes 14 | include_management_tools: yes 15 | -------------------------------------------------------------------------------- /windows/files/helloworld.ps1: -------------------------------------------------------------------------------- 1 | # Filename: helloworld.ps1 2 | Write-Host 3 | Write-Host 'Hello World!' 4 | Write-Host "Good-bye World! `n" 5 | # end of script 6 | -------------------------------------------------------------------------------- /windows/install-msi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Apache from an MSI 3 | hosts: all 4 | 5 | tasks: 6 | - name: Download the Apache installer 7 | win_get_url: 8 | url: 'http://mirror.cc.columbia.edu/pub/software/apache//httpd/binaries/win32/httpd-2.2.25-win32-x86-no_ssl.msi' 9 | dest: 'C:\Users\Administrator\Downloads\httpd-2.2.25-win32-x86-no_ssl.msi' 10 | 11 | - name: Install MSI 12 | win_package: 13 | path: 'C:\Users\Administrator\Downloads\httpd-2.2.25-win32-x86-no_ssl.msi' 14 | state: present 15 | 16 | -------------------------------------------------------------------------------- /windows/ping.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uses the win_ping module to test connectivity to Windows hosts 3 | - name: Ping 4 | hosts: all 5 | 6 | tasks: 7 | - name: ping 8 | win_ping: 9 | 10 | -------------------------------------------------------------------------------- /windows/run-powershell.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook tests the script module on Windows hosts 3 | 4 | - name: Run powershell script 5 | hosts: all 6 | gather_facts: false 7 | tasks: 8 | - name: Run powershell script 9 | script: files/helloworld.ps1 10 | -------------------------------------------------------------------------------- /windows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test raw module 3 | hosts: all 4 | tasks: 5 | - name: run ipconfig 6 | raw: ipconfig 7 | register: ipconfig 8 | - debug: var=ipconfig 9 | 10 | - name: test stat module 11 | hosts: windows 12 | tasks: 13 | - name: test stat module on file 14 | win_stat: path="C:/Windows/win.ini" 15 | register: stat_file 16 | 17 | - debug: var=stat_file 18 | 19 | - name: check stat_file result 20 | assert: 21 | that: 22 | - "stat_file.stat.exists" 23 | - "not stat_file.stat.isdir" 24 | - "stat_file.stat.size > 0" 25 | - "stat_file.stat.md5" 26 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/demo-aws-wamp-launch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #Provision some instances: 3 | - hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | 7 | vars_files: 8 | - group_vars/all 9 | 10 | tasks: 11 | - name: Launch webserver instances 12 | ec2: > 13 | access_key="{{ ec2_access_key }}" 14 | secret_key="{{ ec2_secret_key }}" 15 | keypair="{{ ec2_keypair }}" 16 | group="{{ ec2_security_group }}" 17 | type="{{ ec2_instance_type }}" 18 | image="ami-0d789266" 19 | region="{{ ec2_region }}" 20 | instance_tags="{'ansible_group':'windows_webservers', 'type':'{{ ec2_instance_type }}', 'group':'{{ ec2_security_group }}', 'Name':'demo_''{{ tower_user_name }}'}" 21 | count="{{ ec2_instance_count }}" 22 | wait=true 23 | register: ec2 24 | 25 | tags: 26 | - web 27 | 28 | - name: Launch database instance 29 | ec2: > 30 | access_key="{{ ec2_access_key }}" 31 | secret_key="{{ ec2_secret_key }}" 32 | keypair="{{ ec2_keypair }}" 33 | group="{{ ec2_security_group }}" 34 | type="{{ ec2_instance_type }}" 35 | image="ami-17d66f7c" 36 | region="{{ ec2_region }}" 37 | instance_tags="{'ansible_group':'windows_dbservers', 'type':'{{ ec2_instance_type }}', 'group':'{{ ec2_security_group }}', 'Name':'demo_''{{ tower_user_name }}'}" 38 | count="1" 39 | wait=true 40 | register: ec2 41 | 42 | tags: 43 | - db 44 | 45 | - name: Wait for WinRM to come up 46 | local_action: wait_for host={{ item.public_dns_name }} 47 | port=5986 delay=60 timeout=320 state=started 48 | with_items: ec2.instances 49 | 50 | tags: 51 | - web 52 | - db 53 | 54 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | ec2_access_key: 3 | ec2_secret_key: 4 | ec2_region: us-east-1 5 | ec2_zone: 6 | ec2_image: ami-bc8131d4 7 | ec2_instance_type: m1.small 8 | ec2_keypair: djohnson 9 | ec2_security_group: default 10 | ec2_instance_count: 3 11 | 12 | tower_user_name: admin 13 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/group_vars/windows_dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | sql_port: 3306 6 | dbuser: root 7 | dbname: foodb 8 | upassword: abc 9 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/group_vars/windows_webservers: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for the web server configuration 3 | 4 | # Ethernet interface on which the web server should listen. 5 | # Defaults to the first interface. Change this to: 6 | # 7 | # iface: eth1 8 | # 9 | # ...to override. 10 | # 11 | iface: '{{ ansible_default_ipv4.interface }}' 12 | 13 | # this is the repository that holds our sample webapp 14 | repository: https://github.com/bennojoy/mywebapp.git 15 | 16 | # this is the sha1sum of V5 of the test webapp. 17 | webapp_version: 351e47276cc66b018f4890a04709d4cc3d3edb0d 18 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/roles/elb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role creates the AWS ELB and configures it. 3 | - name: Create the ELB in AWS 4 | ec2_elb_lb: 5 | name: "ansible-windows-demo-lb" 6 | state: present 7 | region: us-east-1 8 | zones: 9 | - us-east-1b 10 | - us-east-1d 11 | - us-east-1e 12 | listeners: 13 | - protocol: http # options are http, https, ssl, tcp 14 | load_balancer_port: 80 15 | instance_port: 80 16 | 17 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/roles/iis/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook installs and enables IIS on Windows hosts 3 | - name: Install IIS 4 | win_feature: 5 | name: "Web-Server" 6 | state: present 7 | restart: yes 8 | include_sub_features: yes 9 | include_management_tools: yes 10 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/roles/mssql/files/create-db.ps1: -------------------------------------------------------------------------------- 1 | # Create the database 2 | set-psdebug -strict 3 | $error[0]|format-list -force 4 | [System.Reflection.Assembly]::LoadWithPartialName('Microsoft.SqlServer.SMO') | out-null 5 | $srv = new-Object Microsoft.SqlServer.Management.Smo.Server("(local)") 6 | $db = New-Object Microsoft.SqlServer.Management.Smo.Database($srv, "Ansible Demo DB") 7 | $db.Create() 8 | 9 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/roles/mssql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This role will create the DB for MS SQL 3 | 4 | #- name: Copy the database creation script 5 | # win_copy: src=create-db.ps1 dest=c:\create-db.ps1 6 | 7 | - name: Create Application Database 8 | script: "create-db.ps1" 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uses the win_get_url module to download a simple HTML file for IIS 3 | 4 | - name: Download simple web site to 'C:\inetpub\wwwroot\ansible.html' 5 | win_get_url: 6 | url: 'https://raw.githubusercontent.com/thisdavejohnson/mywebapp/master/index.html' 7 | dest: 'C:\inetpub\wwwroot\ansible.html' 8 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/rolling_update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook does a rolling update for all webservers serially (one at a time). 3 | # Change the value of serial: to adjust the number of server to be updated. 4 | # 5 | # The three roles that apply to the webserver hosts will be applied: web 6 | 7 | - hosts: tag_ansible_group_windows_webservers 8 | serial: 1 9 | gather_facts: False 10 | connection: winrm 11 | 12 | vars: 13 | ansible_ssh_port : 5986 14 | 15 | # These are the tasks to run before applying updates: 16 | pre_tasks: 17 | - name: Remove host from load balancing pool 18 | local_action: 19 | module: ec2_elb 20 | region: us-east-1 21 | instance_id: "{{ ec2_id }}" 22 | ec2_elbs: "ansible-windows-demo-lb" 23 | wait_timeout: 330 24 | state: 'absent' 25 | 26 | roles: 27 | # - iis 28 | - web 29 | 30 | # These tasks run after the roles: 31 | post_tasks: 32 | - name: Wait for webserver to come up 33 | local_action: wait_for host={{ inventory_hostname }} port=80 state=started timeout=80 34 | 35 | - name: Add host to load balancing pool 36 | local_action: 37 | module: ec2_elb 38 | region: us-east-1 39 | instance_id: "{{ ec2_id }}" 40 | ec2_elbs: "ansible-windows-demo-lb" 41 | wait_timeout: 330 42 | state: 'present' 43 | -------------------------------------------------------------------------------- /windows/wamp_haproxy/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## This playbook deploys the whole application stack in this site. 3 | 4 | # Configure and deploy database servers. 5 | - hosts: tag_ansible_group_windows_dbservers 6 | connection: winrm 7 | 8 | vars: 9 | ansible_ssh_port : 5986 10 | 11 | roles: 12 | - mssql 13 | 14 | tags: 15 | - db 16 | 17 | # Configure and deploy the web servers. Note that we include two roles here, 18 | # the 'base-apache' role which simply sets up Apache, and 'web' which includes 19 | # our example web application. 20 | - hosts: tag_ansible_group_windows_webservers 21 | connection: winrm 22 | 23 | vars: 24 | ansible_ssh_port : 5986 25 | 26 | roles: 27 | - iis 28 | - web 29 | 30 | tags: 31 | - web 32 | 33 | # Configure and deploy the load balancer(s). 34 | - hosts: localhost 35 | connection: local 36 | gather_facts: False 37 | 38 | roles: 39 | - elb 40 | 41 | tags: 42 | - lb 43 | 44 | # Add the webservers to the load balancer(s) 45 | - hosts: tag_ansible_group_windows_webservers 46 | connection: winrm 47 | gather_facts: False 48 | 49 | vars: 50 | ansible_ssh_port : 5986 51 | 52 | tasks: 53 | 54 | - name: Wait for webserver to come up 55 | local_action: wait_for host={{ inventory_hostname }} port=80 state=started timeout=80 56 | 57 | - name: Add host to load balancing pool 58 | local_action: 59 | module: ec2_elb 60 | region: us-east-1 61 | instance_id: "{{ ec2_id }}" 62 | ec2_elbs: "ansible-windows-demo-lb" 63 | wait_timeout: 330 64 | state: 'present' 65 | 66 | tags: 67 | - lb 68 | -------------------------------------------------------------------------------- /wordpress-nginx/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 AnsibleWorks, Inc. 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /wordpress-nginx/README.md: -------------------------------------------------------------------------------- 1 | ## WordPress+Nginx+PHP-FPM Deployment 2 | 3 | - Requires Ansible 1.2 or newer 4 | - Expects CentOS/RHEL 6.x hosts 5 | 6 | These playbooks deploy a simple all-in-one configuration of the popular 7 | WordPress blogging platform and CMS, frontend by the Nginx web server and the 8 | PHP-FPM process manager. To use, copy the `hosts.example` file to `hosts` and 9 | edit the `hosts` inventory file to include the names or URLs of the servers 10 | you want to deploy. 11 | 12 | Then run the playbook, like this: 13 | 14 | ansible-playbook -i hosts site.yml 15 | 16 | The playbooks will configure MySQL, WordPress, Nginx, and PHP-FPM. When the run 17 | is complete, you can hit access server to begin the WordPress configuration. 18 | 19 | ### Ideas for Improvement 20 | 21 | Here are some ideas for ways that these playbooks could be extended: 22 | 23 | - Parameterize the WordPress deployment to handle multi-site configurations. 24 | - Separate the components (PHP-FPM, MySQL, Nginx) onto separate hosts and 25 | handle the configuration appropriately. 26 | - Handle WordPress upgrades automatically. 27 | 28 | We would love to see contributions and improvements, so please fork this 29 | repository on GitHub and send us your changes via pull requests. 30 | -------------------------------------------------------------------------------- /wordpress-nginx/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Which version of WordPress to deploy 3 | wp_version: 4.2.4 4 | wp_sha256sum: 42ca594afc709cbef8528a6096f5a1efe96dcf3164e7ce321e87d57ae015cc82 5 | 6 | # These are the WordPress database settings 7 | wp_db_name: wordpress 8 | wp_db_user: wordpress 9 | wp_db_password: secret 10 | 11 | # You shouldn't need to change this. 12 | mysql_port: 3306 13 | 14 | # This is used for the nginx server configuration, but access to the 15 | # WordPress site is not restricted by a named host. 16 | server_hostname: www.example.com 17 | 18 | # Disable All Updates 19 | # By default automatic updates are enabled, set this value to true to disable all automatic updates 20 | auto_up_disable: false 21 | 22 | #Define Core Update Level 23 | #true = Development, minor, and major updates are all enabled 24 | #false = Development, minor, and major updates are all disabled 25 | #minor = Minor updates are enabled, development, and major updates are disabled 26 | core_update_level: true 27 | -------------------------------------------------------------------------------- /wordpress-nginx/hosts.example: -------------------------------------------------------------------------------- 1 | [wordpress-server] 2 | webserver2 3 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/common/files/RPM-GPG-KEY-EPEL-6: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQINBEvSKUIBEADLGnUj24ZVKW7liFN/JA5CgtzlNnKs7sBg7fVbNWryiE3URbn1 5 | JXvrdwHtkKyY96/ifZ1Ld3lE2gOF61bGZ2CWwJNee76Sp9Z+isP8RQXbG5jwj/4B 6 | M9HK7phktqFVJ8VbY2jfTjcfxRvGM8YBwXF8hx0CDZURAjvf1xRSQJ7iAo58qcHn 7 | XtxOAvQmAbR9z6Q/h/D+Y/PhoIJp1OV4VNHCbCs9M7HUVBpgC53PDcTUQuwcgeY6 8 | pQgo9eT1eLNSZVrJ5Bctivl1UcD6P6CIGkkeT2gNhqindRPngUXGXW7Qzoefe+fV 9 | QqJSm7Tq2q9oqVZ46J964waCRItRySpuW5dxZO34WM6wsw2BP2MlACbH4l3luqtp 10 | Xo3Bvfnk+HAFH3HcMuwdaulxv7zYKXCfNoSfgrpEfo2Ex4Im/I3WdtwME/Gbnwdq 11 | 3VJzgAxLVFhczDHwNkjmIdPAlNJ9/ixRjip4dgZtW8VcBCrNoL+LhDrIfjvnLdRu 12 | vBHy9P3sCF7FZycaHlMWP6RiLtHnEMGcbZ8QpQHi2dReU1wyr9QgguGU+jqSXYar 13 | 1yEcsdRGasppNIZ8+Qawbm/a4doT10TEtPArhSoHlwbvqTDYjtfV92lC/2iwgO6g 14 | YgG9XrO4V8dV39Ffm7oLFfvTbg5mv4Q/E6AWo/gkjmtxkculbyAvjFtYAQARAQAB 15 | tCFFUEVMICg2KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAjYEEwECACAFAkvS 16 | KUICGw8GCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRA7Sd8qBgi4lR/GD/wLGPv9 17 | qO39eyb9NlrwfKdUEo1tHxKdrhNz+XYrO4yVDTBZRPSuvL2yaoeSIhQOKhNPfEgT 18 | 9mdsbsgcfmoHxmGVcn+lbheWsSvcgrXuz0gLt8TGGKGGROAoLXpuUsb1HNtKEOwP 19 | Q4z1uQ2nOz5hLRyDOV0I2LwYV8BjGIjBKUMFEUxFTsL7XOZkrAg/WbTH2PW3hrfS 20 | WtcRA7EYonI3B80d39ffws7SmyKbS5PmZjqOPuTvV2F0tMhKIhncBwoojWZPExft 21 | HpKhzKVh8fdDO/3P1y1Fk3Cin8UbCO9MWMFNR27fVzCANlEPljsHA+3Ez4F7uboF 22 | p0OOEov4Yyi4BEbgqZnthTG4ub9nyiupIZ3ckPHr3nVcDUGcL6lQD/nkmNVIeLYP 23 | x1uHPOSlWfuojAYgzRH6LL7Idg4FHHBA0to7FW8dQXFIOyNiJFAOT2j8P5+tVdq8 24 | wB0PDSH8yRpn4HdJ9RYquau4OkjluxOWf0uRaS//SUcCZh+1/KBEOmcvBHYRZA5J 25 | l/nakCgxGb2paQOzqqpOcHKvlyLuzO5uybMXaipLExTGJXBlXrbbASfXa/yGYSAG 26 | iVrGz9CE6676dMlm8F+s3XXE13QZrXmjloc6jwOljnfAkjTGXjiB7OULESed96MR 27 | XtfLk0W5Ab9pd7tKDR6QHI7rgHXfCopRnZ2VVQ== 28 | =V/6I 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/common/files/epel.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 6 - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 9 | 10 | [epel-debuginfo] 11 | name=Extra Packages for Enterprise Linux 6 - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 17 | gpgcheck=1 18 | 19 | [epel-source] 20 | name=Extra Packages for Enterprise Linux 6 - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 26 | gpgcheck=1 27 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/common/files/iptables-save: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | *filter 3 | :INPUT ACCEPT [0:0] 4 | :FORWARD ACCEPT [0:0] 5 | :OUTPUT ACCEPT [37:13960] 6 | -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT 7 | -A INPUT -p icmp -j ACCEPT 8 | -A INPUT -i lo -j ACCEPT 9 | -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT 10 | -A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT 11 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 12 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 13 | COMMIT 14 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart iptables 3 | service: name=iptables state=restarted 4 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install libselinux-python 3 | yum: name=libselinux-python state=present 4 | 5 | - name: Reload ansible_facts 6 | setup: 7 | 8 | - name: Copy the EPEL repository definition 9 | copy: src=epel.repo dest=/etc/yum.repos.d/epel.repo 10 | 11 | - name: Create the GPG key for EPEL 12 | copy: src=RPM-GPG-KEY-EPEL-6 dest=/etc/pki/rpm-gpg 13 | 14 | - name: Set up iptables rules 15 | copy: src=iptables-save dest=/etc/sysconfig/iptables 16 | notify: restart iptables 17 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/mysql/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart mysql 3 | service: name=mysqld state=restarted 4 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/mysql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Mysql package 3 | yum: name={{ item }} state=present 4 | with_items: 5 | - mysql-server 6 | - MySQL-python 7 | - libselinux-python 8 | - libsemanage-python 9 | 10 | - name: Configure SELinux to start mysql on any port 11 | seboolean: name=mysql_connect_any state=true persistent=yes 12 | when: ansible_selinux.status == "enabled" 13 | 14 | - name: Create Mysql configuration file 15 | template: src=my.cnf.j2 dest=/etc/my.cnf 16 | notify: 17 | - restart mysql 18 | 19 | - name: Start Mysql Service 20 | service: name=mysqld state=started enabled=yes 21 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/mysql/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name=nginx state=restarted enabled=yes 4 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install nginx 3 | yum: name=nginx state=present 4 | 5 | - name: Copy nginx configuration for wordpress 6 | template: src=default.conf dest=/etc/nginx/conf.d/default.conf 7 | notify: restart nginx 8 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/nginx/templates/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 default_server; 3 | server_name {{ server_hostname }}; 4 | root /srv/wordpress/ ; 5 | 6 | client_max_body_size 64M; 7 | 8 | # Deny access to any files with a .php extension in the uploads directory 9 | location ~* /(?:uploads|files)/.*\.php$ { 10 | deny all; 11 | } 12 | 13 | location / { 14 | index index.php index.html index.htm; 15 | try_files $uri $uri/ /index.php?$args; 16 | } 17 | 18 | location ~* \.(gif|jpg|jpeg|png|css|js)$ { 19 | expires max; 20 | } 21 | 22 | location ~ \.php$ { 23 | try_files $uri =404; 24 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 25 | fastcgi_index index.php; 26 | fastcgi_pass unix:/var/run/php-fpm/wordpress.sock; 27 | fastcgi_param SCRIPT_FILENAME 28 | $document_root$fastcgi_script_name; 29 | include fastcgi_params; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/php-fpm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart php-fpm 3 | service: name=php-fpm state=restarted 4 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/php-fpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install php-fpm and deps 3 | yum: name={{ item }} state=present 4 | with_items: 5 | - php 6 | - php-fpm 7 | - php-enchant 8 | - php-IDNA_Convert 9 | - php-mbstring 10 | - php-mysql 11 | - php-PHPMailer 12 | - php-process 13 | - php-simplepie 14 | - php-xml 15 | 16 | - name: Disable default pool 17 | command: mv /etc/php-fpm.d/www.conf /etc/php-fpm.d/www.disabled creates=/etc/php-fpm.d/www.disabled 18 | notify: restart php-fpm 19 | 20 | - name: Copy php-fpm configuration 21 | template: src=wordpress.conf dest=/etc/php-fpm.d/ 22 | notify: restart php-fpm 23 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/php-fpm/templates/wordpress.conf: -------------------------------------------------------------------------------- 1 | [wordpress] 2 | listen = /var/run/php-fpm/wordpress.sock 3 | listen.owner = nginx 4 | listen.group = nginx 5 | listen.mode = 0660 6 | user = wordpress 7 | group = wordpress 8 | pm = dynamic 9 | pm.max_children = 10 10 | pm.start_servers = 1 11 | pm.min_spare_servers = 1 12 | pm.max_spare_servers = 3 13 | pm.max_requests = 500 14 | chdir = /srv/wordpress/ 15 | php_admin_value[open_basedir] = /srv/wordpress/:/tmp 16 | -------------------------------------------------------------------------------- /wordpress-nginx/roles/wordpress/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download WordPress 3 | get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz dest=/srv/wordpress-{{ wp_version }}.tar.gz 4 | sha256sum="{{ wp_sha256sum }}" 5 | 6 | - name: Extract archive 7 | unarchive: 8 | creates: /srv/wordpress 9 | src: /srv/wordpress-{{ wp_version }}.tar.gz 10 | dest: /srv/wordpress 11 | 12 | - name: Add group "wordpress" 13 | group: name=wordpress 14 | 15 | - name: Add user "wordpress" 16 | user: name=wordpress group=wordpress home=/srv/wordpress/ 17 | 18 | - name: Fetch random salts for WordPress config 19 | get_url: 20 | url: https://api.wordpress.org/secret-key/1.1/salt/ 21 | register: "wp_salt" 22 | become: no 23 | become_method: sudo 24 | changed_when: true 25 | delegate_to: localhost 26 | 27 | - name: Create WordPress database 28 | mysql_db: name={{ wp_db_name }} state=present 29 | 30 | - name: Create WordPress database user 31 | mysql_user: name={{ wp_db_user }} password={{ wp_db_password }} priv={{ wp_db_name }}.*:ALL host='localhost' state=present 32 | 33 | - name: Copy WordPress config file 34 | template: src=wp-config.php dest=/srv/wordpress/ 35 | 36 | - name: Change ownership of WordPress installation 37 | file: path=/srv/wordpress/ owner=wordpress group=wordpress state=directory recurse=yes setype=httpd_sys_content_t 38 | 39 | - name: Start php-fpm Service 40 | service: name=php-fpm state=started enabled=yes 41 | -------------------------------------------------------------------------------- /wordpress-nginx/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install WordPress, MySQL, Nginx, and PHP-FPM 3 | hosts: all 4 | remote_user: root 5 | # remote_user: user 6 | # become: yes 7 | # become_method: sudo 8 | 9 | roles: 10 | - common 11 | - mysql 12 | - nginx 13 | - php-fpm 14 | - wordpress 15 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/LICENSE.md: -------------------------------------------------------------------------------- 1 | Modified by David Beck (techiscool@gmail.com) 2015 2 | Copyright (C) 2015 Eugene Varnavsky (varnavruz@gmail.com) 3 | 4 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 5 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 6 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/README.md: -------------------------------------------------------------------------------- 1 | ## WordPress+Nginx+PHP-FPM+MariaDB Deployment 2 | 3 | - Requires Ansible 1.2 or newer 4 | - Expects CentOS/RHEL 7.x host/s 5 | 6 | RHEL7 version reflects changes in Red Hat Enterprise Linux and CentOS 7: 7 | 1. Network device naming scheme has changed 8 | 2. iptables is replaced with firewalld 9 | 3. MySQL is replaced with MariaDB 10 | 11 | These playbooks deploy a simple all-in-one configuration of the popular 12 | WordPress blogging platform and CMS, frontend by the Nginx web server and the 13 | PHP-FPM process manager. To use, copy the `hosts.example` file to `hosts` and 14 | edit the `hosts` inventory file to include the names or URLs of the servers 15 | you want to deploy. 16 | 17 | Then run the playbook, like this: 18 | 19 | ansible-playbook -i hosts site.yml 20 | 21 | The playbooks will configure MariaDB, WordPress, Nginx, and PHP-FPM. When the run 22 | is complete, you can hit access server to begin the WordPress configuration. 23 | 24 | ### Ideas for Improvement 25 | 26 | Here are some ideas for ways that these playbooks could be extended: 27 | 28 | - Parameterize the WordPress deployment to handle multi-site configurations. 29 | - Separate the components (PHP-FPM, MySQL, Nginx) onto separate hosts and 30 | handle the configuration appropriately. 31 | - Handle WordPress upgrades automatically. 32 | 33 | We would love to see contributions and improvements, so please fork this 34 | repository on GitHub and send us your changes via pull requests. -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | wp_version: 4.6 4 | wp_sha256sum: c1856cf969b1e73025ba2c681491908c3a4a6c5a2333f4531bf9bfb90f634380 5 | 6 | # MySQL settings 7 | mysqlservice: mysqld 8 | mysql_port: 3306 9 | 10 | # These are the WordPress database settings 11 | wp_db_name: wordpress 12 | wp_db_user: wordpress 13 | wp_db_password: secret 14 | 15 | # This is used for the nginx server configuration, but access to the 16 | # WordPress site is not restricted by a named host. 17 | nginx_port: 80 18 | server_hostname: server.example.com 19 | 20 | # Disable All Updates 21 | # By default automatic updates are enabled, set this value to true to disable all automatic updates 22 | auto_up_disable: false 23 | 24 | #Define Core Update Level 25 | # true = Development, minor, and major updates are all enabled 26 | # false = Development, minor, and major updates are all disabled 27 | # minor = Minor updates are enabled, development, and major updates are disabled 28 | core_update_level: true 29 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/hosts.example: -------------------------------------------------------------------------------- 1 | [wordpress-server] 2 | webserver2 3 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/files/RPM-GPG-KEY-EPEL-7: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.11 (GNU/Linux) 3 | 4 | mQINBFKuaIQBEAC1UphXwMqCAarPUH/ZsOFslabeTVO2pDk5YnO96f+rgZB7xArB 5 | OSeQk7B90iqSJ85/c72OAn4OXYvT63gfCeXpJs5M7emXkPsNQWWSju99lW+AqSNm 6 | jYWhmRlLRGl0OO7gIwj776dIXvcMNFlzSPj00N2xAqjMbjlnV2n2abAE5gq6VpqP 7 | vFXVyfrVa/ualogDVmf6h2t4Rdpifq8qTHsHFU3xpCz+T6/dGWKGQ42ZQfTaLnDM 8 | jToAsmY0AyevkIbX6iZVtzGvanYpPcWW4X0RDPcpqfFNZk643xI4lsZ+Y2Er9Yu5 9 | S/8x0ly+tmmIokaE0wwbdUu740YTZjCesroYWiRg5zuQ2xfKxJoV5E+Eh+tYwGDJ 10 | n6HfWhRgnudRRwvuJ45ztYVtKulKw8QQpd2STWrcQQDJaRWmnMooX/PATTjCBExB 11 | 9dkz38Druvk7IkHMtsIqlkAOQMdsX1d3Tov6BE2XDjIG0zFxLduJGbVwc/6rIc95 12 | T055j36Ez0HrjxdpTGOOHxRqMK5m9flFbaxxtDnS7w77WqzW7HjFrD0VeTx2vnjj 13 | GqchHEQpfDpFOzb8LTFhgYidyRNUflQY35WLOzLNV+pV3eQ3Jg11UFwelSNLqfQf 14 | uFRGc+zcwkNjHh5yPvm9odR1BIfqJ6sKGPGbtPNXo7ERMRypWyRz0zi0twARAQAB 15 | tChGZWRvcmEgRVBFTCAoNykgPGVwZWxAZmVkb3JhcHJvamVjdC5vcmc+iQI4BBMB 16 | AgAiBQJSrmiEAhsPBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBqL66iNSxk 17 | 5cfGD/4spqpsTjtDM7qpytKLHKruZtvuWiqt5RfvT9ww9GUUFMZ4ZZGX4nUXg49q 18 | ixDLayWR8ddG/s5kyOi3C0uX/6inzaYyRg+Bh70brqKUK14F1BrrPi29eaKfG+Gu 19 | MFtXdBG2a7OtPmw3yuKmq9Epv6B0mP6E5KSdvSRSqJWtGcA6wRS/wDzXJENHp5re 20 | 9Ism3CYydpy0GLRA5wo4fPB5uLdUhLEUDvh2KK//fMjja3o0L+SNz8N0aDZyn5Ax 21 | CU9RB3EHcTecFgoy5umRj99BZrebR1NO+4gBrivIfdvD4fJNfNBHXwhSH9ACGCNv 22 | HnXVjHQF9iHWApKkRIeh8Fr2n5dtfJEF7SEX8GbX7FbsWo29kXMrVgNqHNyDnfAB 23 | VoPubgQdtJZJkVZAkaHrMu8AytwT62Q4eNqmJI1aWbZQNI5jWYqc6RKuCK6/F99q 24 | thFT9gJO17+yRuL6Uv2/vgzVR1RGdwVLKwlUjGPAjYflpCQwWMAASxiv9uPyYPHc 25 | ErSrbRG0wjIfAR3vus1OSOx3xZHZpXFfmQTsDP7zVROLzV98R3JwFAxJ4/xqeON4 26 | vCPFU6OsT3lWQ8w7il5ohY95wmujfr6lk89kEzJdOTzcn7DBbUru33CQMGKZ3Evt 27 | RjsC7FDbL017qxS+ZVA/HGkyfiu4cpgV8VUnbql5eAZ+1Ll6Dw== 28 | =hdPa 29 | -----END PGP PUBLIC KEY BLOCK----- -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/files/RPM-GPG-KEY-NGINX: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.11 (FreeBSD) 3 | 4 | mQENBE5OMmIBCAD+FPYKGriGGf7NqwKfWC83cBV01gabgVWQmZbMcFzeW+hMsgxH 5 | W6iimD0RsfZ9oEbfJCPG0CRSZ7ppq5pKamYs2+EJ8Q2ysOFHHwpGrA2C8zyNAs4I 6 | QxnZZIbETgcSwFtDun0XiqPwPZgyuXVm9PAbLZRbfBzm8wR/3SWygqZBBLdQk5TE 7 | fDR+Eny/M1RVR4xClECONF9UBB2ejFdI1LD45APbP2hsN/piFByU1t7yK2gpFyRt 8 | 97WzGHn9MV5/TL7AmRPM4pcr3JacmtCnxXeCZ8nLqedoSuHFuhwyDnlAbu8I16O5 9 | XRrfzhrHRJFM1JnIiGmzZi6zBvH0ItfyX6ttABEBAAG0KW5naW54IHNpZ25pbmcg 10 | a2V5IDxzaWduaW5nLWtleUBuZ2lueC5jb20+iQE+BBMBAgAoBQJOTjJiAhsDBQkJ 11 | ZgGABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCr9b2Ce9m/YpvjB/98uV4t 12 | 94d0oEh5XlqEZzVMrcTgPQ3BZt05N5xVuYaglv7OQtdlErMXmRWaFZEqDaMHdniC 13 | sF63jWMd29vC4xpzIfmsLK3ce9oYo4t9o4WWqBUdf0Ff1LMz1dfLG2HDtKPfYg3C 14 | 8NESud09zuP5NohaE8Qzj/4p6rWDiRpuZ++4fnL3Dt3N6jXILwr/TM/Ma7jvaXGP 15 | DO3kzm4dNKp5b5bn2nT2QWLPnEKxvOg5Zoej8l9+KFsUnXoWoYCkMQ2QTpZQFNwF 16 | xwJGoAz8K3PwVPUrIL6b1lsiNovDgcgP0eDgzvwLynWKBPkRRjtgmWLoeaS9FAZV 17 | ccXJMmANXJFuCf26iQEcBBABAgAGBQJOTkelAAoJEKZP1bF62zmo79oH/1XDb29S 18 | YtWp+MTJTPFEwlWRiyRuDXy3wBd/BpwBRIWfWzMs1gnCjNjk0EVBVGa2grvy9Jtx 19 | JKMd6l/PWXVucSt+U/+GO8rBkw14SdhqxaS2l14v6gyMeUrSbY3XfToGfwHC4sa/ 20 | Thn8X4jFaQ2XN5dAIzJGU1s5JA0tjEzUwCnmrKmyMlXZaoQVrmORGjCuH0I0aAFk 21 | RS0UtnB9HPpxhGVbs24xXZQnZDNbUQeulFxS4uP3OLDBAeCHl+v4t/uotIad8v6J 22 | SO93vc1evIje6lguE81HHmJn9noxPItvOvSMb2yPsE8mH4cJHRTFNSEhPW6ghmlf 23 | Wa9ZwiVX5igxcvaIRgQQEQIABgUCTk5b0gAKCRDs8OkLLBcgg1G+AKCnacLb/+W6 24 | cflirUIExgZdUJqoogCeNPVwXiHEIVqithAM1pdY/gcaQZmIRgQQEQIABgUCTk5f 25 | YQAKCRCpN2E5pSTFPnNWAJ9gUozyiS+9jf2rJvqmJSeWuCgVRwCcCUFhXRCpQO2Y 26 | Va3l3WuB+rgKjsQ= 27 | =A015 28 | -----END PGP PUBLIC KEY BLOCK----- -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/files/RPM-GPG-KEY-remi: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.7 (GNU/Linux) 3 | 4 | mQGiBEJny1wRBACRnbQgZ6qLmJSuGvi/EwrRL6aW610BbdpLQRL3dnwy5wI5t9T3 5 | /JEiEJ7GTvAwfiisEHifMfk2sRlWRf2EDQFttHyrrYXfY5L6UAF2IxixK5FL7PWA 6 | /2a7tkw1IbCbt4IGG0aZJ6/xgQejrOLi4ewniqWuXCc+tLuWBZrGpE2QfwCggZ+L 7 | 0e6KPTHMP97T4xV81e3Ba5MD/3NwOQh0pVvZlW66Em8IJnBgM+eQh7pl4xq7nVOh 8 | dEMJwVU0wDRKkXqQVghOxALOSAMapj5mDppEDzGLZHZNSRcvGEs2iPwo9vmY+Qhp 9 | AyEBzE4blNR8pwPtAwL0W3cBKUx7ZhqmHr2FbNGYNO/hP4tO2ochCn5CxSwAfN1B 10 | Qs5pBACOkTZMNC7CLsSUT5P4+64t04x/STlAFczEBcJBLF1T16oItDITJmAsPxbY 11 | iee6JRfXmZKqmDP04fRdboWMcRjfDfCciSdIeGqP7vMcO25bDZB6x6++fOcmQpyD 12 | 1Fag3ZUq2yojgXWqVrgFHs/HB3QE7UQkykNp1fjQGbKK+5mWTrQkUmVtaSBDb2xs 13 | ZXQgPFJQTVNARmFtaWxsZUNvbGxldC5jb20+iGAEExECACAFAkZ+MYoCGwMGCwkI 14 | BwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAATm9HAPl/Vv/UAJ9EL8ioMTsz/2EPbNuQ 15 | MP5Xx/qPLACeK5rk2hb8VFubnEsbVxnxfxatGZ25AQ0EQmfLXRAEANwGvY+mIZzj 16 | C1L5Nm2LbSGZNTN3NMbPFoqlMfmym8XFDXbdqjAHutGYEZH/PxRI6GC8YW5YK4E0 17 | HoBAH0b0F97JQEkKquahCakj0P5mGuH6Q8gDOfi6pHimnsSAGf+D+6ZwAn8bHnAa 18 | o+HVmEITYi6s+Csrs+saYUcjhu9zhyBfAAMFA/9Rmfj9/URdHfD1u0RXuvFCaeOw 19 | CYfH2/nvkx+bAcSIcbVm+tShA66ybdZ/gNnkFQKyGD9O8unSXqiELGcP8pcHTHsv 20 | JzdD1k8DhdFNhux/WPRwbo/es6QcpIPa2JPjBCzfOTn9GXVdT4pn5tLG2gHayudK 21 | 8Sj1OI2vqGLMQzhxw4hJBBgRAgAJBQJCZ8tdAhsMAAoJEABOb0cA+X9WcSAAn11i 22 | gC5ns/82kSprzBOU0BNwUeXZAJ0cvNmY7rvbyiJydyLsSxh/la6HKw== 23 | =6Rbg 24 | -----END PGP PUBLIC KEY BLOCK----- 25 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/files/epel.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 7 - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/files/nginx.repo: -------------------------------------------------------------------------------- 1 | [nginx] 2 | name=Nginx repo - $basearch 3 | baseurl=http://nginx.org/packages/centos/7/$basearch 4 | failovermethod=priority 5 | gpgcheck=1 6 | enabled=1 7 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-NGINX 8 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy the NGINX repository definition 3 | copy: src=nginx.repo dest=/etc/yum.repos.d/ 4 | 5 | - name: Copy the EPEL repository definition 6 | copy: src=epel.repo dest=/etc/yum.repos.d/ 7 | 8 | - name: Copy the REMI repository definition 9 | copy: src=remi.repo dest=/etc/yum.repos.d/ 10 | 11 | - name: Create the GPG key for NGINX 12 | copy: src=RPM-GPG-KEY-NGINX dest=/etc/pki/rpm-gpg 13 | 14 | - name: Create the GPG key for EPEL 15 | copy: src=RPM-GPG-KEY-EPEL-7 dest=/etc/pki/rpm-gpg 16 | 17 | - name: Create the GPG key for REMI 18 | copy: src=RPM-GPG-KEY-remi dest=/etc/pki/rpm-gpg 19 | 20 | - name: Install Firewalld 21 | yum: name=firewalld state=present 22 | 23 | - name: Firewalld service state 24 | service: name=firewalld state=started enabled=yes 25 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/mariadb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mariadb 5 | service: name=mariadb state=restarted 6 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/mariadb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will install MariaDB and create db user and give permissions. 3 | 4 | - name: Install MariaDB package 5 | yum: name={{ item }} state=installed 6 | with_items: 7 | - mariadb-server 8 | - MySQL-python 9 | - libselinux-python 10 | - libsemanage-python 11 | 12 | - name: Configure SELinux to start mysql on any port 13 | seboolean: name=mysql_connect_any state=true persistent=yes 14 | 15 | - name: Create Mysql configuration file 16 | template: src=my.cnf.j2 dest=/etc/my.cnf 17 | notify: 18 | - restart mariadb 19 | 20 | - name: Create MariaDB log file 21 | file: path=/var/log/mysqld.log state=touch owner=mysql group=mysql mode=0775 22 | 23 | - name: Start MariaDB Service 24 | service: name=mariadb state=started enabled=yes 25 | 26 | - name: insert firewalld rule 27 | firewalld: port={{ mysql_port }}/tcp permanent=true state=enabled immediate=yes 28 | ignore_errors: yes 29 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/mariadb/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mariadb/mysqld.pid 12 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name=nginx state=restarted enabled=yes 4 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install nginx 3 | yum: name=nginx state=present 4 | 5 | - name: Copy nginx configuration for wordpress 6 | template: src=default.conf dest=/etc/nginx/conf.d/default.conf 7 | notify: restart nginx 8 | 9 | - name: insert firewalld rule for nginx 10 | firewalld: port={{ nginx_port }}/tcp permanent=true state=enabled immediate=yes 11 | ignore_errors: yes 12 | 13 | - name: http service state 14 | service: name=nginx state=started enabled=yes 15 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/nginx/templates/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen {{ nginx_port }} default_server; 3 | server_name {{ server_hostname }}; 4 | root /srv/wordpress/ ; 5 | 6 | client_max_body_size 64M; 7 | 8 | # Deny access to any files with a .php extension in the uploads directory 9 | location ~* /(?:uploads|files)/.*\.php$ { 10 | deny all; 11 | } 12 | 13 | location / { 14 | index index.php index.html index.htm; 15 | try_files $uri $uri/ /index.php?$args; 16 | } 17 | 18 | location ~* \.(gif|jpg|jpeg|png|css|js)$ { 19 | expires max; 20 | } 21 | 22 | location ~ \.php$ { 23 | try_files $uri =404; 24 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 25 | fastcgi_index index.php; 26 | fastcgi_pass unix:/var/run/php-fpm/wordpress.sock; 27 | fastcgi_param SCRIPT_FILENAME 28 | $document_root$fastcgi_script_name; 29 | include fastcgi_params; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/php-fpm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart php-fpm 3 | service: name=php-fpm state=restarted 4 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/php-fpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install php-fpm and deps 3 | yum: name={{ item }} state=present 4 | with_items: 5 | - php 6 | - php-fpm 7 | - php-enchant 8 | - php-IDNA_Convert 9 | - php-mbstring 10 | - php-mysql 11 | - php-PHPMailer 12 | - php-process 13 | - php-simplepie 14 | - php-xml 15 | 16 | - name: Disable default pool 17 | command: mv /etc/php-fpm.d/www.conf /etc/php-fpm.d/www.disabled creates=/etc/php-fpm.d/www.disabled 18 | notify: restart php-fpm 19 | 20 | - name: Copy php-fpm configuration 21 | template: src=wordpress.conf dest=/etc/php-fpm.d/ 22 | notify: restart php-fpm 23 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/roles/php-fpm/templates/wordpress.conf: -------------------------------------------------------------------------------- 1 | [wordpress] 2 | listen = /var/run/php-fpm/wordpress.sock 3 | listen.owner = nginx 4 | listen.group = nginx 5 | listen.mode = 0660 6 | user = wordpress 7 | group = wordpress 8 | pm = dynamic 9 | pm.max_children = 10 10 | pm.start_servers = 1 11 | pm.min_spare_servers = 1 12 | pm.max_spare_servers = 3 13 | pm.max_requests = 500 14 | chdir = /srv/wordpress/ 15 | php_admin_value[open_basedir] = /srv/wordpress/:/tmp 16 | -------------------------------------------------------------------------------- /wordpress-nginx_rhel7/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install WordPress, MariaDB, Nginx, and PHP-FPM 3 | hosts: wordpress-server 4 | remote_user: root 5 | # remote_user: user 6 | # sudo: yes 7 | 8 | roles: 9 | - common 10 | - mariadb 11 | - nginx 12 | - php-fpm 13 | - wordpress 14 | --------------------------------------------------------------------------------