├── .gitignore ├── 2hosts ├── MANIFEST.in ├── README.md ├── aap ├── AlienInvasion.yml ├── ansible.cfg ├── aws-ec2-demo │ ├── README.md │ ├── aws-demo-wf-1.yml │ ├── aws-demo-wf-2.yml │ ├── aws-demo-wf-3.yml │ └── aws │ │ ├── roles │ │ ├── linux │ │ │ ├── apps │ │ │ │ └── web │ │ │ │ │ ├── handlers │ │ │ │ │ └── main.yml │ │ │ │ │ ├── tasks │ │ │ │ │ ├── copy_code.yml │ │ │ │ │ ├── install_httpd.yml │ │ │ │ │ └── main.yml │ │ │ │ │ ├── templates │ │ │ │ │ └── index.html.j2 │ │ │ │ │ └── vars │ │ │ │ │ └── main.yml │ │ │ ├── postcreate │ │ │ │ ├── handlers │ │ │ │ │ └── main.yml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yml │ │ │ │ ├── templates │ │ │ │ │ ├── ntp.conf.j2 │ │ │ │ │ └── ssh_howto.j2 │ │ │ │ └── vars │ │ │ │ │ └── main.yml │ │ │ └── provision │ │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ │ └── vars │ │ │ │ └── main.yml │ │ └── windows │ │ │ ├── postcreate │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── vars │ │ │ │ └── main.yml │ │ │ └── provision │ │ │ ├── tasks │ │ │ └── main.yml │ │ │ └── vars │ │ │ └── main.yml │ │ └── site.yml ├── azure-rg-vm-discovery.yml ├── bad_syntax.yml ├── bpm-ansible-demo │ ├── README.md │ └── aws │ │ ├── roles │ │ ├── linux │ │ │ ├── apps │ │ │ │ └── web │ │ │ │ │ ├── handlers │ │ │ │ │ └── main.yml │ │ │ │ │ ├── tasks │ │ │ │ │ ├── copy_code.yml │ │ │ │ │ ├── install_httpd.yml │ │ │ │ │ └── main.yml │ │ │ │ │ ├── templates │ │ │ │ │ └── index.html.j2 │ │ │ │ │ └── vars │ │ │ │ │ └── main.yml │ │ │ ├── postcreate │ │ │ │ ├── handlers │ │ │ │ │ └── main.yml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yml │ │ │ │ ├── templates │ │ │ │ │ ├── ntp.conf.j2 │ │ │ │ │ └── ssh_howto.j2 │ │ │ │ └── vars │ │ │ │ │ └── main.yml │ │ │ └── provision │ │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ │ └── vars │ │ │ │ └── main.yml │ │ └── windows │ │ │ ├── postcreate │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── vars │ │ │ │ └── main.yml │ │ │ └── provision │ │ │ ├── tasks │ │ 
│ └── main.yml │ │ │ └── vars │ │ │ └── main.yml │ │ └── site.yml ├── delete-vm-azure-instance-stack.yml ├── delete-vm-vsphere.yml ├── deploy-vm-amazon-ami.yml ├── deploy-vm-azure-instance-stack-surveys.yml ├── deploy-vm-azure-instance-stack.yml ├── deploy-vm-azure.yml ├── deploy-vm-gcp.yml ├── deploy-vm-vsphere-from-template.yml ├── deploy-vm-vsphere-from-template2.yml ├── download-and-copy.yml ├── files │ └── local_copy_a ├── hub-collection-test.yml ├── library │ └── scan_foo.py ├── linux-account-blocks-eg.yml ├── linux-account-blocks.yml ├── linux-accounts.yml ├── long-sleep.yml ├── long_lines.yml ├── motd ├── patch-rhel-hosts.yml ├── ping-pong.yml ├── poc-rhv-discover-using-vault-creds.yml ├── poc-rhv-discover.yml ├── poc-vcenter-deploy-vm-template.yml ├── post-provision-aws.yml ├── post-provision.yml ├── rear │ └── local.conf ├── rhel7-rear.yml ├── sanity-check-aws.yml ├── sanity-check.yml ├── scan_custom.yml ├── showmepw.yml ├── snow-create-record.yml ├── snow-get-record.yml ├── snow-update-record.yml ├── touch-and-sleep.yml └── write_files.yml ├── abc.yml ├── amazon.yml ├── ara-example.yml ├── assert-eg.yml ├── async-fire-forget.yml ├── azure-pipelines.yml ├── azure-rg.yml ├── azure.yml ├── blocks-eg.yml ├── check_group_ordering.yml ├── collections ├── README.md ├── requirements.old └── requirements.yml ├── create-ami-amazon.yml ├── create-bare-metal-server-packetnet.yml ├── custom_chkuser.yml ├── custom_chkuser_tower.yml ├── custom_wheelgrpchk.yml ├── delete-vm-vsphere.yml ├── dell ├── idrac_discovery.yml └── inventory │ ├── idrac_hosts │ └── idrac_hosts_constructed_inv.yml ├── deploy-vm-amazon.yml ├── deploy-vm-azure-cmdline.yml ├── deploy-vm-azure.yml ├── deploy-vm-gcp.yml ├── deploy-vm-rhv.yml ├── deploy-vm-vsphere-from-template.yml ├── deploy-vm-vsphere.yml ├── deploy-vn-azure-cmdline.yml ├── directory-structure.txt ├── dynamic_inv_ping_check.yml ├── example-detect-reboot-and-wait.yml ├── extras.yml ├── first-checks.yml ├── fix-rhel-vulnerable.yml ├── 
good-eg.yml ├── hello-world.yml ├── hosts ├── immutablish-deploys ├── README.md ├── ansible.cfg ├── blue.yml ├── build_ami.yml ├── cfn_update_policy.yml ├── inventory │ ├── group_vars │ │ ├── all │ │ ├── apps │ │ ├── colors │ │ └── just_added │ ├── host_vars │ │ ├── amibuilder │ │ ├── fancyapp-blue │ │ ├── fancyapp-cfn │ │ ├── fancyapp-rolling │ │ ├── fancyapp-violet │ │ └── localhost │ └── hosts ├── roles │ ├── apache │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ └── main.yml │ │ ├── templates │ │ │ └── index.html.j2 │ │ └── vars │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ └── main.yml │ ├── asgcfn │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── asg_lc.json │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── dual_asg │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── terminate_relative.yml │ │ └── vars │ │ │ └── main.yml │ ├── infra │ │ ├── README.md │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ └── rolling_asg │ │ ├── README.md │ │ ├── defaults │ │ └── main.yml │ │ ├── handlers │ │ └── main.yml │ │ ├── meta │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── vars │ │ └── main.yml ├── rolling_ami.yml ├── vars │ └── .gitignore └── violet.yml ├── inv.yml ├── inv_5hosts ├── inv_simple_1k_hosts.ini ├── inventory_test ├── journalctl-persist.yml ├── kindle-demo.yml ├── lamp_simple_rhel7 ├── LICENSE.md ├── README.md ├── group_vars │ ├── all │ └── dbservers ├── hosts ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── ntp.conf.j2 │ ├── db │ │ ├── 
handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── my.cnf.j2 │ └── web │ │ ├── tasks │ │ ├── copy_code.yml │ │ ├── install_httpd.yml │ │ └── main.yml │ │ └── templates │ │ └── index.php.j2 └── site.yml ├── library ├── chkuptime ├── chkuser └── chkwheelgrp ├── make-rhel-vulnerable.yml ├── multi_cloud ├── README.md ├── group_vars │ ├── all │ ├── amazon │ └── azure ├── hosts ├── roles │ ├── amazon │ │ └── tasks │ │ │ ├── amazon.yml │ │ │ ├── install_httpd.yml │ │ │ └── main.yml │ ├── azure │ │ └── tasks │ │ │ ├── azure.yml │ │ │ └── main.yml │ └── common │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ └── ntp.conf.j2 └── site.yml ├── networking ├── ios_reporting.yml └── report-ios.j2 ├── openSCAP.yml ├── ops ├── roles │ ├── config │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── issue-Amazon.j2 │ │ │ └── issue-RedHat.j2 │ ├── disk │ │ └── tasks │ │ │ └── main.yml │ └── services │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ └── main.yml └── site.yml ├── ping.yml ├── ping_check.yml ├── post-provision.yml ├── reboot.yml ├── rhsm-aws-tower.yml ├── rhsm-ocp-example.yml ├── rngd.yml ├── rolling-upgrade-demo1 ├── LICENSE.md ├── README.md ├── group_vars │ └── all ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── ntp.conf.j2 │ ├── haproxy │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── install-haproxy.yml │ │ │ └── main.yml │ │ └── templates │ │ │ └── haproxy.j2 │ └── webservers │ │ ├── handlers │ │ └── main.yml │ │ └── tasks │ │ ├── install-apache.yml │ │ └── main.yml └── site.yml ├── run-redhat-insights.yml ├── slack.yml ├── snow_collection_example.yml ├── start_azure_vms.yml ├── sysadmin ├── group_vars │ └── all ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── ntp.conf.j2 │ └── web │ │ ├── tasks │ │ ├── copy_code.yml │ │ ├── 
install_httpd.yml │ │ └── main.yml │ │ └── templates │ │ └── index.php.j2 └── site.yml ├── telegram.yml ├── vm_orchestration ├── README.md ├── offpremise │ ├── roles │ │ ├── common │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ └── ntp.conf.j2 │ │ │ └── vars │ │ │ │ └── main.yml │ │ └── offprem │ │ │ ├── tasks │ │ │ ├── amazon.yml │ │ │ ├── bootstrap.yml │ │ │ └── main.yml │ │ │ └── vars │ │ │ └── main.yml │ └── site.yml └── onpremise │ ├── roles │ ├── common │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── ntp.conf.j2 │ │ └── vars │ │ │ └── main.yml │ └── onprem │ │ ├── tasks │ │ ├── main.yml │ │ └── vcenter.yml │ │ └── vars │ │ └── main.yml │ └── site.yml └── windows ├── README.md ├── check_psversion.yml ├── check_quicktime.yml ├── configure_desktops.yml ├── copssh.yml ├── customise-chrome.yml ├── deploy_iis_code.yml ├── deploy_web_site.yml ├── disable_sso_account.yml ├── event_logging.yml ├── getmem.yml ├── hosts ├── inform-bpm.yml ├── ipconfig.yml ├── library └── win_git.ps1 ├── openssh-win32.txt ├── openssh.yml ├── password-management.yml ├── ping.yml ├── remove_software.yml ├── roles ├── win-ad-config │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ ├── configure_groups.yml │ │ ├── configure_users.yml │ │ └── main.yml │ └── vars │ │ ├── groups.yml │ │ └── users.yml ├── win-domain-pw-check │ └── tasks │ │ └── main.yml ├── win-domain-user │ └── tasks │ │ └── main.yml └── win-local-user │ └── tasks │ └── main.yml ├── scripts ├── get_sw.ps1 └── mem.ps1 ├── setup_iis.yml ├── sysinternals.yml ├── templates ├── delete_ad_account.j2 ├── disable_ad_account.j2 └── download-openssh.j2 ├── various.yml ├── win-ad-config.yml ├── win-auth-tasks-api.yml ├── win-auth-tasks.yml ├── win-domain-add-user.yml ├── win-extras.yml ├── win_updates.yml ├── winrm_ipconfig.py └── winrm_mem.py /.gitignore: -------------------------------------------------------------------------------- 
1 | # ignore some stuff 2 | inventory 3 | credentials 4 | *fact* 5 | *key* 6 | *.retry 7 | *cachedir* 8 | cachedir/* 9 | .vscode 10 | .ansible* 11 | ansible.cfg 12 | 13 | # Packages 14 | *.7z 15 | *.dmg 16 | *.gz 17 | *.iso 18 | *.jar 19 | *.rar 20 | *.tar 21 | *.zip 22 | 23 | # Logs and databases 24 | *.log 25 | *.sql 26 | *.sqlite 27 | 28 | # OS generated files 29 | .DS_Store 30 | .DS_Store? 31 | ._* 32 | .Spotlight-V100 33 | .Trashes 34 | ehthumbs.db 35 | Thumbs.db 36 | -------------------------------------------------------------------------------- /2hosts: -------------------------------------------------------------------------------- 1 | [foogroup] 2 | foo1 instance_group=foo_ig_a 3 | foo2 instance_group=foo_ig_b 4 | [foogroup:vars] 5 | instance_group="foo_ig_default" 6 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude .git * 2 | recursive-exclude * * 3 | recursive-include tower *.yml 4 | include 2hosts 5 | include hosts 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible All The Things! 
2 | 3 | My random assortment of Ansible stuff 4 | 5 | -------------------------------------------------------------------------------- /aap/AlienInvasion.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy AlienInvasion Game 3 | hosts: all 4 | gather_facts: false 5 | become: true 6 | 7 | vars: 8 | git_branch: devel 9 | endpoint: game 10 | 11 | tasks: 12 | - name: Install Software Required 13 | yum: 14 | name: "{{ packages }}" 15 | vars: 16 | packages: 17 | - httpd 18 | - git 19 | tags: software 20 | 21 | - name: Git Clone Repo on to Web Servers 22 | git: 23 | repo: 'https://gitlab.com/ffirg/AlienInvasion.git' 24 | dest: "/var/www/html/{{ endpoint }}" 25 | version: "{{ git_branch }}" 26 | force: yes 27 | tags: content 28 | 29 | - name: Start Web Services 30 | service: 31 | name: httpd 32 | state: started 33 | tags: service 34 | 35 | - name: Smoke Testing 36 | uri: 37 | url: "http://{{ public_ip }}/{{ endpoint }}" 38 | -------------------------------------------------------------------------------- /aap/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = timer, profile_tasks, profile_roles 3 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/README.md: -------------------------------------------------------------------------------- 1 | # This is an AWS ec2 cloud VM creation demo 2 | 3 | ``` 4 | This is designed to be run from Ansible Tower, but can be called via a REST job_template launch 5 | ``` 6 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws-demo-wf-1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_Name_just_created_linux 3 | become: no 4 | become_method: sudo 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Test connection 9 | ping: 10 | 
-------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws-demo-wf-2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_Name_just_created_linux 3 | become: yes 4 | become_method: sudo 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Create ops user accounts 9 | user: 10 | name=bill comment="Bill Rogers" uid=1102 groups=wheel append=yes 11 | name=ted comment="Ted Wonderful" uid=1103 groups=wheel append=yes 12 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws-demo-wf-3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_Name_just_created_linux 3 | become: yes 4 | become_method: sudo 5 | gather_facts: true 6 | 7 | tasks: 8 | 9 | - name: Install Samba and misc packages 10 | yum: name={{item}} state=present 11 | with_items: 12 | - samba-4.2.3-12.el7_2 13 | - samba-client-libs-4.2.3-12.el7_2 14 | - wget 15 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat' 16 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 
3 | 4 | - name: restart web server 5 | service: name=httpd state=restarted 6 | 7 | - name: restart firewall 8 | service: name=firewalld state=restarted 9 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Creates some front-of-shop web content so we can demo something 3 | 4 | - name: Create a index.html file 5 | template: src=index.html.j2 dest=/var/www/html/index.html 6 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 3 | 4 | - name: Install web and associated packages 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - httpd 8 | - php 9 | - php-mysql 10 | - git 11 | - libsemanage-python 12 | - libselinux-python 13 | 14 | #- name: insert firewalld rule for httpd 15 | # firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 16 | 17 | - name: Start the web service 18 | service: name=httpd state=started enabled=yes 19 | 20 | - name: Configure SELinux to allow httpd to connect to remote database 21 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 22 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | - include: copy_code.yml 4 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/templates/index.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible 
Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | Hello, World! I am a web server configured using Ansible. 10 |
11 | 12 | 13 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/apps/web/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | ntpserver1: 0.rhel.pool.ntp.org 4 | ntpserver2: 127.127.1.0 5 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/postcreate/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart web server 8 | service: name=httpd state=restarted 9 | 10 | - name: restart firewall 11 | service: name=firewalld state=restarted 12 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/postcreate/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains post VM creation tasks that will be run on all Linux nodes. 
3 | 4 | - name: Create user accounts 5 | user: name=phil comment="Phil Griffiths" uid=1101 groups=wheel append=yes 6 | 7 | - name: Install latest NTP package 8 | yum: name=ntp state=latest 9 | 10 | - name: Configure NTP service 11 | template: src=ntp.conf.j2 dest=/etc/ntp.conf owner=root mode=0644 12 | notify: 13 | - restart ntp 14 | 15 | - name: Ensure NTP is started at boot 16 | service: name=ntpd state=started enabled=yes 17 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/postcreate/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server "{{ ntpserver1 }}" 8 | server "{{ ntpserver2 }}" 9 | fudge "{{ ntpserver2 }}" stratum 10 10 | 11 | includefile /etc/ntp/crypto/pw 12 | 13 | keys /etc/ntp/keys 14 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/postcreate/templates/ssh_howto.j2: -------------------------------------------------------------------------------- 1 | 2 | Your "{{ ec2_os_type }}" instance has been created: "{{ ec2_dns_name }}" 3 | 4 | To login: 5 | 6 | Open an SSH client 7 | 8 | Locate your private key file "{{ key_name }}" 9 | 10 | Note: your key must not be publicly viewable for SSH to work. 
11 | Use this command if needed: chmod 400 "{{ key_name }}" 12 | 13 | Connect to your instance using its Public DNS: 14 | 15 | ssh -i ""{{ key_name }}"" "{{ ec2_dns_name }}" 16 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/postcreate/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | ntpserver1: 0.rhel.pool.ntp.org 4 | ntpserver2: 127.127.1.0 5 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'Create Amazon VM Instance(s)' 4 | ec2: 5 | # set in vars/main.yml, just for convenience 6 | key_name: "{{ key_name }}" 7 | region: "{{ region }}" 8 | zone: "{{ zone }}" 9 | image: "{{ ami }}" 10 | instance_tags: "{{ instance_tags }}" 11 | instance_type: "{{ instance_type }}" 12 | assign_public_ip: "{{ public_ip }}" 13 | vpc_subnet_id: "{{ subnet }}" 14 | group: "{{ group }}" 15 | count: 1 16 | monitoring: yes 17 | wait: yes 18 | state: present 19 | register: newmachines 20 | 21 | - name: Wait for SSH to start 22 | wait_for: 23 | host: "{{ newmachines.instances[0].public_ip }}" 24 | port: 22 25 | timeout: 300 26 | delegate_to: localhost 27 | 28 | - name: Show me the DNS name 29 | set_fact: 30 | ec2_dns_name: "{{ newmachines.instances[0].public_dns_name }}" 31 | - debug: var=ec2_dns_name 32 | 33 | - name: Add new VM to in memory inventory so we can post-process 34 | add_host: 35 | hostname: "{{ newmachines.instances[0].public_ip }}" 36 | groups: justcreatedlinux 37 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/linux/provision/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | key_name: ansible-tower 
3 | region: eu-west-1 4 | zone: eu-west-1c 5 | ami: ami-f8cc838b 6 | public_ip: yes 7 | group: bpm-ansible-demo 8 | subnet: subnet-bc69e8e4 9 | instance_type: t2.micro 10 | # we use this AWS tag to identify newly provisioned instances and 11 | # rhsm to identify new hosts that need to be registered 12 | instance_tags: '{"type":"demo-clients","Name":"just-created-linux","rhsm":"false"}' 13 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/windows/postcreate/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart web server 5 | service: name=httpd state=restarted 6 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/windows/postcreate/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains post VM creation tasks that will be run on all Windows nodes. 
3 | 4 | - name: Create user accounts 5 | 6 | vars: 7 | ansible_user: tower 8 | ansible_password: Redhat01 9 | ansible_connection: winrm 10 | ansible_port: 5985 11 | ansible_winrm_server_cert_validation: ignore 12 | 13 | win_user: name=phil account_disabled=yes 14 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/windows/postcreate/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | # syntax is: 4 | # variable: value 5 | # ansible_connection: winrm 6 | # ansible_port: 5986 7 | # ansible_winrm_server_cert_validation: ignore 8 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/windows/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'Create Amazon VM Instance(s)' 4 | ec2: 5 | # set in vars/main.yml, just for convenience 6 | key_name: "{{ key_name }}" 7 | region: "{{ region }}" 8 | zone: "{{ zone }}" 9 | image: "{{ ami }}" 10 | instance_tags: "{{ instance_tags }}" 11 | assign_public_ip: "{{ public_ip }}" 12 | vpc_subnet_id: "{{ subnet }}" 13 | group: "{{ group }}" 14 | count: 1 15 | monitoring: yes 16 | wait: yes 17 | state: present 18 | # passed as 'extra_vars' from BPM form input 19 | instance_type: "{{ ec2_instance_type }}" 20 | register: newmachines 21 | 22 | - name: Wait for RDP to start 23 | wait_for: 24 | host: "{{ newmachines.instances[0].public_ip }}" 25 | port: 3389 26 | timeout: 300 27 | delegate_to: localhost 28 | 29 | - name: Add the machine to in memory inventory 30 | add_host: 31 | hostname: "{{ newmachines.instances[0].public_ip }}" 32 | groups: justcreatedwindows 33 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/roles/windows/provision/vars/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | key_name: ansible-tower 3 | region: eu-west-1 4 | zone: eu-west-1c 5 | ami: ami-5d47142e 6 | public_ip: yes 7 | group: bpm-ansible-demo 8 | subnet: subnet-bc69e8e4 9 | # we use this AWS tag to identify newly provisioned instances 10 | instance_tags: '{"type":"demo-clients","Name":"just-created-windows"}' 11 | -------------------------------------------------------------------------------- /aap/aws-ec2-demo/aws/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook provides VM orchestration on Amazon EC2 3 | # most things set in vars directory in case you're wondering :) 4 | 5 | - name: VM and Application Orchestration Demo 6 | hosts: localhost 7 | connection: local 8 | gather_facts: no 9 | ignore_errors: true 10 | 11 | roles: 12 | - { role: 'linux/provision', tags: ["instances", "linux"], when: ec2_os_type == 'linux' } 13 | - { role: 'windows/provision', tags: ["instances", "windows"], when: ec2_os_type == 'windows' } 14 | 15 | - name: Configure common options across new Linux instances 16 | hosts: justcreatedlinux 17 | gather_facts: yes 18 | become: yes 19 | become_method: sudo 20 | 21 | roles: 22 | - linux/postcreate 23 | - { role: 'linux/apps/web', tags: ["apps", "linux"], when: ec2_instance_app == 'web' } 24 | # we can add more apps roles here if we wanted, or perhaps an array of apps would be better to loop through? 
25 | 26 | - name: Configure common options across new Windows instances 27 | hosts: justcreatedwindows 28 | gather_facts: no 29 | 30 | #roles: 31 | # - windows/postcreate 32 | -------------------------------------------------------------------------------- /aap/azure-rg-vm-discovery.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | connection: local 3 | gather_facts: true 4 | 5 | tasks: 6 | - name: Get facts for all virtual machines of a resource group 7 | azure_rm_virtualmachine_facts: 8 | resource_group: "{{ rg }}" 9 | 10 | - action: 11 | module: azure_rm_virtualmachine_facts 12 | resource_group: "{{ rg }}" 13 | register: azure_rm_vm_facts 14 | 15 | - action: 16 | module: debug 17 | msg: "{{ azure_rm_vm_facts }}" 18 | -------------------------------------------------------------------------------- /aap/bad_syntax.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Wrong Snytax 4 | hosts: all 5 | become: false 6 | gather_facts: false 7 | 8 | tasks: 9 | - name: This task contains the write syntax 10 | ansible.builtin.debug: msg=Hello World 11 | 12 | - name: This task contains the wrong syntax 13 | fail: 14 | msg: This task failed 15 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/README.md: -------------------------------------------------------------------------------- 1 | # This is a cloud VM creation demo for use with Red Hat BPM Suite for end-to-end process automation 2 | 3 | ``` 4 | This is designed to be run from Ansible Tower, and called via a REST job_template launch 5 | ``` 6 | 7 | Spin up VM instances in AWS based on input using a BPM Form, with OS, VM size and app choices 8 | 9 | #### Common Role 10 | This will apply common items no matter what the size, OS type. 
Things like standard config mgt, users etc 11 | 12 | #### aws Role 13 | Will spin up VM based on size choice, add applications etc based on OS choice 14 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart web server 5 | service: name=httpd state=restarted 6 | 7 | - name: restart firewall 8 | service: name=firewalld state=restarted 9 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Creates some front-of-shop web content so we can demo something 3 | 4 | - name: Create a index.html file 5 | template: src=index.html.j2 dest=/var/www/html/index.html 6 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 
3 | 4 | - name: Install web and associated packages 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - httpd 8 | - php 9 | - php-mysql 10 | - git 11 | - libsemanage-python 12 | - libselinux-python 13 | 14 | #- name: insert firewalld rule for httpd 15 | # firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 16 | 17 | - name: Start the web service 18 | service: name=httpd state=started enabled=yes 19 | 20 | - name: Configure SELinux to allow httpd to connect to remote database 21 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 22 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | - include: copy_code.yml 4 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/templates/index.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | Hello, World! I am a web server configured using Ansible. 10 |
11 | 12 | 13 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/apps/web/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | ntpserver1: 0.rhel.pool.ntp.org 4 | ntpserver2: 127.127.1.0 5 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/postcreate/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart web server 8 | service: name=httpd state=restarted 9 | 10 | - name: restart firewall 11 | service: name=firewalld state=restarted 12 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/postcreate/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains post VM creation tasks that will be run on all Linux nodes. 
3 | 4 | #- name: Create user accounts 5 | # user: name=phil comment="Phil Griffiths" uid=1101 groups=wheel append=yes 6 | 7 | #- name: Install latest NTP package 8 | # yum: name=ntp state=latest 9 | 10 | #- name: Configure NTP service 11 | # template: src=ntp.conf.j2 dest=/etc/ntp.conf owner=root mode=0644 12 | # notify: 13 | # - restart ntp 14 | 15 | #- name: Ensure NTP is started at boot 16 | # service: name=ntpd state=started enabled=yes 17 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/postcreate/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server "{{ ntpserver1 }}" 8 | server "{{ ntpserver2 }}" 9 | fudge "{{ ntpserver2 }}" stratum 10 10 | 11 | includefile /etc/ntp/crypto/pw 12 | 13 | keys /etc/ntp/keys 14 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/postcreate/templates/ssh_howto.j2: -------------------------------------------------------------------------------- 1 | 2 | Your "{{ ec2_os_type }}" instance has been created: "{{ ec2_dns_name }}" 3 | 4 | To login: 5 | 6 | Open an SSH client 7 | 8 | Locate your private key file "{{ key_name }}" 9 | 10 | Note: your key must not be publicly viewable for SSH to work. 
11 | Use this command if needed: chmod 400 "{{ key_name }}" 12 | 13 | Connect to your instance using its Public DNS: 14 | 15 | ssh -i ""{{ key_name }}"" "{{ ec2_dns_name }}" 16 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/postcreate/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | ntpserver1: 0.rhel.pool.ntp.org 4 | ntpserver2: 127.127.1.0 5 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/linux/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'Create Amazon VM Instance(s)' 4 | ec2: 5 | # set in vars/main.yml, just for convenience 6 | key_name: "{{ key_name }}" 7 | region: "{{ region }}" 8 | zone: "{{ zone }}" 9 | image: "{{ ami }}" 10 | instance_tags: "{{ instance_tags }}" 11 | assign_public_ip: "{{ public_ip }}" 12 | vpc_subnet_id: "{{ subnet }}" 13 | group: "{{ group }}" 14 | count: 1 15 | monitoring: yes 16 | wait: yes 17 | state: present 18 | # passed as 'extra_vars' from BPM form input 19 | instance_type: "{{ ec2_instance_type }}" 20 | register: newmachines 21 | 22 | - name: Wait for SSH to start 23 | wait_for: 24 | host: "{{ newmachines.instances[0].public_ip }}" 25 | port: 22 26 | timeout: 300 27 | delegate_to: localhost 28 | 29 | - name: Record DNS name for email 30 | set_fact: 31 | ec2_dns_name: "{{ newmachines.instances[0].public_dns_name }}" 32 | - debug: var=ec2_dns_name 33 | 34 | - name: Add new VM to in memory inventory so we can post-process 35 | add_host: 36 | hostname: "{{ newmachines.instances[0].public_ip }}" 37 | groups: justcreatedlinux 38 | # ec2_dns_name: "{{ newmachines.instances[0].public_dns_name }}" 39 | -------------------------------------------------------------------------------- 
/aap/bpm-ansible-demo/aws/roles/linux/provision/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | key_name: ansible-tower 3 | region: eu-west-1 4 | zone: eu-west-1c 5 | ami: ami-f8cc838b 6 | public_ip: yes 7 | group: bpm-ansible-demo 8 | subnet: subnet-bc69e8e4 9 | # we use this AWS tag to identify newly provisioned instances 10 | instance_tags: '{"type":"demo-clients","Name":"just-created-linux","rhsm":"false"}' 11 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/windows/postcreate/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart web server 5 | service: name=httpd state=restarted 6 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/windows/postcreate/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains post VM creation tasks that will be run on all Windows nodes. 
3 | 4 | - name: Create user accounts 5 | 6 | vars: 7 | ansible_user: tower 8 | ansible_password: Redhat01 9 | ansible_connection: winrm 10 | ansible_port: 5985 11 | ansible_winrm_server_cert_validation: ignore 12 | 13 | win_user: name=phil account_disabled=yes 14 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/windows/postcreate/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Local variables if we need them 3 | # syntax is: 4 | # variable: value 5 | # ansible_connection: winrm 6 | # ansible_port: 5986 7 | # ansible_winrm_server_cert_validation: ignore 8 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/windows/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'Create Amazon VM Instance(s)' 4 | ec2: 5 | # set in vars/main.yml, just for convenience 6 | key_name: "{{ key_name }}" 7 | region: "{{ region }}" 8 | zone: "{{ zone }}" 9 | image: "{{ ami }}" 10 | instance_tags: "{{ instance_tags }}" 11 | assign_public_ip: "{{ public_ip }}" 12 | vpc_subnet_id: "{{ subnet }}" 13 | group: "{{ group }}" 14 | count: 1 15 | monitoring: yes 16 | wait: yes 17 | state: present 18 | # passed as 'extra_vars' from BPM form input 19 | instance_type: "{{ ec2_instance_type }}" 20 | register: newmachines 21 | 22 | - name: Wait for RDP to start 23 | wait_for: 24 | host: "{{ newmachines.instances[0].public_ip }}" 25 | port: 3389 26 | timeout: 300 27 | delegate_to: localhost 28 | 29 | - name: Add the machine to in memory inventory 30 | add_host: 31 | hostname: "{{ newmachines.instances[0].public_ip }}" 32 | groups: justcreatedwindows 33 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/roles/windows/provision/vars/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | key_name: ansible-tower 3 | region: eu-west-1 4 | zone: eu-west-1c 5 | ami: ami-5d47142e 6 | public_ip: yes 7 | group: bpm-ansible-demo 8 | subnet: subnet-bc69e8e4 9 | # we use this AWS tag to identify newly provisioned instances 10 | instance_tags: '{"type":"demo-clients","Name":"just-created-windows"}' 11 | -------------------------------------------------------------------------------- /aap/bpm-ansible-demo/aws/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook provides VM orchestration on Amazon EC2 3 | 4 | - name: VM and Application Orchestration Demo 5 | hosts: localhost 6 | connection: local 7 | gather_facts: no 8 | ignore_errors: True 9 | 10 | pre_tasks: 11 | 12 | - name: Callback to BPM using pid 13 | uri: 14 | method: POST 15 | user: ****** 16 | password: ****** 17 | url: "http://ec2-52-212-76-182.eu-west-1.compute.amazonaws.com:8080/business-central/rest/runtime/com.gatwick.demo:gatwickdemoproject:1.0/process/instance/{{pid}}/signal?signal=startRef" 18 | tags: 19 | - always 20 | 21 | roles: 22 | - { role: 'linux/provision', tags: ["instances", "linux"], when: ec2_os_type == 'linux' } 23 | - { role: 'windows/provision', tags: ["instances", "windows"], when: ec2_os_type == 'windows' } 24 | 25 | - name: Configure common options across new Linux instances 26 | hosts: justcreatedlinux 27 | gather_facts: yes 28 | become: yes 29 | become_method: sudo 30 | 31 | roles: 32 | - linux/postcreate 33 | - { role: 'linux/apps/web', tags: ["apps", "linux"], when: ec2_instance_app == 'web' } 34 | # we can add more apps roles here if we wanted, or perhaps an array of apps would be better to loop through? 
35 | 36 | - name: Configure common options across new Windows instances 37 | hosts: justcreatedwindows 38 | gather_facts: no 39 | 40 | #roles: 41 | # - windows/postcreate 42 | 43 | - name: Final notifications stage 44 | hosts: localhost 45 | connection: local 46 | gather_facts: no 47 | ignore_errors: True 48 | 49 | post_tasks: 50 | 51 | - name: Send out login details via email 52 | mail: 53 | host='email-smtp.eu-west-1.amazonaws.com' 54 | port=587 55 | username=***** 56 | password='*****' 57 | from="noreply@bangonabout.com (Ansible VM Creator)" 58 | headers=Reply-To=helpdesk@mailinator.com 59 | to="requester " 60 | subject='Your VM has been created' 61 | body='Your "{{ ec2_os_type }}" instance has been created - "{{ ec2_dns_name }}"' 62 | delegate_to: localhost 63 | tags: 64 | - always 65 | 66 | 67 | - name: Callback to BPM using pid 68 | uri: 69 | method: POST 70 | user: ****** 71 | password: ****** 72 | url: "http://ec2-52-212-76-182.eu-west-1.compute.amazonaws.com:8080/business-central/rest/runtime/com.gatwick.demo:gatwickdemoproject:1.0/process/instance/{{pid}}/signal?signal=stopRef" 73 | tags: 74 | - always 75 | -------------------------------------------------------------------------------- /aap/delete-vm-azure-instance-stack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete Azure compute stack by removing resource group 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | 7 | tasks: 8 | 9 | - name: Delete resource group 10 | azure_rm_resourcegroup: 11 | name: "{{ rg }}" 12 | state: absent 13 | force: yes 14 | 15 | -------------------------------------------------------------------------------- /aap/delete-vm-vsphere.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete a vsphere ESXI VM 3 | hosts: localhost 4 | connection: local 5 | 6 | tasks: 7 | - name: Delete an ESXi VM Guest 8 | vsphere_guest: 9 | vcenter_hostname: 
10.39.164.21 10 | username: '{{ username }}' 11 | password: '{{ password }}' 12 | guest: '{{ hostname }}' 13 | state: absent 14 | force: yes 15 | -------------------------------------------------------------------------------- /aap/deploy-vm-amazon-ami.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an EC2 instances 3 | hosts: localhost 4 | connection: local 5 | tasks: 6 | - name: Create EC2 machines using our own AMI 7 | ec2: 8 | region: "{{ ec2_region }}" 9 | zone: "{{ ec2_zone }}" 10 | key_name: ansible-tower 11 | instance_type: t2.micro 12 | image: ami-d39de6a0 13 | instance_tags: 14 | Service: "{{ service_tag }}" 15 | wait: yes 16 | wait_timeout: 500 17 | group: launch-wizard-1 18 | vpc_subnet_id: subnet-0f120178 19 | assign_public_ip: yes 20 | monitoring: yes 21 | exact_count: "{{ vm_instances }}" 22 | count_tag: 23 | Service: "{{ service_tag }}" 24 | register: newmachines 25 | 26 | #- debug: var=newmachines.instances 27 | 28 | - name: Add new instances to host group 29 | add_host: 30 | hostname: "{{ item.public_ip }}" 31 | groups: freshman 32 | with_items: "{{ newmachines.instances }}" 33 | 34 | - name: Wait for SSH to start 35 | wait_for: 36 | host: "{{ item.public_ip }}" 37 | port: 22 38 | timeout: 300 39 | with_items: "{{ newmachines.instances }}" 40 | 41 | - name: Configure new instances 42 | hosts: freshman 43 | remote_user: ec2-user 44 | become: true 45 | gather_facts: true 46 | 47 | tasks: 48 | - name: Configure hostname 49 | hostname: 50 | name: "{{ inventory_hostname }}.philredhat.com" 51 | tags: 52 | - baseconfig 53 | 54 | - name: add groups 55 | group: 56 | name: fuse 57 | state: present 58 | tags: 59 | - groups 60 | 61 | #- include: roles/common/tasks/main.yml 62 | #- include: roles/fuse/tasks/main.yml 63 | -------------------------------------------------------------------------------- /aap/deploy-vm-azure-instance-stack.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup a complete Azure compute stack 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | 7 | tasks: 8 | 9 | - name: Create a resource group 10 | azure_rm_resourcegroup: 11 | name: tower-demos-rg1 12 | location: northeurope 13 | tags: 14 | demo: tower 15 | type: standard 16 | 17 | - name: Create storage account 18 | azure_rm_storageaccount: 19 | resource_group: tower-demos-rg1 20 | name: towerdemos 21 | account_type: Standard_LRS 22 | 23 | - name: Create virtual network 24 | azure_rm_virtualnetwork: 25 | resource_group: tower-demos-rg1 26 | name: vn001 27 | address_prefixes: "10.10.0.0/16" 28 | 29 | - name: Add subnet 30 | azure_rm_subnet: 31 | resource_group: tower-demos-rg1 32 | name: subnet001 33 | address_prefix: "10.10.0.0/24" 34 | virtual_network: vn001 35 | 36 | - name: Create public ip 37 | azure_rm_publicipaddress: 38 | resource_group: tower-demos-rg1 39 | allocation_method: Static 40 | name: pubip001 41 | 42 | - name: Create security group that allows SSH 43 | azure_rm_securitygroup: 44 | resource_group: tower-demos-rg1 45 | name: secgroup001 46 | rules: 47 | - name: SSH 48 | protocol: Tcp 49 | destination_port_range: 22 50 | access: Allow 51 | priority: 101 52 | direction: Inbound 53 | 54 | - name: Create NIC 55 | azure_rm_networkinterface: 56 | resource_group: tower-demos-rg1 57 | name: nic001 58 | virtual_network: vn001 59 | subnet: subnet001 60 | public_ip_name: pubip001 61 | security_group: secgroup001 62 | 63 | - name: Create virtual machine 64 | azure_rm_virtualmachine: 65 | resource_group: tower-demos-rg1 66 | name: tower-demo-vm001 67 | short_hostname: vm001 68 | tags: 69 | workload: enduser 70 | usecase: demo 71 | vm_size: Standard_D1 72 | storage_account: towerdemos 73 | storage_container: tower-demo-vm001 74 | storage_blob: tower-demo-vm001.vhd 75 | admin_username: "{{ username }}" 76 | ssh_password_enabled: false 77 | 
ssh_public_keys: 78 | - path: "{{ keypath }}" 79 | key_data: "{{ sshkey }}" 80 | network_interfaces: nic001 81 | image: RHELGOLD 82 | 83 | -------------------------------------------------------------------------------- /aap/deploy-vm-azure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an Azure instance 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Create Azure VM with defaults 9 | azure_rm_virtualmachine: 10 | resource_group: tower-demos-rg1 11 | name: '{{vm_name}}' 12 | vm_size: '{{vm_size}}' 13 | storage_account: towerdemos 14 | admin_username: '{{admin_username}}' 15 | admin_password: '{{admin_password}}' 16 | image: 17 | offer: CentOS 18 | publisher: OpenLogic 19 | sku: '7.1' 20 | version: latest 21 | -------------------------------------------------------------------------------- /aap/deploy-vm-gcp.yml: -------------------------------------------------------------------------------- 1 | - name: Create GCP instance(s) 2 | hosts: localhost 3 | gather_facts: no 4 | connection: local 5 | 6 | vars: 7 | machine_type: n1-standard-1 # default 8 | image: debian-7 9 | project_id: redhat-examples 10 | 11 | tasks: 12 | - name: Launch instances 13 | gce: 14 | instance_names: ansible-example-vm 15 | machine_type: '{{ machine_type }}' 16 | image: '{{ image }}' 17 | #service_account_email: '{{ service_account_email }}' 18 | #credentials_file: '{{ credentials_file }}' 19 | project_id: '{{ project_id }}' 20 | tags: webserver 21 | register: gce 22 | 23 | - name: Wait for SSH to come up 24 | wait_for: host='{{ item.public_ip }}' port=22 delay=10 timeout=60 25 | with_items: '{{gce.instance_data}}' 26 | 27 | - name: Add host to groupname 28 | add_host: hostname='{{ item.public_ip }}' groupname=new_instances 29 | with_items: '{{gce.instance_data}}' 30 | 31 | #- name: Manage new instances 32 | # hosts: new_instances 33 | # connection: ssh 34 | # sudo: True 35 | # roles: 
36 | # - base_configuration 37 | # - production_server 38 | -------------------------------------------------------------------------------- /aap/deploy-vm-vsphere-from-template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create a vCenter VM from a template 3 | hosts: localhost 4 | connection: local 5 | 6 | vars: 7 | vmotion_host: cloud07-acc.gps.hst.ams2.redhat.com 8 | 9 | tasks: 10 | - name: Create VM guest from the template 11 | vsphere_guest: 12 | vcenter_hostname: 10.39.164.21 13 | username: '{{ username }}' 14 | password: '{{ password }}' 15 | guest: '{{ vm_hostname }}' 16 | from_template: yes 17 | template_src: tpl-rhel7-minimal 18 | cluster: Demonstrations 19 | vm_extra_config: 20 | vcpu.hotadd: yes 21 | mem.hotadd: yes 22 | notes: "VM Created Using Ansible Tower" 23 | esxi: 24 | datacenter: EMEA CloudLab 25 | hostname: cloud12-acc.gps.hst.ams2.redhat.com 26 | tags: 27 | vm-create 28 | 29 | - name: Reconfigure VMware VM (add CPU and RAM) 30 | vsphere_guest: 31 | vcenter_hostname: 10.39.164.21 32 | username: '{{ username }}' 33 | password: '{{ password }}' 34 | guest: '{{ vm_hostname }}' 35 | state: reconfigured 36 | force: true 37 | vm_hardware: 38 | memory_mb: '{{ vm_memory }}' 39 | num_cpus: '{{ vm_cpus }}' 40 | tags: 41 | vm-config 42 | 43 | - name: Gather VM guest facts that could be acted upon 44 | vsphere_guest: 45 | vcenter_hostname: 10.39.164.21 46 | username: '{{ username }}' 47 | password: '{{ password }}' 48 | guest: '{{ vm_hostname }}' 49 | vmware_guest_facts: yes 50 | register: vm_facts 51 | 52 | - debug: var=vm_facts 53 | 54 | # This will be possible in ansible core version 2.2 55 | #- name: Perform vMotion of VM 56 | # local_action: 57 | # module: vmware_vmotion 58 | # hostname: 10.39.164.21 59 | # username: '{{ username }}' 60 | # password: '{{ password }}' 61 | # validate_certs: False 62 | # vm_name: '{{ vm_hostname }}' 63 | # destination_host: '{{ vmotion_host }}' 64 | 65 | - 
name: Register Host in Ansible Inventory 66 | add_host: 67 | name: "{{vm_hostname}}" 68 | groups: vm_just_added 69 | 70 | #- name: Configure new instances 71 | # hosts: vm_just_added 72 | # remote_user: someone 73 | # become: true 74 | # gather_facts: true 75 | 76 | # tasks: 77 | # - name: Configure hostname 78 | # hostname: 79 | # name: "{{ vm_hostname }}" 80 | # tags: 81 | # - baseconfig 82 | -------------------------------------------------------------------------------- /aap/deploy-vm-vsphere-from-template2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an vsphere ESXI VM from a template 3 | hosts: localhost 4 | connection: local 5 | 6 | tasks: 7 | - name: Create an ESXi VM Guest From Template 8 | vsphere_guest: 9 | guest: tower-generated-vm 10 | from_template: yes 11 | template_src: tpl-rhel7-minimal 12 | cluster: Demonstrations 13 | esxi: 14 | datacenter: EMEA CloudLab 15 | hostname: cloud12-acc.gps.hst.ams2.redhat.com 16 | -------------------------------------------------------------------------------- /aap/download-and-copy.yml: -------------------------------------------------------------------------------- 1 | - hosts: hostA 2 | tasks: 3 | - name: Download to local download folder 4 | connection: local 5 | get_url: 6 | url: http://ipv4.download.thinkbroadband.com/5MB.zip 7 | dest: /opt/ansible_files/mbse/test.zip 8 | 9 | - name: copy file to folder 10 | become: yes 11 | copy: 12 | src: /opt/ansible_files/mbse/test.zip 13 | dest: /tmp/test.zip 14 | -------------------------------------------------------------------------------- /aap/files/local_copy_a: -------------------------------------------------------------------------------- 1 | This is fileA contents... 
2 | -------------------------------------------------------------------------------- /aap/hub-collection-test.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | # setup namespaces/upload collections into containerized PAH 3 | # turn verbose project syncing on 4 | # call one of the collection things here 5 | --- 6 | - name: Add user to EDA Controller 7 | hosts: localhost 8 | connection: local 9 | gather_facts: false 10 | vars: 11 | eda_validate_certs: false 12 | 13 | roles: 14 | - infra.aap_configuration.eda_users 15 | -------------------------------------------------------------------------------- /aap/library/scan_foo.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | 3 | from ansible.module_utils.basic import AnsibleModule 4 | 5 | def main(): 6 | module = AnsibleModule( 7 | argument_spec = dict()) 8 | 9 | foo = [ 10 | { 11 | "hello": "world" 12 | }, 13 | { 14 | "foo": "bar" 15 | } 16 | ] 17 | results = dict(ansible_facts=dict(foo=foo)) 18 | module.exit_json(**results) 19 | 20 | main() -------------------------------------------------------------------------------- /aap/linux-account-blocks-eg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Perform Actions On Linux User Account 3 | # we use AWS tagging to help identify our hosts 4 | hosts: tag_os_type_linux 5 | # we don't need any host facts, so disable to make run faster 6 | gather_facts: false 7 | 8 | tasks: 9 | 10 | - block: 11 | 12 | - name: Disable local UNIX user account 13 | user: name='{{ firstname|lower }}.{{ surname|lower }}' shell=/bin/false expires=0 14 | ignore_errors: true 15 | 16 | rescue: 17 | - debug: msg='Oops! 
Something went wrong - please investigate' 18 | 19 | always: 20 | - debug: msg='Tasks to disable UNIX user accounts have been run' 21 | 22 | tags: 23 | - deactivate 24 | 25 | - block: 26 | 27 | - name: Delete local UNIX user account 28 | user: name='{{ firstname|lower }}.{{ surname|lower }}' state=absent remove=yes 29 | ignore_errors: true 30 | 31 | rescue: 32 | - debug: msg='Oops! Something went wrong - please investigate' 33 | 34 | always: 35 | - debug: msg='Tasks to delete UNIX user accounts have been run' 36 | 37 | tags: 38 | - delete 39 | -------------------------------------------------------------------------------- /aap/linux-account-blocks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Perform Actions On Linux User Account 3 | # we use AWS tagging to help identify our hosts 4 | hosts: tag_os_type_linux 5 | # we don't need any host facts, so disable to make run faster 6 | gather_facts: false 7 | 8 | tasks: 9 | 10 | - block: 11 | 12 | - name: Disable local UNIX user account 13 | user: name='{{ firstname|lower }}.{{ surname|lower }}' shell=/bin/false expires=0 14 | ignore_errors: true 15 | 16 | rescue: 17 | - debug: msg='Oops! Something went wrong - please investigate' 18 | 19 | always: 20 | - debug: msg='Tasks to disable UNIX user accounts have been run' 21 | 22 | tags: 23 | - deactivate 24 | 25 | - block: 26 | 27 | - name: Delete local UNIX user account 28 | user: name='{{ firstname|lower }}.{{ surname|lower }}' state=absent remove=yes 29 | ignore_errors: true 30 | 31 | rescue: 32 | - debug: msg='Oops! 
Something went wrong - please investigate' 33 | 34 | always: 35 | - debug: msg='Tasks to delete UNIX user accounts have been run' 36 | 37 | tags: 38 | - delete 39 | -------------------------------------------------------------------------------- /aap/linux-accounts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Linux Account Admin (we do nothing without a valid tag) 3 | hosts: web 4 | # we don't need any host facts, so disable to make run faster 5 | gather_facts: false 6 | become: yes 7 | tags: never 8 | 9 | tasks: 10 | 11 | - block: 12 | 13 | - name: Disable Local Linux User Account 14 | user: 15 | name: '{{ account|lower }}' 16 | password_lock: yes 17 | shell: /bin/false 18 | expires: 0 19 | 20 | rescue: 21 | - debug: msg='Oops! Something went wrong DISABLING the account - please investigate' 22 | 23 | always: 24 | - debug: msg='Tasks to disable Linux user account have been run' 25 | 26 | tags: 27 | - disable 28 | 29 | - block: 30 | 31 | - name: Delete Local Linux User Account 32 | user: 33 | name: '{{ account|lower }}' 34 | state: absent 35 | remove: yes 36 | 37 | rescue: 38 | - debug: msg='Oops! Something went wrong DELETING the account - please investigate' 39 | 40 | always: 41 | - debug: msg='Tasks to delete Linux user account have been run' 42 | 43 | tags: 44 | - delete -------------------------------------------------------------------------------- /aap/long-sleep.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | - hosts: all 4 | gather_facts: false 5 | ignore_errors: yes 6 | 7 | vars: 8 | period: 120 9 | myfile: /mydata/file 10 | 11 | tasks: 12 | 13 | - name: Collect only selected facts 14 | ansible.builtin.setup: 15 | filter: 16 | - 'ansible_distribution' 17 | - 'ansible_machine_id' 18 | - 'ansible_memtotal_mb' 19 | - 'ansible_memfree_mb' 20 | 21 | - name: "I'm feeling real sleepy..." 
22 | ansible.builtin.wait_for: 23 | timeout: "{{ period }}" 24 | delegate_to: localhost 25 | 26 | - ansible.builtin.debug: 27 | msg: "Isolated paths mounted into execution node: {{ AWX_ISOLATION_PATHS }}" 28 | 29 | - name: "Read pre-existing file..." 30 | ansible.builtin.debug: 31 | msg: "{{ lookup('file', '{{ myfile }}_read') }}" 32 | 33 | - name: "Write to a new file..." 34 | ansible.builtin.copy: 35 | dest: "{{ myfile }}_write" 36 | content: | 37 | This is the file I've just written to. 38 | 39 | - name: "Read written out file..." 40 | ansible.builtin.debug: 41 | msg: "{{ lookup('file', '{{ myfile }}_write') }}" 42 | 43 | -------------------------------------------------------------------------------- /aap/long_lines.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: long_log_lines 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | vars: 7 | how_many: 10 8 | delay_secs: 1 9 | 10 | tasks: 11 | - name: Create a list 12 | ansible.builtin.command: 13 | cmd: seq {{ how_many }} 14 | register: the_list 15 | 16 | - name: Set a fact for the list 17 | ansible.builtin.set_fact: 18 | the_list: "{{ the_list.stdout_lines }}" 19 | 20 | - name: Ping list times 21 | ansible.builtin.shell: 22 | cmd: | 23 | echo {{ item }} 24 | sleep {{ delay_secs }} 25 | loop: "{{ the_list }}" 26 | -------------------------------------------------------------------------------- /aap/motd: -------------------------------------------------------------------------------- 1 | 2 | !! ANSIBLE IS AWESOME!! 
3 | 4 | -------------------------------------------------------------------------------- /aap/patch-rhel-hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Patch RHEL systems 3 | - hosts: "{{ target_hosts | default('all') }}" 4 | gather_facts:yes 5 | become: yes 6 | 7 | vars: 8 | reboot_after_update: no 9 | packages: 10 | RedHat7: 11 | - kernel 12 | - microcode_ctl 13 | - perf 14 | - python-perf 15 | RedHat6: 16 | - kernel 17 | - kernel-firmware 18 | - perf 19 | - python-perf 20 | 21 | tasks: 22 | - name: RHEL | Install updates 23 | yum: 24 | name: "{{ packages[ansible_os_family ~ ansible_distribution_major_version] }}" 25 | state: present 26 | when: ansible_pkg_mgr == 'yum' 27 | notify: reboot system -------------------------------------------------------------------------------- /aap/ping-pong.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | - hosts: all 4 | become: no 5 | become_method: sudo 6 | gather_facts: true 7 | 8 | tasks: 9 | - name: Test SSH Connection 10 | ping: 11 | tags: 12 | - ssh, ping 13 | -------------------------------------------------------------------------------- /aap/poc-rhv-discover-using-vault-creds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RHV Discovery 3 | hosts: all 4 | gather_facts: false 5 | 6 | tasks: 7 | 8 | - block: 9 | 10 | - debug: 11 | var: rhvm_password 12 | verbosity: 2 13 | 14 | #- name: Authenticate against RHVM 15 | # ovirt_auth: 16 | # # pass username/password in using Tower Survey 17 | # url: 'https://lnlabrhvm01.opstklab.local/ovirt-engine/api' 18 | # username: '{{ rhvm_username }}' 19 | # password: '{{ rhvm_password }}' 20 | # insecure: yes 21 | 22 | #- name: List VMs 23 | # ovirt_vms_facts: 24 | # auth: "{{ ovirt_auth }}" 25 | 26 | #always: 27 | # - name: Always revoke the SSO token 28 | # ovirt_auth: 29 | # state: absent 30 | 
# ovirt_auth: "{{ ovirt_auth }}" 31 | -------------------------------------------------------------------------------- /aap/poc-rhv-discover.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RHV Discovery 3 | hosts: all 4 | 5 | tasks: 6 | 7 | - block: 8 | 9 | - name: Authenticate against RHVM 10 | ovirt_auth: 11 | # pass username/password in using Tower Survey 12 | url: 'https://lnlabrhvm01.opstklab.local/ovirt-engine/api' 13 | username: '{{ rhvm_username }}' 14 | password: '{{ rhvm_password }}' 15 | insecure: yes 16 | 17 | - name: List VMs 18 | ovirt_vms_facts: 19 | auth: "{{ ovirt_auth }}" 20 | 21 | always: 22 | - name: Always revoke the SSO token 23 | ovirt_auth: 24 | state: absent 25 | ovirt_auth: "{{ ovirt_auth }}" 26 | -------------------------------------------------------------------------------- /aap/poc-vcenter-deploy-vm-template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an vsphere ESXI VM from a template 3 | hosts: localhost 4 | connection: local 5 | 6 | tasks: 7 | - name: Create an ESXi VM Guest From Template 8 | vmware_guest: 9 | hostname: '{{ VMWARE_HOST }}' 10 | username: '{{ VMWARE_USER }}' 11 | password: '{{ VMWARE_PASSWORD }}' 12 | name: tower-generated-vm 13 | template: RHEL7.4-template-vmware 14 | cluster: LAB 15 | datacenter: LNLAB-OPNSTK 16 | #state: poweredon 17 | wait_for_ip_address: true 18 | validate_certs: False 19 | -------------------------------------------------------------------------------- /aap/post-provision-aws.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: true 4 | 5 | tasks: 6 | 7 | - name: add user accounts 8 | user: name={{ item.name }} state=present groups={{ item.groups }} 9 | with_items: 10 | - {name: 'phil', groups: 'wheel' } 11 | - {name: 'fred', groups: 'games' } 12 | - {name: 'wilma', groups: 'games' } 13 | 14 | - 
name: Update MOTD 15 | copy: 16 | src: ./motd 17 | dest: /etc/motd 18 | owner: root 19 | group: root 20 | mode: 0444 21 | -------------------------------------------------------------------------------- /aap/post-provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts: true 7 | 8 | tasks: 9 | 10 | - name: Configure hostname 11 | hostname: 12 | name: "{{ inventory_hostname }}.philredhat.com" 13 | 14 | - name: add user accounts 15 | user: name={{ item.name }} state=present groups={{ item.groups }} 16 | with_items: 17 | - {name: 'pgriffit', groups: 'wheel' } 18 | - {name: 'fred', groups: 'games' } 19 | - {name: 'wilma', groups: 'games' } 20 | 21 | - name: Update MOTD 22 | copy: 23 | src: ./motd 24 | dest: /etc/motd 25 | owner: root 26 | group: root 27 | mode: 0444 28 | -------------------------------------------------------------------------------- /aap/rear/local.conf: -------------------------------------------------------------------------------- 1 | OUTPUT={{ output_type }} 2 | OUTPUT_URL=nfs://{{ nfs_server }}/{{ nfs_mountpoint }} 3 | BACKUP=NETFS 4 | BACKUP_URL=nfs://{{ nfs_server }}/{{ nfs_mountpoint }} 5 | SSH_ROOT_PASSWORD="{{ root_password }}" 6 | BACKUP_PROG_EXCLUDE=("${BACKUP_PROG_EXCLUDE[@]}" '/media' '/var/tmp' '/var/crash') 7 | NETFS_KEEP_OLD_BACKUP_COPY= 8 | -------------------------------------------------------------------------------- /aap/rhel7-rear.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | - hosts: all 4 | gather_facts: yes 5 | become: yes 6 | 7 | vars: 8 | output_type: ISO 9 | nfs_server: 192.168.133.103 10 | nfs_mountpoint: recovery 11 | # NOTE: root password is required and gets passed in via Tower survey 12 | 13 | tasks: 14 | 15 | - name: Demonstrates RHEL7's Relax-and-Recover (ReaR) utility 16 | command: /bin/echo "How to 
use ReaR on RHEL - https://access.redhat.com/solutions/2115051" 17 | 18 | - name: Install packages required for creating ISO images 19 | yum: name={{ item }} state=installed 20 | with_items: 21 | - rear 22 | - genisoimage 23 | - syslinux 24 | when: ansible_distribution == 'RedHat' and ansible_distribution_major_version == '7' 25 | 26 | - name: Create local ReaR configuration 27 | template: src=./rear/local.conf dest=/etc/rear/local.conf 28 | 29 | - name: Create disaster recovery system and generate backup files 30 | command: rear -d -v mkbackup 31 | 32 | - name: Refer to documentation on how to recover 33 | command: /bin/echo "ReaR recovery steps - https://access.redhat.com/solutions/2115051" 34 | -------------------------------------------------------------------------------- /aap/sanity-check-aws.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | - hosts: all 4 | gather_facts: true 5 | 6 | tasks: 7 | - name: Test SSH Connection 8 | ping: 9 | tags: 10 | - ssh, ping, ip 11 | 12 | - name: Check IP Gateway 13 | debug: msg="Server {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" 14 | when: ansible_default_ipv4.gateway is defined 15 | tags: 16 | - ip 17 | 18 | - name: Verify Firewall Rules 19 | firewalld: port={{ item }} permanent=true state=enabled 20 | with_items: 21 | - 80/tcp 22 | - 443/tcp 23 | - 22/tcp 24 | tags: 25 | - firewall 26 | -------------------------------------------------------------------------------- /aap/sanity-check.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | - hosts: all 4 | become: yes 5 | become_method: sudo 6 | gather_facts: true 7 | 8 | tasks: 9 | - name: Test SSH Connection 10 | ping: 11 | tags: 12 | - ssh, ping, ip 13 | 14 | - name: Check IP Gateway 15 | debug: msg="Server {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" 16 | when: 
ansible_default_ipv4.gateway is defined 17 | tags: 18 | - ip 19 | 20 | - name: Verify Firewall Rules 21 | firewalld: port={{ item }} permanent=true state=enabled 22 | with_items: 23 | - 80/tcp 24 | - 8080/tcp 25 | - 443/tcp 26 | - 22/tcp 27 | tags: 28 | - firewall 29 | -------------------------------------------------------------------------------- /aap/scan_custom.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Custom Fact Scan Example 3 | hosts: all 4 | gather_facts: false 5 | 6 | tasks: 7 | - scan_foo: 8 | -------------------------------------------------------------------------------- /aap/showmepw.yml: -------------------------------------------------------------------------------- 1 | - name: Passwords 2 | hosts: all 3 | 4 | tasks: 5 | - name: Display password 6 | ansible.builtin.debug: 7 | msg: "{{ ansible_password }}" 8 | -------------------------------------------------------------------------------- /aap/snow-create-record.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | no_log: false 5 | 6 | vars: 7 | table: incident 8 | short_desc: "Web Services Creation" 9 | 10 | tasks: 11 | - name: Create a ServiceNow record 12 | snow_record: 13 | state: present 14 | username: "{{ snow_username }}" 15 | password: "{{ snow_password }}" 16 | instance: "{{ snow_instance }}" 17 | table: "{{ table }}" 18 | data: 19 | short_description: "{{ short_desc }}" 20 | severity: 3 21 | priority: 2 22 | category: web 23 | register: new_snow_record 24 | 25 | - debug: 26 | var: new_snow_record.record.number 27 | verbosity: 1 28 | 29 | - name: "Save ServiceNow Record for updating" 30 | set_stats: 31 | data: 32 | snow_record: "{{ new_snow_record.record.number }}" -------------------------------------------------------------------------------- /aap/snow-get-record.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | 4 | tasks: 5 | - name: Grab a user record 6 | snow_record: 7 | username: xxxxx 8 | password: xxxxx 9 | instance: xxxxx 10 | state: present 11 | number: 7696f1d9dba963001fe855d0cf961995 12 | table: sys_user 13 | lookup_field: sys_id 14 | -------------------------------------------------------------------------------- /aap/snow-update-record.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | no_log: false 5 | 6 | vars: 7 | table: incident 8 | 9 | tasks: 10 | 11 | - fail: 12 | msg: "We don't have a ServiceNow {{ table }} number to update" 13 | when: snow_record is not defined 14 | 15 | - name: Update the ServiceNow record 16 | snow_record: 17 | state: present 18 | username: "{{ snow_username }}" 19 | password: "{{ snow_password }}" 20 | instance: "{{ snow_instance }}" 21 | table: "{{ table }}" 22 | number: "{{ snow_record }}" 23 | data: 24 | comments : "Update from Ansible Tower. Sweet." 
25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /aap/touch-and-sleep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ ansible_limit | default(None) }}" 3 | gather_facts: false 4 | serial: 1 5 | 6 | vars: 7 | sleepTime: 30 8 | 9 | tasks: 10 | - name: Touch /tmp/touch-and-sleep 11 | file: 12 | path: /tmp/touch-and-sleep 13 | state: touch 14 | 15 | - name: Sleep for sleepTime seconds 16 | pause: 17 | seconds: "{{ sleepTime }}" 18 | -------------------------------------------------------------------------------- /aap/write_files.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | 3 | vars: 4 | unsafe_writes: 5 | ANSIBLE_UNSAFE_WRITES: 1 6 | 7 | tasks: 8 | 9 | - name: Make a copy of a file inside the container 10 | ansible.builtin.copy: 11 | src: local_file_a 12 | dest: local_copy_a 13 | environment: "{{ unsafe_writes }}" 14 | delegate_to: localhost 15 | -------------------------------------------------------------------------------- /abc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test telegram module 3 | hosts: localhost 4 | 5 | tasks: 6 | - telegram: 7 | token: "*****" 8 | chat_id: ***** 9 | msg: "Ansible playbook calling" 10 | -------------------------------------------------------------------------------- /amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: amazon 3 | remote_user: ec2-user 4 | become: yes 5 | become_method: sudo 6 | 7 | tasks: 8 | - name: install Apache web server 9 | yum: 10 | name: httpd 11 | state: latest 12 | notify: 13 | - restart Apache 14 | 15 | - name: add user accounts 16 | user: name={{ item.name }} state=present groups={{ item.groups }} 17 | with_items: 18 | - {name: 'foo', groups: 'wheel' } 19 | 20 | handlers: 21 | - name: restart 
Apache 22 | service: 23 | name: httpd 24 | state: restarted 25 | -------------------------------------------------------------------------------- /ara-example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test playbook 3 | hosts: localhost 4 | tasks: 5 | - name: Get git version of playbooks 6 | command: git rev-parse HEAD 7 | register: git_version 8 | 9 | - name: Record git version 10 | ara_record: 11 | key: "git_version" 12 | value: "{{ git_version.stdout }}" 13 | -------------------------------------------------------------------------------- /assert-eg.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | gather_facts: false 3 | vars: 4 | fruits: 5 | - oranges 6 | - lemons 7 | 8 | tasks: 9 | - name: Check for apples... 10 | assert: 11 | that: >- 12 | 'apples' in fruits 13 | msg: You have no apples 14 | -------------------------------------------------------------------------------- /async-fire-forget.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Requires ansible 1.8+ 3 | 4 | - name: Ansible async example 5 | hosts: localhost 6 | 7 | tasks: 8 | 9 | - name: 'YUM - fire and forget task' 10 | yum: name=docker-io state=installed 11 | async: 1000 12 | poll: 0 13 | register: yum_sleeper 14 | 15 | - name: 'YUM - check on fire and forget task' 16 | async_status: jid={{ yum_sleeper.ansible_job_id }} 17 | register: job_result 18 | until: job_result.finished 19 | retries: 30 20 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | # Starter pipeline 2 | # Start with a minimal pipeline that you can customize to build and deploy your code.
3 | # Add steps that build, run tests, deploy, and more: 4 | # https://aka.ms/yaml 5 | 6 | pool: 7 | vmImage: 'Ubuntu 16.04' 8 | 9 | steps: 10 | - script: echo Hello, world! 11 | displayName: 'Run a one-line script' 12 | 13 | - script: | 14 | echo Add other tasks to build, test, and deploy your project. 15 | echo See https://aka.ms/yaml 16 | displayName: 'Run a multi-line script' 17 | -------------------------------------------------------------------------------- /azure-rg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Manage Azure Resource Groups 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | tasks: 7 | - name: Create Azure resource group 8 | azure_rm_resourcegroup: 9 | name: "tower-testing-rg" 10 | location: 'West Europe' 11 | tags: 12 | testing: tower-testing 13 | -------------------------------------------------------------------------------- /azure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: azure 3 | remote_user: pgriffit 4 | become: yes 5 | become_method: sudo 6 | 7 | tasks: 8 | - name: Install Packages If Non Red Hat 9 | apt: name={{item}} state=latest 10 | with_items: 11 | - apache2 12 | - ntp 13 | when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' 14 | 15 | - name: Install Packages if Red Hat Based 16 | yum: name={{item}} state=latest 17 | with_items: 18 | - httpd 19 | - ntp 20 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' 21 | 22 | - name: Create Some Users 23 | user: name={{ item.name }} state=present groups={{ item.groups }} 24 | with_items: 25 | - {name: 'foo', groups: 'root' } 26 | -------------------------------------------------------------------------------- /blocks-eg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | #remote_user: vagrant 4 | #become: yes 5 | 
#become_method: sudo 6 | #gather_facts: true 7 | 8 | tasks: 9 | 10 | - block: 11 | - ping: 12 | - command: echo "Hello there" 13 | - command: /bin/this-command-doesnt-exist 14 | ignore_errors: true 15 | 16 | rescue: 17 | - debug: msg='Caught an error in this block' 18 | - local_action: command /usr/bin/say "Oops! Houston we have a problem" 19 | #- pause: seconds=2 20 | 21 | always: 22 | - local_action: command /usr/bin/say "We're done with this block" 23 | 24 | #when: ansible_distribution != 'RedHat' 25 | #become: no 26 | 27 | - name: Now onto the next step 28 | win_ping: 29 | ignore_errors: true 30 | -------------------------------------------------------------------------------- /check_group_ordering.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Print order of hosts in each host group 4 | hosts: all 5 | gather_facts: no 6 | serial: 1 7 | 8 | tasks: 9 | - name: Print order of hosts in each host group in the inventory 10 | block: 11 | - name: Print all host groups 12 | debug: 13 | msg: "{{ groups }}" 14 | delegate_to: localhost 15 | run_once: yes 16 | -------------------------------------------------------------------------------- /collections/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Collections 2 | 3 | There are different ways to interact with Ansible Collections and your Ansible Automation: 4 | 5 | - Install into your runtime environment or virtual env 6 | - Provide as part of your SCM tree 7 | - Using a requirements file (this example) 8 | 9 | Quick example of how to install Ansible Collections using a requirements.yml file 10 | 11 | ## Installation 12 | 13 | You need to setup credentials and set in your ansible.cfg file in order to pull download content. 
This [blog](https://www.ansible.com/blog/hands-on-with-ansible-collections) described it beautifully :) 14 | 15 | First install, just do it: 16 | ``` 17 | ansible-galaxy collection install -r ansible/collections/requirements.yml 18 | ``` 19 | 20 | Force install (latest version) and show me more: 21 | ``` 22 | ansible-galaxy collection install -r ansible/collections/requirements.yml --force -v 23 | ``` 24 | 25 | ## Output 26 | 27 | Sample when used with --force -v: 28 | 29 | ``` 30 | Using /Users/pgriffit/ansible.cfg as config file 31 | Process install dependency map 32 | Starting collection install process 33 | Installing 'junipernetworks.junos:0.0.2' to '/Users/pgriffit/collections/ansible_collections/junipernetworks/junos' 34 | Installing 'servicenow.servicenow:1.0.1' to '/Users/pgriffit/collections/ansible_collections/servicenow/servicenow' 35 | Installing 'f5networks.f5_modules:1.1.0' to '/Users/pgriffit/collections/ansible_collections/f5networks/f5_modules' 36 | Skipping 'ansible.netcommon' as it is already installed 37 | ``` 38 | 39 | ## Usage 40 | 41 | Refer to this [example](../snow_collection_example.yml), to see how to call and use a collection with a simple playbook 42 | -------------------------------------------------------------------------------- /collections/requirements.old: -------------------------------------------------------------------------------- 1 | collections: 2 | 3 | # Ansible Galaxy Sourced (requires API key): 4 | 5 | - name: junipernetworks.junos 6 | source: https://galaxy.ansible.com 7 | 8 | - name: servicenow.servicenow 9 | source: https://galaxy.ansible.com 10 | 11 | # Redhat Automation Hub Sourced (requires token): 12 | 13 | - name: f5networks.f5_modules 14 | source: https://cloud.redhat.com/api/automation-hub/ 15 | -------------------------------------------------------------------------------- /collections/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - 
name: infra.aap_configuration 3 | -------------------------------------------------------------------------------- /create-ami-amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an EC2 instance 3 | hosts: localhost 4 | connection: local 5 | tasks: 6 | - name: Basic AMI Creation 7 | ec2_ami: 8 | region: eu-west-1 9 | instance_id: "{{ ec2_instance_id }}" 10 | wait: yes 11 | name: "{{ ec2_ami_name }}" 12 | tags: 13 | Name: amibuilder 14 | Service: Build 15 | register: newmachines 16 | -------------------------------------------------------------------------------- /create-bare-metal-server-packetnet.yml: -------------------------------------------------------------------------------- 1 | # Simple example playbook for use with packet.net cloud 2 | # needs ansible 2.3 to support the packet_device module 3 | # assumes you've already set env var PACKET_API_TOKEN (or add it in here), and 4 | # the project is already setup and ready to be used as a pre-requisite 5 | 6 | - name: "Example packet.net cloud provisioning" 7 | hosts: localhost 8 | connection: local 9 | 10 | tasks: 11 | - name: "Provision 2 small bare metal servers" 12 | packet_device: 13 | project_id: something123 14 | hostnames: [my-demo-server1, my-demo-server2] 15 | operating_system: ubuntu_16_04 16 | plan: baremetal_0 17 | facility: ams1 18 | wait: yes 19 | register: newhosts 20 | 21 | - name: wait for SSH 22 | wait_for: 23 | delay: 1 24 | host: "{{ item.public_ipv4 }}" 25 | port: 22 26 | state: started 27 | timeout: 500 28 | with_items: "{{ newhosts.devices }}" 29 | -------------------------------------------------------------------------------- /custom_chkuser.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: beckton 3 | remote_user: vagrant 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Check if user exists 8 | action: chkuser username=foobar 9 | register: user 10 | 11 | - debug: 
msg="{{ user.msg }}" 12 | -------------------------------------------------------------------------------- /custom_chkuser_tower.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_os_type_linux 3 | gather_facts: true 4 | 5 | tasks: 6 | - name: Check if user exists 7 | action: chkuser username="{{ username }}" 8 | register: user 9 | 10 | - debug: msg="{{ user.msg }}" 11 | -------------------------------------------------------------------------------- /custom_wheelgrpchk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: vagrant 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Check number of users in wheel group 8 | action: chkwheelgrp 9 | register: wheel 10 | 11 | - debug: var=wheel 12 | -------------------------------------------------------------------------------- /delete-vm-vsphere.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete a vsphere ESXI VM 3 | hosts: localhost 4 | connection: local 5 | 6 | vars_prompt: 7 | - name: "username" 8 | prompt: "Enter vcenter username: " 9 | private: yes 10 | - name: "password" 11 | prompt: "Enter vcenter password: " 12 | private: yes 13 | 14 | tasks: 15 | - name: Delete an ESXi VM Guest 16 | vsphere_guest: 17 | vcenter_hostname: 10.39.164.21 18 | # version 2.1 only -> validate_certs: no 19 | username: '{{ username }}' 20 | password: '{{ password }}' 21 | guest: ansible_mynewvm01_template 22 | state: absent 23 | force: yes 24 | -------------------------------------------------------------------------------- /dell/idrac_discovery.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: idrac 3 | connection: local 4 | name: Get system inventory 5 | gather_facts: false 6 | 7 | collections: 8 | - dellemc.openmanage 9 | 10 | tasks: 11 | - name: Get system inventory. 
12 | idrac_system_info: 13 | idrac_ip: "{{ inventory_hostname }}" 14 | idrac_user: "{{ user }}" 15 | idrac_password: "{{ password }}" 16 | validate_certs: false 17 | -------------------------------------------------------------------------------- /dell/inventory/idrac_hosts: -------------------------------------------------------------------------------- 1 | [idrac] 2 | 192.168.0.1 3 | idrac_1.example.com region=datacenter1 4 | idrac_2.example.com region=datacenter2 5 | 6 | [idrac:vars] 7 | ansible_python_interpreter=/usr/bin/python3.8 8 | user=user 9 | password=password 10 | -------------------------------------------------------------------------------- /dell/inventory/idrac_hosts_constructed_inv.yml: -------------------------------------------------------------------------------- 1 | plugin: ansible.builtin.constructed 2 | strict: False 3 | 4 | groups: 5 | idrac_managed_servers: inventory_hostname.startswith('idrac_') 6 | idrac_unmanaged_servers: inventory_hostname.startswith('192.') 7 | 8 | keyed_groups: 9 | - prefix: "" 10 | separator: "" 11 | key: region 12 | -------------------------------------------------------------------------------- /deploy-vm-amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an EC2 instance 3 | hosts: localhost 4 | connection: local 5 | tasks: 6 | - name: Create an EC2 machine 7 | ec2: 8 | key_name: ***** 9 | region: eu-west-1 10 | zone: eu-west-1b 11 | instance_type: t2.micro 12 | image: ami-8b8c57f8 13 | wait: yes 14 | group: launch-wizard-1 15 | vpc_subnet_id: subnet-0f120178 16 | assign_public_ip: yes 17 | monitoring: no 18 | count: 1 19 | state: present 20 | register: newmachines 21 | 22 | - name: Wait for SSH to start 23 | wait_for: 24 | host: "{{ newmachines.instances[0].public_ip }}" 25 | port: 22 26 | timeout: 300 27 | delegate_to: localhost 28 | 29 | - name: Add the machine to in memory inventory 30 | add_host: 31 | hostname: "{{
newmachines.instances[0].public_ip }}" 32 | groups: amazon 33 | -------------------------------------------------------------------------------- /deploy-vm-azure-cmdline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an Azure instance 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | tasks: 7 | - name: Create Azure VM 8 | azure_rm_virtualmachine: 9 | resource_group: 'tower' 10 | name: pjgtestvm1 11 | vm_size: Standard_D1 12 | ad_user: ***** 13 | admin_username: root 14 | admin_password: xxxxxx 15 | image: 16 | offer: CentOS 17 | publisher: OpenLogic 18 | sku: '7.1' 19 | version: latest 20 | -------------------------------------------------------------------------------- /deploy-vm-azure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an Azure instance 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | tasks: 7 | - name: Create Azure VM 8 | azure: 9 | name: "my-azure-vm" 10 | hostname: "ansible-azure-vm-01" 11 | role_size: Small 12 | image: 5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-72-20160308 13 | static_virtual_network_ip_address: none 14 | location: 'West Europe' 15 | user: pgriffit 16 | os_type: linux 17 | ssh_cert_path: *****/azure-cert.pem 18 | management_cert_path: *****/azure-manage.cer 19 | password: "*****" 20 | subscription_id: ***** 21 | storage_account: ***** 22 | state: present 23 | wait: yes 24 | register: newmachines 25 | 26 | #- name: Wait for SSH to start 27 | # wait_for: 28 | # host: "{{ newmachines.instances[0].public_ip }}" 29 | # port: 22 30 | # timeout: 300 31 | # delegate_to: localhost 32 | 33 | #- name: Add the machine to the inventory 34 | # add_host: 35 | # hostname: "{{ newmachines.instances[0].public_ip }}" 36 | # groupname: azure 37 | -------------------------------------------------------------------------------- /deploy-vm-gcp.yml: 
-------------------------------------------------------------------------------- 1 | - name: Create GCP instance(s) 2 | hosts: localhost 3 | gather_facts: no 4 | connection: local 5 | 6 | vars: 7 | machine_type: n1-standard-1 # default 8 | image: debian-7 9 | project_id: redhat-examples 10 | 11 | tasks: 12 | - name: Launch instances 13 | gce: 14 | instance_names: ansible-example-vm 15 | machine_type: '{{ machine_type }}' 16 | image: '{{ image }}' 17 | service_account_email: '{{ service_account_email }}' 18 | credentials_file: '{{ credentials_file }}' 19 | project_id: '{{ project_id }}' 20 | tags: webserver 21 | register: gce 22 | 23 | - name: Wait for SSH to come up 24 | wait_for: host='{{ item.public_ip }}' port=22 delay=10 timeout=60 25 | with_items: gce.instance_data 26 | 27 | - name: Add host to groupname 28 | add_host: hostname='{{ item.public_ip }}' groupname=new_instances 29 | with_items: gce.instance_data 30 | 31 | #- name: Manage new instances 32 | # hosts: new_instances 33 | # connection: ssh 34 | # sudo: True 35 | # roles: 36 | # - base_configuration 37 | # - production_server 38 | -------------------------------------------------------------------------------- /deploy-vm-rhv.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create a RHEV based VM 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | 7 | vars: 8 | rhvm: lnlabrhvm01.opstklab.local 9 | hostname: pgriffit-ansible-test1 10 | 11 | vars_prompt: 12 | - name: "username" 13 | prompt: "Enter RHEV-M username: " 14 | private: yes 15 | - name: "password" 16 | prompt: "Enter RHEV-M users password: " 17 | private: yes 18 | 19 | tasks: 20 | 21 | - name: Authenicate with RHV 22 | ovirt_auth: 23 | url: https://'{{rhvm}}'/ovirt-engine/api 24 | username: '{{ username }}@internal' 25 | password: "{{ password }}" 26 | 27 | - name: Create a VM called {{ hostname }} 28 | ovirt_vms: 29 | state: present 30 | name: '{{ hostname }}' 31 | 
template: pgriffit-rhel7-base-template 32 | -------------------------------------------------------------------------------- /deploy-vm-vsphere-from-template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an vsphere ESXI VM from a template 3 | hosts: localhost 4 | connection: local 5 | 6 | vars_prompt: 7 | - name: "username" 8 | prompt: "Enter vcenter username: " 9 | private: yes 10 | - name: "password" 11 | prompt: "Enter vcenter password: " 12 | private: yes 13 | 14 | tasks: 15 | - name: Create an ESXi VM Guest From Template 16 | vsphere_guest: 17 | vcenter_hostname: 10.39.164.21 18 | # version 2.1 only -> validate_certs: no 19 | username: '{{ username }}' 20 | password: '{{ password }}' 21 | guest: ansible_mynewvm01_template 22 | from_template: yes 23 | template_src: tpl-rhel7-minimal 24 | cluster: Demonstrations 25 | #resource_pool: "/Resources" 26 | #vm_extra_config: 27 | # folder: MyFolder 28 | esxi: 29 | datacenter: EMEA CloudLab 30 | hostname: cloud12-acc.gps.hst.ams2.redhat.com 31 | -------------------------------------------------------------------------------- /deploy-vm-vsphere.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an vsphere ESXI VM 3 | hosts: localhost 4 | connection: local 5 | 6 | vars_prompt: 7 | - name: "username" 8 | prompt: "Enter vcenter username: " 9 | private: yes 10 | - name: "password" 11 | prompt: "Enter vcenter password: " 12 | private: yes 13 | 14 | tasks: 15 | - name: Create an ESXi VM Guest 16 | vsphere_guest: 17 | vcenter_hostname: vcenter.gps.hst.ams2.redhat.com 18 | # version 2.1 only -> validate_certs: no 19 | username: '{{ username }}' 20 | password: '{{ password }}' 21 | guest: mynewvm01 22 | state: powered_on 23 | vm_extra_config: 24 | vcpu.hotadd: yes 25 | mem.hotadd: yes 26 | notes: This is a test VM spun up using an Ansible playbook 27 | #folder: MyFolder 28 | vm_disk: 29 | disk1: 30 | 
size_gb: 10 31 | type: thin 32 | datastore: ds-cloud12-localdisk 33 | #folder: EMEA CloudLab/Individuals/pgriffit 34 | vm_nic: 35 | nic1: 36 | type: vmxnet3 37 | network: VM Network 38 | network_type: standard 39 | nic2: 40 | type: vmxnet3 41 | network: DPortGroup-Public 42 | network_type: dvs 43 | vm_hardware: 44 | memory_mb: 2048 45 | num_cpus: 2 46 | osid: centos64Guest 47 | scsi: paravirtual 48 | #vm_cdrom: 49 | # type: "iso" 50 | # iso_path: "DatastoreName/cd-image.iso" 51 | #vm_floppy: 52 | # type: "image" 53 | # image_path: "DatastoreName/floppy-image.flp" 54 | esxi: 55 | datacenter: EMEA CloudLab 56 | hostname: cloud12-acc.gps.hst.ams2.redhat.com 57 | -------------------------------------------------------------------------------- /deploy-vn-azure-cmdline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup an Azure instance 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Create virtual network 9 | azure_rm_virtualnetwork: 10 | resource_group: tower 11 | name: testvn001 12 | address_prefixes: "10.10.0.0/24" 13 | -------------------------------------------------------------------------------- /directory-structure.txt: -------------------------------------------------------------------------------- 1 | site.yml <--- master YAML playbook 2 | hosts <--- hosts Inventory file 3 | group_vars/ <--- common host grouping variables 4 | group1 5 | group2 6 | host_vars/ <--- common host specific variables 7 | hostname1 8 | hostname2 9 | roles/ <--- use roles when your playbook is getting big! 
10 | common/ 11 | files/ <--- what's to be copied over without modification 12 | templates/ <--- like files but allow modification 13 | tasks/ <--- what performs the actions 14 | handlers/ <--- called when 'notify' is used, triggers changes 15 | vars/ <--- same as defaults but higher priority 16 | defaults/ <--- default variables for the role 17 | meta/ <--- dependencies, attributes etc 18 | webservers/ 19 | … 20 | applicationservers/ 21 | … 22 | databaseservers/ 23 | … 24 | 25 | # ansible-playbook -i hosts site.yml 26 | -------------------------------------------------------------------------------- /dynamic_inv_ping_check.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | 5 | tasks: 6 | 7 | - name: check device connectivity 8 | shell: "ping -c 1 {{inventory_hostname}}" 9 | register: ping_results 10 | ignore_errors: true 11 | delegate_to: localhost 12 | 13 | - name: debug 14 | debug: 15 | var: ping_results.stdout 16 | verbosity: 1 17 | 18 | - name: add device to our in-memory inventory 19 | add_host: 20 | name: "{{inventory_hostname}}" 21 | groups: iot_online 22 | when: not ('100% packet loss' in ping_results.stdout) 23 | -------------------------------------------------------------------------------- /extras.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all:!cloud 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts: false 7 | 8 | tasks: 9 | - name: update packages 10 | ignore_errors: yes 11 | # yum: name=wget state=latest 12 | yum: 13 | name: wget 14 | state: latest 15 | tags: 16 | - packages 17 | 18 | -------------------------------------------------------------------------------- /first-checks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts:
true 7 | ignore_errors: true 8 | 9 | tasks: 10 | - name: Test connection 11 | ping: 12 | 13 | - debug: msg="Server {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" 14 | when: ansible_default_ipv4.gateway is defined 15 | 16 | tags: 17 | - ping 18 | 19 | - hostname: 20 | name: barnsley 21 | tags: 22 | - config 23 | 24 | - firewalld: port={{ item }} permanent=true state=enabled 25 | with_items: 26 | - 80/tcp 27 | - 8080/tcp 28 | - 443/tcp 29 | - 22/tcp 30 | tags: 31 | - firewall 32 | 33 | notify: 34 | - restart firewalld 35 | 36 | handlers: 37 | - name: restart firewalld 38 | service: 39 | name: firewalld 40 | state: restarted 41 | -------------------------------------------------------------------------------- /fix-rhel-vulnerable.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fix things that made the server vulnerable!! 3 | hosts: all 4 | 5 | tasks: 6 | - name: Update bash - shellshock 7 | yum: 8 | name: bash 9 | state: latest 10 | 11 | - name: Upgrade sudo 12 | yum: 13 | name: sudo 14 | state: latest 15 | 16 | - name: Run Insights 17 | command: /bin/redhat-access-insights 18 | -------------------------------------------------------------------------------- /good-eg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: beckton 3 | 4 | vars: 5 | http_port: 80 6 | max_clients: 200 7 | remote_user: vagrant 8 | become: yes 9 | become_method: sudo 10 | gather_facts: false 11 | 12 | vars_prompt: 13 | - name: "continue" 14 | prompt: "Danger Will Robinson, do you wish to continue? " 15 | default: "y" 16 | 17 | tasks: 18 | 19 | - fail: msg="Chicken!" 
20 | when: continue != "y" 21 | 22 | - name: ensure apache is at the latest version 23 | yum: pkg=httpd state=latest 24 | 25 | - name: write the apache config file 26 | template: src=/srv/httpd.j2 dest=/etc/httpd/conf/httpd.conf 27 | ignore_errors: true 28 | notify: 29 | - restart apache 30 | 31 | - name: ensure apache is running (and enable it at boot) 32 | service: name=httpd state=started enabled=yes 33 | 34 | - shell: httpd -v | grep version | awk '{print $3}'|cut -f2 -d'/' 35 | register: result 36 | 37 | - set_fact: apache_version={{ result.stdout }} 38 | 39 | - debug: var=apache_version 40 | when: apache_version is defined 41 | 42 | - fail: msg="Apache version '{{apache_version}}' just isn't good enough :(" 43 | when: apache_version == "2.4.6" 44 | 45 | handlers: 46 | - name: restart apache 47 | service: name=httpd state=restarted 48 | -------------------------------------------------------------------------------- /hello-world.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Hello World! 3 | hosts: all 4 | 5 | tasks: 6 | 7 | - name: Hello World! 8 | shell: echo "Hi! Tower is working." 9 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | [barnsley] 2 | 192.168.133.103 3 | 4 | [mansfield] 5 | 6 | [westbrom] 7 | 8 | [north:children] 9 | barnsley 10 | mansfield 11 | westbrom 12 | 13 | [north:vars] 14 | banter="It's not grim up north!" 15 | voice="Moira" 16 | 17 | [beckton] 18 | 192.168.133.101 19 | 20 | [croydon] 21 | 22 | [luton] 23 | 24 | [south:children] 25 | beckton 26 | croydon 27 | luton 28 | 29 | [south:vars] 30 | banter="It's all good down south!" 
31 | voice="Daniel" 32 | 33 | [switchsites:children] 34 | north 35 | south 36 | 37 | [amazon] 38 | #ec2-52-50-107-129.eu-west-1.compute.amazonaws.com 39 | #52.50.58.27 40 | 52.17.233.91 41 | 42 | [azure] 43 | #ubuntu-azure-1.westeurope.cloudapp.azure.com 44 | my-azure-vm.cloudapp.net 45 | 46 | [cloud:children] 47 | amazon 48 | azure 49 | -------------------------------------------------------------------------------- /immutablish-deploys/README.md: -------------------------------------------------------------------------------- 1 | #Immutable Systems and Ansible - Building and Deploying AMIs to AutoScaling Groups 2 | Find the docs on [our blog](http://www.ansible.com/blog/immutable-systems?utm_content=8145640&utm_medium=social&utm_source=github) 3 | -------------------------------------------------------------------------------- /immutablish-deploys/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | # uncomment this to disable SSH key host checking 3 | host_key_checking = False 4 | hostfile = inventory/hosts 5 | -------------------------------------------------------------------------------- /immutablish-deploys/blue.yml: -------------------------------------------------------------------------------- 1 | - hosts: fancyapp-blue 2 | vars_files: 3 | - vars/settings.yml 4 | roles: 5 | - infra 6 | - dual_asg 7 | -------------------------------------------------------------------------------- /immutablish-deploys/build_ami.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | tasks: 3 | - name: launch temporary instance 4 | ec2: 5 | assign_public_ip: yes 6 | region: "{{ region }}" 7 | group_id: "{{ group_id }}" 8 | instance_type: m1.small 9 | vpc_subnet_id: "{{ vpc_subnet_id }}" 10 | image: "{{ base_image }}" 11 | wait: yes 12 | wait_timeout: 500 13 | exact_count: 1 14 | count_tag: 15 | role: ami_builder 16 | instance_tags: 17 | role: ami_builder 18 | 
register: ami_instance 19 | 20 | - name: waiting for ssh to start 21 | wait_for: port=22 host={{ ami_instance.tagged_instances.0.public_ip }} timeout=300 22 | search_regex=OpenSSH 23 | 24 | - name: add host to group 25 | add_host: name={{ ami_instance.tagged_instances.0.public_ip }} groups=just_created 26 | 27 | - hosts: just_created 28 | remote_user: root 29 | roles: 30 | - apache 31 | 32 | - hosts: all 33 | tasks: 34 | 35 | - name: bundle ami 36 | action: 37 | module: ec2_ami 38 | instance_id: "{{ ami_instance.tagged_instances.0.id }}" 39 | region: "{{ region }}" 40 | state: present 41 | description: This was provisioned {{ ansible_date_time.iso8601 }} 42 | name: myappami-{{ ansible_date_time.epoch }} 43 | wait: yes 44 | register: amioutput 45 | 46 | - name: terminate temporary instance 47 | action: 48 | module: ec2 49 | state: absent 50 | region: "{{ region }}" 51 | instance_ids: "{{ ami_instance.tagged_instances.0.id }}" 52 | 53 | - name: create vars file with new ami info 54 | copy: 55 | content: | 56 | image_id: {{ amioutput.image_id }} 57 | lc_suffix: {{ ansible_date_time.epoch }} 58 | dest: vars/settings.yml 59 | when: deploy is defined and deploy|bool == True 60 | -------------------------------------------------------------------------------- /immutablish-deploys/cfn_update_policy.yml: -------------------------------------------------------------------------------- 1 | - hosts: fancyapp-cfn 2 | vars_files: 3 | - vars/settings.yml 4 | roles: 5 | - role: infra 6 | - role: asgcfn 7 | 8 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/group_vars/all: -------------------------------------------------------------------------------- 1 | app_name: rollingAMI 2 | instance_type: t2.micro 3 | region: eu-west-1 4 | lc_security_groups: 5 | - "{{ app_security_group.group_id }}" 6 | - "{{ admin_access_group.group_id }}" 7 | asg_subnets: 8 | - "{{ vpc.subnets[0].id }}" 9 | - "{{ vpc.subnets[1].id }}" 10 | 
asg_desired_capacity: 5 11 | asg_min_size: 5 12 | asg_max_size: 10 13 | vpc_subnets: 14 | - cidr: 172.31.0.0/16 15 | resource_tags: { "Name":"{{ app_name }}-1" } 16 | az: eu-west-1a 17 | - cidr: 172.31.0.0/16 # FIXME(review): duplicate CIDR — subnet-2 reuses subnet-1's 172.31.0.0/16; AWS rejects overlapping subnet CIDRs within one VPC. Confirm the intended per-AZ split (e.g. two /20s). 18 | resource_tags: { "Name":"{{ app_name }}-2" } 19 | az: eu-west-1b 20 | route_tables: 21 | - subnets: 22 | - 172.31.0.0/16 23 | routes: 24 | - dest: 0.0.0.0/0 25 | gw: igw 26 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/group_vars/apps: -------------------------------------------------------------------------------- 1 | ansible_python_interpreter: /usr/bin/env python 2 | ansible_connection: local 3 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/group_vars/colors: -------------------------------------------------------------------------------- 1 | lb_name: fancyapp-blue-violet 2 | load_balancers: 3 | - fancyapp-blue-violet 4 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/group_vars/just_added: -------------------------------------------------------------------------------- 1 | ansible_connection: ssh 2 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/host_vars/amibuilder: -------------------------------------------------------------------------------- 1 | base_image: ami-8b8c57f8 2 | group_id: sg-225a4346 3 | vpc_subnet_id: subnet-0f120178 4 | ansible_ssh_user: root 5 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/host_vars/fancyapp-blue: -------------------------------------------------------------------------------- 1 | relative: fancyapp-violet 2 | asg_group_name: fancyapp-blue 3 | lc_name: fancyapp-blue-{{ lc_suffix }} 4 | -------------------------------------------------------------------------------- 
/immutablish-deploys/inventory/host_vars/fancyapp-cfn: -------------------------------------------------------------------------------- 1 | app_name: rollingAMI 2 | lb_name: rollingAMI-CFN 3 | load_balancers: 4 | - rollingAMI-CFN -------------------------------------------------------------------------------- /immutablish-deploys/inventory/host_vars/fancyapp-rolling: -------------------------------------------------------------------------------- 1 | lb_name: rollingAMI 2 | lc_name: rollingAMI-{{ lc_suffix }} 3 | load_balancers: 4 | - rollingAMI 5 | asg_group_name: rollingAMI 6 | batch_size: 1 7 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/host_vars/fancyapp-violet: -------------------------------------------------------------------------------- 1 | relative: fancyapp-blue 2 | asg_group_name: fancyapp-violet 3 | lc_name: fancyapp-violet-{{ lc_suffix }} 4 | 5 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/host_vars/localhost: -------------------------------------------------------------------------------- 1 | ansible_python_interpreter: /usr/bin/env python 2 | ansible_connection: local 3 | base_image: ami-8b8c57f8 4 | group_id: sg-225a4346 5 | vpc_subnet_id: subnet-0f120178 6 | ansible_ssh_user: root 7 | -------------------------------------------------------------------------------- /immutablish-deploys/inventory/hosts: -------------------------------------------------------------------------------- 1 | localhost 2 | fancyapp-blue 3 | fancyapp-violet 4 | fancyapp-rolling 5 | fancyapp-cfn 6 | amibuilder 7 | 8 | [apps] 9 | fancyapp-blue 10 | fancyapp-violet 11 | fancyapp-rolling 12 | fancyapp-cfn 13 | amibuilder 14 | 15 | [colors] 16 | fancyapp-blue 17 | fancyapp-violet 18 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/README.md: 
-------------------------------------------------------------------------------- 1 | Role Name 2 | ======== 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ------------------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for apache 3 | apache_test_message: This is a test - -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for apache 3 | 4 | - name: restart apache 5 | service: name={{ apache_service }} state=restarted 6 | 7 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: your name 4 | description: 5 | company: your company (optional) 6 | # Some suggested licenses: 7 | # - BSD (default) 8 | # - MIT 9 | # - GPLv2 10 | # - GPLv3 11 | # - Apache 12 | # - CC-BY 13 | license: license (GPLv2, CC-BY, etc) 14 | min_ansible_version: 1.2 15 | # 16 | # Below are all platforms currently available. Just uncomment 17 | # the ones that apply to your role. If you don't see your 18 | # platform on this list, let us know and we'll get it added! 
19 | # 20 | #platforms: 21 | #- name: EL 22 | # versions: 23 | # - all 24 | # - 5 25 | # - 6 26 | # - 7 27 | #- name: GenericUNIX 28 | # versions: 29 | # - all 30 | # - any 31 | #- name: Fedora 32 | # versions: 33 | # - all 34 | # - 16 35 | # - 17 36 | # - 18 37 | # - 19 38 | # - 20 39 | #- name: opensuse 40 | # versions: 41 | # - all 42 | # - 12.1 43 | # - 12.2 44 | # - 12.3 45 | # - 13.1 46 | # - 13.2 47 | #- name: Amazon 48 | # versions: 49 | # - all 50 | # - 2013.03 51 | # - 2013.09 52 | #- name: GenericBSD 53 | # versions: 54 | # - all 55 | # - any 56 | #- name: FreeBSD 57 | # versions: 58 | # - all 59 | # - 8.0 60 | # - 8.1 61 | # - 8.2 62 | # - 8.3 63 | # - 8.4 64 | # - 9.0 65 | # - 9.1 66 | # - 9.1 67 | # - 9.2 68 | #- name: Ubuntu 69 | # versions: 70 | # - all 71 | # - lucid 72 | # - maverick 73 | # - natty 74 | # - oneiric 75 | # - precise 76 | # - quantal 77 | # - raring 78 | # - saucy 79 | # - trusty 80 | #- name: SLES 81 | # versions: 82 | # - all 83 | # - 10SP3 84 | # - 10SP4 85 | # - 11 86 | # - 11SP1 87 | # - 11SP2 88 | # - 11SP3 89 | #- name: GenericLinux 90 | # versions: 91 | # - all 92 | # - any 93 | #- name: Debian 94 | # versions: 95 | # - all 96 | # - etch 97 | # - lenny 98 | # - squeeze 99 | # - wheezy 100 | # 101 | # Below are all categories currently available. Just as with 102 | # the platforms above, uncomment those that apply to your role. 103 | # 104 | #categories: 105 | #- cloud 106 | #- cloud:ec2 107 | #- cloud:gce 108 | #- cloud:rax 109 | #- clustering 110 | #- database 111 | #- database:nosql 112 | #- database:sql 113 | #- development 114 | #- monitoring 115 | #- networking 116 | #- packaging 117 | #- system 118 | #- web 119 | dependencies: [] 120 | # List your role dependencies here, one per line. Only 121 | # dependencies available via galaxy should be listed here. 122 | # Be sure to remove the '[]' above if you add dependencies 123 | # to this list. 
124 | 125 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | - name: install packages (Debian) 2 | apt: name={{ item }} state=present update_cache=yes cache_valid_time=3600 3 | with_items: packages 4 | tags: package 5 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | - name: stop iptables 2 | service: name=iptables state=stopped enabled=no 3 | 4 | - name: install packages (Red Hat) 5 | yum: name={{ item }} state=present 6 | with_items: packages 7 | tags: package -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for apache 3 | 4 | - name: Add the OS specific variables 5 | include_vars: "{{ ansible_os_family }}.yml" 6 | 7 | # CentOS specific package installations 8 | - include: RedHat.yml 9 | when: ansible_os_family == "RedHat" 10 | 11 | # Ubuntu specific package installations 12 | - include: Debian.yml 13 | when: ansible_os_family == "Debian" 14 | 15 | - name: copy index.html 16 | template: src=index.html.j2 dest={{ apache_docroot }}/index.html 17 | 18 | - name: start and enable apache service 19 | service: name={{ apache_service }} state=started enabled=yes 20 | tags: service -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/templates/index.html.j2: -------------------------------------------------------------------------------- 1 | {{ apache_test_message }} {{ ansible_distribution }} {{ ansible_distribution_version }}
2 | Current Host: {{ ansible_hostname }} VERSION 2
-------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | - apache2 3 | apache_service: apache2 4 | apache_docroot: /var/www 5 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | - httpd 3 | - libselinux-python 4 | apache_service: httpd 5 | apache_docroot: /var/www/html 6 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/apache/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for apache 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 
15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for asg 3 | instance_type: t2.small 4 | availability_zones: 5 | - us-east-1b 6 | - us-east-1c 7 | region: us-east-1 -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for asg 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: your name 4 | description: 5 | company: your company (optional) 6 | # Some suggested licenses: 7 | # - BSD (default) 8 | # - MIT 9 | # - GPLv2 10 | # - GPLv3 11 | # - Apache 12 | # - CC-BY 13 | license: license (GPLv2, CC-BY, etc) 14 | min_ansible_version: 1.2 15 | # 16 | # Below are all platforms currently available. Just uncomment 17 | # the ones that apply to your role. 
If you don't see your 18 | # platform on this list, let us know and we'll get it added! 19 | # 20 | #platforms: 21 | #- name: EL 22 | # versions: 23 | # - all 24 | # - 5 25 | # - 6 26 | # - 7 27 | #- name: GenericUNIX 28 | # versions: 29 | # - all 30 | # - any 31 | #- name: Fedora 32 | # versions: 33 | # - all 34 | # - 16 35 | # - 17 36 | # - 18 37 | # - 19 38 | # - 20 39 | #- name: opensuse 40 | # versions: 41 | # - all 42 | # - 12.1 43 | # - 12.2 44 | # - 12.3 45 | # - 13.1 46 | # - 13.2 47 | #- name: Amazon 48 | # versions: 49 | # - all 50 | # - 2013.03 51 | # - 2013.09 52 | #- name: GenericBSD 53 | # versions: 54 | # - all 55 | # - any 56 | #- name: FreeBSD 57 | # versions: 58 | # - all 59 | # - 8.0 60 | # - 8.1 61 | # - 8.2 62 | # - 8.3 63 | # - 8.4 64 | # - 9.0 65 | # - 9.1 66 | # - 9.1 67 | # - 9.2 68 | #- name: Ubuntu 69 | # versions: 70 | # - all 71 | # - lucid 72 | # - maverick 73 | # - natty 74 | # - oneiric 75 | # - precise 76 | # - quantal 77 | # - raring 78 | # - saucy 79 | # - trusty 80 | #- name: SLES 81 | # versions: 82 | # - all 83 | # - 10SP3 84 | # - 10SP4 85 | # - 11 86 | # - 11SP1 87 | # - 11SP2 88 | # - 11SP3 89 | #- name: GenericLinux 90 | # versions: 91 | # - all 92 | # - any 93 | #- name: Debian 94 | # versions: 95 | # - all 96 | # - etch 97 | # - lenny 98 | # - squeeze 99 | # - wheezy 100 | # 101 | # Below are all categories currently available. Just as with 102 | # the platforms above, uncomment those that apply to your role. 103 | # 104 | #categories: 105 | #- cloud 106 | #- cloud:ec2 107 | #- cloud:gce 108 | #- cloud:rax 109 | #- clustering 110 | #- database 111 | #- database:nosql 112 | #- database:sql 113 | #- development 114 | #- monitoring 115 | #- networking 116 | #- packaging 117 | #- system 118 | #- web 119 | dependencies: [] 120 | # List your role dependencies here, one per line. Only 121 | # dependencies available via galaxy should be listed here. 
122 | # Be sure to remove the '[]' above if you add dependencies 123 | # to this list. 124 | 125 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #tasks file for asg 3 | 4 | - name: Create AutoScale Group and LaunchConfig via cloudformation 5 | cloudformation: > 6 | stack_name={{ app_name }} 7 | state=present 8 | region={{ region }} 9 | disable_rollback=true 10 | template=roles/asgcfn/files/asg_lc.json 11 | args: 12 | template_parameters: 13 | AvailabilityZones: "{{ availability_zones | join(',') }}" 14 | Subnets: "{{ asg_subnets | join(',') }}" 15 | ImageId: "{{ image_id }}" 16 | KeyName: "{{ key_name }}" 17 | SecurityGroups: "{{ lc_security_groups | join(',')}}" 18 | LoadBalancerNames: "{{ load_balancers | join(',') }}" 19 | register: stack -------------------------------------------------------------------------------- /immutablish-deploys/roles/asgcfn/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for asg 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for asg 3 | instance_type: t2.small 4 | availability_zones: 5 | - us-east-1b 6 | - us-east-1c 7 | region: us-east-1 -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for asg 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: your name 4 | description: 5 | company: your company (optional) 6 | # Some suggested licenses: 7 | # - BSD (default) 8 | # - MIT 9 | # - GPLv2 10 | # - GPLv3 11 | # - Apache 12 | # - CC-BY 13 | license: license (GPLv2, CC-BY, etc) 14 | min_ansible_version: 1.2 15 | # 16 | # Below are all platforms currently available. Just uncomment 17 | # the ones that apply to your role. If you don't see your 18 | # platform on this list, let us know and we'll get it added! 
19 | # 20 | #platforms: 21 | #- name: EL 22 | # versions: 23 | # - all 24 | # - 5 25 | # - 6 26 | # - 7 27 | #- name: GenericUNIX 28 | # versions: 29 | # - all 30 | # - any 31 | #- name: Fedora 32 | # versions: 33 | # - all 34 | # - 16 35 | # - 17 36 | # - 18 37 | # - 19 38 | # - 20 39 | #- name: opensuse 40 | # versions: 41 | # - all 42 | # - 12.1 43 | # - 12.2 44 | # - 12.3 45 | # - 13.1 46 | # - 13.2 47 | #- name: Amazon 48 | # versions: 49 | # - all 50 | # - 2013.03 51 | # - 2013.09 52 | #- name: GenericBSD 53 | # versions: 54 | # - all 55 | # - any 56 | #- name: FreeBSD 57 | # versions: 58 | # - all 59 | # - 8.0 60 | # - 8.1 61 | # - 8.2 62 | # - 8.3 63 | # - 8.4 64 | # - 9.0 65 | # - 9.1 66 | # - 9.1 67 | # - 9.2 68 | #- name: Ubuntu 69 | # versions: 70 | # - all 71 | # - lucid 72 | # - maverick 73 | # - natty 74 | # - oneiric 75 | # - precise 76 | # - quantal 77 | # - raring 78 | # - saucy 79 | # - trusty 80 | #- name: SLES 81 | # versions: 82 | # - all 83 | # - 10SP3 84 | # - 10SP4 85 | # - 11 86 | # - 11SP1 87 | # - 11SP2 88 | # - 11SP3 89 | #- name: GenericLinux 90 | # versions: 91 | # - all 92 | # - any 93 | #- name: Debian 94 | # versions: 95 | # - all 96 | # - etch 97 | # - lenny 98 | # - squeeze 99 | # - wheezy 100 | # 101 | # Below are all categories currently available. Just as with 102 | # the platforms above, uncomment those that apply to your role. 103 | # 104 | #categories: 105 | #- cloud 106 | #- cloud:ec2 107 | #- cloud:gce 108 | #- cloud:rax 109 | #- clustering 110 | #- database 111 | #- database:nosql 112 | #- database:sql 113 | #- development 114 | #- monitoring 115 | #- networking 116 | #- packaging 117 | #- system 118 | #- web 119 | dependencies: [] 120 | # List your role dependencies here, one per line. Only 121 | # dependencies available via galaxy should be listed here. 122 | # Be sure to remove the '[]' above if you add dependencies 123 | # to this list. 
124 | 125 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for asg 3 | - name: create launch config 4 | ec2_lc: 5 | name: "{{ lc_name }}" 6 | image_id: "{{ image_id }}" 7 | key_name: "{{ key_name }}" 8 | region: "{{ region }}" 9 | security_groups: "{{ lc_security_groups }}" 10 | instance_type: "{{ instance_type }}" 11 | assign_public_ip: yes 12 | tags: launch_config 13 | 14 | - name: create autoscale groups 15 | ec2_asg: 16 | name: "{{ asg_group_name }}" 17 | health_check_period: 60 18 | load_balancers: "{{ load_balancers }}" 19 | health_check_type: ELB 20 | availability_zones: "{{ availability_zones | join(',')}}" 21 | launch_config_name: "{{ lc_name }}" 22 | min_size: "{{ asg_min_size }}" 23 | max_size: "{{ asg_max_size }}" 24 | desired_capacity: "{{ asg_desired_capacity }}" 25 | region: "{{ region }}" 26 | vpc_zone_identifier: "{{ asg_subnets | join(',') }}" 27 | tags: autoscale_group 28 | 29 | - name: wait for viable_instances >= asg_desired_capacity 30 | ec2_asg: name={{ asg_group_name }} region={{ region }} 31 | register: result 32 | until: result.viable_instances >= asg_desired_capacity 33 | delay: 10 34 | retries: 120 35 | 36 | - include: terminate_relative.yml 37 | when: relative is defined and terminate_relative is defined and terminate_relative|bool == True 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/tasks/terminate_relative.yml: -------------------------------------------------------------------------------- 1 | - name: terminate relative asg 2 | ec2_asg: 3 | name: "{{ relative }}" 4 | state: absent 5 | region: "{{ region }}" 6 | register: result 7 | 8 | - name: terminate relative launch_config 9 | ec2_lc: 10 | name: "{{ result.launch_config_name }}" 11 | region: 
"{{ region }}" 12 | state: absent 13 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/dual_asg/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for asg 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/infra/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ======== 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ------------------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/infra/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for myapp_infra 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/infra/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: your name 4 | description: 5 | company: your company (optional) 6 | # Some suggested licenses: 7 | # - BSD (default) 8 | # - MIT 9 | # - GPLv2 10 | # - GPLv3 11 | # - Apache 12 | # - CC-BY 13 | license: license (GPLv2, CC-BY, etc) 14 | min_ansible_version: 1.2 15 | # 16 | # Below are all platforms currently available. Just uncomment 17 | # the ones that apply to your role. If you don't see your 18 | # platform on this list, let us know and we'll get it added! 19 | # 20 | #platforms: 21 | #- name: EL 22 | # versions: 23 | # - all 24 | # - 5 25 | # - 6 26 | # - 7 27 | #- name: GenericUNIX 28 | # versions: 29 | # - all 30 | # - any 31 | #- name: Fedora 32 | # versions: 33 | # - all 34 | # - 16 35 | # - 17 36 | # - 18 37 | # - 19 38 | # - 20 39 | #- name: opensuse 40 | # versions: 41 | # - all 42 | # - 12.1 43 | # - 12.2 44 | # - 12.3 45 | # - 13.1 46 | # - 13.2 47 | #- name: Amazon 48 | # versions: 49 | # - all 50 | # - 2013.03 51 | # - 2013.09 52 | #- name: GenericBSD 53 | # versions: 54 | # - all 55 | # - any 56 | #- name: FreeBSD 57 | # versions: 58 | # - all 59 | # - 8.0 60 | # - 8.1 61 | # - 8.2 62 | # - 8.3 63 | # - 8.4 64 | # - 9.0 65 | # - 9.1 66 | # - 9.1 67 | # - 9.2 68 | #- name: Ubuntu 69 | # versions: 70 | # - all 71 | # - lucid 72 | # - maverick 73 | # - natty 74 | # - oneiric 75 | # - precise 76 | # - quantal 77 | # - raring 78 | # - saucy 79 | # - trusty 80 | #- name: SLES 81 | # versions: 82 | # - all 83 | # - 10SP3 84 | # - 10SP4 85 | # - 11 86 | 
# - 11SP1 87 | # - 11SP2 88 | # - 11SP3 89 | #- name: GenericLinux 90 | # versions: 91 | # - all 92 | # - any 93 | #- name: Debian 94 | # versions: 95 | # - all 96 | # - etch 97 | # - lenny 98 | # - squeeze 99 | # - wheezy 100 | # 101 | # Below are all categories currently available. Just as with 102 | # the platforms above, uncomment those that apply to your role. 103 | # 104 | #categories: 105 | #- cloud 106 | #- cloud:ec2 107 | #- cloud:gce 108 | #- cloud:rax 109 | #- clustering 110 | #- database 111 | #- database:nosql 112 | #- database:sql 113 | #- development 114 | #- monitoring 115 | #- networking 116 | #- packaging 117 | #- system 118 | #- web 119 | dependencies: [] 120 | # List your role dependencies here, one per line. Only 121 | # dependencies available via galaxy should be listed here. 122 | # Be sure to remove the '[]' above if you add dependencies 123 | # to this list. 124 | 125 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/infra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # tasks file for myapp_infra 2 | - name: create vpc 3 | ec2_vpc: 4 | state: present 5 | cidr_block: 172.22.0.0/16 6 | resource_tags: { "Name":" {{ app_name }}" } 7 | subnets: "{{ vpc_subnets }}" 8 | internet_gateway: True 9 | route_tables: "{{ route_tables }}" 10 | region: "{{ region }}" 11 | wait: yes 12 | register: vpc 13 | 14 | 15 | - name: create admin access 16 | ec2_group: 17 | name: admin_access 18 | description: SSH access for admins and tower 19 | region: "{{ region }}" 20 | vpc_id: "{{ vpc.vpc_id }}" 21 | rules: 22 | - proto: tcp 23 | from_port: 22 24 | to_port: 22 25 | cidr_ip: 0.0.0.0/0 26 | tags: security_groups 27 | register: admin_access_group 28 | 29 | - name: create app security group 30 | ec2_group: 31 | name: "{{ app_name }}" 32 | description: "{{ app_name }} security group" 33 | region: "{{ region }}" 34 | rules: 35 | - proto: tcp 36 | 
from_port: 80 37 | to_port: 80 38 | cidr_ip: 0.0.0.0/0 39 | - proto: tcp 40 | from_port: 0 41 | to_port: 65535 42 | group_name: "{{ app_name }}" 43 | - proto: udp 44 | from_port: 0 45 | to_port: 65535 46 | group_name: "{{ app_name }}" 47 | - proto: icmp 48 | from_port: 0 49 | to_port: 0 50 | group_name: "{{ app_name }}" 51 | vpc_id: "{{ vpc.vpc_id }}" 52 | tags: security_groups 53 | register: app_security_group 54 | 55 | - name: launch load balancer 56 | ec2_elb_lb: 57 | name: "{{ lb_name }}" 58 | region: "{{ region }}" 59 | state: present 60 | subnets: "{{ asg_subnets }}" 61 | connection_draining_timeout: 60 62 | listeners: 63 | - protocol: http 64 | load_balancer_port: 80 65 | instance_port: 80 66 | health_check: 67 | ping_protocol: http 68 | ping_port: 80 69 | ping_path: "/" 70 | response_timeout: 5 71 | interval: 30 72 | unhealthy_threshold: 3 73 | healthy_threshold: 3 74 | register: load_balancer 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/infra/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for myapp_infra 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for asg 3 | instance_type: t2.small 4 | availability_zones: 5 | - us-east-1b 6 | - us-east-1c 7 | region: us-east-1 -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for asg 3 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: your name 4 | description: 5 | company: your company (optional) 6 | # Some suggested licenses: 7 | # - BSD (default) 8 | # - MIT 9 | # - GPLv2 10 | # - GPLv3 11 | # - Apache 12 | # - CC-BY 13 | license: license (GPLv2, CC-BY, etc) 14 | min_ansible_version: 1.2 15 | # 16 | # Below are all platforms currently available. Just uncomment 17 | # the ones that apply to your role. If you don't see your 18 | # platform on this list, let us know and we'll get it added! 
19 | # 20 | #platforms: 21 | #- name: EL 22 | # versions: 23 | # - all 24 | # - 5 25 | # - 6 26 | # - 7 27 | #- name: GenericUNIX 28 | # versions: 29 | # - all 30 | # - any 31 | #- name: Fedora 32 | # versions: 33 | # - all 34 | # - 16 35 | # - 17 36 | # - 18 37 | # - 19 38 | # - 20 39 | #- name: opensuse 40 | # versions: 41 | # - all 42 | # - 12.1 43 | # - 12.2 44 | # - 12.3 45 | # - 13.1 46 | # - 13.2 47 | #- name: Amazon 48 | # versions: 49 | # - all 50 | # - 2013.03 51 | # - 2013.09 52 | #- name: GenericBSD 53 | # versions: 54 | # - all 55 | # - any 56 | #- name: FreeBSD 57 | # versions: 58 | # - all 59 | # - 8.0 60 | # - 8.1 61 | # - 8.2 62 | # - 8.3 63 | # - 8.4 64 | # - 9.0 65 | # - 9.1 66 | # - 9.1 67 | # - 9.2 68 | #- name: Ubuntu 69 | # versions: 70 | # - all 71 | # - lucid 72 | # - maverick 73 | # - natty 74 | # - oneiric 75 | # - precise 76 | # - quantal 77 | # - raring 78 | # - saucy 79 | # - trusty 80 | #- name: SLES 81 | # versions: 82 | # - all 83 | # - 10SP3 84 | # - 10SP4 85 | # - 11 86 | # - 11SP1 87 | # - 11SP2 88 | # - 11SP3 89 | #- name: GenericLinux 90 | # versions: 91 | # - all 92 | # - any 93 | #- name: Debian 94 | # versions: 95 | # - all 96 | # - etch 97 | # - lenny 98 | # - squeeze 99 | # - wheezy 100 | # 101 | # Below are all categories currently available. Just as with 102 | # the platforms above, uncomment those that apply to your role. 103 | # 104 | #categories: 105 | #- cloud 106 | #- cloud:ec2 107 | #- cloud:gce 108 | #- cloud:rax 109 | #- clustering 110 | #- database 111 | #- database:nosql 112 | #- database:sql 113 | #- development 114 | #- monitoring 115 | #- networking 116 | #- packaging 117 | #- system 118 | #- web 119 | dependencies: [] 120 | # List your role dependencies here, one per line. Only 121 | # dependencies available via galaxy should be listed here. 122 | # Be sure to remove the '[]' above if you add dependencies 123 | # to this list. 
124 | 125 | -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for asg 3 | 4 | - name: create launch config 5 | ec2_lc: 6 | name: "{{ lc_name }}" 7 | image_id: "{{ image_id }}" 8 | key_name: "{{ key_name }}" 9 | region: "{{ region }}" 10 | security_groups: "{{ lc_security_groups }}" 11 | instance_type: "{{ instance_type }}" 12 | assign_public_ip: yes 13 | tags: launch_config 14 | 15 | - name: create autoscale groups 16 | ec2_asg: 17 | name: "{{ asg_group_name }}" 18 | health_check_period: 60 19 | load_balancers: "{{ load_balancers }}" 20 | health_check_type: ELB 21 | availability_zones: "{{ availability_zones | join(',')}}" 22 | launch_config_name: "{{ lc_name }}" 23 | min_size: "{{ asg_min_size }}" 24 | max_size: "{{ asg_max_size }}" 25 | desired_capacity: "{{ asg_desired_capacity }}" 26 | region: "{{ region }}" 27 | replace_all_instances: yes 28 | vpc_zone_identifier: "{{ asg_subnets | join(',') }}" 29 | until: asg_result.viable_instances|int >= asg_desired_capacity|int 30 | delay: 10 31 | retries: 120 32 | register: asg_result 33 | tags: autoscale_group -------------------------------------------------------------------------------- /immutablish-deploys/roles/rolling_asg/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for asg 3 | tmp_desired_capacity: "{{ asg_desired_capacity + batch_size }}" 4 | tmp_min_size: "{{ asg_min_size + batch_size }}" 5 | tmp_max_size: "{{ asg_max_size + batch_size }}" -------------------------------------------------------------------------------- /immutablish-deploys/rolling_ami.yml: -------------------------------------------------------------------------------- 1 | - hosts: fancyapp-rolling 2 | vars_files: 3 | - vars/settings.yml 4 | roles: 5 | - infra 6 | - rolling_asg 7 | 
-------------------------------------------------------------------------------- /immutablish-deploys/vars/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /immutablish-deploys/violet.yml: -------------------------------------------------------------------------------- 1 | - hosts: fancyapp-violet 2 | vars_files: 3 | - vars/settings.yml 4 | roles: 5 | - infra 6 | - dual_asg 7 | -------------------------------------------------------------------------------- /inv.yml: -------------------------------------------------------------------------------- 1 | all: 2 | hosts: 3 | example1.server.debeka.de: 4 | example2.server.debeka.de: 5 | vars: 6 | ansible_connection: local 7 | -------------------------------------------------------------------------------- /inv_5hosts: -------------------------------------------------------------------------------- 1 | [simples] 2 | foohost_[1:5] 3 | [simples:vars] 4 | hello=world 5 | -------------------------------------------------------------------------------- /inv_simple_1k_hosts.ini: -------------------------------------------------------------------------------- 1 | [foo_group] 2 | host_[1:100] 3 | [foo_group:vars] 4 | ansible_connection=local 5 | ansible_python_interpreter='{{ ansible_playbook_python }}' 6 | 7 | [bar_group] 8 | host_[101:1000] 9 | [bar_group:vars] 10 | ansible_connection=local 11 | ansible_python_interpreter='{{ ansible_playbook_python }}' 12 | -------------------------------------------------------------------------------- /inventory_test: -------------------------------------------------------------------------------- 1 | [TEST] 2 | localhost my_custom_var=foobar 3 | 4 | [TEST:vars] 5 | ansible_connection=local 6 | ansible_python_interpreter='{{ ansible_playbook_python }}' 7 | my_group_custom_var=foobar2 8 | 
-------------------------------------------------------------------------------- /journalctl-persist.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: yes 4 | 5 | tasks: 6 | 7 | - name: Persist journalctl logs to disk 8 | lineinfile: 9 | path: /etc/systemd/journald.conf 10 | regexp: '^Storage=' 11 | insertafter: '^#Storage=auto' 12 | line: Storage=persistent 13 | -------------------------------------------------------------------------------- /kindle-demo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: kindle 3 | remote_user: root 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Test connection 8 | ping: 9 | 10 | tags: 11 | - ping 12 | 13 | - name: Announce what we'll do 14 | local_action: 15 | command /usr/bin/say -v {{ voice }} "{{ banter }}" 16 | 17 | tags: 18 | - voice 19 | 20 | - name: Change the display 21 | shell: eips -c && eips 1 1 "*** RE-CONFIGURED USING ANSIBLE ***" && sleep 3 && eips -p 22 | args: 23 | executable: /bin/sh 24 | 25 | tags: 26 | - display 27 | 28 | - name: Play some audio on the kindle 29 | shell: /mnt/us/mplayer/mplayer /mnt/us/music/ansible-awesome.m4a 30 | args: 31 | executable: /bin/sh 32 | 33 | tags: 34 | - audio 35 | 36 | - name: Show clock 37 | shell: eips -c && /mnt/us/timelit/timelit.sh 38 | args: 39 | executable: /bin/sh 40 | 41 | tags: 42 | - clock 43 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2015 Eugene Varnavsky (varnavruz@gmail.com) 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 
5 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/README.md: -------------------------------------------------------------------------------- 1 | Building a simple LAMP stack and deploying Application using Ansible Playbooks. 2 | ------------------------------------------- 3 | 4 | These playbooks require Ansible 1.2. 5 | 6 | These playbooks are meant to be a reference and starter's guide to building 7 | Ansible Playbooks. These playbooks were tested on CentOS 7.x so we recommend 8 | that you use CentOS or RHEL to test these modules. 9 | 10 | RHEL7 version reflects changes in Red Hat Enterprise Linux and CentOS 7: 11 | 1. Network device naming scheme has changed 12 | 2. iptables is replaced with firewalld 13 | 3. MySQL is replaced with MariaDB 14 | 15 | This LAMP stack can be on a single node or multiple nodes. The inventory file 16 | 'hosts' defines the nodes in which the stacks should be configured. 17 | 18 | [webservers] 19 | localhost 20 | 21 | [dbservers] 22 | bensible 23 | 24 | Here the webserver would be configured on the local host and the dbserver on a 25 | server called "bensible". The stack can be deployed using the following 26 | command: 27 | 28 | ansible-playbook -i hosts site.yml 29 | 30 | Once done, you can check the results by browsing to http://localhost/index.php. 31 | You should see a simple test page and a list of databases retrieved from the 32 | database server. 
33 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | repository: https://github.com/bennojoy/mywebapp.git 7 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/group_vars/dbservers: -------------------------------------------------------------------------------- 1 | --- 2 | # The variables file used by the playbooks in the dbservers group. 3 | # These don't have to be explicitly imported by vars_files: they are autopopulated. 4 | 5 | mysqlservice: mysqld 6 | mysql_port: 3306 7 | dbuser: foouser 8 | dbname: foodb 9 | upassword: abc 10 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/hosts: -------------------------------------------------------------------------------- 1 | [webservers] 2 | 192.168.133.100 3 | 4 | [dbservers] 5 | 192.168.133.101 6 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart ntp 6 | service: name=ntpd state=restarted 7 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 
3 | 4 | - name: Install ntp 5 | yum: name=ntp state=present 6 | tags: ntp 7 | 8 | - name: Configure ntp file 9 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 10 | tags: ntp 11 | notify: restart ntp 12 | 13 | - name: Start the ntp service 14 | service: name=ntpd state=started enabled=yes 15 | tags: ntp 16 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle DB tier notifications 3 | 4 | - name: restart mariadb 5 | service: name=mariadb state=restarted 6 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will install MariaDB and create db user and give permissions. 
3 | 4 | - name: Install MariaDB package 5 | yum: name={{ item }} state=installed 6 | with_items: 7 | - mariadb-server 8 | - MySQL-python 9 | - libselinux-python 10 | - libsemanage-python 11 | 12 | - name: Configure SELinux to start mysql on any port 13 | seboolean: name=mysql_connect_any state=true persistent=yes 14 | 15 | - name: Create Mysql configuration file 16 | template: src=my.cnf.j2 dest=/etc/my.cnf 17 | notify: 18 | - restart mariadb 19 | 20 | - name: Create MariaDB log file 21 | file: path=/var/log/mysqld.log state=touch owner=mysql group=mysql mode=0775 22 | 23 | - name: Create MariaDB PID directory 24 | file: path=/var/run/mysqld state=directory owner=mysql group=mysql mode=0775 25 | 26 | - name: Start MariaDB Service 27 | service: name=mariadb state=started enabled=yes 28 | 29 | #- name: insert firewalld rule 30 | # firewalld: port={{ mysql_port }}/tcp permanent=true state=enabled immediate=yes 31 | 32 | - name: Create Application Database 33 | mysql_db: name={{ dbname }} state=present 34 | 35 | - name: Create Application DB User 36 | mysql_user: name={{ dbuser }} password={{ upassword }} priv=*.*:ALL host='%' state=present 37 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/db/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir=/var/lib/mysql 3 | socket=/var/lib/mysql/mysql.sock 4 | user=mysql 5 | # Disabling symbolic-links is recommended to prevent assorted security risks 6 | symbolic-links=0 7 | port={{ mysql_port }} 8 | 9 | [mysqld_safe] 10 | log-error=/var/log/mysqld.log 11 | pid-file=/var/run/mysqld/mysqld.pid 12 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks are responsible for copying the latest dev/production code from 3 | 
# the version control system. 4 | 5 | - name: Copy the code from repository 6 | git: repo={{ repository }} dest=/tmp/html 7 | 8 | - name: Creates the index.php file 9 | template: src=index.php.j2 dest=/var/www/html/index.php 10 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 3 | 4 | - name: Install http and php etc 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - httpd 8 | - php 9 | - php-mysql 10 | - git 11 | - libsemanage-python 12 | - libselinux-python 13 | 14 | #- name: insert firewalld rule for httpd 15 | # firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 16 | 17 | - name: http service state 18 | service: name=httpd state=started enabled=yes 19 | 20 | - name: Configure SELinux to allow httpd to connect to remote database 21 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 22 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | - include: copy_code.yml 4 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/roles/web/templates/index.php.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | "; 13 | echo "List of Databases:
"; 14 | {% for host in groups['dbservers'] %} 15 | $link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error($link)); 16 | {% endfor %} 17 | $res = mysqli_query($link, "SHOW DATABASES;"); 18 | while ($row = mysqli_fetch_assoc($res)) { 19 | echo $row['Database'] . "\n"; 20 | } 21 | ?> 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /lamp_simple_rhel7/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the whole application stack in this site. 3 | 4 | - name: apply common configuration to all nodes 5 | hosts: all 6 | remote_user: vagrant 7 | become: yes 8 | become_method: sudo 9 | 10 | roles: 11 | - common 12 | 13 | - name: configure and deploy the webservers and application code 14 | hosts: webservers 15 | remote_user: vagrant 16 | become: yes 17 | become_method: sudo 18 | # ignore_errors: yes 19 | 20 | roles: 21 | - web 22 | 23 | - name: deploy MySQL and configure the databases 24 | hosts: dbservers 25 | remote_user: vagrant 26 | become: yes 27 | become_method: sudo 28 | 29 | roles: 30 | - db 31 | -------------------------------------------------------------------------------- /library/chkuptime: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The module checks for system uptime of the target machine. 4 | # The module takes in 'detailed' bool argument from the user 5 | # It returns a JSON output since an Ansible module should 6 | # output a Valid JSON. 
7 | 8 | source $1 9 | 10 | if [ -f "/proc/uptime" ]; then 11 | uptime=`cat /proc/uptime` 12 | uptime=${uptime%%.*} 13 | days=$(( uptime/60/60/24 )) 14 | hours=$(( uptime/60/60%24 )) 15 | if [ $detailed = "true" ]; then 16 | minutes=$(( uptime/60%60 )) 17 | seconds=$(( uptime%60 )) 18 | uptime="$days days, $hours hours, $minutes minutes, $seconds seconds" 19 | else 20 | uptime="$days days, $hours hours" 21 | fi 22 | else 23 | uptime="" 24 | fi 25 | 26 | echo -e "{\"uptime\":\""$uptime"\"}" 27 | -------------------------------------------------------------------------------- /library/chkuser: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | 3 | DOCUMENTATION = """ 4 | --- 5 | module: chkuser 6 | version_added: 0.1 7 | short_description: Check if user exists on the target machine 8 | options: 9 | username: 10 | decription: 11 | - Accept username from the user 12 | required: True 13 | """ 14 | 15 | EXAMPLES = """ 16 | #Usage Example 17 | - name: Check if user exists 18 | action: chkuser username=rdas 19 | """ 20 | 21 | def is_user_exists(username): 22 | try: 23 | import pwd 24 | return(username in [entry.pw_name for entry in pwd.getpwall()]) 25 | except: 26 | module.fail_json(msg='Module pwd does not exists') 27 | 28 | 29 | def main(): 30 | module = AnsibleModule( 31 | argument_spec = dict( 32 | username = dict(required=True) 33 | ) 34 | ) 35 | username = module.params.get('username') 36 | exists = is_user_exists(username) 37 | if exists: 38 | status = '%s user exists' % username 39 | else: 40 | status = '%s user does not exist' % username 41 | module.exit_json(changed=True, msg=str(status)) 42 | 43 | from ansible.module_utils.basic import * 44 | main() 45 | -------------------------------------------------------------------------------- /library/chkwheelgrp: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The module checks the number of users in 
/etc/group wheel 4 | # It returns JSON output since an Ansible module requires that. 5 | 6 | GROUP="/etc/group" 7 | FIND="wheel" 8 | 9 | number="`grep $FIND $GROUP | awk -F: '{print $4}' | awk -F',' '{print NF}'`" 10 | 11 | echo -e "{\"wheel_users\":\""$number"\"}" 12 | -------------------------------------------------------------------------------- /make-rhel-vulnerable.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Break things to make the server vulnerable!! 3 | hosts: all 4 | become: yes 5 | become_method: sudo 6 | 7 | tasks: 8 | - name: Downgrade bash - shellshock 9 | yum: 10 | name: bash-4.2.45-5.el7.x86_64 11 | state: installed 12 | allow_downgrade: yes 13 | 14 | - name: Downgrade sudo 15 | yum: 16 | name: sudo-1.8.6p7-13.el7.x86_64 17 | state: installed 18 | allow_downgrade: yes 19 | 20 | - name: Run Insights 21 | command: /bin/redhat-access-insights 22 | -------------------------------------------------------------------------------- /multi_cloud/README.md: -------------------------------------------------------------------------------- 1 | # Configure Various Cloud Provider VMs 2 | 3 | ``` 4 | ansible-playbook -i hosts site.yml 5 | ``` 6 | 7 | Uses roles to demonstrate how to perform post install provisioning across both AWS EC2 and MS Azure VMs. 8 | 9 | #### Common Role 10 | Configures NTP using Jinja2 template and groups_vars variable 11 | Generic service handler restarts (NTP, Apache, Firewall) 12 | 13 | #### Amazon Role 14 | main.yml uses includes to incorporate other playbooks 15 | Adds some user accounts, install Apache/PHP plus dependencies. 
Restarts firewall after adding HTTP port and configured SELinux 16 | 17 | #### Azure Role 18 | main.yml uses includes to incorporate other playbooks 19 | Installs Apache and NTP - packages selected based OS distribution 20 | Creates some user accounts 21 | Also demonstrates tags 22 | 23 | 24 | -------------------------------------------------------------------------------- /multi_cloud/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | 4 | ntpserver: pool.ntp.org 5 | httpd_port: 80 6 | -------------------------------------------------------------------------------- /multi_cloud/group_vars/amazon: -------------------------------------------------------------------------------- 1 | httpd_port: 8000 2 | -------------------------------------------------------------------------------- /multi_cloud/group_vars/azure: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /multi_cloud/hosts: -------------------------------------------------------------------------------- 1 | [amazon] 2 | #ec2-52-50-107-129.eu-west-1.compute.amazonaws.com 3 | 52.50.160.18 4 | 5 | [amazon:vars] 6 | ansible_ssh_user=ec2-user 7 | 8 | [azure] 9 | #ubuntu-azure-1.westeurope.cloudapp.azure.com 10 | #40.118.108.249 11 | #13.94.206.138 12 | my-azure-vm.cloudapp.net 13 | 14 | [cloud:children] 15 | amazon 16 | azure 17 | -------------------------------------------------------------------------------- /multi_cloud/roles/amazon/tasks/amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: add user accounts 4 | user: name={{ item.name }} state=present groups={{ item.groups }} 5 | with_items: 6 | - {name: 'foo', groups: 'wheel' } 7 | become: yes 8 | become_method: sudo 9 | 
-------------------------------------------------------------------------------- /multi_cloud/roles/amazon/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install web services, firewall rules and SELinux 3 | 4 | - block: 5 | 6 | - name: Install http and php etc 7 | yum: name={{ item }} state=present 8 | with_items: 9 | - httpd 10 | - php 11 | - php-mysql 12 | - git 13 | - libsemanage-python 14 | - libselinux-python 15 | - firewalld 16 | 17 | - name: Start firewall 18 | service: name=firewalld state=started enabled=yes 19 | 20 | - name: Insert firewalld rule for httpd 21 | ignore_errors: no 22 | firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 23 | notify: restart firewall 24 | 25 | - name: Configure SELinux to allow httpd to connect to remote database 26 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 27 | 28 | ignore_errors: yes 29 | become: yes 30 | become_user: root 31 | become_method: sudo 32 | -------------------------------------------------------------------------------- /multi_cloud/roles/amazon/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will configure Amazon hosts 3 | 4 | - include: amazon.yml 5 | - include: install_httpd.yml 6 | -------------------------------------------------------------------------------- /multi_cloud/roles/azure/tasks/azure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - block: 4 | 5 | - name: Install Packages If Non Red Hat 6 | apt: name={{item}} state=latest 7 | with_items: 8 | - apache2 9 | - ntp 10 | when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' 11 | tags: packages 12 | 13 | - name: Install Packages if Red Hat Based 14 | yum: name={{item}} state=latest 15 | with_items: 16 | - httpd 17 | - ntp 18 | when: ansible_distribution == 
'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' 19 | tags: packages 20 | 21 | - name: Create Some Users 22 | user: name={{ item.name }} state=present groups={{ item.groups }} 23 | with_items: 24 | - {name: 'foo', groups: 'root' } 25 | tags: users 26 | 27 | ignore_errors: yes 28 | become: yes 29 | -------------------------------------------------------------------------------- /multi_cloud/roles/azure/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will configure Azure hosts 3 | 4 | - include: azure.yml 5 | -------------------------------------------------------------------------------- /multi_cloud/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart web server 8 | service: name=httpd state=restarted 9 | 10 | - name: restart firewall 11 | service: name=firewalld state=restarted 12 | -------------------------------------------------------------------------------- /multi_cloud/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 
3 | 4 | - name: Configure ntp file 5 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 6 | tags: ntp 7 | become: yes 8 | 9 | -------------------------------------------------------------------------------- /multi_cloud/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /multi_cloud/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook configures VMs across multiple cloud providers 3 | 4 | - name: apply common configuration to all nodes 5 | hosts: cloud 6 | 7 | roles: 8 | - common 9 | 10 | - name: configure in Amazon AWS EC2 11 | hosts: amazon 12 | 13 | roles: 14 | - amazon 15 | 16 | - name: configure in Microsoft Azure 17 | hosts: azure 18 | remote_user: pgriffit 19 | 20 | roles: 21 | - azure 22 | -------------------------------------------------------------------------------- /networking/ios_reporting.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: build IOS inventory report 3 | hosts: ios 4 | connection: network_cli 5 | gather_facts: False 6 | 7 | vars: 8 | desired_ios_version: "16.06.01" 9 | file_path: /tmp/inventory_report_ios.html 10 | 11 | tasks: 12 | 13 | - name: gathering IOS facts 14 | ios_facts: 15 | register: all_facts 16 | 17 | - name: create HTML report 18 | template: 19 | src: report-ios.j2 20 | dest: "{{ file_path }}" 21 | delegate_to: localhost 22 | run_once: true 23 | -------------------------------------------------------------------------------- /networking/report-ios.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 
| 12 | 13 | 14 | 15 | 16 | 17 | 18 | {% for network_switch in groups['ios'] %} 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | {% endfor %} 27 | 28 |
HostnameModel TypeSerial NumberMgmt interface IPCode Version
{{hostvars[network_switch]['ansible_net_hostname']}}{{hostvars[network_switch]['ansible_net_model']}}{{hostvars[network_switch]['ansible_net_serialnum']}}{{hostvars[network_switch]['ansible_net_interfaces']['GigabitEthernet1']['ipv4'][0]['address']}}{{hostvars[network_switch]['ansible_net_version']}}
29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /openSCAP.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for OpenSCAP 3 | - hosts: all 4 | 5 | tasks: 6 | - name: install SCAP Security Guide 7 | yum: 8 | name: scap-security-guide 9 | state: latest 10 | 11 | - name: command to run oscap 12 | command: oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_pci-dss --report /root/build_compliance_report.html /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml 13 | ignore_errors: True 14 | 15 | - wait_for: path=/root/build_compliance_report.html 16 | 17 | - fetch: src=/root/build_compliance_report.html dest="/root/{{ ansible_hostname }}.html" 18 | -------------------------------------------------------------------------------- /ops/roles/config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for config role 3 | 4 | - name: Gather EC2 facts 5 | ec2_facts: 6 | when: ansible_distribution == 'Amazon' 7 | 8 | - name: Ensure /etc/issue is present 9 | template: 10 | src: "issue-{{ ansible_distribution }}.j2" 11 | dest: /etc/issue 12 | owner: root 13 | group: root 14 | mode: 0644 15 | -------------------------------------------------------------------------------- /ops/roles/config/templates/issue-Amazon.j2: -------------------------------------------------------------------------------- 1 | _____________________ 2 | < I've been Ansibled! 
> 3 | --------------------- 4 | \ ^__^ 5 | \ (oo)\_______ 6 | (__)\ )\/\ 7 | ||----w | 8 | || || 9 | 10 | Distro: {{ ansible_distribution }} 11 | IP: {{ ansible_ec2_public_ipv4 }} ID: {{ ansible_ec2_reservation_id }} 12 | 13 | -------------------------------------------------------------------------------- /ops/roles/config/templates/issue-RedHat.j2: -------------------------------------------------------------------------------- 1 | Kernel \r on an \m 2 | 3 | IP: {{ ansible_default_ipv4.address }} MAC: {{ ansible_default_ipv4.macaddress }} 4 | -------------------------------------------------------------------------------- /ops/roles/disk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for disk role 3 | 4 | - name: Find large files 5 | command: find "{{ disk_path | default('/') }}" -size +{{ disk_min_size_check | default('1') }}M -ls 6 | register: output 7 | 8 | - name: Output 9 | debug: 10 | var: output.stdout_lines 11 | when: output.stdout != '' 12 | -------------------------------------------------------------------------------- /ops/roles/services/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for services role 3 | 4 | services_list: 5 | - httpd 6 | - chronyd 7 | -------------------------------------------------------------------------------- /ops/roles/services/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for services role 3 | 4 | - name: Ensure service state is {{ services_state }} 5 | service: 6 | name: "{{ services_name }}" 7 | state: "{{ services_state }}" 8 | -------------------------------------------------------------------------------- /ops/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Common ops tasks 4 | hosts: all 5 | 6 | roles: 7 | # - { role: config } 
8 | # - { role: services } 9 | - { role: disk, disk_path: /tmp } 10 | -------------------------------------------------------------------------------- /ping.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts: false 7 | 8 | tasks: 9 | - name: Test connection 10 | ping: 11 | -------------------------------------------------------------------------------- /ping_check.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test ping 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | tasks: 7 | - name: ping 8 | shell: /usr/bin/ping google.com -c 5 9 | register: ping 10 | -------------------------------------------------------------------------------- /post-provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: north 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts: false 7 | 8 | tasks: 9 | - name: update packages 10 | #yum:name=wget state=latest 11 | yum: 12 | name: wget 13 | state: latest 14 | tags: 15 | - packages 16 | 17 | - name: add user accounts 18 | user: name={{ item.name }} state=present groups={{ item.groups }} 19 | with_items: 20 | - {name: 'pgriffit', groups: 'wheel' } 21 | - {name: 'fred', groups: 'wheel' } 22 | tags: 23 | - users 24 | 25 | - local_action: command /usr/bin/say -v {{ voice }} "{{ banter }}" 26 | become: no 27 | tags: 28 | - notify 29 | 30 | 31 | - hosts: south 32 | remote_user: vagrant 33 | become: yes 34 | become_method: sudo 35 | #gather_facts: false 36 | 37 | tasks: 38 | - name: Update MOTD 39 | copy: 40 | src: /tmp/foo 41 | dest: /etc/motd 42 | owner: root 43 | group: root 44 | mode: 0444 45 | tags: 46 | - configuration 47 | 48 | - name: add user accounts 49 | user: name={{ item.name }} state=present groups={{ item.groups }} 50 | 
with_items: 51 | - {name: 'pgriffit', groups: 'wheel' } 52 | - {name: 'wilma', groups: 'wheel' } 53 | tags: 54 | - users 55 | 56 | - local_action: command /usr/bin/say -v {{ voice }} "{{ banter }}" 57 | become: no 58 | tags: 59 | - notify 60 | -------------------------------------------------------------------------------- /reboot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: beckton 3 | remote_user: vagrant 4 | become: yes 5 | become_method: sudo 6 | gather_facts: false 7 | 8 | tasks: 9 | #- name: Upgrade all packages 10 | # yum: name=* state=latest 11 | 12 | - name: Checking if we need a reboot 13 | # NOTE: this string check is wrong so always comes back as "yes" 14 | shell: if [ $(rpm -q kernel | sort -Vr | head -n 1) != kernel-$(uname -r) ]; then echo "yes"; fi 15 | register: reboot 16 | ignore_errors: true 17 | 18 | - name: Reboot required 19 | command: shutdown -r now "Ansible kernel update applied" 20 | async: 1 21 | poll: 0 22 | ignore_errors: true 23 | when: reboot.changed 24 | 25 | - name: Waiting for server to come back up 26 | local_action: 27 | wait_for 28 | host="{{ inventory_hostname }}" 29 | port=22 30 | delay=10 31 | timeout=60 32 | become: false 33 | when: reboot.changed 34 | -------------------------------------------------------------------------------- /rhsm-aws-tower.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Register to Red Hat RHSM 3 | # I use the tag below to pick up unsubscribed hosts from the inventory 4 | hosts: tag_rhsm_false 5 | # need to be root in order to install and register 6 | become: yes 7 | become_method: sudo 8 | 9 | tasks: 10 | - name: Install OS Packages 11 | yum: 12 | name: subscription-manager 13 | state: present 14 | tags: 15 | - install 16 | 17 | - name: "Register host and subscribe (using tower survey)" 18 | ignore_errors: yes 19 | redhat_subscription: 20 | state: present 21 | username: 
'{{rhn_username}}' 22 | password: '{{rhn_password}}' 23 | #autosubscribe: true 24 | force_register: true 25 | pool: '{{rhn_pool_id}}' 26 | tags: 27 | - register 28 | -------------------------------------------------------------------------------- /rhsm-ocp-example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: Allow rhsm a longer timeout to help out with subscription-manager 4 | lineinfile: 5 | dest: /etc/rhsm/rhsm.conf 6 | line: 'server_timeout=600' 7 | insertafter: '^proxy_password =' 8 | 9 | - name: Check for sat config file 10 | stat: path=/etc/rhsm/rhsm.conf.kat-backup 11 | register: sat_cfg 12 | 13 | - name: Remove satellite configuration if using RH CDN 14 | command: "mv -f /etc/rhsm/rhsm.conf.kat-backup /etc/rhsm/rhsm.conf" 15 | when: rhsm_user is defined and rhsm_user and sat_cfg.stat.exists == True 16 | ignore_errors: yes 17 | 18 | - name: Remove satellite SSL if using RH CDN 19 | command: "rpm -e $(rpm -qa katello-ca-consumer*)" 20 | when: rhsm_user is defined and rhsm_user and sat_cfg.stat.exists == True 21 | ignore_errors: yes 22 | 23 | - name: Is the host already registered? 
24 | command: "subscription-manager status" 25 | register: subscribed 26 | changed_when: no 27 | ignore_errors: yes 28 | 29 | - name: RedHat subscriptions 30 | redhat_subscription: 31 | username: "{{ rhsm_user }}" 32 | password: "{{ rhsm_password }}" 33 | when: "'Current' not in subscribed.stdout and rhsm_user is defined and rhsm_user" 34 | 35 | - name: Retrieve the OpenShift Pool ID 36 | command: subscription-manager list --available --matches="{{ rhsm_pool }}" --pool-only 37 | register: openshift_pool_id 38 | when: rhsm_pool is defined and rhsm_pool 39 | changed_when: False 40 | 41 | - name: Determine if OpenShift Pool Already Attached 42 | command: subscription-manager list --consumed --matches="{{ rhsm_pool }}" --pool-only 43 | register: openshift_pool_attached 44 | changed_when: False 45 | when: rhsm_pool is defined and rhsm_pool and openshift_pool_id.stdout == '' 46 | 47 | - fail: 48 | msg: "Unable to find pool matching {{ rhsm_pool }} in available or consumed pools" 49 | when: rhsm_pool is defined and rhsm_pool and openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' 50 | 51 | - name: Attach to OpenShift Pool 52 | command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }} 53 | when: rhsm_pool is defined and rhsm_pool and openshift_pool_id.stdout != '' 54 | 55 | when: ansible_distribution == "RedHat" 56 | -------------------------------------------------------------------------------- /rngd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | tasks: 5 | 6 | - name: Check this is a RHEL7 system 7 | fail: msg="This playbook will ony run on RHEL 7 systems" 8 | when: ansible_distribution != "RedHat" or ansible_distribution_major_version != "7" 9 | 10 | - name: Install the random number generator package 11 | yum: name=rng-tools state=latest 12 | 13 | - name: Update the /usr/lib/systemd/system/rngd.service file 14 
| ini_file: dest=/usr/lib/systemd/system/rngd.service 15 | section=service 16 | option=ExecStart 17 | value="/sbin/rngd -f -r /dev/urandom -o /dev/random" backup=yes 18 | notify: 19 | - restart rngd 20 | 21 | - name: ensure rngd is running (and enable it at boot) 22 | service: name=httpd state=started enabled=yes 23 | 24 | 25 | handlers: 26 | - name: restart rngd 27 | service: name=rngd state=restarted 28 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2015 Eugene Varnavsky (varnavruz@gmail.com) 2 | 3 | This work is licensed under the Creative Commons Attribution 3.0 Unported License. 4 | To view a copy of this license, visit http://creativecommons.org/licenses/by/3.0/deed.en_US. 5 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/README.md: -------------------------------------------------------------------------------- 1 | Building a simple LAMP stack and deploying Application using Ansible Playbooks. 2 | ------------------------------------------- 3 | 4 | These playbooks require Ansible 1.2. 5 | 6 | These playbooks are meant to be a reference and starter's guide to building 7 | Ansible Playbooks. These playbooks were tested on CentOS 7.x so we recommend 8 | that you use CentOS or RHEL to test these modules. 9 | 10 | RHEL7 version reflects changes in Red Hat Enterprise Linux and CentOS 7: 11 | 1. Network device naming scheme has changed 12 | 2. iptables is replaced with firewalld 13 | 3. MySQL is replaced with MariaDB 14 | 15 | This LAMP stack can be on a single node or multiple nodes. The inventory file 16 | 'hosts' defines the nodes in which the stacks should be configured. 
17 | 18 | [webservers] 19 | localhost 20 | 21 | [dbservers] 22 | bensible 23 | 24 | Here the webserver would be configured on the local host and the dbserver on a 25 | server called "bensible". The stack can be deployed using the following 26 | command: 27 | 28 | ansible-playbook -i hosts site.yml 29 | 30 | Once done, you can check the results by browsing to http://localhost/index.php. 31 | You should see a simple test page and a list of databases retrieved from the 32 | database server. 33 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are applicable to all host groups 3 | 4 | httpd_port: 80 5 | ntpserver: 192.168.1.2 6 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart ntp 6 | service: name=ntpd state=restarted 7 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 
3 | 4 | - name: Install ntp 5 | yum: name=ntp state=present 6 | ignore_errors: yes 7 | tags: ntp 8 | 9 | - name: Configure ntp file 10 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 11 | tags: ntp 12 | notify: restart ntp 13 | 14 | - name: Start the ntp service 15 | service: name=ntpd state=started enabled=yes 16 | tags: ntp 17 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart haproxy 6 | service: name=haproxy state=restarted 7 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/haproxy/tasks/install-haproxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 
3 | 4 | - name: Install haproxy 5 | yum: name=haproxy state=latest 6 | 7 | - name: Deploy haproxy configuration file 8 | template: src=haproxy.j2 dest=/etc/haproxy/haproxy.cfg 9 | notify: 10 | - restart haproxy 11 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install-haproxy.yml 3 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/haproxy/templates/haproxy.j2: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 3 | log 127.0.0.1 local1 debug 4 | maxconn 45000 # Total Max Connections. 5 | daemon 6 | nbproc 1 # Number of processing cores. 7 | defaults 8 | timeout server 86400000 9 | timeout connect 86400000 10 | timeout client 86400000 11 | timeout queue 1000s 12 | 13 | # [HTTP Site Configuration] 14 | listen http_web 192.168.133.105:80 15 | mode http 16 | balance roundrobin # Load Balancing algorithm 17 | option httpchk 18 | option forwardfor 19 | server server1 192.168.133.101:80 weight 1 maxconn 512 check 20 | server server2 192.168.133.103:80 weight 1 maxconn 512 check 21 | 22 | # [HTTPS Site Configuration] 23 | listen https_web 192.168.133.105:443 24 | mode tcp 25 | balance source# Load Balancing algorithm 26 | #reqadd X-Forwarded-Proto: http 27 | server server1 192.168.133.101:443 weight 1 maxconn 512 check 28 | server server2 192.168.133.103:443 weight 1 maxconn 512 check 29 | 30 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/webservers/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 
3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart httpd 6 | service: name=httpd state=restarted 7 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/webservers/tasks/install-apache.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http 3 | 4 | - name: Install httpd 5 | yum: name=httpd state=present 6 | 7 | - service: name=httpd enabled=yes state=started 8 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/roles/webservers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install-apache.yml 3 | -------------------------------------------------------------------------------- /rolling-upgrade-demo1/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the haproxy and web services 3 | 4 | - name: apply common configuration to all nodes 5 | hosts: haproxy webservers 6 | remote_user: vagrant 7 | become: yes 8 | become_method: sudo 9 | 10 | roles: 11 | - common 12 | 13 | - name: install and configure the haproxy 14 | hosts: haproxy 15 | remote_user: vagrant 16 | become: yes 17 | become_method: sudo 18 | 19 | roles: 20 | - haproxy 21 | 22 | - name: install and configure the webservers 23 | hosts: webservers 24 | remote_user: vagrant 25 | become: yes 26 | become_method: sudo 27 | 28 | roles: 29 | - webservers 30 | -------------------------------------------------------------------------------- /run-redhat-insights.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update Red Hat Insights 3 | hosts: all 4 | 5 | tasks: 6 | - name: Run Insights command on the host 7 | command: /bin/redhat-access-insights 8 | 
-------------------------------------------------------------------------------- /slack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test slack module 3 | hosts: localhost 4 | 5 | tasks: 6 | - name: Send test slack notification 7 | local_action: 8 | module: slack 9 | token: thetoken/generatedby/slack 10 | msg: "A test Ansible slack playbook message!" 11 | -------------------------------------------------------------------------------- /snow_collection_example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Using ServiceNow Collection 3 | hosts: localhost 4 | gather_facts: no 5 | collections: 6 | - servicenow.servicenow 7 | 8 | tasks: 9 | - name: Create an incident 10 | snow_record: 11 | username: xxxxxx 12 | password: xxxxxx 13 | instance: dev82827 14 | state: present 15 | data: 16 | short_description: "This is a test incident opened by Ansible using the SNOW collection module" 17 | severity: 3 18 | priority: 2 19 | -------------------------------------------------------------------------------- /start_azure_vms.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: start Azure VMs 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | ignore_errors: true 7 | 8 | vars: 9 | instances: 10 | # - { name: 'skylight-dc', rg: 'skylight-resource' } 11 | - { name: 'tower1', rg: 'core-infra' } 12 | 13 | tasks: 14 | - name: "start instances" 15 | azure_rm_virtualmachine: 16 | resource_group: "{{ item.rg }}" 17 | name: "{{ item.name }}" 18 | state: present 19 | started: true 20 | loop: "{{ instances }}" 21 | register: output 22 | until: output is not failed 23 | retries: 3 24 | -------------------------------------------------------------------------------- /sysadmin/group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed 
here are applicable to all host groups 3 | 4 | ntpserver1: 10.161.48.206 5 | ntpserver2: 10.52.32.206 6 | -------------------------------------------------------------------------------- /sysadmin/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. Handlers are called by other plays. 3 | # See http://docs.ansible.com/playbooks_intro.html for more information about handlers. 4 | 5 | - name: restart ntp 6 | service: name=ntpd state=restarted 7 | -------------------------------------------------------------------------------- /sysadmin/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 3 | 4 | - name: Install ntp 5 | yum: name=ntp state=present 6 | tags: ntp 7 | 8 | - name: Configure ntp file 9 | template: src=ntp.conf.j2 dest=/etc/ntp.conf 10 | tags: ntp 11 | notify: restart ntp 12 | 13 | - name: Start the ntp service 14 | service: name=ntpd state=started enabled=yes 15 | tags: ntp 16 | 17 | - name: Configure hostname 18 | hostname: 19 | name: "{{ inventory_hostname }}" 20 | tags: config 21 | 22 | - name: add user accounts 23 | user: name={{ item.name }} state=present groups={{ item.groups }} 24 | with_items: 25 | - {name: 'ansible', groups: 'wheel' } 26 | - {name: 'matt', groups: 'wheel' } 27 | - {name: 'paul', groups: 'wheel' } 28 | tags: users 29 | -------------------------------------------------------------------------------- /sysadmin/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver1 }} 8 | server {{ ntpserver2 }} 9 | 10 | includefile /etc/ntp/crypto/pw 11 | 12 | keys /etc/ntp/keys 13 | 
-------------------------------------------------------------------------------- /sysadmin/roles/web/tasks/copy_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks are responsible for copying the latest dev/production code from 3 | # the version control system. 4 | 5 | - name: Copy the code from repository 6 | git: repo={{ repository }} dest=/tmp/html 7 | 8 | - name: Creates the index.php file 9 | template: src=index.php.j2 dest=/var/www/html/index.php 10 | -------------------------------------------------------------------------------- /sysadmin/roles/web/tasks/install_httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These tasks install http and the php modules. 3 | 4 | - name: Install http and php etc 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - httpd 8 | - php 9 | - php-mysql 10 | - git 11 | - libsemanage-python 12 | - libselinux-python 13 | 14 | #- name: insert firewalld rule for httpd 15 | # firewalld: port={{ httpd_port }}/tcp permanent=true state=enabled immediate=yes 16 | 17 | - name: http service state 18 | service: name=httpd state=started enabled=yes 19 | 20 | - name: Configure SELinux to allow httpd to connect to remote database 21 | seboolean: name=httpd_can_network_connect_db state=true persistent=yes 22 | -------------------------------------------------------------------------------- /sysadmin/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_httpd.yml 3 | #- include: copy_code.yml 4 | -------------------------------------------------------------------------------- /sysadmin/roles/web/templates/index.php.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Ansible Application 4 | 5 | 6 |
7 | Homepage 8 |
9 | "; 13 | echo "List of Databases:
"; 14 | {% for host in groups['dbservers'] %} 15 | $link = mysqli_connect('{{ hostvars[host].ansible_default_ipv4.address }}', '{{ hostvars[host].dbuser }}', '{{ hostvars[host].upassword }}') or die(mysqli_connect_error($link)); 16 | {% endfor %} 17 | $res = mysqli_query($link, "SHOW DATABASES;"); 18 | while ($row = mysqli_fetch_assoc($res)) { 19 | echo $row['Database'] . "\n"; 20 | } 21 | ?> 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /sysadmin/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys the whole application stack in this site. 3 | 4 | - name: Apply common configuration to all nodes 5 | hosts: all 6 | remote_user: vagrant 7 | become: yes 8 | become_method: sudo 9 | 10 | roles: 11 | - common 12 | - { role: web, when: "ansible_os_family != 'RedHat'" } 13 | -------------------------------------------------------------------------------- /telegram.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test telegram module 3 | hosts: localhost 4 | connection: local 5 | 6 | tasks: 7 | - name: Send test message 8 | command: /usr/bin/say "Is it working?" 9 | #telegram: 10 | # token: "*****" 11 | # chat_id: ***** 12 | # msg: "Ansible playbook calling" 13 | tags: 14 | - text 15 | -------------------------------------------------------------------------------- /vm_orchestration/README.md: -------------------------------------------------------------------------------- 1 | # Examples of VM Orchestration Using Ansible 2 | 3 | ``` 4 | This is designed to be run from Ansible Tower, as it makes use of surveys to get user inputs. 5 | ``` 6 | 7 | Spin up/down VMs across on and off premise environments, including vcenter, amazon. 
8 | 9 | #### Common Role 10 | 11 | #### OnPrem Role 12 | 13 | #### OffPrem Role 14 | 15 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart web server 8 | service: name=httpd state=restarted 9 | 10 | - name: restart firewall 11 | service: name=firewalld state=restarted 12 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 3 | 4 | - name: "Common tasks go here..." 5 | shell: echo "This command was run on `/bin/date`" 6 | 7 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are common across the roles 3 | vm_env: OffPrem 4 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/offprem/tasks/amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - block: 4 | - name: 'Create Amazon 
VM Instance(s)' 5 | ec2: 6 | #key_name: "ansible-tower" 7 | #aws_access_key: "{{ AWS_ACCESS_KEY|default(lookup('env', 'AWS_ACCESS_KEY')) }}" 8 | #aws_secret_key: "{{ AWS_SECRET_KEY|default(lookup('env', 'AWS_SECRET_KEY')) }}" 9 | region: eu-west-1 10 | zone: eu-west-1b 11 | instance_type: t2.micro 12 | image: ami-2bc3a858 13 | wait: yes 14 | group: launch-wizard-1 15 | vpc_subnet_id: subnet-0f120178 16 | assign_public_ip: yes 17 | monitoring: no 18 | count: "{{vm_env_instances}}" 19 | state: present 20 | register: newmachines 21 | 22 | - name: Wait for SSH to start 23 | wait_for: 24 | host: "{{ newmachines.instances[0].public_ip }}" 25 | port: 22 26 | timeout: 300 27 | delegate_to: localhost 28 | 29 | - name: Add the machine to in memory inventory 30 | add_host: 31 | hostname: "{{ newmachines.instances[0].public_ip }}" 32 | groups: amazon 33 | 34 | - name: Configure instance hostname 35 | set_fact: hostname_to_use={{ vm_hostname }} 36 | 37 | when: '"create" in vm_env_action' 38 | become: no 39 | ignore_errors: no 40 | 41 | 42 | - block: 43 | - name: 'Destroy Amazon VM instance(s)' 44 | shell: /bin/true 45 | 46 | when: '"destroy" in vm_env_action' 47 | become: no 48 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/offprem/tasks/bootstrap.yml: -------------------------------------------------------------------------------- 1 | # vim:ft=ansible: 2 | --- 3 | 4 | - block: 5 | 6 | - name: Pull bootstrap.py script 7 | git: repo=https://github.com/Katello/katello-client-bootstrap.git dest=/tmp/bootstrap 8 | tags: 9 | - git 10 | 11 | - name: Run bootstrap.py and attach to our satellite 12 | command: > 13 | /usr/bin/python /tmp/bootstrap/bootstrap.py 14 | -l '{{ SAT6_USERNAME }}' 15 | -p '{{ SAT6_PASSWORD }}' 16 | -a '{{ ACTIVATION_KEY }}' 17 | -o '{{ SAT6_ORG }}' 18 | -g '{{ HOSTGROUP }}' 19 | --location='{{ AWS_LOCATION }}' 20 | --server='{{ SATELLITE_FQDN }}' 21 | tags: 22 | - git 23 | 24 | when: 
'"create" in vm_env_action' 25 | ignore_errors: no 26 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/offprem/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will configure cloud based hosts 3 | 4 | - name: Amazon Orchestration 5 | include: amazon.yml 6 | when: '"Amazon" in vm_env_provider' 7 | 8 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/roles/offprem/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are for Amazon 3 | vm_env_provider: Amazon 4 | -------------------------------------------------------------------------------- /vm_orchestration/offpremise/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook provides VM orchestration on Amazon EC2 3 | 4 | - name: VM Orchestration 5 | hosts: all 6 | connection: local 7 | gather_facts: no 8 | remote_user: root 9 | 10 | pre_tasks: 11 | - name: Log what we are doing 12 | shell: /bin/logger 'Going to {{ vm_env_action }} {{vm_env_instances}} VM(s) for provider {{ vm_env_provider }}' 13 | 14 | roles: 15 | - common 16 | - { role: offprem, when: vm_env == 'OffPrem' } 17 | 18 | post_tasks: 19 | - name: Log what we have done 20 | shell: /bin/logger '{{ vm_env_action }} VM(s) for provider {{ vm_env_provider }} complete' 21 | 22 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handler to handle common notifications. 
3 | 4 | - name: restart ntp 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart web server 8 | service: name=httpd state=restarted 9 | 10 | - name: restart firewall 11 | service: name=firewalld state=restarted 12 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook contains common plays that will be run on all nodes. 3 | 4 | - name: "Common tasks go here..." 5 | shell: echo "This command was run on `/bin/date`" 6 | 7 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 | 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server {{ ntpserver }} 8 | 9 | includefile /etc/ntp/crypto/pw 10 | 11 | keys /etc/ntp/keys 12 | 13 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are common across the roles 3 | vm_env: OnPrem 4 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/onprem/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook will configure on premise hosts 3 | 4 | - name: VMware Orchestration 5 | include: vcenter.yml 6 | when: '"vmware" in vm_env_provider' 7 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/onprem/tasks/vcenter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - block: 4 | - name: 
Create an ESXi VM Guest From Template 5 | vsphere_guest: 6 | vcenter_hostname: '{{ vcenter_hostname }}' 7 | username: '{{ username }}' 8 | password: '{{ password }}' 9 | guest: '{{ vm_hostname }}' 10 | from_template: yes 11 | template_src: tpl-rhel7-minimal 12 | cluster: Demonstrations 13 | #resource_pool: "/Resources" 14 | #vm_extra_config: 15 | # folder: MyFolder 16 | esxi: 17 | datacenter: EMEA CloudLab 18 | hostname: cloud12-acc.gps.hst.ams2.redhat.com 19 | when: '"create" in vm_env_action' 20 | become: no 21 | ignore_errors: yes 22 | 23 | - block: 24 | - name: Destroy ESXi VM Guest 25 | vsphere_guest: 26 | vcenter_hostname: '{{ vcenter_hostname }}' 27 | username: '{{ username }}' 28 | password: '{{ password }}' 29 | guest: '{{ vm_hostname }}' 30 | state: absent 31 | force: yes 32 | when: '"destroy" in vm_env_action' 33 | become: no 34 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/roles/onprem/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables listed here are for vmware/vcenter 3 | vm_env_provider: vmware 4 | -------------------------------------------------------------------------------- /vm_orchestration/onpremise/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook provides VM orchestration across VMware vCenter 3 | 4 | - name: VM Orchestration 5 | hosts: all 6 | connection: local 7 | gather_facts: no 8 | 9 | pre_tasks: 10 | - name: Log what we are doing 11 | shell: /bin/logger 'Going to {{ vm_env_action }} VM {{ vm_hostname }} for provider {{ vm_env_provider }}' 12 | 13 | roles: 14 | - common 15 | - { role: onprem, when: vm_env == 'OnPrem' } 16 | 17 | post_tasks: 18 | - name: Log what we have done 19 | shell: /bin/logger '{{ vm_env_action }} VM {{ vm_hostname }} for provider {{ vm_env_provider }} complete' 20 | 
-------------------------------------------------------------------------------- /windows/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Windows Playbooks 2 | 3 | Some example playbooks which can be used with ansible core or via tower for Windows hosts. 4 | 5 | ### Setup 6 | 7 | I've used a Windows 2012 Server host in these examples. 8 | 9 | #### Facts 10 | windows_facts.eg contains all the facts as discovered by the gather_facts module for a typical Windows host. 11 | 12 | #### Inventory hosts file 13 | 14 | I use a local hosts file for the Windows hosts and setting generic connection variables to use WinRM. 15 | 16 | #### enable_iis.yml 17 | ``` 18 | ansible-playbook -i hosts enable_iis.yml 19 | ``` 20 | 21 | Install IIS and some sub features. 22 | By default, a Windows 2012 host comes with such software installed. This configures it for use. 23 | 24 | Shows: win_feature module 25 | 26 | #### deploy_web_site.yml 27 | ``` 28 | ansible-playbook -i hosts deploy_web_site.yml 29 | ``` 30 | 31 | After setting up IIS, use this playbook to deploy a new simple web site under wwwroot. 32 | 33 | Shows: win_get_url module 34 | 35 | #### getmem.yml 36 | ``` 37 | ansible-playbook -i hosts getmem.yml 38 | ``` 39 | 40 | Rather silly example, as this is discovered by facts, but shows how to run a Powershell script. 41 | To see how much more work it is to code natively in Python see winrm_mem.py :) 42 | 43 | Shows: script module, verbose 'debug' output 44 | 45 | #### various.yml 46 | ``` 47 | ansible-playbook -i hosts various.yml 48 | ``` 49 | 50 | Use the raw module to run a Windows command (can be used when there's no core/extra module to do something better) and show the output 51 | Various file checks based on assumptions, add a user account. 
52 | 53 | Shows: raw, debug, win_stat, assert, win_user modules 54 | 55 | #### win-extras.yml 56 | ``` 57 | ansible-playbook -i hosts win-extras.yml 58 | ``` 59 | 60 | Installs various applications using win_chocolately and a for loop. 61 | 62 | Shows: win_chocolately module, for loop. 63 | 64 | #### sysinternals.yml 65 | ``` 66 | ansible-playbook -i hosts sysinternals.yml 67 | ``` 68 | 69 | Downloads sysinternals.zip to Administrators Downloads folder. 70 | 71 | Shows: win_get_url module, debug output. 72 | 73 | #### openssh.yml 74 | ``` 75 | ansible-playbook -i hosts openssh.yml 76 | ``` 77 | 78 | Attempts to download, install and configure openSSH Server port for Windows. 79 | Almost works apart from service starting at the end of the playbook. Failure seems to be to do with SSH key placements :( 80 | 81 | Shows: win_copy, get_url, win_file, win_copy, win_unzip, raw, win_firewall_rule, win_chocolatey, win_nssm modules 82 | Local playbook variables, delegation to localhost (get_url) to save downloading to each and every Windows host. 
83 | 84 | -------------------------------------------------------------------------------- /windows/check_psversion.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uses various Windows modules to test their functionality 3 | 4 | - name: Use raw module to get config details 5 | hosts: all 6 | tasks: 7 | - name: check Powershell version 8 | raw: "PowerShell -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -Command write-host $PSVersionTable.PSVersion" 9 | register: psversion 10 | - debug: var=psversion.stdout_lines 11 | -------------------------------------------------------------------------------- /windows/check_quicktime.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apple Quicktime Zero Day Vulnerabilities 3 | hosts: all 4 | tasks: 5 | - name: Check for Apple Quicktime 6 | script: scripts/get_sw.ps1 7 | register: software 8 | #- debug: var=software 9 | 10 | - name: Installed software 11 | raw: echo "{{ item }}" 12 | with_items: '{{ software.stdout_lines }}' 13 | 14 | - name: Remove Apple Quicktime Software 15 | win_chocolatey: 16 | name: quicktime 17 | state: absent 18 | #raw: MsiExec.exe /I{111EE7DF-FC45-40C7-98A7-753AC46B12FB}. 
QuickTimePlayer.exe 19 | #win_package: 20 | # name="Qucktime" 21 | # product_id="Quicktime 7" 22 | # path="c:\Users\Administrator\Downloads\QuickTimeInstaller.exe" 23 | # state="absent" 24 | when: "'QuickTime' in software.stdout_lines" 25 | -------------------------------------------------------------------------------- /windows/copssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Extra Copssh on Windows Server 3 | hosts: windows 4 | gather_facts: true 5 | remote_user: vagrant 6 | vars: 7 | tmp_dir: 'C:\Temp' 8 | zip_file: 'Copssh_5.4.2_x86_Free_Installer.zip' 9 | 10 | tasks: 11 | 12 | - name: Create {{ tmp_dir }} on client 13 | win_file: path='{{ tmp_dir }}' state=directory 14 | 15 | - name: Download latest build 16 | win_get_url: 17 | url: 'https://www.itefix.net/dl/{{ zip_file }}' 18 | dest: '{{ tmp_dir }}\{{ zip_file }}' 19 | force: yes 20 | skip_certificate_validation: 21 | 22 | - name: Extract contents 23 | win_unzip: 24 | src: '{{ tmp_dir }}\{{ zip_file }}' 25 | dest: '{{ tmp_dir }}\CopSSH' 26 | 27 | - name: Install CopSSH 28 | win_msi: path='{{ tmp_dir }}\CopSSH\Copssh_5.4.2_x86_Free_Installer.exe /u=vagrant /p=vagrant /S' 29 | #raw: cmd /c '{{ tmp_dir }}\CopSSH\Copssh_5.4.2_x86_Free_Installer.exe /u=vagrant /p=vagrant /S' 30 | register: install 31 | - debug: var=install 32 | -------------------------------------------------------------------------------- /windows/customise-chrome.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ target | default("os_windows") }}' 3 | gather_facts: no 4 | 5 | tasks: 6 | 7 | - name: install Chocolatey 8 | win_chocolatey: 9 | name: chocolatey 10 | state: present 11 | 12 | - name: disable enhanced exit codes 13 | win_chocolatey_feature: 14 | name: useEnhancedExitCodes 15 | state: disabled 16 | when: ansible_version.full is version_compare('2.8', '<') 17 | 18 | - name: Install Chrome 19 | win_chocolatey: 20 | 
name: "{{ item }}" 21 | ignore_checksums: true 22 | with_items: 23 | - googlechrome 24 | 25 | - name: Set Chrome to default Browser 26 | win_regedit: 27 | path: HKCU:\Software\Microsoft\Windows\Shell\Associations\UrlAssociations\{{ item }}\UserChoice 28 | name: ProgId 29 | data: ChromeHTML 30 | with_items: 31 | - http 32 | - https 33 | 34 | - name: Set Chrome to not ask about default browser 35 | win_regedit: 36 | path: HKLM:\Software\Policies\Google\Chrome 37 | name: DefaultBrowserSettingEnabled 38 | data: 0 39 | type: dword 40 | 41 | - name: Disable Chrome Welcome Screen 42 | win_regedit: 43 | path: HKLM:\Software\Policies\Google\Chrome 44 | name: "{{ item }}" 45 | data: 0 46 | type: dword 47 | with_items: 48 | - PromotionalTabsEnabled 49 | - WelcomePageOnOSUpgradeEnabled 50 | 51 | - name: Disable Chrome Syncing 52 | win_regedit: 53 | path: HKLM:\Software\Policies\Google\Chrome 54 | name: SyncDisabled 55 | data: 1 56 | type: dword 57 | -------------------------------------------------------------------------------- /windows/deploy_iis_code.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook does basic IIS stuff on Windows hosts 3 | 4 | - name: IIS Web Code Deployment 5 | hosts: windows 6 | gather_facts: false 7 | 8 | tasks: 9 | - name: Install IIS 10 | win_feature: 11 | name: "Web-Server" 12 | state: present 13 | restart: yes 14 | 15 | - name: Start IIS service 16 | win_service: 17 | name: W3Svc 18 | state: started 19 | 20 | - name: Configure Client DNS 21 | win_dns_client: 22 | adapter_names: Ethernet 23 | ipv4_addresses: 24 | - 10.0.1.4 25 | - 8.8.8.8 26 | 27 | - name: Ensure git Installed 28 | win_chocolatey: 29 | name: git 30 | state: present 31 | 32 | - name: Deploy Web Code 33 | win_git: 34 | repo: 'https://gitlab.com/ffirg/AlienInvasion.git' 35 | dest: 'C:\inetpub\wwwroot' 36 | branch: devel 37 | replace_dest: yes 38 | recursive: yes 39 | update: yes 40 | clone: yes 41 | accept_hostkey: yes 42 | 
failed_when: false -------------------------------------------------------------------------------- /windows/deploy_web_site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uses the win_get_url module to download a simple HTML file for IIS 3 | 4 | - name: Download simple web site 5 | hosts: all 6 | gather_facts: false 7 | tasks: 8 | - name: Download simple web site to 'C:\inetpub\wwwroot\ansible.html' 9 | win_get_url: 10 | url: 'https://raw.githubusercontent.com/thisdavejohnson/mywebapp/master/index.html' 11 | dest: 'C:\inetpub\wwwroot\ansible.html' 12 | -------------------------------------------------------------------------------- /windows/disable_sso_account.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Disable Users SSO 3 | hosts: 127.0.0.1 4 | gather_facts: false 5 | ignore_errors: true 6 | 7 | tasks: 8 | 9 | - name: Disable Octa SSO 10 | # use uri to call a pretend octa SSO API 11 | uri: 12 | url: https://jsonplaceholder.typicode.com/posts 13 | method: POST 14 | #body: "{ 'user': 'disable' }" 15 | -------------------------------------------------------------------------------- /windows/event_logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ target | default("all") }}' 3 | gather_facts: no 4 | 5 | tasks: 6 | 7 | - name: Add a new event log for automation events 8 | win_eventlog: 9 | name: Automation 10 | sources: 11 | - ansible-actions 12 | state: present 13 | 14 | - name: Write an entry to the automation event log 15 | win_eventlog_entry: 16 | log: Automation 17 | source: ansible-actions 18 | event_id: 1 19 | message: "{{ tower_user_name}} just added this using {{ tower_job_template_name }}" -------------------------------------------------------------------------------- /windows/getmem.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Get Server Memory 3 | hosts: all 4 | tasks: 5 | - name: run a powershell script 6 | script: scripts/mem.ps1 7 | register: out 8 | - debug: var=out 9 | -------------------------------------------------------------------------------- /windows/hosts: -------------------------------------------------------------------------------- 1 | [windows] 2 | 192.168.133.104 3 | 4 | [windows:vars] 5 | ansible_user=vagrant 6 | ansible_ssh_pass=vagrant 7 | ansible_port=5986 8 | ansible_connection=winrm 9 | # The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates: 10 | ansible_winrm_server_cert_validation=ignore 11 | -------------------------------------------------------------------------------- /windows/inform-bpm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Inform BPM of updates to process flow 3 | # we run this on the Tower server, as win_uri doesn't have auth yet 4 | hosts: localhost 5 | # we don't need any host facts, so disable to make run faster 6 | gather_facts: false 7 | 8 | tasks: 9 | 10 | - name: Callback to BPM using pid 11 | uri: 12 | method: POST 13 | user: ***** 14 | password: ***** 15 | url: "http://ec2-52-212-76-182.eu-west-1.compute.amazonaws.com:8080/business-central/rest/runtime/com.gatwick.demo:gatwickdemoproject:1.0/process/instance/12/signal?signal={stopRef}" 16 | -------------------------------------------------------------------------------- /windows/ipconfig.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test raw module 3 | hosts: windows 4 | remote_user: vagrant 5 | 6 | tasks: 7 | - name: run ipconfig 8 | raw: ipconfig 9 | register: ipconfig 10 | - debug: var=ipconfig 11 | -------------------------------------------------------------------------------- /windows/openssh-win32.txt: 
-------------------------------------------------------------------------------- 1 | Install openSSH win32 server 2 | 3 | Download https://github.com/PowerShell/Win32-OpenSSH/releases/download/3_19_2016/OpenSSH-Win64.zip 4 | 5 | - or better - Powershell - copy to template and copy to dest? 6 | $url = 'https://github.com/PowerShell/Win32-OpenSSH/releases/latest/' 7 | $request = [System.Net.WebRequest]::Create($url) 8 | $request.AllowAutoRedirect=$false 9 | $response=$request.GetResponse() 10 | $([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win64.zip' 11 | $([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win32.zip' 12 | 13 | Extract contents to C:\Program Files\OpenSSH-Win32 - use expand??? 14 | 15 | Start Powershell as Administrator 16 | cd 'C:\Program Files\OpenSSH-Win32' 17 | 18 | Setup SSH host keys (this will generate all the 'host' keys that sshd expects when its starts) 19 | .\ssh-keygen.exe -A 20 | 21 | Open Firewall - if server - check ansible_fact? 
22 | New-NetFirewallRule -Protocol TCP -LocalPort 22 -Direction Inbound -Action Allow -DisplayName SSH 23 | If you're on a workstation try: 24 | netsh advfirewall firewall add rule name='SSH Port' dir=in action=allow protocol=TCP localport=22 25 | 26 | If you need key-based authentication, run the following to setup the key-auth package 27 | powershell.exe .\install-sshlsa.ps1 28 | 29 | Restart-Computer 30 | 31 | Install and run daemon as NT Service running as Local System 32 | .\sshd.exe install 33 | 34 | Start-Service sshd 35 | 36 | Make the service start on boot (PowerShell): Set-Service sshd -StartupType Automatic 37 | -------------------------------------------------------------------------------- /windows/password-management.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Windows Password Management 3 | hosts: skylight-dc 4 | gather_facts: no 5 | 6 | tasks: 7 | - include_role: 8 | name: win-domain-pw-check 9 | tags: check -------------------------------------------------------------------------------- /windows/ping.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ping 3 | hosts: all 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Check we can contact the server... 
8 | win_ping: 9 | -------------------------------------------------------------------------------- /windows/remove_software.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: no 3 | 4 | tasks: 5 | 6 | - name: Remove unauthorised software 7 | win_regedit: 8 | path: '{{ item.path }}' 9 | name: '{{ item.name }}' 10 | data: '{{ item.data|default(None) }}' 11 | type: '{{ item.type|default("string") }}' 12 | state: '{{ item.state|default("present") }}' 13 | 14 | with_items: 15 | 16 | # Remove OneDrive from Run hook for new users 17 | - path: HKU:\Default\SOFTWARE\Microsoft\Windows\CurrentVersion\Run 18 | name: OneDrive 19 | state: absent 20 | 21 | # Remove OneDrive from Run hook for current user 22 | - path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Run 23 | name: OneDrive 24 | state: absent 25 | 26 | # Remove OneDrive from Explorer Name Space Tree (32bit) 27 | - path: HKCR:\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6} 28 | name: System.IsPinnedToNameSpaceTree 29 | data: 0 30 | type: dword 31 | 32 | # Remove OneDrive from Explorer Name Space Tree (64bit) 33 | - path: HKCR:\Wow6432Node\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6} 34 | name: System.IsPinnedToNameSpaceTree 35 | data: 0 36 | type: dword 37 | 38 | - path: HKLM:\Software\Policies\Microsoft\Windows\OneDrive 39 | name: DisableFileSyncNGSC 40 | data: 1 41 | type: dword 42 | 43 | - path: HKLM:\Software\Wow6432Node\Policies\Microsoft\Windows\OneDrive 44 | name: DisableFileSyncNGSC 45 | data: 1 46 | type: dword 47 | tags: 48 | - onedrive 49 | 50 | - name: Remove OneDrive 51 | win_shell: | 52 | taskkill.exe /F /IM OneDrive.exe 53 | register: taskkill 54 | changed_when: taskkill.rc == 0 and 'The process "OneDrive.exe" not found.' not in taskkill.stderr 55 | failed_when: taskkill.rc != 0 and 'The process "OneDrive.exe" not found.' 
not in taskkill.stderr 56 | ignore_errors: yes 57 | tags: 58 | - onedrive 59 | 60 | - name: Remove OneDrive leftovers 61 | win_file: 62 | path: '{{ item }}' 63 | state: absent 64 | with_items: 65 | - '%LOCALAPPDATA%\Microsoft\OneDrive' 66 | - '%PROGRAMDATA%\Microsoft OneDrive' 67 | - '%USERPROFILE%\OneDrive' 68 | - '%USERPROFILE%\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\OneDrive.lnk' 69 | - C:\OneDriveTemp 70 | -------------------------------------------------------------------------------- /windows/roles/win-ad-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | active_directory_ensure: present 2 | 3 | ### USERS 4 | active_directory_domain: 'ansibleskylight.com' 5 | active_directory_email_domain: 'ansibleskylight.com' 6 | active_directory_users: {} 7 | active_directory_user_base_ou: 'CN=Users,DC=ansibleskylight,DC=com' 8 | active_directory_update_password: 'on_create' 9 | active_directory_group_action: 'add' 10 | active_directory_password_never_expires_enabled: false 11 | active_directory_user_cannot_change_password_enabled: false 12 | active_directory_account_locked_enabled: false 13 | active_directory_user_default_password: '' 14 | 15 | ### GROUPS 16 | active_directory_groups: {} 17 | active_directory_group_base_ou: 'OU=Groups,DC=ansibleskylight,DC=com' 18 | active_directory_group_scope: 'global' 19 | -------------------------------------------------------------------------------- /windows/roles/win-ad-config/tasks/configure_groups.yml: -------------------------------------------------------------------------------- 1 | - name: configure active_directory groups 2 | win_domain_group: 3 | state: "{{ item.value.ensure | default(active_directory_ensure) }}" 4 | name: "{{ item.key }}" 5 | description: "{{ item.value.description }}" 6 | # path: "{{ item.value.path + ',' + active_directory_group_base_ou }}" 7 | scope: "{{ item.value.scope | default(active_directory_group_scope) }}" 8 | attributes: 
9 | mail: "{{ item.value.mail | default(item.key | lower + '@' + active_directory_email_domain) }}" 10 | with_dict: "{{ active_directory_groups | default({}) }}" 11 | -------------------------------------------------------------------------------- /windows/roles/win-ad-config/tasks/configure_users.yml: -------------------------------------------------------------------------------- 1 | - name: configure active_directory users 2 | win_domain_user: 3 | state: "{{ item.ensure | default(active_directory_ensure) }}" 4 | name: "{{ item.name }}" 5 | firstname: "{{ item.first_name }}" 6 | surname: "{{ item.last_name }}" 7 | password: "{{ item.password | default(active_directory_user_default_password) }}" 8 | groups: "{{ item.groups }}" 9 | email: "{{ item.email | default(item.first_name | lower + '.' + item.last_name | lower + '@' + active_directory_email_domain) }}" 10 | loop: "{{ active_directory_users }}" 11 | -------------------------------------------------------------------------------- /windows/roles/win-ad-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_vars: groups.yml 2 | - import_tasks: configure_groups.yml 3 | - include_vars: users.yml 4 | - import_tasks: configure_users.yml 5 | -------------------------------------------------------------------------------- /windows/roles/win-ad-config/vars/groups.yml: -------------------------------------------------------------------------------- 1 | custom_group_all: 2 | - 'Tower_Users' 3 | 4 | active_directory_groups: 5 | Operations: 6 | description: 'Operations Team' 7 | path: 'CN=Operations' 8 | Development: 9 | description: 'Development Team' 10 | path: 'CN=Development' 11 | Applications: 12 | description: 'Application Team' 13 | path: 'CN=Applications' 14 | 15 | active_directory_default_groups: 16 | Operations: 17 | - Operations 18 | Development: 19 | - Development 20 | Applications: 21 | - Applications 22 | 
-------------------------------------------------------------------------------- /windows/roles/win-ad-config/vars/users.yml: -------------------------------------------------------------------------------- 1 | active_directory_users: 2 | - name: alan 3 | first_name: 'Alan' 4 | last_name: 'Ops' 5 | password: '' 6 | groups: 7 | - 'Operations' 8 | - name: dave 9 | first_name: 'Dave' 10 | last_name: 'Developer' 11 | password: '' 12 | groups: 13 | - 'Development' 14 | - name: alison 15 | first_name: 'Alison' 16 | last_name: 'Applications' 17 | password: '' 18 | groups: 19 | - 'Applications' 20 | -------------------------------------------------------------------------------- /windows/roles/win-domain-pw-check/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: query domain for user account 2 | win_domain_user: 3 | name: "{{ username }}" 4 | state: query 5 | register: domainuserquery 6 | tags: check 7 | 8 | - name: check domain membership 9 | assert: 10 | that: 11 | - "'present' in domainuserquery.state" 12 | success_msg: "{{ username }} is a DOMAIN account" 13 | fail_msg: "{{ username }} is not a DOMAIN member" 14 | quiet: true 15 | ignore_errors: true 16 | tags: check 17 | 18 | - name: Domain user account changes 19 | include_role: 20 | name: win-domain-user 21 | when: domainuserquery.state == "present" 22 | tags: domain 23 | 24 | - name: Local user account changes 25 | include_role: 26 | name: win-local-user 27 | when: domainuserquery.state == "absent" 28 | tags: local -------------------------------------------------------------------------------- /windows/roles/win-domain-user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: change user password 2 | win_domain_user: 3 | name: "{{ username }}" 4 | state: present 5 | password: "{{ password }}" -------------------------------------------------------------------------------- 
/windows/roles/win-local-user/tasks/main.yml: --------------------------------------------------------------------------------
# Verify `username` exists as a local Windows account, then set its password.
- name: check for LOCAL user account
  win_user:
    name: "{{ username }}"
    state: query
  register: localuserquery

- name: check local membership
  assert:
    that:
      - "'present' in localuserquery.state"
    success_msg: "{{ username }} has a LOCAL account"
    fail_msg: "{{ username }} does not have a LOCAL account"
    quiet: true

- name: change user password
  win_user:
    name: "{{ username }}"
    state: present
    password: "{{ password }}"
-------------------------------------------------------------------------------- /windows/scripts/get_sw.ps1: --------------------------------------------------------------------------------
# List installed software from both the native and WOW6432Node uninstall
# registry hives, de-duplicated.
$strComputer = $Host
Clear
#Get-ItemProperty HKLM:\Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\* | Select-Object DisplayName | Format-List
@(Get-ItemProperty HKLM:\Software\Microsoft\Windows\CurrentVersion\Uninstall\*)+@(Get-ItemProperty HKLM:\Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\*) | foreach { $_.DisplayName } | Get-Unique
-------------------------------------------------------------------------------- /windows/scripts/mem.ps1: --------------------------------------------------------------------------------
# Report installed physical memory in MB.
$strComputer = $Host
Clear
# FIX: 'WmiObject' is not a cmdlet or alias; the original line failed at runtime.
$RAM = Get-WmiObject Win32_ComputerSystem
$MB = 1048576
"Installed Memory: " + [int]($RAM.TotalPhysicalMemory /$MB) + " MB"
-------------------------------------------------------------------------------- /windows/setup_iis.yml: --------------------------------------------------------------------------------
---
# This playbook uses the win_get_url module to download a simple HTML file for IIS

# NOTE(review): this first play has no tasks or roles, so it does nothing
# beyond connecting -- confirm whether it is intentional scaffolding.
- name: Demo Windows Web Services Creation
  hosts: all
  gather_facts: false

- name: Install/enable IIS
  import_playbook: enable_iis.yml

- name: Create web site content
  import_playbook: deploy_web_site.yml
-------------------------------------------------------------------------------- /windows/sysinternals.yml: --------------------------------------------------------------------------------
- name: Deploy Admin Tools
  hosts: all
  gather_facts: false

  tasks:
    - name: Download Sysinternals Utilities
      # FIX: converted deprecated inline key=value arguments to proper YAML
      # module parameters (values are byte-identical).
      win_get_url:
        dest: 'c:\Users\Administrator\Downloads\SysinternalsSuite.zip'
        url: 'https://download.sysinternals.com/files/SysinternalsSuite.zip'
      register: out
    - debug: var=out
-------------------------------------------------------------------------------- /windows/templates/delete_ad_account.j2: --------------------------------------------------------------------------------
$Firstname = '{{ firstname }}'
$Surname = '{{ surname }}'
Get-ADUser -filter 'GivenName -eq $Firstname -and Surname -eq $Surname' | Remove-ADUser -confirm:$false
Get-ADUser -filter 'GivenName -eq $Firstname -and Surname -eq $Surname'
-------------------------------------------------------------------------------- /windows/templates/disable_ad_account.j2: --------------------------------------------------------------------------------
$Firstname = '{{ firstname }}'
$Surname = '{{ surname }}'
Get-ADUser -filter 'GivenName -eq $Firstname -and Surname -eq $Surname' | Disable-ADAccount
Get-ADUser -filter 'GivenName -eq $Firstname -and Surname -eq $Surname' | findstr Enabled
-------------------------------------------------------------------------------- /windows/templates/download-openssh.j2: --------------------------------------------------------------------------------
# Resolve the 'latest release' redirect and print direct download URLs for
# the Win64 and Win32 OpenSSH zips.
$url = 'https://github.com/PowerShell/Win32-OpenSSH/releases/latest/'
$request = [System.Net.WebRequest]::Create($url)
$request.AllowAutoRedirect=$false
$response=$request.GetResponse()
$([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win64.zip'
$([String]$response.GetResponseHeader("Location")).Replace('tag','download') + '/OpenSSH-Win32.zip'
-------------------------------------------------------------------------------- /windows/various.yml: --------------------------------------------------------------------------------
---
# This playbook uses various Windows modules to test their functionality

- name: test raw module
  hosts: all
  tasks:
    - name: run ipconfig
      raw: ipconfig
      register: ipconfig
    - debug: var=ipconfig

- name: test stat module
  hosts: all
  tasks:
    - name: test stat module on file
      win_stat: path="C:/Windows/win.ini"
      register: stat_file

    - debug: var=stat_file

    - name: check stat_file result
      assert:
        that:
          - "stat_file.stat.exists"
          - "not stat_file.stat.isdir"
          - "stat_file.stat.size > 0"
          - "stat_file.stat.md5"

- name: Add a user
  hosts: all
  gather_facts: false
  tasks:
    - name: Add User
      # NOTE(review): hard-coded demo password -- fine for a test playbook,
      # never for production.
      win_user:
        name: ansible
        password: "@ns1bl3"
-------------------------------------------------------------------------------- /windows/win-ad-config.yml: --------------------------------------------------------------------------------
---
# This playbook is used to setup a dummy AD schema for demos etc
# Designed to be run from Tower (as that has the skylight inventory required already)

- name: Populate Active Directory with dummy demo data
  hosts: skylight-dc
  gather_facts: no

  roles:
    - { role: 'win-ad-config', tags: ["groups", "users"] }
-------------------------------------------------------------------------------- /windows/win-auth-tasks.yml: --------------------------------------------------------------------------------
---
- name: Steps To Disable Users AD Account
  # we run this on the AD server, to save messing with permissions
  hosts: tag_Name_active_directory_server
  # we don't need any host facts, so disable to make run faster
  gather_facts: false

  vars:
    # where we'll put, and call, the disablement script
    tmp_dir: 'C:\Temp'
    script: 'disable_ad_account'

  tasks:

    - name: Create {{ tmp_dir }} on server if required
      win_file: path='{{ tmp_dir }}' state=directory

    - name: Create powershell disablement script
      win_template:
        src: 'templates/{{script}}.j2'
        dest: '{{tmp_dir}}/{{script}}.ps1'

    - name: Disable Users AD Account
      # until V2.2, we'll need to use the raw module. 2.2 has win_command!
      raw: '{{tmp_dir}}/{{script}}.ps1'
-------------------------------------------------------------------------------- /windows/win-domain-add-user.yml: --------------------------------------------------------------------------------
---
- name: Windows AD Tasks
  hosts: skylight-dc
  gather_facts: no

  tasks:
    - name: Add AD user account
      win_domain_user:
        name: pgriffit
        firstname: Phil
        surname: Griffiths
        company: Red Hat
        password: "{{ password }}"
        groups:
          - Domain Admins
-------------------------------------------------------------------------------- /windows/win-extras.yml: --------------------------------------------------------------------------------
---
- name: Extra Windows Server Stuff
  hosts: all
  gather_facts: true
  remote_user: vagrant

  tasks:
    - name: Install Applications
      # FIX: proper YAML module arguments (inline `name={{ item }}` under a
      # YAML key is deprecated) and removed the empty `vars:` block, which
      # parses as null and can break play parsing.
      win_chocolatey:
        name: "{{ item }}"
      with_items:
        - nssm
        - git
        - notepadplusplus.install
-------------------------------------------------------------------------------- /windows/win_updates.yml: --------------------------------------------------------------------------------
---
# Apply Windows updates from the categories given in `categories`;
# target host group overridable via -e target=...
- hosts: '{{ target | default("os_windows") }}'
  gather_facts: no

  tasks:

    - name: Apply Windows updates
      win_updates:
        category_names:
          - "{{ categories }}"
-------------------------------------------------------------------------------- /windows/winrm_ipconfig.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
# Run `ipconfig /all` on a Windows host over WinRM and print the result.

import winrm

# NOTE(review): hard-coded demo host and credentials -- parameterize for real use.
s = winrm.Session('192.168.133.104', auth=('vagrant', 'vagrant'))
r = s.run_cmd('ipconfig', ['/all'])
# FIX: print-function syntax so the script runs on Python 3 as well as Python 2.
print(r.status_code)
print(r.std_out)
print(r.std_err)
-------------------------------------------------------------------------------- /windows/winrm_mem.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
# Run scripts/mem.ps1 on a Windows host over WinRM and print the result.

import winrm

# FIX: context manager closes the file handle (original left it open).
with open('scripts/mem.ps1', 'r') as f:
    ps_script = f.read()
# NOTE(review): hard-coded demo host and credentials -- parameterize for real use.
s = winrm.Session('192.168.133.104', auth=('vagrant', 'vagrant'))
r = s.run_ps(ps_script)
# FIX: print-function syntax so the script runs on Python 3 as well as Python 2.
print(r.status_code)
print(r.std_out)
print(r.std_err)
--------------------------------------------------------------------------------