├── control-remediation ├── files │ ├── syslog │ ├── sudoers │ └── chrony.conf ├── apply-updates.yml ├── vars │ └── ec2-vars ├── aws-launch.yml ├── apply-configuration.yml └── apply-configuration-and-abort.yml ├── badlock ├── windows-security.yml ├── samba-patch-generic.yml └── samba-patch-el.yml ├── self-service ├── delete-instances.survey ├── upload-key.yml ├── upload-key.survey ├── delete-instances.yml ├── launch-instances.yml └── launch-instances.survey ├── README.md ├── control-inventory ├── simple-script.py └── complex-script.py └── drown ├── openssl-patch-generic.yml └── openssl-patch-el.yml /control-remediation/files/syslog: -------------------------------------------------------------------------------- 1 | *.* @@127.0.0.42:514 2 | -------------------------------------------------------------------------------- /control-remediation/apply-updates.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: true 4 | become_method: sudo 5 | become_user: root 6 | tasks: 7 | - name: Apply any available updates 8 | yum: 9 | name: "*" 10 | state: latest 11 | update_cache: yes 12 | 13 | -------------------------------------------------------------------------------- /control-remediation/vars/ec2-vars: -------------------------------------------------------------------------------- 1 | --- 2 | ec2_access_key: 3 | ec2_secret_key: 4 | ec2_region: us-east-1 5 | ec2_zone: 6 | ec2_image: ami-61bbf104 7 | ec2_instance_type: m3.medium 8 | ec2_keypair: your-keypair-here 9 | ec2_security_group: default 10 | ec2_instance_count: 1 11 | demo_tag_name: you-are-it 12 | ec2_hosts: all 13 | wait_for_port: 22 14 | -------------------------------------------------------------------------------- /badlock/windows-security.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: true 3 | 4 | pre_tasks: 5 | - name: apply security updates 6 | win_updates: 7 | category_names: [u'SecurityUpdates',u'CriticalUpdates'] 8 | state: installed 9 | register: updated 10 | 11 | roles: 12 | - { role: trondhindenes.win_reboot, when: "updated.reboot_required" } 13 | -------------------------------------------------------------------------------- /self-service/delete-instances.survey: -------------------------------------------------------------------------------- 1 | { 2 | "description": "", 3 | "name": "", 4 | "spec": [ 5 | { 6 | "question_description": "", 7 | "min": 0, 8 | "default": "", 9 | "max": 32, 10 | "required": true, 11 | "choices": "", 12 | "variable": "env_tag_name", 13 | "question_name": "What environment should we deprovision?", 14 | "type": "text" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /self-service/upload-key.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Upload new ssh key 3 | connection: local 4 | hosts: localhost 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Verify key 9 | shell: ssh-keygen -l -f /dev/stdin <<< '{{ key_data }}' 10 | 11 | - name: Upload user-provided key 12 | ec2_key: 13 | name: "{{ tower_user_name }}" 14 | key_material: "{{ key_data }}" 15 | region: us-east-1 16 | state: present 17 | 18 | -------------------------------------------------------------------------------- /self-service/upload-key.survey: -------------------------------------------------------------------------------- 1 | { 2 | "description": "", 3 | "name": "", 4 | "spec": [ 5 | { 6 | "question_description": "", 7 | "min": 0, 8 | "default": "", 9 | "max": 2048, 10 | "required": true, 11 | "choices": "", 12 | "new_question": true, 13 | "variable": "key_data", 14 | "question_name": "Please paste the public key to upload to Amazon here.", 15 | "type": "textarea" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /self-service/delete-instances.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: terminate hosts 3 | hosts: "{{ ec2_hosts }}" 4 | gather_facts: false 5 | connection: local 6 | 7 | vars: 8 | ec2_hosts: tag_Name_{{ tower_user_name }}_{{ env_tag_name }} 9 | 10 | tasks: 11 | - name: terminate EC2 hosts 12 | ec2: 13 | region: us-east-1 14 | instance_ids: "{{ item }}" 15 | state: absent 16 | wait: True 17 | with_items: 18 | - "{{ hostvars[inventory_hostname]['ec2_id'] }}" 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ansible Blog Examples 2 | ===================== 3 | 4 | Some example playbooks used on https://www.ansible.com/blog. 5 | 6 | drown/ - Playbooks from https://www.ansible.com/blog/drown-openssl-vulnerability 7 | 8 | badlock/ - Playbooks from https://www.ansible.com/blog/badlock-vulnerability 9 | 10 | control-remediation/ - Playbooks from https://www.ansible.com/blog/control-with-ansible-tower-part-1 11 | 12 | control-inventory/ - Inventory examples from https://www.ansible.com/blog/control-with-ansible-tower-part-2 13 | 14 | self-service/ - Playbooks and surveys from https://www.ansible.com/blog/simple-self-service-with-ansible-and-tower 15 | -------------------------------------------------------------------------------- /control-inventory/simple-script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | 4 | import json 5 | 6 | atl = { 7 | 'hosts': [ 'host1', 'host2'] 8 | } 9 | rdu = { 10 | 'hosts': [ 'host2', 'host3'] 11 | } 12 | se = { 13 | "children": [ 'atlanta', 'raleigh' ], 14 | "vars": { 15 | "nameserver": "dns.southeast.example.com", 16 | "halon_system_timeout": 30, 17 | "self_destruct_countdown": 60, 18 | "escape_pods": 2 19 | } 20 | } 21 | 22 | usa = { 23 | "children": [ 'southeast' ] 24 | } 25 | 26 | inv = { 'atlanta': atl, 'raleigh': rdu, 'southeast': se, 'usa': usa, '_meta' : { 'hostvars': {} } } 27 | 28 | if len(sys.argv) > 1 and sys.argv[1] == '--list': 29 | print json.dumps(inv) 30 | -------------------------------------------------------------------------------- /drown/openssl-patch-generic.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: true 3 | become_method: sudo 4 | become_user: root 5 | tasks: 6 | - name: update openssl from apt if available 7 | apt: 8 | name: openssl 9 | state: latest 10 | update_cache: yes 11 | when: ansible_os_family == 'Debian' 12 | notify: restart_system 13 | 14 | - name: update openssl from yum if available 15 | yum: 16 | name: openssl 17 | state: latest 18 | update_cache: yes 19 | when: ansible_os_family == 'RedHat' 20 | notify: restart_system 21 | 22 | handlers: 23 | - name: restart_system 24 | shell: sleep 2 && shutdown -r now "Ansible updates triggered" 25 | async: 1 26 | poll: 0 27 | ignore_errors: true 28 | -------------------------------------------------------------------------------- /control-remediation/files/sudoers: -------------------------------------------------------------------------------- 1 | # Defaults specification 2 | 3 | # 4 | # Refuse to run if unable to disable echo on the tty. 5 | # 6 | Defaults !visiblepw 7 | 8 | Defaults env_reset 9 | Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE INPUTRC KDEDIR LS_COLORS" 10 | Defaults env_keep += "MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE" 11 | Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES" 12 | Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE" 13 | Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY" 14 | 15 | Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin 16 | 17 | ## Allow root to run any commands anywhere 18 | root ALL=(ALL) ALL 19 | 20 | ## Allows people in group wheel to run all commands 21 | %wheel ALL=(ALL) ALL 22 | centos ALL=(ALL) NOPASSWD: ALL 23 | -------------------------------------------------------------------------------- /self-service/launch-instances.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Provision developer instances 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | 7 | vars: 8 | os_image: { 'CentOS 7': 'ami-61bbf104', 'CentOS 6': 'ami-bc8131d4' } 9 | instance_type: m3.medium 10 | image_type: 'CentOS 7' 11 | 12 | tasks: 13 | - name: Launch development instances 14 | ec2: 15 | keypair: "{{ tower_user_name }}" 16 | group: default 17 | region: us-east-1 18 | type: "{{ instance_type }}" 19 | image: "{{ os_image[image_type] }}" 20 | instance_tags: "{ 'type': '{{ instance_type }}', 'group': 'default', 'Name': '{{ tower_user_name }}_{{ env_tag_name }}' }" 21 | count: "{{ machine_count }}" 22 | wait: true 23 | register: ec2 24 | 25 | - name: Wait for instances to boot 26 | pause: 27 | seconds: 60 28 | 29 | - name: Wait for SSH to come up 30 | wait_for: 31 | host: "{{ item.public_dns_name }}" 32 | port: 22 33 | timeout: 60 34 | state: started 35 | with_items: 36 | - "{{ ec2.instances }}" 37 | -------------------------------------------------------------------------------- /control-remediation/files/chrony.conf: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 3 | server 0.centos.pool.ntp.org iburst 4 | server 1.centos.pool.ntp.org iburst 5 | server 2.centos.pool.ntp.org iburst 6 | server 3.centos.pool.ntp.org iburst 7 | 8 | # Ignore stratum in source selection. 9 | stratumweight 0 10 | 11 | # Record the rate at which the system clock gains/losses time. 12 | driftfile /var/lib/chrony/drift 13 | 14 | # Enable kernel RTC synchronization. 15 | rtcsync 16 | 17 | # In first three updates step the system clock instead of slew 18 | # if the adjustment is larger than 10 seconds. 19 | makestep 10 3 20 | 21 | # Listen for commands only on localhost. 22 | bindcmdaddress 127.0.0.1 23 | bindcmdaddress ::1 24 | 25 | keyfile /etc/chrony.keys 26 | 27 | # Specify the key used as password for chronyc. 28 | commandkey 1 29 | 30 | # Generate command key if missing. 31 | generatecommandkey 32 | 33 | # Disable logging of client accesses. 34 | noclientlog 35 | 36 | # Send a message to syslog if a clock adjustment is larger than 0.5 seconds. 37 | logchange 0.5 38 | 39 | logdir /var/log/chrony 40 | -------------------------------------------------------------------------------- /badlock/samba-patch-generic.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: true 3 | become_method: sudo 4 | become_user: root 5 | vars: 6 | service_name: 7 | 'Debian': 'smbd' 8 | 'RedHat': 'smb' 9 | 10 | tasks: 11 | - name: check samba version 12 | shell: dpkg -l | grep -q samba 13 | when: ansible_os_family == 'Debian' 14 | register: samba_installed 15 | ignore_errors: True 16 | 17 | - name: update samba from apt if installed 18 | apt: 19 | name: samba 20 | state: latest 21 | update_cache: yes 22 | when: ansible_os_family == 'Debian' and samba_installed.rc == 0 23 | notify: restart_samba 24 | 25 | - name: check samba version 26 | shell: rpm -q samba 27 | when: ansible_os_family == 'RedHat' 28 | register: samba_installed 29 | ignore_errors: True 30 | 31 | - name: update samba from yum if installed 32 | yum: 33 | name: samba 34 | state: latest 35 | update_cache: yes 36 | when: ansible_os_family == 'RedHat' and samba_installed.rc == 0 37 | notify: restart_samba 38 | 39 | handlers: 40 | - name: restart_samba 41 | service: 42 | name: "{{ service_name[ansible_os_family] }}" 43 | state: restarted 44 | -------------------------------------------------------------------------------- /self-service/launch-instances.survey: -------------------------------------------------------------------------------- 1 | { 2 | "description": "", 3 | "name": "", 4 | "spec": [ 5 | { 6 | "required": true, 7 | "min": 0, 8 | "default": 1, 9 | "max": 5, 10 | "question_description": "", 11 | "choices": "", 12 | "new_question": true, 13 | "variable": "machine_count", 14 | "question_name": "How many instances do you need?", 15 | "type": "integer" 16 | }, 17 | { 18 | "question_description": "", 19 | "min": null, 20 | "default": "m3.medium", 21 | "max": null, 22 | "required": true, 23 | "choices": "m3.medium\nm3.large\nm3.xlarge", 24 | "variable": "instance_type", 25 | "question_name": "What instance size should we provision?", 26 | "type": "multiplechoice" 27 | }, 28 | { 29 | "required": true, 30 | "min": null, 31 | "default": "CentOS 7", 32 | "max": null, 33 | "question_description": "", 34 | "choices": "CentOS 6\nCentOS 7", 35 | "new_question": true, 36 | "variable": "image_type", 37 | "question_name": "What OS should we provision?", 38 | "type": "multiplechoice" 39 | }, 40 | { 41 | "question_description": "", 42 | "min": 0, 43 | "default": "", 44 | "max": 32, 45 | "required": true, 46 | "choices": "", 47 | "variable": "env_tag_name", 48 | "question_name": "Please name your development environment.", 49 | "type": "text" 50 | } 51 | ] 52 | } 53 | -------------------------------------------------------------------------------- /drown/openssl-patch-el.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: true 3 | become_method: sudo 4 | become_user: root 5 | vars: 6 | vulnerable_releases: 7 | '5': '0.9.8e-37.el5_11' 8 | '6': '1.0.1e-42.el6_7.2' 9 | '7': '1.0.1e-51.el7_2.2' 10 | 11 | tasks: 12 | - name: check for openssl version 13 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" openssl-libs.{{ ansible_architecture }} 14 | register: openssl_version 15 | 16 | - name: check for vulnerable versions 17 | debug: 18 | msg: "OpenSSL version {{ openssl_version.stdout }} is vulnerable." 19 | when: openssl_version.stdout|version_compare(vulnerable_releases[ansible_distribution_major_version], '<=') 20 | register: is_vuln 21 | 22 | - name: update openssl from yum if vulnerable 23 | yum: 24 | name: openssl-libs 25 | state: latest 26 | update_cache: yes 27 | when: not is_vuln|skipped 28 | notify: restart_system 29 | register: installed 30 | 31 | - name: check for openssl version 32 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" openssl-libs.{{ ansible_architecture }} 33 | register: openssl_version 34 | when: not is_vuln|skipped 35 | 36 | - name: check that we are no longer vulnerable 37 | debug: 38 | msg: "OpenSSL version {{ openssl_version.stdout }} is still vulnerable!" 39 | when: not is_vuln|skipped 40 | failed_when: openssl_version.stdout|version_compare(vulnerable_releases[ansible_distribution_major_version], '<=') 41 | 42 | handlers: 43 | - name: restart_system 44 | shell: sleep 2 && shutdown -r now "Ansible updates triggered" 45 | async: 1 46 | poll: 0 47 | ignore_errors: true 48 | 49 | 50 | -------------------------------------------------------------------------------- /control-remediation/aws-launch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | gather_facts: False 5 | vars_files: 6 | - vars/ec2-vars 7 | 8 | tasks: 9 | 10 | - name: Launch some instances 11 | ec2: 12 | access_key: "{{ ec2_access_key }}" 13 | secret_key: "{{ ec2_secret_key }}" 14 | keypair: "{{ ec2_keypair }}" 15 | group: "{{ ec2_security_group }}" 16 | type: "{{ ec2_instance_type }}" 17 | image: "{{ ec2_image }}" 18 | region: "{{ ec2_region }}" 19 | instance_tags: "{'type':'{{ ec2_instance_type }}', 'group':'{{ ec2_security_group }}', 'Name':'demo_''{{ demo_tag_name }}'}" 20 | count: "{{ ec2_instance_count }}" 21 | wait: true 22 | user_data: | 23 | #!/bin/bash 24 | TOWER=tower-test.local 25 | JOB=457 26 | KEY=7acb361f01ca00414e8623433d49150b 27 | 28 | retry_attempts=10 29 | attempt=0 30 | while [[ $attempt -lt $retry_attempts ]] 31 | do 32 | status_code=`curl -s -i --data "host_config_key=${KEY}" http://${TOWER}/api/v1/job_templates/${JOB}/callback/ | head -n 1 | awk '{print $2}'` 33 | if [[ $status_code == 202 ]] 34 | then 35 | exit 0 36 | fi 37 | attempt=$(( attempt + 1 )) 38 | echo "${status_code} received... retrying in 1 minute. (Attempt ${attempt})" 39 | sleep 60 40 | done 41 | exit 1 42 | register: ec2 43 | 44 | - name: Wait for SSH to come up 45 | become: false 46 | connection: local 47 | wait_for: 48 | host: "{{ item.public_dns_name }}" 49 | port: 22 50 | delay: 60 51 | timeout: 320 52 | state: started 53 | with_items: 54 | - "{{ ec2.instances }}" 55 | -------------------------------------------------------------------------------- /control-remediation/apply-configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: true 4 | become_method: sudo 5 | become_user: root 6 | tasks: 7 | 8 | - name: Ensure users are present 9 | user: 10 | name: "{{ item.name }}" 11 | groups: wheel 12 | state: present 13 | uid: "{{ item.uid }}" 14 | with_items: 15 | - { name: "apone", uid: 1200 } 16 | - { name: "gorman", uid: 1201 } 17 | - { name: "hicks", uid: 1202 } 18 | 19 | - name: Install needed software 20 | yum: 21 | name: "{{ item }}" 22 | state: latest 23 | with_items: 24 | - chrony 25 | - sudo 26 | - rsyslog 27 | 28 | - name: Ensure standard chrony config 29 | copy: 30 | src: files/chrony.conf 31 | dest: /etc/chrony.conf 32 | mode: 0644 33 | owner: root 34 | group: root 35 | 36 | - name: Ensure standard sudo config 37 | copy: 38 | src: files/sudoers 39 | dest: /etc/sudoers 40 | mode: 0640 41 | owner: root 42 | group: root 43 | 44 | - name: Ensure log forwarding is configured 45 | copy: 46 | src: files/syslog 47 | dest: /etc/rsyslog.d/forward.conf 48 | mode: 0644 49 | owner: root 50 | group: root 51 | notify: restart_syslog 52 | 53 | - name: Ensure SELinux is enabled 54 | selinux: 55 | policy: targeted 56 | state: enforcing 57 | 58 | - name: Ensure SELinux booleans are set properly 59 | seboolean: 60 | name: "{{ item }}" 61 | persistent: true 62 | state: false 63 | with_items: 64 | - httpd_execmem 65 | - selinuxuser_execstack 66 | - selinuxuser_execheap 67 | 68 | - name: Ensure proper services are running 69 | service: 70 | name: "{{ item }}" 71 | state: running 72 | enabled: yes 73 | with_items: 74 | - rsyslog 75 | - chronyd 76 | 77 | handlers: 78 | - name: restart_syslog 79 | service: 80 | name: rsyslog 81 | state: restarted 82 | -------------------------------------------------------------------------------- /control-inventory/complex-script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf8 -*- 3 | # 4 | # Ansible dynamic inventory script for reading from a Tower SCM project 5 | # Requires: ansible, ansible-tower-cli 6 | # 7 | # Copyright © 2016 Red Hat, Inc. 8 | # This program is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # This program is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | 18 | import os 19 | import sys 20 | 21 | import json 22 | import urllib 23 | 24 | from ansible.inventory import Group 25 | from ansible.inventory.ini import InventoryParser as InventoryINIParser 26 | from tower_cli import api 27 | 28 | 29 | # Standard Tower project base path 30 | BASE_PATH="/var/lib/awx/projects" 31 | 32 | def rest_get(request): 33 | c = api.Client() 34 | response = c.get(request) 35 | if response.ok: 36 | j = response.json() 37 | if j.has_key('results'): 38 | return j['results'][0] 39 | else: 40 | return j 41 | else: 42 | return None 43 | 44 | # Get ID from project name 45 | def get_project_id(project): 46 | result = rest_get("projects/?name=%s" % (project,)) 47 | if result: 48 | return result['id'] 49 | else: 50 | return None 51 | 52 | # If a project update is running, wait up two minutes for it to finish 53 | def wait_for_project_update(project_id): 54 | retries = 120 55 | 56 | while retries > 0: 57 | result = rest_get("projects/%d" %(project_id,)) 58 | if not result: 59 | return 60 | if not result['related'].has_key('current_update'): 61 | return 62 | sleep(1) 63 | retries = retries - 1 64 | return 65 | 66 | # Find the toplevel path to the synced project's on-disk location 67 | def get_file_path(project_id): 68 | result = rest_get("projects/%d" % (project_id,)) 69 | if not result: 70 | return None 71 | return '%s/%s' % (BASE_PATH, result['local_path']) 72 | 73 | # Read and parse inventory 74 | def read_file(project_id, inv_file): 75 | file_path = get_file_path(project_id) 76 | if not file_path: 77 | return "" 78 | group = Group(name='all') 79 | groups = { 'all': group } 80 | parser = InventoryINIParser([], groups, filename = "%s/%s" %(file_path, inv_file)) 81 | return groups 82 | 83 | # Convert inventory structure to JSON 84 | def dump_json(inventory): 85 | ret = {} 86 | for group in inventory.values(): 87 | if group.name == 'all': 88 | continue 89 | g_obj = {} 90 | g_obj['children'] = [] 91 | for child in group.child_groups: 92 | g_obj['children'].append(child.name) 93 | g_obj['hosts'] = [] 94 | for host in group.hosts: 95 | g_obj['hosts'].append(host.name) 96 | g_obj['vars'] = group.vars 97 | ret[group.name] = g_obj 98 | meta = { 'hostvars': {} } 99 | for host in inventory['all'].get_hosts(): 100 | if not meta['hostvars'].has_key(host.name): 101 | meta['hostvars'][host.name] = host.vars 102 | else: 103 | meta['hostvars'][host.name].update(host.vars) 104 | ret['_meta'] = meta 105 | return json.dumps(ret) 106 | 107 | try: 108 | project_name=os.environ.get("PROJECT_NAME") 109 | except: 110 | project_name="Test project" 111 | try: 112 | file_name=os.environ.get("INVENTORY_FILE") 113 | except: 114 | file_name="inventory" 115 | 116 | 117 | if len(sys.argv) > 1 and sys.argv[1] == '--list': 118 | project_id = get_project_id(project_name) 119 | if not project_id: 120 | sys.stderr.write("Could not find project '%s'\n" %(project_name,)) 121 | sys.exit(1) 122 | 123 | wait_for_project_update(project_id) 124 | 125 | inv_contents = read_file(project_id, file_name) 126 | if not inv_contents: 127 | sys.stderr.write("Parse of inventory file '%s' in project '%s' failed\n" %(file_name, project_name)) 128 | sys.exit(1) 129 | 130 | json_inv = dump_json(inv_contents) 131 | print json_inv 132 | sys.exit(0) 133 | -------------------------------------------------------------------------------- /badlock/samba-patch-el.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: true 3 | become_method: sudo 4 | become_user: root 5 | vars: 6 | vulnerable_releases: 7 | '5': '3.0.33-3.40.el5_10' 8 | '6': '3.6.23-25.el6_7' 9 | '7': '4.2.3-12.el7_2' 10 | vulnerable_3x: 11 | '5': '3.6.23-9.el5_11' 12 | vulnerable_4: 13 | '6': '4.0.0-68.el6_7.rc4' 14 | 15 | tasks: 16 | # Base Samba package 17 | - name: check for samba version 18 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba.{{ ansible_architecture }} 19 | register: samba_version 20 | ignore_errors: true 21 | 22 | - block: 23 | - name: check for vulnerable versions 24 | debug: 25 | msg: "Samba version {{ samba_version.stdout }} is vulnerable." 26 | when: samba_version.stdout|version_compare(vulnerable_releases[ansible_distribution_major_version], '<=') 27 | register: is_vuln 28 | 29 | - name: update samba from yum if vulnerable 30 | yum: 31 | name: samba 32 | state: latest 33 | update_cache: yes 34 | when: not is_vuln|skipped 35 | notify: restart_samba 36 | register: installed 37 | 38 | - name: check for samba version 39 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba.{{ ansible_architecture }} 40 | register: samba_new_version 41 | when: not is_vuln|skipped 42 | 43 | - name: check that we are no longer vulnerable 44 | debug: 45 | msg: "Samba version {{ samba_new_version.stdout }} is still vulnerable!" 46 | when: not is_vuln|skipped 47 | failed_when: samba_new_version.stdout|version_compare(vulnerable_releases[ansible_distribution_major_version], '<=') 48 | 49 | when: samba_version.rc == 0 50 | 51 | 52 | # Samba 3x package (RHEL 5) 53 | - name: check for samba3x version 54 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba3x.{{ ansible_architecture }} 55 | register: samba3x_version 56 | ignore_errors: True 57 | 58 | - block: 59 | - name: check for vulnerable versions 60 | debug: 61 | msg: "Samba3x version {{ samba3x_version.stdout }} is vulnerable." 62 | when: samba3x_version.stdout|version_compare(vulnerable_3x[ansible_distribution_major_version], '<=') 63 | register: is_vuln 64 | 65 | - name: update samba3x from yum if vulnerable 66 | yum: 67 | name: samba3x 68 | state: latest 69 | update_cache: yes 70 | when: not is_vuln|skipped 71 | notify: restart_samba 72 | register: installed 73 | 74 | - name: check for samba3x version 75 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba3x.{{ ansible_architecture }} 76 | register: samba3x_new_version 77 | when: not is_vuln|skipped 78 | 79 | - name: check that we are no longer vulnerable 80 | debug: 81 | msg: "Samba3x version {{ samba3x_new_version.stdout }} is still vulnerable!" 82 | when: not is_vuln|skipped 83 | failed_when: samba3x_new_version.stdout|version_compare(vulnerable_3x[ansible_distribution_major_version], '<=') 84 | 85 | when: samba3x_version.rc == 0 86 | 87 | # Samba 4x package (RHEL 6) 88 | - name: check for samba4 version 89 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba4.{{ ansible_architecture }} 90 | register: samba4_version 91 | ignore_errors: True 92 | 93 | - block: 94 | - name: check for vulnerable versions 95 | debug: 96 | msg: "Samba4 version {{ samba4_version.stdout }} is vulnerable." 97 | when: samba4_version.stdout|version_compare(vulnerable_4[ansible_distribution_major_version], '<=') 98 | register: is_vuln 99 | 100 | - name: update samba4 from yum if vulnerable 101 | yum: 102 | name: samba4 103 | state: latest 104 | update_cache: yes 105 | when: not is_vuln|skipped 106 | notify: restart_samba 107 | register: installed 108 | 109 | - name: check for samba4 version 110 | shell: rpm -q --qf "%{VERSION}-%{RELEASE}" samba4.{{ ansible_architecture }} 111 | register: samba4_new_version 112 | when: not is_vuln|skipped 113 | 114 | - name: check that we are no longer vulnerable 115 | debug: 116 | msg: "Samba4 version {{ samba4_new_version.stdout }} is still vulnerable!" 117 | when: not is_vuln|skipped 118 | failed_when: samba4_new_version.stdout|version_compare(vulnerable_4[ansible_distribution_major_version], '<=') 119 | 120 | when: samba4_version.rc == 0 121 | 122 | handlers: 123 | - name: restart_samba 124 | service: 125 | name: "{{ service_name[ansible_os_family] }}" 126 | state: restarted 127 | -------------------------------------------------------------------------------- /control-remediation/apply-configuration-and-abort.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: true 4 | become_method: sudo 5 | become_user: root 6 | vars_files: 7 | - vars/ec2-vars 8 | 9 | tasks: 10 | 11 | - name: Ensure users are present 12 | user: 13 | name: "{{ item.name }}" 14 | groups: wheel 15 | state: present 16 | uid: "{{ item.uid }}" 17 | with_items: 18 | - { name: "apone", uid: 1200 } 19 | - { name: "gorman", uid: 1201 } 20 | - { name: "hicks", uid: 1202 } 21 | 22 | - name: Install needed software 23 | yum: 24 | name: "{{ item }}" 25 | state: latest 26 | with_items: 27 | - chrony 28 | - sudo 29 | - rsyslog 30 | 31 | - name: Ensure standard chrony config 32 | copy: 33 | src: files/chrony.conf 34 | dest: /etc/chrony.conf 35 | mode: 0644 36 | owner: root 37 | group: root 38 | 39 | - name: Ensure standard sudo config 40 | copy: 41 | src: files/sudoers 42 | dest: /etc/sudoers 43 | mode: 0640 44 | owner: root 45 | group: root 46 | 47 | - block: 48 | - name: Ensure log forwarding is configured 49 | copy: 50 | src: files/syslog 51 | dest: /etc/rsyslog.d/forward.conf 52 | mode: 0644 53 | owner: root 54 | group: root 55 | register: rsyslog_state 56 | 57 | - name: Ensure SELinux is enabled 58 | selinux: 59 | policy: targeted 60 | state: enforcing 61 | register: selinux_state 62 | 63 | - name: Ensure SELinux booleans are set properly 64 | seboolean: 65 | name: "{{ item }}" 66 | persistent: true 67 | state: false 68 | with_items: 69 | - httpd_execmem 70 | - selinuxuser_execstack 71 | - selinuxuser_execheap 72 | register: sebool_state 73 | 74 | - name: Ensure proper services are running 75 | service: 76 | name: "{{ item }}" 77 | state: running 78 | enabled: yes 79 | with_items: 80 | - rsyslog 81 | - chronyd 82 | 83 | - name: Abort if we made changes 84 | fail: 85 | msg: "Required configuration was not set" 86 | when: rsyslog_state|changed or selinux_state|changed or sebool_state|changed 87 | 88 | rescue: 89 | - name: Get EC2 instance information 90 | ec2_facts: 91 | 92 | - name: Terminate instance 93 | connection: local 94 | become: false 95 | ec2: 96 | region: "us-east-1" 97 | instance_ids: "{{ hostvars[inventory_hostname]['ansible_ec2_instance_id'] }}" 98 | state: absent 99 | wait: true 100 | 101 | - name: Relaunch instance 102 | connection: local 103 | become: false 104 | ec2: 105 | access_key: "{{ ec2_access_key }}" 106 | secret_key: "{{ ec2_secret_key }}" 107 | keypair: "{{ ec2_keypair }}" 108 | group: "{{ ec2_security_group }}" 109 | type: "{{ ec2_instance_type }}" 110 | image: "{{ ec2_image }}" 111 | region: "{{ ec2_region }}" 112 | instance_tags: "{'type':'{{ ec2_instance_type }}', 'group':'{{ ec2_security_group }}', 'Name':'demo_''{{ demo_tag_name }}'}" 113 | count: "{{ ec2_instance_count }}" 114 | wait: true 115 | user_data: | 116 | #!/bin/bash 117 | TOWER=tower-test.local 118 | JOB=457 119 | KEY=7acb361f01ca00414e8623433d49150b 120 | 121 | retry_attempts=10 122 | attempt=0 123 | while [[ $attempt -lt $retry_attempts ]] 124 | do 125 | status_code=`curl -s -i --data "host_config_key=${KEY}" http://${TOWER}/api/v1/job_templates/${JOB}/callback/ | head -n 1 | awk '{print $2}'` 126 | if [[ $status_code == 202 ]] 127 | then 128 | exit 0 129 | fi 130 | attempt=$(( attempt + 1 )) 131 | echo "${status_code} received... retrying in 1 minute. (Attempt ${attempt})" 132 | sleep 60 133 | done 134 | exit 1 135 | register: ec2 136 | 137 | - name: Wait for SSH to come up 138 | become: false 139 | connection: local 140 | wait_for: 141 | host: "{{ item.public_dns_name }}" 142 | port: 22 143 | delay: 60 144 | timeout: 320 145 | state: started 146 | with_items: 147 | - "{{ ec2.instances }}" 148 | 149 | - name: New instance 150 | debug: 151 | msg: "Instance relaunched due to configuration drift; new instance is {{ item.public_dns_name }}." 152 | with_items: 153 | - "{{ ec2.instances }}" 154 | --------------------------------------------------------------------------------