├── README.md
├── chapter-1
├── group_vars
│ └── lamp.yml
├── inventory
├── roles
│ ├── common
│ │ └── tasks
│ │ │ └── main.yml
│ ├── db
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── harden.yml
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── php
│ │ └── tasks
│ │ │ └── main.yml
│ └── web
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── tasks
│ │ └── main.yml
│ │ ├── templates
│ │ └── web.conf.j2
│ │ └── vars
│ │ └── main.yml
└── site.yml
├── chapter-10
├── ansible-module-owasp-zap
│ ├── LICENSE
│ ├── README.md
│ ├── owasp-zap-site-scan-module-playbook.yml
│ ├── owasp_zap_test_module.py
│ └── owasp_zap_test_module_module.html
└── dev-setup
│ ├── hosts
│ └── main.yml
├── chapter-11
├── vault-encrypt-string-example
│ └── main.yml
└── vault-mysql-example
│ ├── .vautlpass
│ ├── group_vars
│ └── mysql.yml
│ ├── hosts
│ ├── main.yml
│ └── roles
│ └── mysqlsetup
│ └── tasks
│ └── main.yml
├── chapter-2
├── jenkins
│ └── site.yml
└── rundeck
│ ├── site.yml
│ └── templates
│ └── rundeck-config.properties.j2
├── chapter-3
├── apache
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── 000-default.conf.j2
│ │ ├── apache2.conf.j2
│ │ └── default-ssl.conf.j2
├── iis
│ └── tasks
│ │ └── main.yml
├── linux-apache2-mod_php
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── apache2.conf.j2
│ │ └── wordpress-website.conf.j2
│ └── vars
│ │ └── main.yml
├── linux-apache2-php_fpm
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── apache2.conf.j2
│ │ └── wordpress-website.conf.j2
│ └── vars
│ │ └── main.yml
├── linux-mysql-hardening
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── vars
│ │ └── main.yml
├── linux-nginx-php_fpm
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── default.conf.j2
│ └── vars
│ │ └── main.yml
├── mysql
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── vars
│ │ └── main.yml
├── nginx
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── default.conf.j2
├── php
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── default.conf
│ └── vars
│ │ └── main.yml
├── wordpress-auto-updates
│ ├── inventory
│ └── site.yml
└── wordpress
│ ├── group_vars
│ └── wordpress.yml
│ ├── inventory
│ ├── roles
│ ├── certbot
│ │ ├── tasks
│ │ │ ├── generate-certs.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── website.conf
│ ├── common
│ │ └── tasks
│ │ │ └── main.yml
│ ├── duply
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── conf
│ │ │ └── exclude
│ ├── firewall-setup
│ │ └── tasks
│ │ │ └── main.yml
│ ├── mysql-hardening
│ │ └── tasks
│ │ │ └── main.yml
│ ├── mysql
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── main.yml
│ │ │ └── wordpress-db-user-setup.yml
│ ├── nginx
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── website.conf
│ ├── php-fpm
│ │ └── tasks
│ │ │ └── main.yml
│ └── wordpress
│ │ └── tasks
│ │ ├── main.yml
│ │ └── site-setup.yml
│ └── site.yml
├── chapter-4
├── beats-for-elastic-stack
│ ├── inventory
│ ├── main.yml
│ └── roles
│ │ ├── filebeat
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── configure-filebeat.yml
│ │ │ ├── install-filebeat.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── filebeat.yml.j2
│ │ ├── metricbeat
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── configure-metricbeat.yml
│ │ │ ├── install-metricbeat.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── metricbeat.yml.j2
│ │ └── packetbeat
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── tasks
│ │ ├── configure-packetbeat.yml
│ │ ├── install-packetbeat.yml
│ │ └── main.yml
│ │ └── templates
│ │ └── packetbeat.yml.j2
├── elastalert
│ ├── roles
│ │ ├── aws-serverless
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ ├── aws-credentials.j2
│ │ │ │ ├── config.js.j2
│ │ │ │ ├── handler.js.j2
│ │ │ │ ├── iamRoleStatements.json.j2
│ │ │ │ ├── initDb.js.j2
│ │ │ │ └── serverless.yml.j2
│ │ └── setup
│ │ │ ├── tasks
│ │ │ └── main.yml
│ │ │ └── templates
│ │ │ ├── elastalert-config.j2
│ │ │ ├── elastalert-service.j2
│ │ │ └── elastalert-sshrule.j2
│ └── site.yml
└── elastic-stack
│ ├── group_vars
│ └── elastic-stack.yml
│ ├── inventory
│ ├── main.yml
│ └── roles
│ ├── common
│ └── tasks
│ │ └── main.yml
│ ├── elasticsearch
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure-elasticsearch.yml
│ │ ├── install-elasticsearch.yml
│ │ └── main.yml
│ └── templates
│ │ ├── elasticsearch.yml.j2
│ │ └── jvm.options.j2
│ ├── kibana
│ ├── handlers
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
│ ├── logstash
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure-logstash.yml
│ │ ├── install-logstash.yml
│ │ └── main.yml
│ └── templates
│ │ ├── 02-beats-input.conf.j2
│ │ ├── 10-sshlog-filter.conf.j2
│ │ ├── 11-weblog-filter.conf.j2
│ │ └── 30-elasticsearch-output.conf.j2
│ └── nginx-reverse-proxy
│ ├── handlers
│ └── main.yml
│ ├── tasks
│ └── main.yml
│ └── templates
│ └── nginxdefault.j2
├── chapter-5
├── docker
│ ├── inventory
│ └── site.yml
├── dvsw-playbook
│ ├── inventory
│ └── site.yml
├── jenkins
│ ├── inventory
│ └── main.yml
├── zap-baseline-scan
│ ├── inventory
│ └── site.yml
├── zap-full-scan
│ ├── inventory
│ └── site.yml
└── zap-setup-playbook
│ ├── inventory
│ └── site.yml
├── chapter-6
├── autonessus
│ ├── inventory
│ ├── roles
│ │ ├── listpolices
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── listscans
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── pausescan
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── vars
│ │ │ │ └── main.yml
│ │ ├── resumescan
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── vars
│ │ │ │ └── main.yml
│ │ ├── setup
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── vars
│ │ │ │ └── main.yml
│ │ ├── startscan
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── vars
│ │ │ │ └── main.yml
│ │ └── stopscan
│ │ │ ├── tasks
│ │ │ └── main.yml
│ │ │ └── vars
│ │ │ └── main.yml
│ └── site.yml
├── nessus-restapi
│ └── main.yml
└── nessus-setup
│ ├── group_vars
│ └── nessus.yml
│ ├── inventory
│ ├── roles
│ └── setup
│ │ └── tasks
│ │ └── main.yml
│ └── site.yml
├── chapter-7
├── aws-cis-benchmarks
│ └── main.yml
├── blue-green-setup
│ ├── inventory
│ ├── main.yml
│ └── roles
│ │ ├── bluecode
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── index.html
│ │ ├── common
│ │ └── tasks
│ │ │ └── main.yml
│ │ ├── greencode
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── index.html
│ │ ├── haproxy
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── haproxy.cfg.j2
│ │ └── nginx
│ │ └── tasks
│ │ └── main.yml
├── blue-green-update
│ ├── inventory
│ ├── main.yml
│ └── templates
│ │ └── haproxy.cfg.j2
├── brakeman-scan
│ ├── inventory
│ └── main.yml
├── lynis
│ ├── inventory
│ └── main.yml
├── nikto-scan
│ ├── inventory
│ └── main.yml
├── nmap-basic-scan
│ └── main.yml
├── nmap-nse
│ └── main.yml
├── owasp-dependency-check
│ ├── inventory
│ └── main.yml
├── scout2-scan
│ └── main.yml
├── scout2-setup
│ └── main.yml
├── windows-audit-playbook
│ ├── inventory
│ └── windows-security-audit.yml
├── windows-updates-playbook
│ ├── inventory
│ └── windows-security-updates.yml
└── wp-scan
│ └── main.yml
├── chapter-8
├── anchore-cli-scan
│ ├── inventory
│ └── main.yml
├── anchore-server
│ ├── inventory
│ ├── main.yml
│ └── templates
│ │ └── config.yaml.j2
├── clair-scanner-setup
│ ├── inventory
│ └── main.yaml
├── clair-scanning-images
│ ├── inventory
│ └── main.yaml
├── docker-bench-security
│ └── main.yml
├── osquery-setup
│ ├── inventory
│ ├── main.yml
│ └── templates
│ │ ├── fim.conf
│ │ └── osquery.conf
├── vuls-scanning
│ ├── inventory
│ ├── main.yml
│ └── templates
│ │ ├── 192-168-33-80
│ │ └── config.toml
└── vuls
│ ├── group_vars
│ └── vuls.yml
│ ├── inventory
│ ├── main.yml
│ └── roles
│ ├── vuls_containers_download
│ └── tasks
│ │ └── main.yml
│ └── vuls_database_download
│ └── tasks
│ └── main.yml
└── chapter-9
├── cuckoo-scan
├── inventory
└── main.yml
├── cuckoo-setup
├── inventory
├── main.yml
└── roles
│ ├── cuckoo
│ └── tasks
│ │ └── main.yml
│ ├── dependencies
│ └── tasks
│ │ └── main.yml
│ ├── start-cuckoo
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── auxiliary.conf
│ │ ├── cuckoo.conf
│ │ ├── reporting.conf
│ │ └── virtualbox.conf
│ ├── virtualbox
│ └── tasks
│ │ └── main.yml
│ └── yara
│ ├── tasks
│ └── main.yml
│ └── templates
│ ├── ssdeep.sh
│ └── yara.sh
├── log-collection
├── inventory
└── main.yml
├── s3-backup
├── main.yml
└── templates
│ └── s3cmd.j2
├── viper-setup
├── inventory
├── main.yml
└── roles
│ ├── dependencies
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── ssdeep.sh
│ └── setup
│ └── tasks
│ └── main.yml
├── virus-total-scan
├── inventory
├── main.yml
└── templates
│ └── config.j2
└── virus-total
├── inventory
└── main.yml
/README.md:
--------------------------------------------------------------------------------
1 | # Security Automation with Ansible2
2 |
3 | This repository contains all the code, playbooks, details regarding the book on [Security Automation with Ansible2](https://www.packtpub.com/virtualization-and-cloud/security-automation-ansible-2).
4 |
5 | ## Index
6 |
7 | ### Part-1: Essential Ansible for building complex playbooks
8 | - Introduction to Ansible Playbooks and Roles
9 | - Ansible Tower, Jenkins and other automation tools
10 |
11 | ### Part-2: Security automation techniques and approaches
12 | - Setting up a hardened WordPress with encrypted automated backups
13 | - Log monitoring and server-less automated defense (ELK in AWS)
14 | - Automated Web Application Security Testing using OWASP ZAP
15 | - Vulnerability Scanning with Nessus
16 | - Security Hardening for applications and networks
17 | - Continuous security scanning for Docker containers
18 | - Automating lab setups for forensics collection, malware analysis
19 |
20 | ### Part-3: Extending and programming Ansible for even more security
21 | - Writing an Ansible module for security testing
22 | - Ansible security best practices, references and further reading
23 |
--------------------------------------------------------------------------------
/chapter-1/group_vars/lamp.yml:
--------------------------------------------------------------------------------
1 | remote_username: "hodor"
--------------------------------------------------------------------------------
/chapter-1/inventory:
--------------------------------------------------------------------------------
1 | [lamp]
2 | 192.168.56.10
3 |
--------------------------------------------------------------------------------
/chapter-1/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Ubuntu 16.04 ships without Python 2 installed by default
2 | - name: install python 2
3 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
4 |
5 | - name: install curl and git
6 | apt:
7 | name: "{{ item }}"
8 | state: present
9 | update_cache: yes
10 |
11 | with_items:
12 | - curl
13 | - git
--------------------------------------------------------------------------------
/chapter-1/roles/db/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start mysql
2 | service:
3 | state: started
4 | name: mysql
5 |
6 | - name: stop mysql
7 | service:
8 | state: stopped
9 | name: mysql
10 |
11 | - name: restart mysql
12 | service:
13 | state: restarted
14 | name: mysql
15 | daemon_reload: yes
16 |
--------------------------------------------------------------------------------
/chapter-1/roles/db/tasks/harden.yml:
--------------------------------------------------------------------------------
1 | - name: deletes anonymous mysql user
2 | mysql_user:
3 | user: ""
4 | state: absent
5 | login_password: "{{ mysql_root_password }}"
6 | login_user: root
7 |
8 | - name: secures the mysql root user
9 | mysql_user:
10 | user: root
11 | password: "{{ mysql_root_password }}"
12 | host: "{{ item }}"
13 | login_password: "{{ mysql_root_password }}"
14 | login_user: root
15 |
16 | with_items:
17 | - 127.0.0.1
18 | - localhost
19 | - ::1
20 | - "{{ ansible_fqdn }}"
21 |
22 | - name: removes the mysql test database
23 | mysql_db:
24 | db: test
25 | state: absent
26 | login_password: "{{ mysql_root_password }}"
27 | login_user: root
28 |
29 | - name: enable mysql on startup
30 | service:
31 | name: mysql
32 | enabled: yes
33 |
34 | notify:
35 | - start mysql
36 |
--------------------------------------------------------------------------------
/chapter-1/roles/db/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: set mysql root password
2 | debconf:
3 | name: mysql-server
4 | question: mysql-server/root_password
5 | value: "{{ mysql_root_password | quote }}"
6 | vtype: password
7 |
8 | - name: confirm mysql root password
9 | debconf:
10 | name: mysql-server
11 | question: mysql-server/root_password_again
12 | value: "{{ mysql_root_password | quote }}"
13 | vtype: password
14 |
15 | - name: install mysqlserver
16 | apt:
17 | name: "{{ item }}"
18 | state: present
19 |
20 | with_items:
21 | - mysql-server
22 | - mysql-client
23 |
24 | - include: harden.yml
25 |
--------------------------------------------------------------------------------
/chapter-1/roles/db/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mysql_root_password: "R4nd0mP4$$w0rd"
--------------------------------------------------------------------------------
/chapter-1/roles/php/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install php7
2 | apt:
3 | name: "{{ item }}"
4 | state: present
5 |
6 | with_items:
7 | - php7.0-mysql
8 | - php7.0-curl
9 | - php7.0-json
10 | - php7.0-cgi
11 | - php7.0
12 |     - libapache2-mod-php7.0
13 |
14 | - name: restart apache2
15 | service:
16 | state: restarted
17 | name: apache2
18 | daemon_reload: yes
19 |
--------------------------------------------------------------------------------
/chapter-1/roles/web/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start apache2
2 | service:
3 | state: started
4 | name: apache2
5 |
6 | - name: stop apache2
7 | service:
8 | state: stopped
9 | name: apache2
10 |
11 | - name: restart apache2
12 | service:
13 | state: restarted
14 | name: apache2
15 | daemon_reload: yes
16 |
--------------------------------------------------------------------------------
/chapter-1/roles/web/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install apache2 server
2 | apt:
3 | name: apache2
4 | state: present
5 |
6 | - name: update the apache2 server configuration
7 | template:
8 | src: web.conf.j2
9 | dest: /etc/apache2/sites-available/000-default.conf
10 | owner: root
11 | group: root
12 | mode: 0644
13 |
14 | - name: enable apache2 on startup
15 | service:
16 | name: apache2
17 | enabled: yes
18 |
19 | notify:
20 | - start apache2
21 |
--------------------------------------------------------------------------------
/chapter-1/roles/web/templates/web.conf.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | ServerAdmin {{server_admin_email}}
4 | DocumentRoot {{server_document_root}}
5 |
6 | ErrorLog ${APACHE_LOG_DIR}/error.log
7 | CustomLog ${APACHE_LOG_DIR}/access.log combined
8 |
9 |
--------------------------------------------------------------------------------
/chapter-1/roles/web/vars/main.yml:
--------------------------------------------------------------------------------
1 | server_admin_email: "hodor@localhost.local"
2 | server_document_root: "/var/www/html"
--------------------------------------------------------------------------------
/chapter-1/site.yml:
--------------------------------------------------------------------------------
1 | - name: lamp stack setup on ubuntu 16.04
2 | hosts: lamp
3 | gather_facts: False
4 | remote_user: "{{ remote_username }}"
5 | become: yes
6 |
7 | roles:
8 | - common
9 | - web
10 | - db
11 | - php
12 |
--------------------------------------------------------------------------------
/chapter-10/ansible-module-owasp-zap/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Appsecco
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chapter-10/ansible-module-owasp-zap/README.md:
--------------------------------------------------------------------------------
1 | # ansible-module-owasp-zap
2 | Ansible module for OWASP ZAP using Python API to scan web targets for security issues
3 |
4 | ## Why use this?
5 | A simple module to enable using Ansible to initiate web security scans using OWASP ZAP.
6 |
7 | ## What
8 | This module enables you to interact with an already setup and configured ZAP instance to execute passive active scans against web targets for security tests.
9 |
10 | ## How
11 | The module works with the OWASP ZAP API available when we have an existing running ZAP instance. This is similar to the [ZAP Baseline Scan](https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan) in the default settings.
12 |
13 | ## Get Started
14 | ### Start ZAP
15 |
16 | docker run --name zap -u zap -p 8080:8080 -i owasp/zap2docker-stable zap.sh -daemon -host 0.0.0.0 -port 8080 -config api.disablekey=true -config api.addrs.addr.name=.* -config api.addrs.addr.regex=true
17 |
18 | _For testing, API key is disabled. Please change as per your requirement_
19 |
20 | ### Software Pre-requisites
21 | Ensure that the OWASP ZAP Python client is installed
22 |
23 | pip install python-owasp-zap-v2.4
24 |
25 | Assuming that `ansible` is already setup the following command will work if you don't want to copy the module to a path which is ANSIBLE_LIBRARY
26 |
27 | $ ANSIBLE_LIBRARY=. ansible -m owasp_zap_test_module localhost -a "host=http://ZAP-Proxy:PORT target=http://target-webapp"
28 |
29 | If you want to specify an API KEY
30 |
31 | $ ANSIBLE_LIBRARY=. ansible -m owasp_zap_test_module localhost -a "host=http://ZAP-Proxy:PORT target=http://target-webapp apikey=SECRET-VALUE"
32 |
33 | If you want to run an Active scan
34 |
35 | $ ANSIBLE_LIBRARY=. ansible -m owasp_zap_test_module localhost -a "host=http://ZAP-Proxy:PORT target=http://target-webapp scantype=active"
36 |
37 | ### Sample Playbook
38 | A sample playbook you can use
39 |
40 | - name: Testing OWASP ZAP Test Module
41 | connection: local
42 | hosts: localhost
43 | tasks:
44 | - name: Scan a website
45 | owasp_zap_test_module:
46 | host: "http://ZAP-Proxy:PORT"
47 | target: "http://target-webapp"
48 | scantype: passive
49 | register: output
50 | - name: Print version
51 | debug:
52 | msg: "Scan Report: {{ output }}"
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/chapter-10/ansible-module-owasp-zap/owasp-zap-site-scan-module-playbook.yml:
--------------------------------------------------------------------------------
1 | - name: Testing OWASP ZAP Test Module
2 | connection: local
3 | hosts: localhost
4 | tasks:
5 | - name: Scan a website
6 | owasp_zap_test_module:
7 | host: "http://172.16.1.102:8080"
8 | target: "http://testphp.vulnweb.com"
9 | scantype: passive
10 | register: output
11 | - name: Print version
12 | debug:
13 | msg: "Scan Report: {{ output }}"
--------------------------------------------------------------------------------
/chapter-10/ansible-module-owasp-zap/owasp_zap_test_module.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # Copyright (c) 2017 Akash Mahajan & Madhu Akula
3 | # Apache License 2.0
4 |
5 | ANSIBLE_METADATA = {
6 | 'metadata_version': '1.1',
7 | 'status': ['preview'],
8 | 'supported_by': 'community'
9 | }
10 |
11 | DOCUMENTATION = '''
12 | ---
13 | module: OWASP ZAP Module
14 | short_description: Scan Web Targets with OWASP ZAP
15 | description:
16 | - Scan web targets using this OWASP ZAP.
17 | - By default it will spider and do a passive scan.
18 | - This requires OWASP ZAP to be hosted and configured already.
19 | - Written based on an idea for a chapter in Security Automation with Ansible2 book.
20 | version_added: "2.4"
21 | author: "Akash Mahajan (@makash on GitHub) and Madhu Akula (@madhuakula on Github)"
22 | options:
23 | host:
24 | description:
25 | - The IP address/domain name with port number of the hosted OWASP ZAP instance
26 | required: true
27 | target:
28 | description:
29 | - The website that needs to be scanned by OWASP ZAP
30 | required: true
31 | apikey:
32 | description:
33 | - API Key value if the ZAP instance has one
34 | required: false
35 | default: null
36 | scantype:
37 | description:
38 | - Type of scan to be done by OWASP ZAP
39 | required: false
40 | default: passive
41 | choices:
42 | - passive
43 | - active
44 | notes:
45 | - The module will indicate changed status if spidering and scanning take place
46 | - No authentication implemented so far
47 | requirements:
48 | - Requires the following module to be installed 'python-owasp-zap-v2.4'
49 | - pip install python-owasp-zap-v2.4 will work
50 | - Tested with Python2.7 version
51 | '''
52 |
53 | EXAMPLES = '''
54 | # Pass in a message
55 | - name: Scan a website
56 | owasp_zap_test_module:
57 | host: "http://172.16.1.102:8080"
58 | target: "http://testphp.vulnweb.com"
59 | scantype: passive
60 | register: output
61 | '''
62 |
63 | RETURN = '''
64 | changed:
65 | description: If changed or not (true if scan completed)
66 | type: bool
67 | output:
68 | description: Output of OWASP ZAP scan
69 | type: JSON
70 | target:
71 | description: Hostname of the web site that was scanned and attacked
72 | type: str
73 | host:
74 | description: Hostname of the OWASP ZAP scanner instance
75 | type: str
76 | '''
77 | try:
78 | from zapv2 import ZAPv2
79 | HAS_ZAPv2 = True
80 | except ImportError:
81 | HAS_ZAPv2 = False
82 |
83 | from ansible.module_utils.basic import AnsibleModule
84 | import time
85 |
86 | def run_module():
87 | # define the available arguments/parameters that a user can pass to
88 | # the module
89 | module_args = dict(
90 | host=dict(type='str', required=True),
91 | target=dict(type='str', required=True),
92 | apikey=dict(type='str',required=False,default=None),
93 | scantype=dict(default='passive', choices=['passive','active'])
94 | )
95 |
96 | result = dict(
97 | changed=False,
98 | original_message='',
99 | message=''
100 | )
101 |
102 | module = AnsibleModule(
103 | argument_spec=module_args,
104 | supports_check_mode=True
105 | )
106 |
107 | if not HAS_ZAPv2:
108 | module.fail_json(msg = 'OWASP python-owasp-zap-v2.4 required. pip install python-owasp-zap-v2.4')
109 |
110 | if module.check_mode:
111 | return result
112 |
113 | host = module.params['host']
114 | target = module.params['target']
115 | scantype = module.params['scantype']
116 | apikey = module.params['apikey']
117 |
118 | # if apikey:
119 | # apikey = module.params['apikey']
120 |
121 | zap = ZAPv2(apikey=apikey, proxies={'http':host,'https':host})
122 | zap.urlopen(target)
123 | try:
124 | scanid = zap.spider.scan(target)
125 | time.sleep(2)
126 | while (int(zap.spider.status(scanid)) < 100):
127 | time.sleep(2)
128 | except:
129 | module.fail_json(msg='Spidering failed')
130 |
131 | time.sleep(5)
132 |
133 | if scantype == 'active':
134 | try:
135 | scanid = zap.ascan.scan(target)
136 | while (int(zap.ascan.status(scanid)) < 100):
137 | time.sleep(5)
138 | except:
139 | module.fail_json(msg='Active Scan Failed')
140 | else:
141 | try:
142 | while (int(zap.pscan.records_to_scan) > 0):
143 | time.sleep(2)
144 | except:
145 | module.fail_json(msg='Passive Scan Failed')
146 |
147 | result['changed'] = True
148 | result['output'] = zap.core.alerts()
149 | result['target'] = target
150 | result['host'] = host
151 |
152 | module.exit_json(**result)
153 |
154 | def main():
155 | run_module()
156 |
157 | if __name__ == '__main__':
158 | main()
--------------------------------------------------------------------------------
/chapter-10/dev-setup/hosts:
--------------------------------------------------------------------------------
1 | [dev]
2 | 192.168.56.101 ansible_host=192.168.56.101 ansible_user=madhu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-10/dev-setup/main.yml:
--------------------------------------------------------------------------------
1 | - name: Setting Developer Environment
2 | hosts: dev
3 | remote_user: madhu
4 | become: yes
5 | vars:
6 | ansible_code_path: "/home/madhu/ansible-code"
7 |
8 | tasks:
9 | - name: installing prerequirements if not installed
10 | apt:
11 | name: "{{ item }}"
12 | state: present
13 | update_cache: yes
14 |
15 | with_items:
16 | - git
17 | - virtualenv
18 | - python-pip
19 |
20 | - name: downloading ansible repo locally
21 | git:
22 | repo: https://github.com/ansible/ansible.git
23 | dest: "{{ ansible_code_path }}/venv"
24 |
25 | - name: creating virtual environment
26 | pip:
27 | virtualenv: "{{ ansible_code_path }}"
28 | virtualenv_command: virtualenv
29 | requirements: "{{ ansible_code_path }}/venv/requirements.txt"
--------------------------------------------------------------------------------
/chapter-11/vault-encrypt-string-example/main.yml:
--------------------------------------------------------------------------------
1 | - name: ViewDNS domain information
2 | hosts: localhost
3 | vars:
4 | domain: google.com
5 | api_key: !vault |
6 | $ANSIBLE_VAULT;1.1;AES256
7 | 30636235316261383936353632386561313033383961613062643438303838393833313031626232
8 | 6631383133343631303539303833656266633264616431390a613335316536333839383132653939
9 | 35633162383336623533333430653066326162613230373061613331646537313231316337366162
10 | 3162363561303935660a353661653334396161633465383936646638626231363132366665323838
11 | 33346566313233653865306430373035613836613038333037303933306630336239613034653131
12 | 6363396231656331313033653564623466353135623032363239
13 | output_type: json
14 |
15 | tasks:
16 | - name: "getting {{ domain }} server info"
17 | uri:
18 | url: "https://api.viewdns.info/reverseip/?host={{ domain }}&apikey={{ api_key }}&output={{ output_type }}"
19 | method: GET
20 | register: results
21 |
22 | - debug:
23 | msg: "{{ results.json }}"
--------------------------------------------------------------------------------
/chapter-11/vault-mysql-example/.vautlpass:
--------------------------------------------------------------------------------
1 | thisisvaultpassword
--------------------------------------------------------------------------------
/chapter-11/vault-mysql-example/group_vars/mysql.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 32386133393565656432393265356162663137356137303530613739616439313933353236376138
3 | 3434623630653265383836323665383965303939366261620a616131633431333764346363376537
4 | 31646435373431346335306566363435313362393735376137316164306266666231396436616136
5 | 3436616233616364360a663439613036653831313634393063613063646231333437323230663166
6 | 33663431643863653931643736613437323362623331333531346239323332363035346135373939
7 | 3530623466336561363738386366373339373861643937613235
8 |
--------------------------------------------------------------------------------
/chapter-11/vault-mysql-example/hosts:
--------------------------------------------------------------------------------
1 | [mysql]
2 | 192.168.33.22 ansible_host=192.168.33.22 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-11/vault-mysql-example/main.yml:
--------------------------------------------------------------------------------
1 | - name: Installing MySQL server
2 | hosts: mysql
3 | remote_user: ubuntu
4 | become: yes
5 | gather_facts: no
6 |
7 | roles:
8 | - mysqlsetup
--------------------------------------------------------------------------------
/chapter-11/vault-mysql-example/roles/mysqlsetup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: set mysql root password
2 | debconf:
3 | name: mysql-server
4 | question: mysql-server/root_password
5 | value: "{{ mysql_root_password | quote }}"
6 | vtype: password
7 |
8 | - name: confirm mysql root password
9 | debconf:
10 | name: mysql-server
11 | question: mysql-server/root_password_again
12 | value: "{{ mysql_root_password | quote }}"
13 | vtype: password
14 |
15 | - name: install mysqlserver
16 | apt:
17 | name: "{{ item }}"
18 | state: present
19 | update_cache: yes
20 |
21 | with_items:
22 | - mysql-server
23 | - mysql-client
--------------------------------------------------------------------------------
/chapter-2/jenkins/site.yml:
--------------------------------------------------------------------------------
1 | - name: installing jenkins in ubuntu 16.04
2 | hosts: "192.168.1.7"
3 | remote_user: ubuntu
4 | gather_facts: False
5 | become: True
6 |
7 | tasks:
8 | - name: install python 2
9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
10 |
11 | - name: install curl and git
12 | apt: name={{ item }} state=present update_cache=yes
13 |
14 | with_items:
15 | - curl
16 | - git
17 |
18 | - name: adding jenkins gpg key
19 | apt_key:
20 | url: https://pkg.jenkins.io/debian/jenkins-ci.org.key
21 | state: present
22 |
23 |   - name: adding jenkins repository to system
24 | apt_repository:
25 |       repo: deb http://pkg.jenkins.io/debian-stable binary/
26 | state: present
27 |
28 | - name: installing jenkins
29 | apt:
30 | name: jenkins
31 | state: present
32 | update_cache: yes
33 |
34 | - name: adding jenkins to startup
35 | service:
36 | name: jenkins
37 | state: started
38 | enabled: yes
39 |
40 | - name: printing jenkins default administration password
41 | command: cat /var/lib/jenkins/secrets/initialAdminPassword
42 | register: jenkins_default_admin_password
43 |
44 | - debug:
45 | msg: "{{ jenkins_default_admin_password.stdout }}"
--------------------------------------------------------------------------------
/chapter-2/rundeck/site.yml:
--------------------------------------------------------------------------------
1 | - name: installing rundeck on ubuntu 16.04
2 | hosts: "192.168.1.7"
3 | remote_user: ubuntu
4 |   gather_facts: False
5 | become: yes
6 |
7 | tasks:
8 | - name: installing python2 minimal
9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
10 |
11 | - name: java and curl installation
12 | apt: name={{ item }} state=present update_cache=yes
13 |
14 | with_items:
15 | - curl
16 | - openjdk-8-jdk
17 |
18 | - name: downloading and installing rundeck deb package
19 | apt:
20 | deb: "http://dl.bintray.com/rundeck/rundeck-deb/rundeck-2.8.4-1-GA.deb"
21 |
22 | - name: updating the hostname in configuration
23 | template:
24 | src: rundeck-config.properties.j2
25 | dest: /etc/rundeck/rundeck-config.properties
26 |
27 | - name: add to startup and start rundeck
28 | service:
29 | name: rundeckd
30 | state: started
31 | enabled: yes # the task name promises boot-time startup; the service module needs enabled: yes for that
--------------------------------------------------------------------------------
/chapter-2/rundeck/templates/rundeck-config.properties.j2:
--------------------------------------------------------------------------------
1 | #loglevel.default is the default log level for jobs: ERROR,WARN,INFO,VERBOSE,DEBUG
2 | loglevel.default=INFO
3 | rdeck.base=/var/lib/rundeck
4 |
5 | #rss.enabled if set to true enables RSS feeds that are public (non-authenticated)
6 | rss.enabled=false
7 | # change hostname here
8 | grails.serverURL=http://192.168.1.7:4440
9 | dataSource.dbCreate = update
10 | dataSource.url = jdbc:h2:file:/var/lib/rundeck/data/rundeckdb;MVCC=true
--------------------------------------------------------------------------------
/chapter-3/apache/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: start apache2
3 | service: name=apache2 state=started
4 |
5 | - name: stop apache2
6 | service: name=apache2 state=stopped
7 |
8 | - name: restart apache2
9 | service: name=apache2 state=restarted
10 |
11 | - name: reload apache2
12 | service: name=apache2 state=reloaded
13 |
14 | - name: startup apache2
15 | service: name=apache2 enabled=yes
16 |
--------------------------------------------------------------------------------
/chapter-3/apache/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing apache2 server
2 | apt:
3 | name: apache2
4 | update_cache: yes
5 | state: present
6 |
7 | - name: updating customized templates for apache2 configuration
8 | template:
9 | src: "{{ item.src }}"
10 | dest: "{{ item.dst }}"
11 | mode: "0644" # quoted: unquoted 0644 is parsed as octal int 420 by YAML, yielding the wrong file mode
12 |
13 | with_items: # fixed: was "with_tems", which Ansible rejects as an unknown keyword
14 | - { src: 'apache2.conf.j2', dst: '/etc/apache2/conf.d/apache2.conf' }
15 | - { src: '000-default.conf.j2', dst: '/etc/apache2/sites-available/000-default.conf' }
16 | - { src: 'default-ssl.conf.j2', dst: '/etc/apache2/sites-available/default-ssl.conf' }
17 |
18 | - name: adding custom link for sites-enabled from sites-available
19 | file:
20 | src: "{{ item.src }}"
21 | dest: "{{ item.dest }}"
22 | state: link
23 |
24 | with_items:
25 | - { src: '/etc/apache2/sites-available/000-default.conf', dest: '/etc/apache2/sites-enabled/000-default.conf' }
26 | - { src: '/etc/apache2/sites-available/default-ssl.conf', dest: '/etc/apache2/sites-enabled/default-ssl.conf' }
27 |
28 | notify:
29 | - start apache2
30 | - startup apache2
31 |
--------------------------------------------------------------------------------
/chapter-3/apache/templates/000-default.conf.j2:
--------------------------------------------------------------------------------
1 |
2 | # The ServerName directive sets the request scheme, hostname and port that
3 | # the server uses to identify itself. This is used when creating
4 | # redirection URLs. In the context of virtual hosts, the ServerName
5 | # specifies what hostname must appear in the request's Host: header to
6 | # match this virtual host. For the default virtual host (this file) this
7 | # value is not decisive as it is used as a last resort host regardless.
8 | # However, you must set it for any further virtual host explicitly.
9 | #ServerName www.example.com
10 |
11 | ServerAdmin webmaster@localhost
12 | DocumentRoot /var/www/html
13 |
14 | # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
15 | # error, crit, alert, emerg.
16 | # It is also possible to configure the loglevel for particular
17 | # modules, e.g.
18 | #LogLevel info ssl:warn
19 |
20 | ErrorLog ${APACHE_LOG_DIR}/error.log
21 | CustomLog ${APACHE_LOG_DIR}/access.log combined
22 |
23 | # For most configuration files from conf-available/, which are
24 | # enabled or disabled at a global level, it is possible to
25 | # include a line for only one particular virtual host. For example the
26 | # following line enables the CGI configuration for this host only
27 | # after it has been globally disabled with "a2disconf".
28 | #Include conf-available/serve-cgi-bin.conf
29 |
30 |
--------------------------------------------------------------------------------
/chapter-3/apache/templates/apache2.conf.j2:
--------------------------------------------------------------------------------
1 | # This is the main Apache server configuration file. It contains the
2 | # configuration directives that give the server its instructions.
3 | # See http://httpd.apache.org/docs/2.4/ for detailed information about
4 | # the directives and /usr/share/doc/apache2/README.Debian about Debian specific
5 | # hints.
6 | #
7 | #
8 | # Summary of how the Apache 2 configuration works in Debian:
9 | # The Apache 2 web server configuration in Debian is quite different to
10 | # upstream's suggested way to configure the web server. This is because Debian's
11 | # default Apache2 installation attempts to make adding and removing modules,
12 | # virtual hosts, and extra configuration directives as flexible as possible, in
13 | # order to make automating the changes and administering the server as easy as
14 | # possible.
15 |
16 | # It is split into several files forming the configuration hierarchy outlined
17 | # below, all located in the /etc/apache2/ directory:
18 | #
19 | # /etc/apache2/
20 | # |-- apache2.conf
21 | # | `-- ports.conf
22 | # |-- mods-enabled
23 | # | |-- *.load
24 | # | `-- *.conf
25 | # |-- conf-enabled
26 | # | `-- *.conf
27 | # `-- sites-enabled
28 | # `-- *.conf
29 | #
30 | #
31 | # * apache2.conf is the main configuration file (this file). It puts the pieces
32 | # together by including all remaining configuration files when starting up the
33 | # web server.
34 | #
35 | # * ports.conf is always included from the main configuration file. It is
36 | # supposed to determine listening ports for incoming connections which can be
37 | # customized anytime.
38 | #
39 | # * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
40 | # directories contain particular configuration snippets which manage modules,
41 | # global configuration fragments, or virtual host configurations,
42 | # respectively.
43 | #
44 | # They are activated by symlinking available configuration files from their
45 | # respective *-available/ counterparts. These should be managed by using our
46 | # helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
47 | # their respective man pages for detailed information.
48 | #
49 | # * The binary is called apache2. Due to the use of environment variables, in
50 | # the default configuration, apache2 needs to be started/stopped with
51 | # /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
52 | # work with the default configuration.
53 |
54 |
55 | # Global configuration
56 | #
57 |
58 | #
59 | # ServerRoot: The top of the directory tree under which the server's
60 | # configuration, error, and log files are kept.
61 | #
62 | # NOTE! If you intend to place this on an NFS (or otherwise network)
63 | # mounted filesystem then please read the Mutex documentation (available
64 | # at );
65 | # you will save yourself a lot of trouble.
66 | #
67 | # Do NOT add a slash at the end of the directory path.
68 | #
69 | #ServerRoot "/etc/apache2"
70 |
71 | #
72 | # The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
73 | #
74 | Mutex file:${APACHE_LOCK_DIR} default
75 |
76 | #
77 | # PidFile: The file in which the server should record its process
78 | # identification number when it starts.
79 | # This needs to be set in /etc/apache2/envvars
80 | #
81 | PidFile ${APACHE_PID_FILE}
82 |
83 | #
84 | # Timeout: The number of seconds before receives and sends time out.
85 | #
86 | Timeout 300
87 |
88 | #
89 | # KeepAlive: Whether or not to allow persistent connections (more than
90 | # one request per connection). Set to "Off" to deactivate.
91 | #
92 | KeepAlive On
93 |
94 | #
95 | # MaxKeepAliveRequests: The maximum number of requests to allow
96 | # during a persistent connection. Set to 0 to allow an unlimited amount.
97 | # We recommend you leave this number high, for maximum performance.
98 | #
99 | MaxKeepAliveRequests 100
100 |
101 | #
102 | # KeepAliveTimeout: Number of seconds to wait for the next request from the
103 | # same client on the same connection.
104 | #
105 | KeepAliveTimeout 5
106 |
107 |
108 | # These need to be set in /etc/apache2/envvars
109 | User ${APACHE_RUN_USER}
110 | Group ${APACHE_RUN_GROUP}
111 |
112 | #
113 | # HostnameLookups: Log the names of clients or just their IP addresses
114 | # e.g., www.apache.org (on) or 204.62.129.132 (off).
115 | # The default is off because it'd be overall better for the net if people
116 | # had to knowingly turn this feature on, since enabling it means that
117 | # each client request will result in AT LEAST one lookup request to the
118 | # nameserver.
119 | #
120 | HostnameLookups Off
121 |
122 | # ErrorLog: The location of the error log file.
123 | # If you do not specify an ErrorLog directive within a
124 | # container, error messages relating to that virtual host will be
125 | # logged here. If you *do* define an error logfile for a
126 | # container, that host's errors will be logged there and not here.
127 | #
128 | ErrorLog ${APACHE_LOG_DIR}/error.log
129 |
130 | #
131 | # LogLevel: Control the severity of messages logged to the error_log.
132 | # Available values: trace8, ..., trace1, debug, info, notice, warn,
133 | # error, crit, alert, emerg.
134 | # It is also possible to configure the log level for particular modules, e.g.
135 | # "LogLevel info ssl:warn"
136 | #
137 | LogLevel warn
138 |
139 | # Include module configuration:
140 | IncludeOptional mods-enabled/*.load
141 | IncludeOptional mods-enabled/*.conf
142 |
143 | # Include list of ports to listen on
144 | Include ports.conf
145 |
146 |
147 | # Sets the default security model of the Apache2 HTTPD server. It does
148 | # not allow access to the root filesystem outside of /usr/share and /var/www.
149 | # The former is used by web applications packaged in Debian,
150 | # the latter may be used for local directories served by the web server. If
151 | # your system is serving content from a sub-directory in /srv you must allow
152 | # access here, or in any related virtual host.
153 |
154 | Options FollowSymLinks
155 | AllowOverride None
156 | Require all denied
157 |
158 |
159 |
160 | AllowOverride None
161 | Require all granted
162 |
163 |
164 |
165 | Options Indexes FollowSymLinks
166 | AllowOverride None
167 | Require all granted
168 |
169 |
170 | #
171 | # Options Indexes FollowSymLinks
172 | # AllowOverride None
173 | # Require all granted
174 | #
175 |
176 |
177 |
178 |
179 | # AccessFileName: The name of the file to look for in each directory
180 | # for additional configuration directives. See also the AllowOverride
181 | # directive.
182 | #
183 | AccessFileName .htaccess
184 |
185 | #
186 | # The following lines prevent .htaccess and .htpasswd files from being
187 | # viewed by Web clients.
188 | #
189 |
190 | Require all denied
191 |
192 |
193 |
194 | #
195 | # The following directives define some format nicknames for use with
196 | # a CustomLog directive.
197 | #
198 | # These deviate from the Common Log Format definitions in that they use %O
199 | # (the actual bytes sent including headers) instead of %b (the size of the
200 | # requested file), because the latter makes it impossible to detect partial
201 | # requests.
202 | #
203 | # Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
204 | # Use mod_remoteip instead.
205 | #
206 | LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
207 | LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
208 | LogFormat "%h %l %u %t \"%r\" %>s %O" common
209 | LogFormat "%{Referer}i -> %U" referer
210 | LogFormat "%{User-agent}i" agent
211 |
212 | # Include of directories ignores editors' and dpkg's backup files,
213 | # see README.Debian for details.
214 |
215 | # Include generic snippets of statements
216 | IncludeOptional conf-enabled/*.conf
217 |
218 | # Include the virtual host configurations:
219 | IncludeOptional sites-enabled/*.conf
220 |
--------------------------------------------------------------------------------
/chapter-3/apache/templates/default-ssl.conf.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | ServerAdmin webmaster@localhost
4 |
5 | DocumentRoot /var/www/html
6 |
7 | # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
8 | # error, crit, alert, emerg.
9 | # It is also possible to configure the loglevel for particular
10 | # modules, e.g.
11 | #LogLevel info ssl:warn
12 |
13 | ErrorLog ${APACHE_LOG_DIR}/error.log
14 | CustomLog ${APACHE_LOG_DIR}/access.log combined
15 |
16 | # For most configuration files from conf-available/, which are
17 | # enabled or disabled at a global level, it is possible to
18 | # include a line for only one particular virtual host. For example the
19 | # following line enables the CGI configuration for this host only
20 | # after it has been globally disabled with "a2disconf".
21 | #Include conf-available/serve-cgi-bin.conf
22 |
23 | # SSL Engine Switch:
24 | # Enable/Disable SSL for this virtual host.
25 | SSLEngine on
26 |
27 | # A self-signed (snakeoil) certificate can be created by installing
28 | # the ssl-cert package. See
29 | # /usr/share/doc/apache2/README.Debian.gz for more info.
30 | # If both key and certificate are stored in the same file, only the
31 | # SSLCertificateFile directive is needed.
32 | SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
33 | SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
34 |
35 | # Server Certificate Chain:
36 | # Point SSLCertificateChainFile at a file containing the
37 | # concatenation of PEM encoded CA certificates which form the
38 | # certificate chain for the server certificate. Alternatively
39 | # the referenced file can be the same as SSLCertificateFile
40 | # when the CA certificates are directly appended to the server
41 | # certificate for convenience.
42 | #SSLCertificateChainFile /etc/apache2/ssl.crt/server-ca.crt
43 |
44 | # Certificate Authority (CA):
45 | # Set the CA certificate verification path where to find CA
46 | # certificates for client authentication or alternatively one
47 | # huge file containing all of them (file must be PEM encoded)
48 | # Note: Inside SSLCACertificatePath you need hash symlinks
49 | # to point to the certificate files. Use the provided
50 | # Makefile to update the hash symlinks after changes.
51 | #SSLCACertificatePath /etc/ssl/certs/
52 | #SSLCACertificateFile /etc/apache2/ssl.crt/ca-bundle.crt
53 |
54 | # Certificate Revocation Lists (CRL):
55 | # Set the CA revocation path where to find CA CRLs for client
56 | # authentication or alternatively one huge file containing all
57 | # of them (file must be PEM encoded)
58 | # Note: Inside SSLCARevocationPath you need hash symlinks
59 | # to point to the certificate files. Use the provided
60 | # Makefile to update the hash symlinks after changes.
61 | #SSLCARevocationPath /etc/apache2/ssl.crl/
62 | #SSLCARevocationFile /etc/apache2/ssl.crl/ca-bundle.crl
63 |
64 | # Client Authentication (Type):
65 | # Client certificate verification type and depth. Types are
66 | # none, optional, require and optional_no_ca. Depth is a
67 | # number which specifies how deeply to verify the certificate
68 | # issuer chain before deciding the certificate is not valid.
69 | #SSLVerifyClient require
70 | #SSLVerifyDepth 10
71 |
72 | # SSL Engine Options:
73 | # Set various options for the SSL engine.
74 | # o FakeBasicAuth:
75 | # Translate the client X.509 into a Basic Authorisation. This means that
76 | # the standard Auth/DBMAuth methods can be used for access control. The
77 | # user name is the `one line' version of the client's X.509 certificate.
78 | # Note that no password is obtained from the user. Every entry in the user
79 | # file needs this password: `xxj31ZMTZzkVA'.
80 | # o ExportCertData:
81 | # This exports two additional environment variables: SSL_CLIENT_CERT and
82 | # SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
83 | # server (always existing) and the client (only existing when client
84 | # authentication is used). This can be used to import the certificates
85 | # into CGI scripts.
86 | # o StdEnvVars:
87 | # This exports the standard SSL/TLS related `SSL_*' environment variables.
88 | # Per default this exportation is switched off for performance reasons,
89 | # because the extraction step is an expensive operation and is usually
90 | # useless for serving static content. So one usually enables the
91 | # exportation for CGI and SSI requests only.
92 | # o OptRenegotiate:
93 | # This enables optimized SSL connection renegotiation handling when SSL
94 | # directives are used in per-directory context.
95 | #SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
96 |
97 | SSLOptions +StdEnvVars
98 |
99 |
100 | SSLOptions +StdEnvVars
101 |
102 |
103 | # SSL Protocol Adjustments:
104 | # The safe and default but still SSL/TLS standard compliant shutdown
105 | # approach is that mod_ssl sends the close notify alert but doesn't wait for
106 | # the close notify alert from client. When you need a different shutdown
107 | # approach you can use one of the following variables:
108 | # o ssl-unclean-shutdown:
109 | # This forces an unclean shutdown when the connection is closed, i.e. no
110 | # SSL close notify alert is send or allowed to received. This violates
111 | # the SSL/TLS standard but is needed for some brain-dead browsers. Use
112 | # this when you receive I/O errors because of the standard approach where
113 | # mod_ssl sends the close notify alert.
114 | # o ssl-accurate-shutdown:
115 | # This forces an accurate shutdown when the connection is closed, i.e. a
116 | # SSL close notify alert is send and mod_ssl waits for the close notify
117 | # alert of the client. This is 100% SSL/TLS standard compliant, but in
118 | # practice often causes hanging connections with brain-dead browsers. Use
119 | # this only for browsers where you know that their SSL implementation
120 | # works correctly.
121 | # Notice: Most problems of broken clients are also related to the HTTP
122 | # keep-alive facility, so you usually additionally want to disable
123 | # keep-alive for those clients, too. Use variable "nokeepalive" for this.
124 | # Similarly, one has to force some clients to use HTTP/1.0 to workaround
125 | # their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
126 | # "force-response-1.0" for this.
127 | BrowserMatch "MSIE [2-6]" \
128 | nokeepalive ssl-unclean-shutdown \
129 | downgrade-1.0 force-response-1.0
130 | # MSIE 7 and newer should be able to use keepalive
131 | BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
132 |
133 |
134 |
135 |
--------------------------------------------------------------------------------
/chapter-3/iis/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing IIS server
2 | win_feature:
3 | name: "Web-Server"
4 | state: present
5 | restart: yes # allow a reboot if the feature installation requires one
6 | include_sub_features: yes # pull in all Web-Server sub-features
7 | include_management_tools: yes
8 |
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-mod_php/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # handlers file for linux-apache2-mod_php
2 | - name: start apache2
3 | service:
4 | name: apache2
5 | state: started
6 |
7 | - name: enable apache2
8 | service:
9 | name: apache2
10 | enabled: yes
11 |
12 | - name: stop apache2
13 | service:
14 | name: apache2
15 | state: stopped
16 |
17 | - name: restart apache2
18 | service:
19 | name: apache2
20 | state: restarted
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-mod_php/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # tasks file for linux-apache2-mod_php
2 |
3 | - name: installing apache2 and mod_php
4 | apt:
5 | name: "{{ item }}"
6 | update_cache: yes
7 | state: present
8 | with_items:
9 | - apache2
10 | - php5-mysql
11 | - php5
12 | - php5-mcrypt
13 | - php5-gd
14 | - libapache2-mod-php5
15 | - libapache2-mod-auth-mysql
16 |
17 | - name: disable the default.conf
18 | file:
19 | path: /etc/apache2/sites-enabled/000-default.conf
20 | state: absent
21 |
22 | - name: adding site and apache2 configuration for website
23 | template:
24 | src: "{{ item.src }}"
25 | dest: "{{ item.dest }}"
26 | with_items:
27 | - { src: "wordpress-website.conf.j2", dest: "/etc/apache2/sites-available/wordpress-website.conf" }
28 | - { src: "apache2.conf.j2", dest: "/etc/apache2/apache2.conf" }
29 |
30 | - name: enabling the configuration
31 | file:
32 | src: "/etc/apache2/sites-available/wordpress-website.conf"
33 | dest: "/etc/apache2/sites-enabled/wordpress-website.conf"
34 | state: link
35 |
36 | - name: adding directory permissions and ownership
37 | file:
38 | path: "{{ website_root_directory }}"
39 | owner: www-data
40 | group: www-data
41 | recurse: yes
42 |
43 | - name: enable apache2 for starting on boot
44 | service:
45 | name: apache2
46 | enabled: yes
47 | state: started
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-mod_php/templates/apache2.conf.j2:
--------------------------------------------------------------------------------
1 | # This is the main Apache server configuration file. It contains the
2 | # configuration directives that give the server its instructions.
3 | # See http://httpd.apache.org/docs/2.4/ for detailed information about
4 | # the directives and /usr/share/doc/apache2/README.Debian about Debian specific
5 | # hints.
6 | #
7 | #
8 | # Summary of how the Apache 2 configuration works in Debian:
9 | # The Apache 2 web server configuration in Debian is quite different to
10 | # upstream's suggested way to configure the web server. This is because Debian's
11 | # default Apache2 installation attempts to make adding and removing modules,
12 | # virtual hosts, and extra configuration directives as flexible as possible, in
13 | # order to make automating the changes and administering the server as easy as
14 | # possible.
15 |
16 | # It is split into several files forming the configuration hierarchy outlined
17 | # below, all located in the /etc/apache2/ directory:
18 | #
19 | # /etc/apache2/
20 | # |-- apache2.conf
21 | # | `-- ports.conf
22 | # |-- mods-enabled
23 | # | |-- *.load
24 | # | `-- *.conf
25 | # |-- conf-enabled
26 | # | `-- *.conf
27 | # `-- sites-enabled
28 | # `-- *.conf
29 | #
30 | #
31 | # * apache2.conf is the main configuration file (this file). It puts the pieces
32 | # together by including all remaining configuration files when starting up the
33 | # web server.
34 | #
35 | # * ports.conf is always included from the main configuration file. It is
36 | # supposed to determine listening ports for incoming connections which can be
37 | # customized anytime.
38 | #
39 | # * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
40 | # directories contain particular configuration snippets which manage modules,
41 | # global configuration fragments, or virtual host configurations,
42 | # respectively.
43 | #
44 | # They are activated by symlinking available configuration files from their
45 | # respective *-available/ counterparts. These should be managed by using our
46 | # helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
47 | # their respective man pages for detailed information.
48 | #
49 | # * The binary is called apache2. Due to the use of environment variables, in
50 | # the default configuration, apache2 needs to be started/stopped with
51 | # /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
52 | # work with the default configuration.
53 |
54 |
55 | # Global configuration
56 | #
57 |
58 | #
59 | # ServerRoot: The top of the directory tree under which the server's
60 | # configuration, error, and log files are kept.
61 | #
62 | # NOTE! If you intend to place this on an NFS (or otherwise network)
63 | # mounted filesystem then please read the Mutex documentation (available
64 | # at );
65 | # you will save yourself a lot of trouble.
66 | #
67 | # Do NOT add a slash at the end of the directory path.
68 | #
69 | #ServerRoot "/etc/apache2"
70 |
71 | #
72 | # The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
73 | #
74 | Mutex file:${APACHE_LOCK_DIR} default
75 |
76 | #
77 | # PidFile: The file in which the server should record its process
78 | # identification number when it starts.
79 | # This needs to be set in /etc/apache2/envvars
80 | #
81 | PidFile ${APACHE_PID_FILE}
82 |
83 | #
84 | # Timeout: The number of seconds before receives and sends time out.
85 | #
86 | Timeout 300
87 |
88 | #
89 | # KeepAlive: Whether or not to allow persistent connections (more than
90 | # one request per connection). Set to "Off" to deactivate.
91 | #
92 | KeepAlive On
93 |
94 | #
95 | # MaxKeepAliveRequests: The maximum number of requests to allow
96 | # during a persistent connection. Set to 0 to allow an unlimited amount.
97 | # We recommend you leave this number high, for maximum performance.
98 | #
99 | MaxKeepAliveRequests 100
100 |
101 | #
102 | # KeepAliveTimeout: Number of seconds to wait for the next request from the
103 | # same client on the same connection.
104 | #
105 | KeepAliveTimeout 5
106 |
107 |
108 | # These need to be set in /etc/apache2/envvars
109 | User ${APACHE_RUN_USER}
110 | Group ${APACHE_RUN_GROUP}
111 |
112 | #
113 | # HostnameLookups: Log the names of clients or just their IP addresses
114 | # e.g., www.apache.org (on) or 204.62.129.132 (off).
115 | # The default is off because it'd be overall better for the net if people
116 | # had to knowingly turn this feature on, since enabling it means that
117 | # each client request will result in AT LEAST one lookup request to the
118 | # nameserver.
119 | #
120 | HostnameLookups Off
121 |
122 | # ErrorLog: The location of the error log file.
123 | # If you do not specify an ErrorLog directive within a
124 | # container, error messages relating to that virtual host will be
125 | # logged here. If you *do* define an error logfile for a
126 | # container, that host's errors will be logged there and not here.
127 | #
128 | ErrorLog ${APACHE_LOG_DIR}/error.log
129 |
130 | #
131 | # LogLevel: Control the severity of messages logged to the error_log.
132 | # Available values: trace8, ..., trace1, debug, info, notice, warn,
133 | # error, crit, alert, emerg.
134 | # It is also possible to configure the log level for particular modules, e.g.
135 | # "LogLevel info ssl:warn"
136 | #
137 | LogLevel warn
138 |
139 | # Include module configuration:
140 | IncludeOptional mods-enabled/*.load
141 | IncludeOptional mods-enabled/*.conf
142 |
143 | # Include list of ports to listen on
144 | Include ports.conf
145 |
146 |
147 | # Sets the default security model of the Apache2 HTTPD server. It does
148 | # not allow access to the root filesystem outside of /usr/share and /var/www.
149 | # The former is used by web applications packaged in Debian,
150 | # the latter may be used for local directories served by the web server. If
151 | # your system is serving content from a sub-directory in /srv you must allow
152 | # access here, or in any related virtual host.
153 |
154 | Options FollowSymLinks
155 | AllowOverride None
156 | Require all denied
157 |
158 |
159 |
160 | AllowOverride None
161 | Require all granted
162 |
163 |
164 |
165 | Options Indexes FollowSymLinks
166 | AllowOverride None
167 | Require all granted
168 |
169 |
170 | #
171 | # Options Indexes FollowSymLinks
172 | # AllowOverride None
173 | # Require all granted
174 | #
175 |
176 |
177 |
178 |
179 | # AccessFileName: The name of the file to look for in each directory
180 | # for additional configuration directives. See also the AllowOverride
181 | # directive.
182 | #
183 | AccessFileName .htaccess
184 |
185 | #
186 | # The following lines prevent .htaccess and .htpasswd files from being
187 | # viewed by Web clients.
188 | #
189 |
190 | Require all denied
191 |
192 |
193 |
194 | #
195 | # The following directives define some format nicknames for use with
196 | # a CustomLog directive.
197 | #
198 | # These deviate from the Common Log Format definitions in that they use %O
199 | # (the actual bytes sent including headers) instead of %b (the size of the
200 | # requested file), because the latter makes it impossible to detect partial
201 | # requests.
202 | #
203 | # Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
204 | # Use mod_remoteip instead.
205 | #
206 | LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
207 | LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
208 | LogFormat "%h %l %u %t \"%r\" %>s %O" common
209 | LogFormat "%{Referer}i -> %U" referer
210 | LogFormat "%{User-agent}i" agent
211 |
212 | # Include of directories ignores editors' and dpkg's backup files,
213 | # see README.Debian for details.
214 |
215 | # Include generic snippets of statements
216 | IncludeOptional conf-enabled/*.conf
217 |
218 | # Include the virtual host configurations:
219 | IncludeOptional sites-enabled/*.conf
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-mod_php/templates/wordpress-website.conf.j2:
--------------------------------------------------------------------------------
1 |
2 | ServerName {{ server_domain_name_or_ip }}
3 | ServerAdmin {{ server_admin_email }}
4 | DocumentRoot {{ website_root_directory }}
5 |
6 | # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
7 | # error, crit, alert, emerg.
8 | # It is also possible to configure the loglevel for particular
9 | # modules, e.g.
10 | #LogLevel info ssl:warn
11 |
12 | ErrorLog ${APACHE_LOG_DIR}/error.log
13 | CustomLog ${APACHE_LOG_DIR}/access.log combined
14 |
15 |
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-mod_php/vars/main.yml:
--------------------------------------------------------------------------------
1 | # vars file for linux-apache2-mod_php
2 | server_domain_name_or_ip: "127.0.0.1"
3 | website_root_directory: "/var/www/html"
4 | server_admin_email: "webmaster@localhost"
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-php_fpm/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # handlers file for linux-apache2-php_fpm
2 | - name: start apache2
3 | service:
4 | name: apache2
5 | state: started # fixed: value was split across "state:" / "tarted:", which is invalid YAML for this handler
6 |
7 | - name: enable apache2
8 | service:
9 | name: apache2
10 | enabled: yes
11 |
12 | - name: stop apache2
13 | service:
14 | name: apache2
15 | state: stopped
16 |
17 | - name: restart apache2
18 | service:
19 | name: apache2
20 | state: restarted
21 |
22 | - name: start php5-fpm
23 | service:
24 | name: php5-fpm
25 | state: started
26 |
27 | - name: enable php5-fpm
28 | service:
29 | name: php5-fpm
30 | enabled: yes
31 |
32 | - name: stop php5-fpm
33 | service:
34 | name: php5-fpm
35 | state: stopped
36 |
37 | - name: restart php5-fpm
38 | service:
39 | name: php5-fpm
40 | state: restarted
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-php_fpm/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for linux-apache2-php_fpm
# Installs apache2 + php5-fpm, wires up the wordpress vhost, and starts both
# services on boot.

- name: installing apache2 and php_fpm
  apt:
    name: "{{ item }}"
    update_cache: yes
    state: present

  with_items:
    - apache2
    - php5-fpm
    - php5-mysql
    - php5-mcrypt
    - php5-gd
    - php5-curl

# Remove the distro default vhost symlink so only our site is served.
- name: disable the default.conf
  file:
    path: /etc/apache2/sites-enabled/000-default.conf
    state: absent

# FIX: "cofiguration" typo in the task name corrected.
- name: adding site, apache2 and php5-fpm configuration for website
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"

  with_items:
    - { src: "wordpress-website.conf.j2", dest: "/etc/apache2/sites-available/wordpress-website.conf" }
    - { src: "apache2.conf.j2", dest: "/etc/apache2/apache2.conf" }

- name: enabling the configuration
  file:
    src: /etc/apache2/sites-available/wordpress-website.conf
    dest: /etc/apache2/sites-enabled/wordpress-website.conf
    state: link

# Apache worker user must own the docroot so PHP can write uploads etc.
- name: adding directory permissions and ownership
  file:
    path: "{{ website_root_directory }}"
    owner: www-data
    group: www-data
    recurse: yes

- name: enable php5-fpm for starting on boot
  service:
    name: php5-fpm
    enabled: yes
    state: started

- name: enable apache2 for starting on boot
  service:
    name: apache2
    enabled: yes
    state: started
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-php_fpm/templates/wordpress-website.conf.j2:
--------------------------------------------------------------------------------
# Apache vhost for the wordpress site, proxying PHP to php5-fpm.
# NOTE(review): the <VirtualHost>/<FilesMatch> tags below were missing from the
# checked-in copy (apparently stripped by an HTML export), leaving an invalid
# config — reconstructed here; confirm against the upstream role.
<VirtualHost *:80>
    ServerName {{ server_domain_name_or_ip }}
    ServerAdmin {{ server_admin_email }}
    DocumentRoot {{ website_root_directory }}

    # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
    # error, crit, alert, emerg.
    # It is also possible to configure the loglevel for particular
    # modules, e.g.
    #LogLevel info ssl:warn

    # Hand every .php request to the php5-fpm socket.
    <FilesMatch \.php$>
        SetHandler "proxy:unix:/var/run/php5-fpm.sock|fcgi://localhost/"
    </FilesMatch>

    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
--------------------------------------------------------------------------------
/chapter-3/linux-apache2-php_fpm/vars/main.yml:
--------------------------------------------------------------------------------
# vars file for linux-apache2-php_fpm
# Domain (or IP) rendered as Apache ServerName in the vhost template.
server_domain_name_or_ip: "127.0.0.1"
# DocumentRoot of the site; ownership is managed by the role tasks.
website_root_directory: "/var/www/html"
# Contact intended for Apache ServerAdmin (NOTE: the template currently
# hardcodes webmaster@localhost instead of using this variable).
server_admin_email: "webmaster@localhost"
--------------------------------------------------------------------------------
/chapter-3/linux-mysql-hardening/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for linux-mysql-hardening
# FIX: header said "tasks file"; also converted key=value shorthand to native
# block YAML for consistency with the other handler files in this chapter.

- name: start mysql
  service:
    name: mysql
    state: started

- name: stop mysql
  service:
    name: mysql
    state: stopped

- name: restart mysql
  service:
    name: mysql
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/linux-mysql-hardening/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for linux-mysql-hardening
# Applies mysql_secure_installation-style hardening: drop anonymous users,
# set a root password on every root@host entry, and remove the test database.

# gather_facts is needed because the play may run with facts disabled and
# ansible_fqdn is used below.
- name: gather facts
  setup:

# An empty user name matches MySQL's anonymous accounts.
- name: delete anonymous mysql user for localhost
  mysql_user:
    user: ""
    state: absent
    login_password: "{{ mysql_root_password }}"
    login_user: root

# Set the password for every host entry root can connect from.
- name: secure mysql root user
  mysql_user:
    user: "root"
    password: "{{ mysql_root_password }}"
    host: "{{ item }}"
    login_password: "{{ mysql_root_password }}"
    login_user: root

  with_items:
    - 127.0.0.1
    - localhost
    - ::1
    - "{{ ansible_fqdn }}"

- name: removes mysql test database
  mysql_db:
    db: test
    state: absent
    login_password: "{{ mysql_root_password }}"
    login_user: root

  notify:
    - restart mysql
--------------------------------------------------------------------------------
/chapter-3/linux-mysql-hardening/vars/main.yml:
--------------------------------------------------------------------------------
# vars file for linux-mysql-hardening (header previously mislabeled "tasks file")
# Root password applied by the hardening tasks; override per environment —
# do not ship this default to production.
mysql_root_password: "mysqlrootpassword"
--------------------------------------------------------------------------------
/chapter-3/linux-nginx-php_fpm/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for linux-nginx-php_fpm
# Installs nginx + php5-fpm, secures the PHP path-info handling, and deploys
# the nginx site configuration.

- name: installing nginx and php-fpm
  apt:
    name: "{{ item }}"
    update_cache: yes
    state: present
  with_items:
    - nginx
    - php5-fpm
    - php5
    - php5-mysql
    - php5-mcrypt
    - php5-gd

# FIX: without `regexp`, lineinfile appends a second directive and leaves the
# shipped ";cgi.fix_pathinfo=1" in place; match it (commented or not) and
# replace it instead.
- name: configuring php.ini for php processor
  lineinfile:
    path: /etc/php5/fpm/php.ini
    regexp: '^;?cgi\.fix_pathinfo='
    line: "cgi.fix_pathinfo=0"

- name: update the nginx configuration to support php-fpm
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"

  with_items:
    - { src: "default.conf.j2", dest: "/etc/nginx/conf.d/default.conf" }

- name: enable and start php5-fpm on startup
  service:
    name: php5-fpm
    enabled: yes
    state: restarted

- name: enable and start nginx on startup
  service:
    name: nginx
    enabled: yes
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/linux-nginx-php_fpm/templates/default.conf.j2:
--------------------------------------------------------------------------------
# nginx site template for the linux-nginx-php_fpm role: serves static files
# and forwards .php requests to the php5-fpm unix socket.
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.php index.html index.htm;

    server_name {{ server_domain_name_or_ip }};

    location / {
        try_files $uri $uri/ =404;
    }

    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    # try_files =404 prevents passing nonexistent paths to the interpreter
    # (pairs with cgi.fix_pathinfo=0 set by the role tasks).
    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass unix:/var/run/php5-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}
--------------------------------------------------------------------------------
/chapter-3/linux-nginx-php_fpm/vars/main.yml:
--------------------------------------------------------------------------------
# vars file for linux-nginx-php_fpm (header previously mislabeled "tasks file")
# Domain (or IP) rendered into the nginx server_name directive.
server_domain_name_or_ip: "127.0.0.1"
--------------------------------------------------------------------------------
/chapter-3/mysql/handlers/main.yml:
--------------------------------------------------------------------------------
# handlers file for mysql (header previously mislabeled "tasks file")
- name: start mysql
  service:
    name: mysql
    state: started

- name: stop mysql
  service:
    name: mysql
    state: stopped

- name: restart mysql
  service:
    name: mysql
    state: restarted

# "startup" = enable the service at boot (does not start it now).
- name: startup mysql
  service:
    name: mysql
    enabled: yes
--------------------------------------------------------------------------------
/chapter-3/mysql/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for mysql
# Pre-seeds the debconf root password so the mysql-server install is
# non-interactive, installs the packages, and enables the service.

- name: set mysql root password
  debconf:
    name: mysql-server
    question: mysql-server/root_password
    value: "{{ mysql_root_password | quote }}"
    vtype: password

- name: confirm mysql root password
  debconf:
    name: mysql-server
    question: mysql-server/root_password_again
    value: "{{ mysql_root_password | quote }}"
    vtype: password

# FIX: "instaling" typo in the task name corrected.
- name: installing mysql in ubuntu
  apt:
    name: "{{ item }}"
    update_cache: yes
    state: present

  with_items:
    - mysql-server
    - mysql-client
    - python-mysqldb

# FIX: "sart" typo in the task name corrected.
- name: enable mysql on boot and start the service
  service:
    name: mysql
    enabled: yes
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/mysql/vars/main.yml:
--------------------------------------------------------------------------------
# vars file for mysql (header previously mislabeled "tasks file")
# Seeded into debconf before install; override per environment — do not
# ship this default to production.
mysql_root_password: "mysqlrootpassword"
--------------------------------------------------------------------------------
/chapter-3/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
# handlers file for nginx
- name: start nginx
  service:
    name: nginx
    state: started

- name: stop nginx
  service:
    name: nginx
    state: stopped

- name: restart nginx
  service:
    name: nginx
    state: restarted

- name: reload nginx
  service:
    name: nginx
    state: reloaded

# FIX: module name was misspelled "servicec", which made this handler fail
# at runtime whenever notified.
- name: startup nginx
  service:
    name: nginx
    enabled: yes
--------------------------------------------------------------------------------
/chapter-3/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for nginx: adds the upstream nginx.org repo, installs nginx,
# and deploys the site configuration.

- name: adding nginx signing key
  apt_key:
    url: "http://nginx.org/keys/nginx_signing.key"
    state: present

- name: adding sources.list deb url for nginx
  apt_repository:
    repo: "deb http://nginx.org/packages/mainline/ubuntu/ trusty nginx"
    state: present

- name: update the cache and install nginx server
  apt:
    name: nginx
    update_cache: yes
    state: present

# FIX: src was "templates/defautlt.conf.j2" — misspelled, and the template
# module already resolves relative paths against the role's templates/ dir.
# The actual file is templates/default.conf.j2.
- name: updating customized templates for nginx configuration
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dst }}"

  with_items:
    - { src: "default.conf.j2", dst: "/etc/nginx/conf.d/default.conf" }

  notify:
    - start nginx
    - startup nginx
--------------------------------------------------------------------------------
/chapter-3/nginx/templates/default.conf.j2:
--------------------------------------------------------------------------------
# Minimal static-only default server template for the nginx role.
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.html index.htm;

    server_name localhost;

    location / {
        try_files $uri $uri/ =404;
    }
}
--------------------------------------------------------------------------------
/chapter-3/php/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for php: installs PHP 5 packages, hardens php.ini, and deploys
# the nginx site config that forwards .php requests to php5-fpm.

- name: installing php
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes

  with_items:
    - php5
    - libapache2-mod-php5
    - php5-fpm
    - php5-mysql

# FIX: the replace module's parameter is `regexp`, not `regex` — the original
# task failed with an unsupported-parameter error.
- name: configuring php.ini for php processor
  replace:
    path: /etc/php5/fpm/php.ini
    regexp: ';cgi.fix_pathinfo=1'
    replace: 'cgi.fix_pathinfo=0'
    backup: yes

- name: enable php fpm on startup
  service:
    name: 'php5-fpm'
    enabled: yes

- name: restart the php fpm service
  service:
    name: php5-fpm
    state: restarted

# FIX: src was "defautlt.conf.j2" — the file shipped with this role is
# templates/default.conf (misspelled and no .j2 suffix in the original src).
- name: update the nginx configuration to support php-fpm
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dst }}"

  with_items:
    - { src: "default.conf", dst: "/etc/nginx/conf.d/default.conf" }

- name: restart the nginx
  service:
    name: nginx
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/php/templates/default.conf:
--------------------------------------------------------------------------------
# nginx site template for the php role: static files plus php5-fpm passthrough.
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.php index.html index.htm;

    server_name {{ server_domain_name_or_ip }};

    location / {
        try_files $uri $uri/ =404;
    }

    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }

    # try_files =404 avoids passing nonexistent paths to php-fpm (pairs with
    # cgi.fix_pathinfo=0 set by the role tasks).
    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass unix:/var/run/php5-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}
--------------------------------------------------------------------------------
/chapter-3/php/vars/main.yml:
--------------------------------------------------------------------------------
# vars file for php
# Domain (or IP) rendered into the nginx server_name directive.
server_domain_name_or_ip: "127.0.0.1"
--------------------------------------------------------------------------------
/chapter-3/wordpress-auto-updates/inventory:
--------------------------------------------------------------------------------
# Ansible inventory: hosts targeted by the wordpress auto-update play
[wordpress]
192.168.56.100
--------------------------------------------------------------------------------
/chapter-3/wordpress-auto-updates/site.yml:
--------------------------------------------------------------------------------
# Playbook: run a backup, then update wordpress core, themes and plugins via
# wp-cli as the site's system user, printing each command's output.
- name: wordpress core, themes and plugins auto update
  remote_user: ubuntu
  hosts: wordpress
  gather_facts: False
  become: True
  vars:
    wordpress_system_user: ubuntu
    wordpress_site_home_directory: /var/www/html

  tasks:
    # Run updates only after the hourly backup succeeds, so a broken update
    # can be rolled back.
    - name: ensure backup completed
      command: /etc/cron.hourly/duply-backup

    # FIX (also below): `become` takes a boolean; to run as another user the
    # correct keyword is `become_user`. The play-level become: True already
    # enables escalation.
    - name: updating wordpress core
      command: wp core update
      args:
        chdir: "{{ wordpress_site_home_directory }}"
      become_user: "{{ wordpress_system_user }}"
      register: wp_core_update_output
      ignore_errors: yes

    - name: wp core update output
      debug:
        msg: "{{ wp_core_update_output.stdout }}"

    - name: updating wordpress themes
      command: wp theme update --all
      args:
        chdir: "{{ wordpress_site_home_directory }}"
      become_user: "{{ wordpress_system_user }}"
      register: wp_theme_update_output
      ignore_errors: yes

    - name: wp themes update output
      debug:
        msg: "{{ wp_theme_update_output.stdout }}"

    - name: updating wordpress plugins
      command: wp plugin update --all
      args:
        chdir: "{{ wordpress_site_home_directory }}"
      become_user: "{{ wordpress_system_user }}"
      register: wp_plugin_update_output
      ignore_errors: yes

    - name: wp plugins update output
      debug:
        msg: "{{ wp_plugin_update_output.stdout }}"
--------------------------------------------------------------------------------
/chapter-3/wordpress/group_vars/wordpress.yml:
--------------------------------------------------------------------------------
# COMMON
remote_user_name: ubuntu

# NGINX ROLE
# Also consumed by the certbot role (duplicate definition removed below).
website_domain_name: security-hardening-with-ansible2.com

# MYSQL ROLE
mysql_root_password: randompassword
wordpress_database_name: automation
wordpress_mysql_username: chapmion
wordpress_mysql_user_password: doublerandompassword

# WORDPRESS ROLE
wordpress_system_user: ubuntu
wordpress_site_title: "Security Automation with Ansible"
wordpress_admin_user: hodor
wordpress_admin_user_password: strongpassword
wordpress_admin_email: wordpress@localhost.local
wordpress_site_home_directory: /var/www/html

# CERTBOT ROLE
# FIX: removed a duplicate `website_domain_name` key (YAML duplicate keys are
# invalid; most parsers silently keep the last value). Defined once above.
# NOTE(review): key name "cerbot_..." is misspelled but referenced with the
# same spelling by the certbot role — rename in both places if cleaned up.
cerbot_service_admin_email: emailid@domain.com

# DUPLY ROLE
# NOTE(review): "duply_bakup_name" is misspelled but used consistently by the
# duply role — rename in both places if cleaned up.
duply_bakup_name: wpsite
gpg_key: 12ABE2DE
gpg_key_password: superstrongpass
target_s3_location: s3://s3-us-west-2.amazonaws.com/backupfiles/site/
aws_access_key: pleaseupdate
aws_secret_access_key: pleaseupdatethis
source_backup_location: /var/www/html
max_backup_age: 1M
max_full_backups: 1
max_full_backup_age: 1M
--------------------------------------------------------------------------------
/chapter-3/wordpress/inventory:
--------------------------------------------------------------------------------
# Ansible inventory: hosts targeted by the wordpress site playbook
[wordpress]
192.168.56.100
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/certbot/tasks/generate-certs.yml:
--------------------------------------------------------------------------------
# Generates a Let's Encrypt certificate (once) and installs the HTTPS site
# config. Included from the certbot role's main task list.

# FIX: "chcek" typo corrected.
- name: check if we have generated a cert already
  stat:
    path: "/etc/letsencrypt/live/{{ website_domain_name }}/fullchain.pem"
  register: cert_stats

# Standalone mode binds port 80 itself, so this must run before nginx holds
# the port — or nginx must be stopped.
- name: run certbot to generate the certificates
  shell: "certbot certonly --standalone -d {{ website_domain_name }} --email {{ cerbot_service_admin_email }} --non-interactive --agree-tos"
  when: not cert_stats.stat.exists

- name: configuring site files
  template:
    src: website.conf
    dest: "/etc/nginx/sites-available/{{ website_domain_name }}"

- name: restart nginx
  service:
    name: nginx
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/certbot/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for certbot: install certbot from its PPA, then generate certs.
- name: adding certbot ppa
  apt_repository:
    repo: "ppa:certbot/certbot"

- name: install certbot
  apt:
    name: "{{ item }}"
    update_cache: yes
    state: present

  with_items:
    - python-certbot-nginx

# Cert generation and HTTPS site config live in a separate file.
- include: generate-certs.yml
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/certbot/templates/website.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80 default_server;
3 | server_name {{ website_domain_name }} www.{{ website_domain_name }};
4 | return 301 https://{{ website_domain_name }}$request_uri;
5 | }
6 |
7 | server {
8 | listen 443 ssl http2;
9 |
10 | server_name {{ website_domain_name }} www.{{ website_domain_name }};
11 | server_tokens off;
12 |
13 | root /var/www/html;
14 | index index.php index.htm index.html;
15 |
16 | ssl_certificate /etc/letsencrypt/live/{{ website_domain_name }}/fullchain.pem;
17 | ssl_certificate_key /etc/letsencrypt/live/{{ website_domain_name }}/privkey.pem;
18 |
19 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
20 | ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
21 | ssl_prefer_server_ciphers on;
22 |
23 | ssl_session_timeout 5m;
24 | ssl_session_cache shared:SSL:5m;
25 |
26 | ssl_stapling on;
27 | ssl_stapling_verify on;
28 | resolver 8.8.4.4 8.8.8.8 valid=300s;
29 | resolver_timeout 10s;
30 |
31 | add_header Strict-Transport-Security "max-age=15768000";
32 | add_header X-Frame-Options DENY;
33 | add_header X-Content-Type-Options nosniff;
34 | add_header X-XSS-Protection "1; mode=block";
35 |
36 | charset utf-8;
37 | add_header Fastcgi-Cache $upstream_cache_status;
38 |
39 | location / {
40 | #try_files $uri $uri/ =404;
41 | try_files $uri $uri/ /index.php$is_args$args;
42 | }
43 |
44 | location ~ \.php$ {
45 | include snippets/fastcgi-php.conf;
46 | fastcgi_pass unix:/run/php/php7.0-fpm.sock;
47 | }
48 |
49 | location ~* \.(css|gif|ico|jpeg|jpg|js|png)$ {
50 | expires max;
51 | log_not_found off;
52 | }
53 |
54 | # Common disclosure locations
55 | location ~ /(\.DS_Store|wp-config.php|readme.html.gz|readme.txt.gz|readme.html|readme.txt|error_log|license.txt|changelog|changelog.txt) {
56 | return 404;
57 | deny all;
58 | }
59 |
60 | # defeat wp fingerprinting : 4.4.2
61 | # WP emoji is something not generally used anyways
62 | location ~ ^/wp-includes/js/wp-emoji-loader.min.js {
63 | deny all;
64 | return 444;
65 | }
66 |
67 | # Blocking common files
68 | location ~ ^/(wp-signup.php|xmlrpc.php|install.php) {
69 | deny all;
70 | return 444;
71 | }
72 | # Blocking wp-json
73 | location ~ /wp-json/ {
74 | deny all;
75 | return 444;
76 | }
77 | # blocking data folder locations
78 | location ~ /wp-content/ {
79 | deny all;
80 | return 444;
81 | }
82 | # Deny access to wp-config.php file
83 | location = /wp-config.php {
84 | deny all;
85 | return 444;
86 | }
87 |
88 | # Deny access to specific files in the /wp-content/ directory (including sub-folders)
89 | location ~* ^/data/.*.(txt|md|exe)$ {
90 | deny all;
91 | return 444;
92 | }
93 |
94 | # block access to .htaccess and any file with .ht extension
95 | location ~ /\.ht {
96 | deny all;
97 | return 444;
98 |
99 | }
100 |
101 | # Don't allow any PHP file in uploads folder
102 | location ~* /(?:uploads|files)/.*\.php$ {
103 | deny all;
104 | return 444;
105 |
106 | }
107 |
108 | location ~* \.(engine|log|inc|info|install|make|module|profile|test|po|sh|.*sql|theme|tpl(\.php)?|xtmpl)$|^(\..*|Entries.*|Repository|Root|Tag|Template)$|\.php_
109 | {
110 | deny all;
111 | return 444;
112 | }
113 |
114 | #nocgi
115 | location ~* \.(pl|cgi|py|sh|lua)\$ {
116 | deny all;
117 | return 444;
118 | }
119 |
120 | #disallow
121 | location ~* (roundcube|webdav|smtp|http\:|soap|w00tw00t) {
122 | deny all;
123 | return 444;
124 | }
125 |
126 | # Username enumeration block
127 | if ($args ~ "^/?author=([0-9]*)"){
128 | return 403;
129 | }
130 |
131 | # Attachment enumeration block
132 | if ($query_string ~ "attachment_id=([0-9]*)"){
133 | return 403;
134 | }
135 |
136 | }
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
# Bootstrap task: newer Ubuntu images ship without Python 2, which Ansible
# modules need; `raw` runs without Python so it can install it. Run with
# gather_facts disabled.
- name: install python 2
  raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/duply/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for duply: install duply + boto, create the backup profile once,
# and deploy its configuration and GPG keys.

- name: installing duply
  apt:
    name: "{{ item }}"
    update_cache: yes
    state: present

  with_items:
    - python-boto
    - duply

# FIX: "chcek" typo corrected.
- name: check if we have generated a config already
  stat:
    path: "/home/{{ wordpress_system_user }}/.duply/{{ duply_bakup_name }}"
  register: duply_dir_stats

# FIX: `become` takes a boolean; use `become_user` to run the profile
# creation as the wordpress system user.
- name: create backup directories
  shell: "duply {{ duply_bakup_name }} create"
  become: true
  become_user: "{{ wordpress_system_user }}"
  when: not duply_dir_stats.stat.exists

- name: copy configurations
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: root
    group: root

  with_items:
    - { src: conf, dest: "/home/{{ wordpress_system_user }}/.duply/{{ duply_bakup_name }}/conf" }
    - { src: exclude, dest: "/home/{{ wordpress_system_user }}/.duply/{{ duply_bakup_name }}/exclude" }
    - { src: gpgkey.pub.asc, dest: "/home/{{ wordpress_system_user }}/.duply/{{ duply_bakup_name }}/gpgkey.{{ gpg_key }}.pub.asc" }
    - { src: gpgkey.sec.asc, dest: "/home/{{ wordpress_system_user }}/.duply/{{ duply_bakup_name }}/gpgkey.{{ gpg_key }}.sec.asc" }
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/duply/templates/conf:
--------------------------------------------------------------------------------
# duply profile configuration (templated): encryption key, S3 target,
# AWS credentials and retention policy.
GPG_KEY={{ gpg_key }}
GPG_PW={{ gpg_key_password }}

TARGET={{ target_s3_location }}

export AWS_ACCESS_KEY_ID={{ aws_access_key }}
export AWS_SECRET_ACCESS_KEY={{ aws_secret_access_key }}

SOURCE={{ source_backup_location }}

MAX_AGE={{ max_backup_age }}

MAX_FULL_BACKUPS={{ max_full_backups }}

MAX_FULLBKP_AGE={{ max_full_backup_age }}
DUPL_PARAMS="$DUPL_PARAMS --full-if-older-than $MAX_FULLBKP_AGE "

# Split backup archives into 250 MB volumes.
VOLSIZE=250
DUPL_PARAMS="$DUPL_PARAMS --volsize $VOLSIZE "
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/duply/templates/exclude:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/madhuakula/security-automation-with-ansible-2/a0171fbb347edacf5ffcdde550b57b8bf2ff3648/chapter-3/wordpress/roles/duply/templates/exclude
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/firewall-setup/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for firewall-setup: install ufw, default-deny inbound, allow
# ssh/http/https, then enable the firewall.

- name: installing ufw package
  apt:
    name: "ufw"
    update_cache: yes
    state: present

# FIX: bare `on` is a YAML 1.1 boolean (true); the ufw module's `logging`
# parameter expects the string choice "on" — quote it.
- name: enable ufw logging
  ufw:
    logging: "on"

- name: default ufw setting
  ufw:
    direction: "{{ item.direction }}"
    policy: "{{ item.policy }}"

  with_items:
    - { direction: 'incoming', policy: 'deny' }
    - { direction: 'outgoing', policy: 'allow' }

- name: allow required ports to access server
  ufw:
    rule: "{{ item.policy }}"
    port: "{{ item.port }}"
    proto: "{{ item.protocol }}"

  with_items:
    - { port: "22", protocol: "tcp", policy: "allow" }
    - { port: "80", protocol: "tcp", policy: "allow" }
    - { port: "443", protocol: "tcp", policy: "allow" }

- name: enable ufw
  ufw:
    state: enabled

- name: restart ufw and add to start up programs
  service:
    name: ufw
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/mysql-hardening/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for mysql-hardening (wordpress stack): drop anonymous users,
# password-protect every root@host entry, remove the test database.

# An empty user name matches MySQL's anonymous accounts.
- name: deletes anonymous mysql user
  mysql_user:
    user: ""
    state: absent
    login_password: "{{ mysql_root_password }}"
    login_user: root

- name: secures the mysql root user
  mysql_user:
    user: root
    password: "{{ mysql_root_password }}"
    host: "{{ item }}"
    login_password: "{{ mysql_root_password }}"
    login_user: root

  with_items:
    - 127.0.0.1
    - localhost
    - ::1

- name: removes the mysql test database
  mysql_db:
    db: test
    state: absent
    login_password: "{{ mysql_root_password }}"
    login_user: root

# Restart applies the privilege changes immediately.
- name: restart mysql service
  service:
    name: mysql
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/mysql/handlers/main.yml:
--------------------------------------------------------------------------------
# handlers file for the wordpress mysql role
- name: enable mysql
  service:
    name: mysql
    enabled: yes

- name: start mysql
  service:
    name: mysql
    state: started

- name: stop mysql
  service:
    name: mysql
    state: stopped

- name: restart mysql
  service:
    name: mysql
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/mysql/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for the wordpress mysql role: pre-seed the root password via
# debconf, install mysql non-interactively, then create the wordpress DB/user.

- name: set mysql root password
  debconf:
    name: mysql-server
    question: mysql-server/root_password
    value: "{{ mysql_root_password | quote }}"
    vtype: password

- name: confirm mysql root password
  debconf:
    name: mysql-server
    question: mysql-server/root_password_again
    value: "{{ mysql_root_password | quote }}"
    vtype: password

# FIX: converted key=value shorthand to native block YAML, consistent with
# the rest of this file.
- name: install mysqlserver
  apt:
    name: "{{ item }}"
    state: present

  with_items:
    - mysql-server
    - mysql-client
    - python-mysqldb

  notify:
    - enable mysql
    - start mysql

- include: wordpress-db-user-setup.yml
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/mysql/tasks/wordpress-db-user-setup.yml:
--------------------------------------------------------------------------------
# Creates the wordpress database and its dedicated MySQL user.
# Included from the mysql role's main task list.
- name: create new database for wordpress
  mysql_db:
    name: "{{ wordpress_database_name }}"
    state: present
    login_password: "{{ mysql_root_password }}"
    login_user: root

# Grants ALL on the wordpress database only (not global privileges).
- name: create new user for wordpress
  mysql_user:
    name: "{{ wordpress_mysql_username }}"
    password: "{{ wordpress_mysql_user_password }}"
    priv: '{{ wordpress_database_name }}.*:ALL'
    state: present
    login_password: "{{ mysql_root_password }}"
    login_user: root

  notify:
    - restart mysql
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
# tasks file for the wordpress nginx role: install nginx, deploy the site
# config, swap the default site for ours, and enable nginx on boot.

# FIX: was a single-item with_items loop; install the package directly.
- name: installing nginx server
  apt:
    name: nginx
    update_cache: yes
    state: present

- name: configuring site files
  template:
    src: website.conf
    dest: /etc/nginx/sites-available/{{ website_domain_name }}

- name: removing default symlink of nginx
  file:
    path: /etc/nginx/sites-enabled/default
    state: absent

- name: adding symlink for sites
  file:
    src: "/etc/nginx/sites-available/{{ website_domain_name }}"
    dest: "/etc/nginx/sites-enabled/{{ website_domain_name }}"
    state: link

- name: adding nginx to startup and restart
  service:
    name: nginx
    enabled: yes
    state: restarted
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/nginx/templates/website.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 |
4 | server_name {{ website_domain_name }} www.{{ website_domain_name }};
5 | server_tokens off;
6 |
7 | root {{ wordpress_site_home_directory }};
8 | index index.php index.htm index.html;
9 |
10 | add_header Strict-Transport-Security "max-age=15768000";
11 | add_header X-Frame-Options DENY;
12 | add_header X-Content-Type-Options nosniff;
13 | add_header X-XSS-Protection "1; mode=block";
14 |
15 | charset utf-8;
16 | add_header Fastcgi-Cache $upstream_cache_status;
17 |
18 | location / {
19 | #try_files $uri $uri/ =404;
20 | try_files $uri $uri/ /index.php$is_args$args;
21 | }
22 |
23 | location ~ \.php$ {
24 | include snippets/fastcgi-php.conf;
25 | fastcgi_pass unix:/run/php/php7.0-fpm.sock;
26 | }
27 |
28 | location ~* \.(css|gif|ico|jpeg|jpg|js|png)$ {
29 | expires max;
30 | log_not_found off;
31 | }
32 |
33 | # Common disclosure locations
34 | location ~ /(\.DS_Store|wp-config.php|readme.html.gz|readme.txt.gz|readme.html|readme.txt|error_log|license.txt|changelog|changelog.txt) {
35 | return 404;
36 | deny all;
37 | }
38 |
39 | # defeat wp fingerprinting : 4.4.2
40 | # WP emoji is something not generally used anyways
41 | location ~ ^/wp-includes/js/wp-emoji-loader.min.js {
42 | deny all;
43 | return 444;
44 | }
45 |
46 | # Blocking common files
47 | location ~ ^/(wp-signup.php|xmlrpc.php|install.php) {
48 | deny all;
49 | return 444;
50 | }
51 | # Blocking wp-json
52 | location ~ /wp-json/ {
53 | deny all;
54 | return 444;
55 | }
56 | # blocking data folder locations
57 | location ~ /wp-content/ {
58 | deny all;
59 | return 444;
60 | }
61 | # Deny access to wp-config.php file
62 | location = /wp-config.php {
63 | deny all;
64 | return 444;
65 | }
66 |
67 | # Deny access to specific files in the /wp-content/ directory (including sub-folders)
68 | location ~* ^/data/.*.(txt|md|exe)$ {
69 | deny all;
70 | return 444;
71 | }
72 |
73 | # block access to .htaccess and any file with .ht extension
74 | location ~ /\.ht {
75 | deny all;
76 | return 444;
77 |
78 | }
79 |
80 | # Don't allow any PHP file in uploads folder
81 | location ~* /(?:uploads|files)/.*\.php$ {
82 | deny all;
83 | return 444;
84 |
85 | }
86 |
87 | location ~* \.(engine|log|inc|info|install|make|module|profile|test|po|sh|.*sql|theme|tpl(\.php)?|xtmpl)$|^(\..*|Entries.*|Repository|Root|Tag|Template)$|\.php_
88 | {
89 | deny all;
90 | return 444;
91 | }
92 |
93 | #nocgi
94 | location ~* \.(pl|cgi|py|sh|lua)$ {
95 | deny all;
96 | return 444;
97 | }
98 |
99 | #disallow
100 | location ~* (roundcube|webdav|smtp|http\:|soap|w00tw00t) {
101 | deny all;
102 | return 444;
103 | }
104 |
105 | # Username enumeration block
106 | if ($args ~ "^/?author=([0-9]*)"){
107 | return 403;
108 | }
109 |
110 | # Attachment enumeration block
111 | if ($query_string ~ "attachment_id=([0-9]*)"){
112 | return 403;
113 | }
114 |
115 | }
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/php-fpm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing php
2 | apt:
3 | name: "{{ item }}"
4 | update_cache: yes
5 | state: present
6 |
7 | with_items:
8 | - php-fpm
9 | - php-mysql
10 | - php-curl
11 | - php-gd
12 | - php-mbstring
13 | - php-mcrypt
14 | - php-xml
15 | - php-xmlrpc
16 |
17 | - name: update the php configuration
18 | lineinfile:
19 | path: /etc/php/7.0/fpm/php.ini
20 | regexp: '^;?cgi\.fix_pathinfo='
21 | line: 'cgi.fix_pathinfo=0'
21 |
22 | - name: add php-fpm to start up and restart
23 | service:
24 | name: php7.0-fpm
25 | enabled: yes
26 | state: restarted
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/wordpress/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: downloading and keeping in path for wp-cli
2 | get_url:
3 | url: https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
4 | dest: /usr/local/bin/wp
5 | mode: 0755
6 |
7 | - include: site-setup.yml
--------------------------------------------------------------------------------
/chapter-3/wordpress/roles/wordpress/tasks/site-setup.yml:
--------------------------------------------------------------------------------
1 | - name: adding "{{ wordpress_system_user }}" as owner for "{{ wordpress_site_home_directory }}"
2 | file:
3 | path: "{{ wordpress_site_home_directory }}"
4 | state: directory
5 | owner: "{{ wordpress_system_user }}"
6 | group: "{{ wordpress_system_user }}"
7 |
8 | - name: downloading wp-core
9 | shell: wp core download
10 | become: yes
11 | become_user: "{{ wordpress_system_user }}"
12 | args:
13 | chdir: "{{ wordpress_site_home_directory }}"
14 | # register: wp_core_output
15 | ignore_errors: yes
16 | # failed_when: "'already' in wp_core_output.stderr"
17 |
18 | - name: configure wp core setup
19 | shell: wp core config --dbname="{{ wordpress_database_name }}" --dbuser="{{ wordpress_mysql_username }}" --dbpass="{{ wordpress_mysql_user_password }}"
20 | become_user: "{{ wordpress_system_user }}"
21 | args:
22 | chdir: "{{ wordpress_site_home_directory }}"
23 | ignore_errors: yes
24 |
25 | - name: configure wp core
26 | shell: wp core install --url="{{ website_domain_name }}" --title="{{ wordpress_site_title }}" --admin_user="{{ wordpress_admin_user }}" --admin_password="{{ wordpress_admin_user_password }}" --admin_email="{{ wordpress_admin_email }}"
27 | become_user: "{{ wordpress_system_user }}"
28 | args:
29 | chdir: "{{ wordpress_site_home_directory }}"
30 | ignore_errors: yes
31 |
32 | - name: disable wordpress editor changes
33 | lineinfile:
34 | path: "{{ wordpress_site_home_directory }}/wp-config.php"
35 | line: "{{ item }}"
36 |
37 | with_items:
38 | - define('FS_METHOD', 'direct');
39 | - define('DISALLOW_FILE_EDIT', true);
40 |
41 | - name: update the ownership permissions
42 | file:
43 | path: "{{ wordpress_site_home_directory }}"
44 | recurse: yes
45 | owner: "{{ wordpress_system_user }}"
46 | group: www-data
47 |
48 | - name: updated the permissions
49 | shell: find "{{ wordpress_site_home_directory }}" -type d -exec chmod g+s {} \;
50 |
51 | - name: update group permissions
52 | file:
53 | path: "{{ item }}"
54 | mode: g+w
55 | recurse: yes
56 |
57 | with_items:
58 | - "{{ wordpress_site_home_directory }}/wp-content"
59 | - "{{ wordpress_site_home_directory }}/wp-content/themes"
60 | - "{{ wordpress_site_home_directory }}/wp-content/plugins"
61 |
62 | - name: updating file and directory permissions
63 | shell: "{{ item }}"
64 |
65 | with_items:
66 | - find "{{ wordpress_site_home_directory }}" -type d -exec chmod 755 {} \;
67 | - find "{{ wordpress_site_home_directory }}" -type f -exec chmod 644 {} \;
--------------------------------------------------------------------------------
/chapter-3/wordpress/site.yml:
--------------------------------------------------------------------------------
1 | - name: setting up wordpress stack
2 | hosts: wordpress
3 | remote_user: "{{ remote_user_name }}"
4 | gather_facts: False
5 | become: True
6 |
7 | roles:
8 | - common
9 | - nginx
10 | - php-fpm
11 | - mysql
12 | - mysql-hardening
13 | - wordpress
14 | - certbot
15 | - duply
16 | - firewall-setup
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/inventory:
--------------------------------------------------------------------------------
1 | [monitor]
2 | 192.168.56.200
3 |
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up elastic beats on ubuntu 16.04
2 | hosts: monitor
3 | remote_user: ubuntu
4 | become: yes
5 | vars:
6 | logstash_server_ip: "192.168.56.102"
7 |
8 | roles:
9 | - filebeat
10 | - packetbeat
11 | - metricbeat
12 |
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/filebeat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start filebeat
2 | service:
3 | name: filebeat
4 | state: started
5 |
6 | - name: restart filebeat
7 | service:
8 | name: filebeat
9 | state: restarted
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/filebeat/tasks/configure-filebeat.yml:
--------------------------------------------------------------------------------
1 | - name: configuring filebeat.yml file
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "/etc/filebeat/{{ item.dst }}"
5 |
6 | with_items:
7 | - { src: 'filebeat.yml.j2', dst: 'filebeat.yml' }
8 |
9 | notify:
10 | - restart filebeat
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/filebeat/tasks/install-filebeat.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for filebeat
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing filebeat
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - apt-transport-https
19 | - filebeat
20 |
21 | - name: adding filebeat to the startup programs
22 | service:
23 | name: filebeat
24 | enabled: yes
25 |
26 | notify:
27 | - start filebeat
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/filebeat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: install-filebeat.yml
2 | - include: configure-filebeat.yml
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/filebeat/templates/filebeat.yml.j2:
--------------------------------------------------------------------------------
1 | filebeat:
2 | prospectors:
3 | -
4 | paths:
5 | - /var/log/auth.log
6 | # - /var/log/syslog
7 | # - /var/log/*.log
8 |
9 | document_type: sshlog
10 |
11 | -
12 | paths:
13 | - /var/log/nginx/access.log
14 |
15 | document_type: weblog
16 |
17 | registry_file: /var/lib/filebeat/registry
18 |
19 | output:
20 | logstash:
21 | hosts: ["{{ logstash_server_ip }}:5044"]
22 | bulk_max_size: 1024
23 |
24 | #ssl:
25 | # certificate_authorities: ["/etc/pki/tls/certs/logstash-forwarder.crt"]
26 |
27 | logging:
28 | files:
29 | rotateeverybytes: 10485760 # = 10MB
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/metricbeat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start metricbeat
2 | service:
3 | name: metricbeat
4 | state: started
5 |
6 | - name: restart metricbeat
7 | service:
8 | name: metricbeat
9 | state: restarted
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/metricbeat/tasks/configure-metricbeat.yml:
--------------------------------------------------------------------------------
1 | - name: configuring metricbeat.yml file
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "/etc/metricbeat/{{ item.dst }}"
5 |
6 | with_items:
7 | - { src: 'metricbeat.yml.j2', dst: 'metricbeat.yml' }
8 |
9 | notify:
10 | - restart metricbeat
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/metricbeat/tasks/install-metricbeat.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for metricbeat
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing metricbeat
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - apt-transport-https
19 | - metricbeat
20 |
21 | - name: adding metricbeat to the startup programs
22 | service:
23 | name: metricbeat
24 | enabled: yes
25 |
26 | notify:
27 | - start metricbeat
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/metricbeat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: install-metricbeat.yml
2 | - include: configure-metricbeat.yml
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/metricbeat/templates/metricbeat.yml.j2:
--------------------------------------------------------------------------------
1 | ###################### Metricbeat Configuration Example #######################
2 |
3 | # This file is an example configuration file highlighting only the most common
4 | # options. The metricbeat.full.yml file from the same directory contains all the
5 | # supported options with more comments. You can use it as a reference.
6 | #
7 | # You can find the full configuration reference here:
8 | # https://www.elastic.co/guide/en/beats/metricbeat/index.html
9 |
10 | #========================== Modules configuration ============================
11 | metricbeat.modules:
12 |
13 | #------------------------------- System Module -------------------------------
14 | - module: system
15 | metricsets:
16 | # CPU stats
17 | - cpu
18 |
19 | # System Load stats
20 | - load
21 |
22 | # Per CPU core stats
23 | #- core
24 |
25 | # IO stats
26 | - diskio
27 |
28 | # Per filesystem stats
29 | #- filesystem
30 |
31 | # File system summary stats
32 | - fsstat
33 |
34 | # Memory stats
35 | - memory
36 |
37 | # Network stats
38 | - network
39 |
40 | # Per process stats
41 | #- process
42 |
43 | # Sockets (linux only)
44 | #- socket
45 | enabled: true
46 | period: 10s
47 | processes: ['.*']
48 |
49 |
50 |
51 | #================================ General =====================================
52 |
53 | # The name of the shipper that publishes the network data. It can be used to group
54 | # all the transactions sent by a single shipper in the web interface.
55 | #name:
56 |
57 | # The tags of the shipper are included in their own field with each
58 | # transaction published.
59 | #tags: ["service-X", "web-tier"]
60 |
61 | # Optional fields that you can specify to add additional information to the
62 | # output.
63 | #fields:
64 | # env: staging
65 |
66 | #================================ Outputs =====================================
67 |
68 | # Configure what outputs to use when sending the data collected by the beat.
69 | # Multiple outputs may be used.
70 |
71 | #-------------------------- Elasticsearch output ------------------------------
72 | #output.elasticsearch:
73 | # Array of hosts to connect to.
74 | #hosts: ["localhost:9200"]
75 |
76 | # Optional protocol and basic auth credentials.
77 | #protocol: "https"
78 | #username: "elastic"
79 | #password: "changeme"
80 |
81 | #----------------------------- Logstash output --------------------------------
82 | output.logstash:
83 | # The Logstash hosts
84 | hosts: ["{{ logstash_server_ip }}:5044"]
85 |
86 | #ssl:
87 | # certificate_authorities: ["certificate.crt"]
88 | # Optional SSL. By default is off.
89 | # List of root certificates for HTTPS server verifications
90 | #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
91 |
92 | # Certificate for SSL client authentication
93 | #ssl.certificate: "/etc/pki/client/cert.pem"
94 |
95 | # Client Certificate Key
96 | #ssl.key: "/etc/pki/client/cert.key"
97 |
98 | #================================ Logging =====================================
99 |
100 | # Sets log level. The default log level is info.
101 | # Available log levels are: critical, error, warning, info, debug
102 | #logging.level: debug
103 |
104 | # At debug level, you can selectively enable logging only for some components.
105 | # To enable all selectors use ["*"]. Examples of other selectors are "beat",
106 | # "publish", "service".
107 | #logging.selectors: ["*"]
108 |
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/packetbeat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start packetbeat
2 | service:
3 | name: packetbeat
4 | state: started
5 |
6 | - name: restart packetbeat
7 | service:
8 | name: packetbeat
9 | state: restarted
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/packetbeat/tasks/configure-packetbeat.yml:
--------------------------------------------------------------------------------
1 | - name: configuring packetbeat.yml file
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "/etc/packetbeat/{{ item.dst }}"
5 |
6 | with_items:
7 | - { src: 'packetbeat.yml.j2', dst: 'packetbeat.yml' }
8 |
9 | notify:
10 | - restart packetbeat
11 |
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/packetbeat/tasks/install-packetbeat.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for packetbeat
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing packetbeat
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - apt-transport-https
19 | - packetbeat
19 |
20 | - name: adding packetbeat to the startup programs
21 | service:
22 | name: packetbeat
23 | enabled: yes
24 |
25 | notify:
26 | - start packetbeat
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/packetbeat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: install-packetbeat.yml
2 | - include: configure-packetbeat.yml
--------------------------------------------------------------------------------
/chapter-4/beats-for-elastic-stack/roles/packetbeat/templates/packetbeat.yml.j2:
--------------------------------------------------------------------------------
1 | #################### Packetbeat Configuration Example #########################
2 |
3 | # This file is an example configuration file highlighting only the most common
4 | # options. The packetbeat.full.yml file from the same directory contains all the
5 | # supported options with more comments. You can use it as a reference.
6 | #
7 | # You can find the full configuration reference here:
8 | # https://www.elastic.co/guide/en/beats/packetbeat/index.html
9 |
10 | #============================== Network device ================================
11 |
12 | # Select the network interface to sniff the data. On Linux, you can use the
13 | # "any" keyword to sniff on all connected interfaces.
14 | packetbeat.interfaces.device: any
15 |
16 | #================================== Flows =====================================
17 |
18 | # Set `enabled: false` or comment out all options to disable flows reporting.
19 | packetbeat.flows:
20 | # Set network flow timeout. Flow is killed if no packet is received before being
21 | # timed out.
22 | timeout: 30s
23 |
24 | # Configure reporting period. If set to -1, only killed flows will be reported
25 | period: 10s
26 |
27 | #========================== Transaction protocols =============================
28 |
29 | packetbeat.protocols.icmp:
30 | # Enable ICMPv4 and ICMPv6 monitoring. Default: false
31 | enabled: true
32 |
33 | packetbeat.protocols.amqp:
34 | # Configure the ports where to listen for AMQP traffic. You can disable
35 | # the AMQP protocol by commenting out the list of ports.
36 | ports: [5672]
37 |
38 | packetbeat.protocols.cassandra:
39 | #Cassandra port for traffic monitoring.
40 | ports: [9042]
41 |
42 | packetbeat.protocols.dns:
43 | # Configure the ports where to listen for DNS traffic. You can disable
44 | # the DNS protocol by commenting out the list of ports.
45 | ports: [53]
46 |
47 | # include_authorities controls whether or not the dns.authorities field
48 | # (authority resource records) is added to messages.
49 | include_authorities: true
50 |
51 | # include_additionals controls whether or not the dns.additionals field
52 | # (additional resource records) is added to messages.
53 | include_additionals: true
54 |
55 | packetbeat.protocols.http:
56 | # Configure the ports where to listen for HTTP traffic. You can disable
57 | # the HTTP protocol by commenting out the list of ports.
58 | ports: [80, 8080, 8000, 5000, 8002]
59 |
60 | packetbeat.protocols.memcache:
61 | # Configure the ports where to listen for memcache traffic. You can disable
62 | # the Memcache protocol by commenting out the list of ports.
63 | ports: [11211]
64 |
65 | packetbeat.protocols.mysql:
66 | # Configure the ports where to listen for MySQL traffic. You can disable
67 | # the MySQL protocol by commenting out the list of ports.
68 | ports: [3306]
69 |
70 | packetbeat.protocols.pgsql:
71 | # Configure the ports where to listen for Pgsql traffic. You can disable
72 | # the Pgsql protocol by commenting out the list of ports.
73 | ports: [5432]
74 |
75 | packetbeat.protocols.redis:
76 | # Configure the ports where to listen for Redis traffic. You can disable
77 | # the Redis protocol by commenting out the list of ports.
78 | ports: [6379]
79 |
80 | packetbeat.protocols.thrift:
81 | # Configure the ports where to listen for Thrift-RPC traffic. You can disable
82 | # the Thrift-RPC protocol by commenting out the list of ports.
83 | ports: [9090]
84 |
85 | packetbeat.protocols.mongodb:
86 | # Configure the ports where to listen for MongoDB traffic. You can disable
87 | # the MongoDB protocol by commenting out the list of ports.
88 | ports: [27017]
89 |
90 | packetbeat.protocols.nfs:
91 | # Configure the ports where to listen for NFS traffic. You can disable
92 | # the NFS protocol by commenting out the list of ports.
93 | ports: [2049]
94 |
95 | #================================ General =====================================
96 |
97 | # The name of the shipper that publishes the network data. It can be used to group
98 | # all the transactions sent by a single shipper in the web interface.
99 | #name:
100 |
101 | # The tags of the shipper are included in their own field with each
102 | # transaction published.
103 | #tags: ["service-X", "web-tier"]
104 |
105 | # Optional fields that you can specify to add additional information to the
106 | # output.
107 | #fields:
108 | # env: staging
109 |
110 | #================================ Outputs =====================================
111 |
112 | # Configure what outputs to use when sending the data collected by the beat.
113 | # Multiple outputs may be used.
114 |
115 | #-------------------------- Elasticsearch output ------------------------------
116 | #output.elasticsearch:
117 | # Array of hosts to connect to.
118 | #hosts: ["localhost:9200"]
119 |
120 | # Optional protocol and basic auth credentials.
121 | #protocol: "https"
122 | #username: "elastic"
123 | #password: "changeme"
124 |
125 | #----------------------------- Logstash output --------------------------------
126 | output.logstash:
127 | # The Logstash hosts
128 | hosts: ["{{ logstash_server_ip }}:5044"]
129 |
130 | # Optional SSL. By default is off.
131 | # List of root certificates for HTTPS server verifications
132 | #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
133 |
134 | # Certificate for SSL client authentication
135 | #ssl.certificate: "/etc/pki/client/cert.pem"
136 |
137 | # Client Certificate Key
138 | #ssl.key: "/etc/pki/client/cert.key"
139 |
140 | #================================ Logging =====================================
141 |
142 | # Sets log level. The default log level is info.
143 | # Available log levels are: critical, error, warning, info, debug
144 | #logging.level: debug
145 |
146 | # At debug level, you can selectively enable logging only for some components.
147 | # To enable all selectors use ["*"]. Examples of other selectors are "beat",
148 | # "publish", "service".
149 | #logging.selectors: ["*"]
150 |
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing node run time and npm
2 | apt:
3 | name: "{{ item }}"
4 | state: present
5 | update_cache: yes
6 |
7 | with_items:
8 | - nodejs
9 | - npm
10 |
11 | - name: installing serverless package
12 | npm:
13 | name: "{{ item }}"
14 | global: yes
15 | state: present
16 |
17 | with_items:
18 | - serverless
19 | - aws-sdk
20 |
21 | - name: copy the setup files
22 | template:
23 | src: "{{ item.src }}"
24 | dest: "{{ item.dst }}"
25 |
26 | with_items:
27 | - { src: 'config.js.j2', dst: '/opt/serverless/config.js' }
28 | - { src: 'handler.js.j2', dst: '/opt/serverless/handler.js' }
29 | - { src: 'iamRoleStatements.json.j2', dst: '/opt/serverless/iamRoleStatements.json' }
30 | - { src: 'initDb.js.j2', dst: '/opt/serverless/initDb.js' }
31 | - { src: 'serverless.yml.j2', dst: '/opt/serverless/serverless.yml' }
32 | - { src: 'aws-credentials.j2', dst: '~/.aws/credentials' }
33 |
34 | - name: create dynamo db table
35 | command: "node initDb.js"
36 | args:
37 | chdir: /opt/serverless/
38 |
39 | - name: deploy the serverless
40 | command: "serverless deploy"
41 | args:
42 | chdir: /opt/serverless/
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/aws-credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id=YOUR_ACCESS_KEY_ID
3 | aws_secret_access_key=YOUR_SECRET_ACCESS_KEY
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/config.js.j2:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | region: "us-east-1", // AWS Region to deploy in
3 | accessToken: "YOUR_R4NDOM_S3CR3T_ACCESS_TOKEN_GOES_HERE", // Accesstoken to make requests to blacklist
4 | aclLimit: 20, // Maximum number of acl rules
5 | ruleStartId: 10, // Starting id for acl entries
6 | aclId: "YOUR_ACL_ID", // AclId that you want to be managed
7 | tableName: "blacklist_ip", // DynamoDB table that will be created
8 | ruleValidity: 5 // Validity of Blacklist rule in minutes
9 | }
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/handler.js.j2:
--------------------------------------------------------------------------------
1 | const config = require('./config.js')
2 | const AWS = require('aws-sdk')
3 |
4 | var dynamodb = new AWS.DynamoDB();
5 | var ec2 = new AWS.EC2();
6 | const docClient = new AWS.DynamoDB.DocumentClient({region: config.region})
7 | var actoken = config.accessToken
8 | var aclLimit = config.aclLimit
9 | var ruleStartId = config.ruleStartId
10 | var ruleValidity = config.ruleValidity
11 | var aclID = config.aclId
12 | var table_name = config.tableName
13 |
14 | function sendResponse (res,statusCode,body){
15 | const response = {
16 | statusCode: statusCode,
17 | body: body
18 | }
19 | res(null, response);
20 | }
21 |
22 | function addACLRule (ip,ruleNo){
23 | var params = {
24 | CidrBlock: ip + "/32",
25 | Egress: false,
26 | NetworkAclId: aclID,
27 | Protocol: "-1",
28 | RuleAction: "deny",
29 | RuleNumber: ruleNo
30 | };
31 | ec2.createNetworkAclEntry(params, function(err, data) {
32 | if (err) console.log(err, err.stack);
33 | else console.log(data);
34 | });
35 |
36 | }
37 |
38 | function delACLRule (ruleNo){
39 | var params = {
40 | Egress: false,
41 | NetworkAclId: aclID,
42 | RuleNumber: ruleNo
43 | };
44 | ec2.deleteNetworkAclEntry(params, function(err, data) {
45 | if (err) console.log(err, err.stack); // an error occurred
46 | else console.log(data); // successful response
47 | });
48 | }
49 |
50 | module.exports.blacklistip = function(event,context,response){
51 |
52 | var ip = event.queryStringParameters.ip
53 | var accessToken = event.queryStringParameters.accessToken
54 |
55 | //Core logic (No Change)
56 | if(accessToken==actoken){
57 | if(ip){
58 | var timestamp = Math.floor(Date.now()/1000)
59 | var expirymin = Math.ceil(timestamp/60) + ruleValidity
60 |
61 | var params = {
62 | TableName: table_name,
63 | ProjectionExpression: "id,ip",
64 | }
65 |
66 | //Scanning for id's to decide which to use for the rule
67 | docClient.scan(params, onScan);
68 |
69 | function onScan (err,data){
70 | if(err){
71 | console.log(err)
72 | }
73 | ips = data.Items
74 | console.log(ips)
75 | iparray = ips.map(function(vals) { return vals.ip })
76 | indexvalofip = iparray.indexOf(ip)
77 | if(indexvalofip!=-1){
78 | var params = {
79 | TableName: table_name,
80 | Key:{
81 | "id": ips[indexvalofip].id
82 | },
83 | UpdateExpression: "set expirymin = :expmin",
84 | ExpressionAttributeValues:{
85 | ":expmin":expirymin
86 | },
87 | ReturnValues:"UPDATED_NEW"
88 | };
89 |
90 | docClient.update(params, function(err, data) {
91 | if (err) {
92 | console.error("Unable to update item. Error JSON:", JSON.stringify(err, null, 2));
93 | sendResponse(response,200,'error')
94 | } else {
95 | console.log("Blocking expiry increased for IP: ", ip);
96 | sendResponse(response,200,'expiryextended')
97 | }
98 | });
99 | }else{
100 | if(data.Items.length>=aclLimit){
101 | console.log('Rule Limit Reached!')
102 | sendResponse(response,500,'rulelimitreached')
103 | }else{
104 | ids = data.Items
105 | ids = ids.map(function(vals) { return vals.id })
106 |
107 | //Checking for missing Id to use in ACL Rule
108 | for(var i = ruleStartId; i < ruleStartId + aclLimit ; i++) {
109 | if(ids.indexOf(i)==-1) {
110 | //Adding to Table
111 | var params = {
112 | Item: {
113 | id: i,
114 | expirymin: expirymin,
115 | ip: ip,
116 | timestamp: timestamp,
117 | },
118 | TableName: table_name
119 | }
120 | //Adding to ACL
121 | console.log('Blocking '+ ip + ' with Rule no: '+ i )
122 | docClient.put(params, function(err,data){
123 | if(err){
124 | console.log(err)
125 | }
126 | })
127 | addACLRule(ip,i)
128 | break;
129 | }
130 | }
131 | sendResponse(response,200,'blocked')
132 | }
133 | }
134 | }
135 | }else{
136 | sendResponse(response,400,'badrequest')
137 | }
138 | }else{
139 | sendResponse(response,401,'unauthorized')
140 | }
141 | }//)
142 |
143 | module.exports.handleexpiry = handleexpiry
144 | function handleexpiry (){
145 | var expirymin = Math.floor(Date.now()/1000/60)
146 | console.log('expirymin:'+expirymin)
147 | var params = {
148 | TableName: table_name,
149 | ProjectionExpression: "id,ip",
150 | IndexName: "expirymin_index",
151 | KeyConditionExpression: "expirymin = :expmin",
152 | ExpressionAttributeValues: {
153 | ":expmin": expirymin
154 | }
155 | }
156 |
157 | docClient.query(params, function (err,data){
158 | if(err){
159 | console.log('DBERR:' + err)
160 | }else{
161 | data.Items.forEach(function(item){
162 | // AWS ACL Query
163 | var params = {
164 | TableName: table_name,
165 | Key:{
166 | "id":item.id
167 | }
168 | };
169 | // Deleting from DB
170 | docClient.delete(params, function(err, data) {
171 | if (err) {
172 | console.error("Unable to delete item. Error JSON:", JSON.stringify(err, null, 2));
173 | } else {
174 | // Delete from ACL
175 | delACLRule(item.id)
176 | console.log("Deleting Blacklist Rule for: "+ item.ip)
177 | }
178 | });
179 | })
180 | }
181 | })
182 | }
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/iamRoleStatements.json.j2:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "Action": "ec2:*",
4 | "Effect": "Allow",
5 | "Resource": "*"
6 | },
7 | {
8 | "Effect": "Allow",
9 | "Action": "elasticloadbalancing:*",
10 | "Resource": "*"
11 | },
12 | {
13 | "Effect": "Allow",
14 | "Action": "cloudwatch:*",
15 | "Resource": "*"
16 | },
17 | {
18 | "Effect": "Allow",
19 | "Action": "autoscaling:*",
20 | "Resource": "*"
21 | },
22 | {
23 | "Effect": "Allow",
24 | "Action": "dynamodb:*",
25 | "Resource": "*"
26 | }
27 | ]
28 |
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/initDb.js.j2:
--------------------------------------------------------------------------------
1 | var config = require('./config.js')
2 |
3 | var AWS = require("aws-sdk")
4 | AWS.config.update({
5 | region: config.region
6 | });
7 |
8 | var dynamodb = new AWS.DynamoDB();
9 |
10 | var params = {
11 | "AttributeDefinitions": [
12 | {
13 | "AttributeName": "id",
14 | "AttributeType": "N"
15 | },
16 | {
17 | "AttributeName": "ip",
18 | "AttributeType": "S"
19 | },
20 | {
21 | "AttributeName": "expirymin",
22 | "AttributeType": "N"
23 | }
24 | ],
25 | "GlobalSecondaryIndexes": [
26 | {
27 | "IndexName": "ip_index",
28 | "KeySchema": [
29 | {
30 | "AttributeName": "ip",
31 | "KeyType": "HASH"
32 | }
33 | ],
34 | "Projection": {
35 | "ProjectionType": "ALL"
36 | },
37 | "ProvisionedThroughput": {
38 | "ReadCapacityUnits": 100,
39 | "WriteCapacityUnits": 100
40 | }
41 | },
42 | {
43 | "IndexName": "expirymin_index",
44 | "KeySchema": [
45 | {
46 | "AttributeName": "expirymin",
47 | "KeyType": "HASH"
48 | }
49 | ],
50 | "Projection": {
51 | "ProjectionType": "ALL"
52 | },
53 | "ProvisionedThroughput": {
54 | "ReadCapacityUnits": 100,
55 | "WriteCapacityUnits": 100
56 | }
57 | }
58 | ],
59 | "KeySchema": [
60 | {
61 | "AttributeName": "id",
62 | "KeyType": "HASH"
63 | }
64 | ],
65 | "ProvisionedThroughput": {
66 | "ReadCapacityUnits": 100,
67 | "WriteCapacityUnits": 100
68 | },
69 | "TableName": config.tableName
70 | }
71 |
72 | dynamodb.createTable(params, function(err, data) {
73 | if (err) {
74 | console.error("Unable to create table. Error JSON:", JSON.stringify(err, null, 2));
75 | } else {
76 | console.log("Created table. Table description JSON:", JSON.stringify(data, null, 2));
77 | }
78 | });
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/aws-serverless/templates/serverless.yml.j2:
--------------------------------------------------------------------------------
1 | service: automated-defence
2 |
3 | # stage/region are ignored at the top level by the Serverless Framework;
4 | # they must be nested under `provider`. External files are pulled in with
5 | # the ${file(...)} variable syntax, not JSON-schema `$ref`.
6 | provider:
7 |   name: aws
8 |   runtime: nodejs6.10
9 |   stage: dev
10 |   region: us-east-1
11 |
12 | functions:
13 | blacklist:
14 | handler: handler.blacklistip
15 | events:
16 | - http:
17 | path: blacklistip
18 | method: get
19 |
20 | handleexpiry:
21 | handler: handler.handleexpiry
22 | events:
23 | - schedule: rate(1 minute)
24 |
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing prerequisites for elastalert
2 | apt:
3 | name: "{{ item }}"
4 | state: present
5 | update_cache: yes
6 |
7 | with_items:
8 | - python-pip
9 | - python-dev
10 | - libffi-dev
11 | - libssl-dev
12 | - python-setuptools
13 | - build-essential
14 |
15 | - name: installing elastalert
16 | pip:
17 | name: elastalert
18 |
19 | - name: creating elastalert directories
20 | file:
21 | path: "{{ item }}"
22 | state: directory
23 | mode: 0755
24 |
25 | with_items:
26 | - /opt/elastalert/rules
27 | - /opt/elastalert/config
28 |
29 | - name: creating elastalert configuration
30 | template:
31 | src: "{{ item.src }}"
32 | dest: "{{ item.dst }}"
33 |
34 | with_items:
35 | - { src: 'elastalert-config.j2', dst: '/opt/elastalert/config/config.yml' }
36 | - { src: 'elastalert-service.j2', dst: '/lib/systemd/system/elastalert.service' }
37 | - { src: 'elastalert-sshrule.j2', dst: '/opt/elastalert/rules/ssh-bruteforce.yml' }
38 |
39 | - name: enable elastalert service
40 | service:
41 | name: elastalert
42 | state: started
43 | enabled: yes
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/setup/templates/elastalert-config.j2:
--------------------------------------------------------------------------------
1 | rules_folder: "/opt/elastalert/rules"
2 | run_every:
3 | seconds: 30
4 | buffer_time:
5 | minutes: 5
6 | es_host: localhost
7 | es_port: 9200
8 | writeback_index: elastalert_status
9 | alert_time_limit:
10 | days: 2
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/setup/templates/elastalert-service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=elastalert
3 | After=multi-user.target
4 |
5 | [Service]
6 | Type=simple
7 | WorkingDirectory=/opt/elastalert
8 | ExecStart=/usr/local/bin/elastalert --config /opt/elastalert/config/config.yml
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter-4/elastalert/roles/setup/templates/elastalert-sshrule.j2:
--------------------------------------------------------------------------------
1 | es_host: localhost
2 | es_port: 9200
3 | name: "SSH Bruteforce attack alert"
4 | type: frequency
5 | index: filebeat-*
6 | num_events: 20
7 | timeframe:
8 | minutes: 1
9 |
10 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
11 |
12 | filter:
13 | - query:
14 | query_string:
15 | query: '_type:sshlog AND login:failed AND (username: "ubuntu" OR username: "root")'
16 |
17 | alert:
18 | - slack:
19 | slack_webhook_url: "https://hooks.slack.com/services/xxxxx"
20 | slack_username_override: "attack-bot"
21 | slack_emoji_override: "robot_face"
22 |
23 | - command: ["/usr/bin/curl", "https://xxxxxxxxxxx.execute-api.us-east-1.amazonaws.com/dev/zzzzzzzzzzzzzz/ip/inframonitor/%(ip)s"]
24 |
25 | realert:
26 | minutes: 0
--------------------------------------------------------------------------------
/chapter-4/elastalert/site.yml:
--------------------------------------------------------------------------------
1 | - name: setting up elastalert & automated defence in aws
2 | hosts: elastic-stack
3 | remote_user: ubuntu
4 | become: yes
5 | gather_facts: no
6 |
7 | roles:
8 | - setup
9 | - aws-serverless
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/group_vars/elastic-stack.yml:
--------------------------------------------------------------------------------
1 | #---------- ANSIBLE PLAYBOOK CONFIGURATION -------
2 |
3 | remote_user_login_name: "ubuntu"
4 |
5 |
6 | #---------- NGINX REVERSE PROXY CONFIGURATION ----
7 |
8 | basic_auth_username: "elkadmin"
9 | basic_auth_password: "elkadmin"
10 |
11 |
12 | #---------- ELASTICSEARCH CONFIGURATION ----------
13 |
14 | elasticsearch_cluster_name: "elastic-stack"
15 | elasticsearch_node_name: "node1"
16 | elasticsearch_network_host_address: "127.0.0.1"
17 | elasticsearch_backups_repo_path: "/var/backups/elasticsearch"
18 |
19 |
20 | elasticsearch_jvm_heap_Xms: "Xms1g"
21 | elasticsearch_jvm_heap_Xmx: "Xmx1g"
22 |
23 | #---------- LOGSTASH CONFIGURATION ---------------
24 | logstash_server_domain_name: "example.com"
25 |
26 |
27 | #---------- KIBANA CONFIGURATION -----------------
28 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/inventory:
--------------------------------------------------------------------------------
1 | [elastic-stack]
2 | 192.168.33.222
3 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up elastic stack on ubuntu 16.04
2 | hosts: elastic-stack
3 | remote_user: "{{ remote_user_login_name }}"
4 | become: yes
5 | gather_facts: no
6 |
7 | roles:
8 | - common
9 | - elasticsearch
10 | - logstash
11 | - kibana
12 | - nginx-reverse-proxy
13 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install python 2
2 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
3 |
4 | - name: accepting oracle java license agreement
5 | debconf:
6 | name: 'oracle-java8-installer'
7 | question: 'shared/accepted-oracle-license-v1-1'
8 | value: 'true'
9 | vtype: 'select'
10 |
11 | - name: adding ppa repo for oracle java by webupd8team
12 | apt_repository:
13 | repo: 'ppa:webupd8team/java'
14 | state: present
15 | update_cache: yes
16 |
17 | - name: installing java nginx apache2-utils and git
18 | apt:
19 | name: "{{ item }}"
20 | state: present
21 | update_cache: yes
22 |
23 | with_items:
24 | - python-software-properties
25 | - oracle-java8-installer
26 | - nginx
27 | - apache2-utils
28 | - python-pip
29 | - python-passlib
30 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start elasticsearch
2 | service:
3 | name: elasticsearch
4 | state: started
5 |
6 | - name: restart elasticsearch
7 | service:
8 | name: elasticsearch
9 | state: restarted
10 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/tasks/configure-elasticsearch.yml:
--------------------------------------------------------------------------------
1 | - name: creating elasticsearch backup repo directory at {{ elasticsearch_backups_repo_path }}
2 | file:
3 | path: "{{ elasticsearch_backups_repo_path }}"
4 | state: directory
5 | mode: 0755
6 | owner: elasticsearch
7 | group: elasticsearch
8 |
9 | - name: configuring elasticsearch.yml file
10 | template:
11 | src: "{{ item.src }}"
12 | dest: "/etc/elasticsearch/{{ item.dst }}"
13 |
14 | with_items:
15 | - { src: 'elasticsearch.yml.j2', dst: 'elasticsearch.yml' }
16 | - { src: 'jvm.options.j2', dst: 'jvm.options' }
17 |
18 | notify:
19 | - restart elasticsearch
20 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/tasks/install-elasticsearch.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for elasticsearch
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing elasticsearch
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - elasticsearch
19 |
20 | - name: adding elasticsearch to the startup programs
21 | service:
22 | name: elasticsearch
23 | enabled: yes
24 |
25 | notify:
26 | - start elasticsearch
27 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: install-elasticsearch.yml
2 | - include: configure-elasticsearch.yml
3 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/templates/elasticsearch.yml.j2:
--------------------------------------------------------------------------------
1 | # ======================== Elasticsearch Configuration =========================
2 | #
3 | # NOTE: Elasticsearch comes with reasonable defaults for most settings.
4 | # Before you set out to tweak and tune the configuration, make sure you
5 | # understand what are you trying to accomplish and the consequences.
6 | #
7 | # The primary way of configuring a node is via this file. This template lists
8 | # the most important settings you may want to configure for a production cluster.
9 | #
10 | # Please see the documentation for further information on configuration options:
11 | #
12 | #
13 | # ---------------------------------- Cluster -----------------------------------
14 | #
15 | # Use a descriptive name for your cluster:
16 | #
17 | cluster.name: "{{ elasticsearch_cluster_name }}"
18 | #
19 | # ------------------------------------ Node ------------------------------------
20 | #
21 | # Use a descriptive name for the node:
22 | #
23 | node.name: "{{ elasticsearch_node_name }}"
24 | #
25 | # Add custom attributes to the node:
26 | #
27 | # node.rack: r1
28 | #
29 | # ----------------------------------- Paths ------------------------------------
30 | #
31 | # Path to directory where to store the data (separate multiple locations by comma):
32 | #
33 | # path.data: /path/to/data
34 | #
35 | # Path to log files:
36 | #
37 | # path.logs: /path/to/logs
38 | #
39 | # ----------------------------------- Memory -----------------------------------
40 | #
41 | # Lock the memory on startup:
42 | #
43 | # bootstrap.mlockall: true
44 | #
45 | # Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
46 | # available on the system and that the owner of the process is allowed to use this limit.
47 | #
48 | # Elasticsearch performs poorly when the system is swapping the memory.
49 | #
50 | # ---------------------------------- Network -----------------------------------
51 | #
52 | # Set the bind address to a specific IP (IPv4 or IPv6):
53 | #
54 | network.host: "{{ elasticsearch_network_host_address }}"
55 | #
56 | # Set a custom port for HTTP:
57 | #
58 | # http.port: 9200
59 | #
60 | # For more information, see the documentation at:
61 | #
62 | #
63 | # --------------------------------- Discovery ----------------------------------
64 | #
65 | # Pass an initial list of hosts to perform discovery when new node is started:
66 | # The default list of hosts is ["127.0.0.1", "[::1]"]
67 | #
68 | # discovery.zen.ping.unicast.hosts: ["host1", "host2"]
69 | #
70 | # Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
71 | #
72 | # discovery.zen.minimum_master_nodes: 3
73 | #
74 | # For more information, see the documentation at:
75 | #
76 | #
77 | # ---------------------------------- Gateway -----------------------------------
78 | #
79 | # Block initial recovery after a full cluster restart until N nodes are started:
80 | #
81 | # gateway.recover_after_nodes: 3
82 | #
83 | # For more information, see the documentation at:
84 | #
85 | #
86 | # ---------------------------------- Various -----------------------------------
87 | #
88 | # Disable starting multiple nodes on a single system:
89 | #
90 | # node.max_local_storage_nodes: 1
91 | #
92 | # Require explicit names when deleting indices:
93 | #
94 | # action.destructive_requires_name: true
95 | #
96 | #------------------------------- Custom configuration --------------------------
97 | path.repo: "{{ elasticsearch_backups_repo_path }}"
98 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/elasticsearch/templates/jvm.options.j2:
--------------------------------------------------------------------------------
1 | ## JVM configuration
2 |
3 | ################################################################
4 | ## IMPORTANT: JVM heap size
5 | ################################################################
6 | ##
7 | ## You should always set the min and max JVM heap
8 | ## size to the same value. For example, to set
9 | ## the heap to 4 GB, set:
10 | ##
11 | ## -Xms4g
12 | ## -Xmx4g
13 | ##
14 | ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
15 | ## for more information
16 | ##
17 | ################################################################
18 |
19 | # Xms represents the initial size of total heap space
20 | # Xmx represents the maximum size of total heap space
21 |
22 | -{{ elasticsearch_jvm_heap_Xms }}
23 | -{{ elasticsearch_jvm_heap_Xmx }}
24 |
25 | ################################################################
26 | ## Expert settings
27 | ################################################################
28 | ##
29 | ## All settings below this section are considered
30 | ## expert settings. Don't tamper with them unless
31 | ## you understand what you are doing
32 | ##
33 | ################################################################
34 |
35 | ## GC configuration
36 | -XX:+UseConcMarkSweepGC
37 | -XX:CMSInitiatingOccupancyFraction=75
38 | -XX:+UseCMSInitiatingOccupancyOnly
39 |
40 | ## optimizations
41 |
42 | # disable calls to System#gc
43 | -XX:+DisableExplicitGC
44 |
45 | # pre-touch memory pages used by the JVM during initialization
46 | -XX:+AlwaysPreTouch
47 |
48 | ## basic
49 |
50 | # force the server VM (remove on 32-bit client JVMs)
51 | -server
52 |
53 | # explicitly set the stack size (reduce to 320k on 32-bit client JVMs)
54 | -Xss1m
55 |
56 | # set to headless, just in case
57 | -Djava.awt.headless=true
58 |
59 | # ensure UTF-8 encoding by default (e.g. filenames)
60 | -Dfile.encoding=UTF-8
61 |
62 | # use our provided JNA always versus the system one
63 | -Djna.nosys=true
64 |
65 | # use old-style file permissions on JDK9
66 | -Djdk.io.permissionsUseCanonicalPath=true
67 |
68 | # flags to keep Netty from being unsafe
69 | -Dio.netty.noUnsafe=true
70 | -Dio.netty.noKeySetOptimization=true
71 |
72 | # log4j 2
73 | -Dlog4j.shutdownHookEnabled=false
74 | -Dlog4j2.disable.jmx=true
75 | -Dlog4j.skipJansi=true
76 |
77 | ## heap dumps
78 |
79 | # generate a heap dump when an allocation from the Java heap fails
80 | # heap dumps are created in the working directory of the JVM
81 | -XX:+HeapDumpOnOutOfMemoryError
82 |
83 | # specify an alternative path for heap dumps
84 | # ensure the directory exists and has sufficient space
85 | #-XX:HeapDumpPath=${heap.dump.path}
86 |
87 | ## GC logging
88 |
89 | #-XX:+PrintGCDetails
90 | #-XX:+PrintGCTimeStamps
91 | #-XX:+PrintGCDateStamps
92 | #-XX:+PrintClassHistogram
93 | #-XX:+PrintTenuringDistribution
94 | #-XX:+PrintGCApplicationStoppedTime
95 |
96 | # log GC status to a file with time stamps
97 | # ensure the directory exists
98 | #-Xloggc:${loggc}
99 |
100 | # Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
101 | # If documents were already indexed with unquoted fields in a previous version
102 | # of Elasticsearch, some operations may throw errors.
103 | #
104 | # WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
105 | # only for migration purposes.
106 | #-Delasticsearch.json.allow_unquoted_field_names=true
107 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/kibana/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start kibana
2 | service:
3 | name: kibana
4 | state: started
5 |
6 | - name: restart kibana
7 | service:
8 | name: kibana
9 | state: restarted
10 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/kibana/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for kibana
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing kibana
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - kibana
19 |
20 | - name: adding kibana to the startup programs
21 | service:
22 | name: kibana
23 | enabled: yes
24 |
25 | notify:
26 | - start kibana
27 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start logstash
2 | service:
3 | name: logstash
4 | state: started
5 |
6 | - name: restart logstash
7 | service:
8 | name: logstash
9 | state: restarted
10 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/tasks/configure-logstash.yml:
--------------------------------------------------------------------------------
1 | - name: logstash configuration files
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "/etc/logstash/conf.d/{{ item.dst }}"
5 |
6 | with_items:
7 | - { src: '02-beats-input.conf.j2', dst: '02-beats-input.conf' }
8 | - { src: '10-sshlog-filter.conf.j2', dst: '10-sshlog-filter.conf' }
9 | - { src: '11-weblog-filter.conf.j2', dst: '11-weblog-filter.conf' }
10 | - { src: '30-elasticsearch-output.conf.j2', dst: '30-elasticsearch-output.conf' }
11 |
12 | notify:
13 | - restart logstash
14 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/tasks/install-logstash.yml:
--------------------------------------------------------------------------------
1 | - name: adding elastic gpg key for logstash
2 | apt_key:
3 | url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
4 | state: present
5 |
6 | - name: adding the elastic repository
7 | apt_repository:
8 | repo: "deb https://artifacts.elastic.co/packages/5.x/apt stable main"
9 | state: present
10 |
11 | - name: installing logstash
12 | apt:
13 | name: "{{ item }}"
14 | state: present
15 | update_cache: yes
16 |
17 | with_items:
18 | - logstash
19 |
20 | - name: adding logstash to the startup programs
21 | service:
22 | name: logstash
23 | enabled: yes
24 |
25 | notify:
26 | - start logstash
27 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: install-logstash.yml
2 | - include: configure-logstash.yml
3 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/templates/02-beats-input.conf.j2:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 5044
4 | #ssl => true
5 | #ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
6 | #ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
7 | }
8 | }
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/templates/10-sshlog-filter.conf.j2:
--------------------------------------------------------------------------------
1 | filter {
2 | if [type] == "sshlog" {
3 | grok {
4 | match => [
5 | "message", "%{SYSLOGTIMESTAMP:syslog_date} %{SYSLOGHOST:syslog_host} %{DATA:syslog_program}(?:\[%{POSINT}\])?: %{WORD:login} password for %{USERNAME:username} from %{IP:ip} %{GREEDYDATA}",
6 | "message", "%{SYSLOGTIMESTAMP:syslog_date} %{SYSLOGHOST:syslog_host} %{DATA:syslog_program}(?:\[%{POSINT}\])?: message repeated 2 times: \[ %{WORD:login} password for %{USERNAME:username} from %{IP:ip} %{GREEDYDATA}",
7 | "message", "%{SYSLOGTIMESTAMP:syslog_date} %{SYSLOGHOST:syslog_host} %{DATA:syslog_program}(?:\[%{POSINT}\])?: %{WORD:login} password for invalid user %{USERNAME:username} from %{IP:ip} %{GREEDYDATA}",
8 | "message", "%{SYSLOGTIMESTAMP:syslog_date} %{SYSLOGHOST:syslog_host} %{DATA:syslog_program}(?:\[%{POSINT}\])?: %{WORD:login} %{WORD:auth_method} for %{USERNAME:username} from %{IP:ip} %{GREEDYDATA}"
9 | ]
10 | }
11 |
12 | date {
13 | match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
14 | locale => en
15 | }
16 |
17 | geoip {
18 | source => "ip"
19 | }
20 | }
21 | }
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/templates/11-weblog-filter.conf.j2:
--------------------------------------------------------------------------------
1 | filter {
2 | if [type] == "weblog" {
3 | grok {
4 | match => {
5 | "message" => '%{IPORHOST:clientip} %{USER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{DATA:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response:int} (?:-|%{NUMBER:bytes:int}) %{QS:referrer} %{QS:agent}'
6 | }
7 | }
8 |
9 | date {
10 | match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
11 | locale => en
12 | }
13 |
14 | geoip {
15 | source => "clientip"
16 | }
17 |
18 | useragent {
19 | source => "agent"
20 | target => "useragent"
21 | }
22 | }
23 | }
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/logstash/templates/30-elasticsearch-output.conf.j2:
--------------------------------------------------------------------------------
1 | output {
2 | elasticsearch {
3 | hosts => ["localhost:9200"]
4 | #sniffing => true
5 | manage_template => false
6 | index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
7 | document_type => "%{[@metadata][type]}"
8 | }
9 | }
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/nginx-reverse-proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart nginx
2 | service:
3 | name: nginx
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/nginx-reverse-proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: htpasswd generation
2 | #command: htpasswd -c /etc/nginx/htpasswd.users
3 | htpasswd:
4 | path: "/etc/nginx/htpasswd.users"
5 | name: "{{ basic_auth_username }}"
6 | password: "{{ basic_auth_password }}"
7 | owner: root
8 | group: root
9 | mode: 0644
10 |
11 | - name: nginx virtualhost configuration
12 | template:
13 | src: "templates/nginxdefault.j2"
14 | dest: "/etc/nginx/sites-available/default"
15 |
16 | notify:
17 | - restart nginx
18 |
--------------------------------------------------------------------------------
/chapter-4/elastic-stack/roles/nginx-reverse-proxy/templates/nginxdefault.j2:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 |
4 | server_name localhost;
5 |
6 | auth_basic "Restricted Access";
7 | auth_basic_user_file /etc/nginx/htpasswd.users;
8 |
9 | location / {
10 | proxy_pass http://localhost:5601;
11 | proxy_http_version 1.1;
12 | proxy_set_header Upgrade $http_upgrade;
13 | proxy_set_header Connection 'upgrade';
14 | proxy_set_header Host $host;
15 | proxy_cache_bypass $http_upgrade;
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/chapter-5/docker/inventory:
--------------------------------------------------------------------------------
1 | [zap]
--------------------------------------------------------------------------------
/chapter-5/docker/site.yml:
--------------------------------------------------------------------------------
1 | - name: installing docker on ubuntu
2 | hosts: zap
3 | remote_user: ubuntu
4 | gather_facts: no
5 | become: yes
6 | vars:
7 | apt_repo_data: "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
8 | apt_gpg_key: https://download.docker.com/linux/ubuntu/gpg
9 |
10 | tasks:
11 | - name: adding docker gpg key
12 | apt_key:
13 | url: "{{ apt_gpg_key }}"
14 | state: present
15 |
16 | - name: add docker repository
17 | apt_repository:
18 | repo: "{{ apt_repo_data }}"
19 | state: present
20 |
21 | - name: installing docker-ce
22 | apt:
23 | name: docker-ce
24 | state: present
25 | update_cache: yes
26 |
27 | - name: install python-pip
28 | apt:
29 | name: python-pip
30 | state: present
31 |
32 | - name: install docker-py
33 | pip:
34 | name: "{{ item }}"
35 | state: present
36 |
37 | with_items:
38 | - docker-py
39 | - docker
40 |
--------------------------------------------------------------------------------
/chapter-5/dvsw-playbook/inventory:
--------------------------------------------------------------------------------
1 | [dvsw]
2 | 192.168.33.111
--------------------------------------------------------------------------------
/chapter-5/dvsw-playbook/site.yml:
--------------------------------------------------------------------------------
1 | - name: setting up DVWS container
2 | hosts: dvsw
3 | remote_user: ubuntu
4 | gather_facts: no
5 | become: yes
6 | vars:
7 | dvws_image_name: cyrivs89/web-dvws
8 |
9 | tasks:
10 | - name: pulling {{ dvws_image_name }} container
11 | docker_image:
12 | name: "{{ dvws_image_name }}"
13 |
14 | - name: running dvws container
15 | docker_container:
16 | name: dvws
17 | image: "{{ dvws_image_name }}"
18 | interactive: yes
19 | state: started
20 | ports:
21 | - "80:80"
--------------------------------------------------------------------------------
/chapter-5/jenkins/inventory:
--------------------------------------------------------------------------------
1 | [jenkins]
2 | 192.168.56.101
--------------------------------------------------------------------------------
/chapter-5/jenkins/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing jenkins in ubuntu 16.04
2 | hosts: jenkins
3 | remote_user: ubuntu
4 | gather_facts: False
5 | become: yes
6 |
7 | tasks:
8 | - name: adding jenkins gpg key
9 | apt_key:
10 | url: 'https://pkg.jenkins.io/debian/jenkins-ci.org.key'
11 | state: present
12 |
13 | - name: jenkins repository to system
14 | apt_repository:
15 | repo: 'deb http://pkg.jenkins.io/debian-stable binary/'
16 | state: present
17 |
18 | - name: installing jenkins
19 | apt:
20 | name: jenkins
21 | state: present
22 | update_cache: yes
23 |
24 | - name: adding jenkins to startup
25 | service:
26 | name: jenkins
27 | state: started
28 | enabled: yes
29 |
30 | - name: printing jenkins default administration password
31 | command: cat "/var/lib/jenkins/secrets/initialAdminPassword"
32 | register: jenkins_default_admin_password
33 |
34 | - debug:
35 | msg: "{{ jenkins_default_admin_password.stdout }}"
--------------------------------------------------------------------------------
/chapter-5/zap-baseline-scan/inventory:
--------------------------------------------------------------------------------
1 | [zap]
2 | 192.168.56.100
--------------------------------------------------------------------------------
/chapter-5/zap-baseline-scan/site.yml:
--------------------------------------------------------------------------------
1 | - name: Running OWASP ZAP Baseline Scan
2 | hosts: zap
3 | remote_user: ubuntu
4 | gather_facts: no
5 | become: yes
6 | vars:
7 | owasp_zap_image_name: owasp/zap2docker-weekly
8 | website_url: http://192.168.33.111
9 | reports_location: /zapdata
10 | scan_name: owasp-zap-base-line-scan-dvws
11 |
12 | tasks:
13 | - name: adding write permissions to reports directory
14 | file:
15 | path: "{{ reports_location }}"
16 | state: directory
17 | owner: root
18 | group: root
19 | recurse: yes
20 | mode: 0770
21 |
22 | - name: running owasp zap baseline scan container against "{{ website_url }}"
23 | docker_container:
24 | name: "{{ scan_name }}"
25 | image: "{{ owasp_zap_image_name }}"
26 | interactive: yes
27 | auto_remove: yes
28 | state: started
29 | volumes: "{{ reports_location }}:/zap/wrk:rw"
30 | command: "zap-baseline.py -t {{ website_url }} -r {{ scan_name }}_report.html"
31 |
32 | - name: getting raw output of the scan
33 | raw: "docker logs -f {{ scan_name }}"
34 | register: scan_output
35 |
36 | - debug:
37 | msg: "{{ scan_output }}"
38 |
--------------------------------------------------------------------------------
/chapter-5/zap-full-scan/inventory:
--------------------------------------------------------------------------------
1 | [zap]
2 | 192.168.56.100
--------------------------------------------------------------------------------
/chapter-5/zap-full-scan/site.yml:
--------------------------------------------------------------------------------
1 | # Run the OWASP ZAP full scan container against a target URL and collect
2 | # the HTML report into a host-mounted directory.
3 | - name: Running OWASP ZAP Full Scan
4 |   hosts: zap
5 |   remote_user: ubuntu
6 |   gather_facts: false
7 |   become: true
8 |   vars:
9 |     owasp_zap_image_name: owasp/zap2docker-weekly
10 |     website_url: http://192.168.33.111
11 |     reports_location: /zapdata/
12 |     scan_name: owasp-zap-full-scan-dvws
13 |
14 |   tasks:
15 |     # The container writes the report into the mounted volume as the zap
16 |     # user, so the host directory must be writable by it.
17 |     - name: adding write permissions to reports directory
18 |       file:
19 |         path: "{{ reports_location }}"
20 |         state: directory
21 |         owner: root
22 |         group: root
23 |         recurse: true
24 |         # Quoted: unquoted 0777 is parsed by YAML as the integer 511, which
25 |         # Ansible would apply as the wrong permission bits.
26 |         mode: "0777"
27 |
28 |     - name: running owasp zap full scan container against "{{ website_url }}"
29 |       docker_container:
30 |         name: "{{ scan_name }}"
31 |         image: "{{ owasp_zap_image_name }}"
32 |         interactive: true
33 |         auto_remove: true
34 |         state: started
35 |         volumes: "{{ reports_location }}:/zap/wrk:rw"
36 |         command: "zap-full-scan.py -t {{ website_url }} -r {{ scan_name }}_report.html"
37 |
38 |     # raw docker CLI call: follows the container logs until the scan ends
39 |     - name: getting raw output of the scan
40 |       raw: "docker logs -f {{ scan_name }}"
41 |       register: scan_output
42 |
43 |     - debug:
44 |         msg: "{{ scan_output }}"
--------------------------------------------------------------------------------
/chapter-5/zap-setup-playbook/inventory:
--------------------------------------------------------------------------------
1 | [zap]
2 | 192.168.56.100
--------------------------------------------------------------------------------
/chapter-5/zap-setup-playbook/site.yml:
--------------------------------------------------------------------------------
1 | # Pull the OWASP ZAP image and start it as a daemon exposing the API on
2 | # port 8080. The API key is disabled -- suitable for a closed lab only.
3 | - name: setting up owasp zap container
4 |   hosts: zap
5 |   remote_user: ubuntu
6 |   gather_facts: false
7 |   become: true
8 |   vars:
9 |     owasp_zap_image_name: owasp/zap2docker-weekly
10 |
11 |   tasks:
12 |     - name: pulling {{ owasp_zap_image_name }} container
13 |       docker_image:
14 |         name: "{{ owasp_zap_image_name }}"
15 |
16 |     - name: running owasp zap container
17 |       docker_container:
18 |         name: owasp-zap
19 |         image: "{{ owasp_zap_image_name }}"
20 |         interactive: true
21 |         state: started
22 |         user: zap
23 |         # api.disablekey=true means no authentication on the ZAP API --
24 |         # never expose this port beyond the lab network.
25 |         command: zap.sh -daemon -host 0.0.0.0 -port 8080 -config api.disablekey=true -config api.addrs.addr.name=.* -config api.addrs.addr.regex=true
26 |         ports:
27 |           - "8080:8080"
--------------------------------------------------------------------------------
/chapter-6/autonessus/inventory:
--------------------------------------------------------------------------------
1 | [nessus]
2 | 192.168.33.109
3 |
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/listpolices/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Print the scan policies configured on the Nessus server.
2 | - name: list current policies using autoNessus
3 |   command: autoNessus -p
4 |   register: list_policies_output
5 |
6 | - debug:
7 |     msg: "{{ list_policies_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/listscans/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Print the scans (and their numeric IDs) known to the Nessus server.
2 | - name: list current scans and IDs using autoNessus
3 |   command: autoNessus -l
4 |   register: list_scans_output
5 |
6 | - debug:
7 |     msg: "{{ list_scans_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/pausescan/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Pause a running scan; scan_id is supplied by this role's vars.
2 | - name: pausing nessus scan "{{ scan_id }}" using autoNessus
3 |   command: autoNessus -pS {{ scan_id }}
4 |   register: pause_scan_output
5 |
6 | - debug:
7 |     msg: "{{ pause_scan_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/pausescan/vars/main.yml:
--------------------------------------------------------------------------------
1 | scan_id: 17  # ID of the Nessus scan to pause (list IDs with the listscans role)
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/resumescan/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Resume a previously paused scan; scan_id is supplied by this role's vars.
2 | - name: resume nessus scan "{{ scan_id }}" using autoNessus
3 |   command: autoNessus -sR {{ scan_id }}
4 |   register: resume_scan_output
5 |
6 | - debug:
7 |     msg: "{{ resume_scan_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/resumescan/vars/main.yml:
--------------------------------------------------------------------------------
1 | scan_id: 17  # ID of the Nessus scan to resume (list IDs with the listscans role)
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Install autoNessus (a python wrapper around the Nessus REST API) and
2 | # inject the Nessus credentials from the role vars into the script.
3 | - name: installing python-pip
4 |   apt:
5 |     name: python-pip
6 |     update_cache: true
7 |     state: present
8 |
9 | - name: install python requests
10 |   pip:
11 |     name: requests
12 |
13 | - name: setting up autonessus
14 |   get_url:
15 |     url: "https://github.com/redteamsecurity/AutoNessus/raw/master/autoNessus.py"
16 |     dest: /usr/bin/autoNessus
17 |     # Quoted: unquoted 0755 is parsed by YAML as the integer 493.
18 |     mode: "0755"
19 |
20 | # no_log keeps the substituted credentials out of the play output.
21 | - name: updating the credentials
22 |   replace:
23 |     path: /usr/bin/autoNessus
24 |     regexp: "{{ item.src }}"
25 |     replace: "{{ item.dst }}"
26 |     backup: true
27 |   no_log: true
28 |   with_items:
29 |     - { src: "token = ''", dst: "token = '{{ nessus_user_token }}'" }
30 |     - { src: "url = 'https://localhost:8834'", dst: "url = '{{ nessus_url }}'" }
31 |     - { src: "username = 'xxxxx'", dst: "username = '{{ nessus_user_name }}'" }
32 |     - { src: "password = 'xxxxx'", dst: "password = '{{ nessus_user_password }}'" }
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/setup/vars/main.yml:
--------------------------------------------------------------------------------
1 | nessus_user_token: ""  # optional API token; left empty to use username/password
2 | nessus_user_name: "bbbbbbb" # Required: replace with a real Nessus username
3 | nessus_user_password: "ccccccc" # Required: replace with the matching password
4 | nessus_url: "https://localhost:8834"  # Nessus server endpoint as seen from the target host
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/startscan/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Launch a scan; scan_id is supplied by this role's vars.
2 | - name: starting nessus scan "{{ scan_id }}" using autoNessus
3 |   command: autoNessus -sS {{ scan_id }}
4 |   register: start_scan_output
5 |
6 | - debug:
7 |     msg: "{{ start_scan_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/startscan/vars/main.yml:
--------------------------------------------------------------------------------
1 | scan_id: 17  # ID of the Nessus scan to start (list IDs with the listscans role)
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/stopscan/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Stop a running scan; scan_id is supplied by this role's vars.
2 | - name: stopping nessus scan "{{ scan_id }}" using autoNessus
3 |   command: autoNessus -sP {{ scan_id }}
4 |   register: stop_scan_output
5 |
6 | - debug:
7 |     msg: "{{ stop_scan_output.stdout_lines }}"
--------------------------------------------------------------------------------
/chapter-6/autonessus/roles/stopscan/vars/main.yml:
--------------------------------------------------------------------------------
1 | scan_id: 17  # ID of the Nessus scan to stop (list IDs with the listscans role)
--------------------------------------------------------------------------------
/chapter-6/autonessus/site.yml:
--------------------------------------------------------------------------------
1 | # Install autoNessus, then exercise the full scan lifecycle role by role.
2 | - name: installing autonessus
3 |   hosts: nessus
4 |   remote_user: ubuntu
5 |   gather_facts: false
6 |   become: true
7 |
8 |   roles:
9 |     - setup
10 |     # NOTE(review): the role directory is spelled "listpolices" (sic); the
11 |     # name here must match the directory, so the typo is kept.
12 |     - listpolices
13 |     - listscans
14 |     - startscan
15 |     - pausescan
16 |     - resumescan
17 |     - stopscan
--------------------------------------------------------------------------------
/chapter-6/nessus-restapi/main.yml:
--------------------------------------------------------------------------------
1 | # Export a Nessus scan report over the REST API, wait for the export to
2 | # finish rendering, then download it locally.
3 | - name: working with nessus rest api
4 |   connection: local
5 |   hosts: localhost
6 |   gather_facts: false
7 |   vars:
8 |     scan_id: 17
9 |     nessus_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 |     nessus_secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
11 |     nessus_url: https://192.168.33.109:8834
12 |     nessus_report_format: html
13 |
14 |   tasks:
15 |     - name: export the report for given scan "{{ scan_id }}"
16 |       uri:
17 |         url: "{{ nessus_url }}/scans/{{ scan_id }}/export"
18 |         method: POST
19 |         # self-signed certificate on the Nessus appliance
20 |         validate_certs: false
21 |         headers:
22 |           X-ApiKeys: "accessKey={{ nessus_access_key }}; secretKey={{ nessus_secret_key }}"
23 |         body: "format={{ nessus_report_format }}&chapters=vuln_by_host;remediations"
24 |       register: export_request
25 |
26 |     - debug:
27 |         msg: "File id is {{ export_request.json.file }} and scan id is {{ scan_id }}"
28 |
29 |     # Poll until the export is ready; previously the play downloaded
30 |     # immediately, which fails while the report is still being generated.
31 |     - name: check the report status for "{{ export_request.json.file }}"
32 |       uri:
33 |         url: "{{ nessus_url }}/scans/{{ scan_id }}/export/{{ export_request.json.file }}/status"
34 |         method: GET
35 |         validate_certs: false
36 |         headers:
37 |           X-ApiKeys: "accessKey={{ nessus_access_key }}; secretKey={{ nessus_secret_key }}"
38 |       register: report_status
39 |       until: report_status.json.status == "ready"
40 |       retries: 10
41 |       delay: 5
42 |
43 |     - debug:
44 |         msg: "Report status is {{ report_status.json.status }}"
45 |
46 |     - name: downloading the report locally
47 |       uri:
48 |         url: "{{ nessus_url }}/scans/{{ scan_id }}/export/{{ export_request.json.file }}/download"
49 |         method: GET
50 |         validate_certs: false
51 |         headers:
52 |           X-ApiKeys: "accessKey={{ nessus_access_key }}; secretKey={{ nessus_secret_key }}"
53 |         return_content: true
54 |         dest: "./{{ scan_id }}_{{ export_request.json.file }}.{{ nessus_report_format }}"
55 |       register: report_output
56 |
57 |     - debug:
58 |         msg: "Report can be found at ./{{ scan_id }}_{{ export_request.json.file }}.{{ nessus_report_format }}"
--------------------------------------------------------------------------------
/chapter-6/nessus-setup/group_vars/nessus.yml:
--------------------------------------------------------------------------------
1 | remote_user_name: ubuntu  # SSH user referenced by site.yml's remote_user
2 | nessus_download_url: "http://downloads.nessus.org/nessus3dl.php?file=Nessus-6.11.2-ubuntu1110_amd64.deb&licence_accept=yes&t=84ed6ee87f926f3d17a218b2e52b61f0"  # vendor .deb; the t= token is version-specific, refresh it for newer builds
--------------------------------------------------------------------------------
/chapter-6/nessus-setup/inventory:
--------------------------------------------------------------------------------
1 | [nessus]
2 | 192.168.56.101
--------------------------------------------------------------------------------
/chapter-6/nessus-setup/roles/setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Bootstrap python on a bare image (raw runs without modules), then install
2 | # the Nessus server from the vendor .deb and start its daemon.
3 | - name: install python 2
4 |   raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
5 |
6 | - name: downloading the package and installing
7 |   apt:
8 |     deb: "{{ nessus_download_url }}"
9 |
10 | - name: start the nessus daemon
11 |   service:
12 |     name: nessusd
13 |     state: started
14 |     enabled: true
--------------------------------------------------------------------------------
/chapter-6/nessus-setup/site.yml:
--------------------------------------------------------------------------------
1 | # Entry point: install the Nessus server on hosts in the [nessus] group.
2 | - name: installing nessus server
3 |   hosts: nessus
4 |   remote_user: "{{ remote_user_name }}"
5 |   gather_facts: false
6 |   become: true
7 |
8 |   roles:
9 |     - setup
--------------------------------------------------------------------------------
/chapter-7/aws-cis-benchmarks/main.yml:
--------------------------------------------------------------------------------
1 | # Run the prowler AWS CIS benchmark locally and render its output as HTML.
2 | - name: AWS CIS Benchmarks playbook
3 |   hosts: localhost
4 |   become: true
5 |   vars:
6 |     # NOTE(review): supply real credentials via ansible-vault or the
7 |     # environment -- never commit them in plain text.
8 |     aws_access_key: XXXXXXXX
9 |     aws_secret_key: XXXXXXXX
10 |
11 |   tasks:
12 |     - name: installing aws cli and ansi2html
13 |       pip:
14 |         # pip accepts a list: one transaction instead of a per-item loop
15 |         name:
16 |           - awscli
17 |           - ansi2html
18 |
19 |     - name: downloading and setting up prowler
20 |       get_url:
21 |         url: https://raw.githubusercontent.com/Alfresco/prowler/master/prowler
22 |         dest: /usr/bin/prowler
23 |         # Quoted: unquoted 0755 is parsed by YAML as the integer 493.
24 |         mode: "0755"
25 |
26 |     # gather_facts stays on (default) because ansible_date_time is used below.
27 |     - name: running prowler full scan
28 |       shell: "prowler | ansi2html -la > ./aws-cis-report-{{ ansible_date_time.epoch }}.html"
29 |       environment:
30 |         AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
31 |         AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
32 |
33 |     - name: AWS CIS Benchmarks report downloaded
34 |       debug:
35 |         msg: "Report can be found at ./aws-cis-report-{{ ansible_date_time.epoch }}.html"
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/inventory:
--------------------------------------------------------------------------------
1 | [proxyserver]
2 | proxy ansible_host=192.168.100.100 ansible_user=ubuntu ansible_password=vagrant
3 |
4 | [blue]
5 | blueserver ansible_host=192.168.100.10 ansible_user=ubuntu ansible_password=vagrant
6 |
7 | [green]
8 | greenserver ansible_host=192.168.100.20 ansible_user=ubuntu ansible_password=vagrant
9 |
10 | [webservers:children]
11 | blue
12 | green
13 |
14 | [prod:children]
15 | webservers
16 | proxyserver
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/main.yml:
--------------------------------------------------------------------------------
1 | # Provision the proxy and both web backends, then deploy the blue and green
2 | # page variants. haproxy.cfg.j2 decides which backend receives traffic.
3 | - name: running common role
4 |   hosts: prod
5 |   gather_facts: false
6 |   become: true
7 |   # Quoted so the percentage is an unambiguous string for Ansible to parse.
8 |   serial: "100%"
9 |   roles:
10 |     - common
11 |
12 | - name: running haproxy role
13 |   hosts: proxyserver
14 |   become: true
15 |   roles:
16 |     - haproxy
17 |
18 | - name: running webserver role
19 |   hosts: webservers
20 |   become: true
21 |   serial: "100%"
22 |   roles:
23 |     - nginx
24 |
25 | - name: updating blue code
26 |   hosts: blue
27 |   become: true
28 |   roles:
29 |     - bluecode
30 |
31 | - name: updating green code
32 |   hosts: green
33 |   become: true
34 |   roles:
35 |     - greencode
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/bluecode/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Publish the blue variant of the landing page to the nginx web root.
2 | - name: adding code in blue machine
3 |   template:
4 |     src: index.html
5 |     dest: '/var/www/html/index.html'
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/bluecode/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Welcome to Blue Deployment
4 |
5 |
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Baseline for every prod host: bootstrap python (raw works without modules
2 | # on the target) and install shared tooling.
3 | - name: installing python if not installed
4 |   raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
5 |
6 | - name: updating and installing git, curl
7 |   apt:
8 |     # a list installs both packages in a single apt transaction
9 |     name:
10 |       - git
11 |       - curl
12 |     state: present
13 |     update_cache: true
14 |
15 | # Also we can include common any monitoring and security hardening tasks
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/greencode/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Publish the green variant of the landing page to the nginx web root.
2 | - name: adding code in green machine
3 |   template:
4 |     src: index.html
5 |     dest: '/var/www/html/index.html'
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/greencode/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Welcome to Green Deployment
4 |
5 |
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/haproxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Install haproxy 1.7 from the vbernat PPA, render its config and start it.
2 | - name: adding haproxy repo
3 |   apt_repository:
4 |     repo: 'ppa:vbernat/haproxy-1.7'
5 |
6 | - name: updating and installing haproxy
7 |   apt:
8 |     name: haproxy
9 |     update_cache: true
10 |     state: present
11 |
12 | - name: updating the haproxy configuration
13 |   template:
14 |     src: haproxy.cfg.j2
15 |     dest: /etc/haproxy/haproxy.cfg
16 |
17 | - name: starting the haproxy service
18 |   service:
19 |     name: haproxy
20 |     enabled: true
21 |     state: started
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/haproxy/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | chroot /var/lib/haproxy
5 | stats socket /run/haproxy/admin.sock mode 660 level admin
6 | stats timeout 30s
7 | user haproxy
8 | group haproxy
9 | daemon
10 |
11 | # Default SSL material locations
12 | ca-base /etc/ssl/certs
13 | crt-base /etc/ssl/private
14 |
15 | # Default ciphers to use on SSL-enabled listening sockets.
16 | # For more information, see ciphers(1SSL). This list is from:
17 | # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
18 | # An alternative list with additional directives can be obtained from
19 | # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
20 | ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
21 | ssl-default-bind-options no-sslv3
22 |
23 | defaults
24 | log global
25 | mode http
26 | option httplog
27 | option dontlognull
28 | timeout connect 5000
29 | timeout client 50000
30 | timeout server 50000
31 | errorfile 400 /etc/haproxy/errors/400.http
32 | errorfile 403 /etc/haproxy/errors/403.http
33 | errorfile 408 /etc/haproxy/errors/408.http
34 | errorfile 500 /etc/haproxy/errors/500.http
35 | errorfile 502 /etc/haproxy/errors/502.http
36 | errorfile 503 /etc/haproxy/errors/503.http
37 | errorfile 504 /etc/haproxy/errors/504.http
38 |
39 | frontend http_front
40 | bind *:80
41 | stats uri /haproxy?stats
42 | default_backend http_back
43 |
44 | backend http_back
45 | balance roundrobin
46 | server {{ hostvars.blueserver.ansible_host }} {{ hostvars.blueserver.ansible_host }}:80 check
47 | #server {{ hostvars.greenserver.ansible_host }} {{ hostvars.greenserver.ansible_host }}:80 check
--------------------------------------------------------------------------------
/chapter-7/blue-green-setup/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Install nginx and ensure it is running and enabled at boot.
2 | - name: installing nginx
3 |   apt:
4 |     name: nginx
5 |     update_cache: true
6 |     state: present
7 |
8 | - name: starting the nginx service
9 |   service:
10 |     name: nginx
11 |     enabled: true
12 |     state: started
--------------------------------------------------------------------------------
/chapter-7/blue-green-update/inventory:
--------------------------------------------------------------------------------
1 | [proxyserver]
2 | proxy ansible_host=192.168.100.100 ansible_user=ubuntu ansible_password=vagrant
3 |
4 | [blue]
5 | blueserver ansible_host=192.168.100.10 ansible_user=ubuntu ansible_password=vagrant
6 |
7 | [green]
8 | greenserver ansible_host=192.168.100.20 ansible_user=ubuntu ansible_password=vagrant
9 |
10 | [webservers:children]
11 | blue
12 | green
13 |
14 | [prod:children]
15 | webservers
16 | proxyserver
--------------------------------------------------------------------------------
/chapter-7/blue-green-update/main.yml:
--------------------------------------------------------------------------------
1 | # Flip traffic to the GREEN backend: re-render haproxy.cfg (the template in
2 | # this directory enables the green server line) and reload the proxy.
3 | - name: Updating to GREEN deployment
4 |   hosts: proxyserver
5 |   become: true
6 |
7 |   tasks:
8 |     - name: updating proxy configuration
9 |       template:
10 |         src: haproxy.cfg.j2
11 |         dest: /etc/haproxy/haproxy.cfg
12 |
13 |     - name: updating the service
14 |       service:
15 |         name: haproxy
16 |         # reload keeps in-flight connections alive, unlike restarted
17 |         state: reloaded
18 |
19 |     - debug:
20 |         msg: "GREEN deployment successful. Please check your server :)"
--------------------------------------------------------------------------------
/chapter-7/blue-green-update/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | chroot /var/lib/haproxy
5 | stats socket /run/haproxy/admin.sock mode 660 level admin
6 | stats timeout 30s
7 | user haproxy
8 | group haproxy
9 | daemon
10 |
11 | # Default SSL material locations
12 | ca-base /etc/ssl/certs
13 | crt-base /etc/ssl/private
14 |
15 | # Default ciphers to use on SSL-enabled listening sockets.
16 | # For more information, see ciphers(1SSL). This list is from:
17 | # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
18 | # An alternative list with additional directives can be obtained from
19 | # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
20 | ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
21 | ssl-default-bind-options no-sslv3
22 |
23 | defaults
24 | log global
25 | mode http
26 | option httplog
27 | option dontlognull
28 | timeout connect 5000
29 | timeout client 50000
30 | timeout server 50000
31 | errorfile 400 /etc/haproxy/errors/400.http
32 | errorfile 403 /etc/haproxy/errors/403.http
33 | errorfile 408 /etc/haproxy/errors/408.http
34 | errorfile 500 /etc/haproxy/errors/500.http
35 | errorfile 502 /etc/haproxy/errors/502.http
36 | errorfile 503 /etc/haproxy/errors/503.http
37 | errorfile 504 /etc/haproxy/errors/504.http
38 |
39 | frontend http_front
40 | bind *:80
41 | stats uri /haproxy?stats
42 | default_backend http_back
43 |
44 | backend http_back
45 | balance roundrobin
46 | #server {{ hostvars.blueserver.ansible_host }} {{ hostvars.blueserver.ansible_host }}:80 check
47 | server {{ hostvars.greenserver.ansible_host }} {{ hostvars.greenserver.ansible_host }}:80 check
--------------------------------------------------------------------------------
/chapter-7/brakeman-scan/inventory:
--------------------------------------------------------------------------------
1 | [scanner]
2 | 192.168.1.5
3 |
--------------------------------------------------------------------------------
/chapter-7/brakeman-scan/main.yml:
--------------------------------------------------------------------------------
1 | # Clone a Rails app and run the Brakeman static analyzer against it,
2 | # fetching the HTML report back to the control machine.
3 | - name: Brakeman Scanning Playbook
4 |   hosts: scanner
5 |   remote_user: ubuntu
6 |   become: true
7 |   gather_facts: false
8 |   vars:
9 |     repo_url: https://github.com/OWASP/railsgoat.git
10 |     output_dir: /tmp/railsgoat/
11 |     report_name: report.html
12 |
13 |   tasks:
14 |     - name: installing ruby and git
15 |       apt:
16 |         name:
17 |           - ruby-full
18 |           - git
19 |         update_cache: true
20 |         state: present
21 |
22 |     - name: installing brakeman gem
23 |       gem:
24 |         name: brakeman
25 |         state: present
26 |
27 |     - name: cloning the {{ repo_url }}
28 |       git:
29 |         repo: "{{ repo_url }}"
30 |         dest: "{{ output_dir }}"
31 |
32 |     - name: Brakeman scanning in action
33 |       # Output available in text, html, tabs, json, markdown and csv formats
34 |       command: "brakeman -p {{ output_dir }} -o {{ output_dir }}{{ report_name }}"
35 |       register: result
36 |       # Brakeman exits 0 when no warnings are found and 3 when warnings are
37 |       # reported -- both are successful scans. The previous condition
38 |       # (rc != 3) wrongly failed a *clean* scan.
39 |       failed_when: result.rc not in [0, 3]
40 |
41 |     - name: Downloading the report
42 |       fetch:
43 |         src: "{{ output_dir }}{{ report_name }}"
44 |         dest: "{{ report_name }}"
45 |         flat: true
46 |
47 |     - debug:
48 |         msg: "Report can be found at {{ report_name }}"
--------------------------------------------------------------------------------
/chapter-7/lynis/inventory:
--------------------------------------------------------------------------------
1 | [lynis]
2 | 192.168.1.5
3 |
--------------------------------------------------------------------------------
/chapter-7/lynis/main.yml:
--------------------------------------------------------------------------------
1 | # Install Lynis from the CISOfy repository, audit the system, and pull the
2 | # log back to the control machine.
3 | - name: Lynis security audit playbook
4 |   hosts: lynis
5 |   remote_user: ubuntu
6 |   become: true
7 |   vars:
8 |     # refer to https://packages.cisofy.com/community
9 |     code_name: xenial
10 |
11 |   tasks:
12 |     - name: adding lynis repo key
13 |       apt_key:
14 |         keyserver: keyserver.ubuntu.com
15 |         id: C80E383C3DE9F082E01391A0366C67DE91CA5D5F
16 |         state: present
17 |
18 |     - name: installing apt-transport-https
19 |       apt:
20 |         name: apt-transport-https
21 |         state: present
22 |
23 |     - name: adding repo
24 |       apt_repository:
25 |         repo: "deb https://packages.cisofy.com/community/lynis/deb/ {{ code_name }} main"
26 |         state: present
27 |         filename: "cisofy-lynis"
28 |
29 |     - name: installing lynis
30 |       apt:
31 |         name: lynis
32 |         update_cache: true
33 |         state: present
34 |
35 |     # facts stay enabled (default) -- ansible_date_time is needed below
36 |     - name: audit scan the system
37 |       shell: lynis audit system > /tmp/lynis-output.log
38 |
39 |     - name: downloading report locally
40 |       fetch:
41 |         src: /tmp/lynis-output.log
42 |         dest: ./{{ inventory_hostname }}-lynis-report-{{ ansible_date_time.date }}.log
43 |         flat: true
44 |
45 |     - name: report location
46 |       debug:
47 |         msg: "Report can be found at ./{{ inventory_hostname }}-lynis-report-{{ ansible_date_time.date }}.log"
--------------------------------------------------------------------------------
/chapter-7/nikto-scan/inventory:
--------------------------------------------------------------------------------
1 | [scanner]
2 | 192.168.1.10 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-7/nikto-scan/main.yml:
--------------------------------------------------------------------------------
1 | # Install Nikto from source and scan a target, fetching the report back.
2 | - name: Nikto Playbook
3 |   hosts: scanner
4 |   remote_user: ubuntu
5 |   become: true
6 |   vars:
7 |     domain_name: idontexistdomainnamewebsite.com # Add the domain to scan
8 |     report_name: report.html
9 |
10 |   tasks:
11 |     - name: installing pre requisuites
12 |       apt:
13 |         # a list installs everything in one apt transaction
14 |         name:
15 |           - git
16 |           - perl
17 |           - libnet-ssleay-perl
18 |           - openssl
19 |           - libauthen-pam-perl
20 |           - libio-pty-perl
21 |           - libmd-dev
22 |         state: present
23 |         update_cache: true
24 |
25 |     - name: downloading nikto
26 |       git:
27 |         repo: https://github.com/sullo/nikto.git
28 |         dest: /usr/share/nikto/
29 |
30 |     - name: Nikto scanning in action
31 |       # Output available in csv, html, msf+, nbe, txt, xml formats
32 |       command: "/usr/share/nikto/program/nikto.pl -h {{ domain_name }} -o /tmp/{{ domain_name }}-report.html"
33 |
34 |     - name: downloading the report
35 |       fetch:
36 |         src: "/tmp/{{ domain_name }}-report.html"
37 |         dest: "{{ report_name }}"
38 |         flat: true
39 |
40 |     - debug:
41 |         msg: "Report can be found at {{ report_name }}"
--------------------------------------------------------------------------------
/chapter-7/nmap-basic-scan/main.yml:
--------------------------------------------------------------------------------
1 | # Quick top-ports sweep of a small host list with nmap.
2 | - name: Basic NMAP Scan Playbook
3 |   hosts: localhost
4 |   gather_facts: false
5 |   vars:
6 |     top_ports: 1000
7 |     network_hosts:
8 |       - 192.168.1.1
9 |       - scanme.nmap.org
10 |       - 127.0.0.1
11 |
12 |   tasks:
13 |     - name: check if nmap installed and install
14 |       apt:
15 |         name: nmap
16 |         update_cache: true
17 |         state: present
18 |       become: true
19 |
20 |     # NOTE(review): the %Y-%m-%d tokens rely on nmap's strftime-style
21 |     # expansion in output filenames -- confirm on the installed nmap version.
22 |     - name: top ports scan
23 |       shell: "nmap --top-ports {{ top_ports }} -Pn -oA nmap-scan-%Y-%m-%d {{ network_hosts|join(' ') }}"
--------------------------------------------------------------------------------
/chapter-7/nmap-nse/main.yml:
--------------------------------------------------------------------------------
1 | # Run selected NSE scripts against one target, one nmap invocation each.
2 | - name: Advanced NMAP Scan using NSE
3 |   hosts: localhost
4 |   vars:
5 |     ports:
6 |       - 80
7 |       - 443
8 |     scan_host: scanme.nmap.org
9 |
10 |   tasks:
11 |     - name: Running Nmap NSE scan
12 |       shell: "nmap -Pn -p {{ ports|join(',') }} --script {{ item }} -oA nmap-{{ item }}-results-%Y-%m-%d {{ scan_host }}"
13 |       with_items:
14 |         - http-methods
15 |         - http-enum
--------------------------------------------------------------------------------
/chapter-7/owasp-dependency-check/inventory:
--------------------------------------------------------------------------------
1 | [scanner]
2 | 192.168.1.10 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-7/owasp-dependency-check/main.yml:
--------------------------------------------------------------------------------
1 | # Run OWASP dependency-check against a cloned project and fetch the report.
2 | - name: OWASP Dependency Check Playbook
3 |   hosts: scanner
4 |   remote_user: ubuntu
5 |   become: true
6 |   vars:
7 |     repo_url: https://github.com/psiinon/bodgeit.git
8 |     output_dir: /tmp/bodgeit/
9 |     project_name: bodgeit
10 |     report_name: report.html
11 |
12 |   tasks:
13 |     - name: installing pre requisuites
14 |       apt:
15 |         # a list installs everything in one apt transaction
16 |         name:
17 |           - git
18 |           - unzip
19 |           - mono-runtime
20 |           - mono-devel
21 |           - default-jre
22 |         state: present
23 |         update_cache: true
24 |
25 |     - name: downloading owasp dependency-check
26 |       unarchive:
27 |         src: http://dl.bintray.com/jeremy-long/owasp/dependency-check-3.0.2-release.zip
28 |         dest: /usr/share/
29 |         remote_src: true
30 |
31 |     - name: adding symlink to the system
32 |       file:
33 |         src: /usr/share/dependency-check/bin/dependency-check.sh
34 |         dest: /usr/bin/dependency-check
35 |         # Quoted: unquoted 0755 is parsed by YAML as the integer 493.
36 |         mode: "0755"
37 |         state: link
38 |
39 |     - name: cloning the {{ repo_url }}
40 |       git:
41 |         repo: "{{ repo_url }}"
42 |         dest: "{{ output_dir }}"
43 |
44 |     - name: updating CVE database
45 |       command: "dependency-check --updateonly"
46 |
47 |     - name: OWASP dependency-check scanning in action
48 |       # Output available in XML, HTML, CSV, JSON, VULN, ALL formats
49 |       command: "dependency-check --project {{ project_name }} --scan {{ output_dir }} -o {{ output_dir }}{{ project_name }}-report.html"
50 |
51 |     - name: Downloading the report
52 |       fetch:
53 |         src: "{{ output_dir }}{{ project_name }}-report.html"
54 |         dest: "{{ report_name }}"
55 |         flat: true
56 |
57 |     - debug:
58 |         msg: "Report can be found at {{ report_name }}"
--------------------------------------------------------------------------------
/chapter-7/scout2-scan/main.yml:
--------------------------------------------------------------------------------
1 | # Run a Scout2 audit; AWS credentials are passed via environment variables.
2 | - name: AWS Security Audit using Scout2
3 |   hosts: localhost
4 |   vars:
5 |     aws_access_key: XXXXXXXX
6 |     aws_secret_key: XXXXXXXX
7 |
8 |   tasks:
9 |     - name: running scout2 scan
10 |       # If you are performing from less memory system add --thread-config 1 to below command
11 |       command: Scout2
12 |       environment:
13 |         AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
14 |         AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
15 |
16 |     - name: AWS Scout2 report downloaded
17 |       debug:
18 |         msg: "Report can be found at ./scout2-report/report.html"
--------------------------------------------------------------------------------
/chapter-7/scout2-setup/main.yml:
--------------------------------------------------------------------------------
1 | # Install AWS Scout2 and its python prerequisites on the control host.
2 | - name: AWS Security Audit using Scout2
3 |   hosts: localhost
4 |   become: true
5 |
6 |   tasks:
7 |     - name: installing python and pip
8 |       apt:
9 |         # a list installs both packages in one apt transaction
10 |         name:
11 |           - python
12 |           - python-pip
13 |         state: present
14 |         update_cache: true
15 |
16 |     - name: install aws scout2
17 |       pip:
18 |         name: awsscout2
--------------------------------------------------------------------------------
/chapter-7/windows-audit-playbook/inventory:
--------------------------------------------------------------------------------
1 | [winblows]
2 | 192.168.1.12 ansible_user=maddy ansible_password=passwd ansible_connection=winrm ansible_winrm_server_cert_validation=ignore ansible_port=5986
--------------------------------------------------------------------------------
/chapter-7/windows-audit-playbook/windows-security-audit.yml:
--------------------------------------------------------------------------------
1 | # Download and run a PowerShell audit script on the Windows host over WinRM.
2 | - name: Windows Audit Playbook
3 |   hosts: winblows
4 |
5 |   tasks:
6 |     - name: download audit script
7 |       win_get_url:
8 |         url: https://raw.githubusercontent.com/alanrenouf/Windows-Workstation-and-Server-Audit/master/Audit.ps1
9 |         dest: 'C:\Audit.ps1'
10 |
11 |     - name: running windows audit script
12 |       win_shell: 'C:\Audit.ps1'
13 |       args:
14 |         chdir: 'C:\'
--------------------------------------------------------------------------------
/chapter-7/windows-updates-playbook/inventory:
--------------------------------------------------------------------------------
1 | [winblows]
2 | 192.168.1.12 ansible_user=maddy ansible_password=passwd ansible_connection=winrm ansible_winrm_server_cert_validation=ignore ansible_port=5986
--------------------------------------------------------------------------------
/chapter-7/windows-updates-playbook/windows-security-updates.yml:
--------------------------------------------------------------------------------
1 | # Apply critical and security updates; reboot only if Windows requires it.
2 | - name: Windows Security Updates
3 |   hosts: winblows
4 |
5 |   tasks:
6 |     - name: install all critical and security updates
7 |       win_updates:
8 |         category_names:
9 |           - CriticalUpdates
10 |           - SecurityUpdates
11 |         state: installed
12 |       register: update_result
13 |
14 |     - name: reboot host if required
15 |       win_reboot:
16 |       when: update_result.reboot_required
--------------------------------------------------------------------------------
/chapter-7/wp-scan/main.yml:
--------------------------------------------------------------------------------
1 | - name: WPScan Playbook
2 |   hosts: localhost
3 |   vars:
4 |     domain_name: www.idontexistdomainnamewebsite.com # Specify the domain to scan
5 |     wpscan_container: wpscanteam/wpscan
6 |     scan_name: wpscan
7 |     output_dir: /tmp # Specify the output directory to store results
8 | 
9 |   tasks:
10 |   # This playbook assumes docker already installed
11 |     - name: Downloading {{ wpscan_container }} docker container
12 |       docker_image:
13 |         name: "{{ wpscan_container }}"
14 | 
15 |     - name: creating output report file
16 |       file:
17 |         path: "{{ output_dir }}/{{ domain_name }}.txt"
18 |         state: touch
19 | 
20 |     - name: Scanning {{ domain_name }} website using WPScan
21 |       docker_container:
22 |         name: "{{ scan_name }}"
23 |         image: "{{ wpscan_container }}"
24 |         interactive: yes
25 |         auto_remove: yes
26 |         state: started
27 |         volumes: "{{ output_dir }}/{{ domain_name }}.txt:/wpscan/data/output.txt" # use output_dir, not a hard-coded /tmp
28 |         command: ["--update", "--follow-redirection", "--url", "{{ domain_name }}", "--log", "/wpscan/data/output.txt"]
29 | 
30 |     - name: WPScan report downloaded
31 |       debug:
32 |         msg: "The report can be found at {{ output_dir }}/{{ domain_name }}.txt"
--------------------------------------------------------------------------------
/chapter-8/anchore-cli-scan/inventory:
--------------------------------------------------------------------------------
1 | [anchore]
2 | 192.168.33.60 ansible_host=192.168.33.60 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/anchore-cli-scan/main.yml:
--------------------------------------------------------------------------------
1 | - name: anchore-cli scan
2 |   hosts: anchore
3 |   become: yes
4 |   vars:
5 |     scan_image_name: "docker.io/library/ubuntu:latest"
6 |     anchore_vars:
7 |       ANCHORE_CLI_URL: http://localhost:8228/v1
8 |       ANCHORE_CLI_USER: admin
9 |       ANCHORE_CLI_PASS: secretpassword
10 | 
11 |   tasks:
12 |     - name: installing anchore-cli
13 |       pip:
14 |         name: "{{ item }}"
15 | 
16 |       with_items:
17 |         - anchorecli
18 |         - pyyaml
19 | 
20 |     - name: downloading image
21 |       docker_image:
22 |         name: "{{ scan_image_name }}"
23 | 
24 |     - name: adding image for analysis
25 |       command: "anchore-cli image add {{ scan_image_name }}"
26 |       environment: "{{ anchore_vars }}"
27 | 
28 |     - name: wait for analysis to complete
29 |       command: "anchore-cli image content {{ scan_image_name }} os"
30 |       register: analysis
31 |       until: analysis.rc != 1
32 |       retries: 10
33 |       delay: 30
34 |       ignore_errors: yes
35 |       environment: "{{ anchore_vars }}"
36 | 
37 |     - name: vulnerabilities results
38 |       command: "anchore-cli image vuln {{ scan_image_name }} os"
39 |       register: vuln_output
40 |       environment: "{{ anchore_vars }}"
41 | 
42 |     - name: "vulnerabilities in {{ scan_image_name }}"
43 |       debug:
44 |         msg: "{{ vuln_output.stdout_lines }}"
45 | 
46 | 
--------------------------------------------------------------------------------
/chapter-8/anchore-server/inventory:
--------------------------------------------------------------------------------
1 | [anchore]
2 | 192.168.33.60 ansible_host=192.168.33.60 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/anchore-server/main.yml:
--------------------------------------------------------------------------------
1 | - name: anchore server setup
2 | hosts: anchore
3 | become: yes
4 | vars:
5 | db_password: changeme
6 | admin_password: secretpassword
7 |
8 | tasks:
9 | - name: creating volumes
10 | file:
11 | path: "{{ item }}"
12 | recurse: yes
13 | state: directory
14 |
15 | with_items:
16 | - /root/aevolume/db
17 | - /root/aevolume/config
18 |
19 | - name: copying anchore-engine configuration
20 | template:
21 | src: config.yaml.j2
22 | dest: /root/aevolume/config/config.yaml
23 |
24 | - name: starting anchore-db container
25 | docker_container:
26 | name: anchore-db
27 | image: postgres:9
28 | volumes:
29 | - "/root/aevolume/db/:/var/lib/postgresql/data/pgdata/"
30 | env:
31 | POSTGRES_PASSWORD: "{{ db_password }}"
32 | PGDATA: "/var/lib/postgresql/data/pgdata/"
33 |
34 | - name: starting anchore-engine container
35 | docker_container:
36 | name: anchore-engine
37 | image: anchore/anchore-engine
38 | ports:
39 | - 8228:8228
40 | - 8338:8338
41 | volumes:
42 | - "/root/aevolume/config/config.yaml:/config/config.yaml:ro"
43 | - "/var/run/docker.sock:/var/run/docker.sock:ro"
44 | links:
45 | - anchore-db:anchore-db
46 |
--------------------------------------------------------------------------------
/chapter-8/anchore-server/templates/config.yaml.j2:
--------------------------------------------------------------------------------
1 | # Anchore Service Configuration File
2 | #
3 |
4 | # General system-wide configuration options, these should not need to
5 | # be altered for basic operation
6 | #
7 | # service_dir: '/config'
8 | # tmp_dir: '/tmp'
9 | # log_level: 'DEBUG'
10 | #
11 | # allow_awsecr_iam_auto: False
12 | cleanup_images: True
13 | # docker_conn: 'unix://var/run/docker.sock'
14 | # docker_conn_timeout: 600
15 | #
16 | #
17 | log_level: 'DEBUG'
18 | host_id: 'dockerhostid-xyz'
19 | internal_ssl_verify: False
20 | #catalog_endpoint: 'http://localhost:8082/v1'
21 | #
22 |
23 | # Uncomment if you have a local endpoint that can accept
24 | # notifications from the anchore-engine, as configured below
25 | #
26 | #webhooks:
27 | # webhook_user: 'user'
28 | # webhook_pass: 'pass'
29 | # ssl_verify: False
30 | # general:
31 | # url: 'http://localhost:9090/general//'
32 | # policy_eval:
33 | # url: 'http://localhost:9090/policy_eval/'
34 | # webhook_user: 'mehuser'
35 | # webhook_pass: 'mehpass'
36 | ## special webhook for FATAL service events - system will store in DB if not enabled here
37 | # # error_event:
38 | # # url: 'http://localhost:9090/error_event/'
39 | # #
40 |
41 | #
42 | # A feeds section is available for override, but shouldn't be
43 | # needed. By default, the 'admin' credentials are used if present,
44 | # otherwise anonymous access for feed sync is used
45 |
46 | #feeds:
47 | # selective_sync:
48 | # # If enabled only sync specific feeds instead of all.
49 | # enabled: True
50 | # feeds:
51 | # vulnerabilities: True
52 | # # Warning: enabling the package sync causes the service to require much
53 | # # more memory to do process the significant data volume. We recommend at least 4GB available for the container
54 | # packages: False
55 | # anonymous_user_username: anon@ancho.re
56 | # anonymous_user_password: pbiU2RYZ2XrmYQ
57 | # url: 'https://ancho.re/v1/service/feeds'
58 | # client_url: 'https://ancho.re/v1/account/users'
59 | # token_url: 'https://ancho.re/oauth/token'
60 | # connection_timeout_seconds: 3
61 | # read_timeout_seconds: 60
62 |
63 | credentials:
64 | users:
65 | admin:
66 | password: "{{ admin_password }}"
67 | email: 'admin@localhost.local'
68 | external_service_auths:
69 | # anchoreio:
70 | # anchorecli:
71 | # auth: 'myanchoreiouser:myanchoreiopass'
72 | #auto_policy_sync: True
73 |
74 | database:
75 | db_connect: "postgresql+pg8000://postgres:{{ db_password }}@anchore-db:5432/postgres"
76 | db_connect_args:
77 | timeout: 120
78 | ssl: False
79 | db_pool_size: 30
80 | db_pool_max_overflow: 100
81 |
82 | services:
83 | apiext:
84 | enabled: True
85 | require_auth: True
86 | endpoint_hostname: 'localhost'
87 | listen: '0.0.0.0'
88 | port: 8228
89 | #ssl_enable: True
90 | #ssl_cert: '/config/selfsigned-example.cert'
91 | #ssl_key: '/config/selfsigned-example.key'
92 | kubernetes_webhook:
93 | enabled: True
94 | require_auth: False
95 | endpoint_hostname: 'localhost'
96 | listen: '0.0.0.0'
97 | port: 8338
98 | #ssl_enable: True
99 | #ssl_cert: '/config/selfsigned-example.cert'
100 | #ssl_key: '/config/selfsigned-example.key'
101 | catalog:
102 | enabled: True
103 | require_auth: True
104 | endpoint_hostname: 'localhost'
105 | listen: '0.0.0.0'
106 | port: 8082
107 | use_db: True
108 | cycle_timer_seconds: '1'
109 | cycle_timers:
110 | image_watcher: 3600
111 | policy_eval: 3600
112 | feed_sync: 14400
113 | analyzer_queue: 1
114 | notifications: 30
115 | service_watcher: 15
116 | policy_bundle_sync: 300
117 | simplequeue:
118 | enabled: True
119 | require_auth: True
120 | endpoint_hostname: 'localhost'
121 | listen: '0.0.0.0'
122 | port: 8083
123 | analyzer:
124 | enabled: True
125 | cycle_timer_seconds: '1'
126 | max_threads: '1'
127 | policy_engine:
128 | enabled: True
129 | require_auth: True
130 | endpoint_hostname: 'localhost'
131 | listen: '0.0.0.0'
132 | port: 8087
133 | #ssl_enable: True
134 | #ssl_cert: '/config/selfsigned-example.cert'
135 | #ssl_key: '/config/selfsigned-example.key'
--------------------------------------------------------------------------------
/chapter-8/clair-scanner-setup/inventory:
--------------------------------------------------------------------------------
1 | [docker]
2 | 192.168.1.10 ansible_host=192.168.1.10 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/clair-scanner-setup/main.yaml:
--------------------------------------------------------------------------------
1 | - name: Clair Scanner Server Setup
2 |   hosts: docker
3 |   remote_user: ubuntu
4 |   become: yes
5 | 
6 |   tasks:
7 |     - name: setting up clair-db
8 |       docker_container:
9 |         name: clair_db
10 |         image: arminc/clair-db
11 |         exposed_ports:
12 |           - 5432
13 | 
14 |     - name: setting up clair-local-scan
15 |       docker_container:
16 |         name: clair
17 |         image: arminc/clair-local-scan:v2.0.1
18 |         ports:
19 |           - "6060:6060"
20 |         links:
21 |           - "clair_db:postgres"
22 | 
23 |     - debug:
24 |         msg: "It will take some time to update the CVE database"
--------------------------------------------------------------------------------
/chapter-8/clair-scanning-images/inventory:
--------------------------------------------------------------------------------
1 | [docker]
2 | 192.168.1.10 ansible_host=192.168.1.10 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/clair-scanning-images/main.yaml:
--------------------------------------------------------------------------------
1 | - name: Scanning containers using clair-scanner
2 |   hosts: docker
3 |   remote_user: ubuntu
4 |   become: yes
5 |   vars:
6 |     image_to_scan: "debian:sid" #container to scan for vulnerabilities
7 |     clair_server: "http://192.168.1.10:6060" #clair server api endpoint
8 | 
9 |   tasks:
10 |     - name: downloading and setting up clair-scanner binary
11 |       get_url:
12 |         url: https://github.com/arminc/clair-scanner/releases/download/v6/clair-scanner_linux_amd64
13 |         dest: /usr/local/bin/clair-scanner
14 |         mode: 0755
15 | 
16 |     - name: scanning {{ image_to_scan }} container for vulnerabilities
17 |       command: "clair-scanner -r /tmp/{{ image_to_scan }}-scan-report.json -c {{ clair_server }} --ip 0.0.0.0 {{ image_to_scan }}"
18 |       register: scan_output
19 |       ignore_errors: yes
20 | 
21 |     - name: downloading the report locally
22 |       fetch:
23 |         src: /tmp/{{ image_to_scan }}-scan-report.json
24 |         dest: "{{ playbook_dir }}/{{ image_to_scan }}-scan-report.json" # must be quoted: a value starting with {{ is invalid YAML
25 |         flat: yes
26 | 
--------------------------------------------------------------------------------
/chapter-8/docker-bench-security/main.yml:
--------------------------------------------------------------------------------
1 | - name: Docker bench security playbook
2 |   hosts: docker
3 |   remote_user: ubuntu
4 |   become: yes
5 | 
6 |   tasks:
7 |     - name: make sure git installed
8 |       apt:
9 |         name: git
10 |         state: present
11 | 
12 |     - name: download the docker bench security
13 |       git:
14 |         repo: https://github.com/docker/docker-bench-security.git
15 |         dest: /opt/docker-bench-security
16 | 
17 |     - name: running docker-bench-security scan
18 |       command: sh docker-bench-security.sh -l /tmp/output.log # run via sh; a bare script name is resolved via PATH, not chdir
19 |       args:
20 |         chdir: /opt/docker-bench-security/
21 | 
22 |     - name: downloading report locally
23 |       fetch:
24 |         src: /tmp/output.log
25 |         dest: "{{ playbook_dir }}/{{ inventory_hostname }}-docker-report-{{ ansible_date_time.date }}.log"
26 |         flat: yes
27 | 
28 |     - name: report location
29 |       debug:
30 |         msg: "Report can be found at {{ playbook_dir }}/{{ inventory_hostname }}-docker-report-{{ ansible_date_time.date }}.log"
31 | 
--------------------------------------------------------------------------------
/chapter-8/osquery-setup/inventory:
--------------------------------------------------------------------------------
1 | [linuxservers]
2 | 192.168.33.60 ansible_host=192.168.33.60 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/osquery-setup/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up osquery
2 | hosts: linuxservers
3 | become: yes
4 |
5 | tasks:
6 | - name: installing osquery
7 | apt:
8 | deb: https://pkg.osquery.io/deb/osquery_2.10.2_1.linux.amd64.deb
9 | update_cache: yes
10 |
11 | - name: adding osquery configuration
12 | template:
13 | src: "{{ item.src }}"
14 | dest: "{{ item.dst }}"
15 |
16 | with_items:
17 | - { src: fim.conf, dst: /usr/share/osquery/packs/fim.conf }
18 | - { src: osquery.conf, dst: /etc/osquery/osquery.conf }
19 |
20 | - name: starting and enabling osquery service
21 | service:
22 | name: osqueryd
23 | state: started
24 | enabled: yes
--------------------------------------------------------------------------------
/chapter-8/osquery-setup/templates/fim.conf:
--------------------------------------------------------------------------------
1 | {
2 | "queries": {
3 | "file_events": {
4 | "query": "select * from file_events;",
5 | "removed": false,
6 | "interval": 300
7 | }
8 | },
9 | "file_paths": {
10 | "homes": [
11 | "/root/.ssh/%%",
12 | "/home/%/.ssh/%%"
13 | ],
14 | "etc": [
15 | "/etc/%%"
16 | ],
17 | "home": [
18 | "/home/%%"
19 | ],
20 | "tmp": [
21 | "/tmp/%%"
22 | ]
23 | }
24 | }
--------------------------------------------------------------------------------
/chapter-8/osquery-setup/templates/osquery.conf:
--------------------------------------------------------------------------------
1 | {
2 | "options": {
3 | "config_plugin": "filesystem",
4 | "logger_plugin": "filesystem",
5 | "logger_path": "/var/log/osquery",
6 | "disable_logging": "false",
7 | "log_result_events": "true",
8 | "schedule_splay_percent": "10",
9 | "pidfile": "/var/osquery/osquery.pidfile",
10 | "events_expiry": "3600",
11 | "database_path": "/var/osquery/osquery.db",
12 | "verbose": "false",
13 | "worker_threads": "2",
14 | "enable_monitor": "true",
15 | "disable_events": "false",
16 | "disable_audit": "false",
17 | "audit_allow_config": "true",
18 | "host_identifier": "hostname",
19 | "enable_syslog": "true",
20 | "audit_allow_sockets": "true",
21 | "schedule_default_interval": "3600"
22 | },
23 | "schedule": {
24 | "crontab": {
25 | "query": "SELECT * FROM crontab;",
26 | "interval": 300
27 | },
28 | "system_profile": {
29 | "query": "SELECT * FROM osquery_schedule;"
30 | },
31 | "system_info": {
32 | "query": "SELECT hostname, cpu_brand, physical_memory FROM system_info;",
33 | "interval": 3600
34 | }
35 | },
36 | "decorators": {
37 | "load": [
38 | "SELECT uuid AS host_uuid FROM system_info;",
39 | "SELECT user AS username FROM logged_in_users ORDER BY time DESC LIMIT 1;"
40 | ]
41 | },
42 | "packs": {
43 | "fim": "/usr/share/osquery/packs/fim.conf",
44 | "osquery-monitoring": "/usr/share/osquery/packs/osquery-monitoring.conf",
45 | "incident-response": "/usr/share/osquery/packs/incident-response.conf",
46 | "it-compliance": "/usr/share/osquery/packs/it-compliance.conf",
47 | "vuln-management": "/usr/share/osquery/packs/vuln-management.conf"
48 | }
49 | }
--------------------------------------------------------------------------------
/chapter-8/vuls-scanning/inventory:
--------------------------------------------------------------------------------
1 | [vuls]
2 | 192.168.33.60 ansible_host=192.168.33.60 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/vuls-scanning/main.yml:
--------------------------------------------------------------------------------
1 | - name: scanning and reporting using vuls
2 | hosts: vuls
3 | become: yes
4 | vars:
5 | vuls_data_directory: "/vuls_data"
6 | slack_web_hook_url: https://hooks.slack.com/services/XXXXXXX/XXXXXXXXXXXXXXXXXXXXX
7 | slack_channel: "#vuls"
8 | slack_emoji: ":ghost:"
9 | server_to_scan: 192.168.33.80
10 | server_username: vagrant
11 | server_key_file_name: 192-168-33-80
12 |
13 | tasks:
14 |   - name: copying configuration file and ssh keys
15 |     template:
16 |       src: "{{ item.src }}"
17 |       dest: "{{ item.dst }}"
18 |       mode: "0400" # quoted so the octal mode is not reinterpreted as an integer
19 | 
20 |     with_items:
21 |       - { src: 'config.toml', dst: '/root/config.toml' }
22 |       - { src: '192-168-33-80', dst: '/root/.ssh/192-168-33-80' }
23 |
24 | - name: running config test
25 | docker_container:
26 | name: configtest
27 | image: vuls/vuls
28 | auto_remove: yes
29 | interactive: yes
30 | state: started
31 | command: configtest -config=/root/config.toml
32 | volumes:
33 | - "/root/.ssh:/root/.ssh:ro"
34 | - "{{ vuls_data_directory }}:/vuls"
35 | - "{{ vuls_data_directory }}/vuls-log:/var/log/vuls"
36 | - "/root/config.toml:/root/config.toml:ro"
37 |
38 | - name: running vuls scanner
39 | docker_container:
40 | name: vulsscan
41 | image: vuls/vuls
42 | auto_remove: yes
43 | interactive: yes
44 | state: started
45 | command: scan -config=/root/config.toml
46 | volumes:
47 | - "/root/.ssh:/root/.ssh:ro"
48 | - "{{ vuls_data_directory }}:/vuls"
49 | - "{{ vuls_data_directory }}/vuls-log:/var/log/vuls"
50 | - "/root/config.toml:/root/config.toml:ro"
51 | - "/etc/localtime:/etc/localtime:ro"
52 | env:
53 | TZ: "Asia/Kolkata"
54 |
55 | - name: sending slack report
56 | docker_container:
57 | name: vulsreport
58 | image: vuls/vuls
59 | auto_remove: yes
60 | interactive: yes
61 | state: started
62 | command: report -cvedb-path=/vuls/cve.sqlite3 -ovaldb-path=/vuls/oval.sqlite3 --to-slack -config=/root/config.toml
63 | volumes:
64 | - "/root/.ssh:/root/.ssh:ro"
65 | - "{{ vuls_data_directory }}:/vuls"
66 | - "{{ vuls_data_directory }}/vuls-log:/var/log/vuls"
67 | - "/root/config.toml:/root/config.toml:ro"
68 | - "/etc/localtime:/etc/localtime:ro"
69 |
70 | - name: vuls webui report
71 | docker_container:
72 | name: vulswebui
73 | image: vuls/vulsrepo
74 | interactive: yes
75 | volumes:
76 | - "{{ vuls_data_directory }}:/vuls"
77 | ports:
78 | - "80:5111"
--------------------------------------------------------------------------------
/chapter-8/vuls-scanning/templates/192-168-33-80:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
3 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
4 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
5 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
8 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
9 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
11 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
12 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
15 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
16 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
17 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
18 | -----END RSA PRIVATE KEY-----
--------------------------------------------------------------------------------
/chapter-8/vuls-scanning/templates/config.toml:
--------------------------------------------------------------------------------
1 | [slack]
2 | hookURL = "{{ slack_web_hook_url}}"
3 | channel = "{{ slack_channel }}"
4 | iconEmoji = "{{ slack_emoji }}"
5 |
6 | [servers]
7 |
8 | [servers.{{ server_key_file_name }}]
9 | host = "{{ server_to_scan }}"
10 | user = "{{ server_username }}"
11 | keyPath = "/root/.ssh/{{ server_key_file_name }}"
--------------------------------------------------------------------------------
/chapter-8/vuls/group_vars/vuls.yml:
--------------------------------------------------------------------------------
1 | vuls_data_directory: "/vuls_data"
2 | nvd_database_years: 2017
3 | redhat_oval_versions:
4 | - 6
5 | - 7
6 | ubuntu_oval_versions:
7 | - 12
8 | - 14
9 | - 16
--------------------------------------------------------------------------------
/chapter-8/vuls/inventory:
--------------------------------------------------------------------------------
1 | [vuls]
2 | 192.168.33.60 ansible_host=192.168.33.60 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-8/vuls/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up vuls using docker containers
2 | hosts: vuls
3 | become: yes
4 |
5 | roles:
6 | - vuls_containers_download
7 | - vuls_database_download
--------------------------------------------------------------------------------
/chapter-8/vuls/roles/vuls_containers_download/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: pulling containers locally
2 | docker_image:
3 | name: "{{ item }}"
4 | pull: yes
5 |
6 | with_items:
7 | - vuls/go-cve-dictionary
8 | - vuls/goval-dictionary
9 | - vuls/vuls
--------------------------------------------------------------------------------
/chapter-8/vuls/roles/vuls_database_download/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: fetching NVD database locally
2 | docker_container:
3 | name: "cve-{{ item }}"
4 | image: vuls/go-cve-dictionary
5 | auto_remove: yes
6 | interactive: yes
7 | state: started
8 | command: fetchnvd -years "{{ item }}"
9 | volumes:
10 | - "{{ vuls_data_directory }}:/vuls"
11 | - "{{ vuls_data_directory }}/go-cve-dictionary-log:/var/log/vuls"
12 | with_sequence: start=2002 end="{{ nvd_database_years }}"
13 |
14 | - name: fetching redhat oval data
15 | docker_container:
16 | name: "redhat-oval-{{ item }}"
17 | image: vuls/goval-dictionary
18 | auto_remove: yes
19 | interactive: yes
20 | state: started
21 | command: fetch-redhat "{{ item }}"
22 | volumes:
23 | - "{{ vuls_data_directory }}:/vuls"
24 | - "{{ vuls_data_directory }}/goval-dictionary-log:/var/log/vuls"
25 | with_items: "{{ redhat_oval_versions }}"
26 |
27 | - name: fetching ubuntu oval data
28 | docker_container:
29 | name: "ubuntu-oval-{{ item }}"
30 | image: vuls/goval-dictionary
31 | auto_remove: yes
32 | interactive: yes
33 | state: started
34 | command: "fetch-ubuntu {{ item }}"
35 | volumes:
36 | - "{{ vuls_data_directory }}:/vuls"
37 | - "{{ vuls_data_directory }}/goval-dictionary-log:/var/log/vuls"
38 | with_items: "{{ ubuntu_oval_versions }}"
--------------------------------------------------------------------------------
/chapter-9/cuckoo-scan/inventory:
--------------------------------------------------------------------------------
1 | [cuckoo]
2 | 192.168.33.132 ansible_host=192.168.33.132 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-9/cuckoo-scan/main.yml:
--------------------------------------------------------------------------------
1 | - name: Cuckoo malware sample analysis
2 | hosts: cuckoo
3 | vars:
4 | local_binaries_path: /tmp/binaries
5 |
6 | tasks:
7 | - name: copying malware sample to cuckoo for analysis
8 | copy:
9 | src: "{{ local_binaries_path }}"
10 | dest: "/tmp/binaries/{{ ansible_hostname }}"
11 |
12 | - name: submitting the files to cuckoo for analysis
13 | command: "cuckoo submit /tmp/binaries/{{ ansible_hostname }}"
14 | ignore_errors: yes
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/inventory:
--------------------------------------------------------------------------------
1 | [cuckoo]
2 | 192.168.33.132 ansible_host=192.168.33.132 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up cuckoo
2 | hosts: cuckoo
3 | remote_user: ubuntu
4 | become: yes
5 |
6 | roles:
7 | - dependencies
8 | - virtualbox
9 | - yara
10 | - cuckoo
11 | - start-cuckoo
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/cuckoo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: adding cuckoo to vboxusers
2 | group:
3 | name: cuckoo
4 | state: present
5 |
6 | - name: creating new user and add to groups
7 | user:
8 | name: cuckoo
9 | shell: /bin/bash
10 | groups: vboxusers, cuckoo
11 | state: present
12 | append: yes
13 |
14 | - name: upgrading pip, setuptools and cuckoo
15 | pip:
16 | name: "{{ item }}"
17 | state: latest
18 |
19 | with_items:
20 | - pip
21 | - setuptools
22 | - pydeep
23 | - cuckoo
24 | - openpyxl
25 | - ujson
26 | - pycrypto
27 | - distorm3
28 | - pytz
29 | - weasyprint
30 |
31 | - name: creating cuckoo home direcotry
32 | command: "cuckoo"
33 | ignore_errors: yes
34 |
35 | - name: adding cuckoo as owner
36 | file:
37 | path: "/root/.cuckoo"
38 | owner: cuckoo
39 | group: cuckoo
40 | recurse: yes
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/dependencies/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing pre requirements
2 | apt:
3 | name: "{{ item }}"
4 | state: present
5 | update_cache: yes
6 |
7 | with_items:
8 | - python
9 | - python-pip
10 | - python-dev
11 | - libffi-dev
12 | - libssl-dev
13 | - python-virtualenv
14 | - python-setuptools
15 | - libjpeg-dev
16 | - zlib1g-dev
17 | - swig
18 | - tcpdump
19 | - apparmor-utils
20 | - mongodb
21 | - unzip
22 | - git
23 | - volatility
24 | - autoconf
25 | - libtool
26 | - libjansson-dev
27 | - libmagic-dev
28 | - postgresql
29 | - volatility
30 | - volatility-tools
31 | - automake
32 | - make
33 | - gcc
34 | - flex
35 | - bison
36 |
37 | - name: setting capabilitites to tcpdump
38 | capabilities:
39 | path: /usr/sbin/tcpdump
40 | capability: "{{ item }}+eip"
41 | state: present
42 |
43 | with_items:
44 | - cap_net_raw
45 | - cap_net_admin
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/start-cuckoo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: copying the configurationss
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "/root/.cuckoo/conf/{{ item.dest }}"
5 |
6 | with_items:
7 | - { src: "cuckoo.conf", dest: "cuckoo.conf"}
8 | - { src: "auxiliary.conf", dest: "auxiliary.conf"}
9 | - { src: "virtualbox.conf", dest: "virtualbox.conf"}
10 | - { src: "reporting.conf", dest: "reporting.conf"}
11 |
12 | - name: starting cuckoo server
13 | command: cuckoo -d
14 | ignore_errors: yes
15 |
16 | - name: starting cuckoo webserver
17 | command: "cuckoo web runserver 0.0.0.0:8000"
18 | args:
19 | chdir: "/root/.cuckoo/web"
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/start-cuckoo/templates/auxiliary.conf:
--------------------------------------------------------------------------------
1 | [sniffer]
2 | # Enable or disable the use of an external sniffer (tcpdump) [yes/no].
3 | enabled = yes
4 | # Specify the path to your local installation of tcpdump. Make sure this
5 | # path is correct.
6 | # You can check this using the command: whereis tcpdump
7 | tcpdump = /usr/sbin/tcpdump
8 | # Specify the network interface name on which tcpdump should monitor the
9 | # traffic. Make sure the interface is active.
10 | # The ifconfig command will show you the interface name.
11 | interface = vboxnet0
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/start-cuckoo/templates/cuckoo.conf:
--------------------------------------------------------------------------------
1 | machinery = virtualbox
2 | [resultserver]
3 | ip = 192.168.56.1 #This is the IP address of the host
4 | port = 2042 #leave default unless you have services running
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/start-cuckoo/templates/reporting.conf:
--------------------------------------------------------------------------------
1 | [mongodb]
2 | enabled = yes
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/start-cuckoo/templates/virtualbox.conf:
--------------------------------------------------------------------------------
1 | machines = windowscuckoo
2 |
3 | [windowscuckoo]
4 | label = windowscuckoo
5 | platform = windows
6 | ip = 192.168.56.100 # IP address of the guest
7 | snapshot = windowscuckoosnap1 # name of snapshot
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/virtualbox/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: adding virtualbox apt source
2 | apt_repository:
3 | repo: "deb http://download.virtualbox.org/virtualbox/debian xenial contrib"
4 | filename: 'virtualbox'
5 | state: present
6 |
7 | - name: adding virtualbox apt key
8 | apt_key:
9 | url: "https://www.virtualbox.org/download/oracle_vbox_2016.asc"
10 | state: present
11 |
12 | - name: install virtualbox
13 | apt:
14 | name: virtualbox-5.1
15 | state: present
16 | update_cache: yes
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/yara/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: copying the setup scripts
2 | template:
3 | src: "{{ item.src }}"
4 | dest: "{{ item.dest }}"
5 | mode: 0755
6 |
7 | with_items:
8 | - { src: "yara.sh", dest: "/tmp/yara.sh" }
9 | - { src: "ssdeep.sh", dest: "/tmp/ssdeep.sh" }
10 |
11 | - name: downloading ssdeep and yara releases
12 | unarchive:
13 | src: "{{ item }}"
14 | dest: /tmp/
15 | remote_src: yes
16 |
17 | with_items:
18 | - https://github.com/plusvic/yara/archive/v3.4.0.tar.gz
19 | - https://github.com/ssdeep-project/ssdeep/releases/download/release-2.14.1/ssdeep-2.14.1.tar.gz
20 |
21 | - name: installing yara and ssdeep
22 | shell: "{{ item }}"
23 | ignore_errors: yes
24 |
25 | with_items:
26 | - /tmp/yara.sh
27 | - /tmp/ssdeep.sh
28 |
29 | - name: installing M2Crypto
30 | pip:
31 | name: m2crypto
32 | version: 0.24.0
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/yara/templates/ssdeep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | cd /tmp/ssdeep-2.14.1
4 | ./bootstrap
5 | ./configure
6 | make
7 | make install
--------------------------------------------------------------------------------
/chapter-9/cuckoo-setup/roles/yara/templates/yara.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Build and install yara 3.4.0 plus its Python bindings from /tmp/yara-3.4.0.
3 | cd /tmp/yara-3.4.0
4 | ./bootstrap  # regenerate the autotools build system before configuring
5 | ./configure --with-crypto --enable-cuckoo --enable-magic
6 | make
7 | make install
8 | cd yara-python  # NOTE(review): unguarded cd — a failure would run setup.py in the wrong dir
9 | python setup.py build
10 | python setup.py install
--------------------------------------------------------------------------------
/chapter-9/log-collection/inventory:
--------------------------------------------------------------------------------
1 | [servers]
2 | 192.168.100.10 ansible_host=192.168.100.10 ansible_user=ubuntu ansible_password=vagrant
3 | 192.168.100.20 ansible_host=192.168.100.20 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-9/log-collection/main.yml:
--------------------------------------------------------------------------------
1 | # Reference https://www.ansible.com/security-automation-with-ansible
2 |
3 | - name: Gather log files  # pulls every *.log* file under /var/log back to the controller
4 | hosts: servers
5 | become: yes  # root needed to read most files under /var/log
6 |
7 | tasks:
8 | - name: List files to grab
9 | find:
10 | paths:
11 | - /var/log
12 | patterns:
13 | - '*.log*'  # also matches rotated logs (e.g. auth.log.1)
14 | recurse: yes
15 | register: log_files
16 |
17 | - name: Creating log directory  # one directory per gathered host, created locally
18 | file:
19 | state: directory
20 | path: /tmp/LOGS_{{ ansible_fqdn }}
21 | delegate_to: localhost  # runs on the controller, not the remote server
22 | become: no  # no privilege escalation needed for /tmp on the controller
23 |
24 | - name: Grab files
25 | fetch:
26 | src: "{{ item.path }}"
27 | dest: "/tmp/LOGS_{{ ansible_fqdn }}/"  # fetch nests <hostname>/<remote path> below this — confirm layout
28 | with_items: "{{ log_files.files }}"
--------------------------------------------------------------------------------
/chapter-9/s3-backup/main.yml:
--------------------------------------------------------------------------------
1 | - name: backing up the log data  # syncs a local log directory to an S3 bucket via s3cmd
2 | hosts: localhost
3 | gather_facts: false
4 | become: yes  # s3cmd config is written to /root/.s3cfg
5 | vars:
6 | s3_access_key: XXXXXXX # Use ansible-vault to encrypt
7 | s3_access_secret: XXXXXXX # Use ansible-vault to encrypt
8 | localfolder: /tmp/LOGS/ # Trailing slash is important
9 | remotebucket: secretforensicsdatausingansible # This should be unique in s3
10 |
11 | tasks:
12 | - name: installing s3cmd if not installed
13 | apt:
14 | name: "{{ item }}"
15 | state: present
16 | update_cache: yes
17 |
18 | with_items:
19 | - python-magic
20 | - python-dateutil
21 | - s3cmd
22 |
23 | - name: create s3cmd config file  # holds the credentials, hence root-only read
24 | template:
25 | src: s3cmd.j2
26 | dest: /root/.s3cfg
27 | owner: root
28 | group: root
29 | mode: 0640  # NOTE(review): unquoted octal; safer written as mode: "0640"
30 |
31 | - name: make sure "{{ remotebucket }}" is available
32 | command: "s3cmd mb s3://{{ remotebucket }}/ -c /root/.s3cfg"  # NOTE(review): mb errors if the bucket already exists — consider ignore_errors or the s3_bucket module
33 |
34 | - name: running the s3 backup to "{{ remotebucket }}"
35 | command: "s3cmd sync {{ localfolder }} --preserve s3://{{ remotebucket }}/ -c /root/.s3cfg"
--------------------------------------------------------------------------------
/chapter-9/s3-backup/templates/s3cmd.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | access_key = {{ s3_access_key }}
3 | secret_key = {{ s3_access_secret }}
4 | host_base = s3.amazonaws.com
5 | host_bucket = %(bucket)s.s3.amazonaws.com
6 | website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
7 | use_https = True
8 | signature_v2 = True
--------------------------------------------------------------------------------
/chapter-9/viper-setup/inventory:
--------------------------------------------------------------------------------
1 | [viper]
2 | 192.168.33.22 ansible_host=192.168.33.22 ansible_user=ubuntu ansible_password=vagrant
3 |
--------------------------------------------------------------------------------
/chapter-9/viper-setup/main.yml:
--------------------------------------------------------------------------------
1 | - name: Setting up Viper - binary management and analysis framework
2 | hosts: viper
3 | remote_user: ubuntu
4 | become: yes  # roles install apt/pip packages and write under /opt
5 |
6 | roles:
7 | - dependencies  # system packages, ssdeep build, core pip libraries
8 | - setup  # viper release download and web interface startup
--------------------------------------------------------------------------------
/chapter-9/viper-setup/roles/dependencies/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: installing required packages  # compiler + headers; presumably needed to build the pip C extensions below — confirm
2 | apt:
3 | name: "{{ item }}"
4 | state: present
5 | update_cache: yes
6 |
7 | with_items:
8 | - gcc
9 | - python-dev
10 | - python-pip
11 | - libssl-dev
12 | - swig
13 |
14 | - name: downloading ssdeep release
15 | unarchive:
16 | src: https://github.com/ssdeep-project/ssdeep/releases/download/release-2.14.1/ssdeep-2.14.1.tar.gz
17 | dest: /tmp/  # unpacks to /tmp/ssdeep-2.14.1, the path ssdeep.sh expects
18 | remote_src: yes  # download on the target host, not the controller
19 |
20 | - name: copy ssdeep setup script
21 | template:
22 | src: ssdeep.sh
23 | dest: /tmp/ssdeep.sh
24 | mode: 0755  # NOTE(review): unquoted octal; safer written as mode: "0755"
25 |
26 | - name: installing ssdeep
27 | shell: /tmp/ssdeep.sh
28 | ignore_errors: yes  # deliberate best-effort: a failed build does not abort the play
29 |
30 | - name: installing core dependencies  # pydeep presumably links against the ssdeep built above — confirm
31 | pip:
32 | name: "{{ item }}"
33 | state: present
34 |
35 | with_items:
36 | - SQLAlchemy
37 | - PrettyTable
38 | - python-magic
39 | - pydeep
--------------------------------------------------------------------------------
/chapter-9/viper-setup/roles/dependencies/templates/ssdeep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Build and install ssdeep from the source tree unpacked in /tmp by the playbook.
3 | cd /tmp/ssdeep-2.14.1 || exit 1  # guard: never run make in the caller's cwd
4 | ./bootstrap  # regenerate the autotools build system — must run BEFORE configure
5 | ./configure
6 | make
7 | make install
--------------------------------------------------------------------------------
/chapter-9/viper-setup/roles/setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: downloading the release
2 | unarchive:
3 | src: https://github.com/viper-framework/viper/archive/v1.2.tar.gz
4 | dest: /opt/  # unpacks to /opt/viper-1.2, referenced by the tasks below
5 | remote_src: yes  # download on the target host, not the controller
6 |
7 | - name: installing pip dependencies
8 | pip:
9 | requirements: /opt/viper-1.2/requirements.txt
10 |
11 | - name: starting viper webinterface  # backgrounded via nohup; not supervised by any init system
12 | shell: nohup /usr/bin/python /opt/viper-1.2/web.py -H 0.0.0.0 &
13 | ignore_errors: yes  # deliberate best-effort: play continues even if startup fails
14 |
15 | - debug:
16 | msg: "Viper web interface is running at http://{{ inventory_hostname }}:9090"  # assumes web.py defaults to port 9090 — TODO confirm
--------------------------------------------------------------------------------
/chapter-9/virus-total-scan/inventory:
--------------------------------------------------------------------------------
1 | [malware]
2 | 192.168.33.21 ansible_host=192.168.33.21 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-9/virus-total-scan/main.yml:
--------------------------------------------------------------------------------
1 | - name: scanning file in VirusTotal  # copies local samples to the remote host, scans with the vt CLI, then cleans up
2 | hosts: malware
3 | remote_user: ubuntu
4 | vars:
5 | vt_api_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX  # NOTE(review): encrypt with ansible-vault; never commit a real key
6 | vt_api_type: public # public/private
7 | vt_intelligence_access: False # True/False
8 | files_in_local_system: /tmp/samples/
9 | files_in_remote_system: /tmp/sample-file/
10 |
11 | tasks:
12 | - name: creating samples directory
13 | file:
14 | path: "{{ files_in_remote_system }}"
15 | state: directory
16 |
17 | - name: copying file to remote system
18 | copy:
19 | src: "{{ files_in_local_system }}"
20 | dest: "{{ files_in_remote_system }}"
21 | directory_mode: yes  # NOTE(review): directory_mode expects an octal mode (e.g. "0755"), not a boolean — verify intent
22 |
23 | - name: copying configuration
24 | template:
25 | src: config.j2
26 | dest: "{{ files_in_remote_system }}/.vtapi"  # presumably the vt CLI reads .vtapi from its working directory — confirm
27 |
28 | - name: running VirusTotal scan
29 | command: "vt -fr {{ files_in_remote_system }}"
30 | args:
31 | chdir: "{{ files_in_remote_system }}"  # so the .vtapi written above is picked up
32 | register: vt_scan  # stdout captured for the debug task below
33 |
34 | - name: removing the samples  # cleanup runs even though scan output is still held in vt_scan
35 | file:
36 | path: "{{ files_in_remote_system }}"
37 | state: absent
38 |
39 | - name: VirusTotal scan results
40 | debug:
41 | msg: "{{ vt_scan.stdout_lines }}"
42 |
--------------------------------------------------------------------------------
/chapter-9/virus-total-scan/templates/config.j2:
--------------------------------------------------------------------------------
1 | [vt]
2 | apikey={{ vt_api_key }}
3 | type={{ vt_api_type }}
4 | intelligence={{ vt_intelligence_access }}
5 | # comma-separated engine list, can be empty
6 | engines=
7 | timeout=60
--------------------------------------------------------------------------------
/chapter-9/virus-total/inventory:
--------------------------------------------------------------------------------
1 | [malware]
2 | 192.168.33.21 ansible_host=192.168.33.21 ansible_user=ubuntu ansible_password=vagrant
--------------------------------------------------------------------------------
/chapter-9/virus-total/main.yml:
--------------------------------------------------------------------------------
1 | - name: setting up VirusTotal  # installs the VirusTotalApi command-line tool (vt) if not already present
2 | hosts: malware
3 | remote_user: ubuntu
4 | become: yes
5 |
6 | tasks:
7 | - name: installing pip
8 | apt:
9 | name: "{{ item }}"  # state defaults to present
10 |
11 | with_items:
12 | - python-pip
13 | - unzip
14 |
15 | - name: checking if vt already exists  # guard so download/install only happen once
16 | stat:
17 | path: /usr/local/bin/vt
18 | register: vt_status
19 |
20 | - name: downloading VirusTotal api tool repo
21 | unarchive:
22 | src: "https://github.com/doomedraven/VirusTotalApi/archive/master.zip"
23 | dest: /tmp/
24 | remote_src: yes
25 | when: not vt_status.stat.exists  # idiomatic truthiness test (was "== False")
26 |
27 | - name: installing the dependencies
28 | pip:
29 | requirements: /tmp/VirusTotalApi-master/requirements.txt
30 | when: not vt_status.stat.exists
31 |
32 | - name: installing vt
33 | command: python /tmp/VirusTotalApi-master/setup.py install
34 | when: not vt_status.stat.exists
35 |
--------------------------------------------------------------------------------