├── .ci ├── concourse │ └── rock-molecule.yaml └── test-shellcheck.sh ├── .gitignore ├── .tito ├── packages │ ├── .readme │ └── rock └── tito.props ├── .yamllint ├── CODING_GUIDELINES.md ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── RELEASE ├── Vagrantfile ├── bin ├── RESET_TUI ├── deploy_rock.sh ├── reset_data.sh ├── rock └── rock_setup ├── ci └── pipeline.yml ├── etc └── hosts.ini ├── images ├── rock_full.png └── rock_square.png ├── molecule ├── multi-node │ ├── cleanup.yml │ ├── create.yml │ ├── destroy.yml │ ├── molecule.yml │ ├── playbook.yml │ ├── prepare.yml │ └── tests │ │ └── tests └── single-node │ ├── cleanup.yml │ ├── create.yml │ ├── destroy.yml │ ├── molecule.yml │ ├── playbook.yml │ ├── prepare.yml │ └── tests ├── playbooks ├── ansible.cfg ├── auth-mgmt.yml ├── debug.yml ├── delete-data.yml ├── deploy-rock.yml ├── enable-xfs-quotas.yml ├── generate-defaults.yml ├── group_vars │ └── all.yml ├── manage-services.yml ├── roles ├── setup-deploy-host.yml ├── site.yml └── templates │ ├── filebeat.yml.j2 │ ├── ifcfg-monif.j2 │ ├── ifup-local.j2 │ ├── rock_config.yml.j2 │ └── setup_config.yml.j2 ├── requirements.txt ├── rock.spec ├── roles ├── common │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── RPM-GPG-KEY-RockNSM-2 │ │ ├── RPM-GPG-KEY-RockNSM-Testing │ │ ├── RPM-GPG-KEY-RockNSM-pkgcloud-2_5 │ │ ├── etc-issue.in │ │ └── nm-issue-update │ ├── handlers │ │ └── main.yml │ └── tasks │ │ ├── configure-pipelining.yml │ │ ├── configure-time.yml │ │ ├── configure.yml │ │ ├── deploy.yml │ │ ├── gather-facts.yml │ │ └── main.yml ├── docket │ ├── README.md │ ├── Vagrantfile │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── playbook.yml │ ├── tasks │ │ ├── crypto.yml │ │ ├── docket_config.yml │ │ ├── install.yml │ │ ├── lighttpd.yml │ │ ├── main.yml │ │ └── prereqs.yml │ ├── templates │ │ ├── docket-uwsgi.ini.j2 │ │ ├── docket_lighttpd_scgi.conf.j2 │ │ ├── docket_lighttpd_vhost.conf.j2 │ │ ├── docket_prod.yaml.j2 │ │ └── lighttpd-30-docket.conf.j2 │ ├── test.sh │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── elasticsearch │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── default-mapping.json │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── after.yml │ │ ├── before.yml │ │ ├── main.yml │ │ └── restart.yml │ └── templates │ │ ├── elasticsearch.yml.j2 │ │ └── es-jvm.options.j2 ├── filebeat │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── kafka │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── wait-for-zookeeper.py │ │ └── wait-for-zookeeper.service │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── kibana │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── profile.d-kibanapw.sh │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── kibana.yml.j2 ├── lighttpd │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── add-user.yml │ │ ├── main.yml │ │ └── remove-user.yml │ └── templates │ │ ├── 10-rock-auth.conf.j2 │ │ ├── 10-tls.conf.j2 │ │ ├── 20-rock-vars.conf.j2 │ │ └── 50-rockproxy.conf.j2 ├── logstash │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── logstash-9999-output-elasticsearch.conf.j2 │ │ └── logstash_sysconfig.j2 ├── stenographer │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── stenographer.service │ │ └── stenographer@.service │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── 
config.yml │ │ ├── deploy.yml │ │ ├── install.yml │ │ └── main.yml │ ├── templates │ │ └── stenographer-config.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── suricata │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── suricata.service │ │ └── suricata.tmpfiles │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── fb-suricata.yml.j2 │ │ ├── logrotate-suricata.j2 │ │ ├── ls-input-suricata.j2 │ │ └── suricata_overrides.yaml.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── zeek │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── GeoIP.conf │ │ ├── profile.d-zeek.sh │ │ ├── zeek-scripts-readme.txt │ │ └── zeekctl.sh │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── ls-filter-zeek.j2 │ │ ├── ls-input-zeek.j2 │ │ ├── networks.cfg.j2 │ │ ├── node.cfg.j2 │ │ └── zeekctl.cfg.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml └── zookeeper │ ├── README.md │ ├── defaults │ └── main.yml │ ├── handlers │ └── main.yml │ ├── tasks │ └── main.yml │ ├── tests │ ├── inventory │ └── test.yml │ └── vars │ └── main.yml └── tests ├── test_common.py ├── test_docket.py ├── test_elasticsearch.py ├── test_filebeat.py ├── test_kafka.py ├── test_kibana.py ├── test_lighttpd.py ├── test_logstash.py ├── test_sensor.py ├── test_stenographer.py ├── test_suricata.py ├── test_zeek.py ├── test_zookeeper.py └── vars ├── common.vars ├── docket.vars ├── elasticsearch.vars ├── filebeat.vars ├── fsf.vars ├── kafka.vars ├── kibana.vars ├── lighttpd.vars ├── logstash.vars ├── sensor.vars ├── stenographer.vars ├── suricata.vars ├── zeek.vars └── zookeeper.vars /.ci/test-shellcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu -o pipefail 2 | 3 | # 1. Find all files, excluding .tox, .ci, .git, and *.j2 files 4 | # check file magic and only print shell scripts 5 | # 2. Read in list of files and send each file to shellcheck, checking as bash 6 | EXITCODE=0 7 | CHECKPATH=$1 8 | 9 | echo "Checking scripts with:" 10 | printf "\t%s\n" "shellcheck --format gcc --shell bash --severity error filename" 11 | 12 | find "${CHECKPATH}" \ 13 | \( -path ./.tox -o -path ./.ci -o -path ./.git -o -name '*.j2' \) -prune -o \ 14 | -type f -exec sh -c 'file "$1" | grep -qE "sh(ell)? 
script"' _ {} \; -print | 15 | (while IFS="" read -r file; do 16 | echo -n "Checking ${file}: " 17 | if ERRORS=$(shellcheck --format gcc --shell bash --severity error "${file}"); then 18 | printf "\e[32m%s\e[0m\n" "OK" 19 | else 20 | printf "\e[31mERROR\n%s\e[0m\n" "$ERRORS" 21 | EXITCODE=1 22 | fi 23 | done 24 | 25 | exit ${EXITCODE}) 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *# 2 | *.iso 3 | *.pcap 4 | *.retry 5 | *.un~ 6 | *~ 7 | .#* 8 | .*.sw[a-z] 9 | .idea* 10 | *_cache 11 | *pycache* 12 | .DS_Store 13 | .bundle/* 14 | .kitchen 15 | .kitchen.local.yml 16 | .kitchen/ 17 | .vagrant 18 | /cookbooks 19 | Berksfile.lock 20 | Gemfile.lock 21 | Vagrantfile.vmware 22 | \#*# 23 | pkg/ 24 | repo/ 25 | tmp/ 26 | .env.yml 27 | .python-version 28 | local-vars.yml 29 | .tox 30 | -------------------------------------------------------------------------------- /.tito/packages/.readme: -------------------------------------------------------------------------------- 1 | the .tito/packages directory contains metadata files 2 | named after their packages. Each file has the latest tagged 3 | version and the project's relative directory. 4 | -------------------------------------------------------------------------------- /.tito/packages/rock: -------------------------------------------------------------------------------- 1 | 2.6.0-1 ./ 2 | -------------------------------------------------------------------------------- /.tito/tito.props: -------------------------------------------------------------------------------- 1 | [buildconfig] 2 | builder = tito.builder.Builder 3 | tagger = tito.tagger.VersionTagger 4 | changelog_do_not_remove_cherrypick = 0 5 | changelog_format = %s (%ae) 6 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | ignore: | 4 | *.molecule/ 5 | molecule/cookiecutter/ 6 | 7 | rules: 8 | braces: 9 | max-spaces-inside: 1 10 | level: error 11 | brackets: 12 | max-spaces-inside: 1 13 | level: error 14 | line-length: disable 15 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | RockNSM 2 | Copyright 2014-2018 RockNSM Foundation 3 | 4 | This product includes software developed by The Rock NSM 5 | Foundation (http://rocknsm.io/). 6 | 7 | This product includes software developed by The Apache Software 8 | Foundation (http://www.apache.org/). 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | [ROCK NSM logo image] 4 | 5 | 7 | Documentation | Download
9 | 10 | ROCK is a collections platform, in the spirit of Network Security Monitoring by contributors from all over industry and the public sector. It's primary focus is to provide a robust, scalable sensor platform for both enduring security monitoring and incident response missions. The platform consists of 3 core capabilities: 11 | 12 | * Passive data acquisition via AF_PACKET, feeding systems for metadata (Bro), signature detection (Suricata), and full packet capture (Stenographer). 13 | * A messaging layer (Kafka and Logstash) that provides flexibility in scaling the platform to meet operational needs, as well as providing some degree of data reliability in transit. 14 | * Reliable data storage and indexing (Elasticsearch) to support rapid retrieval and analysis (Kibana) of the data. 15 | 16 | 17 | ## Features 18 | 19 | * Full Packet Capture via Google Stenographer and Docket. 20 | * Protocol Analysis and Metadata via Bro. 21 | * Signature Based Alerting via Suricata. 22 | * Recursive File Scanning via FSF. 23 | * Message Queuing and Distribution via Apache Kafka. 24 | * Message Transport via Logstash. 25 | * Data Storage, Indexing, and Search via Elasticsearch. 26 | * Data UI and Visualization via Kibana. 27 | * Security - The system is developed and tested to run with SELinux enabled. 28 | 29 | 30 | ### Installation and Usage 31 | 32 | Please reference our [documentation](https://rocknsm.gitbooks.io/rocknsm-guide/content/) for all ROCK details to include: 33 | 34 | - installation 35 | - configuration 36 | - deployment 37 | - troubleshooting 38 | 39 | 40 | ### Testing 41 | 42 | We use [molecule](https://molecule.readthedocs.io/) for testing playbooks using 43 | vSphere instances in one or more of the developers' labs. Specifically, we're 44 | using [these CookieCutter](https://github.com/Perched/molecule-cookiecutter-vsphere) templates for molecule, as found in the `molecule/` directory. 45 | 46 | If you're looking to run these tests in a different vCenter environment, you'll 47 | need ti edit the `molecule` block in `molecule.yml` for each of the scenarios. 48 | After that, you authenticate using the environment variables `VMWARE_USER` and 49 | `VMWARE_PASSWORD`. These are the standard Ansible environment variables and get 50 | passed to the respective VMware modules. 51 | 52 | In `molecule`, the easiest way to is to create a `.env.yml` file in the root 53 | of the `rock` project directory with this information. Example: 54 | 55 | ```yaml 56 | --- 57 | VMWARE_USER: "myuser@vsphere.local" 58 | VMWARE_PASSWORD: "its-a-secret-to-everybody" 59 | ``` 60 | 61 | You can then run all the tests. 62 | 63 | ```shell 64 | docker run --rm -ti -v $(pwd):/src \ 65 | -w /src quay.io/perched/molecule-vsphere test --all 66 | ``` 67 | 68 | 69 | ## Thanks 70 | This architecture is made possible by the efforts of an ever-growing list of amazing people. Look around our Github to see the whole list. 
71 | 72 | 73 | -------------------------------------------------------------------------------- /RELEASE: -------------------------------------------------------------------------------- 1 | High-level summary of changes: 2 | 3 | New Features 4 | - Docket, a REST API and web UI to query multiple stenographer instances 5 | - Added Suricata-Update to manage Suricata signatures 6 | - GPG signing of packages and repo metadata 7 | - Added functional tests using [testinfra](https://testinfra.readthedocs.io/en/latest/) 8 | - Initial support of [Elastic Common Schema](https://github.com/elastic/ecs) 9 | - Includes full Elastic (with permission) stack including features formerly known as X-Pack 10 | 11 | Upgrading 12 | - Elastic stack is updated to 6.x 13 | - Elastic dashboards, mappings, and Logstash config moved to module-like construct 14 | - Suricata is updated to 4.x 15 | - Bro is updated to 2.5.4 16 | 17 | Deprecated - will be removed in the next release 18 | - Snort 19 | - Pulled Pork 20 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # This Vagrantfile is only for development testing!! Do not try to create a sensor 5 | # using `vagrant up`. It's not going to do what you think it will do. That said 6 | # If you have enough resources, you could feasibly playback PCAP on the dummy0 7 | # interface and analyze the traffic 8 | # 9 | # THIS IS COMPLETELY UNSUPPORTED! 10 | Vagrant.configure(2) do |config| 11 | config.vm.box = "centos/7" 12 | 13 | config.ssh.forward_agent = true 14 | 15 | config.vm.provider "vmware_desktop" do |v| 16 | v.vmx["memsize"] = 16384 17 | v.vmx["numvcpus"] = 8 18 | end 19 | 20 | # ansible required for ROCK 2.0 deployment 21 | # git required to clone ROCK repo 22 | # vim & tmux because of my sanity 23 | config.vm.provision "shell", inline: <<-SHELL 24 | echo "tsflags=nodocs" | tee -a /etc/yum.conf 25 | yum -y install epel-release 26 | #sed -i 's/^mirrorlist/#mirrorlist/; s/^#baseurl/baseurl/' /etc/yum.repos.d/{CentOS-Base.repo,epel.repo} 27 | yum -y install https://packagecloud.io/rocknsm/2_3/packages/el/7/rock-release-2.3-1.noarch.rpm/download.rpm 28 | yum -y update 29 | yum -y install ansible vim git tmux tito 30 | # Create virtual interface 31 | ip link add dummy0 type dummy 32 | # Set the MTU to ludicrous mode 33 | ip link set dev dummy0 mtu 65521 34 | # Bring the interface up 35 | ip link set dummy0 up 36 | SHELL 37 | end 38 | -------------------------------------------------------------------------------- /bin/RESET_TUI: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo rm /etc/rocknsm/*.backup.* 4 | cat /etc/rocknsm/config.yml.orig | sudo tee /etc/rocknsm/config.yml 5 | -------------------------------------------------------------------------------- /bin/deploy_rock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e "\n${0} has been replaced by $(tput setaf 1)$(which rock)$(tput sgr0)\n\nPlease see $(tput setaf 3)rock help$(tput sgr0) for available options.\n" 4 | -------------------------------------------------------------------------------- /bin/reset_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_PATH=$(dirname $(readlink -f $0)) 4 | ROCK_HOME=/usr/share/rock 5 | VERBOSE_FLAGS= 6 | 
if [ "x${DEBUG}" != "x" ]; then 7 | VERBOSE_FLAGS="-vvv" 8 | fi 9 | 10 | 11 | read -p "Do you really want to delete all rock data? " -n 1 -r 12 | echo # (optional) move to a new line 13 | if [[ $REPLY =~ ^[Yy]$ ]] 14 | then 15 | echo "Stopping Rock Services" 16 | 17 | 18 | cd ${ROCK_HOME}/playbooks 19 | ansible-playbook "${ROCK_HOME}/playbooks/delete-data.yml" ${VERBOSE_FLAGS} 20 | ansible-playbook "${ROCK_HOME}/playbooks/deploy-rock.yml" ${VERBOSE_FLAGS} 21 | 22 | /usr/local/bin/rockctl start 23 | fi 24 | -------------------------------------------------------------------------------- /ci/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: 3 | - name: pull-request 4 | type: docker-image 5 | source: 6 | repository: teliaoss/github-pr-resource 7 | 8 | resources: 9 | - name: pr-source 10 | type: pull-request 11 | check_every: 15m 12 | # webhook_token: ((webhook-token)) 13 | source: 14 | repository: rocknsm/rock 15 | access_token: ((github-access-token)) 16 | 17 | jobs: 18 | - name: checks 19 | plan: 20 | - get: pr-source 21 | trigger: true 22 | version: every 23 | - put: pr-source 24 | params: 25 | path: pr-source 26 | status: pending 27 | - task: unit-test 28 | config: 29 | platform: linux 30 | image_resource: 31 | type: docker-image 32 | source: {repository: quay.io/perched/molecule-vsphere, tag: "latest"} 33 | inputs: 34 | - name: pr-source 35 | run: 36 | path: molecule 37 | args: 38 | - test 39 | - --scenario-name 40 | - single-node 41 | dir: pr-source 42 | params: 43 | VMWARE_USER: ((vcenter_username)) 44 | VMWARE_PASSWORD: ((vcenter_password)) 45 | on_failure: 46 | put: pr-source 47 | params: 48 | path: pr-source 49 | status: failure 50 | - put: pr-source 51 | params: 52 | path: pr-source 53 | status: success 54 | ... 55 | -------------------------------------------------------------------------------- /etc/hosts.ini: -------------------------------------------------------------------------------- 1 | [rock] 2 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 3 | 4 | [web] 5 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 6 | 7 | [lighttpd:children] 8 | web 9 | 10 | [sensors:children] 11 | rock 12 | 13 | [zeek:children] 14 | sensors 15 | 16 | [fsf:children] 17 | sensors 18 | 19 | [kafka:children] 20 | sensors 21 | 22 | [stenographer:children] 23 | sensors 24 | 25 | [suricata:children] 26 | sensors 27 | 28 | [filebeat:children] 29 | fsf 30 | suricata 31 | 32 | [zookeeper] 33 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 34 | 35 | [elasticsearch:children] 36 | es_masters 37 | es_data 38 | es_ingest 39 | 40 | [es_masters] 41 | # This group should only ever contain exactly 1 or 3 nodes! 
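# Rationale (general Elasticsearch guidance, not from the original file): master election needs a majority quorum, so an even number of master-eligible nodes (for example, 2) adds no fault tolerance and invites split-brain if quorum is misconfigured; hence exactly 1 for a single-node install or 3 for multi-node.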
42 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 43 | # Multi-node example # 44 | #elasticsearch0[1:3].simplerock.lan 45 | 46 | [es_data] 47 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 48 | # Multi-node example # 49 | #elasticsearch0[1:4].simplerock.lan 50 | 51 | [es_ingest] 52 | simplerockbuild.simplerock.lan ansible_host=127.0.0.1 ansible_connection=local 53 | # Multi-node example # 54 | #elasticsearch0[1:4].simplerock.lan 55 | 56 | [elasticsearch:vars] 57 | # Disable all node roles by default 58 | node_master=false 59 | node_data=false 60 | node_ingest=false 61 | 62 | [es_masters:vars] 63 | node_master=true 64 | 65 | [es_data:vars] 66 | node_data=true 67 | 68 | [es_ingest:vars] 69 | node_ingest=true 70 | 71 | [docket:children] 72 | web 73 | 74 | [kibana:children] 75 | web 76 | 77 | [logstash:children] 78 | sensors 79 | -------------------------------------------------------------------------------- /images/rock_full.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rocknsm/rock/e6f09e3309e14b6f0c1a3df596d9c7dc54ae2655/images/rock_full.png -------------------------------------------------------------------------------- /images/rock_square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rocknsm/rock/e6f09e3309e14b6f0c1a3df596d9c7dc54ae2655/images/rock_square.png -------------------------------------------------------------------------------- /molecule/multi-node/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup 3 | hosts: sensors 4 | gather_facts: true 5 | become: true 6 | ignore_unreachable: true 7 | vars: 8 | rhsm_orgid: "{{ lookup('env', 'RHSM_ORGID') }}" 9 | rhsm_activation_key: "{{ lookup('env', 'RHSM_ACTIVATION_KEY') }}" 10 | tasks: 11 | - name: Unregister with activationkey to RHSM 12 | redhat_subscription: 13 | state: absent 14 | when: ansible_distribution is defined and ansible_distribution == 'RedHat' 15 | -------------------------------------------------------------------------------- /molecule/multi-node/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Destroy 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | no_log: "{{ not (lookup('env', 'MOLECULE_DEBUG') | bool or molecule_yml.provisioner.log|default(false) | bool) }}" 7 | vars: 8 | esxi_hostname: "{{molecule_yml.vmware.esxi_hostname|default(omit)}}" 9 | cluster: "{{molecule_yml.vmware.cluster|default(omit)}}" 10 | vmware_datacenter: "{{molecule_yml.vmware.datacenter}}" 11 | vmware_datastore: "{{molecule_yml.vmware.datastore}}" 12 | resource_pool: "{{molecule_yml.vmware.resource_pool|default('Resources')}}" 13 | environment: 14 | VMWARE_USER: "{{ lookup('env', 'VMWARE_USER') }}" 15 | VMWARE_PASSWORD: "{{ lookup('env', 'VMWARE_PASSWORD') }}" 16 | VMWARE_HOST: "{{ molecule_yml.vmware.vcenter_address }}" 17 | VMWARE_PORT: "{{ molecule_yml.vmware.vcenter_port | default('443') }}" 18 | VMWARE_VALIDATE_CERTS: "{{ molecule_yml.vmware.validate_certs | default('Yes') }}" 19 | tasks: 20 | - name: Destroy molecule instance(s) 21 | vmware_guest: 22 | esxi_hostname: "{{molecule_yml.vmware.esxi_hostname|default(omit)}}" 23 | cluster: "{{molecule_yml.vmware.cluster|default(omit)}}" 24 | datacenter: "{{ vmware_datacenter }}" 25 | folder: "/{{ vmware_datacenter }}/vm" 26 | 
resource_pool: "{{ resource_pool }}" 27 | name: "{{ item.name }}" 28 | state: absent 29 | force: true 30 | register: server 31 | with_items: "{{ molecule_yml.platforms }}" 32 | async: 7200 33 | poll: 0 34 | changed_when: false 35 | 36 | - name: Wait for instance(s) deletion to complete 37 | async_status: 38 | jid: "{{ item.ansible_job_id }}" 39 | register: vmware_jobs 40 | until: vmware_jobs.finished 41 | retries: 300 42 | with_items: "{{ server.results }}" 43 | 44 | - name: Remove RockNSM config directory 45 | file: 46 | state: absent 47 | path: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/etc" 48 | become: true 49 | 50 | # Mandatory configuration for Molecule to function. 51 | - name: Populate instance config 52 | set_fact: 53 | instance_conf: {} 54 | 55 | - name: Dump instance config 56 | copy: 57 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 58 | dest: "{{ molecule_instance_config }}" 59 | when: vmware_jobs.changed | bool 60 | -------------------------------------------------------------------------------- /molecule/multi-node/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | scenario: 3 | name: multi-node # optional 4 | test_sequence: 5 | - dependency 6 | - lint 7 | - cleanup 8 | - destroy 9 | - syntax 10 | - create 11 | - prepare 12 | - converge 13 | # - idempotence 14 | - side_effect 15 | - verify 16 | - cleanup 17 | - destroy 18 | dependency: 19 | name: galaxy 20 | driver: 21 | name: delegated 22 | options: 23 | managed: true 24 | vmware: 25 | network_name: ${VMWARE_NETWORK} 26 | datastore: ${VMWARE_DATASTORE} 27 | datacenter: ${VMWARE_DATACENTER} 28 | vcenter_address: ${VCENTER_ADDRESS} 29 | esxi_hostname: ${VCENTER_ESXI} 30 | validate_certs: true 31 | resource_pool: "Resources" 32 | lint: 33 | name: yamllint 34 | platforms: 35 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-1" 36 | memory: 16384 37 | cpu: 4 38 | disk_size: 64 39 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 40 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 41 | groups: 42 | - rock 43 | - zookeeper 44 | - sensors 45 | - zeek 46 | - fsf 47 | - kafka 48 | - stenographer 49 | - suricata 50 | - filebeat 51 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-2" 52 | memory: 16384 53 | cpu: 4 54 | disk_size: 64 55 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 56 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 57 | groups: 58 | - rock 59 | - zookeeper 60 | - sensors 61 | - zeek 62 | - fsf 63 | - kafka 64 | - stenographer 65 | - suricata 66 | - filebeat 67 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-3" 68 | memory: 16384 69 | cpu: 4 70 | disk_size: 64 71 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 72 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 73 | groups: 74 | - logstash 75 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-4" 76 | memory: 16384 77 | cpu: 4 78 | disk_size: 64 79 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 80 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 81 | groups: 82 | - es_masters 83 | - es_data 84 | - es_ingest 85 | - web 86 | - lighttpd 87 | - elasticsearch 88 | - docket 89 | - kibana 90 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-5" 91 | memory: 16384 92 | cpu: 4 93 | disk_size: 64 94 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 95 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 96 | 
groups: 97 | - es_masters 98 | - es_data 99 | - es_ingest 100 | - elasticsearch 101 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-6" 102 | memory: 16384 103 | cpu: 4 104 | disk_size: 64 105 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 106 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 107 | groups: 108 | - es_masters 109 | - es_data 110 | - es_ingest 111 | - elasticsearch 112 | provisioner: 113 | name: ansible 114 | log: true 115 | become: true 116 | env: 117 | ANSIBLE_ROLES_PATH: "${MOLECULE_PROJECT_DIRECTORY}/roles" 118 | VMWARE_USER: "${VMWARE_USERNAME}" 119 | VMWARE_PASSWORD: "${VMWARE_PASSWORD}" 120 | RHSM_ORGID: "${RHSM_ORGID}" 121 | RHSM_ACTIVATION_KEY: "${RHSM_ACTIVATION_KEY}" 122 | options: 123 | extra-vars: 124 | rock_conf_dir: "${MOLECULE_EPHEMERAL_DIRECTORY}/etc" 125 | rock_config: "${MOLECULE_EPHEMERAL_DIRECTORY}/etc/config" 126 | rock_monifs: ["dummy0"] 127 | rock_disable_offline_repo: true 128 | rock_enable_testing: true 129 | # Must static the es_mem var for now until #386 is resolved 130 | es_mem: 8 131 | config_options: 132 | defaults: 133 | callback_whitelist: timer,profile_tasks,profile_role 134 | connection: 135 | pipelining: true 136 | inventory: 137 | group_vars: 138 | elasticsearch: 139 | node_master: true 140 | node_data: true 141 | node_ingest: true 142 | es_masters: 143 | node_master: true 144 | es_data: 145 | node_data: true 146 | es_ingest: 147 | node_ingest: true 148 | hosts: 149 | all: 150 | children: 151 | rock: 152 | web: 153 | lighttpd: 154 | children: 155 | web: 156 | sensors: 157 | children: 158 | rock: 159 | zeek: 160 | children: 161 | sensors: 162 | fsf: 163 | children: 164 | sensors: 165 | kafka: 166 | children: 167 | sensors: 168 | stenographer: 169 | children: 170 | sensors: 171 | suricata: 172 | children: 173 | sensors: 174 | filebeat: 175 | children: 176 | fsf: 177 | suricata: 178 | zookeeper: 179 | elasticsearch: 180 | children: 181 | es_masters: 182 | es_data: 183 | es_ingest: 184 | es_masters: 185 | es_data: 186 | es_ingest: 187 | docket: 188 | children: 189 | web: 190 | kibana: 191 | children: 192 | web: 193 | logstash: 194 | children: 195 | sensors: 196 | lint: 197 | name: ansible-lint 198 | config_options: 199 | defaults: 200 | roles_path: "${MOLECULE_PROJECT_DIRECTORY}/roles" 201 | 202 | verifier: 203 | name: testinfra 204 | lint: 205 | name: flake8 206 | enabled: false 207 | -------------------------------------------------------------------------------- /molecule/multi-node/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ../../playbooks/generate-defaults.yml 3 | - import_playbook: ../../playbooks/deploy-rock.yml 4 | -------------------------------------------------------------------------------- /molecule/multi-node/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: true 5 | become: true 6 | vars: 7 | rhsm_orgid: "{{ lookup('env', 'RHSM_ORGID') }}" 8 | rhsm_activation_key: "{{ lookup('env', 'RHSM_ACTIVATION_KEY') }}" 9 | tasks: 10 | - name: Add dummy network interface kernel module 11 | modprobe: 12 | name: dummy 13 | state: present 14 | params: numdummies=0 15 | when: "'sensors' in group_names" 16 | 17 | - name: Create dummy0 interface 18 | command: /usr/sbin/ip link add dummy0 type dummy 19 | args: 20 | creates: /sys/class/net/dummy0 21 | when: "'sensors' in group_names" 22 | 23 | - name: Register with activationkey 
to RHSM 24 | redhat_subscription: 25 | state: present 26 | activationkey: "{{ rhsm_activation_key }}" 27 | org_id: "{{ rhsm_orgid }}" 28 | auto_attach: true 29 | when: ansible_distribution == 'RedHat' 30 | 31 | - name: Add extras and optional repos 32 | rhsm_repository: 33 | name: "{{ item }}" 34 | state: enabled 35 | loop: 36 | - "rhel-*-optional-rpms" 37 | - "rhel-*-extras-rpms" 38 | when: ansible_distribution == 'RedHat' 39 | -------------------------------------------------------------------------------- /molecule/multi-node/tests/tests: -------------------------------------------------------------------------------- 1 | ../../tests/ -------------------------------------------------------------------------------- /molecule/single-node/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup 3 | hosts: sensors 4 | gather_facts: true 5 | become: true 6 | ignore_unreachable: true 7 | vars: 8 | rhsm_orgid: "{{ lookup('env', 'RHSM_ORGID') }}" 9 | rhsm_activation_key: "{{ lookup('env', 'RHSM_ACTIVATION_KEY') }}" 10 | tasks: 11 | - name: Unregister with activationkey to RHSM 12 | redhat_subscription: 13 | state: absent 14 | when: ansible_distribution is defined and ansible_distribution == 'RedHat' 15 | -------------------------------------------------------------------------------- /molecule/single-node/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Destroy 3 | hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | no_log: "{{ not (lookup('env', 'MOLECULE_DEBUG') | bool or molecule_yml.provisioner.log|default(false) | bool) }}" 7 | vars: 8 | esxi_hostname: "{{molecule_yml.vmware.esxi_hostname|default(omit)}}" 9 | cluster: "{{molecule_yml.vmware.cluster|default(omit)}}" 10 | vmware_datacenter: "{{molecule_yml.vmware.datacenter}}" 11 | vmware_datastore: "{{molecule_yml.vmware.datastore}}" 12 | resource_pool: "{{molecule_yml.vmware.resource_pool|default('Resources')}}" 13 | environment: 14 | VMWARE_USER: "{{ lookup('env', 'VMWARE_USER') }}" 15 | VMWARE_PASSWORD: "{{ lookup('env', 'VMWARE_PASSWORD') }}" 16 | VMWARE_HOST: "{{ molecule_yml.vmware.vcenter_address }}" 17 | VMWARE_PORT: "{{ molecule_yml.vmware.vcenter_port | default('443') }}" 18 | VMWARE_VALIDATE_CERTS: "{{ molecule_yml.vmware.validate_certs | default('Yes') }}" 19 | tasks: 20 | - name: Destroy molecule instance(s) 21 | vmware_guest: 22 | esxi_hostname: "{{molecule_yml.vmware.esxi_hostname|default(omit)}}" 23 | cluster: "{{molecule_yml.vmware.cluster|default(omit)}}" 24 | datacenter: "{{ vmware_datacenter }}" 25 | folder: "/{{ vmware_datacenter }}/vm" 26 | resource_pool: "{{ resource_pool }}" 27 | name: "{{ item.name }}" 28 | state: absent 29 | force: true 30 | register: server 31 | with_items: "{{ molecule_yml.platforms }}" 32 | async: 7200 33 | poll: 0 34 | changed_when: false 35 | 36 | - name: Wait for instance(s) deletion to complete 37 | async_status: 38 | jid: "{{ item.ansible_job_id }}" 39 | register: vmware_jobs 40 | until: vmware_jobs.finished 41 | retries: 300 42 | with_items: "{{ server.results }}" 43 | 44 | - name: Remove RockNSM config directory 45 | file: 46 | state: absent 47 | path: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/etc" 48 | become: true 49 | 50 | # Mandatory configuration for Molecule to function. 
51 | - name: Populate instance config 52 | set_fact: 53 | instance_conf: {} 54 | 55 | - name: Dump instance config 56 | copy: 57 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 58 | dest: "{{ molecule_instance_config }}" 59 | when: vmware_jobs.changed | bool 60 | -------------------------------------------------------------------------------- /molecule/single-node/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | scenario: 3 | name: single-node # optional 4 | test_sequence: 5 | - dependency 6 | - lint 7 | - cleanup 8 | - destroy 9 | - syntax 10 | - create 11 | - prepare 12 | - converge 13 | # - idempotence 14 | - side_effect 15 | - verify 16 | - cleanup 17 | - destroy 18 | dependency: 19 | name: galaxy 20 | driver: 21 | name: delegated 22 | options: 23 | managed: true 24 | vmware: 25 | network_name: ${VMWARE_NETWORK} 26 | datastore: ${VMWARE_DATASTORE} 27 | datacenter: ${VMWARE_DATACENTER} 28 | vcenter_address: ${VCENTER_ADDRESS} 29 | esxi_hostname: ${VCENTER_ESXI} 30 | validate_certs: true 31 | resource_pool: "Resources" 32 | lint: 33 | name: yamllint 34 | platforms: 35 | - name: "rock-molecule-${MOLECULE_SCENARIO_NAME}${TOX_ENVNAME}-instance-1" 36 | memory: 16384 37 | cpu: 4 38 | disk_size: 64 39 | guest_id: ${TEMPLATE_DISTRO-centos}7_64Guest 40 | template_pattern: '^template-${TEMPLATE_DISTRO-centos}-7' 41 | children: 42 | - rock 43 | - web 44 | - es_masters 45 | - es_data 46 | - es_ingest 47 | - zookeeper 48 | groups: 49 | - lighttpd 50 | - sensors 51 | - zeek 52 | - fsf 53 | - kafka 54 | - stenographer 55 | - suricata 56 | - filebeat 57 | - elasticsearch 58 | - docket 59 | - kibana 60 | - logstash 61 | provisioner: 62 | name: ansible 63 | log: true 64 | become: true 65 | env: 66 | ANSIBLE_ROLES_PATH: "${MOLECULE_PROJECT_DIRECTORY}/roles" 67 | VMWARE_USER: "${VMWARE_USERNAME}" 68 | VMWARE_PASSWORD: "${VMWARE_PASSWORD}" 69 | RHSM_ORGID: "${RHSM_ORGID}" 70 | RHSM_ACTIVATION_KEY: "${RHSM_ACTIVATION_KEY}" 71 | options: 72 | extra-vars: 73 | rock_conf_dir: "${MOLECULE_EPHEMERAL_DIRECTORY}/etc" 74 | rock_config: "${MOLECULE_EPHEMERAL_DIRECTORY}/etc/config" 75 | rock_monifs: ["dummy0"] 76 | rock_disable_offline_repo: true 77 | rock_enable_testing: true 78 | # Must static the es_mem var for now until #386 is resolved 79 | es_mem: 8 80 | config_options: 81 | defaults: 82 | callback_whitelist: timer,profile_tasks,profile_role 83 | connection: 84 | pipelining: true 85 | inventory: 86 | group_vars: 87 | elasticsearch: 88 | node_master: false 89 | node_data: false 90 | node_ingest: false 91 | es_masters: 92 | node_master: true 93 | es_data: 94 | node_data: true 95 | es_ingest: 96 | node_ingest: true 97 | lint: 98 | name: ansible-lint 99 | config_options: 100 | defaults: 101 | roles_path: "${MOLECULE_PROJECT_DIRECTORY}/roles" 102 | 103 | verifier: 104 | name: testinfra 105 | lint: 106 | name: flake8 107 | enabled: false 108 | -------------------------------------------------------------------------------- /molecule/single-node/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ../../playbooks/generate-defaults.yml 3 | - import_playbook: ../../playbooks/deploy-rock.yml 4 | -------------------------------------------------------------------------------- /molecule/single-node/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: 
true 5 | become: true 6 | vars: 7 | rhsm_orgid: "{{ lookup('env', 'RHSM_ORGID') }}" 8 | rhsm_activation_key: "{{ lookup('env', 'RHSM_ACTIVATION_KEY') }}" 9 | tasks: 10 | - name: Add dummy network interface kernel module 11 | modprobe: 12 | name: dummy 13 | state: present 14 | params: numdummies=0 15 | when: "'sensors' in group_names" 16 | 17 | - name: Create dummy0 interface 18 | command: /usr/sbin/ip link add dummy0 type dummy 19 | args: 20 | creates: /sys/class/net/dummy0 21 | when: "'sensors' in group_names" 22 | 23 | - name: Register with activationkey to RHSM 24 | redhat_subscription: 25 | state: present 26 | activationkey: "{{ rhsm_activation_key }}" 27 | org_id: "{{ rhsm_orgid }}" 28 | auto_attach: true 29 | when: ansible_distribution is defined and ansible_distribution == 'RedHat' 30 | 31 | - name: Add extras and optional repos 32 | rhsm_repository: 33 | name: "{{ item }}" 34 | state: enabled 35 | loop: 36 | - "rhel-*-optional-rpms" 37 | - "rhel-*-extras-rpms" 38 | when: ansible_distribution is defined and ansible_distribution == 'RedHat' 39 | -------------------------------------------------------------------------------- /molecule/single-node/tests: -------------------------------------------------------------------------------- 1 | ../../tests -------------------------------------------------------------------------------- /playbooks/auth-mgmt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | become: true 5 | vars: 6 | - public_keys: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" 7 | vars_files: 8 | - "{{ rock_config }}" 9 | tasks: 10 | - name: Generate SSH keys 11 | become: false 12 | command: "ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa" 13 | args: 14 | creates: ~/.ssh/id_rsa.pub 15 | delegate_to: localhost 16 | run_once: true 17 | 18 | - name: Set authorized keys 19 | authorized_key: 20 | user: "{{ ansible_env.SUDO_USER }}" 21 | state: present 22 | key: "{{ public_keys }}" 23 | 24 | - name: Enable sudo w/o password 25 | lineinfile: 26 | path: /etc/sudoers 27 | state: present 28 | regexp: '^{{ ansible_env.SUDO_USER }}\s' 29 | line: '{{ ansible_env.SUDO_USER }} ALL=(ALL) NOPASSWD: ALL' 30 | -------------------------------------------------------------------------------- /playbooks/debug.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - debug: msg="Dumping variables for debug" 3 | - debug: var=rock_debug 4 | - debug: var=rock_online_install 5 | - debug: var=rock_data_dir 6 | - debug: var=zeek_data_dir 7 | - debug: var=suricata_data_dir 8 | - debug: var=stenographer_data_dir 9 | - debug: var=rock_data_user 10 | - debug: var=es_mem 11 | - debug: var=zeek_cpu 12 | - debug: var=rock_monifs 13 | - debug: var=epel_baseurl 14 | - debug: var=epel_gpgurl 15 | - debug: var=elastic_baseurl 16 | - debug: var=elastic_gpgurl 17 | - debug: var=rocknsm_baseurl 18 | - debug: var=rocknsm_gpgurl 19 | - debug: var=rocknsm_local_baseurl 20 | - debug: var=http_proxy 21 | - debug: var=https_proxy 22 | -------------------------------------------------------------------------------- /playbooks/delete-data.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | rock_debug: "{{ lookup('env', 'DEBUG') }}" 5 | http_proxy: "{{ lookup('env','http_proxy') }}" 6 | https_proxy: "{{ lookup('env', 'https_proxy') }}" 7 | tasks: 8 | - name: Get default settings 9 | include_vars: rocknsm_config.dist.yml 10 | - name: Apply override 
settings, if available 11 | include_vars: "{{ rock_config }}" 12 | ignore_errors: true 13 | failed_when: false 14 | - name: Debug variables 15 | include: debug.yml 16 | when: rock_debug is defined and rock_debug 17 | 18 | ###################################################### 19 | ######### Stop Services ############################## 20 | ###################################################### 21 | - name: Stop rocknsm services 22 | command: /sbin/rock_stop 23 | 24 | ###################################################### 25 | ######### Delete Data ################################ 26 | ###################################################### 27 | - name: Remove rock_data_dir 28 | file: 29 | state: absent 30 | path: "{{ rock_data_dir }}" 31 | -------------------------------------------------------------------------------- /playbooks/deploy-rock.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | tags: 4 | - common 5 | pre_tasks: 6 | - name: Include user-override vars 7 | include_vars: "{{ rock_config }}" 8 | become: true 9 | roles: 10 | - common 11 | 12 | - hosts: elasticsearch 13 | tags: 14 | - elasticsearch 15 | - elastic 16 | - es_before 17 | pre_tasks: 18 | - name: Include user-override vars 19 | include_vars: "{{ rock_config }}" 20 | vars: 21 | - es_step: "before" 22 | become: true 23 | roles: 24 | - role: elasticsearch 25 | when: "'elasticsearch' in installed_services" 26 | 27 | - hosts: elasticsearch 28 | tags: 29 | - elasticsearch 30 | - elastic 31 | - es_restart 32 | pre_tasks: 33 | - name: Include user-override vars 34 | include_vars: "{{ rock_config }}" 35 | vars: 36 | - es_step: "restart" 37 | serial: 1 38 | become: true 39 | roles: 40 | - role: elasticsearch 41 | when: "'elasticsearch' in installed_services and (es_restart is defined and es_restart)" 42 | 43 | - hosts: elasticsearch 44 | tags: 45 | - elasticsearch 46 | - elastic 47 | - es_after 48 | pre_tasks: 49 | - name: Include user-override vars 50 | include_vars: "{{ rock_config }}" 51 | vars: 52 | - es_step: "after" 53 | become: true 54 | roles: 55 | - role: elasticsearch 56 | when: "'elasticsearch' in installed_services" 57 | 58 | - hosts: zookeeper 59 | tags: 60 | - zookeeper 61 | pre_tasks: 62 | - name: Include user-override vars 63 | include_vars: "{{ rock_config }}" 64 | become: true 65 | roles: 66 | - role: zookeeper 67 | when: "'zookeeper' in installed_services" 68 | 69 | - hosts: kafka 70 | tags: 71 | - kafka 72 | pre_tasks: 73 | - name: Include user-override vars 74 | include_vars: "{{ rock_config }}" 75 | become: true 76 | roles: 77 | - role: kafka 78 | when: "'kafka' in installed_services" 79 | 80 | - hosts: stenographer 81 | tags: 82 | - docket 83 | - stenographer 84 | pre_tasks: 85 | - name: Include user-override vars 86 | include_vars: "{{ rock_config }}" 87 | become: true 88 | roles: 89 | - role: stenographer 90 | when: "'stenographer' in installed_services" 91 | stenographer_monitor_interfaces: "{{ rock_monifs }}" 92 | 93 | - hosts: zeek 94 | tags: 95 | - zeek 96 | - sensor 97 | pre_tasks: 98 | - name: Include user-override vars 99 | include_vars: "{{ rock_config }}" 100 | become: true 101 | roles: 102 | - role: zeek 103 | when: "'zeek' in installed_services" 104 | 105 | - hosts: suricata 106 | tags: 107 | - suricata 108 | - sensor 109 | pre_tasks: 110 | - name: Include user-override vars 111 | include_vars: "{{ rock_config }}" 112 | become: true 113 | roles: 114 | - role: suricata 115 | when: "'suricata' in installed_services" 116 | 117 | - 
hosts: 118 | - docket 119 | - kibana 120 | tags: 121 | - docket 122 | - kibana 123 | - lighttpd 124 | - web 125 | pre_tasks: 126 | - name: Include user-override vars 127 | include_vars: "{{ rock_config }}" 128 | become: true 129 | roles: 130 | - role: lighttpd 131 | when: "'lighttpd' in installed_services" 132 | 133 | - hosts: 134 | - docket 135 | - stenographer 136 | tags: 137 | - docket 138 | - stenographer 139 | pre_tasks: 140 | - name: Include user-override vars 141 | include_vars: "{{ rock_config }}" 142 | become: true 143 | roles: 144 | - role: docket 145 | when: "'docket' in installed_services" 146 | docket_enable: "{{ 'docket' in enabled_servicesl }}" 147 | 148 | - hosts: kibana 149 | tags: 150 | - kibana 151 | - elastic 152 | pre_tasks: 153 | - name: Include user-override vars 154 | include_vars: "{{ rock_config }}" 155 | become: true 156 | roles: 157 | - role: kibana 158 | when: "'kibana' in installed_services" 159 | -------------------------------------------------------------------------------- /playbooks/enable-xfs-quotas.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | tasks: 5 | 6 | - name: Discover facts about /data mount 7 | set_fact: 8 | rock_mounts: 9 | mount: "{{ item.mount }}" 10 | device: "{{ item.device }}" 11 | size_total: "{{ item.size_total }}" 12 | loop: 13 | "{{ ansible_mounts }}" 14 | when: (item.mount == default_mount and rock_mounts is not defined) 15 | 16 | - debug: 17 | msg: "Unable to set quotas, ensure that default_mount exists and is defined" 18 | failed_when: rock_mounts.mount != default_mount 19 | when: rock_mounts.mount != default_mount 20 | 21 | - name: Enable project quotas on default_mount 22 | mount: 23 | path: "{{ rock_mounts.mount }}" 24 | opts: defaults,prjquota 25 | state: present 26 | fstype: xfs 27 | src: "{{ rock_mounts.device }}" 28 | when: rock_mounts.mount == default_mount 29 | 30 | - pause: 31 | prompt: "A reboot is required for the changes to take effect. Do you want to reboot now? 
(y/n)" 32 | echo: true 33 | register: input 34 | 35 | - name: Reboot 36 | command: systemctl reboot 37 | when: input.user_input == 'y' 38 | -------------------------------------------------------------------------------- /playbooks/generate-defaults.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: true 4 | vars: 5 | - config_src: rock_config.yml.j2 6 | tags: 7 | - common 8 | - local 9 | tasks: 10 | - name: Create config directory 11 | file: 12 | state: directory 13 | owner: root 14 | group: root 15 | mode: 0755 16 | path: "{{ rock_conf_dir }}" 17 | 18 | - name: Render template 19 | template: 20 | backup: true 21 | src: "{{ config_src }}" 22 | dest: "{{ rock_config }}" 23 | owner: root 24 | group: root 25 | mode: 0644 26 | -------------------------------------------------------------------------------- /playbooks/manage-services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | pre_tasks: 4 | - name: Include user-override vars 5 | include_vars: "{{ rock_config }}" 6 | tasks: 7 | - name: Populate service facts 8 | service_facts: 9 | 10 | - name: Flatten ROCK services 11 | set_fact: 12 | flat_services: "{{ ansible_facts.services | flatten(levels=1) | replace('.service', '') }}" 13 | 14 | - name: collect enabled services 15 | set_fact: 16 | enabled_services: "{{ rock_services | rejectattr('enabled', 'equalto', False) | map(attribute='name') | list }}" 17 | 18 | - name: Collect ROCK services from facts 19 | set_fact: 20 | host_services: "{{ flat_services | intersect(enabled_services) }}" 21 | 22 | - name: Perform requested service action 23 | service: 24 | name: "{{ item }}" 25 | state: "{{ service_state }}" 26 | loop: "{{ host_services }}" 27 | tags: 28 | - service_mgmt 29 | 30 | - name: Register service status 31 | shell: > 32 | /bin/bash -c "systemctl status {{ item }} | tee >(grep -o 'Active: .*') >( 33 | tail -2 | sed 's/.*\: //' | tee >(jq) | fold -w100) > /dev/null" | 34 | tr '"' ' ' | 35 | tr '\\\\' '\\' 36 | loop: "{{ host_services }}" 37 | register: services 38 | changed_when: false 39 | tags: 40 | - service_status 41 | 42 | - name: Output service status 43 | debug: 44 | msg: "{{ item.stdout }}" 45 | loop: "{{ services.results }}" 46 | loop_control: 47 | label: "{{ item.item }}" 48 | tags: 49 | - service_status 50 | -------------------------------------------------------------------------------- /playbooks/roles: -------------------------------------------------------------------------------- 1 | ../roles -------------------------------------------------------------------------------- /playbooks/setup-deploy-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | tasks: 6 | - name: Install RockNSM GPG keys 7 | copy: 8 | src: "{{ item }}" 9 | dest: "/etc/pki/rpm-gpg/{{ item }}" 10 | mode: 0644 11 | owner: root 12 | group: root 13 | with_items: 14 | - RPM-GPG-KEY-RockNSM-2 15 | - RPM-GPG-KEY-RockNSM-Testing 16 | - RPM-GPG-KEY-RockNSM-pkgcloud-2_5 17 | 18 | - name: Trust RockNSM GPG keys 19 | rpm_key: 20 | state: present 21 | key: "{{ item.path }}" 22 | with_items: 23 | - { repoid: "rocknsm_2_5", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2" } 24 | - { repoid: "rocknsm_2_5", path: "/etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_5" } 25 | register: registered_keys 26 | 27 | - name: Configure RockNSM online repos 28 | yum_repository: 29 | file: 
rocknsm 30 | name: "{{ item.name }}" 31 | enabled: "{{ rock_online_install }}" 32 | description: "{{ item.name }}" 33 | baseurl: "{{ item.baseurl }}" 34 | repo_gpgcheck: 1 35 | gpgcheck: "{{ item.gpgcheck }}" 36 | gpgkey: 37 | - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_5 38 | - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 39 | sslverify: 1 40 | sslcacert: /etc/pki/tls/certs/ca-bundle.crt 41 | metadata_expire: 300 42 | cost: 750 43 | state: present 44 | with_items: 45 | - { name: "rocknsm_2_5", gpgcheck: true, baseurl: "{{ rocknsm_baseurl }}" } 46 | - { name: "rocknsm_2_5-source", gpgcheck: false, baseurl: "{{ rocknsm_srpm_baseurl }}" } 47 | 48 | - name: Trust RockNSM GPG keys in yum 49 | command: "yum -q makecache -y --disablerepo='*' --enablerepo='{{ item.repoid }}'" 50 | with_items: 51 | - { repoid: "rocknsm_2_5", test: "{{ rock_online_install }}" } 52 | - { repoid: "rocknsm_2_5-source", test: "{{ rock_online_install }}" } 53 | when: item.test | bool 54 | changed_when: false 55 | # TODO: Fix this ^^ 56 | 57 | - name: Install support packages 58 | yum: 59 | name: 60 | - python2-jinja2 61 | - python2-markupsafe 62 | state: latest 63 | -------------------------------------------------------------------------------- /playbooks/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: deploy-rock.yml 3 | -------------------------------------------------------------------------------- /playbooks/templates/ifcfg-monif.j2: -------------------------------------------------------------------------------- 1 | TYPE=Ethernet 2 | BOOTPROTO=none 3 | IPV4_FAILURE_FATAL=no 4 | IPV6INIT=no 5 | IPV6_FAILURE_FATAL=no 6 | NAME={{ item }} 7 | DEVICE={{ item }} 8 | ONBOOT=yes 9 | NM_CONTROLLED=no 10 | -------------------------------------------------------------------------------- /playbooks/templates/ifup-local.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # File: /sbin/ifup-local 3 | # 4 | # 5 | # This script is run after normal sysconfig network-script configuration 6 | # is performed on RHEL/CentOS-based systems. 7 | # 8 | # Parameters: 9 | # $1: network interface name 10 | # 11 | # Post ifup configuration for tuning capture interfaces 12 | # This is compatible with the ixgbe driver, YMMV 13 | 14 | # Change this to something like /tmp/ifup-local.log for troubleshooting 15 | LOG=/dev/null 16 | #LOG=/tmp/ifup-local.log 17 | 18 | case $1 in 19 | {{ rock_monifs | list | join('|') }}) 20 | 21 | for i in rx tx sg tso ufo gso gro lro rxvlan txvlan 22 | do 23 | ethtool -K $1 $i off &>$LOG 24 | done 25 | 26 | ethtool -N $1 rx-flow-hash udp4 sdfn &>$LOG 27 | ethtool -N $1 rx-flow-hash udp6 sdfn &>$LOG 28 | ethtool -n $1 rx-flow-hash udp6 &>$LOG 29 | ethtool -n $1 rx-flow-hash udp4 &>$LOG 30 | ethtool -C $1 rx-usecs 10 &>$LOG 31 | ethtool -C $1 adaptive-rx off &>$LOG 32 | ethtool -G $1 rx 4096 &>$LOG 33 | 34 | # Disable ipv6 35 | echo 1 > /proc/sys/net/ipv6/conf/$1/disable_ipv6 &>$LOG 36 | echo 0 > /proc/sys/net/ipv6/conf/$1/autoconf &>$LOG 37 | 38 | # Set promiscuous mode 39 | ip link set $1 promisc on &>$LOG 40 | 41 | # Just in case ipv6 is already on this interfaces, let's kill it 42 | ip addr show dev $1 | grep --silent inet6 43 | 44 | if [ $? 
-eq 0 ] 45 | then 46 | ADDR=$(ip addr show dev $1 | grep inet6 | awk '{ print $2 }') 47 | ip addr del $ADDR dev $1 &>$LOG 48 | fi 49 | 50 | ;; 51 | 52 | *) 53 | # No post commands needed for this interface 54 | ;; 55 | 56 | esac 57 | -------------------------------------------------------------------------------- /playbooks/templates/setup_config.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | rock_monifs: 3 | {{ rock_monifs | to_nice_yaml | indent(2, False) }} 4 | rock_mgmtifs: null 5 | rock_online_install: {{ rock_online_install }} 6 | rock_services: 7 | {{ rock_services | to_nice_yaml | indent(2, False) }} 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible 2 | ansible-lint 3 | flake8 4 | molecule 5 | pyvmomi 6 | testinfra 7 | yamllint 8 | -------------------------------------------------------------------------------- /roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | method: deploy 3 | ... 4 | -------------------------------------------------------------------------------- /roles/common/files/RPM-GPG-KEY-RockNSM-2: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQINBFkh3UQBEADOhU75ooT5PGwfc9cWidRdOQRBKJndKh2poUev8/RD9KhhJa8L 4 | EKPOjElOw6d0Kf4vCsYVPqnpKay1rAQsxtEw2fh/GL0SoTUPiPZKtOiNzZLz/lqN 5 | kh3wudL5O2FPHeqNT8ElYCfxJK+Hujf10praCfsee51wP8DozO8E6doMYOe4RKSC 6 | 2PE/4S2IR99Y3wJ5uwg/rp79jFr6g6f08OVcbyttCqvcGAOJzulu9OBF+E7vs9JD 7 | S+Bkxh3FffHW1tMoYOqko6AtTCa4JjehQnvqCr+w58S2dmYBPSzTVcWD4KvmpUQQ 8 | dCqk7nl/ZPMCn+/CpqAAeiKMD66Q+R08cEI1qFmhuO1wo+xxfXWKLl7SZo2AU5oi 9 | 3h9tW8fk0EVGEFSnZoGM8FzcLMnNczNVtG9ZaMyYMhJui3c1y9855J3USfHzP1pd 10 | +/zdGSfkbCM9edRx3r2t5tx6HBKJEaLW0Iu26XNoqGNK623yShKG9p4bP6HCO5zE 11 | dnd8qWsIROM+CYrLohucJFtd5bNHeGXMSCVOp01ocXH5yqeCEHSOVONDp9SFl+8Z 12 | Lbj1n4ofo7CMI/Svl3S8r4GsB/jRyBD14WLZ8jmzi5IwHPeWlGXpt+a8Q+J3KeAY 13 | hGqXxTlB+4u/mOc7dqtul2HVEsza8jcOxyNevuh4RPEzeoRhKB6KtmDlAQARAQAB 14 | tERST0NLTlNNIDIgS2V5IChST0NLTlNNIDIgT2ZmaWNpYWwgU2lnbmluZyBLZXkp 15 | IDxzZWN1cml0eUByb2NrbnNtLmlvPokCPgQTAQIAKAUCWSHdRAIbAwUJCWYBgAYL 16 | CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQuyAYPXg6uIl1dQ//S70Af95uiffI 17 | Cefkk80fqPfPnH7d61/5ZgBQzCOXe+9JbqJ9BFDlVlZcbyHeQ1l4TIlgNrRbiNZ4 18 | 2J9u1Xc0oc3dEbgyJ87kkR/HcrXUINeLhNJPv2Awt20N7UayFBkcImUW3CQ5lRxQ 19 | 7rzkOoQ1UgVd7bT1DTsac62p1k50+NX/r1Eue1HmWz0OZeyfBTmfzf+aLoR/LLex 20 | zC9MZlxSzwPJ18tV66mcPbav6Y+6QwuqHMmTx38cXfeHzRWkbVrV6MdYQEcFlZXG 21 | FAnatpofYmrjPrdF3HAcdm/9GC1nAa/d0h67wwt9x2Ail/Kjj5+zYUul3WC2Cd7X 22 | vWcms4b8IWAdl7beHEeQNLFivR702BN/SmiwtLVbYVUXrJ2dkVWB264zjp3grh9/ 23 | bQJEQAy/JQsEfUIIAv/9ok1R3Bhy0cCawv9wtLUcbXRMSzCB+ECjOBrEJ0qKNUqi 24 | STqEmomUkEQYgHxzf9xOFkZ0DmadF0axXBA5Nw5Q6Hp9b4/jJfe5U5XrP0t3D1pP 25 | QkgzWUwa0NB+aREjGurrtBe4UZWuBoj7WteTVdhhP4G6OwHyXRYRp9u24SOR7fR/ 26 | SKJfjPLBIwutz9VnjxpvokhCtc5+EoAjVs0X3xZ8aZhq+CzSXjqdkErStcL1eT+a 27 | 0OwAiXLQ1/PGogn24FQ7rphyeJv0Kru5Ag0EWSHdRAEQANEo3EAXJ9IaQqoS3T9x 28 | wRZ1sYSjAZIKzdDS165YdiEZtwOqNx5UNCwBkOOTLnu07Sn9ayy1Bn5SC90IxjkO 29 | BnHcCmkcxrq157KJl5lTTi+prcJAtQMfacW23IyFcX+lSjnLEwyjngkVptt1kEbS 30 | rmJl+OpmP6do56W9eRbKhXE/L5ebz+PTEJLqRSARGUiyMZB8t8072qLv5fLjJghe 31 | +IXfoPPNnQJ7vq0+t1SX4uRPmChgL77VnEluOS0rOk+8pguzWmnBbmqTXlDvUUbS 32 | Oa+zHFT5WhNHeoSyQ1OTW7sOL0my7hRqMlwS+NMl899RXXb2rowKC1CfjztTsXhi 33 | 
Ig4XmmlvDd0OZ0N3wBHFOU4llOfcqO4svtQzJN1tXldA4maaWK3LxuXOHgp9kWzT 34 | LyTZsv1ALXL4KMgyxAcjLy+H79QAEs1N4TDtosf6I4A9+q9ReameR94Rxkxjk5N4 35 | jFbNuZXCHiU+uPNsmehhRKCeRCbW03al6Ev0bzqj56bKT2S48uMSM47hDN6kLBFd 36 | IDFHjbYwN9qeS4XoSpT2fF43FPUw2wppBduUikurCBIbeKBA+bL+4R3Qrwf+M53f 37 | rYzX0wQN/BIpXq+LZri7CHYEr2/eXa+rgB12qvUQnAJ8/pK1c4VhgBG+BYj2tkhz 38 | +QeF5gRx3vv90DMnVgIf3LdhABEBAAGJAiUEGAECAA8FAlkh3UQCGwwFCQlmAYAA 39 | CgkQuyAYPXg6uInVOw/+OzPuvD0c/Pg+YD69YGOUF4n5iJkgc4YtBn5nQfPHwkul 40 | Yeohjzoxd7eHcx7JeM7pzHA4kUtoq7OOfs/1pZICOqyPqxZEdDkn7uCxMFEMXnD9 41 | mciXXSZC9229Vabbgd4mtseBtt1lIEj4zZG8zgd5J4LGrFh/jAogPTywAvJ+XwRT 42 | SDLHJsPE8/T6jV1nPf2mLlU36IvijxhGyY3WviPaLKA4uUXilJA+47uEd7rSrAJy 43 | 3Wg33u/2mvMCv5XV149KYg0lwPqSbHwTPxnHlvfewZ8/x6fjFmET9l78Fe9c8Yi9 44 | FuWNve79gAygT2GV+Ca2mfqEyF/zw5UYYn8FR7nf9Su2nt+fNlto6OOUqis5NQJr 45 | X0wqDX8z/0eRZSFHH0Wr2bcaxY9W1Dk8iW8VPyiNooB2JwYmYY96to/KNJjZpfPf 46 | sC/J0CBVyPg7SIxdGvHEs5To/XL7g8upD2JKcKGP5gwECrXDSNY8ZuclY3v0WXvy 47 | JvWerPXYRlLZUi3shjuveR4H2yduo6io/Z/YVvGEbMPNDA/KTdVM2yCmhO1zGSkG 48 | 5AAuUxzjDdvFJzeJgB0mMaoYgKwnhWwVH+9RYG9/AYA+3qZPIVjgI8AAW1L/P/5y 49 | ORbsaXMqhBaUA4ddUEVLt81MBgFUcraT81io/ryugbAVvQG1PW5kPyZFeSr4+10= 50 | =vLlV 51 | -----END PGP PUBLIC KEY BLOCK----- 52 | -------------------------------------------------------------------------------- /roles/common/files/RPM-GPG-KEY-RockNSM-Testing: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQENBFvP8aUBCADm7RYoOaIY4ns3sdhs4EQuNXgrfoYQXA7+yWkiqC8xlbw9KEMZ 4 | EUUt3JBTssKmQvyZ1alBarbqHJ363KTI/pzz1wNgWzw0c67pDPmBE4MPYfFdHyLB 5 | 5RQuJqgY1vQJ6Qj11FIq/IXGZsb29x43CbpnrgVBP5QJddxlxOwIQM8vq7pDfUs4 6 | gBtdm8VkrSXohwXNp8cHtePwm7n59/Xr8kuxI58T85cynggLbeqmiGhf6sOx7n1+ 7 | 3w7LvCBEQrfxD2Uv8gFPZ/vi09+kZeLjU/X1aJRMReqO8Iv1xuG+yVPkLptu753v 8 | n1B4TGiUuANEdCSr6JRUlV91MtQ2o6DTo6bNABEBAAG0QEByb2NrbnNtX3Rlc3Rp 9 | bmcgKE5vbmUpIDxAcm9ja25zbSN0ZXN0aW5nQGNvcHIuZmVkb3JhaG9zdGVkLm9y 10 | Zz6JAVQEEwEIAD4WIQQQqzlcCBf32JPi1C5ytiz1/oomfwUCW8/xpQIbLwUJCWYB 11 | gAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRBytiz1/oomf60OB/wMaYonksnF 12 | NFQLViLZ2ZGLzx86x7Lz5YbrOP4GscQgTkZQAEBDiNTA8LMUfqDeNo+MwR33AYm2 13 | 452sEnApICGSfU48Sy8+CwTTcB+i7Bos3Vf5cxyb+BjsBSmrbuEQeMfIyD+emiAC 14 | QqumDsH0F22XttIzuIDOSnfIh+bICxdzkO9AnZImmi4kLLYDr2ahTNBjcWTR1RIU 15 | l9uoZP6m3Mt0t79E3rKtZVH1Cpi98fgFsq3M2W51Cy5qdAvSjGxBvl87/VzdECQ8 16 | d2RhIuve1HetEIDkmObR3P6vILdSloDck7JxpRBkdoSpm6VBz7GORgNlx2Gn1Esd 17 | dm51XaDgyTmW 18 | =oJl4 19 | -----END PGP PUBLIC KEY BLOCK----- 20 | -------------------------------------------------------------------------------- /roles/common/files/RPM-GPG-KEY-RockNSM-pkgcloud-2_5: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQINBF1eJGgBEAC/ba14bsOpTBA1/DB2bG1c/Gg+0gEaQnmfj5qSRDkOcZPMd0HI 4 | zQKME4r6fstfpIbDlQwV6G2LT9mZI7wh2tVsJgomwqouVgVSlIqcqjxAzNEKx/5h 5 | pE9GZFyLz+2nW61AvVRSnVdiwRJnv54Dmu/TssKjFinScAOpqG1i2y7zqC5Av6U2 6 | x1VzFfYCdUGyQxmWqxxcy8A8e1riuz9x/UhtaI9E1k6chLbx6PZuRbZCZ74u87MU 7 | k2sgKhg4JrXYr4Qo5+hdiWmmqmwFs6BDzSXRj+P3W8bX1iehtW7eqb9F5rbMh1e0 8 | Zph4DvQV+AHMJGQrympL4J8v7eMWzAM4Qay6l/UH6/ylXpnL/CmPBiWXBrXXONIe 9 | uwNuIpNvkKrD9lB0QI+kaKi2ReCVSxrBuI35gjo/TgtM1+5B+wd0Q+XJ7mc3fWeH 10 | o1raMFDygrJeZ4tUuM8dhAk5mCiT2QDDfQcjIPxvDEoIZNMYRI7DkkWQMdsC9cM3 11 | 6GqUIHgeYaGDdUPuhrj3ccomefyUXVeuZJCwfF5asEXu/rVRC7g9jusKMESRrzyp 12 | plnR7UFjNET8IBntF0CSALmmR5OgMyEW67NQlV25yeOh36ZSo8bOJXMxm+UaZ7te 13 | 
xlllOkfcsfZRM+p6W/qHvqYm4GUdMjubj09J+NKUCuRHUnsKSp5ixkyuYwARAQAB 14 | tGhodHRwczovL3BhY2thZ2VjbG91ZC5pby9yb2NrbnNtLzJfNSAoaHR0cHM6Ly9w 15 | YWNrYWdlY2xvdWQuaW8vZG9jcyNncGdfc2lnbmluZykgPHN1cHBvcnRAcGFja2Fn 16 | ZWNsb3VkLmlvPokCTgQTAQoAOBYhBG2bErFte5Zlfqbs4T8+H6SZ67hqBQJdXiRo 17 | AhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJED8+H6SZ67hqwzgQAKtvvQ2W 18 | hxW4mZCAPywAJCeUaSbhFzEjpK6aopw38EvU1UH12mkLT+G9GKjb4Izo1dloONZK 19 | 9O1b0yGCwkc0KJyKl4GPvG/XGmEwyrLCMn52QdVn+ZN+/R7j2Ndl93B7PDJ+L2BR 20 | FwQh/tzx0Hu4CmlxaNwQNMAXXXvTngU8kexSVX3W02hXoeyF31i5468q97ZfCCI4 21 | DbVE024lxfeVca0IBkJK7ARecI+p9X6rLZ6ueTvQhixiGzMOYK2LHvCLDYnINylh 22 | WJ5FKE19FopP4s1DrtftPQzzFGYQ23McNDwUwqAaZkSWEzfItAYuJtRr+aJUbjXw 23 | 34Z3cktlO1rvYhWJxMaC+D9JtH+9fKM2rmekDCaflYo0DS5Eugq3U/xxERwB1EK/ 24 | NY0J+kunILVaDzpJ/odNyz42o4kTtWOsmfzDl5+J49stLtsFqKtVTbW5sj9t1LvE 25 | TZ6QZWmA6wM5WP/PaUSvvGKlqIBqIueHl2pAFy695OdDGig2i+YEjX2wIbCAQ2Mu 26 | 6fzoKPJYQADmFGnKi+JBXUJg7bJxIOzTDjE+5KdtWJQvzmqqNfEh0enzYK/AcyBo 27 | o2PysyfAMjZS8CGsmbf0AX5FJtu74N84v0tX2TgjqP8avEqQIb5smQIW8HVMa2j8 28 | rkTFts9JvbTwAulbwv4qCTNqrpZWkognnv63uQINBF1eJGgBEAC+Ghk5GutQDtKY 29 | y8YCnZCOVPbV5TuHl9GctG0Hvd1soO3reEu0BW/NhVIlXYvuaYOBif1JSbWK3mjp 30 | 9Q/1sr5+UhTCQiYQsBuvoE+wCpoGfO7Cfa/6PIiXvS182JqNAg8YV3gAH788wJLz 31 | 7uZYbFXDqXRHLS2PueD45CyOnoyasqvrDWm4kaPHyRC9Ziw2lu9HPQTBMNEaMOcw 32 | S00ux+uwDjZZ4z7jhSzmgPUawVAMi2F+mzlhw2suKoT55ms3+psskYoda828RT3M 33 | C2UiI2q8KiWk8JvR86ljbrLQmy9v0vdNz4seL1IfEZ0B/QBRyUEEJsT08FcT2BkJ 34 | pxSzgRiUV2djSFrNf2wWepucS0VwshdIoPTm5Lz2fDNi/Mh1oGBFj6Nn5ZQ0m8WI 35 | WliedaoDjfDUj4PRt7DG2T1UZ/gGBqu+nc6Al8BfZFoAFer30e9Ia53jOwKGFaCr 36 | 6scNzjEG4KcOZkHrXaoAMY3MVNyqxkLh/bce7Mcl1Ax3yfp7ZnpZyDn8GWLUmIso 37 | wEcmdTnxr52OpTKhhpIJrJ7ZRnsMtkgKFBfa0M76sn+VHVa+twMLO4DCrC6S94i2 38 | tSwN3B+nZwMdnseyVqA3hfKUkdYMO8wVaHCRdKOnhZ84li9+n/zt6itrbWzZVpFu 39 | zw09Nlz9M0XOggkXicj9nTQnpoofxQARAQABiQRsBBgBCgAgFiEEbZsSsW17lmV+ 40 | puzhPz4fpJnruGoFAl1eJGgCGy4CQAkQPz4fpJnruGrBdCAEGQEKAB0WIQQfHJHd 41 | ofBVLtbUlm9R3N7JmAjxigUCXV4kaAAKCRBR3N7JmAjxiuVID/9lej1nrbWY/qar 42 | IAxXoJjdLx0zfQCAHYiHSr2TOOa5c0HTNTnEEnwSnKSaPdR0xfU8Mg3WfLyMDysX 43 | hxgdHuL0up7siXDuQ21mcDxONerCWg7gTm+R6KFXj2cNKZp6le5j4dOwI/V3qehX 44 | LJ+6QzkHIz0gu2qNb5LHbx5wMa+jgoBlU2B3B1ARV5qVuFmCsp3WAxEihtdbJSY5 45 | o/OGYPlhsKpSGbhJgdMSCVIQ84HmDb6CZ9+tnB1xBjGJl7AKNpAbG98zKE3SrcZt 46 | yxBcvlWclGRbynqJ/+MbY+sMKVrq0NBVzryjYd3qGrwyiiZmGjNtQh09S/4EUEkj 47 | 9WYNF1ifKuSEXPvlZcm8KAFp9XVUKNlLrsmEzv3cGmVr8nkKYB2330oGFVKzUhvF 48 | a261ki9v3E4D40Z8v+N7OdAgZfiraEJZNcAcuzTAdVa/bRhlTRComwm7/GxMgFoR 49 | BKVBHS0r1TpQdT1slVKMwC0ugp60Jk8TmvISl8yBJEWqaZ5Rqadyu7KE5e15gh5T 50 | 5zJgNRbyUaEXN81uGt81MBdCCRB8Bvq72OMy8Yg23H5fQ30VSWTgVGad47oF01s7 51 | bWjO6yJC7cDWAVd3N98KMseeT/cn3c/8MlS0raHFl/P8TWSEe7UgkfmAlJPTLrEc 52 | JYxTACPlPZlIaKX3RvOJUVXMt/Yv2o97D/0UK5sVbdJoCpoxgIB29uDMplOzeTfu 53 | //RpdnLw0bVgFgt4n/nL2+7CR78qLtHCL6WiN3ZISWTvvFLnAHvQ07P2Nsuf3oVH 54 | l6vXKrh0xC/vTAKB62pnUOzLk44YvL8B1vWpKTEbuu9ww6J1+tqgi4AKJN/ldrW+ 55 | 93UkEO+SEEp98rw63aEQX0RxlMSJB2bmujn9xXym9b6DIMSezxNjBr7hjGwcqvYA 56 | xquZRn0DtFhqIZ5e/b0myzaHEeACiUWIQmFNFV8rHKC/XA1nPHmSzZutU/hxJyco 57 | vn4GAGrjwo5ZCj1XGLo5I9UFCGMLwYwsLjgNR86bbbjZN+kGZ19Fu6jUbZCeZv+Y 58 | LTaUNPzy/yoRsACjh8AIHU+em+Rhf47dePjYuyloR/piW28p+BFhT8WN5Hcby4tU 59 | wsrqvBo8sbzBk7FGhp/44lVbW/IPuybw0m7wMb3Pr+nbhbC2ILq21IoX2HQ4Kn1A 60 | VNqkpbo4STr4u2L+AFWzRQDwHl7dJYx/kS5l1cgJY5p9Ik0+mR0xRLO6mCGQU853 61 | ZFlWJctF3KPOqQabSNZ9L+Dr0YMYVdA8x+KZLvXRI+7S86SbLMkFAB5Ts/VrAnPW 62 | mqFXUil90N21oEuIbxCauk0l4xr5fAnswrfgJdXt3cQjVHVHG3mK909ug51AaYhm 63 | 1UjgWS8+CZXV7g== 64 | =IRgV 65 | -----END PGP PUBLIC 
KEY BLOCK----- 66 | -------------------------------------------------------------------------------- /roles/common/files/etc-issue.in: -------------------------------------------------------------------------------- 1 | +--------------------------------------------------+ 2 | | \\ | Hostname: \n 3 | | / \\ | OS: {{OS_RELEASE}} 4 | | / \\ | Kernel: \s 5 | | \\ / \\ | Build: \v 6 | | / \\ X \\ \\ | IP Addr: {{IP_ADDR}} 7 | | / \\ / \\ / \\ ^_v___ ____ _____ _ __ | Release: ROCK {{ROCK_VERSION}} 8 | | / v/ / /| __ \\ / __ \\ / ____| |/ / | 9 | | / / | |__) | | | | | | ' / | 10 | | / / | _ /| | | | | | < | Date: \d 11 | | / / | | \\ \\| |__| | |____| . \\ | Time: \t 12 | | / / |_| \\_\\\\____/ \\_____|_|\\_\\ | Users: \U 13 | +--------------------------------------------------+ 14 | -------------------------------------------------------------------------------- /roles/common/files/nm-issue-update: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 90-issue-update 3 | # Description: Updates /etc/issue using template /etc/issue.in and 4 | # values from system at time of interface up 5 | 6 | IF=$1 7 | STATUS=$2 8 | 9 | function update_issue() { 10 | . /etc/os-release 11 | OS_RELEASE="${NAME} ${VERSION}"; 12 | IP_ADDR=$(ip route get 255.255.255.255 | awk '{print $6 " ("$4")"; exit}') 13 | ROCK_VERSION=$(cat /etc/rocknsm/rock-version) 14 | sed "s/{{OS_RELEASE}}/${OS_RELEASE}/;s/{{IP_ADDR}}/${IP_ADDR}/;s/{{ROCK_VERSION}}/${ROCK_VERSION}/" /etc/issue.in > /etc/issue 15 | 16 | # Reset gettys where a user is not logged in 17 | GETTYS=$(systemctl list-units | grep "getty@" | grep -vE "$(who | awk '{ print $2 }'|paste -sd'|' )" | awk '{print $1}'|paste -s) 18 | systemctl restart ${GETTYS} 19 | } 20 | 21 | if [ "$IF" != "lo" ] 22 | then 23 | case "$STATUS" in 24 | up) 25 | logger -s "NM Script up triggered" 26 | update_issue 27 | ;; 28 | down) 29 | logger -s "NM Script down triggered" 30 | update_issue 31 | ;; 32 | *) 33 | ;; 34 | esac 35 | fi 36 | -------------------------------------------------------------------------------- /roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart sshd 3 | service: 4 | name: sshd 5 | state: restarted 6 | ...
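The nm-issue-update script above is written to be invoked by NetworkManager with the interface name and action as its two arguments; the task that actually installs it is not part of this excerpt. As a purely illustrative sketch (the destination path and file mode are assumptions inferred from the "90-issue-update" comment, not taken from this repository), a role could deploy it as a dispatcher script like so:

- name: Install NetworkManager dispatcher script (illustrative sketch only)
  copy:
    src: nm-issue-update
    dest: /etc/NetworkManager/dispatcher.d/90-issue-update  # assumed path
    owner: root
    group: root
    mode: '0755'

- name: Install the /etc/issue template the script consumes (illustrative)
  copy:
    src: etc-issue.in
    dest: /etc/issue.in
    owner: root
    group: root
    mode: '0644'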
7 | -------------------------------------------------------------------------------- /roles/common/tasks/configure-pipelining.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Remove requiretty to make ssh pipelining work 3 | - name: Remove require tty 4 | lineinfile: 5 | regexp: '^\w+\s+requiretty' 6 | dest: /etc/sudoers 7 | state: absent 8 | -------------------------------------------------------------------------------- /roles/common/tasks/configure-time.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # configure-time.yml - configure NTP and system clock settings 3 | - name: Install chrony 4 | yum: 5 | name: chrony 6 | state: installed 7 | 8 | - name: Enable and start chrony 9 | service: 10 | name: chronyd 11 | enabled: true 12 | state: started 13 | 14 | - name: Set system timezone 15 | command: /usr/bin/timedatectl set-timezone UTC 16 | when: ansible_date_time.tz != "UTC" 17 | 18 | - name: Check if RTC set to UTC 19 | shell: | 20 | set -o pipefail 21 | timedatectl | awk '/RTC in local/ { print $5 }' 22 | changed_when: false 23 | register: chrony_local_utc 24 | 25 | - name: Set system hardware clock to UTC 26 | shell: | 27 | set -o pipefail 28 | /usr/bin/timedatectl set-local-rtc no 29 | when: chrony_local_utc.stdout == 'yes' 30 | 31 | - name: Check if NTP is enabled 32 | shell: | 33 | set -o pipefail 34 | timedatectl | awk '/NTP enabled/ { print $3 }' 35 | changed_when: false 36 | register: chrony_ntp_enabled 37 | 38 | - name: Set NTP enabled 39 | command: /usr/bin/timedatectl set-ntp yes 40 | when: chrony_ntp_enabled.stdout == 'no' 41 | -------------------------------------------------------------------------------- /roles/common/tasks/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # deploy.yml - Common tasks for ROCK 3 | - import_tasks: gather-facts.yml 4 | - import_tasks: configure.yml 5 | - import_tasks: configure-time.yml 6 | - import_tasks: configure-pipelining.yml 7 | ... 8 | -------------------------------------------------------------------------------- /roles/common/tasks/gather-facts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Set local system-specific facts 3 | - name: Gather local service facts specific to each host 4 | set_fact: 5 | local_services: "{{ rock_services | map(attribute='name') | list | intersect(group_names) }}" 6 | enabled_services: "{{ rock_services | rejectattr('enabled', 'equalto', False) | map(attribute='name') | list | intersect(group_names) }}" 7 | installed_services: "{{ rock_services | rejectattr('installed', 'equalto', False) | map(attribute='name') | list | intersect(group_names) }}" 8 | ... 9 | -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "{{ method }}.yml" 3 | ...
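To make the gather-facts.yml logic above concrete: each rock_services entry (defined in group_vars, outside this excerpt) is expected to carry at least name, enabled, and installed keys, and the name must line up with an inventory group. A hypothetical entry and the facts it would produce on a host in the zeek group (all values invented for illustration):

rock_services:
  - name: zeek           # must match an inventory group name
    quota_weight: 1      # hypothetical value
    enabled: true
    installed: true

# On a host whose group_names includes "zeek", the set_fact task yields:
#   local_services:     ['zeek']
#   enabled_services:   ['zeek']   # dropped if enabled were false
#   installed_services: ['zeek']   # dropped if installed were false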
4 | -------------------------------------------------------------------------------- /roles/docket/README.md: -------------------------------------------------------------------------------- 1 | rocknsm.docket 2 | ========= 3 | 4 | This role installs and configures Docket 5 | 6 | Requirements 7 | ------------ 8 | 9 | pyOpenSSL 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/docket/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | $script = < "virbr0", 26 | :mode => "bridge", 27 | :type => "bridge" 28 | end 29 | 30 | config.vm.synced_folder '../', '/vagrant', type: 'rsync' 31 | 32 | config.vm.provision "shell" do |s| 33 | s.inline = $script 34 | s.keep_color = true 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /roles/docket/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for rocknsm.docket 3 | rocknsm_conf_dir: /etc/rocknsm 4 | rocknsm_conf_user: root 5 | rocknsm_conf_group: root 6 | 7 | # How are we going to install docket? 
8 | # Currently supports one of: yumrepo, offline 9 | # yumrepo sets up the RockNSM repo that hosts docket 10 | # offline skips repo setup and assumes it's already configured 11 | docket_install: yumrepo 12 | docket_enable: true 13 | 14 | # Application config 15 | docket_debug: false 16 | docket_testing: false 17 | docket_secret: "{{ lookup('password', '/dev/null chars=letters,digits length=64') }}" 18 | docket_session_cookie: DOCKET_SESSION 19 | docket_sendfile: true 20 | docket_logger: docket 21 | docket_celery_url: redis://localhost:6379 22 | docket_spool_dir: /var/spool/docket 23 | docket_frontend_dir: /opt/rocknsm/docket/frontend 24 | docket_uwsgi_socket: /run/docket/docket.socket 25 | docket_no_redis: false 26 | docket_long_ago: 24h 27 | 28 | # Web server config 29 | docket_tls: true 30 | docket_user: docket 31 | docket_group: docket 32 | 33 | # An empty string defaults to all interfaces on IPv4 34 | docket_listen_ip: "0.0.0.0" 35 | docket_listen_port: "{{ 8443 if docket_tls else 8080 }}" 36 | docket_host: "{{ hostvars[groups['docket'][0]]['ansible_hostname'] }}" 37 | 38 | docket_web_server: lighttpd 39 | docket_web_pemfile: "/etc/pki/tls/private/lighttpd_docket.pem" 40 | docket_web_dhparams: "/etc/pki/tls/misc/lighttpd_dh.pem" 41 | docket_web_server_name: "{{ ansible_fqdn }}" 42 | docket_web_user: lighttpd 43 | docket_url_apppath: /app/docket 44 | docket_url_resultspath: /results 45 | docket_url_pattern: "(^/results/|^/app/docket/)" 46 | 47 | # Vars to generate keys/certs 48 | docket_x509_dir: /etc/pki/docket 49 | docket_x509_key: "{{ docket_x509_dir }}/docket_{{ hostvars[groups['docket'][0]]['ansible_default_ipv4']['address'] }}_key.pem" 50 | docket_x509_cn: "{{ docket_host }}-docket" 51 | docket_x509_o: Stenographer 52 | docket_x509_c: XX 53 | docket_x509_user: root 54 | docket_x509_group: docket 55 | 56 | # These should be overridden by host-specific vars 57 | steno_host: "{{ hostvars[groups['stenographer'][0]]['ansible_default_ipv4']['address'] }}" 58 | steno_sensor: "{{ hostvars[groups['stenographer'][0]]['ansible_hostname'] }}" 59 | steno_port: 1234 60 | steno_certs_dir: /etc/stenographer/certs 61 | steno_ca_cert: "{{ steno_certs_dir }}/ca_cert.pem" 62 | steno_ca_key: "{{ steno_certs_dir }}/ca_key.pem" 63 | 64 | # This is used to generate the config for docket on 65 | # where to connect and how to authenticate 66 | docket_steno_instances: 67 | - { host: "{{ steno_host }}", sensor: "{{ steno_sensor }}", port: "{{ steno_port }}", key: "{{ docket_x509_key }}", cert: "{{ docket_x509_dir }}/docket-{{ docket_host }}_sensor-{{ steno_sensor }}_cert.pem", ca: "{{ docket_x509_dir }}/{{ steno_sensor }}_ca_cert.pem" } 68 | -------------------------------------------------------------------------------- /roles/docket/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for rocknsm.docket 3 | - name: Cleanup csr on docket host 4 | file: 5 | path: "{{ docket_x509_key }}.csr" 6 | state: absent 7 | when: inventory_hostname in groups['docket'] | bool 8 | 9 | - name: Cleanup csr on sensor hosts 10 | file: 11 | path: "{{ steno_certs_dir }}/{{ hostvars[item].inventory_hostname }}.csr" 12 | state: absent 13 | loop: "{{ groups['docket'] }}" 14 | when: inventory_hostname in groups['stenographer'] | bool 15 | 16 | - name: Restart redis 17 | service: 18 | name: redis 19 | state: restarted 20 | when: "'docket' in enabled_services" 21 | 22 | - name: Seed random key 23 | lineinfile: 24 | path: /etc/docket/prod.yml 25 | regexp: 
'XX_NOT_A_SECRET_XX' 26 | line: "SECRET_KEY: {{ docket_secret }}" 27 | state: present 28 | 29 | - name: Restart docket celery services 30 | service: 31 | name: "{{ item }}" 32 | state: restarted 33 | loop: 34 | - docket-celery-io 35 | - docket-celery-query 36 | when: "'docket' in enabled_services" 37 | 38 | - name: Restart docket uwsgi 39 | service: 40 | name: docket 41 | state: restarted 42 | when: "'docket' in enabled_services" 43 | 44 | - name: Restart lighttpd 45 | service: 46 | name: lighttpd 47 | state: restarted 48 | when: "'docket' in enabled_services" 49 | -------------------------------------------------------------------------------- /roles/docket/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docket:stenographer 3 | become: true 4 | vars: 5 | docket_webserver_type: 'lighttpd' 6 | roles: 7 | - role: ansible-ntp 8 | - role: ansible-timezone 9 | - role: ansible-lighttpd 10 | when: docket_web_server == "lighttpd" 11 | - role: ansible-nginx 12 | when: docket_web_server == "nginx" 13 | -------------------------------------------------------------------------------- /roles/docket/tasks/docket_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check existing secret_key 3 | shell: awk '/^SECRET_KEY/ {print $2}' /etc/docket/prod.yaml 4 | register: docket_prod 5 | changed_when: false 6 | 7 | - debug: msg="{{ docket_prod }}" 8 | 9 | - name: Keep existing secret_key 10 | set_fact: docket_secret="{{ docket_prod.stdout }}" 11 | when: '"CHANGE_THIS" not in docket_prod.stdout' 12 | 13 | - name: Set production docket config 14 | template: 15 | src: docket_prod.yaml.j2 16 | dest: /etc/docket/prod.yaml 17 | notify: 18 | - Restart docket uwsgi 19 | - Restart docket celery services 20 | 21 | - name: Set uwsgi config 22 | template: 23 | src: docket-uwsgi.ini.j2 24 | dest: /etc/docket/docket-uwsgi.ini 25 | notify: 26 | - Restart docket uwsgi 27 | 28 | - name: Enable redis 29 | service: 30 | name: redis 31 | enabled: true 32 | notify: Restart redis 33 | when: "'docket' in enabled_services" 34 | 35 | - name: Enable docket celery services 36 | service: 37 | name: "{{ item }}" 38 | enabled: "{{ 'docket' in enabled_services }}" 39 | notify: Restart docket celery services 40 | loop: 41 | - docket-celery-io 42 | - docket-celery-query 43 | 44 | - name: Enable docket uwsgi service 45 | service: 46 | name: docket 47 | enabled: "{{ 'docket' in enabled_services }}" 48 | notify: Restart docket uwsgi 49 | -------------------------------------------------------------------------------- /roles/docket/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure RockNSM online repos 3 | yum_repository: 4 | file: rocknsm 5 | name: "{{ item.name }}" 6 | enabled: "{{ rock_online_install }}" 7 | description: "{{ item.name }}" 8 | baseurl: "{{ item.baseurl }}" 9 | repo_gpgcheck: 1 10 | gpgcheck: "{{ item.gpgcheck }}" 11 | gpgkey: 12 | - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-pkgcloud-2_5 13 | - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RockNSM-2 14 | sslverify: 1 15 | sslcacert: /etc/pki/tls/certs/ca-bundle.crt 16 | metadata_expire: 300 17 | cost: 750 18 | state: present 19 | loop: 20 | - { name: "rocknsm_2_5", gpgcheck: true, baseurl: "{{ rocknsm_baseurl }}" } 21 | - { name: "rocknsm_2_5-source", gpgcheck: false, baseurl: "{{ rocknsm_srpm_baseurl }}" } 22 | when: docket_install == 'yumrepo' 23 | 24 | - name: Install packages 25 
| yum: 26 | name: 27 | - docket 28 | - lighttpd 29 | state: present 30 | -------------------------------------------------------------------------------- /roles/docket/tasks/lighttpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # - name: docket | configure lighttpd + uwsgi 3 | # template: 4 | # src: docket_lighttpd_scgi.conf.j2 5 | # dest: /etc/lighttpd/conf.d/docket_scgi.conf 6 | # 7 | # - name: docket | configure lighttpd TLS listener 8 | # template: 9 | # src: docket_lighttpd_vhost.conf.j2 10 | # dest: /etc/lighttpd/vhosts.d/docket.conf 11 | # notify: docket | restart lighttpd 12 | 13 | - name: Create lighttpd + uwsgi config 14 | template: 15 | src: lighttpd-30-docket.conf.j2 16 | dest: /etc/lighttpd/vhosts.d/30-docket.conf 17 | notify: Restart lighttpd 18 | 19 | - name: Create vhost logdir 20 | file: 21 | state: directory 22 | path: "/var/log/lighttpd/{{ docket_web_server_name }}/" 23 | owner: lighttpd 24 | group: lighttpd 25 | mode: 0755 26 | 27 | - name: Enable lighttpd vhosts 28 | lineinfile: 29 | path: /etc/lighttpd/lighttpd.conf 30 | regexp: '^#?\s*include.*vhosts\.d/.*$' 31 | line: include "/etc/lighttpd/vhosts.d/*.conf" 32 | notify: Restart lighttpd 33 | 34 | - name: Add lighttpd into docket group 35 | user: 36 | name: lighttpd 37 | append: true 38 | groups: "{{ docket_group }}" 39 | notify: Restart lighttpd 40 | 41 | - name: Enable lighttpd service 42 | service: 43 | name: lighttpd 44 | enabled: "{{ 'docket' in enabled_services }}" 45 | notify: Restart lighttpd 46 | -------------------------------------------------------------------------------- /roles/docket/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for rocknsm.docket 3 | 4 | 5 | # Ensure the `docket` and `stenographer` groups exist 6 | - import_tasks: prereqs.yml 7 | 8 | # Install packages 9 | - import_tasks: install.yml 10 | when: inventory_hostname in groups['docket'] 11 | 12 | # Generate/copy x509 client cert/keys and CA certs 13 | - import_tasks: crypto.yml 14 | 15 | # Configure docket app settings 16 | - import_tasks: docket_config.yml 17 | when: inventory_hostname in groups['docket'] 18 | 19 | # Configure web server settings 20 | - import_tasks: lighttpd.yml 21 | when: inventory_hostname in groups['docket'] 22 | 23 | # Enable / Activate Services 24 | -------------------------------------------------------------------------------- /roles/docket/tasks/prereqs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # prerequisite checks for rocknsm.docket 3 | 4 | # Validate hosts exist in the docket and stenographer groups 5 | - name: Check for docket and stenographer hosts 6 | assert: 7 | that: 8 | - "{{ ('docket' in groups) and (groups['docket'] | length) > 0 }}" 9 | - "{{ ('stenographer' in groups) and (groups['stenographer'] | length) > 0 }}" 10 | msg: "The [docket] and [stenographer] inventory groups must each have at least one host."
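The assert in prereqs.yml only passes when the inventory provides at least one host in each of the docket and stenographer groups. A minimal sketch of such an inventory in YAML form, with an invented hostname (the repository itself ships INI-style inventories such as etc/hosts.ini):

all:
  children:
    docket:
      hosts:
        sensor01.example.internal:
    stenographer:
      hosts:
        sensor01.example.internal: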
11 | -------------------------------------------------------------------------------- /roles/docket/templates/docket-uwsgi.ini.j2: -------------------------------------------------------------------------------- 1 | # file: /etc/docket/docket-uwsgi.ini 2 | [uwsgi] 3 | module = wsgi:application 4 | master = true 5 | processes = 5 6 | pcre-jit = true 7 | plugins = python 8 | pythonpath = /opt/rocknsm/docket/docket 9 | die-on-term = true 10 | 11 | # Static config 12 | static-map = {{ docket_url_apppath }}/={{ docket_frontend_dir }} 13 | static-map = {{ docket_url_resultspath }}={{ docket_spool_dir }} 14 | static-index = index.html 15 | file-serve-mode = x-sendfile 16 | -------------------------------------------------------------------------------- /roles/docket/templates/docket_lighttpd_scgi.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # file: /etc/lighttpd/conf.d/docket_scgi.conf 3 | # {{ ansible_managed }} 4 | 5 | server.modules += ( "mod_scgi" ) 6 | scgi.protocol = "uwsgi" 7 | scgi.server = ( 8 | "/" => (( 9 | # Ensure lighttpd doesn't try to validate the URL 10 | "check-local" => "disable", 11 | 12 | # Needed to pass through leading "/" via uwsgi protocol 13 | "fix-root-scriptname" => "enable", 14 | 15 | # Use x-sendfile for faster file transfers from uwsgi 16 | "x-sendfile" => "enable", 17 | "x-sendfile-docroot" => ( 18 | # Set this to SPOOL_DIR 19 | "{{docket_spool_dir}}", 20 | # Set this to the path of the compiled frontend 21 | "{{docket_frontend_dir}}" 22 | ), 23 | 24 | # Use the unix domain socket for local communication 25 | "socket" => "/run/docket/docket.socket", 26 | )), 27 | ) 28 | -------------------------------------------------------------------------------- /roles/docket/templates/docket_lighttpd_vhost.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | ## Docket virtual host config 3 | ## file: /etc/lighttpd/vhosts.d/docket.conf 4 | ## 5 | ## {{ ansible_managed }} 6 | ## 7 | 8 | $SERVER["socket"] == "{{docket_listen_ip}}:{{docket_listen_port}}" { 9 | var.server_name = "{{ docket_web_server_name }}" 10 | server.name = server_name 11 | accesslog.filename = log_root + "/" + server.name + "/access.log" 12 | 13 | ssl.engine = "{{ 'enable' if docket_tls == true else 'disable' }}" 14 | ssl.pemfile = "{{ docket_web_pemfile }}" 15 | ssl.dh-file = "{{ docket_web_dhparams }}" 16 | 17 | include "conf.d/docket_scgi.conf" 18 | } 19 | 20 | ## 21 | ####################################################################### 22 | -------------------------------------------------------------------------------- /roles/docket/templates/lighttpd-30-docket.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # Docket application config. 
Mounts docket at sub-path of root 3 | # file: /etc/lighttpd/vhosts.d/30-docket.conf 4 | # 5 | # {{ ansible_managed }} 6 | # 7 | 8 | server.modules += ( "mod_scgi" ) 9 | server.modules += ("mod_rewrite") 10 | 11 | scgi.debug = 1 12 | 13 | $HTTP["url"] =~ "{{ docket_url_pattern }}" { 14 | 15 | setenv.add-environment = ( "AUTH_TYPE" => "Basic" ) # "Basic" or "Digest" 16 | scgi.protocol = "uwsgi" 17 | scgi.server = ( 18 | "/" => (( 19 | # Ensure lighttpd doesn't try to validate the URL 20 | "check-local" => "disable", 21 | 22 | # Needed to pass through leading "/" via uwsgi protocol 23 | "fix-root-scriptname" => "enable", 24 | 25 | # Use x-sendfile for faster file transfers from uwsgi 26 | "x-sendfile" => "enable", 27 | "x-sendfile-docroot" => ( 28 | # Set this to the path of the compiled frontend 29 | "{{ docket_frontend_dir }}", 30 | # Set this to SPOOL_DIR 31 | "{{ docket_spool_dir }}", 32 | ), 33 | # Use the unix domain socket for local communication 34 | "socket" => "{{ docket_uwsgi_socket }}", 35 | )), 36 | ) 37 | } 38 | -------------------------------------------------------------------------------- /roles/docket/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## 3 | ## Copyright (c) 2017, 2018 RockNSM. 4 | ## 5 | ## This file is part of RockNSM 6 | ## (see http://rocknsm.io). 7 | ## 8 | ## Licensed under the Apache License, Version 2.0 (the "License"); 9 | ## you may not use this file except in compliance with the License. 10 | ## You may obtain a copy of the License at 11 | ## 12 | ## http://www.apache.org/licenses/LICENSE-2.0 13 | ## 14 | ## Unless required by applicable law or agreed to in writing, 15 | ## software distributed under the License is distributed on an 16 | ## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | ## KIND, either express or implied. See the License for the 18 | ## specific language governing permissions and limitations 19 | ## under the License. 20 | ## 21 | ## 22 | # 23 | # Ansible role test shim. 24 | # 25 | # Usage: [OPTIONS] ./tests/test.sh 26 | # - playbook: a playbook in the tests directory (default = "test.yml") 27 | # - test_idempotence: whether to test playbook's idempotence (default = true) 28 | # 29 | # License: MIT 30 | 31 | # Exit on any individual command failure. 32 | set -e 33 | 34 | # Pretty colors. 35 | red='\033[0;31m' 36 | green='\033[0;32m' 37 | neutral='\033[0m' 38 | 39 | timestamp=$(date +%s) 40 | 41 | # Allow environment variables to override defaults. 42 | playbook=${playbook:-"test.yml"} 43 | test_idempotence=${test_idempotence:-"true"} 44 | 45 | export ANSIBLE_ROLES_PATH=$(pwd)/../ 46 | 47 | # Install requirements if `requirements.yml` is present. 48 | if [ -f "$PWD/tests/requirements.yml" ]; then 49 | printf ${green}"Requirements file detected; installing dependencies."${neutral}"\n" 50 | TERM=xterm ansible-galaxy install -r tests/requirements.yml 51 | fi 52 | 53 | printf "\n" 54 | 55 | # Test Ansible syntax. 56 | printf ${green}"Checking Ansible playbook syntax."${neutral} 57 | TERM=xterm ansible-playbook tests/$playbook --syntax-check 58 | 59 | printf "\n" 60 | 61 | # Run Ansible playbook. 62 | printf ${green}"Running command: TERM=xterm ansible-playbook tests/$playbook"${neutral} 63 | TERM=xterm ANSIBLE_FORCE_COLOR=1 ansible-playbook --become --inventory tests/inventory tests/$playbook 64 | 65 | if [ "$test_idempotence" = true ]; then 66 | # Run Ansible playbook again (idempotence test). 
67 | printf ${green}"Running playbook again: idempotence test"${neutral} 68 | idempotence=$(mktemp) 69 | ansible-playbook --become --inventory tests/inventory tests/$playbook | tee -a $idempotence 70 | tail $idempotence \ 71 | | grep -q 'changed=0.*failed=0' \ 72 | && (printf ${green}'Idempotence test: pass'${neutral}"\n") \ 73 | || (printf ${red}'Idempotence test: fail'${neutral}"\n" && exit 1) 74 | fi 75 | -------------------------------------------------------------------------------- /roles/docket/tests/inventory: -------------------------------------------------------------------------------- 1 | docket01 ansible_connection=local 2 | 3 | [docket] 4 | docket01 5 | 6 | [stenographer] 7 | docket01 8 | -------------------------------------------------------------------------------- /roles/docket/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - docket 4 | - stenographer 5 | remote_user: root 6 | roles: 7 | - rocknsm.docket 8 | -------------------------------------------------------------------------------- /roles/docket/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for rocknsm.docket 3 | -------------------------------------------------------------------------------- /roles/elasticsearch/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | es_user: elasticsearch 3 | es_group: elasticsearch 4 | es_data_dir: "{{ rock_data_dir }}/elasticsearch" 5 | es_cluster_name: rocknsm 6 | es_node_name: "{{ inventory_hostname }}" 7 | es_network_host: >- 8 | {%- if (groups['elasticsearch']|union(groups['logstash'])|union(groups['kibana']))| count > 1 -%} 9 | _site:ipv4_ 10 | {%- else -%} 11 | _local:ipv4_ 12 | {%- endif -%} 13 | es_interface: >- 14 | {%- if (groups['elasticsearch']|union(groups['logstash'])|union(groups['kibana']))| count > 1 -%} 15 | {{ inventory_hostname }} 16 | {%- else -%} 17 | localhost 18 | {%- endif -%} 19 | es_action_auto_create_index: true 20 | es_min_master_nodes: "{{ 2 if ( groups['es_masters'] | length ) == 3 else 1 }}" 21 | es_mem: "{{ (ansible_memtotal_mb // 1024 // 2) if (ansible_memtotal_mb // 1024) < 64 else 31 }}" 22 | es_url: "http://127.0.0.1:9200" 23 | es_log_dir: /var/log/elasticsearch 24 | es_memlock_override: | 25 | [Service] 26 | LimitMEMLOCK=infinity 27 | -------------------------------------------------------------------------------- /roles/elasticsearch/files/default-mapping.json: -------------------------------------------------------------------------------- 1 | { 2 | "template" : "*", 3 | "order" : 0, 4 | "settings" : { 5 | "number_of_shards" : "1", 6 | "number_of_replicas" : "0" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /roles/elasticsearch/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload systemd 3 | systemd: 4 | daemon_reload: true 5 | 6 | - name: Restart elasticsearch 7 | debug: msg="Restarting Elasticsearch Sequence" 8 | notify: 9 | - Disable cluster shard allocation 10 | 11 | - name: Disable cluster shard allocation 12 | uri: 13 | url: "http://{{ ansible_hostname }}:9200/_cluster/settings" 14 | body: '{"transient": {"cluster.routing.allocation.enable":"none" }}' 15 | body_format: json 16 | timeout: 2 17 | method: PUT 18 | register: result 19 | until: result.json.acknowledged is defined 20 | retries: 300 21 | delay: 3 22 
| changed_when: result.json.acknowledged | bool 23 | notify: 24 | - do restart elasticsearch 25 | 26 | - name: do restart elasticsearch 27 | service: 28 | name: elasticsearch 29 | state: restarted 30 | notify: 31 | - wait node online 32 | 33 | - name: wait node online 34 | uri: 35 | url: "http://{{ ansible_hostname }}:9200/_nodes/{{ ansible_hostname }}/name" 36 | timeout: 2 37 | register: result 38 | until: result.json._nodes.total == 1 39 | retries: 200 40 | delay: 3 41 | notify: 42 | - Enable cluster shard allocation 43 | 44 | - name: Enable cluster shard allocation 45 | uri: 46 | url: "http://{{ ansible_hostname }}:9200/_cluster/settings" 47 | body: '{"transient": {"cluster.routing.allocation.enable":"all" }}' 48 | body_format: json 49 | timeout: 2 50 | method: PUT 51 | register: result 52 | until: result.json.acknowledged is defined 53 | retries: 300 54 | delay: 3 55 | changed_when: result.json.acknowledged | bool 56 | notify: 57 | - Wait until cluster green 58 | 59 | - name: Wait until cluster green 60 | uri: 61 | url: "http://{{ ansible_hostname }}:9200/_cluster/health" 62 | timeout: 2 63 | register: result 64 | until: result.json.status == "green" 65 | retries: 300 66 | delay: 3 67 | -------------------------------------------------------------------------------- /roles/elasticsearch/tasks/after.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure cluster is green 3 | uri: 4 | method: "GET" 5 | url: "http://{{ es_interface }}:9200/_cluster/health" 6 | return_content: true 7 | timeout: 2 8 | register: result 9 | until: result.json is defined and result.json.status == "green" 10 | retries: 300 11 | delay: 3 12 | run_once: true 13 | 14 | - name: "Fail if Elasticsearch is RED" 15 | fail: 16 | msg: "Elasticsearch cluster has a red status" 17 | when: result.json.status == "red" 18 | 19 | - name: Check for default mapping template 20 | uri: 21 | method: "GET" 22 | url: "{{ es_url }}/_template/default" 23 | status_code: [200, 404] 24 | return_content: true 25 | timeout: 2 26 | register: default_index_template 27 | run_once: true 28 | 29 | - name: Load default elasticsearch mapping template 30 | uri: 31 | method: PUT 32 | url: "{{ es_url }}/_template/default" 33 | body: "{{ lookup('file', 'default-mapping.json') }}" 34 | body_format: json 35 | when: "'elasticsearch' in installed_services and default_index_template.status != 200" 36 | run_once: true 37 | 38 | - name: Blanket install/update elasticsearch mappings 39 | command: ./import-index-templates.sh "{{ es_url }}" 40 | args: 41 | chdir: "{{ rock_module_dir }}/ecs-configuration/elasticsearch" 42 | register: result 43 | changed_when: 'result.stdout.find("Changed: 0") != -1' 44 | run_once: true 45 | tags: 46 | - molecule-idempotence-notest 47 | ... 
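The elasticsearch handlers above chain into a guarded restart: disable shard allocation, restart the service, wait for the node to rejoin, re-enable allocation, then wait for the cluster to return to green. A configuration task only has to notify the first handler for the whole sequence to run; a minimal hypothetical example (a sketch, not necessarily how the role itself triggers restarts):

- name: Update elasticsearch configuration (illustrative)
  template:
    src: elasticsearch.yml.j2
    dest: /etc/elasticsearch/elasticsearch.yml
  notify: Restart elasticsearch   # cascades through the chained handlers above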
48 | -------------------------------------------------------------------------------- /roles/elasticsearch/tasks/before.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install packages 4 | yum: 5 | name: 6 | - elasticsearch-{{ rock_services | selectattr('name', 'equalto', 'elasticsearch') | map(attribute='version') | first }} 7 | state: installed 8 | register: es_install 9 | 10 | - name: Create elasticsearch directory 11 | file: 12 | path: "{{ es_data_dir }}" 13 | mode: 0755 14 | owner: "{{ es_user }}" 15 | group: "{{ es_group }}" 16 | state: directory 17 | 18 | - name: Setup elasticsearch config 19 | template: 20 | src: elasticsearch.yml.j2 21 | dest: /etc/elasticsearch/elasticsearch.yml 22 | owner: root 23 | group: "{{ es_group }}" 24 | mode: 0640 25 | register: es_config 26 | 27 | - name: Create elasticsearch systemd override dir 28 | file: 29 | path: /etc/systemd/system/elasticsearch.service.d 30 | owner: root 31 | group: root 32 | mode: 0755 33 | state: directory 34 | 35 | - name: Enable elasticsearch memlock in service override 36 | copy: 37 | content: "{{ es_memlock_override }}" 38 | dest: /etc/systemd/system/elasticsearch.service.d/override.conf 39 | mode: 0644 40 | owner: root 41 | group: root 42 | register: es_memlock 43 | 44 | - name: Setup elasticsearch JVM options 45 | template: 46 | src: templates/es-jvm.options.j2 47 | dest: /etc/elasticsearch/jvm.options 48 | mode: 0640 49 | owner: root 50 | group: "{{ es_group }}" 51 | register: es_jvm 52 | 53 | - name: Reload systemd 54 | systemd: 55 | daemon_reload: true 56 | when: es_memlock.changed 57 | tags: 58 | - skip_ansible_lint # [503] Tasks that run when changed should be handlers 59 | 60 | - name: Discover facts about data mount 61 | set_fact: 62 | rock_mounts: 63 | mount: "{{ item.mount }}" 64 | device: "{{ item.device }}" 65 | size_total: "{{ item.size_total }}" 66 | loop: 67 | "{{ ansible_mounts }}" 68 | when: (default_mount is defined and item.mount == default_mount and rock_mounts is not defined) 69 | 70 | - name: Determining if quotas are enabled 71 | command: > 72 | awk -v path="{{ default_mount }}" 73 | '$2 ~ path && $4 ~ /p(rj)?quota/ ' /etc/fstab 74 | register: prjquota 75 | changed_when: false 76 | 77 | # - debug: 78 | # msg: "{{prjquota}}" 79 | 80 | - name: Create elasticsearch quota project id 81 | getent: 82 | database: group 83 | split: ':' 84 | key: elasticsearch 85 | when: rock_mounts is defined and (prjquota.stdout|length>0) 86 | 87 | - name: Map elasticsearch quota project id to name 88 | lineinfile: 89 | create: true 90 | state: present 91 | insertafter: EOF 92 | path: /etc/projid 93 | line: "elasticsearch:{{ getent_group.elasticsearch[1] }}" 94 | when: rock_mounts is defined and (prjquota.stdout|length>0) 95 | 96 | - name: Define elasticsearch quota project directories 97 | lineinfile: 98 | create: true 99 | state: present 100 | insertafter: EOF 101 | path: /etc/projects 102 | line: "{{ getent_group.elasticsearch[1] }}:{{ es_data_dir }}" 103 | when: rock_mounts is defined and (prjquota.stdout|length>0) 104 | 105 | - name: set elasticsearch weight 106 | set_fact: 107 | elastic_weight: "{{ rock_services | selectattr('name', 'equalto', 'elasticsearch') | map(attribute='quota_weight') | first }}" 108 | when: rock_mounts is defined and (prjquota.stdout|length>0) 109 | 110 | - name: set elasticsearch quota if not user defined 111 | set_fact: 112 | elasticsearch_quota: "{{ rock_mounts.size_total | int / xfs_quota_weight | int * elastic_weight | int }}" 
113 | when: rock_mounts is defined and (prjquota.stdout|length>0) 114 | 115 | - name: set elasticsearch project quota 116 | xfs_quota: 117 | type: project 118 | name: elasticsearch 119 | bhard: "{{ elasticsearch_quota }}" 120 | state: present 121 | mountpoint: "{{ rock_mounts.mount }}" 122 | when: rock_mounts is defined and (prjquota.stdout|length>0) 123 | 124 | - name: Enable and start elasticsearch 125 | service: 126 | name: elasticsearch 127 | state: started 128 | enabled: "{{ 'elasticsearch' in enabled_services }}" 129 | 130 | - name: Create internal firewall zone 131 | firewalld: 132 | state: present 133 | zone: internal 134 | permanent: true 135 | register: result 136 | 137 | - name: Reload firewalld to load zone 138 | service: 139 | name: firewalld 140 | state: restarted 141 | when: result.changed 142 | tags: 143 | - skip_ansible_lint # [503] Tasks that run when changed should be handlers 144 | 145 | - name: Configure firewall zone for internal elastic sources 146 | firewalld: 147 | permanent: true 148 | state: enabled 149 | immediate: true 150 | source: "{{ hostvars[item]['ansible_default_ipv4']['address'] }}" 151 | zone: work # This should be a different zone, but leaving this here for now 152 | when: (groups['elasticsearch']|union(groups['logstash'])|union(groups['kibana']))| count > 1 153 | loop: "{{ (groups['elasticsearch']|union(groups['logstash'])|union(groups['kibana'])) | list }}" 154 | 155 | - name: Configure firewall ports for zone 156 | firewalld: 157 | port: "{{ item }}/tcp" 158 | permanent: true 159 | state: enabled 160 | immediate: true 161 | zone: work # This should be a different zone, but leaving this here for now 162 | when: (groups['elasticsearch']|union(groups['logstash'])|union(groups['kibana']))| count > 1 163 | loop: 164 | - 9200 165 | - 9300 166 | 167 | - name: Determine if Elasticsearch needs to be restarted 168 | set_fact: 169 | es_restart: true 170 | when: "(es_config.changed or es_memlock.changed or es_jvm.changed) and not es_install.changed" 171 | tags: 172 | - skip_ansible_lint # [503] Tasks that run when changed should be handlers 173 | ... 174 | -------------------------------------------------------------------------------- /roles/elasticsearch/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Running step {{ es_step }}" 4 | include: "{{ es_step }}.yml" 5 | ... 
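The project-quota math in before.yml divides the data mount's total size by the global xfs_quota_weight and multiplies by the service's quota_weight. With purely illustrative numbers (none of these values are taken from the repository's defaults), the calculation works out as follows:

# Hypothetical inputs, for illustration only:
#   rock_mounts.size_total: 1000000000000   # ~1 TB data mount, in bytes
#   xfs_quota_weight:       100             # assumed total weight to divide by
#   elastic_weight:         40              # assumed quota_weight for elasticsearch
#
# elasticsearch_quota = 1000000000000 / 100 * 40 = 400000000000 bytes (~400 GB),
# which is then applied as the bhard project quota on the XFS data mount.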
6 | -------------------------------------------------------------------------------- /roles/elasticsearch/tasks/restart.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Disable cluster shard allocation 3 | uri: 4 | url: "http://{{ es_interface }}:9200/_cluster/settings" 5 | body: '{"transient": {"cluster.routing.allocation.enable":"none" }}' 6 | body_format: json 7 | timeout: 2 8 | method: PUT 9 | register: result 10 | until: result.json.acknowledged is defined 11 | retries: 300 12 | delay: 3 13 | changed_when: result.json.acknowledged | bool 14 | 15 | - name: Restart elasticsearch 16 | service: 17 | name: elasticsearch 18 | state: restarted 19 | 20 | - name: Wait for elasticsearch to become ready 21 | wait_for: 22 | host: "{{ es_interface }}" 23 | port: 9200 24 | 25 | - name: Make sure node has joined the cluster 26 | uri: 27 | url: "http://{{ es_interface }}:9200/_nodes/{{ es_node_name }}/name" 28 | return_content: true 29 | timeout: 5 30 | register: result 31 | until: result.json._nodes.total == 1 32 | retries: 200 33 | delay: 10 34 | 35 | - name: Enable cluster shard allocation 36 | uri: 37 | url: "http://{{ es_interface }}:9200/_cluster/settings" 38 | body: '{"transient": {"cluster.routing.allocation.enable":"all" }}' 39 | body_format: json 40 | timeout: 2 41 | method: PUT 42 | register: result 43 | until: result.json.acknowledged is defined 44 | retries: 300 45 | delay: 3 46 | changed_when: result.json.acknowledged | bool 47 | ... 48 | -------------------------------------------------------------------------------- /roles/elasticsearch/templates/elasticsearch.yml.j2: -------------------------------------------------------------------------------- 1 | {% if es_path_repo is defined %} 2 | path.repo: {{ es_path_repo }} 3 | {% endif %} 4 | 5 | cluster.name: {{ es_cluster_name }} 6 | node.name: {{ es_node_name }} 7 | path.data: {{ es_data_dir }} 8 | path.logs: {{ es_log_dir }} 9 | bootstrap.memory_lock: true 10 | network.host: {{ es_network_host }} 11 | discovery.zen.minimum_master_nodes: {{ es_min_master_nodes }} 12 | 13 | {# Check if more than 1 ES node has been configured in the inventory #} 14 | {% if groups['elasticsearch']|length > 1 %} 15 | {# ES versions prior to 7 used a list of unicasts hosts #} 16 | {% if elastic.major_version < 7 %} 17 | discovery.zen.ping.unicast.hosts: 18 | {% for host in query('inventory_hostnames', 'es_masters') | sort %} 19 | - {{ host }} 20 | {% endfor %} 21 | {% endif %} 22 | {# ES version 7 and higher use seed_hosts and a list of initial masters #} 23 | {% if elastic.major_version > 6 %} 24 | discovery.seed_hosts: 25 | {% for host in query('inventory_hostnames', 'es_masters') | sort %} 26 | - {{ host }} 27 | {% endfor %} 28 | cluster.initial_master_nodes: 29 | {% for host in query('inventory_hostnames', 'es_masters') | sort %} 30 | - {{ host }} 31 | {% endfor %} 32 | {% endif %} 33 | {# If we aren't running multiple nodes we can skip all the logic #} 34 | {# above and declare this will be a single node cluster #} 35 | {% else %} 36 | discovery.type: single-node 37 | {% endif %} 38 | 39 | action.auto_create_index: {{ es_action_auto_create_index }} 40 | action.destructive_requires_name: true 41 | 42 | # Node Roles 43 | node.master: {{ node_master }} 44 | node.data: {{ node_data }} 45 | node.ingest: {{ node_ingest }} 46 | 47 | # Enable Stack Monitoring 48 | xpack.monitoring.enabled: true 49 | xpack.monitoring.collection.enabled: true 50 | 
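To illustrate the discovery branches in elasticsearch.yml.j2 above: for a hypothetical three-node deployment on Elasticsearch 7.x with hosts es01, es02, and es03 in the es_masters group (hostnames invented for the example), the discovery portion of the rendered file would come out roughly as below, while a single-node install instead renders only discovery.type: single-node.

discovery.seed_hosts:
  - es01
  - es02
  - es03
cluster.initial_master_nodes:
  - es01
  - es02
  - es03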
-------------------------------------------------------------------------------- /roles/elasticsearch/templates/es-jvm.options.j2: -------------------------------------------------------------------------------- 1 | ## JVM configuration 2 | ################################################################ 3 | ## IMPORTANT: JVM heap size 4 | ################################################################ 5 | ## 6 | ## You should always set the min and max JVM heap 7 | ## size to the same value. For example, to set 8 | ## the heap to 4 GB, set: 9 | ## 10 | ## -Xms4g 11 | ## -Xmx4g 12 | ## 13 | ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html 14 | ## for more information 15 | ## 16 | ################################################################ 17 | 18 | # Xms represents the initial size of total heap space 19 | # Xmx represents the maximum size of total heap space 20 | 21 | -Xms{{ es_mem }}g 22 | -Xmx{{ es_mem }}g 23 | 24 | ################################################################ 25 | ## Expert settings 26 | ################################################################ 27 | ## 28 | ## All settings below this section are considered 29 | ## expert settings. Don't tamper with them unless 30 | ## you understand what you are doing 31 | ## 32 | ################################################################ 33 | 34 | ## GC configuration 35 | #-XX:+UseConcMarkSweepGC 36 | #-XX:CMSInitiatingOccupancyFraction=75 37 | #-XX:+UseCMSInitiatingOccupancyOnly 38 | 39 | ## G1GC Configuration 40 | # NOTE: G1GC is only supported on JDK version 10 or later. 41 | # To use G1GC uncomment the lines below. 42 | # 10-:-XX:-UseConcMarkSweepGC 43 | # 10-:-XX:-UseCMSInitiatingOccupancyOnly 44 | # 10-:-XX:+UseG1GC 45 | # 10-:-XX:InitiatingHeapOccupancyPercent=75 46 | 47 | ## DNS cache policy 48 | # cache ttl in seconds for positive DNS lookups noting that this overrides the 49 | # JDK security property networkaddress.cache.ttl; set to -1 to cache forever 50 | -Des.networkaddress.cache.ttl=60 51 | # cache ttl in seconds for negative DNS lookups noting that this overrides the 52 | # JDK security property networkaddress.cache.negative ttl; set to -1 to cache 53 | # forever 54 | -Des.networkaddress.cache.negative.ttl=10 55 | 56 | ## optimizations 57 | 58 | # pre-touch memory pages used by the JVM during initialization 59 | -XX:+AlwaysPreTouch 60 | 61 | ## basic 62 | # explicitly set the stack size 63 | -Xss1m 64 | 65 | # force the server VM 66 | -server 67 | 68 | # set to headless, just in case 69 | -Djava.awt.headless=true 70 | 71 | # ensure UTF-8 encoding by default (e.g. 
filenames) 72 | -Dfile.encoding=UTF-8 73 | 74 | # use our provided JNA always versus the system one 75 | -Djna.nosys=true 76 | 77 | # turn off a JDK optimization that throws away stack traces for common 78 | # exceptions because stack traces are important for debugging 79 | -XX:-OmitStackTraceInFastThrow 80 | 81 | # flags to configure Netty 82 | -Dio.netty.noUnsafe=true 83 | -Dio.netty.noKeySetOptimization=true 84 | -Dio.netty.recycler.maxCapacityPerThread=0 85 | 86 | # log4j 2 87 | -Dlog4j.shutdownHookEnabled=false 88 | -Dlog4j2.disable.jmx=true 89 | 90 | -Djava.io.tmpdir=${ES_TMPDIR} 91 | 92 | ## heap dumps 93 | 94 | # generate a heap dump when an allocation from the Java heap fails 95 | # heap dumps are created in the working directory of the JVM 96 | -XX:+HeapDumpOnOutOfMemoryError 97 | 98 | # specify an alternative path for heap dumps 99 | # ensure the directory exists and has sufficient space 100 | -XX:HeapDumpPath=/var/lib/elasticsearch 101 | 102 | # specify an alternative path for JVM fatal error logs 103 | -XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log 104 | 105 | ## JDK 8 GC logging 106 | 107 | 8:-XX:+PrintGCDetails 108 | 8:-XX:+PrintGCDateStamps 109 | 8:-XX:+PrintTenuringDistribution 110 | 8:-XX:+PrintGCApplicationStoppedTime 111 | 8:-Xloggc:/var/log/elasticsearch/gc.log 112 | 8:-XX:+UseGCLogFileRotation 113 | 8:-XX:NumberOfGCLogFiles=32 114 | 8:-XX:GCLogFileSize=64m 115 | 116 | # JDK 9+ GC logging 117 | 9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m 118 | # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise 119 | # time/date parsing will break in an incompatible way for some date patterns and locals 120 | 9-:-Djava.locale.providers=COMPAT 121 | -------------------------------------------------------------------------------- /roles/filebeat/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # defaults file for filebeat 4 | -------------------------------------------------------------------------------- /roles/filebeat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Restart filebeat 4 | systemd: 5 | name: filebeat 6 | state: restarted 7 | -------------------------------------------------------------------------------- /roles/filebeat/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install filebeat package 4 | yum: 5 | name: filebeat-{{ rock_services | selectattr('name', 'equalto', 'filebeat') | map(attribute='version') | first }} 6 | state: present 7 | 8 | - name: Create filebeat config directory 9 | file: 10 | path: /etc/filebeat/configs 11 | mode: 0755 12 | owner: root 13 | group: root 14 | state: directory 15 | 16 | - name: Configure filebeat 17 | template: 18 | src: filebeat.yml.j2 19 | dest: /etc/filebeat/filebeat.yml 20 | notify: Restart filebeat 21 | 22 | - name: Add filebeat configs 23 | template: 24 | src: "{{ item.src }}" 25 | dest: "/etc/filebeat/configs/{{ item.dest }}" 26 | notify: Restart filebeat 27 | when: filebeat_configs is defined 28 | with_items: "{{ filebeat_configs }}" 29 | 30 | - name: Enable and start filebeat 31 | service: 32 | name: filebeat 33 | state: "{{ 'started' if 'filebeat' in enabled_services else 'stopped' }}" 34 | enabled: "{{ 'filebeat' in enabled_services }}" 35 | 
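The filebeat role above only drops per-source input files into /etc/filebeat/configs when a filebeat_configs list is defined elsewhere, for example by a sensor role or in group_vars. A hypothetical definition, with made-up template and file names, might look like:

filebeat_configs:
  - src: fb-suricata.yml.j2   # template name invented for illustration
    dest: fb-suricata.yml
  - src: fb-fsf.yml.j2        # likewise hypothetical
    dest: fb-fsf.yml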
-------------------------------------------------------------------------------- /roles/kafka/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/kafka/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kafka 3 | kafka_zookeeper_host: 127.0.0.1 4 | kafka_zookeeper_port: 2181 5 | kafka_zookeeper_chroot: "" 6 | -------------------------------------------------------------------------------- /roles/kafka/files/wait-for-zookeeper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import socket 4 | import os 5 | import sys 6 | import logging 7 | 8 | # Byte conversion utility for compatibility between 9 | # Python 2 and 3. 10 | # http://python3porting.com/problems.html#nicer-solutions 11 | if sys.version_info < (3,): 12 | def _b(x): 13 | return x 14 | else: 15 | import codecs 16 | def _b(x): 17 | return codecs.latin_1_encode(x)[0] 18 | 19 | 20 | class SystemdNotifier: 21 | """This class holds a connection to the systemd notification socket 22 | and can be used to send messages to systemd using its notify method.""" 23 | 24 | def __init__(self, debug=False): 25 | """Instantiate a new notifier object. This will initiate a connection 26 | to the systemd notification socket. 27 | Normally this method silently ignores exceptions (for example, if the 28 | systemd notification socket is not available) to allow applications to 29 | function on non-systemd based systems. However, setting debug=True will 30 | cause this method to raise any exceptions generated to the caller, to 31 | aid in debugging. 
32 | """ 33 | self.debug = debug 34 | try: 35 | self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) 36 | addr = os.getenv('NOTIFY_SOCKET') 37 | if addr[0] == '@': 38 | addr = '\0' + addr[1:] 39 | self.socket.connect(addr) 40 | except: 41 | self.socket = None 42 | if self.debug: 43 | raise 44 | 45 | def notify(self, state): 46 | """Send a notification to systemd. state is a string; see 47 | the man page of sd_notify (http://www.freedesktop.org/software/systemd/man/sd_notify.html) 48 | for a description of the allowable values. 49 | Normally this method silently ignores exceptions (for example, if the 50 | systemd notification socket is not available) to allow applications to 51 | function on non-systemd based systems. However, setting debug=True will 52 | cause this method to raise any exceptions generated to the caller, to 53 | aid in debugging.""" 54 | try: 55 | self.socket.sendall(_b(state)) 56 | except: 57 | if self.debug: 58 | raise 59 | 60 | 61 | def main(argv): 62 | import getopt 63 | timeout = 30 64 | try: 65 | opts, args = getopt.getopt(argv[1:],"t",["timeout="]) 66 | except getopt.GetoptError: 67 | print ('{} [-t|--timeout 10] zookeeper:2181'.format(argv[0]) ) 68 | sys.exit(2) 69 | for opt, arg in opts: 70 | if opt in ("-t", "--timeout"): 71 | timeout = arg 72 | logging.info("Setting timeout to {} seconds".format(timeout)) 73 | 74 | if len(args) == 0: 75 | print ('ERROR: Zookeeper host and port are required.') 76 | sys.exit(3) 77 | 78 | 79 | host, separator, port = args[0].rpartition(':') 80 | 81 | assert separator # separator (`:`) must be present 82 | port = int(port) # convert to integer 83 | 84 | n = SystemdNotifier() 85 | n.notify("STATUS=Connecting to Zookeeper at {}:{}".format(host, port)) 86 | zk_sock = None 87 | while True: 88 | try: 89 | zk_sock = socket.create_connection((host, port), timeout) 90 | break 91 | except socket.error: 92 | logging.error("Connection refused. Trying again...") 93 | except socket.timeout: 94 | logging.error("Timeout expired. Trying again...") 95 | 96 | # We're connected. 
Let's ask zookeeper if it's ready 97 | zk_sock.send("ruok\n") 98 | zk_resp = zk_sock.recv(1024) 99 | if zk_resp != "imok": 100 | logging.error("Zookeeper up but not healthy: {}".format(zk_resp)) 101 | # 121 == EREMOTEIO 102 | n.notify("ERRNO=121") 103 | exit(121) 104 | 105 | n.notify("STATUS=Zookeeper is ready at {}:{}!".format(host, port)) 106 | n.notify("READY=1") 107 | 108 | exit(0) 109 | 110 | if __name__ == '__main__': 111 | import sys 112 | main(sys.argv) 113 | -------------------------------------------------------------------------------- /roles/kafka/files/wait-for-zookeeper.service: -------------------------------------------------------------------------------- 1 | # /etc/systemd/system/wait-for-zookeeper.service 2 | [Unit] 3 | Description=Wait For Zookeeper Service 4 | Before=kafka.service 5 | After=network.target 6 | After=zookeeper.service 7 | Wants=zookeeper.service 8 | PartOf=kafka.service 9 | 10 | [Service] 11 | Type=notify 12 | NotifyAccess=all 13 | WorkingDirectory=/tmp 14 | Environment=ZOOKEEPER_HOST=127.0.0.1 15 | Environment=ZOOKEEPER_PORT=2181 16 | EnvironmentFile=-/etc/sysconfig/wait-for-zookeeper 17 | ExecStart=/usr/local/sbin/wait-for-zookeeper.py ${ZOOKEEPER_HOST}:${ZOOKEEPER_PORT} 18 | Restart=on-failure 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | RequiredBy=kafka.service 23 | -------------------------------------------------------------------------------- /roles/kafka/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for kafka 3 | 4 | - name: Create kafka zeek topic 5 | command: > 6 | /opt/kafka/bin/kafka-topics.sh 7 | --zookeeper 127.0.0.1:2181 8 | --create 9 | --replication-factor 1 10 | --topic zeek-raw 11 | --partitions 1 12 | 13 | - name: Create kafka suricata topic 14 | command: > 15 | /opt/kafka/bin/kafka-topics.sh 16 | --zookeeper 127.0.0.1:2181 17 | --create 18 | --replication-factor 1 19 | --topic suricata-raw 20 | --partitions 1 21 | 22 | - name: Create kafka fsf topic 23 | command: > 24 | /opt/kafka/bin/kafka-topics.sh 25 | --zookeeper 127.0.0.1:2181 26 | --create 27 | --replication-factor 1 28 | --topic fsf-raw 29 | --partitions 1 30 | -------------------------------------------------------------------------------- /roles/kafka/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages 3 | yum: 4 | name: 5 | - kafka 6 | - kafkacat 7 | - java-11-openjdk-headless 8 | state: present 9 | 10 | - name: Create kafka data directory 11 | file: 12 | path: "{{ kafka_data_dir }}" 13 | mode: 0755 14 | owner: "{{ kafka_user }}" 15 | group: "{{ kafka_group }}" 16 | state: directory 17 | 18 | - name: Set kafka retention 19 | lineinfile: 20 | dest: "{{ kafka_config_path }}" 21 | regexp: "log.retention.hours=" 22 | line: "log.retention.hours={{ kafka_retention }}" 23 | state: present 24 | 25 | - name: Set kafka data directory 26 | lineinfile: 27 | dest: "{{ kafka_config_path }}" 28 | regexp: "log.dirs=" 29 | line: "log.dirs={{ kafka_data_dir }}" 30 | 31 | - name: Set kafka broker.id to automatic 32 | lineinfile: 33 | dest: "{{ kafka_config_path }}" 34 | regexp: "broker.id=" 35 | line: "broker.id=-1" 36 | 37 | - name: Set kafka zookeeper.connect 38 | lineinfile: 39 | dest: "{{ kafka_config_path }}" 40 | regexp: "zookeeper.connect=" 41 | line: "zookeeper.connect={{ kafka_zookeeper_host }}:{{ kafka_zookeeper_port }}{{ kafka_zookeeper_chroot }}" 42 | 43 | - name: Discover facts about data mount 44 | 
set_fact: 45 | rock_mounts: 46 | mount: "{{ item.mount }}" 47 | device: "{{ item.device }}" 48 | size_total: "{{ item.size_total }}" 49 | loop: 50 | "{{ ansible_mounts }}" 51 | when: (default_mount is defined and item.mount == default_mount and rock_mounts is not defined) 52 | 53 | - name: Determining if quotas are enabled 54 | command: > 55 | awk -v path="{{ default_mount }}" 56 | '$2 ~ path && $4 ~ /p(rj)?quota/ ' /etc/fstab 57 | register: prjquota 58 | changed_when: false 59 | 60 | - name: Create kafka quota project id 61 | getent: 62 | database: group 63 | split: ':' 64 | key: kafka 65 | when: rock_mounts is defined and (prjquota.stdout|length>0) 66 | 67 | - name: Map kafka quota project id to name 68 | lineinfile: 69 | create: true 70 | state: present 71 | insertafter: EOF 72 | path: /etc/projid 73 | line: "kafka:{{ getent_group.kafka[1] }}" 74 | when: rock_mounts is defined and (prjquota.stdout|length>0) 75 | 76 | - name: Define kafka quota project directories 77 | lineinfile: 78 | create: true 79 | state: present 80 | insertafter: EOF 81 | path: /etc/projects 82 | line: "{{ getent_group.kafka[1] }}:{{ kafka_data_dir }}" 83 | when: rock_mounts is defined and (prjquota.stdout|length>0) 84 | 85 | - name: set kafka weight 86 | set_fact: 87 | kafka_weight: "{{ rock_services | selectattr('name', 'equalto', 'kafka') | map(attribute='quota_weight') | first }}" 88 | when: kafka_quota is not defined and (prjquota.stdout|length>0) 89 | 90 | - name: set kafka quota if not user defined 91 | set_fact: 92 | kafka_quota: "{{ rock_mounts.size_total | int / xfs_quota_weight | int * kafka_weight | int }}" 93 | when: kafka_quota is not defined and (prjquota.stdout|length>0) 94 | 95 | - name: set kafka project quota 96 | xfs_quota: 97 | type: project 98 | name: kafka 99 | bhard: "{{ kafka_quota }}" 100 | state: present 101 | mountpoint: "{{ rock_mounts.mount }}" 102 | when: rock_mounts is defined and (prjquota.stdout|length>0) 103 | 104 | - name: Create wait-for-zookeeper sidecar 105 | copy: 106 | dest: "{{ item.dest }}" 107 | src: "{{ item.src }}" 108 | owner: root 109 | group: root 110 | mode: "{{ item.mode }}" 111 | with_items: 112 | - src: wait-for-zookeeper.py 113 | dest: /usr/local/sbin/wait-for-zookeeper.py 114 | mode: '0755' 115 | - src: wait-for-zookeeper.service 116 | dest: /etc/systemd/system/wait-for-zookeeper.service 117 | mode: '0644' 118 | register: wait_for_zk_created 119 | 120 | - name: Create environment file for zookeeper sidecar 121 | copy: 122 | dest: /etc/sysconfig/wait-for-zookeeper 123 | content: | 124 | ZOOKEEPER_HOST={{ kafka_zookeeper_host }} 125 | ZOOKEEPER_PORT={{ kafka_zookeeper_port }} 126 | mode: '0644' 127 | owner: root 128 | group: root 129 | 130 | - name: Create kafka service overrides dir 131 | file: 132 | state: directory 133 | path: /etc/systemd/system/kafka.service.d/ 134 | recurse: true 135 | owner: root 136 | group: root 137 | mode: '0755' 138 | 139 | - name: Insert Kafka override to wait for Zookeeper 140 | copy: 141 | dest: /etc/systemd/system/kafka.service.d/override.conf 142 | content: | 143 | # /etc/systemd/system/kafka.service.d/override.conf 144 | [Unit] 145 | After=wait-for-zookeeper.service 146 | Requires=wait-for-zookeeper.service 147 | mode: '0644' 148 | owner: root 149 | group: root 150 | register: kafka_override_created 151 | 152 | - name: Enable and start kafka 153 | service: 154 | name: "{{ item }}" 155 | daemon-reload: "{{ kafka_override_created.changed or wait_for_zk_created.changed }}" 156 | state: "{{ 'started' if 'kafka' in enabled_services 
else 'stopped' }}"
157 |     enabled: "{{ 'kafka' in enabled_services }}"
158 |   with_items:
159 |     - wait-for-zookeeper
160 |     - kafka
161 | 
162 | - name: Configure firewall ports
163 |   firewalld:
164 |     port: "{{ item }}/tcp"
165 |     permanent: true
166 |     state: enabled
167 |     immediate: true
168 |     zone: work
169 |   loop:
170 |     - 9092
171 |   when: groups['kafka'] | difference(groups['logstash']) | count > 0
172 | ...
173 | 
--------------------------------------------------------------------------------
/roles/kafka/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 | 
3 | 
--------------------------------------------------------------------------------
/roles/kafka/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 |   remote_user: root
4 |   roles:
5 |     - kafka
6 | 
--------------------------------------------------------------------------------
/roles/kafka/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for kafka
3 | 
--------------------------------------------------------------------------------
/roles/kibana/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kibana_defaultRoute: /app/kibana
3 | 
--------------------------------------------------------------------------------
/roles/kibana/files/profile.d-kibanapw.sh:
--------------------------------------------------------------------------------
1 | # Set passwords
2 | function kibanapw() { if [ $# -lt 2 ]; then echo -e "Usage: kibanapw USER PASSWORD\nUsers will be added to /etc/lighttpd/rock-htpasswd.user"; else egrep "^${1}:" /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1; if [[ $?
-eq 0 ]]; then sudo sed -i "/${1}\:/d" /etc/lighttpd/rock-htpasswd.user; fi; printf "${1}:$(echo ${2} | openssl passwd -apr1 -stdin)\n" | sudo tee -a /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1; fi; } 3 | -------------------------------------------------------------------------------- /roles/kibana/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Restart kibana 4 | service: 5 | name: kibana 6 | state: restarted 7 | -------------------------------------------------------------------------------- /roles/kibana/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install packages 4 | yum: 5 | name: kibana-{{ rock_services | selectattr('name', 'equalto', 'kibana') | map(attribute='version') | first }} 6 | state: present 7 | 8 | - name: Update kibana config 9 | template: 10 | src: kibana.yml.j2 11 | dest: /etc/kibana/kibana.yml 12 | notify: Restart kibana 13 | 14 | - name: Enable and start kibana 15 | service: 16 | name: kibana 17 | enabled: "{{ 'kibana' in enabled_services }}" 18 | 19 | - name: Flush handlers 20 | meta: flush_handlers 21 | 22 | - name: "Wait for Kibana to be available" 23 | uri: 24 | url: "{{ kibana_url }}/api/kibana/settings" 25 | status_code: 200 26 | return_content: true 27 | register: result 28 | until: result.status == 200 29 | retries: 60 30 | delay: 1 31 | 32 | - name: Store Kibana settings 33 | set_fact: 34 | kbn_settings: "{{ result.json.settings }}" 35 | 36 | - name: Blanket install/update kibana saved objects 37 | command: ./import-saved-items.sh "{{ kibana_url }}" 38 | args: 39 | chdir: "{{ rock_module_dir }}/ecs-configuration/kibana" 40 | changed_when: false 41 | tags: 42 | - saved_objects 43 | # TODO: Fix this ^^ 44 | 45 | - name: Configure kibana templates 46 | uri: 47 | method: PUT 48 | url: "{{ es_url }}/_template/kibana-config" 49 | body: > 50 | { "order" : 0, "template" : ".kibana", 51 | "settings" : 52 | { "index.number_of_replicas" : "0", 53 | "index.number_of_shards" : "1" }, 54 | "mappings" : { }, "aliases" : { } } 55 | body_format: json 56 | status_code: 200,201 57 | 58 | - name: Set Kibana dark mode for the default space 59 | uri: 60 | method: POST 61 | url: "{{ kibana_url }}/api/kibana/settings" 62 | body: > 63 | {"changes": { 64 | "theme:darkMode": true 65 | } 66 | } 67 | headers: 68 | kbn-xsrf: true 69 | body_format: json 70 | status_code: 200,201 71 | when: "'theme:darkMode' not in kbn_settings or not (kbn_settings['theme:darkMode']|bool)" 72 | 73 | - name: Set Kibana to store data in session storage 74 | uri: 75 | method: POST 76 | url: "{{ kibana_url }}/api/kibana/settings" 77 | body: > 78 | {"changes": { 79 | "state:storeInSessionStorage": true 80 | } 81 | } 82 | headers: 83 | kbn-xsrf: true 84 | body_format: json 85 | status_code: 200,201 86 | when: "'state:storeInSessionStorage' not in kbn_settings or not (kbn_settings['state:storeInSessionStorage']|bool)" 87 | 88 | - name: Set fact for list of SIEM index patterns 89 | set_fact: 90 | siem_indices: "{{ kbn_settings['siem:defaultIndex']['userValue'] | default(['auditbeat-*', 'filebeat-*', 'packetbeat-*', 'winlogbeat-*']) }}" 91 | 92 | - name: Configure SIEM to read ecs-* index pattern 93 | uri: 94 | method: POST 95 | url: "{{ kibana_url }}/api/kibana/settings" 96 | body: > 97 | {"changes": { 98 | "siem:defaultIndex": {{ siem_indices | union(['ecs-*']) }} 99 | } 100 | } 101 | headers: 102 | kbn-xsrf: true 103 | body_format: json 104 | status_code: 
200,201 105 | when: "'siem:defaultIndex' not in kbn_settings or 'ecs-*' not in kbn_settings['siem:defaultIndex']['userValue']" 106 | 107 | - name: Add the kibanapw shell function 108 | copy: 109 | src: profile.d-kibanapw.sh 110 | dest: /etc/profile.d/kibanapw.sh 111 | mode: 0644 112 | owner: root 113 | group: root 114 | 115 | - name: Download RockNSM elastic configs 116 | get_url: 117 | url: "{{ rock_dashboards_url }}" 118 | dest: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" 119 | mode: 0644 120 | when: 121 | rock_online_install and ( 122 | 'elasticsearch' in installed_services or 123 | 'logstash' in installed_services 124 | ) 125 | 126 | - name: Extract RockNSM elastic configs 127 | unarchive: 128 | src: "{{ rock_cache_dir }}/{{ rock_dashboards_filename }}" 129 | dest: /opt/rocknsm 130 | owner: root 131 | group: root 132 | creates: "{{ rock_module_dir }}" 133 | remote_src: true 134 | when: "'elasticsearch' in installed_services or 'logstash' in installed_services" 135 | -------------------------------------------------------------------------------- /roles/kibana/templates/kibana.yml.j2: -------------------------------------------------------------------------------- 1 | server.port: {{ kibana_port }} 2 | server.name: "{{ ansible_hostname }}" 3 | server.defaultRoute: "{{ kibana_defaultRoute }}" 4 | elasticsearch.hosts: "{{ es_url }}" 5 | -------------------------------------------------------------------------------- /roles/lighttpd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Enable and restart lighttpd 3 | systemd: 4 | name: lighttpd 5 | state: >- 6 | {%- if 'lighttpd' in enabled_services or 'docket' in enabled_services -%} 7 | restarted 8 | {%- else -%} 9 | stopped 10 | {%- endif -%} 11 | enabled: >- 12 | {%- if 'lighttpd' in enabled_services or 'docket' in enabled_services -%} 13 | True 14 | {%- else -%} 15 | False 16 | {%- endif -%} 17 | -------------------------------------------------------------------------------- /roles/lighttpd/tasks/add-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure passwd file exists 4 | file: 5 | path: /etc/lighttpd/rock-htpasswd.user 6 | state: touch 7 | owner: root 8 | group: root 9 | mode: 0644 10 | 11 | - name: Hash password 12 | command: "openssl passwd -apr1 \"{{ lighttpd_password }}\"" 13 | register: lighttpd_hashed_password 14 | no_log: true 15 | changed_when: false 16 | 17 | - name: Add a new user to lighttpd 18 | lineinfile: 19 | path: /etc/lighttpd/rock-htpasswd.user 20 | regex: "^{{ lighttpd_user }}" 21 | line: "{{ lighttpd_user }}:{{ lighttpd_hashed_password.stdout }}" 22 | -------------------------------------------------------------------------------- /roles/lighttpd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install packages 4 | yum: 5 | name: 6 | - lighttpd 7 | - python2-xkcdpass 8 | state: present 9 | 10 | - name: Install ROCK lighttpd configuration 11 | template: 12 | src: templates/{{ item }}.j2 13 | dest: /etc/lighttpd/vhosts.d/{{ item }} 14 | mode: 0644 15 | owner: root 16 | group: root 17 | when: "'kibana' in installed_services" 18 | loop: 19 | - 10-rock-auth.conf 20 | - 10-tls.conf 21 | - 20-rock-vars.conf 22 | - 50-rockproxy.conf 23 | notify: Enable and restart lighttpd 24 | 25 | - name: Enable lighttpd vhosts 26 | lineinfile: 27 | path: /etc/lighttpd/lighttpd.conf 28 | regexp: 
'^#?\s*include.*vhosts\.d/.*$' 29 | line: include "/etc/lighttpd/vhosts.d/*.conf" 30 | notify: Enable and restart lighttpd 31 | 32 | - name: Disable lighttpd ipv6 33 | lineinfile: 34 | path: /etc/lighttpd/lighttpd.conf 35 | regexp: '^server.use-ipv6.*$' 36 | line: server.use-ipv6 = "disable" 37 | notify: Enable and restart lighttpd 38 | 39 | - name: Enable lighttpd to perform proxy connect 40 | seboolean: 41 | name: httpd_can_network_connect 42 | state: true 43 | persistent: true 44 | when: "'kibana' in installed_services" 45 | 46 | - name: Generate sensor private key 47 | openssl_privatekey: 48 | path: "{{ http_tls_key }}" 49 | when: "'kibana' in installed_services" 50 | notify: Enable and restart lighttpd 51 | 52 | - name: Generate sensor public key 53 | openssl_publickey: 54 | path: "{{ http_tls_pub }}" 55 | privatekey_path: "{{ http_tls_key }}" 56 | when: "'kibana' in installed_services" 57 | notify: Enable and restart lighttpd 58 | 59 | - name: Generate sensor CSR 60 | openssl_csr: 61 | path: "{{ http_tls_pub }}.csr" 62 | privatekey_path: "{{ http_tls_key }}" 63 | country_name: US 64 | state_or_province_name: MO 65 | locality_name: St. Louis 66 | organization_name: RockNSM 67 | organizational_unit_name: NSM Ninjas 68 | email_address: info@rocknsm.io 69 | common_name: "{{ ansible_hostname }}" 70 | when: "'kibana' in installed_services" 71 | notify: Enable and restart lighttpd 72 | 73 | - name: Generate sensor certificate 74 | openssl_certificate: 75 | path: "{{ http_tls_crt }}" 76 | privatekey_path: "{{ http_tls_key }}" 77 | csr_path: "{{ http_tls_pub }}.csr" 78 | provider: selfsigned 79 | when: "'kibana' in installed_services" 80 | notify: Enable and restart lighttpd 81 | 82 | - name: Combine sensor cert and key 83 | shell: > 84 | cat {{ http_tls_key }} {{ http_tls_crt }} > {{ http_tls_combined }} 85 | args: 86 | creates: "{{ http_tls_combined }}" 87 | when: "'kibana' in installed_services" 88 | notify: Enable and restart lighttpd 89 | 90 | - name: Generate DH parameters 91 | command: > 92 | openssl dhparam -out {{ http_tls_dhparams }} 2048 93 | args: 94 | creates: "{{ http_tls_dhparams }}" 95 | when: "'kibana' in installed_services" 96 | notify: Enable and restart lighttpd 97 | 98 | - name: Configure firewall ports 99 | firewalld: 100 | port: "{{ item }}/tcp" 101 | permanent: true 102 | state: enabled 103 | immediate: true 104 | loop: 105 | - 443 106 | 107 | - name: Check if initial user has already been created 108 | stat: 109 | path: /etc/lighttpd/rock-htpasswd.user 110 | register: rocknsm_initial_user_created 111 | 112 | - block: 113 | - name: Determine initial username 114 | shell: "getent passwd 1000 | awk -F: '{print $1}'" 115 | register: rocknsm_initial_username 116 | 117 | - name: Determine initial password 118 | command: "xkcdpass -a rock" 119 | register: rocknsm_initial_password 120 | 121 | - name: Set initial credentials 122 | include_tasks: add-user.yml 123 | vars: 124 | lighttpd_user: "{{ rocknsm_initial_username.stdout }}" 125 | lighttpd_password: "{{ rocknsm_initial_password.stdout }}" 126 | 127 | - name: Output initial credentials 128 | shell: "echo -e \"U: {{ rocknsm_initial_username.stdout }}\nP: {{ rocknsm_initial_password.stdout }}\" 129 | > /home/{{ rocknsm_initial_username.stdout }}/KIBANA_CREDS.README" 130 | when: not rocknsm_initial_user_created.stat.exists | bool 131 | -------------------------------------------------------------------------------- /roles/lighttpd/tasks/remove-user.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Remove a user from lighttpd 4 | lineinfile: 5 | path: /etc/lighttpd/rock-htpasswd.user 6 | regex: "^{{ lighttpd_user }}" 7 | state: absent 8 | -------------------------------------------------------------------------------- /roles/lighttpd/templates/10-rock-auth.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # Lighttpd shared authentication 3 | # file: /etc/lighttpd/vhosts.d/05-rock-auth.conf 4 | # 5 | # {{ ansible_managed }} 6 | # 7 | server.modules += ( "mod_auth" ) 8 | server.modules += ( "mod_authn_file" ) 9 | 10 | auth.backend = "htpasswd" 11 | auth.backend.htpasswd.userfile = "/etc/lighttpd/rock-htpasswd.user" 12 | auth.debug = 0 13 | 14 | auth.require = ( 15 | "/" => ( 16 | "method" => "basic", 17 | "realm" => "RockNSM", 18 | "require" => "valid-user" 19 | ) 20 | ) 21 | -------------------------------------------------------------------------------- /roles/lighttpd/templates/10-tls.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # Lighttpd shared authentication 3 | # file: /etc/lighttpd/vhosts.d/10-tls.conf 4 | # 5 | # {{ ansible_managed }} 6 | # 7 | 8 | # Upgrade to TLS (works on lighttpd >= 1.4.50) 9 | $HTTP["scheme"] == "http" { url.redirect = ("" => "https://${authority}${path}${qsa}") } 10 | 11 | # Listen on 443 for TLS connections 12 | $SERVER["socket"] == "0.0.0.0:443" { 13 | # Global TLS Configuration 14 | ssl.engine = "enable" 15 | ssl.pemfile = "{{ http_tls_combined }}" 16 | 17 | # Use DH key exchange for Ephemeral keys and forward secrecy 18 | ssl.dh-file = "{{ http_tls_dhparams }}" 19 | ssl.ec-curve = "secp384r1" 20 | 21 | # TLS recommendations used from https://cipherli.st/ 22 | ssl.honor-cipher-order = "enable" 23 | ssl.cipher-list = "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH" 24 | ssl.use-compression = "disable" 25 | setenv.add-response-header = ( 26 | "Strict-Transport-Security" => "max-age=63072000; includeSubDomains; preload", 27 | "X-Frame-Options" => "DENY", 28 | "X-Content-Type-Options" => "nosniff" 29 | ) 30 | 31 | ssl.use-sslv2 = "disable" 32 | ssl.use-sslv3 = "disable" 33 | 34 | ssl.disable-client-renegotiation = "enable" 35 | } 36 | -------------------------------------------------------------------------------- /roles/lighttpd/templates/20-rock-vars.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # Lighttpd variables for RockNSM 3 | # file: /etc/lighttpd/vhosts.d/20-rock-vars.conf# 4 | # 5 | # {{ ansible_managed }} 6 | # 7 | 8 | # global vars 9 | var.vhostdir = "/var/www/rock" 10 | server.breakagelog = "/var/log/lighttpd/error.log" 11 | 12 | #debug.log-condition-handling = "enable" 13 | #debug.log-request-header = "enable" 14 | #debug.log-request-handling = "enable" 15 | #debug.log-response-header = "enable" 16 | 17 | # See Item12581: Lighttpd lower cases PATH_INFO on case-insensitive file systems. 
18 | server.force-lowercase-filenames = "disable" 19 | -------------------------------------------------------------------------------- /roles/lighttpd/templates/50-rockproxy.conf.j2: -------------------------------------------------------------------------------- 1 | ####################################################################### 2 | # Lighttpd reverse proxy for Kibana 3 | # file: /etc/lighttpd/vhosts.d/30-rockproxy.conf 4 | # 5 | # {{ ansible_managed }} 6 | # 7 | 8 | server.modules += ( "mod_proxy") 9 | 10 | proxy.server = ( "/" => 11 | ( "kibana" => 12 | ( 13 | "host" => "127.0.0.1", 14 | "port" => 5601 15 | ) 16 | ) 17 | ) 18 | proxy.forwarded = ( "for" => 1, 19 | "proto" => 1, 20 | "host" => 1, 21 | "by" => 1, 22 | "remote_user" => 1 23 | ) 24 | proxy.header = ( 25 | "connect" => "enable", 26 | "upgrade" => "enable", 27 | ) 28 | -------------------------------------------------------------------------------- /roles/logstash/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Restart logstash 4 | systemd: 5 | name: logstash 6 | state: "{{ 'restarted' if 'logstash' in enabled_services else 'stopped' }}" 7 | -------------------------------------------------------------------------------- /roles/logstash/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install packages 4 | yum: 5 | name: 6 | - logstash-{{ rock_services | selectattr('name', 'equalto', 'logstash') | map(attribute='version') | first }} 7 | state: present 8 | 9 | - name: Add sysconfig file 10 | template: 11 | src: logstash_sysconfig.j2 12 | dest: /etc/sysconfig/logstash 13 | notify: Restart logstash 14 | 15 | - name: Copy Logstash configs to /etc 16 | copy: 17 | remote_src: true 18 | src: "{{ rock_module_dir }}/ecs-configuration/logstash/conf.d/" 19 | dest: "/etc/logstash/conf.d/" 20 | owner: "{{ logstash_user }}" 21 | group: "{{ logstash_group }}" 22 | # notify: Restart logstash 23 | tags: 24 | - molecule-idempotence-notest 25 | 26 | - name: Copy Logstash ruby scripts to /etc 27 | copy: 28 | remote_src: true 29 | src: "{{ rock_module_dir }}/ecs-configuration/logstash/ruby/" 30 | dest: "/etc/logstash/conf.d/ruby" 31 | owner: "{{ logstash_user }}" 32 | group: "{{ logstash_group }}" 33 | # notify: Restart logstash 34 | 35 | - name: Template input configs 36 | template: 37 | src: "{{ item.src }}" 38 | dest: "/etc/logstash/conf.d/{{ item.dest }}" 39 | owner: "{{ logstash_user }}" 40 | group: "{{ logstash_group }}" 41 | mode: 0640 42 | # notify: Restart logstash 43 | when: logstash_configs is defined 44 | with_items: "{{ logstash_configs }}" 45 | tags: 46 | - molecule-idempotence-notest 47 | 48 | - name: Template Elasticsearch output for Logstash 49 | template: 50 | src: "{{ item }}.j2" 51 | dest: "/etc/logstash/conf.d/{{ item }}" 52 | owner: "{{ logstash_user }}" 53 | group: "{{ logstash_group }}" 54 | mode: 0640 55 | # notify: Restart logstash 56 | loop: 57 | - logstash-9999-output-elasticsearch.conf 58 | tags: 59 | - molecule-idempotence-notest 60 | 61 | - name: Enable logstash monitoring 62 | lineinfile: 63 | path: /etc/logstash/logstash.yml 64 | line: "xpack.monitoring.enabled: true" 65 | regexp: '^#xpack.monitoring.enabled: .*' 66 | # notify: Restart logstash 67 | tags: 68 | - molecule-idempotence-notest 69 | 70 | - name: Point logstash monitoring to elastic hosts 71 | lineinfile: 72 | path: /etc/logstash/logstash.yml 73 | line: > 74 | xpack.monitoring.elasticsearch.hosts: 75 
| {% if groups['elasticsearch'] | length > 1 -%}[ 76 | {%- for host in groups['es_data'] -%} 77 | "{{ host }}" 78 | {%- if not loop.last %},{% endif -%} 79 | {%- endfor -%}] 80 | {% else %} 81 | ["127.0.0.1:9200"] 82 | {%- endif %} 83 | regexp: '^#xpack.monitoring.elasticsearch.hosts: .*' 84 | # notify: Restart logstash 85 | tags: 86 | - molecule-idempotence-notest 87 | 88 | - name: Enable and start Logstash 89 | service: 90 | name: logstash 91 | enabled: "{{ 'logstash' in enabled_services }}" 92 | # notify: Restart logstash 93 | tags: 94 | - molecule-idempotencie-notest 95 | 96 | #- name: Enable and start Logstash 97 | # service: sudo systemctl restart logstash 98 | -------------------------------------------------------------------------------- /roles/logstash/templates/logstash-9999-output-elasticsearch.conf.j2: -------------------------------------------------------------------------------- 1 | output { 2 | # Requires event module and category 3 | if [event][module] and [event][category] { 4 | 5 | # Requires event dataset 6 | if [event][dataset] { 7 | elasticsearch { 8 | {% if groups['elasticsearch'] | length > 1 %} 9 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 10 | {% else %} 11 | hosts => ["127.0.0.1:9200"] 12 | {% endif %} 13 | index => "ecs-%{[event][module]}-%{[event][category]}-%{+YYYY.MM.dd}" 14 | manage_template => false 15 | } 16 | } 17 | 18 | else { 19 | # Suricata or Zeek JSON error possibly, ie: Suricata without a event.dataset seen with filebeat error, but doesn't have a tag 20 | if [event][module] == "suricata" or [event][module] == "zeek" { 21 | elasticsearch { 22 | {% if groups['elasticsearch'] | length > 1 %} 23 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 24 | {% else %} 25 | hosts => ["127.0.0.1:9200"] 26 | {% endif %} 27 | index => "parse-failures-%{+YYYY.MM.dd}" 28 | manage_template => false 29 | } 30 | } 31 | else { 32 | elasticsearch { 33 | {% if groups['elasticsearch'] | length > 1 %} 34 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 35 | {% else %} 36 | hosts => ["127.0.0.1:9200"] 37 | {% endif %} 38 | index => "ecs-%{[event][module]}-%{[event][category]}-%{+YYYY.MM.dd}" 39 | manage_template => false 40 | } 41 | } 42 | } 43 | } 44 | 45 | else if [@metadata][stage] == "fsfraw_kafka" { 46 | elasticsearch { 47 | {% if groups['elasticsearch'] | length > 1 %} 48 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 49 | {% else %} 50 | hosts => ["127.0.0.1:9200"] 51 | {% endif %} 52 | index => "fsf-%{+YYYY.MM.dd}" 53 | manage_template => false 54 | } 55 | } 56 | 57 | else if [@metadata][stage] == "_parsefailure" { 58 | elasticsearch { 59 | {% if groups['elasticsearch'] | length > 1 %} 60 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 61 | {% else %} 62 | hosts => ["127.0.0.1:9200"] 63 | {% endif %} 64 | index => "parse-failures-%{+YYYY.MM.dd}" 65 | manage_template => false 66 | } 67 | 68 | } 69 | 70 | # Catch all index that is not RockNSM or ECS or parse failures 71 | else { 72 | elasticsearch { 73 | {% if groups['elasticsearch'] | length > 1 %} 74 | hosts => [{% for host in groups['es_data'] %}"{{ host }}"{% if not loop.last %},{% endif %}{% endfor %}] 75 | {% else %} 76 | hosts => ["127.0.0.1:9200"] 77 | {% endif %} 78 | index => "indexme-%{+YYYY.MM.dd}" 79 | 
manage_template => false 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /roles/logstash/templates/logstash_sysconfig.j2: -------------------------------------------------------------------------------- 1 | bootstrap_servers={% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} 2 | elasticsearch_hosts={% for host in groups['es_data'] %}"{{ host }}:9200"{% if not loop.last %},{% endif %}{% endfor %} 3 | -------------------------------------------------------------------------------- /roles/stenographer/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/stenographer/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for stenographer 3 | stenographer_packagename: stenographer 4 | stenographer_user: stenographer 5 | stenographer_group: stenographer 6 | stenographer_monitor_interfaces: [eth0] 7 | enable_stenographer: true 8 | method: deploy 9 | -------------------------------------------------------------------------------- /roles/stenographer/files/stenographer.service: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2017, 2018 RockNSM. 3 | # 4 | # This file is part of RockNSM 5 | # (see http://rocknsm.io). 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. 
See the License for the 17 | # specific language governing permissions and limitations 18 | # under the License. 19 | # 20 | [Unit] 21 | Description=packet capture to disk 22 | After=syslog.target network.target 23 | 24 | [Service] 25 | Type=oneshot 26 | RemainAfterExit=yes 27 | ExecStartPre=/bin/echo "Starting template instances." 28 | ExecStart=/bin/true 29 | ExecStartPost=/bin/echo "View instance status with `systemctl status stenographer*`." 30 | ExecStartPost=/bin/echo "View instance logs with `journalctl -u stenographer@*`" 31 | ExecReload=/bin/true 32 | WorkingDirectory=/etc/stenographer 33 | 34 | [Install] 35 | WantedBy=multi-user.target 36 | -------------------------------------------------------------------------------- /roles/stenographer/files/stenographer@.service: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017, 2018 RockNSM. 2 | # 3 | # This file is part of RockNSM 4 | # (see http://rocknsm.io). 5 | # 6 | # Adapted from code from Stenographer (Copyright 2014 Google Inc.) 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, 15 | # software distributed under the License is distributed on an 16 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | # KIND, either express or implied. See the License for the 18 | # specific language governing permissions and limitations 19 | # under the License. 20 | # 21 | # stenographer - full packet to disk capture 22 | # 23 | # stenographer is a simple, fast method of writing live packets to disk, 24 | # then requesting those packets after-the-fact for post-hoc analysis. 
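# Descriptive note: one instance of this template unit is enabled per capture
# interface (e.g. stenographer@eth0), and each instance reads its own
# /etc/stenographer/config.<interface> via the config.%i argument below. The
# oneshot stenographer.service above acts only as an umbrella unit that the
# instances attach to through the PartOf/RequiredBy relationships.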
25 | [Unit] 26 | Description=packet capture to disk 27 | After=network.target 28 | Before=stenographer.service 29 | PartOf=stenographer.service 30 | ReloadPropagatedFrom=stenographer.service 31 | 32 | [Service] 33 | User=stenographer 34 | Group=stenographer 35 | LimitFSIZE=4294967296 36 | LimitNOFILE=1000000 37 | ExecStart=/usr/bin/stenographer -config /etc/stenographer/config.%i 38 | ExecStopPost=/bin/pkill -9 stenotype 39 | 40 | [Install] 41 | RequiredBy=stenographer.service 42 | -------------------------------------------------------------------------------- /roles/stenographer/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for stenographer 3 | 4 | - name: Start stenographer service 5 | service: 6 | name: stenographer 7 | state: "{{ 'started' if 'stenographer' in enabled_services else 'stopped' }}" 8 | 9 | - name: Start stenographer per interface 10 | service: 11 | name: "stenographer@{{ item }}" 12 | state: "{{ 'started' if 'stenographer' in enabled_services else 'stopped' }}" 13 | loop: "{{ stenographer_monitor_interfaces }}" 14 | 15 | - name: Restart stenographer per interface 16 | service: 17 | name: "stenographer@{{ item }}" 18 | state: "{{ 'started' if 'stenographer' in enabled_services else 'stopped' }}" 19 | loop: "{{ stenographer_monitor_interfaces }}" 20 | -------------------------------------------------------------------------------- /roles/stenographer/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ####################################################### 3 | ################# Config Stenographer ################# 4 | ####################################################### 5 | 6 | - name: Set stenographer config 7 | template: 8 | src: stenographer-config.j2 9 | dest: "/etc/stenographer/config.{{ item.1 }}" 10 | with_indexed_items: "{{ stenographer_monitor_interfaces }}" 11 | notify: Restart stenographer per interface 12 | 13 | - name: Create stenographer directories 14 | file: 15 | path: "{{ stenographer_data_dir }}/{{ item[0] }}/{{ item[1] }}" 16 | mode: 0755 17 | owner: "{{ stenographer_user }}" 18 | group: "{{ stenographer_group }}" 19 | state: directory 20 | with_nested: 21 | - "{{ stenographer_monitor_interfaces }}" 22 | - [ 'index', 'packets' ] 23 | 24 | - name: Install stenographer service files 25 | copy: 26 | src: "{{ item }}" 27 | dest: "/etc/systemd/system/{{ item }}" 28 | mode: 0644 29 | owner: root 30 | group: root 31 | loop: 32 | - stenographer.service 33 | - stenographer@.service 34 | 35 | - name: Generate stenographer keys 36 | command: > 37 | /usr/bin/stenokeys.sh {{ stenographer_user }} {{ stenographer_group }} 38 | environment: 39 | STENOGRAPHER_CONFIG: "/etc/stenographer/config.{{ stenographer_monitor_interfaces[0] }}" 40 | args: 41 | creates: /etc/stenographer/certs/client_key.pem 42 | 43 | - name: Discover facts about data mount 44 | set_fact: 45 | rock_mounts: 46 | mount: "{{ item.mount }}" 47 | device: "{{ item.device }}" 48 | size_total: "{{ item.size_total }}" 49 | loop: 50 | "{{ ansible_mounts }}" 51 | when: (default_mount is defined and item.mount == default_mount and rock_mounts is not defined) 52 | 53 | - name: Determining if quotas are enabled 54 | command: > 55 | awk -v path="{{ default_mount }}" 56 | '$2 ~ path && $4 ~ /p(rj)?quota/ ' /etc/fstab 57 | register: prjquota 58 | changed_when: false 59 | 60 | - name: Create stenographer quota project id 61 | getent: 62 | database: group 63 | split: ':' 64 | 
key: stenographer 65 | when: rock_mounts is defined and (prjquota.stdout|length>0) 66 | 67 | - name: Map stenographer quota project id to name 68 | lineinfile: 69 | create: true 70 | state: present 71 | insertafter: EOF 72 | path: /etc/projid 73 | line: "stenographer:{{ getent_group.stenographer[1] }}" 74 | when: rock_mounts is defined and (prjquota.stdout|length>0) 75 | 76 | - name: Define stenographer quota project directories 77 | lineinfile: 78 | create: true 79 | state: present 80 | insertafter: EOF 81 | path: /etc/projects 82 | line: "{{ getent_group.stenographer[1] }}:{{ stenographer_data_dir }}" 83 | when: rock_mounts is defined and (prjquota.stdout|length>0) 84 | 85 | - name: set stenographer weight 86 | set_fact: 87 | stenographer_weight: "{{ rock_services | selectattr('name', 'equalto', 'stenographer') | map(attribute='quota_weight') | first }}" 88 | when: stenographer_quota is not defined and (prjquota.stdout|length>0) 89 | 90 | - name: set stenographer quota if not user defined 91 | set_fact: 92 | stenographer_quota: "{{ rock_mounts.size_total | int / xfs_quota_weight | int * stenographer_weight | int }}" 93 | when: stenographer_quota is not defined and (prjquota.stdout|length>0) 94 | 95 | - name: set stenographer project quota 96 | xfs_quota: 97 | type: project 98 | name: stenographer 99 | bhard: "{{ stenographer_quota }}" 100 | state: present 101 | mountpoint: "{{ rock_mounts.mount }}" 102 | when: rock_mounts is defined and (prjquota.stdout|length>0) 103 | 104 | - name: Configure stenographer service 105 | service: 106 | name: stenographer 107 | enabled: "{{ 'stenographer' in enabled_services }}" 108 | notify: Start stenographer service 109 | 110 | - name: Configure stenographer per interface 111 | service: 112 | name: "stenographer@{{ item }}" 113 | enabled: "{{ 'stenographer' in enabled_services }}" 114 | loop: "{{ stenographer_monitor_interfaces }}" 115 | notify: Start stenographer per interface 116 | 117 | - name: Configure firewall ports 118 | firewalld: 119 | port: "{{ 1234 + index }}/tcp" 120 | permanent: true 121 | state: enabled 122 | immediate: true 123 | loop: "{{ stenographer_monitor_interfaces }}" 124 | loop_control: 125 | index_var: index 126 | when: groups['stenographer'] | difference(groups['docket']) | count > 0 127 | ... 128 | -------------------------------------------------------------------------------- /roles/stenographer/tasks/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: install.yml 3 | - import_tasks: config.yml 4 | ... 5 | -------------------------------------------------------------------------------- /roles/stenographer/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages 3 | yum: 4 | name: "{{ stenographer_packagename }}" 5 | state: "present" 6 | tags: 7 | - yum 8 | - stenographer 9 | - install 10 | -------------------------------------------------------------------------------- /roles/stenographer/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "{{ method }}.yml" 3 | ... 
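# Note: 'method' defaults to "deploy" (see defaults/main.yml), so this include
# normally runs deploy.yml, which imports install.yml and then config.yml.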
4 | -------------------------------------------------------------------------------- /roles/stenographer/templates/stenographer-config.j2: -------------------------------------------------------------------------------- 1 | { 2 | "Threads": [ 3 | { 4 | "PacketsDirectory": "/data/stenographer/{{ item.1 }}/thread0/packets" 5 | , "IndexDirectory": "/data/stenographer/{{ item.1 }}/thread0/index" 6 | , "MaxDirectoryFiles": 30000 7 | , "DiskFreePercentage": 10 8 | } 9 | ] 10 | , "StenotypePath": "/usr/bin/stenotype" 11 | , "Interface": "{{ item.1 }}" 12 | , "Port": {{ 1234 + item.0 }} 13 | , "Host": "{{ ansible_default_ipv4.address }}" 14 | , "Flags": ["-v"] 15 | , "CertPath": "/etc/stenographer/certs" 16 | } 17 | -------------------------------------------------------------------------------- /roles/stenographer/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/stenographer/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - stenographer 6 | -------------------------------------------------------------------------------- /roles/stenographer/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for stenographer 3 | -------------------------------------------------------------------------------- /roles/suricata/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/suricata/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for suricata 3 | method: all 4 | -------------------------------------------------------------------------------- /roles/suricata/files/suricata.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Suricata Intrusion Detection Service 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | EnvironmentFile=-/etc/sysconfig/suricata 7 | ExecStart=/sbin/suricata -c /etc/suricata/suricata.yaml --af-packet 8 | ExecReload=/bin/kill -HUP $MAINPID 9 | User=suricata 10 | Group=suricata 11 | CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW CAP_IPC_LOCK 12 | AmbientCapabilities=CAP_NET_ADMIN CAP_NET_RAW CAP_IPC_LOCK 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/suricata/files/suricata.tmpfiles: -------------------------------------------------------------------------------- 1 | #Type Path Mode UID GID Age Argument 2 | d /run/suricata 0770 suricata suricata - - 3 | -------------------------------------------------------------------------------- /roles/suricata/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for suricata 3 | 4 | - name: Configure monitor interfaces 5 | shell: > 6 | for intf in {{ rock_monifs | join(' ') }}; do 7 | /sbin/ifup ${intf}; 8 | done 9 | -------------------------------------------------------------------------------- /roles/suricata/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: RockNSM Contributors 4 | description: A role to deploy Suricata to a RockNSM sensor 5 | company: RockNSM Foundation 6 | 7 | license: Apache 8 | 9 | min_ansible_version: 2.7 10 | 11 | platforms: 12 | - name: CentOS 13 | versions: 14 | - 7 15 | - name: RedHat 16 | versions: 17 | - 7 18 | 19 | galaxy_tags: 20 | - suricata 21 | - rocknsm 22 | - ids 23 | 24 | dependencies: 25 | - name: Install and configure filebeat 26 | role: filebeat 27 | vars: 28 | filebeat_configs: 29 | - { src: 'fb-suricata.yml.j2', dest: 'suricata.yml' } 30 | -------------------------------------------------------------------------------- /roles/suricata/templates/fb-suricata.yml.j2: -------------------------------------------------------------------------------- 1 | - input_type: log 2 | paths: 3 | - {{ rock_data_dir }}/suricata/eve.json 4 | json.keys_under_root: true 5 | fields: 6 | kafka_topic: suricata-raw 7 | fields_under_root: true 8 | -------------------------------------------------------------------------------- /roles/suricata/templates/logrotate-suricata.j2: -------------------------------------------------------------------------------- 1 | {{ suricata_data_dir }}/*.log {{ suricata_data_dir }}/*.json 2 | { 3 | rotate {{ suricata_retention }} 4 | missingok 5 | compress 6 | delaycompress 7 | copytruncate 8 | minsize 500k 9 | create 0644 suricata suricata 10 | sharedscripts 11 | postrotate 12 | systemctl reload suricata.service 13 | endscript 14 | } 15 | -------------------------------------------------------------------------------- /roles/suricata/templates/ls-input-suricata.j2: -------------------------------------------------------------------------------- 1 | input { 2 
| kafka { 3 | topics => ["suricata-raw"] 4 | add_field => { "[@metadata][stage]" => "suricata_json" } 5 | # Set this to one per kafka partition to scale up 6 | #consumer_threads => 4 7 | group_id => "suricata_logstash" 8 | bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} 9 | codec => json 10 | auto_offset_reset => "earliest" 11 | id => "input-kafka-suricata" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /roles/suricata/templates/suricata_overrides.yaml.j2: -------------------------------------------------------------------------------- 1 | %YAML 1.1 2 | --- 3 | default-rule-path: "{{ suricata_var_dir }}/rules" 4 | rule-files: 5 | - suricata.rules 6 | 7 | af-packet: 8 | {% for iface in rock_monifs %} 9 | - interface: {{ iface }} 10 | #threads: auto 11 | cluster-id: {{ 99 - loop.index0 }} 12 | cluster-type: cluster_flow 13 | defrag: true 14 | use-mmap: true 15 | mmap-locked: true 16 | #rollover: true 17 | tpacket-v3: true 18 | use-emergency-flush: true 19 | {% endfor %} 20 | default-log-dir: {{ suricata_data_dir }} 21 | 22 | #Magic file location for EL7 23 | magic-file: /usr/share/file/magic 24 | outputs: 25 | - fast: 26 | enabled: true 27 | filename: fast.log 28 | append: true 29 | - eve-log: 30 | enabled: true 31 | filetype: regular 32 | filename: eve.json 33 | types: 34 | - alert: 35 | http: true 36 | tls: true 37 | ssh: true 38 | smtp: true 39 | tagged-packets: true 40 | xff: 41 | enabled: false 42 | - files: 43 | force-magic: true 44 | force-hash: [sha256, sha1, md5] 45 | - stats: 46 | totals: true 47 | threads: false 48 | deltas: false 49 | - flow 50 | - http 51 | - dns 52 | - tls 53 | community-id: true 54 | - unified2-alert: 55 | enabled: true 56 | filename: unified2.alert 57 | limit: 32mb 58 | sensor-id: 0 59 | payload: true 60 | -------------------------------------------------------------------------------- /roles/suricata/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/suricata/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - suricata 6 | -------------------------------------------------------------------------------- /roles/suricata/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for suricata 3 | -------------------------------------------------------------------------------- /roles/zeek/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) 
should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/zeek/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for zeek 3 | zeek_packages: 4 | - zeek 5 | - zeek-aux 6 | - zeekctl 7 | - zeek-plugin-kafka 8 | - zeek-plugin-af_packet 9 | - zeek-plugin-communityid 10 | - zeek-plugin-gquic 11 | -------------------------------------------------------------------------------- /roles/zeek/files/GeoIP.conf: -------------------------------------------------------------------------------- 1 | # If you purchase a subscription to the GeoIP database, 2 | # then you will obtain a license key which you can 3 | # use to automatically obtain updates. 4 | # for more details, please go to 5 | # http://www.maxmind.com/en/geolocation_landing 6 | 7 | # HowTo configure geoipupdate 8 | # http://www.maxmind.com/en/license_key 9 | 10 | # customer find the user_id and license key here: 11 | # https://www.maxmind.com/en/my_license_key 12 | # 13 | # UserId, and available ProductIds 14 | 15 | # Enter your license key here 16 | # customers should insert their license key and user_id 17 | # free GeoLite users should use 000000000000 as license key 18 | LicenseKey 000000000000 19 | 20 | # Enter your User ID here ( GeoLite only users should use 999999 as user_id ) 21 | UserId 999999 22 | 23 | # Enter the Product ID(s) of the database(s) you would like to update 24 | # By default 106 (MaxMind GeoIP Country) is listed below 25 | ProductIds GeoLite-Legacy-IPv6-City GeoLite-Legacy-IPv6-Country 506 517 533 26 | -------------------------------------------------------------------------------- /roles/zeek/files/profile.d-zeek.sh: -------------------------------------------------------------------------------- 1 | # Helpers 2 | alias zeek-column="sed \"s/fields.//;s/types.//\" | column -s $'\t' -t" 3 | alias zeek-awk='awk -F" "' 4 | zeek-grep() { grep -E "(^#)|$1" $2; } 5 | zeek-zgrep() { zgrep -E "(^#)|$1" $2; } 6 | topcount() { sort | uniq -c | sort -rn | head -n ${1:-10}; } 7 | colorize() { sed 's/#fields\t\|#types\t/#/g' | awk 'BEGIN {FS="\t"};{for(i=1;i<=NF;i++) printf("\x1b[%sm %s \x1b[0m",(i%7)+31,$i);print ""}'; } 8 | cm() { cat $1 | sed 's/#fields\t\|#types\t/#/g' | awk 'BEGIN {FS="\t"};{for(i=1;i<=NF;i++) printf("\x1b[%sm %s \x1b[0m",(i%7)+31,$i);print ""}'; } 9 | lesscolor() { cat $1 | sed 's/#fields\t\|#types\t/#/g' | awk 'BEGIN {FS="\t"};{for(i=1;i<=NF;i++) printf("\x1b[%sm %s \x1b[0m",(i%7)+31,$i);print ""}' | less -RS; } 10 | topconn() { if [ $# -lt 2 ]; then echo "Usage: topconn {resp|orig} {proto|service} {tcp|udp|icmp|http|dns|ssl|smtp|\"-\"}"; else cat conn.log | zeek-cut id.$1_h $2 | grep $3 | topcount; fi; } 11 | fields() { grep -m 1 -E "^#fields" $1 | awk 
-vRS='\t' '/^[^#]/ { print $1 }' | cat -n ; } 12 | toptalk() { for i in *.log; do echo -e "$i\n================="; cat $i | zeek-cut id.orig_h id.resp_h | topcount 20; done; } 13 | talkers() { for j in tcp udp icmp; do echo -e "\t=============\n\t $j\n\t============="; for i in resp orig; do echo -e "====\n$i\n===="; topconn $i proto $j | column -t; done; done; } 14 | 15 | toptotal() { if [ $# -lt 3 ]; then echo "Usage: toptotal {resp|orig} {orig_bytes|resp_bytes|duration} conn.log"; else 16 | zcat $3 | zeek-cut id.$1_h $2 \ 17 | | sort \ 18 | | awk '{ if (host != $1) { \ 19 | if (size != 0) \ 20 | print $1, size; \ 21 | host=$1; \ 22 | size=0 \ 23 | } else \ 24 | size += $2 \ 25 | } \ 26 | END { \ 27 | if (size != 0) \ 28 | print $1, size \ 29 | }' \ 30 | | sort -rnk 2 \ 31 | | head -n 20; fi; } 32 | 33 | topconvo() { if [ $# -lt 1 ]; then echo "Usage: topconvo conn.log"; else 34 | zcat $1 | zeek-cut id.orig_h id.resp_h orig_bytes resp_bytes \ 35 | | sort \ 36 | | awk '{ if (host != $1 || host2 != $2) { \ 37 | if (size != 0) \ 38 | print $1, $2, size; \ 39 | host=$1; \ 40 | host2=$2; \ 41 | size=0 \ 42 | } else \ 43 | size += $3; \ 44 | size += $4 \ 45 | } \ 46 | END { \ 47 | if (size != 0) \ 48 | print $1, $2, size \ 49 | }' \ 50 | | sort -rnk 3 \ 51 | | head -n 20; fi; } 52 | -------------------------------------------------------------------------------- /roles/zeek/files/zeek-scripts-readme.txt: -------------------------------------------------------------------------------- 1 | 2 | It is recommended to put zeek scripts in individual directories and use __load__.zeek files. 3 | 4 | Example: 5 | directory = scripts/something 6 | script = scripts/something/something.zeek 7 | loader = scripts/something/__load__.zeek 8 | 9 | Then in your custom.local.zeek you can @load scripts/something 10 | -------------------------------------------------------------------------------- /roles/zeek/files/zeekctl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | # zeekctl should ALWAYS run as the zeek user! 
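# Arguments pass straight through to zeekctl, e.g. `zeekctl deploy` to install
# the node.cfg/zeekctl.cfg rendered by this role and (re)start the workers, or
# `zeekctl status` to list the logger/manager/proxy/worker processes.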
4 | sudo -u zeek /usr/bin/zeekctl "$@" 5 | -------------------------------------------------------------------------------- /roles/zeek/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for Bro 3 | 4 | - name: Configure monitor interfaces 5 | shell: > 6 | for intf in {{ rock_monifs | join(' ') }}; do 7 | /sbin/ifup ${intf}; 8 | done 9 | 10 | - name: Reload zeek 11 | service: 12 | name: zeek 13 | state: restarted 14 | when: "'zeek' in enabled_services" 15 | -------------------------------------------------------------------------------- /roles/zeek/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/zeek/templates/ls-input-zeek.j2: -------------------------------------------------------------------------------- 1 | input { 2 | kafka { 3 | topics => ["zeek-raw"] 4 | add_field => { "[@metadata][stage]" => "zeek_json" } 5 | # Set this to one per kafka partition to scale up 6 | #consumer_threads => 4 7 | group_id => "zeek_logstash" 8 | bootstrap_servers => {% for host in groups['kafka'] %}"{{ host }}:9092"{% if not loop.last %},{% endif %}{% endfor %} 9 | codec => json 10 | auto_offset_reset => "earliest" 11 | id => "input-kafka-zeek" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /roles/zeek/templates/networks.cfg.j2: -------------------------------------------------------------------------------- 1 | #LOCAL NETS 2 | 10.0.0.0/8 RFC1918 3 | 172.16.0.0/12 RFC1918 4 | 192.168.0.0/16 RFC1918 5 | 6 | ########## 7 | ## ROCK ## 8 | ########## 9 | # Add networks for the networks you are monitoring into this file if they're not all RFC1918. 10 | ########## 11 | -------------------------------------------------------------------------------- /roles/zeek/templates/node.cfg.j2: -------------------------------------------------------------------------------- 1 | [logger] 2 | type=logger 3 | host=localhost 4 | env_vars=fanout_id=0 5 | 6 | [manager] 7 | type=manager 8 | host=localhost 9 | env_vars=fanout_id=0 10 | 11 | [proxy-1] 12 | type=proxy 13 | host=localhost 14 | env_vars=fanout_id=0 15 | 16 | {% set procs_per_worker = (zeek_cpu | int) // (rock_monifs|length) %} 17 | {% for iface in rock_monifs %} 18 | [{{ iface }}] 19 | type=worker 20 | host=localhost 21 | {%if procs_per_worker >=2 %} 22 | interface=af_packet::{{ iface }} 23 | lb_method=custom 24 | lb_procs={{ (zeek_cpu | int) // loop.length }} 25 | {% else %} 26 | interface={{ iface }} 27 | {% endif %} 28 | env_vars=fanout_id={{ 42 + loop.index0 }} 29 | {# TODO: add logic for pinning processes #} 30 | {% endfor %} 31 | -------------------------------------------------------------------------------- /roles/zeek/templates/zeekctl.cfg.j2: -------------------------------------------------------------------------------- 1 | ## Global BroControl configuration file. 2 | 3 | ############################################### 4 | # Mail Options 5 | 6 | # Recipient address for all emails sent out by Bro and BroControl. 7 | MailTo = root@localhost 8 | 9 | # Mail connection summary reports each log rotation interval. A value of 1 10 | # means mail connection summaries, and a value of 0 means do not mail 11 | # connection summaries. This option has no effect if the trace-summary 12 | # script is not available. 
13 | MailConnectionSummary = 1 14 | 15 | # Lower threshold (in percentage of disk space) for space available on the 16 | # disk that holds SpoolDir. If less space is available, "zeekctl cron" starts 17 | # sending out warning emails. A value of 0 disables this feature. 18 | MinDiskSpace = 5 19 | 20 | # Send mail when "zeekctl cron" notices the availability of a host in the 21 | # cluster to have changed. A value of 1 means send mail when a host status 22 | # changes, and a value of 0 means do not send mail. 23 | MailHostUpDown = 1 24 | 25 | ############################################### 26 | # Logging Options 27 | 28 | # Rotation interval in seconds for log files on manager (or standalone) node. 29 | # A value of 0 disables log rotation. 30 | LogRotationInterval = 3600 31 | 32 | # Expiration interval for archived log files in LogDir. Files older than this 33 | # will be deleted by "zeekctl cron". The interval is an integer followed by 34 | # one of these time units: day, hr, min. A value of 0 means that logs 35 | # never expire. 36 | LogExpireInterval = {{ zeek_log_retention }} day 37 | 38 | # Enable BroControl to write statistics to the stats.log file. A value of 1 39 | # means write to stats.log, and a value of 0 means do not write to stats.log. 40 | StatsLogEnable = 1 41 | 42 | # Number of days that entries in the stats.log file are kept. Entries older 43 | # than this many days will be removed upon running "zeekctl cron". A value of 0 44 | # means that entries never expire. 45 | StatsLogExpireInterval = {{ zeek_stats_retention }} 46 | 47 | ############################################### 48 | # Other Options 49 | 50 | # Show all output of the zeekctl status command. If set to 1, then all output 51 | # is shown. If set to 0, then zeekctl status will not collect or show the peer 52 | # information (and the command will run faster). 53 | StatusCmdShowAll = 0 54 | 55 | # Site-specific policy script to load. Bro will look for this in 56 | # $PREFIX/share/zeek/site. A default local.zeek comes preinstalled 57 | # and can be customized as desired. 58 | SitePolicyScripts = local.zeek 59 | 60 | # Location of the log directory where log files will be archived each rotation 61 | # interval. 62 | LogDir = {{ zeek_data_dir }}/logs 63 | 64 | # Location of the spool directory where files and data that are currently being 65 | # written are stored. 66 | SpoolDir = {{ zeek_data_dir }}/spool 67 | 68 | # Location of other configuration files that can be used to customize 69 | # BroControl operation (e.g. local networks, nodes). 
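zeekctl.cfg.j2 is driven by a handful of role variables: zeek_log_retention, zeek_stats_retention, zeek_data_dir and zeek_sysconfig_dir. Ansible fills them in at deploy time, but a quick local Jinja2 render is a convenient way to preview the generated zeekctl.cfg before running the playbook. The variable values below are placeholders for illustration, not the role defaults:

# Render a zeekctl.cfg.j2-style snippet locally to preview the substitutions.
from jinja2 import Template

snippet = (
    "LogExpireInterval = {{ zeek_log_retention }} day\n"
    "StatsLogExpireInterval = {{ zeek_stats_retention }}\n"
    "LogDir = {{ zeek_data_dir }}/logs\n"
    "SpoolDir = {{ zeek_data_dir }}/spool\n"
    "CfgDir = {{ zeek_sysconfig_dir }}\n"
)

print(Template(snippet).render(
    zeek_log_retention=7,           # placeholder values for the preview
    zeek_stats_retention=30,
    zeek_data_dir="/data/zeek",
    zeek_sysconfig_dir="/etc/zeek",
))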
70 | CfgDir = {{ zeek_sysconfig_dir }} 71 | -------------------------------------------------------------------------------- /roles/zeek/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/zeek/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - zeek 6 | -------------------------------------------------------------------------------- /roles/zeek/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for zeek 3 | -------------------------------------------------------------------------------- /roles/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
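One detail of the zookeeper role that follows is worth spelling out: its firewalld task only opens 2181/tcp when the zookeeper inventory group contains a host that is not also in the kafka group, i.e. when something has to reach ZooKeeper over the network instead of over loopback. The Jinja filter chain groups['zookeeper'] | difference(groups['kafka']) | count > 0 amounts to the set logic below; the inventory hostnames are hypothetical:

# Equivalent of: groups['zookeeper'] | difference(groups['kafka']) | count > 0
groups = {
    "zookeeper": ["sensor1.lan"],
    "kafka": ["sensor1.lan"],
}

def zookeeper_needs_remote_access(groups):
    # True when at least one zookeeper host is not also a kafka host
    return len(set(groups["zookeeper"]) - set(groups["kafka"])) > 0

print(zookeeper_needs_remote_access(groups))              # False: single-node build
groups["zookeeper"] = ["sensor1.lan", "zookeeper1.lan"]   # hypothetical dedicated ZK node
print(zookeeper_needs_remote_access(groups))              # True: open 2181/tcp

On the default single-node build both groups name the same host, so the port stays closed and ZooKeeper remains local-only.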
39 | -------------------------------------------------------------------------------- /roles/zookeeper/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for zookeeper 3 | method: all 4 | -------------------------------------------------------------------------------- /roles/zookeeper/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for zookeeper 3 | - name: Restart zookeeper 4 | systemd: 5 | name: zookeeper 6 | state: "{{ 'restarted' if 'zookeeper' in enabled_services else 'stopped' }}" 7 | -------------------------------------------------------------------------------- /roles/zookeeper/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install zookeeper packages 3 | yum: 4 | name: 5 | - java-11-openjdk-headless 6 | - zookeeper 7 | state: installed 8 | 9 | - name: Enable and Start zookeeper 10 | systemd: 11 | name: zookeeper 12 | enabled: "{{ 'zookeeper' in enabled_services }}" 13 | notify: Restart zookeeper 14 | 15 | - name: Configure firewall ports 16 | firewalld: 17 | port: "{{ item }}/tcp" 18 | permanent: true 19 | state: enabled 20 | immediate: true 21 | zone: work 22 | loop: 23 | - 2181 24 | when: groups['zookeeper'] | difference(groups['kafka']) | count > 0 25 | ... 26 | -------------------------------------------------------------------------------- /roles/zookeeper/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/zookeeper/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - zookeeper 6 | -------------------------------------------------------------------------------- /roles/zookeeper/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for zookeeper 3 | -------------------------------------------------------------------------------- /tests/test_common.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/common.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | 15 | # begin testing 16 | # parametrize all of the values in the list so that we test all of them even if one fails 17 | def test_passwordless_sudo(host): 18 | assert host.sudo() 19 | 20 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 21 | # Test for packages that are installed 22 | def test_packages_installed(host, package): 23 | with host.sudo(): 24 | assert host.package(package).is_installed 25 | 26 | 27 | def test_ipv6_disabled(host): 28 | assert host.file('/etc/sysctl.d/10-ROCK.conf').contains('net.ipv6.conf.all.disable_ipv6=1') 29 | assert host.file('/etc/sysctl.d/10-ROCK.conf').contains('net.ipv6.conf.default.disable_ipv6=1') 30 | 31 | 32 | def test_ipv6_loopback_disabled(host): 33 | assert not host.file('/etc/hosts').contains('::1') 34 | 
-------------------------------------------------------------------------------- /tests/test_docket.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | import shlex 8 | 9 | with open('tests/vars/docket.vars', 'r') as f: 10 | try: 11 | yml_vars = yaml.load(f) 12 | except yaml.YAMLError as e: 13 | print(e) 14 | 15 | # begin testing 16 | # parametrize all of the values in the list so that we test all of them even if one fails 17 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 18 | # Test for packages that are installed 19 | def test_packages_installed(host, package): 20 | with host.sudo(): 21 | assert host.package(package).is_installed 22 | 23 | @pytest.mark.parametrize("service", yml_vars.get('services')) 24 | # test for services that are enabled 25 | def test_service_enabled(host, service): 26 | assert host.service(service).is_enabled 27 | assert host.service(service).is_running 28 | 29 | 30 | # Can use this if we want to split up enabled and running into separate checks 31 | # @pytest.mark.parametrize("service", services) 32 | # # test for services that are running 33 | # def test_service_running(host, service): 34 | # assert host.service(service).is_running 35 | 36 | 37 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 38 | # test for directories that should of been made 39 | def test_directories(host, dir_path): 40 | with host.sudo(): 41 | assert host.file(dir_path).exists 42 | assert host.file(dir_path).is_directory 43 | 44 | 45 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 46 | # test for files that should exist 47 | def test_files(host, file_path): 48 | file_p = file_path[0] 49 | try: 50 | file_u, file_g = file_path[1].split(':') 51 | except IndexError: 52 | file_u = None 53 | file_g = None 54 | try: 55 | file_m = oct(int(file_path[2], 8)) 56 | except IndexError: 57 | file_m = None 58 | with host.sudo(): 59 | assert host.file(file_p).exists 60 | assert host.file(file_p).is_file 61 | if file_u: 62 | assert host.file(file_p).user == file_u 63 | if file_g: 64 | assert host.file(file_p).group == file_g 65 | if file_m: 66 | assert oct(host.file(file_p).mode) == file_m 67 | 68 | 69 | @pytest.mark.parametrize("port", yml_vars.get('firewall_ports')) 70 | # test to see if port is allowed through firewalld 71 | def test_firewall_rules(host, port): 72 | with host.sudo(): 73 | out = host.check_output("firewall-cmd {}".format('--list-ports')) 74 | assert str(port) in out 75 | 76 | 77 | # work around port testing for now, still requires netstat to be installed. 
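The comment above (it continues just below) points out that testinfra's socket helpers still expect netstat on the target. If that dependency becomes a problem, one possible workaround is to shell out to ss from the test itself. This is only a sketch: it assumes iproute's ss is installed on the host under test, hard-codes the two ports from docket.vars, and reuses the same testinfra host fixture as the surrounding tests:

import pytest

# Hypothetical alternative to host.socket: parse `ss -lnt` output directly.
@pytest.mark.parametrize("port", [6379, 8443])   # docket.vars listening_ports
def test_listening_via_ss(host, port):
    with host.sudo():
        out = host.check_output("ss -lnt")
    # local address:port is the 4th column, e.g. "0.0.0.0:8443" or "*:8443"
    local_addrs = [line.split()[3] for line in out.splitlines()[1:]
                   if len(line.split()) >= 4]
    assert any(addr.endswith(":{}".format(port)) for addr in local_addrs)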
Does not detect ss for some reason 78 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 79 | # test for ports that are in a listening state 80 | def test_network_sockets(host, port): 81 | scks = host.socket.get_listening_sockets() 82 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 83 | if(port != 6379): 84 | assert host.run_test('curl -k https://{host}:{p}'.format(host='localhost', p=port)) 85 | 86 | 87 | # Doesn't appear to work, but this is how we could test for specific ports listening 88 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 89 | # def test_sockets(host, port): 90 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening -------------------------------------------------------------------------------- /tests/test_elasticsearch.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/elasticsearch.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | # work around port testing for now, still requires netstat to be installed. 
Does not detect ss for some reason 69 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 70 | # test for ports that are in a listening state 71 | def test_network_sockets(host, port): 72 | scks = host.socket.get_listening_sockets() 73 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 74 | assert host.run_test('curl {host}:{p}'.format(host='localhost', p=port)) 75 | 76 | # Doesn't appear to work, but this is how we could test for specific ports listening 77 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 78 | # def test_sockets(host, port): 79 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening -------------------------------------------------------------------------------- /tests/test_filebeat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/filebeat.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_p.split('/')[-1] == 'filebeat.yml': 61 | assert host.file(file_p).contains('/data/suricata/eve.json') 62 | assert host.file(file_p).contains('/data/fsf/rockout.log') 63 | if file_u: 64 | assert host.file(file_p).user == file_u 65 | if file_g: 66 | assert host.file(file_p).group == file_g 67 | if file_m: 68 | assert oct(host.file(file_p).mode) == file_m 69 | -------------------------------------------------------------------------------- /tests/test_kafka.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from 
builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/kafka.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | # work around port testing for now, still requires netstat to be installed. 
Does not detect ss for some reason 69 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 70 | # test for ports that are in a listening state 71 | def test_network_sockets(host, port): 72 | scks = host.socket.get_listening_sockets() 73 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 74 | 75 | 76 | # Doesn't appear to work, but this is how we could test for specific ports listening 77 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 78 | # def test_sockets(host, port): 79 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening 80 | -------------------------------------------------------------------------------- /tests/test_kibana.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/kibana.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | @pytest.mark.parametrize("port", yml_vars.get('firewall_ports')) 69 | # test to see if port is allowed through firewalld 70 | def test_firewall_rules(host, port): 71 | with host.sudo(): 72 | out = host.check_output("firewall-cmd %s", '--list-ports') 73 | assert str(port) in out 74 | 75 | 76 | # work around port testing for now, still requires netstat to be installed. 
Does not detect ss for some reason 77 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 78 | # test for ports that are in a listening state 79 | def test_network_sockets(host, port): 80 | scks = host.socket.get_listening_sockets() 81 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 82 | 83 | 84 | # Doesn't appear to work, but this is how we could test for specific ports listening 85 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 86 | # def test_sockets(host, port): 87 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening -------------------------------------------------------------------------------- /tests/test_lighttpd.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/lighttpd.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | -------------------------------------------------------------------------------- /tests/test_logstash.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/logstash.vars', 'r') as f: 9 | try: 10 | yml_vars = 
yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | -------------------------------------------------------------------------------- /tests/test_sensor.py: -------------------------------------------------------------------------------- 1 | # This contains all the tests that should check to make sure all the services are working together. 2 | # The glue that holds it all together and binds it. 
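One thing to watch in test_sensor.py below: the Logstash check compares the pipeline's events['out'] counter against the string '0', but the node-stats API reports that counter as a number, and in Python 3 an int never equals a str, so the assertion passes even when nothing has been shipped. A standalone sketch of the same check that treats the counter as an integer, assuming the Logstash monitoring API is listening on localhost:9600 as the role configures it:

import json
from urllib.request import urlopen

# Query the Logstash node-stats API and require that the main pipeline has
# actually emitted events (the counter comes back as a number, not a string).
with urlopen("http://localhost:9600/_node/stats/pipelines/main") as resp:
    stats = json.load(resp)

events_out = stats["pipelines"]["main"]["events"]["out"]
assert isinstance(events_out, int)
assert events_out > 0, "logstash has not shipped any events yet"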
3 | 4 | from __future__ import absolute_import, division, print_function 5 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 6 | int, map, next, oct, open, pow, range, round, 7 | str, super, zip) 8 | import pytest 9 | import yaml 10 | import json 11 | 12 | with open('tests/vars/sensor.vars', 'r') as f: 13 | try: 14 | yml_vars = yaml.load(f) 15 | except yaml.YAMLError as e: 16 | print(e) 17 | 18 | @pytest.mark.parametrize("group_id", yml_vars.get('kafka_groups')) 19 | def test_connection_to_kafka(host, group_id): 20 | results = host.run('/opt/kafka/bin/kafka-consumer-groups.sh ' 21 | '--bootstrap-server {host}:{p} ' 22 | '--describe --group {gid}'.format(host='localhost', p='9092', gid=group_id)) 23 | assert 'Error:' not in results.stdout 24 | 25 | 26 | def test_logstash_connection_to_elasticsearch(host): 27 | result = host.run('curl {host}:{p}/_node/stats/pipelines/main'.format(host='localhost', p='9600')) 28 | result = json.loads(result.stdout) 29 | assert result['pipelines']['main']['events']['out'] != '0' 30 | 31 | 32 | @pytest.mark.parametrize("topic", yml_vars.get('topics')) 33 | def test_kafka_topics(host, topic): 34 | results = host.run('/opt/kafka/bin/kafka-topics.sh --list --zookeeper {host}:{p}'.format( 35 | host='localhost', p='2181')) 36 | assert topic in results.stdout 37 | -------------------------------------------------------------------------------- /tests/test_stenographer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/stenographer.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert 
host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | # work around port testing for now, still requires netstat to be installed. Does not detect ss for some reason 69 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 70 | # test for ports that are in a listening state 71 | def test_network_sockets(host, port): 72 | scks = host.socket.get_listening_sockets() 73 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 74 | 75 | 76 | # Doesn't appear to work, but this is how we could test for specific ports listening 77 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 78 | # def test_sockets(host, port): 79 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening -------------------------------------------------------------------------------- /tests/test_suricata.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/suricata.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | -------------------------------------------------------------------------------- /tests/test_zeek.py: -------------------------------------------------------------------------------- 1 | from __future__ 
import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/zeek.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | def test_zeek_check(host): 69 | with host.sudo(): 70 | result = host.run('zeekctl check') 71 | assert 'error' not in result.stdout 72 | -------------------------------------------------------------------------------- /tests/test_zookeeper.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import (ascii, bytes, chr, dict, filter, hex, input, 3 | int, map, next, oct, open, pow, range, round, 4 | str, super, zip) 5 | import pytest 6 | import yaml 7 | 8 | with open('tests/vars/zookeeper.vars', 'r') as f: 9 | try: 10 | yml_vars = yaml.load(f) 11 | except yaml.YAMLError as e: 12 | print(e) 13 | 14 | # begin testing 15 | # parametrize all of the values in the list so that we test all of them even if one fails 16 | @pytest.mark.parametrize("package", yml_vars.get('packages')) 17 | # Test for packages that are installed 18 | def test_packages_installed(host, package): 19 | with host.sudo(): 20 | assert host.package(package).is_installed 21 | 22 | @pytest.mark.parametrize("service", yml_vars.get('services')) 23 | # test for services that are enabled 24 | def test_service_enabled(host, service): 25 | assert host.service(service).is_enabled 26 | assert host.service(service).is_running 27 | 28 | 
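Further down, test_network_sockets pipes the four-letter word ruok into the ZooKeeper port with nc. The same liveness probe can be done without netcat; the sketch below opens a raw socket to localhost:2181 (the port listed in zookeeper.vars) and expects the standard imok reply. It assumes ruok is permitted by the server's four-letter-word whitelist, which newer ZooKeeper releases can restrict:

import socket

# Minimal ZooKeeper "ruok" liveness probe without nc; a healthy server answers "imok".
def zookeeper_is_ok(host="localhost", port=2181, timeout=5):
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(b"ruok")
        return sock.recv(16) == b"imok"

assert zookeeper_is_ok()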
29 | # Can use this if we want to split up enabled and running into separate checks 30 | # @pytest.mark.parametrize("service", services) 31 | # # test for services that are running 32 | # def test_service_running(host, service): 33 | # assert host.service(service).is_running 34 | 35 | 36 | @pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths')) 37 | # test for directories that should of been made 38 | def test_directories(host, dir_path): 39 | with host.sudo(): 40 | assert host.file(dir_path).exists 41 | assert host.file(dir_path).is_directory 42 | 43 | 44 | @pytest.mark.parametrize("file_path", yml_vars.get('file_paths')) 45 | # test for files that should exist 46 | def test_files(host, file_path): 47 | file_p = file_path[0] 48 | try: 49 | file_u, file_g = file_path[1].split(':') 50 | except IndexError: 51 | file_u = None 52 | file_g = None 53 | try: 54 | file_m = oct(int(file_path[2], 8)) 55 | except IndexError: 56 | file_m = None 57 | with host.sudo(): 58 | assert host.file(file_p).exists 59 | assert host.file(file_p).is_file 60 | if file_u: 61 | assert host.file(file_p).user == file_u 62 | if file_g: 63 | assert host.file(file_p).group == file_g 64 | if file_m: 65 | assert oct(host.file(file_p).mode) == file_m 66 | 67 | 68 | # work around port testing for now, still requires netstat to be installed. Does not detect ss for some reason 69 | @pytest.mark.parametrize("port", yml_vars.get('listening_ports')) 70 | # test for ports that are in a listening state 71 | def test_network_sockets(host, port): 72 | scks = host.socket.get_listening_sockets() 73 | assert [s for s in scks if s.endswith(':{p}'.format(p=port))] 74 | assert host.run_test('echo ruok | nc {host} {p}'.format(host='localhost', p=port)) 75 | 76 | 77 | # Doesn't appear to work, but this is how we could test for specific ports listening 78 | # @pytest.mark.parametrize("port", yml_vars.get('listening_ports') 79 | # def test_sockets(host, port): 80 | # assert host.socket("tcp://127.0.0.1:{port}".format(port=port)).is_listening -------------------------------------------------------------------------------- /tests/vars/common.vars: -------------------------------------------------------------------------------- 1 | packages: 2 | 3 | - tcpreplay 4 | - bats 5 | - policycoreutils-python 6 | - htop 7 | - vim-common 8 | - vim-enhanced 9 | - vim-filesystem 10 | - vim-minimal 11 | - git 12 | - tmux 13 | - nmap-ncat 14 | - perl-LWP-Protocol-https 15 | - perl-Sys-Syslog 16 | - perl-Crypt-SSLeay 17 | - perl-Archive-Tar 18 | - python2-xkcdpass 19 | - postfix 20 | -------------------------------------------------------------------------------- /tests/vars/docket.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - docket 3 | - docket-celery-io 4 | - docket-celery-query 5 | - docket.socket 6 | - lighttpd 7 | 8 | file_paths: 9 | - ['/var/log/docket/docket.log', 'docket:docket', '0644'] 10 | - ['/etc/pki/tls/private/httpd-combined.pem', 'lighttpd:lighttpd', '0600'] 11 | - ['/etc/pki/tls/misc/http_tls_dhparams.pem', 'root:root', '0644'] 12 | - ['/etc/docket/prod.yaml', 'root:root', '0644'] 13 | - ['/etc/lighttpd/vhosts.d/docket.conf', 'root:root', '0644'] 14 | - ['/etc/sysconfig/docket', 'root:root', '0644'] 15 | - ['/etc/lighttpd/lighttpd.conf', 'root:root', '0644'] 16 | 17 | dir_paths: 18 | - /var/spool/docket 19 | - /etc/pki/docket 20 | - /var/log/docket 21 | - /run/docket 22 | - /etc/lighttpd/vhosts.d/ 23 | - /var/log/lighttpd 24 | 25 | packages: 26 | - docket 27 | - lighttpd 28 | 
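Each entry in the file_paths lists of these vars files is a [path, 'user:group', 'mode'] triple, with the last two fields optional, which is why every test_* module wraps the unpacking in IndexError handlers. A small local validation sketch catches malformed entries before they surface as confusing parametrize failures; the default filename is just an example:

import sys
import yaml

# Validate the file_paths structure of a tests/vars/*.vars file.
def check_vars(path):
    with open(path) as f:
        data = yaml.safe_load(f)
    for entry in data.get("file_paths", []):
        assert 1 <= len(entry) <= 3, "expected [path, 'user:group', 'mode']: {!r}".format(entry)
        assert entry[0].startswith("/"), "not an absolute path: {!r}".format(entry[0])
        if len(entry) >= 2:
            assert len(entry[1].split(":")) == 2   # user:group, same split as test_files()
        if len(entry) == 3:
            int(entry[2], 8)                       # mode string must parse as octal

if __name__ == "__main__":
    check_vars(sys.argv[1] if len(sys.argv) > 1 else "tests/vars/docket.vars")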
29 | firewall_ports: 30 | - 443 31 | - 8443 32 | 33 | # redis port used for que 6379 34 | listening_ports: 35 | - 6379 36 | - 8443 37 | -------------------------------------------------------------------------------- /tests/vars/elasticsearch.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - elasticsearch 3 | 4 | file_paths: 5 | - ['/etc/sysconfig/elasticsearch', 'root:elasticsearch', '0660'] 6 | - ['/etc/elasticsearch/jvm.options', 'root:elasticsearch', '0640'] 7 | - ['/etc/elasticsearch/elasticsearch.yml', 'root:elasticsearch', '0640'] 8 | - ['/etc/systemd/system/elasticsearch.service.d/override.conf', 'root:root', '0644'] 9 | 10 | dir_paths: 11 | - /var/log/elasticsearch 12 | - /data/elasticsearch 13 | - /run/elasticsearch 14 | - /etc/systemd/system/elasticsearch.service.d 15 | 16 | packages: 17 | - java-11-openjdk-headless 18 | - elasticsearch 19 | 20 | listening_ports: 21 | - 9200 22 | - 9300 23 | -------------------------------------------------------------------------------- /tests/vars/filebeat.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - filebeat 3 | 4 | file_paths: 5 | - ['/etc/filebeat/filebeat.yml', 'root:root', '0600'] 6 | 7 | dir_paths: 8 | - /var/lib/filebeat 9 | - /var/log/filebeat 10 | 11 | packages: 12 | - filebeat -------------------------------------------------------------------------------- /tests/vars/fsf.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - fsf 3 | 4 | file_paths: 5 | - ['/opt/fsf/fsf-server/conf/config.py', 'fsf:fsf', '0644'] 6 | - ['/opt/fsf/fsf-client/conf/config.py', 'fsf:fsf', '0644'] 7 | - ['/opt/fsf/fsf-server/main.py', 'fsf:fsf', '0755'] 8 | - ['/opt/fsf/fsf-client/fsf_client.py', 'fsf:fsf', '0755'] 9 | - ['/var/lib/yara-rules/rules.yara', 'fsf:fsf', '0664'] 10 | 11 | dir_paths: 12 | - /data/fsf 13 | - /opt/fsf/fsf-server 14 | - /opt/fsf/fsf-client 15 | - /run/fsf 16 | - /var/lib/yara-rules/ 17 | 18 | packages: 19 | - fsf 20 | 21 | listening_ports: 22 | - 5800 -------------------------------------------------------------------------------- /tests/vars/kafka.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - kafka 3 | 4 | file_paths: 5 | - ['/etc/sysconfig/kafka', 'root:root', '0644'] 6 | - ['/etc/kafka/server.properties', 'root:root', '0644'] 7 | 8 | dir_paths: 9 | - /var/log/kafka 10 | - /data/kafka 11 | 12 | packages: 13 | - java-11-openjdk-headless 14 | - kafka 15 | - kafkacat 16 | 17 | listening_ports: 18 | - 9092 19 | -------------------------------------------------------------------------------- /tests/vars/kibana.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - lighttpd 3 | - kibana 4 | 5 | file_paths: 6 | - ['/etc/kibana/kibana.yml', 'root:root', '0644'] 7 | - ['/etc/profile.d/kibanapw.sh', 'root:root', '0644'] 8 | 9 | dir_paths: 10 | - /var/lib/kibana 11 | - /var/log/lighttpd 12 | 13 | packages: 14 | - lighttpd 15 | - kibana 16 | 17 | firewall_ports: 18 | - 443 19 | 20 | listening_ports: 21 | - 5601 22 | - 443 23 | -------------------------------------------------------------------------------- /tests/vars/lighttpd.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - lighttpd 3 | 4 | file_paths: 5 | - ['/etc/pki/tls/private/httpd-combined.pem', 'root:root', '0600'] 6 | - 
['/etc/pki/tls/misc/http_tls_dhparams.pem', 'root:root', '0644'] 7 | - ['/etc/lighttpd/lighttpd.conf', 'root:root', '0644'] 8 | 9 | dir_paths: 10 | - /etc/lighttpd/vhosts.d 11 | - /var/log/lighttpd 12 | 13 | packages: 14 | - lighttpd 15 | -------------------------------------------------------------------------------- /tests/vars/logstash.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - logstash 3 | 4 | file_paths: 5 | - ['/etc/logstash/logstash.yml', 'root:root', '0644'] 6 | - ['/etc/logstash/conf.d/logstash-100-input-kafka-zeek.conf', 'logstash:logstash', '0640'] 7 | - ['/etc/logstash/conf.d/logstash-100-input-kafka-fsf.conf', 'logstash:logstash', '0640'] 8 | - ['/etc/logstash/conf.d/logstash-100-input-kafka-suricata.conf', 'logstash:logstash', '0640'] 9 | - ['/etc/logstash/conf.d/logstash-500-filter-zeek.conf', 'logstash:logstash', '0640'] 10 | - ['/etc/logstash/conf.d/logstash-500-filter-fsf.conf', 'logstash:logstash', '0640'] 11 | - ['/etc/logstash/conf.d/logstash-500-filter-suricata.conf', 'logstash:logstash', '0640'] 12 | - ['/etc/logstash/conf.d/logstash-998-filter-parsefailures.conf', 'logstash:logstash', '0640'] 13 | - ['/etc/logstash/conf.d/logstash-999-output-es-zeek.conf', 'logstash:logstash', '0640'] 14 | - ['/etc/logstash/conf.d/logstash-999-output-es-fsf.conf', 'logstash:logstash', '0640'] 15 | - ['/etc/logstash/conf.d/logstash-999-output-es-parsefailures.conf', 'logstash:logstash', '0640'] 16 | - ['/etc/logstash/conf.d/logstash-999-output-es-suricata.conf', 'logstash:logstash', '0640'] 17 | 18 | dir_paths: 19 | - /var/log/logstash 20 | - /var/lib/logstash 21 | 22 | packages: 23 | - logstash 24 | - java-11-openjdk-headless 25 | -------------------------------------------------------------------------------- /tests/vars/sensor.vars: -------------------------------------------------------------------------------- 1 | kafka_groups: 2 | - suricata_logstash 3 | - fsf_logstash 4 | - zeek_logstash 5 | 6 | topics: 7 | - zeek-raw 8 | - zeek-network 9 | - suricata-raw 10 | - suricata-clean 11 | - fsf-raw 12 | - fsf-clean 13 | -------------------------------------------------------------------------------- /tests/vars/stenographer.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - stenographer 3 | 4 | file_paths: 5 | - ['/etc/stenographer/config', 'root:root', '0644'] 6 | - ['/etc/stenographer/certs/ca_cert.pem', 'root:root', '0444'] 7 | - ['/etc/stenographer/certs/ca_key.pem', 'root:root', '0400'] 8 | 9 | dir_paths: 10 | - /data/stenographer 11 | - /etc/stenographer/certs 12 | 13 | packages: 14 | - stenographer 15 | - tcpdump 16 | - jq 17 | 18 | listening_ports: 19 | - 1234 -------------------------------------------------------------------------------- /tests/vars/suricata.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - suricata 3 | 4 | # full path to file, user:group, mode 5 | file_paths: 6 | - ['/usr/sbin/suricata', 'root:root', '0755'] 7 | - ['/etc/logrotate.d/suricata', 'root:root', '0644'] 8 | - ['/etc/suricata/suricata.yaml', 'suricata:root', '0600'] 9 | - ['/etc/suricata/classification.config','suricata:root', '0600'] 10 | - ['/etc/suricata/reference.config', 'suricata:root', '0600'] 11 | - ['/etc/suricata/rocknsm-overrides.yaml', 'root:suricata', '0640'] 12 | 13 | dir_paths: 14 | - /run/suricata 15 | - /etc/logrotate.d/ 16 | - /etc/suricata 17 | - /var/log/suricata 18 | - /data/suricata 19 | - 
/var/log/suricata 20 | - /run/suricata 21 | - /var/lib/suricata/rules 22 | - /var/lib/suricata/update 23 | 24 | packages: 25 | - suricata 26 | - logrotate 27 | -------------------------------------------------------------------------------- /tests/vars/zeek.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - zeek 3 | 4 | file_paths: 5 | - ['/usr/bin/zeek', 'root:root', '0755'] 6 | - ['/usr/sbin/zeekctl', 'root:root', '0754'] 7 | - ['/etc/zeek/node.cfg', 'root:root', '0644'] 8 | - ['/etc/zeek/networks.cfg', 'root:root', '0644'] 9 | - ['/etc/zeek/zeekctl.cfg', 'root:root', '0644'] 10 | - ['/usr/share/zeek/site/local.zeek', 'root:root', '0644'] 11 | - ['/usr/share/zeek/site/scripts/rock/rock.zeek', 'zeek:zeek', '0644'] 12 | - ['/usr/share/zeek/site/scripts/rock/skeleton.zeek', 'zeek:zeek', '0644'] 13 | - ['/usr/share/zeek/site/scripts/rock/plugins/afpacket.zeek', 'zeek:zeek', '0644'] 14 | - ['/usr/share/zeek/site/scripts/rock/plugins/kafka.zeek', 'zeek:zeek', '0644'] 15 | - ['/usr/share/zeek/site/scripts/rock/frameworks/files/extract2fsf.zeek', 'zeek:zeek', '0644'] 16 | - ['/usr/share/zeek/site/scripts/rock/frameworks/files/extraction/file-extensions.zeek', 'zeek:zeek', '0644'] 17 | - ['/usr/share/zeek/site/scripts/rock/frameworks/files/extraction/main.zeek', 'zeek:zeek', '0644'] 18 | - ['/usr/lib64/zeek/plugins/Bro_AF_Packet/scripts/init.zeek', 'root:root', '0644'] 19 | - ['/usr/lib64/zeek/plugins/Bro_AF_Packet/scripts/__load__.zeek', 'root:root', '0644'] 20 | - ['/usr/lib64/zeek/plugins/APACHE_KAFKA/scripts/Apache/Kafka/__load__.zeek', 'root:root', '0644'] 21 | - ['/usr/lib64/zeek/plugins/APACHE_KAFKA/scripts/Apache/Kafka/logs-to-kafka.zeek', 'root:root', '0644'] 22 | - ['/data/zeek/spool/zeekctl-config.sh', 'zeek:zeek', '0644'] 23 | 24 | dir_paths: 25 | - /etc/zeek 26 | - /var/spool/zeek 27 | - /data/zeek 28 | - /data/zeek/logs 29 | - /data/zeek/spool 30 | - /var/log/zeek 31 | - /var/lib/zeek 32 | - /usr/share/zeek 33 | - /usr/share/zeek/site/ 34 | - /usr/share/zeek/site/scripts 35 | - /usr/share/zeek/site/scripts/rock 36 | 37 | packages: 38 | - zeek 39 | - zeek-aux 40 | - zeekctl 41 | - zeek-plugin-kafka 42 | - zeek-plugin-af_packet 43 | - zeek-plugin-communityid 44 | - zeek-plugin-gquic 45 | -------------------------------------------------------------------------------- /tests/vars/zookeeper.vars: -------------------------------------------------------------------------------- 1 | services: 2 | - zookeeper 3 | 4 | # full path to file, user:group, mode 5 | file_paths: 6 | - ['/etc/sysconfig/zookeeper', 'root:root', '0644'] 7 | - ['/etc/zookeeper/zoo.cfg', 'root:root', '0644'] 8 | 9 | dir_paths: 10 | - /var/lib/zookeeper 11 | - /var/log/zookeeper 12 | 13 | packages: 14 | - java-11-openjdk-headless 15 | - zookeeper 16 | 17 | listening_ports: 18 | - 2181 19 | --------------------------------------------------------------------------------
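A closing note on how the test modules above read these vars files: they call yaml.load(f) without a Loader argument, which triggers a deprecation warning on PyYAML 5.1+ and a TypeError on PyYAML 6.x. Below is a drop-in sketch of the same loading pattern using safe_load, which is sufficient here because the vars files contain only plain lists and mappings; the filename is just one of the files above:

import yaml

# Same vars-loading pattern as the test modules, but compatible with current PyYAML.
with open("tests/vars/common.vars", "r") as f:
    try:
        yml_vars = yaml.safe_load(f)
    except yaml.YAMLError as e:
        print(e)
        yml_vars = {}

print(sorted(yml_vars.get("packages", [])))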