├── roles
├── apparmor
│ ├── files
│ │ └── profiles
│ │ │ └── .gitkeep
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker
│ ├── vars
│ │ ├── main.yml
│ │ ├── Alpine.yml
│ │ ├── Archlinux.yml
│ │ ├── Debian.yml
│ │ └── RedHat.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── docker-users.yml
│ │ ├── docker-compose.yml
│ │ ├── setup-RedHat.yml
│ │ ├── setup-Debian.yml
│ │ └── main.yml
│ ├── molecule
│ │ └── default
│ │ │ ├── molecule.yml
│ │ │ ├── converge.yml
│ │ │ └── verify.yml
│ ├── LICENSE
│ └── defaults
│ │ └── main.yml
├── ssh
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── geerlingguy.pip
│ ├── .ansible-lint
│ ├── .gitignore
│ ├── meta
│ │ ├── .galaxy_install_info
│ │ └── main.yml
│ ├── .github
│ │ ├── FUNDING.yml
│ │ └── workflows
│ │ │ ├── release.yml
│ │ │ ├── ci.yml
│ │ │ └── stale.yml
│ ├── .yamllint
│ ├── defaults
│ │ └── main.yml
│ ├── molecule
│ │ └── default
│ │ │ ├── molecule.yml
│ │ │ └── converge.yml
│ ├── tasks
│ │ └── main.yml
│ ├── LICENSE
│ └── README.md
├── monit
│ ├── .DS_Store
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── firewall
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── lockdown.yml
│ │ ├── allow.yml
│ │ └── main.yml
│ └── defaults
│ │ └── main.yml
├── cockpit
│ ├── handlers
│ │ └── main.yml
│ ├── readme.txt
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── node_exporter
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── timezone
│ ├── handlers
│ │ └── main.yml
│ ├── templates
│ │ └── timesyncd.conf.j2
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── grafana
│ ├── handlers
│ │ └── main.yml
│ ├── files
│ │ └── provisioning
│ │ │ └── datasources
│ │ │ ├── loki.yml
│ │ │ └── prometheus.yml
│ ├── templates
│ │ └── grafana.ini.j2
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── dotfiles
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── prometheus
│ ├── templates
│ │ ├── prometheus.yml.j2
│ │ └── prometheus.service.j2
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── fail2ban
│ ├── handlers
│ │ └── main.yml
│ ├── templates
│ │ └── jail.local.j2
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── loki
│ ├── handlers
│ │ └── main.yml
│ ├── files
│ │ └── loki.service
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── maldet
│ ├── handlers
│ │ └── main.yml
│ ├── templates
│ │ └── maldet.service.j2
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── hostname
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── logrotate
│ ├── templates
│ │ └── logrotate_item.j2
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── directories
│ ├── tasks
│ │ └── main.yml
│ └── defaults
│ │ └── main.yml
├── lynis
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── grafana-agent
│ ├── handlers
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ ├── templates
│ │ └── agent-config.yml.j2
│ └── tasks
│ │ └── main.yml
├── blackbox_exporter
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── users
│ └── tasks
│ │ └── main.yml
├── borg
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── installs
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
└── tailscale
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── requirements.yml
├── playbooks
├── configs.yml
├── services.yml
├── backups.yml
├── access.yml
├── all.yml
├── security.yml
├── monitoring.yml
├── pre-checks.yml
├── essentials.yml
└── post-checks.yml
├── scripts
├── generate_project_data.py
└── scaffold_inventories.py
├── ansible.cfg
├── .gitignore
├── Makefile
├── plugins
└── pretty.py
└── readme.txt
/roles/apparmor/files/profiles/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Empty file
3 |
--------------------------------------------------------------------------------
/roles/ssh/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ssh_port: 2200
2 |
3 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.ansible-lint:
--------------------------------------------------------------------------------
1 | skip_list:
2 | - 'yaml'
3 | - 'role-name'
4 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | */__pycache__
3 | *.pyc
4 | .cache
5 |
6 |
--------------------------------------------------------------------------------
/roles/monit/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Lissy93/ansibles/HEAD/roles/monit/.DS_Store
--------------------------------------------------------------------------------
/roles/docker/vars/Alpine.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_packages: "docker"
3 | docker_compose_package: docker-cli-compose
4 |
--------------------------------------------------------------------------------
/roles/docker/vars/Archlinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_packages: "docker"
3 | docker_compose_package: docker-compose
4 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/meta/.galaxy_install_info:
--------------------------------------------------------------------------------
1 | install_date: Thu 24 Apr 09:39:59 2025
2 | version: 3.1.0
3 |
--------------------------------------------------------------------------------
/roles/firewall/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: reload ufw
2 | ansible.builtin.command: ufw reload
3 | become: true
4 |
--------------------------------------------------------------------------------
/roles/monit/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart monit
2 | systemd:
3 | name: monit
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/roles/cockpit/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart cockpit
2 | systemd:
3 | name: cockpit
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/roles/node_exporter/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: reload systemd
3 | ansible.builtin.systemd:
4 | daemon_reload: yes
5 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | roles:
2 | - src: geerlingguy.docker
3 | - src: geerlingguy.pip
4 |
5 | collections:
6 | - name: adhawkins.borgbase
7 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 | ---
3 | github: geerlingguy
4 | patreon: geerlingguy
5 |
--------------------------------------------------------------------------------
/roles/timezone/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart NTP
2 | ansible.builtin.service:
3 | name: "{{ ntp_package }}"
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/roles/grafana/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart Grafana
2 | ansible.builtin.systemd:
3 | name: "{{ grafana_service }}"
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/playbooks/configs.yml:
--------------------------------------------------------------------------------
1 | # Installs extra packages and configures dotfiles
2 | - name: 'Configs'
3 | hosts: all
4 | become: true
5 | tags: configs
6 |
--------------------------------------------------------------------------------
/roles/dotfiles/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dotfiles_repo: "git@github.com:Lissy93/dotfiles.git"
3 | dotfiles_dir: "{{ lookup('env', 'HOME') }}/.dotfiles"
4 |
--------------------------------------------------------------------------------
/roles/timezone/templates/timesyncd.conf.j2:
--------------------------------------------------------------------------------
1 | # /etc/systemd/timesyncd.conf
2 | [Time]
3 | {% for server in ntp_servers %}
4 | NTP={{ server }}
5 | {% endfor %}
6 |
--------------------------------------------------------------------------------
/roles/monit/defaults/main.yml:
--------------------------------------------------------------------------------
1 | monit_port: 4001
2 | monit_bind_address: "0.0.0.0"
3 | monit_user: "boss"
4 | monit_password: "changeme420"
5 | monit_public: false
6 |
--------------------------------------------------------------------------------
/roles/apparmor/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: reload apparmor
2 | ansible.builtin.service:
3 | name: "{{ apparmor_service }}"
4 | state: reloaded
5 | enabled: yes
6 |
--------------------------------------------------------------------------------
/roles/ssh/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart SSH
2 | ansible.builtin.service:
3 | name: "{{ 'ssh' if ansible_facts['os_family'] == 'Debian' else 'sshd' }}"
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.yamllint:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 |
4 | rules:
5 | line-length:
6 | max: 120
7 | level: warning
8 |
9 | ignore: |
10 | .github/workflows/stale.yml
11 |
--------------------------------------------------------------------------------
/roles/grafana/files/provisioning/datasources/loki.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | datasources:
3 | - name: Loki
4 | type: loki
5 | access: proxy
6 | url: http://localhost:3100
7 | version: 1
8 |
--------------------------------------------------------------------------------
/roles/prometheus/templates/prometheus.yml.j2:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: "{{ prometheus_global_scrape_interval }}"
3 |
4 | scrape_configs:
5 | {{ prometheus_scrape_configs | to_nice_yaml(indent=2) }}
6 |
--------------------------------------------------------------------------------
/scripts/generate_project_data.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | # - Read and parse playbooks, and YAML metadata comments
4 | # - Read and parse roles (tasks, defaults, handlers, templates)
5 | # - Generate a giant JSON object
6 |
--------------------------------------------------------------------------------
/roles/fail2ban/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart Fail2Ban
2 | ansible.builtin.systemd:
3 | name: "{{ fail2ban_service_name }}"
4 | state: restarted
5 | listen: Restart Fail2Ban
6 | tags: [fail2ban]
7 |
--------------------------------------------------------------------------------
/roles/grafana/templates/grafana.ini.j2:
--------------------------------------------------------------------------------
1 | {% for section, options in grafana_ini.items() %}
2 | [{{ section }}]
3 | {% for key, value in options.items() %}
4 | {{ key }} = {{ value }}
5 | {% endfor %}
6 |
7 | {% endfor %}
8 |
--------------------------------------------------------------------------------
/roles/loki/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Reload systemd
2 | ansible.builtin.systemd:
3 | daemon_reload: true
4 |
5 | - name: Restart loki
6 | ansible.builtin.systemd:
7 | name: loki
8 | state: restarted
9 |
--------------------------------------------------------------------------------
/roles/grafana/files/provisioning/datasources/prometheus.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | datasources:
3 | - name: Prometheus
4 | type: prometheus
5 | access: proxy
6 | url: http://localhost:9091
7 | isDefault: true
8 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # For Python 3, use python3-pip.
3 | pip_package: python3-pip
4 | pip_executable: "{{ 'pip3' if pip_package.startswith('python3') else 'pip' }}"
5 |
6 | pip_install_packages: []
7 |
--------------------------------------------------------------------------------
/roles/cockpit/readme.txt:
--------------------------------------------------------------------------------
1 |
2 | Cockpit
3 | > Installs and configures the Cockpit UI server management interface
4 |
5 | USAGE
6 | -----
7 | Run `make cockpit`
8 |
9 | VARIABLES
10 | ---------
11 |
12 | STEPS
13 | ------
14 |
--------------------------------------------------------------------------------
/roles/docker/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart docker
3 | service:
4 | name: docker
5 | state: "{{ docker_restart_handler_state }}"
6 | ignore_errors: "{{ ansible_check_mode }}"
7 | when: docker_service_manage | bool
8 |
--------------------------------------------------------------------------------
/roles/maldet/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Reload systemd
2 | ansible.builtin.command: systemctl daemon-reload
3 |
4 | - name: Restart maldet
5 | ansible.builtin.systemd:
6 | name: maldet
7 | enabled: yes
8 | state: restarted
9 |
--------------------------------------------------------------------------------
/roles/hostname/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Reboot if needed
2 | ansible.builtin.reboot:
3 | msg: "Rebooting to apply hostname change"
4 | connect_timeout: 5
5 | reboot_timeout: 600
6 | when: reboot_after_hostname_change | bool
7 |
--------------------------------------------------------------------------------
/roles/cockpit/defaults/main.yml:
--------------------------------------------------------------------------------
1 | cockpit_port: 9090
2 | # cockpit_bind_address: "127.0.0.1" # Can override to "0.0.0.0" if public
3 | # cockpit_public: false # Set true to open in firewall
4 |
5 | cockpit_bind_address: "0.0.0.0"
6 | cockpit_public: true
7 |
--------------------------------------------------------------------------------
/roles/loki/files/loki.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Loki Log Aggregation
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/opt/loki/loki -config.file=/etc/loki/config.yml
7 | Restart=on-failure
8 | User=root
9 | Group=root
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/roles/firewall/tasks/lockdown.yml:
--------------------------------------------------------------------------------
1 | - name: "Deny ports no longer in use"
2 | ansible.builtin.ufw:
3 | rule: deny
4 | port: "{{ item }}"
5 | loop: "{{ firewall_deny_ports }}"
6 | loop_control:
7 | label: "{{ item }}"
8 | when: firewall_deny_ports is defined and firewall_lockdown | bool
9 |
--------------------------------------------------------------------------------
/roles/docker/tasks/docker-users.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure docker users are added to the docker group.
3 | user:
4 | name: "{{ item }}"
5 | groups: docker
6 | append: true
7 | with_items: "{{ docker_users }}"
8 |
9 | - name: Reset ssh connection to apply user changes.
10 | meta: reset_connection
11 |
--------------------------------------------------------------------------------
/roles/logrotate/templates/logrotate_item.j2:
--------------------------------------------------------------------------------
1 | {{ item.path }} {
2 | {% for opt in item.options %}
3 | {{ opt }}
4 | {% endfor %}
5 | rotate {{ item.rotate }}
6 | {{ item.frequency }}
7 | {% if item.create %}
8 | create {{ item.create_mode }} {{ item.create_owner }} {{ item.create_group }}
9 | {% endif %}
10 | }
11 |
--------------------------------------------------------------------------------
/roles/directories/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure base server directories exist
2 | ansible.builtin.file:
3 | path: "{{ item }}"
4 | state: directory
5 | owner: "{{ server_directory_owner }}"
6 | group: "{{ server_directory_group }}"
7 | mode: "{{ server_directory_mode }}"
8 | loop: "{{ server_directories }}"
9 | when: server_structure_enabled
10 |
--------------------------------------------------------------------------------
/roles/docker/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Used only for Debian/Ubuntu (Debian OS-Family)
3 | # https://docs.docker.com/engine/install/debian/#uninstall-old-versions
4 |
5 | docker_obsolete_packages:
6 | - docker
7 | - docker.io
8 | - docker-engine
9 | - docker-doc
10 | - docker-compose
11 | - docker-compose-v2
12 | - podman-docker
13 | - containerd
14 | - runc
15 |
--------------------------------------------------------------------------------
/roles/lynis/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # defaults for Lynis role
2 |
3 | lynis_enabled: true
4 |
5 | lynis_install_from_repo: true # if true, use apt/yum/etc; if false, download manually
6 |
7 | lynis_scan_paths:
8 | - /
9 | lynis_scan_verbose: true
10 | lynis_scan_report: true
11 | lynis_scan_custom_options: "" # Any extra lynis parameters
12 |
13 | lynis_package_name: lynis
14 |
--------------------------------------------------------------------------------
/roles/grafana-agent/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart grafana-agent
2 | ansible.builtin.systemd:
3 | name: grafana-agent
4 | state: restarted
5 | daemon_reload: true
6 |
7 | - name: Remove grafana-agent temp
8 | ansible.builtin.file:
9 | path: "{{ item }}"
10 | state: absent
11 | loop:
12 | - /tmp/grafana-agent.zip
13 | - /tmp/grafana-agent-linux-amd64
14 |
15 |
--------------------------------------------------------------------------------
/playbooks/services.yml:
--------------------------------------------------------------------------------
1 | # Optionally configures frequently used services.
2 | # Such as Docker on container-based systems, or Caddy on server-based systems.
3 | - name: 'Services'
4 | hosts: all
5 | become: true
6 | tags: services
7 | tasks:
8 | - include_role:
9 | name: docker
10 | when: docker_enabled | default(true)
11 | tags: [ docker ]
12 | ignore_errors: true
13 |
--------------------------------------------------------------------------------
/roles/node_exporter/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | node_exporter_enabled: true
3 | node_exporter_version: "1.6.1"
4 | node_exporter_url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
5 | node_exporter_install_dir: /opt
6 | node_exporter_user: prometheus
7 | node_exporter_group: prometheus
8 | node_exporter_port: 9100
9 |
--------------------------------------------------------------------------------
/roles/directories/defaults/main.yml:
--------------------------------------------------------------------------------
1 | server_structure_enabled: true
2 |
3 | server_directories:
4 | - /srv
5 | - /srv/infra
6 | - /srv/infra/stack
7 | - /srv/infra/stack/apps
8 | - /srv/infra/stack/data
9 |
10 | server_directory_owner: "{{ srv_owner | default(new_user | default('root')) }}"
11 | server_directory_group: "{{ srv_group | default(new_user | default('root')) }}"
12 |
13 | server_directory_mode: '0755'
14 |
--------------------------------------------------------------------------------
/roles/maldet/templates/maldet.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Linux Malware Detect monitoring - maldet
3 | After=network.target
4 |
5 | [Service]
6 | Type=forking
7 | WorkingDirectory={{ maldet_install_path }}
8 | ExecStart={{ maldet_install_path }}/maldet --monitor {{ maldet_install_path }}
9 | PIDFile={{ maldet_install_path }}/tmp/inotifywait.pid
10 | Restart=on-failure
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/docker/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Used only for Fedora/Rocky (RedHat OS-Family)
3 | # https://docs.docker.com/engine/install/fedora/#uninstall-old-versions
4 | # https://docs.docker.com/engine/install/centos/#uninstall-old-versions
5 |
6 | docker_obsolete_packages:
7 | - docker
8 | - docker-client
9 | - docker-client-latest
10 | - docker-common
11 | - docker-latest
12 | - docker-latest-logrotate
13 | - docker-logrotate
14 | - docker-engine
15 |
--------------------------------------------------------------------------------
/playbooks/backups.yml:
--------------------------------------------------------------------------------
1 | # Enables scheduled backups of specific directories on the server
2 | - name: 'Backups'
3 | hosts: all
4 | become: true
5 | tags: backups
6 | tasks:
7 | - block:
8 | - include_role:
9 | name: borg
10 | when: borg_enabled | default(true)
11 | rescue:
12 | - debug:
13 | msg: "⚠️ Borg role failed. Continuing with next role."
14 | always:
15 | - meta: clear_host_errors
16 |
--------------------------------------------------------------------------------
/roles/blackbox_exporter/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | blackbox_exporter_enabled: true
3 | blackbox_exporter_version: "0.24.0"
4 | blackbox_exporter_url: "https://github.com/prometheus/blackbox_exporter/releases/download/v{{ blackbox_exporter_version }}/blackbox_exporter-{{ blackbox_exporter_version }}.linux-amd64.tar.gz"
5 | blackbox_exporter_install_dir: /opt
6 | blackbox_exporter_port: 9115
7 | blackbox_modules:
8 | http_2xx:
9 | prober: http
10 | timeout: 5s
11 |
--------------------------------------------------------------------------------
/roles/hostname/defaults/main.yml:
--------------------------------------------------------------------------------
1 | hostname_set: true # Whether to set hostname (allow skipping easily)
2 |
3 | hostname_name: "{{ inventory_hostname }}" # Default: inventory hostname
4 | hostname_fqdn: "" # Optional full domain (e.g., server.example.com)
5 |
6 | update_hosts_file: true # Whether to update /etc/hosts
7 | hosts_file_content: [] # Optional custom entries for /etc/hosts
8 |
9 | reboot_after_hostname_change: false # Optional reboot if hostname changed
10 |
--------------------------------------------------------------------------------
/roles/maldet/defaults/main.yml:
--------------------------------------------------------------------------------
1 | maldet_enabled: true
2 |
3 | maldet_install_path: /usr/local/maldetect
4 | maldet_scan_paths:
5 | - /home
6 | - /var/www
7 | - /srv
8 |
9 | maldet_email_alert: false
10 | maldet_email_address: ""
11 | maldet_daily_cron: true
12 | maldet_quarantine_hits: false
13 |
14 | # You can later extend with auto-quarantine, inotify monitoring, etc. if needed
15 |
16 |
17 | maldet_download_path: /tmp/maldetect-latest.tar.gz
18 | maldet_extract_path: /tmp/maldetect-*
19 |
20 |
--------------------------------------------------------------------------------
/roles/timezone/defaults/main.yml:
--------------------------------------------------------------------------------
1 | timezone_name: "UTC" # Default timezone
2 | timezone_force: false # Force re-set timezone even if correct
3 |
4 | ntp_enabled: true # Whether to install and enable NTP
5 | ntp_mode: "chrony" # NTP mode (chrony or ntp)
6 | ntp_package: "ntp" # Package name (or chrony if you want to switch)
7 | ntp_servers: [] # Custom NTP servers, optional
8 |
9 | sync_time_now: true # Whether to sync time immediately after setup
10 |
11 | ntp_service_chrony: chronyd
12 |
--------------------------------------------------------------------------------
/roles/fail2ban/templates/jail.local.j2:
--------------------------------------------------------------------------------
1 | # Fail2Ban jail.local generated by Ansible
2 |
3 | [DEFAULT]
4 | bantime = {{ fail2ban_bantime }}
5 | findtime = {{ fail2ban_findtime }}
6 | maxretry = {{ fail2ban_maxretry }}
7 | backend = systemd
8 |
9 | {% for jail in fail2ban_jails %}
10 | [{{ jail.name }}]
11 | enabled = {{ 'true' if jail.enabled else 'false' }}
12 | port = {{ jail.port }}
13 | logpath = {{ jail.logpath }}
14 | backend = {{ jail.backend }}
15 | maxretry = {{ jail.maxretry }}
16 | bantime = {{ jail.bantime }}
17 | findtime = {{ jail.findtime }}
18 | {% endfor %}
19 |
--------------------------------------------------------------------------------
/roles/prometheus/templates/prometheus.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Prometheus Time Series Database
3 | Wants=network-online.target
4 | After=network-online.target
5 |
6 | [Service]
7 | User={{ prometheus_user }}
8 | Group={{ prometheus_group }}
9 | Type=simple
10 | ExecStart=/usr/local/bin/prometheus \
11 | --config.file={{ prometheus_config_dir }}/prometheus.yml \
12 | --storage.tsdb.path={{ prometheus_data_dir }} \
13 | --web.listen-address={{ prometheus_listen_address }}:{{ prometheus_port }}
14 | Restart=on-failure
15 |
16 | [Install]
17 | WantedBy=multi-user.target
18 |
--------------------------------------------------------------------------------
/roles/docker/molecule/default/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | role_name_check: 1
3 | dependency:
4 | name: galaxy
5 | options:
6 | ignore-errors: true
7 | driver:
8 | name: docker
9 | platforms:
10 | - name: instance
11 | image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
12 | command: ${MOLECULE_DOCKER_COMMAND:-""}
13 | volumes:
14 | - /sys/fs/cgroup:/sys/fs/cgroup:rw
15 | cgroupns_mode: host
16 | privileged: true
17 | pre_build_image: true
18 | provisioner:
19 | name: ansible
20 | playbooks:
21 | converge: ${MOLECULE_PLAYBOOK:-converge.yml}
22 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/molecule/default/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | role_name_check: 1
3 | dependency:
4 | name: galaxy
5 | options:
6 | ignore-errors: true
7 | driver:
8 | name: docker
9 | platforms:
10 | - name: instance
11 | image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
12 | command: ${MOLECULE_DOCKER_COMMAND:-""}
13 | volumes:
14 | - /sys/fs/cgroup:/sys/fs/cgroup:rw
15 | cgroupns_mode: host
16 | privileged: true
17 | pre_build_image: true
18 | provisioner:
19 | name: ansible
20 | playbooks:
21 | converge: ${MOLECULE_PLAYBOOK:-converge.yml}
22 |
--------------------------------------------------------------------------------
/roles/grafana/defaults/main.yml:
--------------------------------------------------------------------------------
1 | grafana_enabled: true
2 |
3 | # repository key & repo definition
4 | grafana_repo_key_url: "https://apt.grafana.com/gpg.key"
5 | grafana_repo: "deb https://apt.grafana.com stable main"
6 |
7 | # package & service names
8 | grafana_package: "grafana"
9 | grafana_service: "grafana-server"
10 |
11 | # HTTP port for health check
12 | grafana_port: 3000
13 |
14 | # admin credentials (empty = leave default)
15 | grafana_admin_user: "admin"
16 | grafana_admin_password: ""
17 |
18 | # optional overrides for grafana.ini
19 | # must be a dict of sections → dict of key/value
20 | grafana_ini: {}
21 |
--------------------------------------------------------------------------------
/roles/prometheus/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Link Prometheus binaries
3 | ansible.builtin.file:
4 | src: "{{ prometheus_install_dir }}/prometheus"
5 | dest: /usr/local/bin/prometheus
6 | state: link
7 | become: true
8 |
9 | - name: Link promtool binary
10 | ansible.builtin.file:
11 | src: "{{ prometheus_install_dir }}/promtool"
12 | dest: /usr/local/bin/promtool
13 | state: link
14 | become: true
15 |
16 | - name: Reload systemd
17 | ansible.builtin.systemd:
18 | daemon_reload: yes
19 |
20 | - name: Restart Prometheus
21 | ansible.builtin.systemd:
22 | name: prometheus
23 | state: restarted
24 |
--------------------------------------------------------------------------------
/roles/fail2ban/defaults/main.yml:
--------------------------------------------------------------------------------
1 | fail2ban_enabled: true
2 | fail2ban_service_name: fail2ban
3 | fail2ban_bantime: 600 # Seconds (default: 10 minutes)
4 | fail2ban_findtime: 600 # How far back to look for failures (seconds)
5 | fail2ban_maxretry: 5 # How many failures before ban
6 | fail2ban_ignoreip: [] # List of IPs to ignore (whitelisted)
7 | fail2ban_jails: # List of enabled jails
8 | - name: sshd
9 | enabled: true
10 | port: ssh
11 | logpath: /var/log/auth.log
12 | backend: systemd
13 | maxretry: "{{ fail2ban_maxretry }}"
14 | bantime: "{{ fail2ban_bantime }}"
15 | findtime: "{{ fail2ban_findtime }}"
16 |
--------------------------------------------------------------------------------
/roles/dotfiles/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure dotfiles directory exists
3 | file:
4 | path: "{{ dotfiles_dir }}"
5 | state: directory
6 | owner: "{{ ansible_user }}"
7 | group: "{{ ansible_user }}"
8 | mode: '0755'
9 |
10 | # In the future, uncomment the following tasks:
11 |
12 | # - name: Clone dotfiles repository
13 | # git:
14 | # repo: "{{ dotfiles_repo }}"
15 | # dest: "{{ dotfiles_dir }}"
16 | # version: main
17 | # accept_hostkey: true
18 |
19 | # - name: Run dotfiles install script
20 | # command: "{{ dotfiles_dir }}/install.sh"
21 | # args:
22 | # chdir: "{{ dotfiles_dir }}"
23 | # when: dotfiles_dir is defined
24 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies: []
3 |
4 | galaxy_info:
5 | role_name: pip
6 | author: geerlingguy
7 | description: Pip (Python package manager) for Linux.
8 | issue_tracker_url: https://github.com/geerlingguy/ansible-role-pip/issues
9 | company: "Midwestern Mac, LLC"
10 | license: "MIT"
11 | min_ansible_version: 2.10
12 | platforms:
13 | - name: Fedora
14 | versions:
15 | - all
16 | - name: Debian
17 | versions:
18 | - all
19 | - name: Ubuntu
20 | versions:
21 | - all
22 | galaxy_tags:
23 | - system
24 | - server
25 | - packaging
26 | - python
27 | - pip
28 | - tools
29 |
--------------------------------------------------------------------------------
/playbooks/access.yml:
--------------------------------------------------------------------------------
# Configures easy access to the server via Cockpit (web UI) and Tailscale (VPN)

- name: Access
  hosts: all
  become: true
  tags: access

  tasks:
    # Tailscale is best-effort: a failed join must not abort the remaining
    # roles, so errors are caught in rescue and cleared in always.
    - name: Set up Tailscale (best-effort)
      block:
        - name: Include tailscale role
          include_role:
            name: tailscale
          when: tailscale_enabled | default(true)
      rescue:
        - name: Report tailscale failure
          debug:
            msg: "⚠️ tailscale role failed. Continuing with next role."
      always:
        - name: Clear any failure flag so next roles still run
          meta: clear_host_errors
      tags: [tailscale]

    - name: Include cockpit role
      include_role:
        name: cockpit
      when: cockpit_enabled | default(true)
      tags: [cockpit]
24 |
--------------------------------------------------------------------------------
/roles/apparmor/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Globally enable or disable this role
2 | apparmor_enabled: true
3 |
4 | # Package & service names (Ubuntu / Debian defaults)
5 | apparmor_package: apparmor
6 | apparmor_service: apparmor
7 |
8 | # List of AppArmor profiles to deploy.
9 | # Each item must be a dict with:
10 | # name:
11 | # src:
12 | # state: present|absent (optional, defaults to present)
13 | #
14 | # Example:
15 | # apparmor_profiles:
16 | # - name: usr.sbin.mysqld
17 | # src: profiles/usr.sbin.mysqld
18 | # state: present
19 | # apparmor_profiles: []
20 |
21 | apparmor_profiles: []
22 |
--------------------------------------------------------------------------------
/scripts/scaffold_inventories.py:
--------------------------------------------------------------------------------
1 |
2 | # 1. If inventories/ already exists, and so does ./production, then exit with info
3 | # 2. If it does not exist, create it
4 | # 3. Then create the file structure:
5 | # inventories
6 | # └── production
7 | # ├── group_vars
8 | # │ └── all.yml
9 | # ├── host_vars
10 | # │ └── my-server.yml
11 | # └── hosts.yml
12 |
13 | # 4. Write content to group_vars/all.yml
14 | # 5. Write content to host_vars/my-server.yml
15 | # 6. Write content to hosts.yml
16 | # 7. Print a message indicating that the inventory structure has been created
17 | # And tell them what each file is, and what they should edit in it.
18 | # Finally, tell them how they can add new servers to the inventory.
19 |
--------------------------------------------------------------------------------
/roles/docker/molecule/default/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Converge
3 | hosts: all
4 | # become: true
5 |
6 | pre_tasks:
7 | - name: Update apt cache.
8 | apt: update_cache=yes cache_valid_time=600
9 | when: ansible_os_family == 'Debian'
10 |
11 | - name: Wait for systemd to complete initialization. # noqa 303
12 | command: systemctl is-system-running
13 | register: systemctl_status
14 | until: >
15 | 'running' in systemctl_status.stdout or
16 | 'degraded' in systemctl_status.stdout
17 | retries: 30
18 | delay: 5
19 | when: ansible_service_mgr == 'systemd'
20 | changed_when: false
21 | failed_when: systemctl_status.rc > 1
22 |
23 | roles:
24 | - role: geerlingguy.docker
25 |
--------------------------------------------------------------------------------
/roles/grafana-agent/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
agent_enabled: true
# Alias read by playbooks/monitoring.yml (`grafana_agent_enabled`); keeps the
# historical `agent_enabled` toggle working while matching the role's name.
# Override either variable to disable the role.
grafana_agent_enabled: "{{ agent_enabled }}"
agent_version: "v0.44.2"
grafana_agent_url: "https://github.com/grafana/agent/releases/download/{{ agent_version }}/grafana-agent-linux-amd64.zip"
grafana_agent_config_path: "/etc/grafana-agent"
grafana_agent_binary_path: "/usr/local/bin/grafana-agent"
grafana_agent_data_path: "/var/lib/grafana-agent"
grafana_agent_config_file: "{{ grafana_agent_config_path }}/config.yml"

prometheus_port: 9091
loki_http_port: 3100

loki_push_url: "http://localhost:{{ loki_http_port }}/loki/api/v1/push"
prometheus_remote_write_url: "http://localhost:{{ prometheus_port }}/api/v1/write"

# Files shipped to Loki and scraped by the agent's log config.
log_paths:
  - /var/log/syslog
  - /var/log/auth.log
  - /var/log/kern.log
20 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure Pip is installed.
3 | package:
4 | name: "{{ pip_package }}"
5 | state: present
6 |
7 | - name: Remove EXTERNALLY-MANAGED
8 | ansible.builtin.file:
9 | path: /usr/lib/python3.{{ ansible_python.version.minor }}/EXTERNALLY-MANAGED
10 | state: absent
11 |
12 | - name: Ensure pip_install_packages are installed.
13 | pip:
14 | name: "{{ item.name | default(item) }}"
15 | version: "{{ item.version | default(omit) }}"
16 | virtualenv: "{{ item.virtualenv | default(omit) }}"
17 | state: "{{ item.state | default(omit) }}"
18 | extra_args: "{{ item.extra_args | default(omit) }}"
19 | executable: "{{ item.virtualenv | default(false) | ternary(omit, pip_executable) }}"
20 | loop: "{{ pip_install_packages }}"
21 |
--------------------------------------------------------------------------------
/roles/users/tasks/main.yml:
--------------------------------------------------------------------------------
# Create the admin user, install their SSH key, and require a password for
# sudo. The docker group must exist before the user can be added to it.
- name: Ensure docker group exists
  group:
    name: docker
    state: present

# Always create the user; only set a password when one was provided.
# (Previously the whole task was skipped when user_password was undefined,
# which left the user missing and broke the authorized_key task below.)
- name: Create user
  user:
    name: "{{ new_user }}"
    shell: /bin/bash
    groups: sudo,docker
    append: yes
    state: present
    create_home: yes
    password: "{{ user_password | password_hash('sha512') if user_password is defined else omit }}"

- name: Set up authorized SSH key
  authorized_key:
    user: "{{ new_user }}"
    state: present
    key: "{{ lookup('file', user_ssh_key_path) }}"

# Drop-in sudoers entry; validated with visudo so a bad render can never
# break sudo on the host. Content must end with a newline.
- name: Require password when using sudo
  copy:
    dest: "/etc/sudoers.d/{{ new_user }}"
    content: "{{ new_user }} ALL=(ALL) ALL\n"
    mode: '0440'
    owner: root
    group: root
    validate: '/usr/sbin/visudo -cf %s'
30 |
--------------------------------------------------------------------------------
/roles/firewall/defaults/main.yml:
--------------------------------------------------------------------------------
# whether to run this role
ufw_enabled: true

# if true, run `ufw reset` first
ufw_reset: false

# default policies
ufw_default_incoming_policy: deny # deny all incoming by default
ufw_default_outgoing_policy: allow # allow all outgoing by default

# which TCP ports to allow
ufw_allowed_tcp_ports:
  - "{{ ssh_port | default(22) }}"
  - 80
  - 443

# which UDP ports to allow
ufw_allowed_udp_ports: []

# which TCP ports to explicitly deny
ufw_denied_tcp_ports: []

# which UDP ports to explicitly deny
ufw_denied_udp_ports: []

# extra ports to remove when lockdown is true
ufw_lockdown: false

# enable logging ('on', 'off', 'low', 'high', etc)
# Quoted on purpose: a bare `on` is parsed by YAML 1.1 as boolean true,
# which the ufw module does not accept — it expects the string 'on'.
ufw_logging: "on"
--------------------------------------------------------------------------------
/roles/logrotate/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Enable or disable this entire role
3 | logrotate_enabled: true
4 |
5 | # Where to place individual logrotate config snippets
6 | logrotate_config_dir: "/etc/logrotate.d"
7 |
8 | # List of things to rotate. Each item must include:
9 | # name: unique filename under logrotate_config_dir (e.g. "myapp")
10 | # path: a file path or shell glob (e.g. "/var/log/myapp/*.log")
11 | # options: list of logrotate options (e.g. ["missingok", "compress"])
12 | # rotate: how many rotations to keep (integer)
13 | # frequency: "daily", "weekly", etc.
14 | # create: whether to include a `create` directive (boolean)
15 | # create_mode/owner/group: values for create, if enabled
16 | #
17 | # By default, we do nothing. Override in your playbook or group_vars.
18 | logrotate_items: []
19 |
--------------------------------------------------------------------------------
/roles/borg/defaults/main.yml:
--------------------------------------------------------------------------------
1 | borg_enabled: true
2 |
3 | # Borg SSH
4 | borg_ssh_key_path: "/home/{{ ansible_user }}/.ssh/borgmatic_ed25519"
5 | borgbase_key_name: "{{ ansible_user }}@{{ ansible_hostname }}"
6 | borgbase_apikey: ''
7 | borg_user: ''
8 |
9 | # Repo & backup target
10 | borg_repo: '' # e.g. xyz420@xyz420.repo.borgbase.com:repo
11 | borg_password: ''
12 | borg_name: ''
13 |
14 | # Backup sources
15 | borg_source_directories:
16 | - /home
17 | - /etc
18 | - /var/lib
19 | - /srv
20 |
21 | # Retention
22 | borg_retention:
23 | keep_within: 48H
24 | keep_daily: 7
25 | keep_weekly: 4
26 | keep_monthly: 12
27 | keep_yearly: 1
28 |
29 | # Compression & ssh
30 | borg_compression: lz4
31 | borg_retries: 3
32 | borg_retry_wait: 60
33 |
34 | # Healthchecks
35 | borg_ping_url: ''
36 | borg_ping_states: [start, finish, fail]
37 |
38 |
39 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
# The default SSH user to use when none is specified per host
remote_user = root
# Default (relative) inventory file or directory
inventory = inventories/production
# The (relative) path where roles are located
roles_path = roles
# Set to false to disable SSH host key checking
host_key_checking = True
# Enable SSH pipelining to reduce SSH sessions per task
pipelining = True
# Set to true to create "retry" files after failed playbook runs
retry_files_enabled = False
# Location for full logs for troubleshooting
# log_path = logs/ansible.log

# Output settings, using custom callback for pretty output
stdout_callback = pretty
# NOTE(review): `callback_whitelist` was renamed to `callbacks_enabled` in
# ansible-core 2.11 and the old name is deprecated — switch once the minimum
# supported core version is confirmed.
callback_whitelist = pretty
callback_plugins = ./plugins
color = True
nocows = 1

# For Ansible Vault, point to your password file
# vault_password_file = ~/.vault_pass.txt
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Ansible-specific
*.retry
*.log
*.bak
*.orig
*.swp
*.tmp
.ansible

# Ignore sensitive or dynamic inventory files
# NOTE(review): both `inventories/` and `inventory/` are matched here — the
# repo itself uses `inventories/` (see ansible.cfg); confirm whether the
# `inventory/` rules are still needed.
inventories/*
inventory/*.ini
inventory/*.yml
inventory/*.yaml
inventory/*.json
!inventory/sample_inventory.yml

# Logs (*.log itself is already ignored above)
logs/*
*.log.*

# Vault files (plaintext or auto-generated, if not encrypted)
vault.yml
group_vars/*/vault.yml
host_vars/*/vault.yml

# Ignore compiled Python files
__pycache__/
*.pyc
*.pyo

# Ignore virtual environments
.venv/
venv/
ENV/
env/

# Ignore molecule testing directories
.molecule/

# Ignore SSH key files
*.pem
*.key
*.pub
.ssh/

# Ignore system-specific files
.DS_Store
Thumbs.db

# Ignore editor/project files
.idea/
.vscode/
*.code-workspace
56 |
--------------------------------------------------------------------------------
/roles/monit/tasks/main.yml:
--------------------------------------------------------------------------------
# Install monit, configure its embedded web UI (httpd), and keep the service
# running. Port, bind address and credentials come from role defaults.
- name: Install monit
  apt:
    name: monit
    state: present
    update_cache: yes

- name: Configure monit httpd
  copy:
    dest: /etc/monit/conf-enabled/httpd
    owner: root
    group: root
    mode: '0600'
    content: |
      set httpd port {{ monit_port }} and
          use address {{ monit_bind_address }}
          allow {{ monit_user }}:"{{ monit_password }}"

  notify: Restart monit

# `monit -t` only parses the configuration; it never changes service state.
- name: Validate monit config
  command: monit -t
  changed_when: false

- name: Ensure monit is running and enabled
  systemd:
    name: monit
    enabled: true
    state: started

# NOTE(review): this also fires when monit binds 127.0.0.1, where a UFW rule
# for the port has no effect on loopback traffic — confirm the intent of
# including '127.0.0.1' in the condition.
- name: Conditionally allow Monit port in UFW
  ufw:
    rule: "{{ 'allow' if monit_public else 'deny' }}"
    port: "{{ monit_port }}"
    proto: tcp
  when: monit_bind_address in ['0.0.0.0', '::', '127.0.0.1']
36 |
--------------------------------------------------------------------------------
/roles/installs/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 |
# Package groups installed by the installs role (Debian/Ubuntu names).
core:
  - build-essential
  - curl
  - file
  - git
  - less
  - man-db
  - unzip
  - vim
  - zip

shell:
  - fd-find
  - fzf
  - jq
  - neovim
  - ripgrep
  - tmux
  - zoxide
  - zsh

networking:
  - dnsutils
  - iperf3
  - iputils-ping
  - mtr-tiny
  - net-tools
  - nmap
  - traceroute
  - whois

monitoring:
  - bpytop # Resource monitoring
  - duff # Duplicate-file finder (NOTE(review): if disk usage was intended, the tool is `duf`)
  - gping # Ping charts
  - iftop # Bandwidth utilization check
  - iotop # Disk I/O per-process
  - iperf3 # Precise cross-server speed testing
  - mtr # Traceroute, ping, network diagnostics
  - ncdu # Disk usage analyzer
  - atop # Process viewer

misc:
  - figlet
  - lolcat
  - neofetch
49 |
--------------------------------------------------------------------------------
/roles/cockpit/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install Cockpit base package
2 | apt:
3 | name: cockpit
4 | state: present
5 | update_cache: true
6 |
7 | - name: "Install component: {{ item }}"
8 | apt:
9 | name: "{{ item }}"
10 | state: present
11 | loop:
12 | - cockpit-dashboard
13 | - cockpit-networkmanager
14 | - cockpit-storaged
15 | - cockpit-packagekit
16 | - cockpit-system
17 | - cockpit-pcp
18 | - cockpit-sensors
19 | - cockpit-files
20 | - cockpit-sosreport
21 | - cockpit-podman
22 | ignore_errors: true
23 |
24 | - name: Ensure cockpit.socket is enabled and started
25 | systemd:
26 | name: cockpit.socket
27 | enabled: true
28 | state: started
29 |
30 | - name: Conditionally allow Cockpit port in UFW
31 | ufw:
32 | rule: "{{ 'allow' if cockpit_public else 'deny' }}"
33 | port: "{{ cockpit_port }}"
34 | proto: tcp
35 | when: cockpit_bind_address in ['0.0.0.0', '::', '127.0.0.1']
36 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/molecule/default/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Converge
3 | hosts: all
4 | become: true
5 |
6 | vars:
7 | pip_install_packages:
8 | # Test installing a specific version of a package.
9 | - name: ipaddress
10 | version: "1.0.18"
11 | # Test installing a package by name.
12 | - colorama
13 |
14 | pre_tasks:
15 | - name: Update apt cache.
16 | apt: update_cache=true cache_valid_time=600
17 | when: ansible_os_family == 'Debian'
18 |
19 | - name: Set package name for older OSes.
20 | set_fact:
21 | pip_package: python-pip
22 | when: >
23 | (ansible_os_family == 'RedHat') and (ansible_distribution_major_version | int < 8)
24 | or (ansible_distribution == 'Debian') and (ansible_distribution_major_version | int < 10)
25 | or (ansible_distribution == 'Ubuntu') and (ansible_distribution_major_version | int < 18)
26 |
27 | roles:
28 | - role: geerlingguy.pip
29 |
--------------------------------------------------------------------------------
/playbooks/all.yml:
--------------------------------------------------------------------------------
1 | # This is the main entrypoint playbook, which calls all other playbooks
2 | # for a full setup of the selected host(s).
3 |
4 | # Checks connection to host(s), dependencies are met, and required variables are set
5 | - import_playbook: pre-checks.yml
6 |
7 | # Sets up SSH, users, directories, timezone, hostname, firewall, etc
8 | - import_playbook: essentials.yml
9 |
10 | # Configures the shell and dotfiles
11 | - import_playbook: configs.yml
12 |
13 | # Set up observability and monitoring
14 | - import_playbook: monitoring.yml
15 |
# Sets up scheduled backups
17 | - import_playbook: backups.yml
18 |
19 | # Installs and configures Cockpit and VPN for easy server access
20 | - import_playbook: access.yml
21 |
22 | # Sets up common services, like Docker
23 | - import_playbook: services.yml
24 |
25 | # Configures security, including apparmor, maldet, Fail2Ban, Lynis
26 | - import_playbook: security.yml
27 |
28 | # All done, runs some final checks and prints output
29 | - import_playbook: post-checks.yml
30 |
--------------------------------------------------------------------------------
/roles/tailscale/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Which control plane to use: "tailscale" (managed) or "headscale" (self-hosted)
4 | tailscale_backend: "tailscale"
5 |
6 | # Required to use tailscale.
7 | # Override this in your inventory/group_vars with a valid auth key.
8 | # Get it at: https://login.tailscale.com/admin/settings/keys
9 | tailscale_auth_key: ''
10 |
11 | # The host (IP or valid hostname which is resolvable)
12 | # Uses current inventory host by default
13 | tailscale_hostname: "{{ inventory_hostname }}"
14 |
15 | # How to advertise local subnets into your Tailnet (e.g. ["10.0.0.0/24"]):
16 | tailscale_advertise_routes: []
17 |
18 | # If true, this node becomes an "exit node" for internet-bound traffic
19 | tailscale_advertise_exit_node: false
20 |
21 | # VLAN-style tags for ACLs (e.g. ["tag:linux","tag:web"]):
22 | tailscale_tags: []
23 |
24 | # Headscale settings (only needed if tailscale_backend = "headscale")
25 | headscale_server_url: null # e.g. "https://headscale.example.com"
26 | headscale_auth_key: null # preauth key you generated in Headscale
27 |
--------------------------------------------------------------------------------
/playbooks/security.yml:
--------------------------------------------------------------------------------
1 | # Properly secures the system with DevSec best practices, for example:
2 | # - System hardening (CIS benchmarks)
# - Process confinement (AppArmor)
4 | # - Intrusion detection (Fail2Ban),
5 | # - Integrity monitoring (OSSEC)
6 | # - Malware scanning (Maldet)
7 | # - Automated security audits (Lynis)
8 | - name: 'Security'
9 | hosts: all
10 | become: true
11 | tags: security
12 |
13 |
14 | tasks:
15 | - include_role:
16 | name: fail2ban
17 | when: fail2ban_enabled | default(true)
18 | tags: [ fail2ban ]
19 | ignore_errors: true
20 |
21 | - include_role:
22 | name: maldet
23 | when: maldet_enabled | default(true)
24 | tags: [ maldet ]
25 | ignore_errors: true
26 |
27 | - include_role:
28 | name: lynis
29 | when: lynis_enabled | default(true)
30 | tags: [ lynis ]
31 | ignore_errors: true
32 |
33 | - include_role:
34 | name: apparmor
35 | when: apparmor_enabled | default(true)
36 | tags: [ apparmor ]
37 | ignore_errors: true
38 | vars:
39 | run_scan: true
40 |
41 |
--------------------------------------------------------------------------------
/roles/loki/defaults/main.yml:
--------------------------------------------------------------------------------
1 | loki_enabled: true
2 | loki_url: "https://github.com/grafana/loki/releases/download/v3.5.0/loki-linux-amd64.zip"
3 |
4 | loki_http_port: 3100
5 | loki_grpc_port: 9095
6 |
7 | loki_config: |
8 | auth_enabled: false
9 |
10 | server:
11 | http_listen_port: {{ loki_http_port }}
12 | grpc_listen_port: {{ loki_grpc_port }}
13 |
14 | common:
15 | path_prefix: /opt/loki
16 | replication_factor: 1
17 | ring:
18 | kvstore:
19 | store: inmemory
20 |
21 | schema_config:
22 | configs:
23 | - from: 2022-01-01
24 | store: tsdb
25 | object_store: filesystem
26 | schema: v13
27 | index:
28 | prefix: index_
29 | period: 24h
30 |
31 | ruler:
32 | storage:
33 | type: local
34 | local:
35 | directory: /opt/loki/rules
36 | rule_path: /opt/loki/rules-temp
37 | alertmanager_url: http://localhost:9093
38 | ring:
39 | kvstore:
40 | store: inmemory
41 | enable_api: true
42 |
43 | limits_config:
44 | reject_old_samples: true
45 | reject_old_samples_max_age: 168h
46 |
--------------------------------------------------------------------------------
/roles/docker/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 Jeff Geerling
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 Jeff Geerling
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This workflow requires a GALAXY_API_KEY secret present in the GitHub
3 | # repository or organization.
4 | #
5 | # See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
6 | # See: https://github.com/ansible/galaxy/issues/46
7 |
8 | name: Release
9 | 'on':
10 | push:
11 | tags:
12 | - '*'
13 |
14 | defaults:
15 | run:
16 | working-directory: 'geerlingguy.pip'
17 |
18 | jobs:
19 |
20 | release:
21 | name: Release
22 | runs-on: ubuntu-latest
23 | steps:
24 | - name: Check out the codebase.
25 | uses: actions/checkout@v4
26 | with:
27 | path: 'geerlingguy.pip'
28 |
29 | - name: Set up Python 3.
30 | uses: actions/setup-python@v5
31 | with:
32 | python-version: '3.x'
33 |
34 | - name: Install Ansible.
35 | run: pip3 install ansible-core
36 |
37 | - name: Trigger a new import on Galaxy.
38 | run: >-
39 | ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }}
40 | $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)
41 |
--------------------------------------------------------------------------------
/roles/firewall/tasks/allow.yml:
--------------------------------------------------------------------------------
# When switching SSH to a non-standard port, keep stock port 22 open during
# the transition so a failed change cannot lock us out of the host.
- name: "Allow setup SSH fallback (port 22)"
  ansible.builtin.ufw:
    rule: allow
    port: 22
    proto: tcp
  when:
    - firewall_setup_mode | default(false) | bool
    - ssh_port | default(22) | int != 22

- name: "Allow SSH on custom port"
  ansible.builtin.ufw:
    rule: allow
    port: "{{ ssh_port | default(22) }}"
    proto: tcp

# Accept either variable prefix: the role's defaults declare ufw_allowed_*,
# while these tasks historically read firewall_allowed_* (which playbooks may
# still set). firewall_* wins when both are defined; previously the defaults
# were never applied because only firewall_* was consulted.
- name: "Allow TCP ports"
  ansible.builtin.ufw:
    rule: allow
    port: "{{ item }}"
    proto: tcp
  loop: "{{ firewall_allowed_tcp_ports | default(ufw_allowed_tcp_ports) | default([]) }}"
  loop_control:
    label: "{{ item }}"

- name: "Allow UDP ports"
  ansible.builtin.ufw:
    rule: allow
    port: "{{ item }}"
    proto: udp
  loop: "{{ firewall_allowed_udp_ports | default(ufw_allowed_udp_ports) | default([]) }}"
  loop_control:
    label: "{{ item }}"

# Application profiles registered with ufw (e.g. "OpenSSH").
- name: "Allow named services"
  ansible.builtin.ufw:
    rule: allow
    name: "{{ item }}"
  loop: "{{ firewall_allowed_services | default([]) }}"
  loop_control:
    label: "{{ item }}"
44 |
--------------------------------------------------------------------------------
/roles/hostname/tasks/main.yml:
--------------------------------------------------------------------------------
# Apply the configured hostname. The result is registered so that the
# systemd-hostnamed restart below only happens on an actual change —
# previously it restarted (and notified "Reboot if needed") on every run.
- name: Set system hostname
  ansible.builtin.hostname:
    name: "{{ hostname_name }}"
  register: hostname_change
  when:
    - hostname_set | bool
    - hostname_name is defined

- name: Configure /etc/hostname
  ansible.builtin.copy:
    dest: /etc/hostname
    owner: root
    group: root
    mode: '0644'
    content: "{{ hostname_name }}\n"
  when:
    - hostname_set | bool
    - hostname_name is defined

# Rewrites /etc/hosts entirely: loopback entries plus any extra lines from
# hosts_file_content (must be defined when update_hosts_file is true).
- name: Configure /etc/hosts if requested
  ansible.builtin.copy:
    dest: /etc/hosts
    owner: root
    group: root
    mode: '0644'
    content: |
      127.0.0.1 localhost
      127.0.1.1 {{ hostname_name }} {{ hostname_fqdn | default('') }}
      {% for entry in hosts_file_content %}
      {{ entry }}
      {% endfor %}
  when:
    - hostname_set | bool
    - update_hosts_file | bool

- name: Restart hostname service if needed
  ansible.builtin.service:
    name: systemd-hostnamed
    state: restarted
  when:
    - hostname_set | bool
    - ansible_service_mgr == 'systemd'
    - hostname_change is changed
  notify: Reboot if needed
43 |
44 |
--------------------------------------------------------------------------------
/roles/docker/tasks/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check current docker-compose version.
3 | command: "{{ docker_compose_path }} --version"
4 | register: docker_compose_vsn
5 | check_mode: false
6 | changed_when: false
7 | failed_when: false
8 |
9 | - set_fact:
10 | docker_compose_current_version: "{{ docker_compose_vsn.stdout | regex_search('(\\d+(\\.\\d+)+)') }}"
11 | when: >
12 | docker_compose_vsn.stdout is defined
13 | and (docker_compose_vsn.stdout | length > 0)
14 |
15 | - name: Delete existing docker-compose version if it's different.
16 | file:
17 | path: "{{ docker_compose_path }}"
18 | state: absent
19 | when: >
20 | docker_compose_current_version is defined
21 | and (docker_compose_version | regex_replace('v', '')) not in docker_compose_current_version
22 |
23 | - name: Install Docker Compose (if configured).
24 | get_url:
25 | url: "{{ docker_compose_url }}"
26 | dest: "{{ docker_compose_path }}"
27 | mode: 0755
28 | when: >
29 | (docker_compose_current_version is not defined)
30 | or (docker_compose_current_version | length == 0)
31 | or (docker_compose_current_version is version((docker_compose_version | regex_replace('v', '')), '<'))
32 |
--------------------------------------------------------------------------------
/roles/fail2ban/tasks/main.yml:
--------------------------------------------------------------------------------
# fail2ban/tasks/main.yml
# Installs fail2ban, deploys jail.local from a template, and keeps the
# service enabled. Every task honours the fail2ban_enabled switch.

- name: "Cancel if Fail2Ban is not enabled"
  debug:
    msg: "⚠️ Skipping Fail2Ban setup because 'fail2ban_enabled' is false."
  when: not fail2ban_enabled
  tags: [fail2ban]

- name: Install Fail2Ban
  apt:
    name: fail2ban
    state: present
    update_cache: yes
  when: fail2ban_enabled
  tags: [fail2ban]

# Started before jail.local exists on first run; the handler restart after
# the template task picks up the local configuration.
- name: Ensure fail2ban service is enabled and running
  systemd:
    name: "{{ fail2ban_service_name }}"
    enabled: true
    state: started
  when: fail2ban_enabled
  tags: [fail2ban]

- name: Create fail2ban local jail configuration
  template:
    src: jail.local.j2
    dest: /etc/fail2ban/jail.local
    owner: root
    group: root
    mode: '0644'
  notify: Restart Fail2Ban
  when: fail2ban_enabled
  tags: [fail2ban]

# NOTE(review): editing the templated file with lineinfile means the template
# task can overwrite this line on the next run and both tasks then report
# "changed" on every run — consider rendering ignoreip inside jail.local.j2.
- name: Configure ignoreip if provided
  lineinfile:
    path: /etc/fail2ban/jail.local
    regexp: '^ignoreip ='
    line: "ignoreip = {{ fail2ban_ignoreip | join(' ') }}"
    state: present
  when:
    - fail2ban_enabled
    - fail2ban_ignoreip | length > 0
  tags: [fail2ban]
47 |
--------------------------------------------------------------------------------
/roles/grafana-agent/templates/agent-config.yml.j2:
--------------------------------------------------------------------------------
1 |
server:
  # minimum log verbosity
  log_level: {{ grafana_agent_log_level | default('info') }}

metrics:
  # where to WAL state for resilience
  wal_directory: {{ grafana_agent_data_path }}/wal
  configs:
    - name: default
      # push scraped samples here
      remote_write:
        - url: {{ prometheus_remote_write_url }}
      # NOTE(review): `__path__` labels belong to the *logs* (promtail-style)
      # scrape config; in the metrics section they have no effect and these
      # jobs would try to scrape 'localhost' as an HTTP target — confirm
      # whether this section was copied from the logs config by mistake.
      scrape_configs:
{% for path in log_paths %}
      - job_name: log_{{ loop.index }}
        static_configs:
          - targets: ['localhost']
            labels:
              job: varlogs
              __path__: {{ path }}
{% endfor %}

logs:
  # keep track of where you left off
  positions_directory: {{ grafana_agent_data_path }}/positions
  configs:
    - name: default
      # push logs into Loki
      clients:
        - url: {{ loki_push_url }}
      target_config:
        sync_period: 10s
      scrape_configs:
{% for path in log_paths %}
      - job_name: log_{{ loop.index }}
        static_configs:
          - targets: ['localhost']
            labels:
              job: varlogs
              __path__: {{ path }}
{% endfor %}
44 |
--------------------------------------------------------------------------------
/roles/logrotate/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Installs logrotate and deploys one config snippet per entry in
# logrotate_items. Every task is gated on logrotate_enabled.

- name: Install logrotate package
  apt:
    name: logrotate
    state: present
    update_cache: yes
  when: logrotate_enabled | bool

- name: Ensure config directory exists
  file:
    path: "{{ logrotate_config_dir }}"
    state: directory
    owner: root
    group: root
    mode: "0755"
  when: logrotate_enabled | bool

- name: Skip if no custom logrotate items defined
  debug:
    msg: "No logrotate_items defined; nothing to deploy"
  when:
    - logrotate_enabled | bool
    - logrotate_items | default([]) | length == 0

- name: Remove default 'system' snippet to avoid duplicate entries
  file:
    path: "{{ logrotate_config_dir }}/system"
    state: absent
  when: logrotate_enabled | bool

# BUG FIX: the loop and when below previously used bare
# logrotate_items, which raised an undefined-variable error when the
# list was not set — even though the guard task above already used
# `| default([])`. Looping over an (defaulted) empty list is a no-op,
# so the explicit length check is no longer needed.
- name: Deploy logrotate config for each item
  template:
    src: item.j2
    dest: "{{ logrotate_config_dir }}/{{ item.name }}"
    owner: root
    group: root
    mode: "0644"
  loop: "{{ logrotate_items | default([]) }}"
  loop_control:
    label: "{{ item.name }}"
  when: logrotate_enabled | bool

- name: Enable and start logrotate timer
  systemd:
    name: logrotate.timer
    enabled: yes
    state: started
  when: logrotate_enabled | bool
51 |
--------------------------------------------------------------------------------
/playbooks/monitoring.yml:
--------------------------------------------------------------------------------
# Configures detailed logging, metrics and monitoring for all processes.
#
# Each component is gated by its own *_enabled flag and wrapped in
# ignore_errors so one failing monitoring role does not abort the rest
# of the stack.
- name: 'Monitoring'
  hosts: all
  become: true
  tags: monitoring

  tasks:
    # BUG FIX: include_role is a *dynamic* include, so tags on the task
    # do not propagate to the tasks inside the role. Without
    # `apply: tags:` a run limited with e.g. `--tags loki` included the
    # role but then skipped every task in it.
    - include_role:
        name: monit
        apply:
          tags: [monit]
      when: monit_enabled | default(true)
      tags: [monit]
      ignore_errors: true

    - include_role:
        name: logrotate
        apply:
          tags: [logrotate]
      when: logrotate_enabled | default(true)
      tags: [logrotate]
      ignore_errors: true

    - include_role:
        name: loki
        apply:
          tags: [loki]
      when: loki_enabled | default(true)
      tags: [loki]
      ignore_errors: true

    - include_role:
        name: grafana-agent
        apply:
          tags: [grafana-agent]
      when: grafana_agent_enabled | default(true)
      tags: [grafana-agent]
      ignore_errors: true

    - include_role:
        name: blackbox_exporter
        apply:
          tags: [blackbox_exporter]
      when: blackbox_exporter_enabled | default(true)
      tags: [blackbox_exporter]
      ignore_errors: true

    - include_role:
        name: node_exporter
        apply:
          tags: [node_exporter]
      when: node_exporter_enabled | default(true)
      tags: [node_exporter]
      ignore_errors: true

    - include_role:
        name: prometheus
        apply:
          tags: [prometheus]
      when: prometheus_enabled | default(true)
      tags: [prometheus]
      ignore_errors: true

    - include_role:
        name: grafana
        apply:
          tags: [grafana]
      when: grafana_enabled | default(true)
      tags: [grafana]
      ignore_errors: true
55 |
--------------------------------------------------------------------------------
/roles/lynis/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Installs Lynis — either from the distro repository or from a pinned
# upstream tarball — and optionally runs a one-off security audit.

- name: "Skip Lynis setup if not enabled"
  ansible.builtin.debug:
    msg: "⚠️ Skipping Lynis setup because lynis_enabled is false"
  when: not lynis_enabled
  tags: [lynis]

- name: "Install Lynis via package manager"
  ansible.builtin.package:
    name: "{{ lynis_package_name }}"
    state: present
  when: lynis_enabled and lynis_install_from_repo
  tags: [lynis]

# IMPROVEMENT: the release version was hard-coded in the URL; it is now
# overridable via lynis_version while keeping 3.0.8 as the default.
- name: "Download Lynis manually if not installing via package manager"
  ansible.builtin.get_url:
    url: "https://downloads.cisofy.com/lynis/lynis-{{ lynis_version | default('3.0.8') }}.tar.gz"
    dest: /tmp/lynis-latest.tar.gz
    mode: '0644'
  when: lynis_enabled and not lynis_install_from_repo
  tags: [lynis]

- name: "Extract Lynis manually"
  ansible.builtin.unarchive:
    src: /tmp/lynis-latest.tar.gz
    dest: /opt/
    remote_src: yes
    # idempotence: the tarball unpacks to /opt/lynis — skip when present
    creates: /opt/lynis
  when: lynis_enabled and not lynis_install_from_repo
  tags: [lynis]

- name: "Run Lynis security audit"
  ansible.builtin.command: >
    lynis audit system
    {% if lynis_scan_verbose %} --verbose {% endif %}
    {% if not lynis_scan_report %} --no-log --no-report {% endif %}
    {{ lynis_scan_custom_options }}
  register: lynis_scan
  # treated as read-only; NOTE(review): lynis may still write its own
  # log/report files unless --no-log/--no-report are passed — confirm.
  changed_when: false
  when: lynis_enabled and run_scan | default(false)
  tags: [lynis]

- name: "Show Lynis report summary"
  ansible.builtin.debug:
    var: lynis_scan.stdout_lines
  when: lynis_enabled and run_scan | default(false)
  tags: [lynis]
48 |
49 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
---
# CI for the geerlingguy.pip role: a yamllint pass plus a Molecule test
# matrix across supported distros. Triggered on pull requests, pushes
# to master, and a weekly schedule.
name: CI
'on':
  pull_request:
  push:
    branches:
      - master
  schedule:
    - cron: "0 4 * * 5"  # weekly: Fridays 04:00 UTC

defaults:
  run:
    # checkout places the repo in this subdirectory (see `path:` below)
    working-directory: 'geerlingguy.pip'

jobs:

  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Check out the codebase.
        uses: actions/checkout@v3
        with:
          path: 'geerlingguy.pip'

      - name: Set up Python 3.
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install test dependencies.
        run: pip3 install yamllint

      - name: Lint code.
        run: |
          yamllint .

  molecule:
    name: Molecule
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # one molecule run per container image (selected via
        # MOLECULE_DISTRO in the role's molecule.yml)
        distro:
          - rockylinux9
          - fedora39
          - ubuntu2204
          - ubuntu2004
          - debian12
          - debian11

    steps:
      - name: Check out the codebase.
        uses: actions/checkout@v3
        with:
          path: 'geerlingguy.pip'

      - name: Set up Python 3.
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install test dependencies.
        run: pip3 install ansible molecule molecule-plugins[docker] docker

      - name: Run Molecule tests.
        run: molecule test
        env:
          PY_COLORS: '1'             # force colored output in CI logs
          ANSIBLE_FORCE_COLOR: '1'
          MOLECULE_DISTRO: ${{ matrix.distro }}
71 |
--------------------------------------------------------------------------------
/roles/node_exporter/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Installs Prometheus node_exporter from the upstream tarball and runs
# it under systemd as a dedicated system user.

- name: "skip node_exporter if disabled"
  # NOTE(review): end_host ends the *whole play* for this host, so any
  # roles listed after this one in the same play are skipped too —
  # confirm that is intended when node_exporter_enabled is false.
  meta: end_host
  when: not (node_exporter_enabled | bool)

- name: ensure prometheus user exists
  ansible.builtin.user:
    name: "{{ node_exporter_user }}"
    system: true
    shell: /usr/sbin/nologin

- name: fetch node_exporter
  ansible.builtin.get_url:
    url: "{{ node_exporter_url }}"
    dest: "/tmp/node_exporter-{{ node_exporter_version }}.tar.gz"
    mode: '0644'  # quoted so YAML does not reinterpret the octal literal

- name: unpack node_exporter
  ansible.builtin.unarchive:
    src: "/tmp/node_exporter-{{ node_exporter_version }}.tar.gz"
    dest: "{{ node_exporter_install_dir }}"
    remote_src: yes
    # drop the versioned top-level directory from the tarball
    extra_opts: ["--strip-components=1"]

- name: install node_exporter binary
  ansible.builtin.file:
    src: "{{ node_exporter_install_dir }}/node_exporter"
    dest: /usr/local/bin/node_exporter
    state: link

- name: create systemd unit for node_exporter
  ansible.builtin.copy:
    dest: /etc/systemd/system/node_exporter.service
    content: |
      [Unit]
      Description=Prometheus Node Exporter
      After=network.target

      [Service]
      User={{ node_exporter_user }}
      Group={{ node_exporter_group }}
      ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }}
      Restart=on-failure

      [Install]
      WantedBy=multi-user.target
  notify: reload systemd

- name: ensure node_exporter is running
  ansible.builtin.systemd:
    name: node_exporter
    # BUG FIX: reload the daemon here as well — the 'reload systemd'
    # handler only fires at the end of the play, after this task would
    # already have tried to start a unit systemd has not yet loaded.
    daemon_reload: true
    enabled: true
    state: started
56 |
--------------------------------------------------------------------------------
/roles/docker/tasks/setup-RedHat.yml:
--------------------------------------------------------------------------------
---
# RedHat-family setup for the docker role: removes obsolete packages,
# installs the Docker yum repo (with optional nightly/test channels),
# and applies RHEL 8 containerd workarounds.

- name: Ensure old versions of Docker are not installed.
  package:
    name: "{{ docker_obsolete_packages }}"
    state: absent

- name: Add Docker GPG key.
  rpm_key:
    key: "{{ docker_yum_gpg_key }}"
    state: present
  when: docker_add_repo | bool

- name: Add Docker repository.
  get_url:
    url: "{{ docker_yum_repo_url }}"
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    owner: root
    group: root
    mode: 0644
  when: docker_add_repo | bool

# The two ini_file tasks below toggle the nightly/test channel sections
# inside the repo file that was just downloaded; both default flags
# come from the role's defaults.
- name: Configure Docker Nightly repo.
  ini_file:
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    section: 'docker-{{ docker_edition }}-nightly'
    option: enabled
    value: '{{ docker_yum_repo_enable_nightly }}'
    mode: 0644
    no_extra_spaces: true
  when: docker_add_repo | bool

- name: Configure Docker Test repo.
  ini_file:
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    section: 'docker-{{ docker_edition }}-test'
    option: enabled
    value: '{{ docker_yum_repo_enable_test }}'
    mode: 0644
    no_extra_spaces: true
  when: docker_add_repo | bool

# RHEL 8 only: swap the distro runc for containerd.io (which bundles
# its own runc) and make sure container-selinux is present first.
- name: Configure containerd on RHEL 8.
  block:
    - name: Ensure runc is not installed.
      package:
        name: runc
        state: absent

    - name: Ensure container-selinux is installed.
      package:
        name: container-selinux
        state: present

    - name: Ensure containerd.io is installed.
      package:
        name: containerd.io
        state: present
  when: ansible_distribution_major_version | int == 8
59 |
--------------------------------------------------------------------------------
/roles/docker/molecule/default/verify.yml:
--------------------------------------------------------------------------------
---
# Molecule verification for the docker role: checks the client binary,
# the systemd service state, and end-to-end container execution with
# the 'hello-world' image.
- name: Verify Docker Role
  hosts: all
  tasks:
    - name: Verify Docker binary is available
      command: docker version
      register: docker_version_result
      changed_when: false
      failed_when: docker_version_result.rc != 0

    - name: Show Docker version details
      debug:
        msg: >
          Docker Version Output:
          {{ docker_version_result.stdout_lines | join('\n') }}

    - name: Verify Docker service is running
      command: systemctl is-active docker
      register: docker_service_status
      when: ansible_service_mgr == 'systemd'
      changed_when: false
      # IDIOM FIX: use the Jinja 'trim' filter instead of calling the
      # Python str.strip() method from inside a template expression.
      failed_when: docker_service_status.stdout | trim != "active"

    - name: Display Docker service status
      debug:
        msg: "Docker service is {{ docker_service_status.stdout | trim }}"
      when: ansible_service_mgr == 'systemd'

    - name: Pull the 'hello-world' image
      command: docker pull hello-world
      register: docker_pull_result
      changed_when: true
      failed_when: docker_pull_result.rc != 0

    - name: Show result of pulling the 'hello-world' image
      debug:
        msg: >
          Pulling 'hello-world' completed with output:
          {{ docker_pull_result.stdout_lines | join('\n') }}

    - name: Run a test container (hello-world)
      command: docker run --rm hello-world
      register: docker_run_result
      changed_when: true
      failed_when: docker_run_result.rc != 0

    - name: Display test container output
      debug:
        msg: >
          Running 'hello-world' container completed with output:
          {{ docker_run_result.stdout_lines | join('\n') }}
52 |
--------------------------------------------------------------------------------
/roles/loki/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Downloads a Loki release zip, installs the binary under /opt/loki and
# runs it as a systemd service with config from the loki_config var.

- name: "Loki role disabled or missing config, skipping"
  # NOTE(review): end_host ends the whole play for this host, not just
  # this role — confirm that is the intended skip behavior.
  ansible.builtin.meta: end_host
  when: not loki_enabled | default(false) or loki_config is not defined

- name: "Install dependencies"
  ansible.builtin.apt:
    name:
      - curl
      - tar
      # BUG FIX: the release artifact is a .zip and the unarchive
      # module needs the unzip binary to extract it; it was missing.
      - unzip
    update_cache: true
    state: present

- name: "Create Loki directories"
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - /opt/loki
    - /etc/loki

- name: "Download Loki binary"
  ansible.builtin.get_url:
    url: "{{ loki_url }}"
    dest: /tmp/loki.zip
    mode: '0644'

- name: "Extract Loki binary"
  ansible.builtin.unarchive:
    src: /tmp/loki.zip
    dest: /opt/loki
    remote_src: true

- name: "Rename Loki binary"
  ansible.builtin.command: mv /opt/loki/loki-linux-amd64 /opt/loki/loki
  args:
    creates: /opt/loki/loki  # idempotent: skip once renamed

# BUG FIX: zip extraction does not guarantee the execute bit; make
# sure systemd can actually exec the binary.
- name: "Ensure Loki binary is executable"
  ansible.builtin.file:
    path: /opt/loki/loki
    mode: '0755'

- name: "Deploy loki config"
  ansible.builtin.copy:
    content: "{{ loki_config }}"
    dest: /etc/loki/config.yml
    owner: root
    group: root
    mode: '0644'
  notify: Restart loki

- name: "Install systemd service"
  ansible.builtin.copy:
    src: loki.service
    dest: /etc/systemd/system/loki.service
    mode: '0644'
  notify: Reload systemd

- name: "Ensure loki is enabled and started"
  ansible.builtin.systemd:
    name: loki
    daemon_reload: true  # unit file may have just been written above
    enabled: true
    state: started
  register: loki_systemd

- name: "Verify loki is running"
  ansible.builtin.command: systemctl is-active loki
  register: loki_status
  failed_when: loki_status.stdout != "active"
  changed_when: false
70 |
71 |
72 |
--------------------------------------------------------------------------------
/roles/firewall/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Configures UFW: default policies, allow/deny port lists, an optional
# lockdown of port 22 when SSH runs elsewhere, then enables and
# verifies the firewall.

- name: skip firewall if disabled
  # NOTE(review): end_host ends the whole play for this host — confirm
  # later roles should also be skipped when ufw_enabled is false.
  meta: end_host
  when: not ufw_enabled

- name: ensure ufw is installed
  apt:
    name: ufw
    state: present
    update_cache: true

- name: reset ufw to defaults
  ufw:
    state: reset
  when: ufw_reset

- name: set default incoming policy
  ufw:
    direction: incoming
    policy: "{{ ufw_default_incoming_policy }}"

- name: set default outgoing policy
  ufw:
    direction: outgoing
    policy: "{{ ufw_default_outgoing_policy }}"

- name: allow TCP ports
  ufw:
    rule: allow
    port: "{{ item }}"
    proto: tcp
  loop: "{{ ufw_allowed_tcp_ports }}"
  when: ufw_allowed_tcp_ports is defined

- name: allow UDP ports
  ufw:
    rule: allow
    port: "{{ item }}"
    proto: udp
  loop: "{{ ufw_allowed_udp_ports }}"
  when: ufw_allowed_udp_ports is defined

- name: deny TCP ports
  ufw:
    rule: deny
    port: "{{ item }}"
    proto: tcp
  loop: "{{ ufw_denied_tcp_ports }}"
  when: ufw_denied_tcp_ports is defined

- name: deny UDP ports
  ufw:
    rule: deny
    port: "{{ item }}"
    proto: udp
  loop: "{{ ufw_denied_udp_ports }}"
  when: ufw_denied_udp_ports is defined

# Close the default SSH port once SSH has been moved elsewhere.
- name: lockdown - deny default SSH port 22
  ufw:
    rule: deny
    # BUG FIX: the ufw module's port parameter is a string; a bare
    # YAML integer can fail validation on newer community.general.
    port: "22"
    proto: tcp
  when:
    - ufw_lockdown
    - ssh_port is defined
    - ssh_port | int != 22

- name: enable ufw
  ufw:
    state: enabled
    logging: "{{ ufw_logging }}"

- name: verify ufw is active
  command: ufw status verbose
  register: ufw_status
  changed_when: false
  failed_when: "'Status: active' not in ufw_status.stdout"
79 |
--------------------------------------------------------------------------------
/roles/installs/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Refreshes APT, dist-upgrades the system, then best-effort installs
# the optional package groups (core, networking, monitoring, misc).

- name: "Ensure APT cache is up-to-date"
  ansible.builtin.apt:
    update_cache: yes
    cache_valid_time: 3600

- name: "Upgrade all packages to latest"
  ansible.builtin.apt:
    upgrade: dist
    autoremove: yes
    autoclean: yes

# DRY: the four copy-pasted block/rescue sections are collapsed into a
# single loop over the group variable names. The semantics are kept
# best-effort — a failing group is reported (ignore_errors) but never
# aborts the play — and undefined/empty groups are skipped, exactly as
# the `is defined and length > 0` guards did before.
- name: "Install optional package group (best effort)"
  ansible.builtin.apt:
    name: "{{ lookup('vars', item) }}"
    state: present
  loop:
    - core
    - networking
    - monitoring
    - misc
  when: lookup('vars', item, default=[]) | length > 0
  ignore_errors: true
57 |
--------------------------------------------------------------------------------
/roles/blackbox_exporter/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Installs Prometheus blackbox_exporter from the upstream tarball,
# renders its modules config and runs it under systemd.

- name: "skip blackbox_exporter if disabled"
  # NOTE(review): end_host ends the whole play for this host — confirm
  # later roles should also be skipped when disabled.
  meta: end_host
  when: not (blackbox_exporter_enabled | bool)

- name: fetch blackbox_exporter
  ansible.builtin.get_url:
    url: "{{ blackbox_exporter_url }}"
    dest: "/tmp/blackbox_exporter-{{ blackbox_exporter_version }}.tar.gz"
    mode: '0644'

- name: unpack blackbox_exporter
  ansible.builtin.unarchive:
    src: "/tmp/blackbox_exporter-{{ blackbox_exporter_version }}.tar.gz"
    dest: "{{ blackbox_exporter_install_dir }}"
    remote_src: yes
    extra_opts: ["--strip-components=1"]

- name: install blackbox_exporter binary
  ansible.builtin.file:
    src: "{{ blackbox_exporter_install_dir }}/blackbox_exporter"
    dest: /usr/local/bin/blackbox_exporter
    state: link

# ROBUSTNESS: guard with default({}) so an unset blackbox_modules
# renders an empty modules map instead of raising undefined-variable.
- name: write blackbox modules config
  ansible.builtin.copy:
    dest: /etc/blackbox_exporter.yml
    content: |
      modules:
      {%- for name, module in (blackbox_modules | default({})).items() %}
        {{ name }}:
          prober: {{ module.prober }}
          timeout: "{{ module.timeout }}"
      {%- endfor %}

- name: create systemd unit for blackbox_exporter
  ansible.builtin.copy:
    dest: /etc/systemd/system/blackbox_exporter.service
    content: |
      [Unit]
      Description=Prometheus Blackbox Exporter
      After=network.target

      [Service]
      ExecStart=/usr/local/bin/blackbox_exporter --config.file=/etc/blackbox_exporter.yml --web.listen-address=:{{ blackbox_exporter_port }}
      Restart=on-failure

      [Install]
      WantedBy=multi-user.target
  notify: reload systemd

- name: ensure blackbox_exporter is running
  ansible.builtin.systemd:
    name: blackbox_exporter
    # BUG FIX: reload the daemon here as well — the 'reload systemd'
    # handler only fires at the end of the play, after this task would
    # already have tried to start the freshly written unit.
    daemon_reload: true
    enabled: true
    state: started
57 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
---
# Marks inactive issues/PRs stale after 120 days and closes them 60
# days later, with exemption labels for items that should stay open.
name: Close inactive issues
'on':
  schedule:
    - cron: "55 21 * * 5"  # semi-random time

jobs:
  close-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v8
        with:
          days-before-stale: 120
          days-before-close: 60
          exempt-issue-labels: bug,pinned,security,planned
          exempt-pr-labels: bug,pinned,security,planned
          stale-issue-label: "stale"
          stale-pr-label: "stale"
          # BUG FIX: the messages said "closed in another 30 days"
          # while days-before-close is 60; the text now matches the
          # configured timing.
          stale-issue-message: |
            This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 60 days. Thank you for your contribution!

            Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
          close-issue-message: |
            This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
          stale-pr-message: |
            This pr has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 60 days. Thank you for your contribution!

            Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
          close-pr-message: |
            This pr has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
          repo-token: ${{ secrets.GITHUB_TOKEN }}
35 |
--------------------------------------------------------------------------------
/roles/apparmor/tasks/main.yml:
--------------------------------------------------------------------------------
# Installs AppArmor, deploys/removes profiles listed in
# apparmor_profiles (items: {name, src, state}), and warns about
# malformed entries. The whole block is gated on apparmor_enabled.
- block:

    # 1) Install & start the service
    - name: "Install AppArmor"
      apt:
        name: "{{ apparmor_package }}"
        state: present
        update_cache: yes

    - name: "Ensure AppArmor is running"
      service:
        name: "{{ apparmor_service }}"
        state: started
        enabled: yes

    # 2) If no profiles, just note it and fall through
    - name: "Check if no AppArmor profiles defined"
      debug:
        msg: "ℹ️ No profiles to deploy."
      when: apparmor_profiles | default([]) | length == 0

    # 3) Prep the directory
    - name: "Ensure /etc/apparmor.d exists"
      file:
        path: /etc/apparmor.d
        state: directory
        mode: "0755"

    # 4) Deploy each profile.
    # BUG FIX: '{{ item.name }}' in a task *name* is rendered before
    # the loop variable exists and triggers a warning; per-item context
    # now comes from loop_control.label. Loops also default to [] so an
    # unset apparmor_profiles no longer errors.
    - name: "Deploy AppArmor profiles"
      copy:
        src: "{{ item.src }}"
        dest: "/etc/apparmor.d/{{ item.name }}"
        mode: "0644"
      loop: "{{ apparmor_profiles | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when:
        - (item.state | default('present')) == 'present'
        - item.src is defined
      notify: reload apparmor

    # 5) Remove profiles marked absent
    - name: "Remove AppArmor profiles marked absent"
      file:
        path: "/etc/apparmor.d/{{ item.name }}"
        state: absent
      loop: "{{ apparmor_profiles | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when: (item.state | default('present')) == 'absent'
      notify: reload apparmor

    # 6) Warn about misconfigured entries
    - name: "Skipping malformed profile entries"
      debug:
        msg: "Profile '{{ item.name }}' has no src, skipping."
      loop: "{{ apparmor_profiles | default([]) }}"
      loop_control:
        label: "{{ item.name | default('unnamed') }}"
      when:
        - (item.state | default('present')) == 'present'
        - item.src is not defined

  when: apparmor_enabled | default(true) | bool
60 |
--------------------------------------------------------------------------------
/roles/tailscale/tasks/main.yml:
--------------------------------------------------------------------------------
# Installs Tailscale from the official APT repository and joins the
# tailnet using tailscale_auth_key.

- name: "Check if auth key provided"
  debug:
    msg: >
      "Skipping Tailscale role: 'tailscale_auth_key' is not defined.
      Set it to a valid Tailscale auth key to enable."
  when: (tailscale_auth_key | default('')) == ''

- name: "Add Tailscale GPG key"
  ansible.builtin.apt_key:
    state: present
    url: >-
      https://pkgs.tailscale.com/stable/{{ 'ubuntu' if ansible_distribution|lower
      == 'ubuntu' else 'debian' }}/{{ ansible_distribution_release }}.noarmor.gpg
  when: ansible_facts.os_family == 'Debian'

- name: "Add Tailscale APT repository"
  ansible.builtin.apt_repository:
    repo: >-
      deb https://pkgs.tailscale.com/stable/{{ 'ubuntu' if ansible_distribution|lower
      == 'ubuntu' else 'debian' }} {{ ansible_distribution_release }} main
    state: present
    filename: tailscale.list
  when: ansible_facts.os_family == 'Debian'

- name: "Update apt cache"
  ansible.builtin.apt:
    update_cache: yes
  when: ansible_facts.os_family == 'Debian'

- name: "Install Tailscale"
  ansible.builtin.apt:
    name: tailscale
    state: present

- name: "Enable and start the tailscaled service"
  ansible.builtin.systemd:
    name: tailscaled
    enabled: yes
    state: started
    daemon_reload: yes

# BUG FIXES in this task:
#  * '{%- ... -%}' whitespace trimming in the folded scalar glued the
#    optional flags onto the previous argument (e.g.
#    '--hostname=x--advertise-routes=...'); plain tags keep the spaces.
#  * the CLI flag for tags is '--advertise-tags', not '--tags'.
#  * the join is now skipped entirely when no auth key was supplied —
#    previously the warning task above was informational only and the
#    command still ran with an empty --authkey.
#  * changed_when compared against the double-quoted string
#    "'already up to date'" (literal quotes included), so it never
#    matched and the task was always reported changed.
- name: "Join Tailnet with tailscale up"
  ansible.builtin.command: >
    tailscale up
    --authkey={{ tailscale_auth_key }}
    --hostname={{ tailscale_hostname }}
    {% if tailscale_advertise_routes | length > 0 %}
    --advertise-routes={{ tailscale_advertise_routes | join(',') }}
    {% endif %}
    {% if tailscale_advertise_exit_node | bool %}
    --advertise-exit-node
    {% endif %}
    {% if tailscale_tags | length > 0 %}
    --advertise-tags={{ tailscale_tags | join(',') }}
    {% endif %}
  when: (tailscale_auth_key | default('')) != ''
  register: tailscale_up
  changed_when: tailscale_up.rc == 0 and 'already up to date' not in tailscale_up.stdout
58 |
--------------------------------------------------------------------------------
/roles/prometheus/defaults/main.yml:
--------------------------------------------------------------------------------
---
# whether to run this role
prometheus_enabled: true

# which version of Prometheus to install
prometheus_version: "2.46.0"

# download URL for the official Prometheus release tarball
prometheus_download_url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz

# where to install Prometheus
prometheus_install_dir: "/opt/prometheus-{{ prometheus_version }}"

# where to put config & rules
prometheus_config_dir: "/etc/prometheus"

# where Prometheus will store its TSDB
prometheus_data_dir: "/var/lib/prometheus"

# system user/group for the Prometheus process
prometheus_user: prometheus
prometheus_group: prometheus

# minimal scrape interval
prometheus_global_scrape_interval: "15s"

prometheus_listen_address: "0.0.0.0"
prometheus_port: 9091

# Scrape jobs. ROBUSTNESS: ports owned by other roles are defaulted so
# this file still renders when those roles (and their defaults) are not
# loaded on a host.
prometheus_scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:{{ prometheus_port }}"]

  - job_name: "node_exporter"
    static_configs:
      - targets: ["localhost:{{ node_exporter_port | default(9100) }}"]

  - job_name: "cadvisor"
    static_configs:
      - targets: ["localhost:{{ cadvisor_port | default(8080) }}"]

  - job_name: "blackbox_http"
    metrics_path: /probe
    params:
      module: ["http_2xx"]
    static_configs:
      - targets:
          # NOTE(review): probing the SSH port with the http_2xx module
          # will always fail (SSH is not HTTP) — confirm whether this
          # target was meant to use a tcp_connect module instead.
          - "http://localhost:{{ ssh_port | default(22) }}"
          - "http://localhost:{{ prometheus_port }}"
    relabel_configs:
      - source_labels: ["__address__"]
        target_label: __param_target
      - source_labels: ["__param_target"]
        target_label: instance
      - target_label: "__address__"
        replacement: "localhost:9115"  # blackbox_exporter default port

  - job_name: "alertmanager"
    static_configs:
      - targets: ["localhost:{{ alertmanager_port | default(9093) }}"]
68 |
--------------------------------------------------------------------------------
/playbooks/pre-checks.yml:
--------------------------------------------------------------------------------
# Pre-flight checks: bootstrap Python, gather facts, print a system
# summary, and assert the required inventory variables are present.
- name: "Initializing: Running pre-checks"
  hosts: all
  # BUG FIX: facts were gathered automatically *before* the Python
  # bootstrap task ran, so a host without Python failed during fact
  # gathering. Gather manually after bootstrapping instead.
  gather_facts: false
  become: true
  remote_user: "{{ new_user }}"
  pre_tasks:
    - name: Ensure Python is available
      raw: test -e /usr/bin/python3 || (apt update && apt install -y python3)
      changed_when: false

    - name: Gather facts
      ansible.builtin.setup:

    - name: "System summary"
      ansible.builtin.debug:
        msg: |
          ℹ️ Check system info
          OS: {{ ansible_facts.lsb.description
            | default(ansible_facts.distribution ~ " " ~ ansible_facts.distribution_version) }}
          Host: {{ ansible_facts.fqdn | default(ansible_facts.hostname) }}
          Arch: {{ (ansible_facts.architecture ~ ' ' ~ ansible_facts.system) | default('n/a', true) }}
          Uptime: {{ ((ansible_facts.uptime_seconds|default(0)|int) / 3600) | round(1) }}h
          IP: {{ ansible_facts.default_ipv4.address | default("n/a") }}
          Time: {{ ansible_date_time.iso8601 | default(ansible_date_time.date ~ " " ~ ansible_date_time.time, true) }}
          CPU: {{ ansible_facts.processor_count | default("n/a") }} cores
          Mem: {{ ansible_facts.memfree_mb | default("n/a") }}MB used out of {{ ansible_facts.memtotal_mb | default("n/a") }}MB
          Virt: {{ ansible_facts.virtualization_type | default("none") }}
          Gate: {{ ansible_facts.default_ipv4.gateway | default("n/a") }}
          Interfaces: {{ ansible_facts.interfaces | default([]) | join(', ') }}
      when: ansible_facts is defined

    - name: Warn if user_password is not set
      ansible.builtin.debug:
        msg: |
          ⚠️ WARNING: 'user_password' is not defined!
          User {{ new_user }} will not be able to use sudo without a password.
          SSH root login will not be disabled until this is fixed.
      when: user_password is not defined

    - name: "Ensure essential variables are defined"
      assert:
        that:
          - user_password is defined
          - new_user is defined
          - user_ssh_key_path is defined
        fail_msg: |
          You must define user_password, new_user and user_ssh_key_path variables
          Put them in: inventories/production/host_vars/{{ ansible_hostname }}.yml
          or (if shared) in: inventories/production/group_vars/all.yml
47 |
--------------------------------------------------------------------------------
/roles/docker/tasks/setup-Debian.yml:
--------------------------------------------------------------------------------
---
# Debian-family setup for the docker role: migrates away from the
# legacy trusted.gpg.d key location, removes obsolete packages, then
# installs the signed-by keyring and apt repository.

# Legacy-key cleanup: remove the old key, the old auto-generated source
# list, and the repo entry that referenced the old key, so the
# keyrings-based entry added below is the only one left.
- name: Ensure apt key is not present in trusted.gpg.d
  ansible.builtin.file:
    path: /etc/apt/trusted.gpg.d/docker.asc
    state: absent

- name: Ensure old apt source list is not present in /etc/apt/sources.list.d
  ansible.builtin.file:
    path: "/etc/apt/sources.list.d/download_docker_com_linux_{{ docker_apt_ansible_distribution | lower }}.list"
    state: absent

- name: Ensure the repo referencing the previous trusted.gpg.d key is not present
  apt_repository:
    repo: "deb [arch={{ docker_apt_arch }} signed-by=/etc/apt/trusted.gpg.d/docker.asc] {{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
    state: absent
    filename: "{{ docker_apt_filename }}"
    update_cache: true
  when: docker_add_repo | bool

- # See https://docs.docker.com/engine/install/debian/#uninstall-old-versions
  name: Ensure old versions of Docker are not installed.
  package:
    name: "{{ docker_obsolete_packages }}"
    state: absent

- name: Ensure dependencies are installed.
  apt:
    name:
      - apt-transport-https
      - ca-certificates
    state: present
  when: docker_add_repo | bool

- name: Ensure directory exists for /etc/apt/keyrings
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: "0755"

- name: Add Docker apt key.
  ansible.builtin.get_url:
    url: "{{ docker_apt_gpg_key }}"
    dest: /etc/apt/keyrings/docker.asc
    mode: "0644"
    force: false
    checksum: "{{ docker_apt_gpg_key_checksum | default(omit) }}"
  register: add_repository_key
  # key-download failures can be tolerated so the apt-key fallback
  # below gets a chance to run on older systems
  ignore_errors: "{{ docker_apt_ignore_key_error }}"
  when: docker_add_repo | bool

# Fallback path for old systems whose TLS stack lacks SNI support;
# NOTE(review): apt-key is deprecated on modern Debian/Ubuntu — this
# path presumably only triggers on legacy releases, confirm.
- name: Ensure curl is present (on older systems without SNI).
  package: name=curl state=present
  when: add_repository_key is failed and docker_add_repo | bool

- name: Add Docker apt key (alternative for older systems without SNI).
  shell: >
    curl -sSL {{ docker_apt_gpg_key }} | apt-key add -
  when: add_repository_key is failed and docker_add_repo | bool

- name: Add Docker repository.
  apt_repository:
    repo: "{{ docker_apt_repository }}"
    state: present
    filename: "{{ docker_apt_filename }}"
    update_cache: true
  when: docker_add_repo | bool
67 |
--------------------------------------------------------------------------------
/roles/geerlingguy.pip/README.md:
--------------------------------------------------------------------------------
1 | # Ansible Role: Pip (for Python)
2 |
3 | [](https://github.com/geerlingguy/ansible-role-pip/actions?query=workflow%3ACI)
4 |
5 | An Ansible Role that installs [Pip](https://pip.pypa.io) on Linux.
6 |
7 | ## Requirements
8 |
9 | On RedHat/CentOS, you may need to have EPEL installed before running this role. You can use the `geerlingguy.repo-epel` role if you need a simple way to ensure it's installed.
10 |
11 | ## Role Variables
12 |
13 | Available variables are listed below, along with default values (see `defaults/main.yml`):
14 |
15 | pip_package: python3-pip
16 |
17 | The name of the package to install to get `pip` on the system. For older systems that don't have Python 3 available, you can set this to `python-pip`.
18 |
19 | pip_executable: pip3
20 |
21 | The role will try to autodetect the pip executable based on the `pip_package` (e.g. `pip` for Python 2 and `pip3` for Python 3). You can also override this explicitly, e.g. `pip_executable: pip3.6`.
22 |
23 | pip_install_packages: []
24 |
25 | A list of packages to install with pip. Examples below:
26 |
27 | pip_install_packages:
28 | # Specify names and versions.
29 | - name: docker
30 | version: "1.2.3"
31 | - name: awscli
32 | version: "1.11.91"
33 |
34 | # Or specify bare packages to get the latest release.
35 | - docker
36 | - awscli
37 |
38 | # Or uninstall a package.
39 | - name: docker
40 | state: absent
41 |
42 | # Or update a package to the latest version.
43 | - name: docker
44 | state: latest
45 |
46 | # Or force a reinstall.
47 | - name: docker
48 | state: forcereinstall
49 |
50 | # Or install a package in a particular virtualenv.
51 | - name: docker
52 | virtualenv: /my_app/venv
53 |
54 | # Or pass through any extra arguments.
55 | - name: my_special_package_from_my_special_repo
56 | extra_args: --extra-index-url https://my-domain/pypi/pypi-master/simple
57 |
58 | ## Dependencies
59 |
60 | None.
61 |
62 | ## Example Playbook
63 |
64 | - hosts: all
65 |
66 | vars:
67 | pip_install_packages:
68 | - name: docker
69 | - name: awscli
70 |
71 | roles:
72 | - geerlingguy.pip
73 |
74 | ## License
75 |
76 | MIT / BSD
77 |
78 | ## Author Information
79 |
80 | This role was created in 2017 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
81 |
--------------------------------------------------------------------------------
/roles/grafana/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Skip Grafana role if disabled
2 | ansible.builtin.meta: end_host
3 | when: not (grafana_enabled | bool)
4 |
5 | # Guard: either variable missing should trigger the warning and skip.
6 | # A YAML `when` list ANDs its items, so the two checks must be OR-ed —
7 | # otherwise the skip only fired when BOTH vars were missing.
8 | - name: Warn and skip if required vars are missing
9 |   ansible.builtin.debug:
10 |     msg: >
11 |       Skipping Grafana role: you must define
12 |       grafana_repo_key_url and grafana_repo
13 |   when:
14 |     - grafana_enabled | bool
15 |     - (grafana_repo_key_url is not defined) or (grafana_repo is not defined)
16 |
17 | - name: End Grafana role because essentials are missing
18 |   ansible.builtin.meta: end_host
19 |   when:
20 |     - grafana_enabled | bool
21 |     - (grafana_repo_key_url is not defined) or (grafana_repo is not defined)
21 |
22 | - name: Ensure apt-transport-https, wget and gnupg are installed
23 | ansible.builtin.apt:
24 | name:
25 | - apt-transport-https
26 | - wget
27 | - gnupg
28 | state: present
29 | update_cache: yes
30 |
31 | # apt_key is deprecated; fetch the ASCII-armored key into /etc/apt/keyrings
32 | # instead (same pattern the docker role in this repo uses for docker.asc).
33 | # NOTE(review): armored .asc files in signed-by need a reasonably modern apt
34 | # (Debian 12 / Ubuntu 22.04+) — confirm against the target distributions.
35 | - name: Ensure directory exists for /etc/apt/keyrings
36 |   ansible.builtin.file:
37 |     path: /etc/apt/keyrings
38 |     state: directory
39 |     mode: "0755"
40 |
41 | - name: Add Grafana GPG key
42 |   ansible.builtin.get_url:
43 |     url: "{{ grafana_repo_key_url }}"
44 |     dest: /etc/apt/keyrings/grafana.asc
45 |     mode: "0644"
46 |
47 | - name: Add Grafana APT repository
48 |   ansible.builtin.apt_repository:
49 |     repo: "deb [signed-by=/etc/apt/keyrings/grafana.asc] https://apt.grafana.com stable main"
50 |     filename: grafana
51 |     state: present
52 |     update_cache: yes
43 |
44 | - name: Install Grafana package
45 | ansible.builtin.apt:
46 | name: "{{ grafana_package }}"
47 | state: latest
48 | update_cache: no
49 |
50 | - name: Deploy custom grafana.ini if requested
51 | ansible.builtin.template:
52 | src: grafana.ini.j2
53 | dest: /etc/grafana/grafana.ini
54 | owner: root
55 | group: root
56 | mode: '0644'
57 | notify: Restart Grafana
58 | when: grafana_ini | length > 0
59 |
60 | - name: "Provision Prometheus & Loki in Grafana"
61 |   ansible.builtin.copy:
62 |     src: provisioning/datasources/
63 |     dest: /etc/grafana/provisioning/datasources/
64 |     owner: root
65 |     group: root
66 |     mode: "0644"  # quoted: a bare 0644 is parsed as a YAML octal int (ansible-lint risky-octal)
67 |   notify: Restart Grafana
68 |
69 | - name: Enable and start Grafana service
70 | ansible.builtin.systemd:
71 | name: "{{ grafana_service }}"
72 | state: started
73 | enabled: yes
74 |
75 | - name: Wait for Grafana HTTP to be available
76 | ansible.builtin.uri:
77 | url: "http://localhost:{{ grafana_port }}/api/health"
78 | status_code: 200
79 | return_content: no
80 | timeout: 10
81 | register: grafana_health
82 | retries: 5
83 | delay: 5
84 | until: grafana_health.status == 200
85 | failed_when: grafana_health.status != 200
86 | changed_when: false
87 |
--------------------------------------------------------------------------------
/playbooks/essentials.yml:
--------------------------------------------------------------------------------
1 | # Basic server setup, to always be run on everything
2 | # Sets up dep repos, users, ssh, time, host, firewall, mail, etc
3 |
4 | # - name: Essentials
5 | # hosts: all
6 | # become: true
7 | # tags: essentials
8 | # roles:
9 | # - role: installs # apt, packages, updates…
10 | # - role: users # user accounts
11 | # - role: directories # server structure
12 | # - role: ssh # SSH hardening
13 | # - role: timezone # timezone & NTP
14 | # - role: hostname # hostname, /etc/hosts
15 | # - role: firewall # UFW
16 | # # - role: mail # postfix
17 |
18 |
19 | - name: 'Essentials: General system configuration'
20 | hosts: all
21 | become: true
22 | tags: essentials
23 |
24 | tasks:
25 | - include_role:
26 | name: installs
27 | when: installs_enabled | default(true)
28 | tags: [ installs ]
29 | ignore_errors: true
30 |
31 | - include_role:
32 | name: users
33 | when: users_enabled | default(true)
34 | tags: [ users ]
35 | ignore_errors: true
36 |
37 | - include_role:
38 | name: directories
39 | when: directories_enabled | default(true)
40 | tags: [ directories ]
41 | ignore_errors: true
42 |
43 | - include_role:
44 | name: timezone
45 | when: timezone_enabled | default(true)
46 | tags: [ timezone ]
47 | ignore_errors: true
48 |
49 | - include_role:
50 | name: hostname
51 | when: hostname_enabled | default(true)
52 | tags: [ hostname ]
53 | ignore_errors: true
54 |
55 | - include_role:
56 | name: firewall
57 | when: firewall_enabled | default(true)
58 | tags: [ firewall ]
59 | ignore_errors: true
60 |
61 | # - include_role:
62 | # name: mail
63 | # when: mail_enabled | default(false)
64 | # tags: [ mail ]
65 |
66 | - name: 'Essentials: SSH config - pre-lockdown'
67 | hosts: all
68 | become: true
69 | roles:
70 | - role: ssh
71 | vars:
72 | ssh_lockdown: false
73 |
74 | - name: 'Essentials: Verify SSH access for user'
75 | hosts: all
76 | remote_user: "{{ new_user }}"
77 | gather_facts: false
78 | tasks:
79 | - name: Attempt to login as {{ new_user }}
80 | ping:
81 | register: ping_result
82 | failed_when: ping_result.ping != 'pong'
83 | - name: Mark this host as ready for lockdown
84 | set_fact:
85 | user_ready: true
86 |
87 | - name: 'Essentials: SSH config - Prevent root login'
88 | hosts: all
89 | become: true
90 | roles:
91 | - role: ssh
92 | vars:
93 | ssh_lockdown: true
94 | when: user_ready | default(false)
95 | tags: essentials
96 |
--------------------------------------------------------------------------------
/roles/maldet/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Check if Maldet should be installed
2 | ansible.builtin.debug:
3 | msg: "⚠️ Skipping Maldet setup because maldet_enabled is false"
4 | when: not maldet_enabled
5 | tags: [maldet]
6 |
7 | - name: Ensure dependencies for Maldet are installed
8 | ansible.builtin.package:
9 | name:
10 | - wget
11 | - tar
12 | - inotify-tools
13 | state: present
14 | when: maldet_enabled
15 | tags: [maldet]
16 |
17 | - name: Download Maldet
18 | ansible.builtin.get_url:
19 | url: https://www.rfxn.com/downloads/maldetect-current.tar.gz
20 | dest: "{{ maldet_download_path }}"
21 | mode: '0644'
22 | when: maldet_enabled
23 | tags: [maldet]
24 |
25 | - name: Extract Maldet
26 | ansible.builtin.unarchive:
27 | src: "{{ maldet_download_path }}"
28 | dest: /tmp/
29 | remote_src: yes
30 | when: maldet_enabled
31 | tags: [maldet]
32 |
33 | - name: Find extracted Maldet directory
34 | ansible.builtin.find:
35 | paths: /tmp/
36 | patterns: "maldetect-*"
37 | file_type: directory
38 | register: maldet_extracted
39 | when: maldet_enabled
40 | tags: [maldet]
41 |
42 | - name: Install Maldet
43 | ansible.builtin.command:
44 | cmd: ./install.sh
45 | chdir: "{{ maldet_extracted.files[0].path }}"
46 | creates: /usr/local/maldetect/maldet
47 | when: maldet_enabled and maldet_extracted.files | length > 0
48 | tags: [maldet]
49 |
50 |
51 | # Apply key=value settings in conf.maldet. Notifies the existing
52 | # "Restart maldet" handler so a running monitor picks up config changes
53 | # (previously the config could change without the service restarting).
54 | - name: Configure Maldet
55 |   ansible.builtin.lineinfile:
56 |     path: /usr/local/maldetect/conf.maldet
57 |     regexp: "^{{ item.key }}="
58 |     line: "{{ item.key }}={{ item.value }}"
59 |     backup: yes
60 |   loop:
61 |     - { key: "email_alert", value: "{{ '1' if maldet_email_alert else '0' }}" }
62 |     - { key: "email_addr", value: "{{ maldet_email_address }}" }
63 |     - { key: "quar_hits", value: "{{ '1' if maldet_quarantine_hits else '0' }}" }
64 |   notify: Restart maldet
65 |   when: maldet_enabled
66 |   tags: [maldet]
63 |
64 | - name: Deploy maldet systemd unit
65 | template:
66 | src: maldet.service.j2
67 | dest: /etc/systemd/system/maldet.service
68 | owner: root
69 | group: root
70 | mode: '0644'
71 | notify:
72 | - Reload systemd
73 | - Restart maldet
74 | when: maldet_enabled
75 | tags: [maldet]
76 |
77 | - name: Create cron job for daily scans (if enabled)
78 | ansible.builtin.cron:
79 | name: "Maldet Daily Scan"
80 | job: "/usr/local/maldetect/maldet --scan-all {{ maldet_scan_paths | join(' ') }}"
81 | user: root
82 | minute: "0"
83 | hour: "2"
84 | when:
85 | - maldet_enabled
86 | - maldet_daily_cron | bool
87 | tags: [maldet]
88 |
89 | - name: Remove Maldet cron if disabled
90 | ansible.builtin.cron:
91 | name: "Maldet Daily Scan"
92 | state: absent
93 | when:
94 | - maldet_enabled
95 | - not maldet_daily_cron | bool
96 | tags: [maldet]
97 |
--------------------------------------------------------------------------------
/roles/prometheus/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Skip Prometheus role if disabled
3 | meta: end_host
4 | when: not prometheus_enabled | bool
5 |
6 | - name: Ensure prometheus group exists
7 | ansible.builtin.group:
8 | name: "{{ prometheus_group }}"
9 | state: present
10 | system: yes
11 |
12 | - name: Ensure prometheus user exists
13 | ansible.builtin.user:
14 | name: "{{ prometheus_user }}"
15 | group: "{{ prometheus_group }}"
16 | create_home: no
17 | shell: /usr/sbin/nologin
18 | system: yes
19 | state: present
20 |
21 | - name: Create installation, config & data directories
22 | ansible.builtin.file:
23 | path: "{{ item }}"
24 | state: directory
25 | owner: "{{ prometheus_user }}"
26 | group: "{{ prometheus_group }}"
27 | mode: "0755"
28 | loop:
29 | - "{{ prometheus_install_dir }}"
30 | - "{{ prometheus_config_dir }}"
31 | - "{{ prometheus_data_dir }}"
32 |
33 | - name: Download Prometheus {{ prometheus_version }}
34 | ansible.builtin.get_url:
35 | url: "{{ prometheus_download_url }}"
36 | dest: "/tmp/prometheus-{{ prometheus_version }}.tar.gz"
37 | mode: "0644"
38 | retries: 3
39 | delay: 5
40 | register: dl
41 | until: dl is succeeded
42 |
43 | - name: Extract Prometheus binaries
44 | ansible.builtin.unarchive:
45 | src: "/tmp/prometheus-{{ prometheus_version }}.tar.gz"
46 | dest: "{{ prometheus_install_dir }}" # ← now using the versioned dir
47 | remote_src: true
48 | extra_opts: ["--strip-components=1"]
49 | notify: Link Prometheus binaries
50 |
51 | - name: Deploy prometheus.yml
52 | ansible.builtin.template:
53 | src: "prometheus.yml.j2"
54 | dest: "{{ prometheus_config_dir }}/prometheus.yml"
55 | owner: "{{ prometheus_user }}"
56 | group: "{{ prometheus_group }}"
57 | mode: "0644"
58 | notify: Restart Prometheus
59 |
60 | - name: Deploy systemd unit for Prometheus
61 | ansible.builtin.template:
62 | src: "prometheus.service.j2"
63 | dest: "/etc/systemd/system/prometheus.service"
64 | owner: root
65 | group: root
66 | mode: "0644"
67 | notify:
68 | - Reload systemd
69 | - Restart Prometheus
70 |
71 | - name: Enable and start Prometheus service
72 | ansible.builtin.systemd:
73 | name: prometheus
74 | enabled: yes
75 | state: started
76 |
77 | - name: Wait for Prometheus ready endpoint
78 | ansible.builtin.uri:
79 | url: "http://localhost:{{ prometheus_port }}/-/ready"
80 | status_code: 200
81 | return_content: no
82 | timeout: 5
83 | register: prom_ready
84 | retries: 5
85 | delay: 3
86 | until: prom_ready.status == 200
87 | failed_when: prom_ready.status != 200
88 | changed_when: false
89 |
90 | # Final sanity check: ask systemd directly whether the unit is active.
91 | - name: Verify Prometheus service is active
92 |   ansible.builtin.command: systemctl is-active prometheus
93 |   register: prometheus_active_state
94 |   failed_when: prometheus_active_state.stdout != "active"
95 |   changed_when: false
96 |
--------------------------------------------------------------------------------
/roles/timezone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure timezone is set
2 | ansible.builtin.timezone:
3 | name: "{{ timezone_name }}"
4 | when: timezone_name is defined
5 |
6 | - name: Install Chrony if selected
7 | ansible.builtin.package:
8 | name: chrony
9 | state: present
10 | when: ntp_enabled and ntp_mode == "chrony"
11 |
12 | - name: Install NTP if selected
13 | ansible.builtin.package:
14 | name: ntp
15 | state: present
16 | when: ntp_enabled and ntp_mode == "ntp"
17 |
18 |
19 | # Write /etc/ntp.conf only when ntpd is the selected implementation.
20 | # Without the ntp_mode guard this task also overwrote ntp.conf when the
21 | # role was configured for chrony or timesyncd.
22 | - name: Configure custom NTP servers if provided
23 |   ansible.builtin.copy:
24 |     dest: /etc/ntp.conf
25 |     owner: root
26 |     group: root
27 |     mode: '0644'
28 |     content: |
29 |       driftfile /var/lib/ntp/ntp.drift
30 |       restrict default kod nomodify notrap nopeer noquery
31 |       restrict 127.0.0.1
32 |       restrict ::1
33 |       {% if ntp_servers %}
34 |       {% for server in ntp_servers %}
35 |       server {{ server }} iburst
36 |       {% endfor %}
37 |       {% else %}
38 |       # No custom NTP servers specified, using system defaults.
39 |       {% endif %}
40 |   when:
41 |     - ntp_enabled | bool
42 |     - ntp_mode == "ntp"
43 |     - ntp_servers | length > 0
44 |   notify: Restart NTP
41 |
42 | - name: Ensure Chrony service is running and enabled
43 | ansible.builtin.service:
44 | name: "{{ ntp_service_chrony }}"
45 | enabled: true
46 | state: started
47 | when: (ntp_enabled | default(false)) and (ntp_mode | default('timesyncd') == 'chrony')
48 |
49 | - name: Ensure NTP service is running and enabled
50 | ansible.builtin.service:
51 | name: "{{ ntp_service_ntp }}"
52 | enabled: true
53 | state: started
54 | when: (ntp_enabled | default(false)) and (ntp_mode | default('timesyncd') == 'ntp')
55 |
56 | - name: Ensure timesyncd service is running and enabled
57 | ansible.builtin.service:
58 | name: "{{ ntp_service_timesyncd }}"
59 | enabled: true
60 | state: started
61 | when: (ntp_enabled | default(false)) and (ntp_mode | default('timesyncd') == 'timesyncd')
62 |
63 | - name: Force immediate time sync (timesyncd)
64 | ansible.builtin.systemd:
65 | name: systemd-timesyncd
66 | state: restarted
67 | when:
68 | - sync_time_now | bool
69 | - ntp_enabled | bool
70 | - ntp_mode == 'timesyncd'
71 |
72 | - name: Force immediate time sync (ntp)
73 | ansible.builtin.command: ntpd -gq
74 | changed_when: true
75 | when:
76 | - sync_time_now | bool
77 | - ntp_enabled | bool
78 | - ntp_mode == 'ntp'
79 |
80 | - name: Force immediate time sync (chrony)
81 | ansible.builtin.command: chronyc makestep
82 | changed_when: true
83 | when:
84 | - sync_time_now | bool
85 | - ntp_enabled | bool
86 | - ntp_mode == 'chrony'
87 |
88 | - name: Immediate sync using ntpdate as fallback
89 | ansible.builtin.command: ntpdate pool.ntp.org
90 | when:
91 | - sync_time_now | bool
92 | - ntp_enabled | bool
93 | - ntp_mode == "ntp"
94 | - ntpq_output is defined
95 | - ntpq_output.stdout is search("No association ID")
96 |
97 |
--------------------------------------------------------------------------------
/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition).
3 | docker_edition: 'ce'
4 | docker_packages:
5 | - "docker-{{ docker_edition }}"
6 | - "docker-{{ docker_edition }}-cli"
7 | - "docker-{{ docker_edition }}-rootless-extras"
8 | - "containerd.io"
9 | - docker-buildx-plugin
10 | docker_packages_state: present
11 | docker_obsolete_packages:
12 | - docker
13 | - docker.io
14 | - docker-engine
15 | - docker-doc
16 | - docker-compose
17 | - docker-compose-v2
18 | - podman-docker
19 | - containerd
20 | - runc
21 |
22 | # Service options.
23 | docker_service_manage: true
24 | docker_service_state: started
25 | docker_service_enabled: true
26 | docker_restart_handler_state: restarted
27 |
28 | # Docker Compose Plugin options.
29 | docker_install_compose_plugin: true
30 | docker_compose_package: docker-compose-plugin
31 | docker_compose_package_state: present
32 |
33 | # Docker Compose options.
34 | docker_install_compose: false
35 | docker_compose_version: "v2.32.1"
36 | docker_compose_arch: "{{ ansible_architecture }}"
37 | docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}"
38 | docker_compose_path: /usr/local/bin/docker-compose
39 |
40 | # Enable repo setup
41 | docker_add_repo: true
42 |
43 | # Docker repo URL.
44 | docker_repo_url: https://download.docker.com/linux
45 |
46 | # Used only for Debian/Ubuntu/Pop!_OS/Linux Mint. Switch 'stable' to 'nightly' if needed.
47 | docker_apt_release_channel: stable
48 | # docker_apt_ansible_distribution is a workaround for Ubuntu variants which can't be identified as such by Ansible,
49 | # and is only necessary until Docker officially supports them.
50 | docker_apt_ansible_distribution: "{{ 'ubuntu' if ansible_distribution in ['Pop!_OS', 'Linux Mint'] else ansible_distribution }}"
51 | docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'armhf' if ansible_architecture == 'armv7l' else 'amd64' }}"
52 | docker_apt_repository: "deb [arch={{ docker_apt_arch }} signed-by=/etc/apt/keyrings/docker.asc] {{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
53 | docker_apt_ignore_key_error: true
54 | docker_apt_gpg_key: "{{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }}/gpg"
55 | docker_apt_gpg_key_checksum: "sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570"
56 | docker_apt_filename: "docker"
57 |
58 | # Used only for RedHat/CentOS/Fedora.
59 | docker_yum_repo_url: "{{ docker_repo_url }}/{{ (ansible_distribution == 'Fedora') | ternary('fedora','centos') }}/docker-{{ docker_edition }}.repo"
60 | docker_yum_repo_enable_nightly: '0'
61 | docker_yum_repo_enable_test: '0'
62 | docker_yum_gpg_key: "{{ docker_repo_url }}/centos/gpg"
63 |
64 | # A list of users who will be added to the docker group.
65 | docker_users: []
66 |
67 | # Docker daemon options as a dict
68 | docker_daemon_options: {}
69 |
--------------------------------------------------------------------------------
/playbooks/post-checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Finishing up: Final checks"
3 | hosts: all
4 | gather_facts: true
5 | become: true
6 | remote_user: "{{ new_user }}"
7 |
8 | tasks:
9 | - block:
10 | - name: "Completion timestamp"
11 | debug:
12 | msg: "Finished at {{ ansible_date_time.date }} {{ ansible_date_time.time }}"
13 |
14 | - name: Verify SSHD is running
15 | ansible.builtin.command: systemctl is-active sshd
16 | register: sshd
17 | failed_when: sshd.stdout != "active"
18 | changed_when: false
19 | ignore_errors: true
20 |
21 | - name: Verify UFW is active
22 | ansible.builtin.command: systemctl is-active ufw
23 | register: ufw
24 | failed_when: ufw.stdout != "active"
25 | changed_when: false
26 | ignore_errors: true
27 |
28 | - name: Verify Docker daemon is running
29 | ansible.builtin.command: systemctl is-active docker
30 | register: docker
31 | failed_when: docker.stdout != "active"
32 | changed_when: false
33 | ignore_errors: true
34 |
35 | - name: Verify Tailscale daemon is running
36 | ansible.builtin.command: systemctl is-active tailscaled
37 | register: tailscaled
38 | failed_when: tailscaled.stdout != "active"
39 | changed_when: false
40 | ignore_errors: true
41 |
42 | - name: Verify Fail2Ban is running
43 | ansible.builtin.command: systemctl is-active fail2ban
44 | register: fail2ban
45 | failed_when: fail2ban.stdout != "active"
46 | changed_when: false
47 | ignore_errors: true
48 |
49 | - name: Verify Cockpit service is running
50 | ansible.builtin.command: systemctl is-active cockpit
51 | register: cockpit
52 | failed_when: cockpit.stdout != "active"
53 | changed_when: false
54 | ignore_errors: true
55 |
56 | - name: Verify Monit service is running
57 | ansible.builtin.command: systemctl is-active monit
58 | register: monit
59 | failed_when: monit.stdout != "active"
60 | changed_when: false
61 | ignore_errors: true
62 |
63 | - name: Verify AppArmor is enforcing
64 | ansible.builtin.assert:
65 | that:
66 | - ansible_facts.apparmor.status == 'enabled'
67 | success_msg: "AppArmor is enforcing"
68 | fail_msg: "AppArmor is not enforcing"
69 | ignore_errors: true
70 |
71 | rescue:
72 | - name: "⚠️ Post-run summary encountered errors"
73 | debug:
74 | msg: "Service checks failed, but continuing."
75 |
76 |
77 | - name: "Smoke-test ping"
78 | shell: ping -c1 127.0.0.1
79 | register: smoke_test
80 | ignore_errors: true
81 |
82 | - name: "Smoke-test result"
83 | debug:
84 | msg: "{{ 'ℹ️ localhost reachable' if smoke_test.rc == 0 else '❌ localhost unreachable' }}"
85 |
86 | - name: "Reboot if requested"
87 | reboot:
88 | msg: "Rebooting per post-check flag"
89 | pre_reboot_delay: 5
90 | reboot_timeout: 300
91 | when: reboot_required | default(false)
92 | ignore_errors: true
93 |
--------------------------------------------------------------------------------
/roles/grafana-agent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Check if role is enabled
2 |   ansible.builtin.debug:
3 |     msg: "Grafana Agent role is disabled or missing config. Skipping."
4 |   when: not agent_enabled
5 |   tags: skip
6 |
7 | # The debug above only prints a message; without end_host every later task
8 | # in this role still ran when agent_enabled was false. Same guard pattern
9 | # as the grafana and prometheus roles.
10 | - name: Skip Grafana Agent role if disabled
11 |   ansible.builtin.meta: end_host
12 |   when: not agent_enabled
6 |
7 | - name: Ensure required variables are set
8 | ansible.builtin.assert:
9 | that:
10 | - grafana_agent_url is defined
11 | - prometheus_remote_write_url is defined
12 | - loki_push_url is defined
13 | fail_msg: "Missing one or more required variables for Grafana Agent."
14 |
15 | - name: Create directories
16 | ansible.builtin.file:
17 | path: "{{ item }}"
18 | state: directory
19 | mode: '0755'
20 | loop:
21 | - "{{ grafana_agent_config_path }}"
22 | - "{{ grafana_agent_data_path }}"
23 |
24 | - name: Download Grafana Agent zip
25 | ansible.builtin.get_url:
26 | url: "{{ grafana_agent_url }}"
27 | dest: /tmp/grafana-agent.zip
28 | mode: '0644'
29 |
30 | - name: Extract Grafana Agent binary
31 | ansible.builtin.unarchive:
32 | src: /tmp/grafana-agent.zip
33 | dest: /tmp/
34 | remote_src: true
35 |
36 | - name: Move Grafana Agent binary to /usr/local/bin
37 | ansible.builtin.copy:
38 | src: /tmp/grafana-agent-linux-amd64
39 | dest: /usr/local/bin/grafana-agent
40 | remote_src: true
41 | mode: '0755'
42 |
43 | - name: Deploy Grafana Agent config
44 | ansible.builtin.template:
45 | src: agent-config.yml.j2
46 | dest: "{{ grafana_agent_config_file }}"
47 | mode: '0644'
48 | notify: Restart grafana-agent
49 |
50 | - name: Install Grafana Agent systemd service
51 | ansible.builtin.copy:
52 | dest: /etc/systemd/system/grafana-agent.service
53 | content: |
54 | [Unit]
55 | Description=Grafana Agent
56 | After=network.target
57 |
58 | [Service]
59 | ExecStart={{ grafana_agent_binary_path }} --config.file {{ grafana_agent_config_file }}
60 | Restart=always
61 |
62 | [Install]
63 | WantedBy=multi-user.target
64 | notify: Restart grafana-agent
65 |
66 | - name: Enable and start grafana-agent
67 | ansible.builtin.systemd:
68 | name: grafana-agent
69 | enabled: true
70 | state: started
71 | daemon_reload: true
72 | notify: Remove grafana-agent temp
73 |
74 | - name: "Verify Grafana Agent is active and healthy"
75 | block:
76 | - name: "Check grafana-agent systemd service is active"
77 | ansible.builtin.systemd:
78 | name: grafana-agent
79 | enabled: true
80 | state: started
81 | register: agent_service
82 | failed_when: agent_service.status.ActiveState != "active"
83 |
84 | - name: "Check grafana-agent /metrics endpoint"
85 | ansible.builtin.uri:
86 | url: "http://localhost:12345/metrics"
87 | method: GET
88 | status_code: 200
89 | return_content: false
90 | timeout: 5
91 | register: agent_http_check
92 | failed_when: agent_http_check.status != 200
93 | ignore_errors: false
94 |
95 | - name: "Grafana Agent is running and responding"
96 | ansible.builtin.debug:
97 | msg: "Grafana Agent is active and HTTP endpoint is OK"
98 | rescue:
99 | - name: "⚠️ Grafana Agent failed final health check"
100 | ansible.builtin.debug:
101 | msg: "Grafana Agent is not running or not responding on port 12345"
102 | failed_when: true
103 |
104 |
105 |
--------------------------------------------------------------------------------
/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Load OS-specific vars.
3 | include_vars: "{{ lookup('first_found', params) }}"
4 | vars:
5 | params:
6 | files:
7 | - '{{ansible_distribution}}.yml'
8 | - '{{ansible_os_family}}.yml'
9 | - main.yml
10 | paths:
11 | - 'vars'
12 |
13 | - include_tasks: setup-RedHat.yml
14 | when: ansible_os_family == 'RedHat'
15 |
16 | - include_tasks: setup-Debian.yml
17 | when: ansible_os_family == 'Debian'
18 |
19 | - name: Install Docker packages.
20 | package:
21 | name: "{{ docker_packages }}"
22 | state: "{{ docker_packages_state }}"
23 | notify: restart docker
24 | ignore_errors: "{{ ansible_check_mode }}"
25 | when: "ansible_version.full is version_compare('2.12', '<') or ansible_os_family not in ['RedHat', 'Debian']"
26 |
27 | - name: Install Docker packages (with downgrade option).
28 | package:
29 | name: "{{ docker_packages }}"
30 | state: "{{ docker_packages_state }}"
31 | allow_downgrade: true
32 | notify: restart docker
33 | ignore_errors: "{{ ansible_check_mode }}"
34 | when: "ansible_version.full is version_compare('2.12', '>=') and ansible_os_family in ['RedHat', 'Debian']"
35 |
36 | - name: Install docker-compose plugin.
37 | package:
38 | name: "{{ docker_compose_package }}"
39 | state: "{{ docker_compose_package_state }}"
40 | notify: restart docker
41 | ignore_errors: "{{ ansible_check_mode }}"
42 | when: "docker_install_compose_plugin | bool == true and (ansible_version.full is version_compare('2.12', '<') or ansible_os_family not in ['RedHat', 'Debian'])"
43 |
44 | - name: Install docker-compose-plugin (with downgrade option).
45 | package:
46 | name: "{{ docker_compose_package }}"
47 | state: "{{ docker_compose_package_state }}"
48 | allow_downgrade: true
49 | notify: restart docker
50 | ignore_errors: "{{ ansible_check_mode }}"
51 | when: "docker_install_compose_plugin | bool == true and ansible_version.full is version_compare('2.12', '>=') and ansible_os_family in ['RedHat', 'Debian']"
52 |
53 | - name: Ensure /etc/docker/ directory exists.
54 |   file:
55 |     path: /etc/docker
56 |     state: directory
57 |     mode: "0755"  # quoted: bare 0755 is a YAML octal int (ansible-lint risky-octal)
58 |   when: docker_daemon_options.keys() | length > 0
59 |
60 | - name: Configure Docker daemon options.
61 |   copy:
62 |     content: "{{ docker_daemon_options | to_nice_json }}"
63 |     dest: /etc/docker/daemon.json
64 |     mode: "0644"  # quoted for the same reason as above
65 |   when: docker_daemon_options.keys() | length > 0
66 |   notify: restart docker
67 |
68 | - name: Ensure Docker is started and enabled at boot.
69 | service:
70 | name: docker
71 | state: "{{ docker_service_state }}"
72 | enabled: "{{ docker_service_enabled }}"
73 | ignore_errors: "{{ ansible_check_mode }}"
74 | when: docker_service_manage | bool
75 |
76 | - name: Ensure handlers are notified now to avoid firewall conflicts.
77 | meta: flush_handlers
78 |
79 | - include_tasks: docker-compose.yml
80 | when: docker_install_compose | bool
81 |
82 | - name: Get docker group info using getent.
83 | getent:
84 | database: group
85 | key: docker
86 | split: ':'
87 | when: docker_users | length > 0
88 |
89 | - name: Check if there are any users to add to the docker group.
90 | set_fact:
91 | at_least_one_user_to_modify: true
92 | when:
93 | - docker_users | length > 0
94 | - item not in ansible_facts.getent_group["docker"][2]
95 | with_items: "{{ docker_users }}"
96 |
97 | - include_tasks: docker-users.yml
98 | when: at_least_one_user_to_modify is defined
99 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: help first-apply apply setup requirements lint install-ansible install-lint check-env $(ROLE_TAGS) $(PLAYBOOKS)
2 |
3 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
4 | # Variables
5 | PYTHON := python3
6 | PIP := pip3
7 | ANSIBLE_PLAYBOOK:= ansible-playbook
8 | ANSIBLE_LINT := ansible-lint
9 | GALAXY := ansible-galaxy
10 | PLAYBOOK := playbooks/all.yml
11 | BECOME := --ask-become-pass
12 | AS_ROOT := --extra-vars "ansible_user=root ansible_port=22"
13 |
14 | # List of all "role" tags
15 | ROLE_TAGS = docker \
16 | timezone \
17 | users \
18 | ssh \
19 | hostname \
20 | firewall \
21 | fail2ban \
22 | dotfiles \
23 | monit \
24 | cockpit \
25 | borg \
26 | maldet \
27 | lynis
28 |
29 | # List all available playbooks (from playbooks directory)
30 | PLAYBOOKS = $(shell find playbooks -name '*.yml' -type f | sed 's|playbooks/||;s|\.yml||')
31 |
32 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
33 | help:
34 | @echo "Usage:"
35 | @echo " make setup # install pip deps, galaxy requirements, ansible-lint"
36 | @echo " make requirements # install ansible-galaxy roles from requirements.yml"
37 | @echo " make lint # run ansible-lint on your playbooks"
38 | @echo " make first-apply # Initial bootstrap (root-only, before any users exist)"
39 | @echo " make apply # run all roles"
40 | @echo " make # run one tagged role"
41 | @echo " make # run one tagged category"
42 | @echo
43 | @echo "Available playbooks: $(PLAYBOOKS)"
44 | @echo "Available roles: $(ROLE_TAGS)"
45 |
46 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
47 | # Environment checks
48 | check-env:
49 | @command -v $(PYTHON) >/dev/null 2>&1 || { \
50 | echo >&2 "Error: $(PYTHON) not found. Please install Python 3."; \
51 | exit 1; \
52 | }
53 | @command -v $(PIP) >/dev/null 2>&1 || { \
54 | echo >&2 "Error: $(PIP) not found. Please install pip for Python 3."; \
55 | exit 1; \
56 | }
57 |
58 | # Backticks inside the double-quoted echo were shell command substitution,
59 | # so the error path recursively executed `make install-lint` / `make
60 | # install-ansible` instead of printing the hint. Single quotes fix that.
61 | check-ansible:
62 | @command -v $(ANSIBLE_LINT) >/dev/null 2>&1 || { \
63 | echo >&2 "Error: $(ANSIBLE_LINT) not found. Run 'make install-lint'"; \
64 | exit 1; \
65 | }
66 | @command -v $(ANSIBLE_PLAYBOOK) >/dev/null 2>&1 || { \
67 | echo >&2 "Error: $(ANSIBLE_PLAYBOOK) not found. Run 'make install-ansible'"; \
68 | exit 1; \
69 | }
67 |
68 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
69 | # pip-based installs
# Install Ansible for the current user only (no sudo required).
install-ansible: check-env
	@echo "Installing Ansible via pip..."
	$(PIP) install --user ansible

# Install ansible-lint for the current user only.
install-lint: check-env
	@echo "Installing ansible-lint via pip..."
	$(PIP) install --user ansible-lint
77 |
78 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
79 | # Galaxy roles
# Pull the third-party roles/collections declared in requirements.yml.
requirements: check-env check-ansible
	@echo "Installing Ansible Galaxy roles from requirements.yml..."
	$(GALAXY) install -r requirements.yml
83 |
84 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
85 | # Combined setup
# One-shot bootstrap: tooling plus galaxy dependencies.
setup: check-env install-ansible install-lint requirements
	@echo "✔️ Environment is ready - Ansible, ansible-lint, and Galaxy roles installed."
88 |
89 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
90 | # Linting
# Static checks over every playbook (ansible-lint recurses into roles).
lint: check-ansible
	@echo "Running ansible-lint..."
	$(ANSIBLE_LINT) playbooks/
94 |
95 | #––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
96 | # Existing targets
97 |
# Declare the run targets as phony so a file or directory with the same name
# (e.g. a role tag matching a repo directory) can never shadow them.
.PHONY: first-apply first-run apply run $(ROLE_TAGS) $(PLAYBOOKS)

# Initial bootstrap: connect as root on port 22 (before users/SSH hardening).
first-apply first-run:
	$(ANSIBLE_PLAYBOOK) $(PLAYBOOK) $(AS_ROOT)

# Normal run: connect as the configured user and prompt for the sudo password.
apply run:
	$(ANSIBLE_PLAYBOOK) $(PLAYBOOK) $(BECOME)

# pattern rule: `make docker` → ansible-playbook ... --tags docker
$(ROLE_TAGS):
	$(ANSIBLE_PLAYBOOK) $(PLAYBOOK) $(BECOME) --tags $@

# This runs a specific playbook.
# E.g. `make monitoring` → ansible-playbook ./playbooks/monitoring.yml
$(PLAYBOOKS):
	$(ANSIBLE_PLAYBOOK) ./playbooks/$(@).yml $(BECOME)
112 |
--------------------------------------------------------------------------------
/roles/borg/tasks/main.yml:
--------------------------------------------------------------------------------
# Gate for the whole role: true only when Borg is enabled AND every required
# input is non-empty (repo URL, passphrase, e-mail, BorgBase API key).
# The folded scalar (>-) collapses the expression to one Jinja template.
- name: Set borg_should_run fact
  set_fact:
    borg_should_run: >-
      {{ (borg_enabled | default(false)) and
      (borg_repo | default('') | length > 0) and
      (borg_password | default('') | length > 0) and
      (user_email | default('') | length > 0) and
      (borgbase_apikey | default('') | length > 0) }}
9 |
# BUG FIX: this warning must live OUTSIDE the guarded block below. Tasks in
# a block inherit the block-level `when: borg_should_run`, so a task with
# `when: not borg_should_run` inside it could never fire.
- name: Skip if essential Borg variables are missing
  debug:
    msg: "⚠️ Skipping Borg setup due to missing variables"
  when: not borg_should_run
  tags: [borg]

- name: Setup Borg backup
  block:
    - name: Create SSH key if it doesn't exist
      openssh_keypair:
        path: "{{ borg_ssh_key_path }}"
        type: ed25519
        comment: "{{ borgbase_key_name }}"
        force: false  # never overwrite an existing key

    - name: Prompt user to back up private SSH key
      debug:
        msg: |
          📢 Don't forget to back up your Borg SSH private key:
          > cat {{ borg_ssh_key_path }}
          You'll need it to restore from backup if this server is lost.
      run_once: true

    # slurp returns the file contents base64-encoded in `content`.
    # NOTE(review): check_mode: yes forces this task into check mode on every
    # run — presumably so `--check` runs don't fail here; confirm intent.
    - name: Read public key
      slurp:
        src: "{{ borg_ssh_key_path }}.pub"
      register: borg_pubkey
      check_mode: yes

    # Upload the public key so this host may push to the BorgBase repo.
    - name: Register SSH key with BorgBase
      adhawkins.borgbase.borgbase_ssh:
        state: present
        apikey: "{{ borgbase_apikey }}"
        name: "{{ borgbase_key_name }}"
        key: "{{ borg_pubkey['content'] | b64decode }}"
      register: borgbase_key_result

    - name: Create BorgBase repository
      adhawkins.borgbase.borgbase_repo:
        state: present
        email: "{{ user_email }}"
        password: "{{ borg_password }}"
        name: "{{ borg_repo_name }}"
        full_access_keys:
          - "{{ borgbase_key_result.key_id }}"
        quota_enabled: false
        alert_days: 2  # BorgBase alerts if no backup arrives for 2 days
      register: borgbase_repo

    - name: Set borgbase repo id
      set_fact:
        borgbackup_borgbase_repo: "{{ borgbase_repo.repo.repo_id }}"
      when: borgbase_repo.repo.repo_id is defined

    - name: Set borgbackup_ssh_host
      set_fact:
        borgbackup_ssh_host: "{{ borgbackup_borgbase_repo }}.repo.borgbase.com"
      when: borgbackup_borgbase_repo is defined

    # NOTE(review): apt makes this role Debian/Ubuntu-only; switch to the
    # generic `package` module if other distros must be supported.
    - name: Ensure borgmatic is installed
      apt:
        name: borgmatic
        update_cache: yes
        state: present

    - name: Create borgmatic config directory
      file:
        path: /etc/borgmatic
        state: directory
        owner: root
        group: root
        mode: '0755'

    # Mode 0600 because the config embeds the repo passphrase.
    # NOTE(review): this is the sectioned (location/storage/retention/hooks)
    # layout of borgmatic < 1.8 — confirm the installed version accepts it.
    - name: Write borgmatic config
      copy:
        dest: /etc/borgmatic/config.yaml
        owner: root
        group: root
        mode: '0600'
        content: |
          location:
            source_directories:
          {% for path in borg_source_directories %}
              - {{ path }}
          {% endfor %}

            repositories: [ '{{ borg_repo }}' ]

          storage:
            encryption_passphrase: "{{ borg_password }}"
            compression: {{ borg_compression }}
            retries: {{ borg_retries }}
            retry_wait: {{ borg_retry_wait }}
            ssh_command: ssh -i {{ borg_ssh_key_path }}

          retention:
          {% for key, value in borg_retention.items() %}
            {{ key }}: {{ value }}
          {% endfor %}

          {% if borg_ping_url %}
          hooks:
            healthchecks:
              ping_url: {{ borg_ping_url }}
              send_logs: true
              ping_body_limit: 100000
              states: {{ borg_ping_states }}
          {% endif %}

    - name: Enable borgmatic systemd timer
      systemd:
        name: borgmatic.timer
        enabled: yes
        state: started

  when: borg_should_run
  tags: [borg]
126 |
--------------------------------------------------------------------------------
/roles/ssh/tasks/main.yml:
--------------------------------------------------------------------------------
# Prepare the login user's SSH directory and key file with the strict
# permissions sshd's StrictModes requires (0700 dir, 0600 authorized_keys).
- name: Ensure ~/.ssh directory exists with correct permissions
  ansible.builtin.file:
    path: "/home/{{ new_user }}/.ssh"
    state: directory
    mode: '0700'
    owner: "{{ new_user }}"
    group: "{{ new_user }}"
  when: new_user is defined and not ssh_lockdown | default(false)

# NOTE(review): state: touch updates timestamps and so reports "changed" on
# every run — confirm whether that is acceptable for idempotence reporting.
- name: Ensure ~/.ssh/authorized_keys exists with correct permissions
  ansible.builtin.file:
    path: "/home/{{ new_user }}/.ssh/authorized_keys"
    state: touch
    mode: '0600'
    owner: "{{ new_user }}"
    group: "{{ new_user }}"
  when: new_user is defined and not ssh_lockdown | default(false)
18 |
# Open the SSH port in UFW before touching sshd_config, so a firewall rule
# cannot lock us out of the (possibly new) port.
- name: Allow SSH on configured port
  ufw:
    rule: allow
    port: "{{ ssh_port }}"
    proto: tcp
  when: not ssh_lockdown | default(false)

# /run/sshd is sshd's privilege-separation directory on tmpfs; it may be
# absent. Presumably created so the `sshd -T -f %s` validate calls in the
# tasks below succeed — confirm.
- name: Ensure SSH privilege separation directory exists
  file:
    path: /run/sshd
    state: directory
    owner: root
    group: root
    mode: '0755'
  become: true
34 |
# Apply each hardening directive, replacing a (possibly commented-out)
# existing line or appending at EOF.
# BUG FIX 1: `backrefs: yes` removed — with backrefs, lineinfile skips
# insertion when the regexp has no match, so directives missing from
# sshd_config were silently never added (insertafter: EOF was dead).
# BUG FIX 2: `changed_when: true` removed — it forced every run to report
# "changed" and re-trigger the restart handler, breaking idempotence.
- name: Harden SSH config
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?\s*{{ item.key }}\s+.*'
    line: '{{ item.key }} {{ item.value }}'
    state: present
    insertafter: EOF
    validate: 'sshd -T -f %s'
  loop:
    # NOTE(review): `Protocol` is obsolete on OpenSSH >= 7.6 — confirm the
    # target's sshd still accepts it, or validation will reject the file.
    - { key: 'Protocol', value: '2' }
    - { key: 'Compression', value: 'delayed' }
    - { key: 'MaxAuthTries', value: '5' }
    - { key: 'PrintLastLog', value: 'yes' }
    - { key: 'PermitEmptyPasswords', value: 'no' }
    - { key: 'IgnoreRhosts', value: 'yes' }
    - { key: 'IgnoreUserKnownHosts', value: 'yes' }
    - { key: 'HostBasedAuthentication', value: 'no' }
    - { key: 'Port', value: '{{ ssh_port }}' }
    - { key: 'StrictModes', value: 'yes' }
    - { key: 'PubkeyAuthentication', value: 'yes' }
    - { key: 'GSSAPIAuthentication', value: 'no' }
    - { key: 'KerberosAuthentication', value: 'no' }
    - { key: 'Ciphers', value: 'aes128-ctr,aes192-ctr,aes256-ctr' }
    - { key: 'MACs', value: 'hmac-sha2-256,hmac-sha2-512' }
  notify: Restart SSH
  when: not ssh_lockdown | default(false)
63 |
# Lock down authentication methods only once the admin user is confirmed
# working (user_ready), so we never disable the only usable login.
# FIX: `changed_when: true` removed — it made the task always report changed
# and always notify the handler, even when nothing was modified.
- name: Disable root login
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    state: present
    validate: 'sshd -T -f %s'
  loop:
    - { regexp: '^PermitRootLogin', line: 'PermitRootLogin no' }
    - { regexp: '^PasswordAuthentication', line: 'PasswordAuthentication no' }
    - { regexp: '^ChallengeResponseAuthentication', line: 'ChallengeResponseAuthentication no' }
  when: user_ready | default(false) and not ssh_lockdown | default(false)
  notify: Restart SSH
  tags:
    - lockdown
81 |
# BUG FIX: the original looped over a Port item but had regexp/line
# hard-coded to PasswordAuthentication (copy-paste from the task below), so
# the port was never updated. Template the loop item, and notify the restart
# handler so the new port actually takes effect.
- name: Update SSH port
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    validate: 'sshd -T -f %s'
  loop:
    - { regexp: '^#?\s*Port\s+.*', line: 'Port {{ ssh_port }}' }
  when: ssh_lockdown | default(false)
  notify: Restart SSH
  tags:
    - lockdown
93 |
# NOTE(review): duplicates the PasswordAuthentication entry already handled
# by "Disable root login" above, but this one runs in the non-lockdown phase
# without the user_ready guard — confirm both are intended.
- name: Disable password authentication
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?\s*PasswordAuthentication\s+.*'
    line: 'PasswordAuthentication no'
    validate: 'sshd -T -f %s'
  when: not ssh_lockdown | default(false)
  tags:
    - lockdown
103 |
104 |
# BUG FIX: with_fileglob is a lookup plugin and globs files on the CONTROL
# node, not the managed host — so the remote /etc/ssh keys were never
# touched (or worse, the controller's paths were used). Use the find module
# to inspect the remote filesystem instead.
- name: Find SSH host private key files
  find:
    paths: /etc/ssh
    patterns: '*key'        # matches ssh_host_*_key; excludes *.pub
    file_type: file
  register: ssh_private_key_files
  when: not ssh_lockdown | default(false)

- name: Secure SSH private keys
  file:
    path: "{{ item.path }}"
    mode: '0600'
  loop: "{{ ssh_private_key_files.files | default([]) }}"
  when: not ssh_lockdown | default(false)

- name: Find SSH host public key files
  find:
    paths: /etc/ssh
    patterns: '*.pub'
    file_type: file
  register: ssh_public_key_files
  when: not ssh_lockdown | default(false)

- name: Secure SSH public keys
  file:
    path: "{{ item.path }}"
    mode: '0644'
  loop: "{{ ssh_public_key_files.files | default([]) }}"
  when: not ssh_lockdown | default(false)
120 |
# Presumably disabled so socket activation cannot override the Port set in
# sshd_config (socket-activated sshd listens where the socket unit says) —
# confirm against the target distro's packaging.
- name: Disable systemd ssh.socket (if enabled)
  ansible.builtin.systemd:
    name: ssh.socket
    state: stopped
    enabled: no
  become: true
  when: not ssh_lockdown | default(false)

# NOTE(review): state: restarted restarts sshd on EVERY run, even when no
# config changed — `state: started` plus the Restart SSH handler may be the
# intended behaviour; confirm.
- name: Enable ssh.service explicitly
  ansible.builtin.systemd:
    name: ssh
    state: restarted
    enabled: yes
  become: true
  when: not ssh_lockdown | default(false)

# Run any pending "Restart SSH" handler immediately, so the hardened config
# is live before subsequent roles reconnect.
- name: Restart SSH to apply config
  ansible.builtin.meta: flush_handlers
  when: not ssh_lockdown | default(false)
140 |
--------------------------------------------------------------------------------
/plugins/pretty.py:
--------------------------------------------------------------------------------
1 | # pretty.py
2 | DOCUMENTATION = r"""
3 | callback: pretty
4 | type: stdout
5 | short_description: Emoji + colored status stdout callback
6 | extends_documentation_fragment:
7 | - default_callback
8 | - result_format_callback
9 | """
10 |
11 | from ansible.plugins.callback.default import CallbackModule as DefaultCb
12 | from ansible.utils.color import colorize, hostcolor
13 |
class CallbackModule(DefaultCb):
    """Emoji + colour stdout callback.

    Prints one ``[host] <emoji> <task> (<status>)`` line per task result,
    a boxed play title, per-role section headers, and an emoji summary.
    Inherits from the default stdout callback so any event not overridden
    here keeps the stock behaviour.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = "stdout"
    CALLBACK_NAME = "pretty"
    CALLBACK_NEEDS_WHITELIST = True

    # status key -> (emoji, display colour)
    STATUS_EMOJI = {
        "ok": ("✅", "green"),
        "changed": ("🔄️", "yellow"),
        "skipped": ("⏭️", "blue"),
        "failed": ("❌", "red"),
        "unreachable": ("🚫", "red"),
        "warning": ("⚠️", "yellow"),
        "info": ("ℹ️", "cyan"),
    }

    # status key -> human label used in the end-of-run summary
    LABELS = {
        "ok": "Successful",
        "changed": "Changes",
        "skipped": "Skipped",
        "failed": "Failed",
        "unreachable": "Unreachable",
        "warning": "Warnings",
        "info": "Info",
    }

    def __init__(self, display=None):
        # FIX: forward `display` to the base class (the original dropped it,
        # so a caller-supplied display object was silently ignored).
        super().__init__(display=display)
        self._current_role = None
        self._last_role = None
        self._current_handler_role = None
        # FIX: also initialise here, not only in v2_playbook_on_play_start,
        # so a handler event can never hit an uninitialised attribute.
        self._printed_handler_roles = set()

    @staticmethod
    def _task_label(result):
        """Best-effort display name for a task result.

        FIX: falls back through ``result._task`` — ``result.task`` is not a
        public TaskResult attribute and would raise AttributeError.
        """
        return result.task_name or result._task.get_name()

    def v2_runner_on_start(self, host, task):
        # Suppress the default per-task banner; we print our own line on
        # completion instead.
        return

    def v2_playbook_on_task_start(self, task, is_conditional):
        """Print a cyan section header when execution enters a new role."""
        role_obj = getattr(task, '_role', None)
        if role_obj:
            role_name = role_obj.get_name() or role_obj._role_name
            if role_name != self._current_role:
                header = f"▶️ {role_name} role"
                line = '─' * (len(header) + 2)
                self._display.display("", screen_only=True)
                self._display.display(header, color='cyan', screen_only=True)
                self._display.display(line, color='cyan', screen_only=True)
                self._current_role = role_name

    def v2_playbook_on_play_start(self, play):
        """Draw a box around the play title and reset handler-header state."""
        self._play = play
        title = play.get_name().strip() or play._file_name or 'Mystery Playbook'
        min_total = 40  # minimum box width so short titles still look boxed
        inner = max(min_total - 2, len(title))
        top = "╭" + "─" * inner + "╮"
        middle = "│" + title.center(inner) + "│"
        bottom = "╰" + "─" * inner + "╯"
        self._display.display("", screen_only=True)
        for line in (top, middle, bottom):
            self._display.display(line, color='magenta', screen_only=True)
        self._printed_handler_roles = set()

    # —— Override includes to get an emoji —— #
    def v2_playbook_on_include(self, included_file):
        hosts = ", ".join(h.name for h in included_file._hosts)
        emoji, color = self.STATUS_EMOJI["info"]
        self._display.display(f"[{hosts}] {emoji} Prepared for next role: {included_file._filename} ", color=None, newline=False)
        self._display.display(f"(Done)", color=color)

    # —— Per‐task callbacks —— #
    def v2_runner_on_ok(self, result):
        """Handle successful task results (both ok and changed).

        FIX: Ansible reports changed tasks through this event — there is no
        ``v2_runner_on_changed`` callback in the API — so the ok/changed
        split must happen here; previously changed tasks printed "Success".
        """
        action = result._task.action
        if action.endswith("debug"):
            host = result._host.get_name()
            args = result._task.args or {}
            data = result._result
            # 1) msg: ...
            if "msg" in args and data.get("msg") not in (None, ""):
                for line in str(data["msg"]).splitlines():
                    self._display.display(f" {' ' * len(host)} {line}", color="dark gray")
            # 2) var: my_var
            elif "var" in args:
                varname = args["var"]
                val = data.get(varname)
                # print each element if it’s a list
                if isinstance(val, (list, tuple)):
                    for line in val:
                        self._display.display(f" {' ' * len(host)} {line}")
                else:
                    self._display.display(f" {' ' * len(host)} {varname} = {val}")
            # nothing else to do
            return

        host = result._host.get_name()
        task = self._task_label(result)
        if "Gathering Facts" in task:
            self._display.display(f"[{host}] 🛂 {task} ", color=None, newline=False)
            self._display.display(f"(Done)", color="cyan")
            return
        if result._result.get("changed", False):
            e, c = self.STATUS_EMOJI["changed"]
            self._display.display(f"[{host}] {e} {task} ", color=None, newline=False)
            self._display.display(f"(Changed)", color=c)
            return
        e, c = self.STATUS_EMOJI["ok"]
        self._display.display(f"[{host}] {e} {task} ", color=None, newline=False)
        self._display.display(f"(Success)", color=c)

    def v2_runner_on_changed(self, result, ignore_errors=False):
        # NOTE(review): Ansible core never emits this event name; changed
        # results arrive via v2_runner_on_ok. Kept only in case something
        # calls it directly — confirm it can be deleted.
        host = result._host.get_name()
        task = self._task_label(result)
        e, c = self.STATUS_EMOJI["changed"]
        self._display.display(f"[{host}] {e} {task} ", color=None, newline=False)
        self._display.display(f"(Changed)", color=c)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        """Print the failure line plus indented msg/stderr/stdout details."""
        host = result._host.get_name()
        task = self._task_label(result)
        emoji, color = self.STATUS_EMOJI["failed"]

        self._display.display(f"[{host}] {emoji} {task} ", color=None, newline=False)
        self._display.display("(Failed)", color=color)

        indent = " " * (len(host) + 3)  # align details under "[host] "
        msg = result._result.get("msg")
        if msg:
            for line in str(msg).splitlines():
                self._display.display(f"{indent}{line}", color="dark gray")

        stderr = result._result.get("stderr")
        if stderr:
            self._display.display(f"{indent}stderr: {stderr}", color="dark gray")
        stdout = result._result.get("stdout")
        if stdout:
            self._display.display(f"{indent}stdout: {stdout}", color="dark gray")

    def v2_runner_on_skipped(self, result):
        host = result._host.get_name()
        task = self._task_label(result)
        e, c = self.STATUS_EMOJI["skipped"]
        self._display.display(f"[{host}] {e} {task} ", color=None, newline=False)
        self._display.display(f"(Skipped)", color=c)

    def v2_runner_on_unreachable(self, result):
        host = result._host.get_name()
        task = self._task_label(result)
        emoji, color = self.STATUS_EMOJI["unreachable"]
        self._display.display(f"[{host}] {emoji} {task} ", color=None, newline=False)
        self._display.display("(Unreachable)", color=color)
        msg = result._result.get("msg")
        if msg:
            indent = " " * (len(host) + 3)
            self._display.display(f"{indent}{msg}", color="dark gray")

    def v2_playbook_on_handler_task_start(self, task):
        """Print a yellow section header the first time a role's handlers run."""
        role_obj = getattr(task, '_role', None)
        if not role_obj:
            return
        role_name = role_obj.get_name() or role_obj._role_name
        if role_name in self._printed_handler_roles:
            return
        header = f"▶️ {role_name} handlers"
        line = '─' * (len(header) + 2)
        self._display.display("", screen_only=True)
        self._display.display(header, color='yellow', screen_only=True)
        self._display.display(line, color='yellow', screen_only=True)
        self._printed_handler_roles.add(role_name)

    # —— Loop‐item callbacks —— #
    def _print_item_details(self, result, host):
        """Helper to print msg, item.key/value, and diff.before→after."""
        indent = " " * (len(host) + 3)  # align under “[host] ”
        res = result._result

        # 1) msg
        msg = res.get("msg")
        if msg:
            self._display.display(f"{indent}{msg}", color="dark gray")

        # 2) item.key = item.value
        item = res.get("item")
        if isinstance(item, dict):
            key = item.get("key")
            val = item.get("value")
            if key and (val is not None and val != ""):
                self._display.display(f"{indent}{key} = {val}", color="dark gray")

        # 3) diff entries (modules may return a single dict or a list)
        diffs = res.get("diff")
        if isinstance(diffs, list):
            entries = diffs
        elif isinstance(diffs, dict):
            entries = [diffs]
        else:
            entries = []
        for d in entries:
            if not isinstance(d, dict):
                continue
            before = d.get("before")
            after = d.get("after")
            if before and after:
                self._display.display(f"{indent}{before} → {after}", color="dark gray")

    def v2_runner_item_on_ok(self, result):
        host = result._host.get_name()
        task = self._task_label(result)
        changed = result._result.get("changed", False)
        key = "changed" if changed else "ok"
        label = "Changed" if changed else "Success"
        emoji, color = self.STATUS_EMOJI[key]

        # Summary line
        self._display.display(f"[{host}] {emoji} {task} ", color=None, newline=False)
        self._display.display(f"({label})", color=color)

        # Details
        self._print_item_details(result, host)

    def v2_runner_item_on_failed(self, result):
        host = result._host.get_name()
        task = self._task_label(result)
        emoji, color = self.STATUS_EMOJI["failed"]
        self._display.display(f"[{host}] {emoji} {task} ", color=None, newline=False)
        self._display.display(f"(Failed)", color=color)
        self._print_item_details(result, host)

    def v2_runner_item_on_skipped(self, result):
        host = result._host.get_name()
        task = self._task_label(result)
        emoji, color = self.STATUS_EMOJI["skipped"]
        self._display.display(f"[{host}] {emoji} {task} ", color=None, newline=False)
        self._display.display(f"(Skipped)", color=color)
        self._print_item_details(result, host)

    def v2_playbook_on_stats(self, stats):
        """Per-host emoji summary table at the end of the run."""
        self._display.display('', screen_only=True)
        header = 'Summary'
        self._display.display(header, screen_only=True, color='magenta')
        self._display.display('─' * len(header), screen_only=True, color='magenta')

        for host in sorted(stats.processed.keys()):
            data = stats.summarize(host)
            self._display.display(hostcolor(host, data), screen_only=True)
            for key in ("ok", "changed", "unreachable", "failed", "skipped"):
                e, color = self.STATUS_EMOJI[key]
                # summarize() reports failures under "failures", not "failed"
                count = data["failures"] if key == "failed" else data.get(key, 0)
                label = self.LABELS[key]
                if count == 0:
                    color = "dark gray"  # de-emphasise zero rows

                self._display.display(f" {e} {count} {label}", color=color, screen_only=True)
            self._display.display("", screen_only=True)

        self._display.display(
            "Setup complete. Thanks for using https://github.com/lissy93/ansibles",
            color="blue", screen_only=True
        )
272 |
--------------------------------------------------------------------------------
/readme.txt:
--------------------------------------------------------------------------------
1 |
2 | OBJECTIVE 🎯
3 | -----------
4 | After creating a fresh new VM/VPS/server (and adding a SSH key while doing so)
5 | I should just be able to run one command on my PC, to have the new remote system
6 | perfectly setup just how I like it.
7 |
8 | Thanks to Ansible, everything is nice 'n easy, automated, repeatable and safe 😊
9 | So all my fresh machines can be correctly configured, secured, backed up, and
10 | actually usable and ready to go!
11 |
12 | I've documented the playbooks, usage guide and some info about Ansible below...
13 |
14 | ================================================================================
15 |
16 | JOBS 👔
17 | -------
18 | ➡️ RECOMMENDED TASKS:
19 | ⚒️ Essentials:
20 | ├── ✅ Apt - Configures repositories and updates packages
21 | ├── ✅ Packages - Installs essential packages
22 | ├── ✅ User accounts - Creates user(s) and sets permissions
23 | ├── ✅ SSH - Configures and hardens SSH access
24 | ├── ✅ Timezone - Sets timezone and NTP server
25 | ├── ✅ Hostname - Sets hostname and configures hosts
26 | ├── ✅ Firewall - Sets UFW rules
27 | ├── ✅ Mail - Sets up Postfix (for notification sending)
28 | └── ✅ Updates - Enables unattended upgrades
29 |
30 | ➡️ OPTIONAL TASKS:
31 | ⚙️ Configs:
32 | ├── ☑️ Packages - Installs extra packages, for easier management
33 | └── ☑️ Dotfiles - Configures settings for CLI utils and apps
34 |
35 | 💾 Backups
36 | └── ☑️ Backups - Sets up automated, encrypted, incremental Borg backups
37 |
38 | 🔑 Access:
39 | ├── ☑️ VPN - Sets up and secures Wireguard VPN
40 | └── ☑️ Cockpit - Sets up Cockpit for easy management via UI
41 |
42 | 🖥️ Apps and Services
43 | ├── ☑️ Docker - Installs and configures Docker (if needed)
44 | └── ☑️ Proxy - Sets up Caddy (only if not using Docker)
45 |
46 | 🔒 Security:
47 | ├── ☑️ System hardening - Implements some DevSec security baselines
48 | ├── ☑️ AppArmor - Sets up profiles for process confinement
49 | ├── ☑️ Intrusion detection - Configures Fail2ban to block brute-force
50 | ├── ☑️ Integrity monitoring - Sets up and automates OSSEC
51 | ├── ☑️ Malware scanning - Sets up daily Maldet scans and reporting
52 | └── ☑️ Security audits - Enables daily Lynis audits and reporting
53 |
54 | 📊 Monitoring:
55 | ├── ☑️ Log storage – Loki for ingesting and aggregating all logs
56 | ├── ☑️ Log shipping – Grafana Agent, pushes logs and metrics to Loki
57 | ├── ☑️ Metrics collection – Grafana Agent, pushing metrics into Prometheus
58 | ├── ☑️ Metrics storage – Prometheus for storing and querying metrics
59 | ├── ☑️ Visualization – Grafana for dashboards from Loki and Prometheus
60 | ├── ☑️ Alerting – Alertmanager for triggering critical notifications
61 | ├── ☑️ Log rotation - Sets up logrotate for all logs, so they don't get big
62 | └── ☑️ Monit - Monitors services and restarts them if they fail
63 |
64 | Note about Docker:
65 | On servers where Docker is used for deploying services, some of the apps above
66 | can/will be skipped. Such as Caddy, Grafana and Alertmanager
67 | Because it's better to run them in containers instead of directly on the host.
68 | For the Docker stacks, see my compose in: https://github.com/Lissy93/dockyard
69 |
70 | ================================================================================
71 |
72 | USAGE 🛠️
73 | --------
74 | STEP 0: PREREQUISITES
75 | - Ensure Python (3.8+) and Ansible are installed (2.18+) on your local system
76 | - Fetch external roles: `ansible-galaxy install -r requirements.yml`
77 | - Create a new remote server (if u like). And ensure you have SSH access to it
78 |
79 | STEP 1: SERVERS
80 | - Add servers. Create `inventories/remote.yml`
81 |
82 | STEP 2: CONFIGURATION
83 | - Add variables to `inventories/group_vars/remote.yml` or `all.yml` (shared)
- Best to put secret variables in an Ansible vault (see instructions below)
85 |
STEP 3: RUNNING
87 | - Use the commands in the Makefile to execute the playbooks.
88 | - First run: `make first-run`
89 | - Subsequent runs: `make run`
90 |
91 | ================================================================================
92 |
93 | COMMANDS 💲
94 | -----------
95 | Basics:
96 | - `make` - View all available commands and our man page
97 | - `make run` - Run all playbooks (as normal user with new SSH settings)
- `make <role>`     - Run a specific role (e.g. `make ssh`)
- `make <playbook>` - Run a specific playbook (e.g. `make security`)
100 |
101 | Setup:
102 | - `make install-ansible` - Install Ansible (on host)
103 | - `make requirements` - Downloads external roles from Ansible Galaxy
104 | - `make scaffold-hosts` - Creates inventory template (for you to fill in)
105 | - `make first-run` - First run on a fresh server (as root)
106 |
107 | Other:
108 | - `make lint` - Run Ansible-lint on all playbooks and roles
109 | - `make docs` - Generates documentation for roles and playbooks
110 | - `make test` - Runs all tests on playbooks and hosts
111 |
112 | Native Ansible Commands:
- Run a playbook on specific servers:
  > ansible-playbook -i inventories/<inventory>.yml playbooks/<playbook>.yml
- Run only roles with a specific tag:
  > ansible-playbook -i inventories/<inventory>.yml playbooks/<playbook>.yml --tags <tag>
- Run a playbook on a specific server:
  > ansible-playbook -i inventories/<inventory>.yml playbooks/<playbook>.yml -l <host>
- Do a dry-run, and preview what changes will be made before applying:
  > ansible-playbook -i inventories/<inventory>.yml playbooks/<playbook>.yml --check --diff
- Run an ad-hoc command on servers in an inventory:
  > ansible db -i inventories/<inventory>.yml -m shell -a "<command>"
123 |
124 | ================================================================================
125 |
126 | ADDING SERVERS 🖥️
127 | -----------------
128 | Define your list of hosts (servers to manage) in the inventory file(s).
129 | The path which ansible looks for hosts in, is specified in ./ansible.cfg
130 | If it's your first time, you can run `make scaffold-hosts` to create a template
Then complete the values in ./inventories/production/hosts.yml, like this:
132 |
133 | all:
134 | hosts:
135 | my-server:
136 | ansible_host: 111.111.111.111
137 | ansible_user: bob
138 | ansible_python_interpreter: /usr/bin/python3
139 | my-other-server:
140 | ansible_host: 000.000.000.000
141 | ansible_user: alice
142 | ansible_port: 22
143 | ansible_python_interpreter: /usr/bin/python3
144 |
145 | ================================================================================
146 |
147 | ADDING VARIABLES 🗂️
148 | --------------------
- Defaults are defined per-role in: ./roles/<role>/defaults/main.yml
- But you can (and should) override in: ./inventories/group_vars/all.yml
- Or, set host-specific vars, in: ./inventories/host_vars/<hostname>.yml
152 | - Secrets should be stored in a vault: ./inventories/group_vars/vault.yml
153 | 1. Create a vault: `ansible-vault create ./inventories/group_vars/vault.yml`
154 | 2. Edit the vault: `ansible-vault edit ./inventories/group_vars/vault.yml`
155 | 3. Use the vault by adding the `--ask-vault-pass` flag when running a playbook
156 |
157 | ================================================================================
158 |
159 | WHAT'S ANSIBLE, AND WHY USE IT? ❓
160 | ----------------------------------
161 | Ansible is a simple, open source, agentless tool for automating anything.
162 | Just describe how you want your system to look, and Ansible will ensure
163 | the state is met.
164 |
165 | 10 Reasons why I love Ansible
166 | Unlike Bash scripts or other alternatives...
167 | 1. Ansible is idempotent, so you can run it as many times as you like,
168 | and it will only make changes if the system is not in the desired state.
169 | 2. Ansible is agentless, there's nothing to install on any of your systems.
170 | 3. Ansible is declarative, you don't have to worry about the order of operations.
171 | 4. Ansible is reusable and x-platform. Write playbooks once, run them anywhere.
172 | 5. Ansible is scalable. Run it on a single host or thousands of servers at once.
173 | 6. Ansible is extensible. There's thousands of playbooks on Galaxy,
174 | or you can write your own modules in any language you want.
175 | 7. Ansible is simple. No finicky scripts, just self-documenting YAML declarations.
176 | 8. Ansible is powerful. Doing anything from simple tasks to complex orchestration.
177 | 9. Ansible is safe. Preview changes to be made (--diff), or do a dry-run (--check)
178 | 10. Ansible is configurable. Use built-in or custom 'facts' to customize playbooks.
179 |
180 | Read the Ansible docs at:
181 | https://docs.ansible.com/ansible/latest/getting_started/introduction.html
182 |
183 | ================================================================================
184 |
185 | ANSIBLE BASICS 📚
186 | -----------------
187 | Terminology:
188 | - Inventories = Who to configure (a list of hosts/servers)
189 | - Playbooks = What to do (at a high level, collection of roles)
190 | - Roles = Reusable collections of logic, made up of tasks
191 | - Tasks = The actual code that Ansible runs (usually YAML)
192 | - Plugins = Reusable code snippets (usually Python) that extend Ansible
193 | - Vars = Variables used to specify values for each playbook/role/task
194 | - ansible.cfg = The config file that tells Ansible where to find things
195 |
196 | Structure:
197 | Ansible projects follow a specific directory structure, as illustrated below:
198 | The top-level directories are:
199 | - inventories/ - Where you list the hosts/servers that changes will be applied to
200 | - playbooks/ - The playbooks that run the roles, and define the order of execution
201 | - roles/ - The individual roles, made up of tasks, handlers, and other files
202 |
203 | This is the structure of my project:
204 |
205 | .
206 | ├── ansible.cfg # Config: inventory paths, plugin dirs, etc
207 | ├── callback_plugins/ # Custom Ansible callback plugins
208 | │ └── pretty.py # Emoji & color stdout formatting
209 | ├── inventories/ # Host/group definitions and vars
210 | │ └── production/ # Remote production inventory
211 | │ ├── hosts.yml # Inventory host list
212 | │ ├── group_vars/ # Variables applied by group
213 | │ │ └── all.yml # Variables to be shared by all servers in this group
214 | │ └── host_vars/ # Per-host variables
215 | │ └── serv1.yml # Example: Add a file for each of your servers
216 | ├── Makefile # Shortcut targets for playbook runs
217 | ├── playbooks/ # Playbooks invoking roles by concern
218 | │ ├── all.yml # Main "run everything" playbook
219 | │ ├── access.yml # VPN & Cockpit setup
220 | │ ├── backups.yml # Borg backup tasks
221 | │ ├── configs.yml # General configuration tasks
222 | │ ├── essentials.yml # Core hardening, compliance, audits
223 | │ ├── monitoring.yml # Loki, Prometheus & Grafana setup
224 | │ ├── security.yml # Fail2ban, OSSEC, Maldet, Lynis, AppArmor
225 | │ └── services.yml # Docker, Caddy proxy, and other services
226 | ├── readme.txt # Project overview & usage instructions
227 | ├── requirements.yml # Galaxy roles/collections to install
228 | └── roles/ # Reusable Ansible roles (one dir per role)
229 | ├── apparmor # AppArmor profile deployment
230 | ├── blackbox_exporter # Prometheus Blackbox Exporter
231 | ├── borg # Automated Borg backups
232 | ├── cockpit # Cockpit management UI
233 | ├── directories # Directory structure setup
234 | ├── docker # Docker engine install & config
235 | ├── dotfiles # User dotfiles deployment
236 | ├── fail2ban # Intrusion-detection rules
237 | ├── firewall # UFW firewall configuration
238 | ├── geerlingguy.docker# Docker setup, from geerlingguy
239 | ├── geerlingguy.pip # Pip/Python setup, from geerlingguy
240 | ├── grafana # Grafana install, config & provisioning
241 | ├── grafana-agent # Grafana Agent setup & metrics checks
242 | ├── hostname # Hostname & /etc/hosts management
243 | ├── installs # Miscellaneous package installs
244 | ├── logrotate # Logrotate config and templates
245 | ├── loki # Loki log-aggregation setup
246 | ├── lynis # Automated Lynis security audits
247 | ├── maldet # Linux Malware Detect integration
248 | ├── monit # Service monitoring with Monit
249 | ├── node_exporter # Prometheus Node Exporter
250 | ├── prometheus # Prometheus server setup & config
251 | ├── ssh # SSH hardening & key management
252 | ├── tailscale # Tailscale VPN installation
253 | ├── timezone # Timezone & NTP configuration
254 | └── users # User account & permission management
255 |
256 | ================================================================================
257 |
258 | TROUBLESHOOTING 🫨
259 | ------------------
260 | 1. Ansible requires the locale encoding to be UTF-8; Detected None.
261 | - Fix: set a UTF-8 locale, e.g. `export LC_ALL=en_US.UTF-8`
262 | - Or run `locale -a` to see your locales and set with `LC_ALL='en_US.UTF-8'`
263 |
264 | 2. Failed to connect to the host via ssh
265 | - Ensure you have run `make initial-apply` before running `make apply`
266 | - Check the ansible_user and ansible_host variables in your inventory file
267 | - Check SSH access to the server. Ensure you can SSH in manually.
268 |
269 | 3. The task includes an option with an undefined variable.. '___' is undefined
270 | - Define that variable in `./inventories/production/group_vars/all.yml` or elsewhere
271 |
272 | 4. Unable to encrypt nor hash, passlib must be installed
274 | - Install passlib, with: `pip install passlib`
274 |
275 | 5. YAML syntax or Jinja2 template errors
276 | - Check your YAML syntax with: `yamllint <file>.yml`
277 | - Check your Jinja2 with: `ansible-playbook --syntax-check <file>.yml`
278 |
279 | 6. The role 'foo' was not found
280 | - Install external roles with: `ansible-galaxy install -r requirements.yml`
281 | - And double check the `roles_path` and `collections_paths` in ansible.cfg
282 |
283 | 7. Help, my terminal is full of talking cows!
284 | - This happens because you have `cowsay` installed 🐮😉
285 | - Just set: `nocows=1` in your ansible.cfg file
286 |
287 | ================================================================================
288 |
289 | WARNING ⚠️
290 | ----------
291 | Should you use this? ...Probably not.
292 | Because it's really easy to create your own Ansible playbooks,
293 | and they will be better tailored to your specific needs.
294 | (Also I don't much want to be responsible if something goes wrong! 🫣)
295 |
296 | But feel free to use or copy-paste whichever parts you like into your setup 🫶
297 |
298 | IMPORTANT: Read through the playbooks and roles before running them.
299 | And make sure you understand what they do, to avoid any surprises!
300 |
301 | ================================================================================
302 |
303 | LICENSE 📃
304 | ----------
305 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE [Version 2, December 2004]
306 | Copyright (C) 2025 Alicia Sykes
307 |
308 | Everyone is permitted to copy and distribute verbatim or modified
309 | copies of this license document, and changing it is allowed as long
310 | as the name is changed.
311 |
312 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
313 | 0. You just do whatever the fuck you want to
314 |
--------------------------------------------------------------------------------